11541615
|
class Wrapper:
"""
Wraps an object and pretends to be it.
Can be modified/extended without affecting the underlying object.
"""
def __init__(self, target, *, affect_only_functions=True):
self._target = target
def __bool__(self):
return bool(self._target)
def __call__(self, *args, **kwargs):
return self._target(*args, **kwargs)
def __dir__(self):
return dir(self._target)
def __eq__(self, other):
return other == self._target
def __enter__(self):
return self._target.__enter__()
def __exit__(self, *args, **kwargs):
return self._target.__exit__(*args, **kwargs)
def __float__(self):
return float(self._target)
def __getattr__(self, name):
return getattr(self._target, name)
def __ge__(self, other):
return self._target >= other
def __getitem__(self, key):
return self._target[key]
def __gt__(self, other):
return self._target > other
def __hash__(self):
return hash(self._target)
def __int__(self):
return int(self._target)
def __instancecheck__(self, instance):
return isinstance(instance, self._target)
def __iter__(self):
return iter([self.__class__(thing) for thing in self._target])
def __le__(self, other):
return self._target <= other
def __len__(self):
return len(self._target)
def __lt__(self, other):
return self._target < other
def __ne__(self, other):
return other != self._target
def __next__(self):
return next(self._target)
def __str__(self):
return str(self._target)
def __subclasscheck__(self, subclass):
return issubclass(subclass, self._target)
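# A minimal usage sketch (editor's addition, not part of the original class): attribute
# access, comparison, length and iteration are all delegated to the wrapped object.
if __name__ == '__main__':
    numbers = Wrapper([1, 2, 3])
    assert len(numbers) == 3
    assert numbers == [1, 2, 3]
    assert numbers.count(2) == 1  # unknown attributes fall through __getattr__ to the list
    assert [int(n) for n in numbers] == [1, 2, 3]  # __iter__ re-wraps each element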
|
11541637
|
import time
import numpy as np
from Little import solve_tsp
from utils import read_graph_matrix, draw_graph_with_path
if __name__ == '__main__':
m = read_graph_matrix('lab5.in')
out = open('report.md', 'w')
out.write('# lab5\nPazhitnykh Ivan\n\n* Solve **Travelling Salesman Problem**, with matrix:\n')
out.write('```\n{}\n```\n'.format(np.array(m)))
start_time = time.time()
cost, path = solve_tsp(m)
print('Cost = ', cost)
print('Path = ', path)
print('Time (s)', time.time() - start_time)
out.write('* Branch and bound tree:\n')
out.write('\n')
draw_graph_with_path(m, path)
out.write('* Graph with Hamilton cycle for TSP:\n')
out.write('\n')
out.write('\n* Minimal cost:\n')
total = 0
expression = ''
for i, j in path:
expression = '{0} + {1}'.format(expression, int(m[i][j]))
total += m[i][j]
out.write('`C = {0} = {1}`'.format(expression[2:], int(total)))
out.close()
|
11541639
|
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Distutils import build_ext
import numpy
import os, tempfile, subprocess, shutil
# see http://openmp.org/wp/openmp-compilers/
omp_test = r"""#include <omp.h>
#include <stdio.h>
int main() {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
}
"""
def has_openmp():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
filename = r'test.c'
test_file = open(filename, 'w')
test_file.write(omp_test)
test_file.close()
with open(os.devnull, 'w') as fnull:
result = subprocess.call(['cc', '-fopenmp', filename], stdout=fnull,
stderr=fnull)
os.chdir(curdir)
#clean up
shutil.rmtree(tmpdir)
return result == 0
ceres_include = "/usr/local/include/ceres/"
ceres_lib = "/usr/local/lib/"
gflags_lib = "/usr/local/lib/"
glog_lib = "/usr/local/lib/"
cholmod_lib = amd_lib = camd_lib = colamd_lib = "/usr/local/lib/"
cxsparse_lib = "/usr/local/lib/"
extra_compile_args = ['-O3']
extra_link_args = []
if has_openmp():
extra_compile_args = ['-fopenmp']
extra_link_args = ['-lgomp']
ext_modules = [
Extension(
"cyres",
["cyres/src/cyres.pyx", "cyres/src/cyres.pxd", "cyres/src/ceres.pxd"],
language="c++",
include_dirs=[ceres_include, numpy.get_include()],
libraries=['ceres', 'gflags', 'glog', "cholmod", "camd", "amd", "colamd", "cxsparse"],
library_dirs=[ceres_lib, gflags_lib, glog_lib, cholmod_lib, amd_lib, camd_lib, colamd_lib, cxsparse_lib],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
]
setup(
name = 'cyres',
version='0.0.1',
cmdclass = {'build_ext': build_ext},
ext_package = 'cyres',
ext_modules = ext_modules,
packages= ['cyres'],
package_data={'cyres': ['src/*.pxd']},
scripts=['scripts/cyresc']
)
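# Typical build/install invocation for this setup script (editor's note), assuming Cython,
# numpy and the ceres/glog/gflags/SuiteSparse libraries are installed at the paths above:
#   python setup.py build_ext --inplace
#   python setup.py install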
|
11541694
|
import h5py
import sys
file = h5py.File(sys.argv[1], "r")
#for chr in file.keys():
# data = file[chr][:]
# print(chr, data.mean())
for line in open(sys.argv[2], "r"):
line = line.split("\t")
c,b,e=line[0], int(line[1]), int(line[2])
print(c,b,e, file[c][b:e].mean())
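# (Editor's note) sys.argv[1] is an HDF5 file with one dataset per chromosome, and
# sys.argv[2] is a tab-separated interval file (chrom<TAB>start<TAB>end); for each
# interval the mean of the values in that slice of the matching dataset is printed.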
|
11541700
|
import random
import time
import pickle
from tqdm import tqdm
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model.attention_ocr import OCR
from utils.dataset import CaptchaDataset
from utils.train_util import train_batch, eval_batch
device = 'cuda'
def main():
img_width = 160
img_height = 60
max_len = 4
nh = 512
teacher_forcing_ratio = 0.5
batch_size = 32
lr = 3e-4
n_epoch = 100
n_works = 8
save_checkpoint_every = 5
ds_train = CaptchaDataset(img_width, img_height, 10000, max_len)
ds_test = CaptchaDataset(img_width, img_height, 1000, max_len)
tokenizer = ds_train.tokenizer
train_loader = DataLoader(ds_train, batch_size=batch_size, shuffle=True, num_workers=n_works)
test_loader = DataLoader(ds_test, batch_size=batch_size, shuffle=False, num_workers=n_works)
model = OCR(img_width, img_height, nh, tokenizer.n_token,
max_len + 1, tokenizer.SOS_token, tokenizer.EOS_token).to(device=device)
load_weights = torch.load('./inception_v3_google-1a9a5a14.pth')
names = set()
for k, w in model.incept.named_children():
names.add(k)
weights = {}
for k, w in load_weights.items():
if k.split('.')[0] in names:
weights[k] = w
model.incept.load_state_dict(weights)
optimizer = optim.Adam(model.parameters(), lr=lr)
crit = nn.NLLLoss().cuda()
def train_epoch():
sum_loss_train = 0
n_train = 0
sum_acc = 0
sum_sentence_acc = 0
for bi, batch in enumerate(tqdm(train_loader)):
x, y = batch
x = x.to(device=device)
y = y.to(device=device)
loss, acc, sentence_acc = train_batch(x, y, model, optimizer,
crit, teacher_forcing_ratio, max_len,
tokenizer)
sum_loss_train += loss
sum_acc += acc
sum_sentence_acc += sentence_acc
n_train += 1
return sum_loss_train / n_train, sum_acc / n_train, sum_sentence_acc / n_train
def eval_epoch():
sum_loss_eval = 0
n_eval = 0
sum_acc = 0
sum_sentence_acc = 0
for bi, batch in enumerate(tqdm(test_loader)):
x, y = batch
x = x.to(device=device)
y = y.to(device=device)
loss, acc, sentence_acc = eval_batch(x, y, model, crit, max_len, tokenizer)
sum_loss_eval += loss
sum_acc += acc
sum_sentence_acc += sentence_acc
n_eval += 1
return sum_loss_eval / n_eval, sum_acc / n_eval, sum_sentence_acc / n_eval
for epoch in range(n_epoch):
train_loss, train_acc, train_sentence_acc = train_epoch()
eval_loss, eval_acc, eval_sentence_acc = eval_epoch()
print("Epoch %d" % epoch)
print('train_loss: %.4f, train_acc: %.4f, train_sentence: %.4f' % (train_loss, train_acc, train_sentence_acc))
print('eval_loss: %.4f, eval_acc: %.4f, eval_sentence: %.4f' % (eval_loss, eval_acc, eval_sentence_acc))
if epoch % save_checkpoint_every == 0 and epoch > 0:
print('saving checkpoint...')
torch.save(model.state_dict(), './chkpoint/time_%s_epoch_%s.pth' % (time.strftime('%Y-%m-%d_%H-%M-%S'), epoch))
if __name__ == '__main__':
main()
|
11541705
|
from uploader import AbstractUploader
class Uploader(AbstractUploader):
@property
def request_url(self) -> str:
return 'https://mp.yidianzixun.com/upload?action=uploadimage'
@property
def file_key(self) -> str:
return 'upfile'
@property
def parsed(self) -> str:
return self.request.json()['url']
|
11541709
|
from datetime import datetime
import pytest
from advent_readme_stars.advent import most_recent_advent_year
@pytest.mark.parametrize(
"now, expected",
[
(datetime(1999, 11, 30), 1998),
(datetime(1999, 12, 1), 1999),
(datetime(1999, 12, 31), 1999),
(datetime(2000, 1, 1), 1999),
],
)
def test_most_recent_advent_year(now, expected):
assert most_recent_advent_year(now) == expected
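# For reference, a sketch consistent with the parametrized cases above (editor's addition;
# the real implementation lives in advent_readme_stars.advent):
def most_recent_advent_year_sketch(now: datetime) -> int:
    # Advent of Code starts on December 1st, so before December the most recent event is last year's
    return now.year if now.month == 12 else now.year - 1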
|
11541791
|
from vel.api.base import Model
from torch.optim import Optimizer
class OptimizerFactory:
""" Base class for optimizer factories """
def instantiate(self, model: Model) -> Optimizer:
raise NotImplementedError
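# A hypothetical concrete factory (editor's sketch, not part of vel), assuming Model exposes
# parameters() like a torch nn.Module:
class AdamOptimizerFactory(OptimizerFactory):
    """ Factory building a torch.optim.Adam optimizer for the model's parameters """
    def __init__(self, lr: float = 1e-3):
        self.lr = lr

    def instantiate(self, model: Model) -> Optimizer:
        from torch.optim import Adam
        return Adam(model.parameters(), lr=self.lr)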
|
11541823
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import modules.registry as registry
from .innerproduct_similarity import InnerproductSimilarity
@registry.Query.register("DN4")
class DN4(nn.Module):
def __init__(self, in_channels, cfg):
super().__init__()
self.n_way = cfg.n_way
self.k_shot = cfg.k_shot
self.neighbor_k = cfg.model.nbnn_topk
self.inner_simi = InnerproductSimilarity(cfg, metric='cosine')
self.temperature = cfg.model.temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, support_xf, support_y, query_xf, query_y):
b, q, c, h, w = query_xf.shape
s = support_xf.shape[1]
innerproduct_matrix = self.inner_simi(support_xf, support_y, query_xf, query_y)
topk_value, _ = torch.topk(innerproduct_matrix, self.neighbor_k, -1) # [b, q, N, M_q, neighbor_k]
similarity_matrix = topk_value.mean(-1).view(b, q, self.n_way, -1).sum(-1)
similarity_matrix = similarity_matrix.view(b * q, self.n_way)
query_y = query_y.view(b * q)
if self.training:
loss = self.criterion(similarity_matrix / self.temperature, query_y)
return {"dn4_loss": loss}
else:
_, predict_labels = torch.max(similarity_matrix, 1)
rewards = [1 if predict_labels[j] == query_y[j].to(predict_labels.device) else 0 for j in
range(len(query_y))]
return rewards
|
11541825
|
import os
import random
import signal
import subprocess
import time
import unittest
import warnings
import psutil # type: ignore
from os import path
from typing import List, Optional, Tuple
from tests.utils.project import (
GenerateProjectFromFixture,
GenerateProjectWithPyProjectToml,
TempProjectDir
)
class TaskipyTestCase(unittest.TestCase):
def setUp(self):
self._tmp_dirs: List[TempProjectDir] = []
def tearDown(self):
for tmp_dir in self._tmp_dirs:
tmp_dir.clean()
def run_task(self, task: str, args: Optional[List[str]] = None, cwd=os.curdir) -> Tuple[int, str, str]:
proc = self.start_taskipy_process(task, args=args, cwd=cwd)
stdout, stderr = proc.communicate()
return proc.returncode, stdout.decode(), str(stderr)
def start_taskipy_process(self, task: str, args: Optional[List[str]] = None, cwd=os.curdir) -> subprocess.Popen:
executable_path = path.abspath('task')
args = args or []
return subprocess.Popen([executable_path, task] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
def create_test_dir_from_fixture(self, fixture_name: str):
project_generator = GenerateProjectFromFixture(path.join('tests', 'fixtures', fixture_name))
tmp_dir = TempProjectDir(project_generator)
self._tmp_dirs.append(tmp_dir)
return tmp_dir.path
def create_test_dir_with_py_project_toml(self, py_project_toml: str):
project_generator = GenerateProjectWithPyProjectToml(py_project_toml)
tmp_dir = TempProjectDir(project_generator)
self._tmp_dirs.append(tmp_dir)
return tmp_dir.path
# pylint: disable=invalid-name
def assertSubstr(self, substr: str, full_string: str):
self.assertTrue(substr in full_string, msg=f'Expected \n "{substr}"\nto be in\n "{full_string}"')
# pylint: disable=invalid-name
def assertNotSubstr(self, substr: str, full_string: str):
self.assertFalse(substr in full_string, msg=f'Expected \n "{substr}"\nnot to be in\n "{full_string}"')
# pylint: disable=invalid-name
def assertSubstrsInOrder(self, substrs: List[str], full_string: str):
self.assertGreater(len(substrs), 0, 'please provide at least one substr')
for substr_a, substr_b in zip(substrs[:-1], substrs[1:]):
self.assertSubstr(substr_a, full_string)
self.assertSubstr(substr_b, full_string)
self.assertLess(full_string.find(substr_a),
full_string.find(substr_b),
msg=f'Expected \n "{substr_a}"\nto appear before\n "{substr_b}"\nin\n "{full_string}"')
def assertTerminalTextEqual(self, expected: str, actual: str):
expected_without_ansi_chars = expected.encode('ascii', 'ignore')
actual_without_ansi_chars = actual.encode('ascii', 'ignore')
self.assertEqual(expected_without_ansi_chars, actual_without_ansi_chars)
class RunTaskTestCase(TaskipyTestCase):
def test_running_task(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
self.run_task('create_hello_txt', cwd=cwd)
with open(path.join(cwd, 'hello.txt'), 'r', encoding='utf-8') as f:
hello_file_contents = f.readline().strip()
self.assertEqual(hello_file_contents, 'hello, world')
def test_exit_code_matches_task_exit_code(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
exit_code, _, _ = self.run_task('exit_17', cwd=cwd)
self.assertEqual(exit_code, 17)
def test_stdout_contains_task_stdout(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
_, stdout, _ = self.run_task('print_hello_stdout', cwd=cwd)
self.assertSubstr('hello stdout', stdout)
def test_stderr_contains_task_stderr(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
_, _, stderr = self.run_task('print_hello_stderr', cwd=cwd)
self.assertSubstr('hello stderr', stderr)
class TaskPrePostHooksTestCase(TaskipyTestCase):
def test_running_pre_task_hook(self):
cwd = self.create_test_dir_from_fixture('project_with_pre_post_task_hooks')
_, stdout, _ = self.run_task('hello', cwd=cwd)
self.assertSubstrsInOrder(['pre_task', 'hello'], stdout)
def test_running_post_task_hook(self):
cwd = self.create_test_dir_from_fixture('project_with_pre_post_task_hooks')
_, stdout, _ = self.run_task('hello', cwd=cwd)
self.assertSubstrsInOrder(['hello', 'post_task'], stdout)
def test_exiting_after_pre_task_hook_if_exit_code_not_0(self):
cwd = self.create_test_dir_from_fixture('project_with_pre_post_task_hooks')
exit_code, stdout, _ = self.run_task('hello_failed_pretask', cwd=cwd)
self.assertSubstr('pre_task', stdout)
self.assertNotSubstr('hello', stdout)
self.assertEqual(exit_code, 1)
def test_exiting_with_post_task_hook_exit_code_if_not_0(self):
cwd = self.create_test_dir_from_fixture('project_with_pre_post_task_hooks')
exit_code, stdout, _ = self.run_task('hello_failed_posttask', cwd=cwd)
self.assertSubstr('post_task', stdout)
self.assertEqual(exit_code, 1)
class PassArgumentsTestCase(TaskipyTestCase):
def test_running_task_with_positional_arguments(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_accept_arguments')
some_random_number = random.randint(1, 1000)
exit_code, stdout, _ = self.run_task('echo_number', args=[f'{some_random_number}'], cwd=cwd)
self.assertSubstr(f'the number is {some_random_number}', stdout)
self.assertEqual(exit_code, 0)
def test_running_task_with_named_arguments(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_accept_arguments')
exit_code, stdout, _ = self.run_task('echo_named', args=['-h'], cwd=cwd)
self.assertSubstr('got a named argument -h', stdout)
self.assertEqual(exit_code, 0)
def test_running_task_with_multiple_arguments(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_accept_arguments')
args = ['one', 'two', 'three', 'four', 'five']
exit_code, stdout, _ = self.run_task('echo_args_count', args=args, cwd=cwd)
self.assertSubstr('the argument count is 5', stdout)
self.assertEqual(exit_code, 0)
def test_running_task_with_arguments_with_spaces(self):
cwd = self.create_test_dir_from_fixture('project_with_task_that_checks_args_passed_with_spaces')
name = '<NAME>'
age = random.randrange(1, 100)
exit_code, stdout, _ = self.run_task('identify', args=['--full-name', name, '--age', f'{age}'], cwd=cwd)
self.assertSubstr(f'name: {name} age: {age}', stdout)
self.assertEqual(exit_code, 0)
def test_running_task_arguments_not_passed_to_pre_hook(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_accept_arguments')
some_random_number = random.randint(1, 1000)
exit_code, stdout, _ = self.run_task('echo_on_prehook', args=[f'{some_random_number}'], cwd=cwd)
self.assertSubstr('the number in prehook is', stdout)
self.assertNotSubstr(f'the number in prehook is {some_random_number}', stdout)
self.assertEqual(exit_code, 0)
def test_running_task_arguments_not_passed_to_post_hook(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_accept_arguments')
some_random_number = random.randint(1, 1000)
exit_code, stdout, _ = self.run_task('echo_on_posthook', args=[f'{some_random_number}'], cwd=cwd)
self.assertSubstr('the number in posthook is', stdout)
self.assertNotSubstr(f'the number in posthook is {some_random_number}', stdout)
self.assertEqual(exit_code, 0)
class ListTasksTestCase(TaskipyTestCase):
project_tasks_output = "\n".join([
"one echo first task",
"two echo second task",
"three echo third task",
])
def test_running_task_list(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_to_list')
exit_code, stdout, _ = self.run_task('--list', cwd=cwd)
self.assertTerminalTextEqual(self.project_tasks_output, stdout.strip())
self.assertEqual(exit_code, 0)
def test_running_task_list_with_shorthand(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_to_list')
exit_code, stdout, _ = self.run_task('-l', cwd=cwd)
self.assertTerminalTextEqual(self.project_tasks_output, stdout.strip())
self.assertEqual(exit_code, 0)
def test_running_task_list_before_name(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_to_list')
# anything following the flag should be ignored
exit_code, stdout, _ = self.run_task('--list', ['one'], cwd=cwd)
self.assertTerminalTextEqual(self.project_tasks_output, stdout.strip())
self.assertEqual(exit_code, 0)
def test_running_task_list_with_arg(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_to_list')
# when --list follows the task name it should be passed as an argument
exit_code, stdout, _ = self.run_task('one', ['--list'], cwd=cwd)
expected = "first task --list"
self.assertTerminalTextEqual(expected, stdout.strip())
self.assertEqual(exit_code, 0)
class TaskDescriptionTestCase(TaskipyTestCase):
def test_running_task_with_description(self):
py_project_toml = '''
[tool.taskipy.tasks]
print_age = { cmd = "echo age is 29", help = "prints the age" }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
_, stdout, _ = self.run_task('print_age', cwd=cwd)
self.assertSubstr('age is 29', stdout)
def test_listing_task_with_description(self):
py_project_toml = '''
[tool.taskipy.tasks]
print_age = { cmd = "echo age is 29", help = "prints the age" }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
_, stdout, _ = self.run_task('--list', cwd=cwd)
self.assertSubstr('prints the age', stdout)
def test_reject_task_for_not_having_cmd(self):
py_project_toml = '''
[tool.taskipy.tasks]
print_age = { help = "prints the age" }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('print_age', cwd=cwd)
self.assertEqual(exit_code, 1)
self.assertSubstr('the task item does not have the "cmd" property', stdout)
def test_allow_task_to_not_have_help(self):
py_project_toml = '''
[tool.taskipy.tasks]
print_age = { cmd = "echo age is 29" }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('print_age', cwd=cwd)
self.assertEqual(exit_code, 0)
self.assertSubstr('age is 29', stdout)
def test_reject_task_that_is_not_string_nor_object(self):
py_project_toml = '''
[tool.taskipy.tasks]
print_age = 5
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('print_age', cwd=cwd)
self.assertEqual(exit_code, 1)
self.assertSubstr('tasks must be strings, or dicts that contain { cmd, help, use_vars }', stdout)
class TaskRunFailTestCase(TaskipyTestCase):
def test_exiting_with_code_127_and_printing_if_task_not_found(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
exit_code, stdout, _ = self.run_task('task_that_does_not_exist', cwd=cwd)
self.assertSubstr('could not find task "task_that_does_not_exist"', stdout)
self.assertEqual(exit_code, 127)
def test_exiting_with_code_127_and_printing_if_no_arg_is_passed(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_and_tasks')
executable_path = path.abspath('task')
proc = subprocess.Popen(
executable_path,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
stdout, _ = proc.communicate()
self.assertSubstr('usage: task', stdout.decode())
self.assertEqual(proc.returncode, 127)
def test_exiting_with_code_127_and_printing_if_no_tasks_section(self):
cwd = self.create_test_dir_from_fixture('project_with_pyproject_without_tasks_section')
exit_code, stdout, _ = self.run_task('task_that_does_not_exist', cwd=cwd)
self.assertSubstr('no tasks found. add a [tool.taskipy.tasks] section to your pyproject.toml', stdout)
self.assertEqual(exit_code, 127)
def test_exiting_with_code_1_and_printing_if_no_pyproject_toml_file_found(self):
cwd = self.create_test_dir_from_fixture('project_without_pyproject')
exit_code, stdout, _ = self.run_task('some_task', cwd=cwd)
self.assertSubstr('no pyproject.toml file found in this directory or parent directories', stdout)
self.assertEqual(exit_code, 1)
def test_exiting_with_code_1_and_printing_if_pyproject_toml_file_is_malformed(self):
cwd = self.create_test_dir_from_fixture('project_with_malformed_pyproject')
exit_code, stdout, _ = self.run_task('some_task', cwd=cwd)
self.assertSubstr('pyproject.toml file is malformed and could not be read', stdout)
self.assertEqual(exit_code, 1)
class InterruptingTaskTestCase(TaskipyTestCase):
def setUp(self):
super().setUp()
# suppress resource warnings, as they are false positives caused by psutil
warnings.simplefilter('ignore', category=ResourceWarning)
def interrupt_task(self, process: subprocess.Popen):
psutil_process_wrapper = psutil.Process(process.pid)
processes = psutil_process_wrapper.children(recursive=True)
innermost_process = next(filter(lambda p: p.name().lower().startswith('python'), processes))
innermost_process.send_signal(signal.SIGINT)
def test_handling_sigint_according_to_subprocess_if_it_handles_it_gracefully(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_handle_interrupts')
process = self.start_taskipy_process('run_loop_with_interrupt_handling', cwd=cwd)
time.sleep(.2)
self.interrupt_task(process)
exit_code = process.wait()
self.assertEqual(exit_code, 0)
def test_handling_sigint_according_to_subprocess_if_it_does_not_handle_it_gracefully(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_handle_interrupts')
process = self.start_taskipy_process('run_loop_without_interrupt_handling', cwd=cwd)
time.sleep(.2)
self.interrupt_task(process)
exit_code = process.wait()
self.assertEqual(exit_code, 130)
def test_sigterm_should_be_sent_to_subprocess(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_that_handle_sigterm')
process = self.start_taskipy_process('run_loop_with_sigterm_handling', cwd=cwd)
time.sleep(.2)
process.send_signal(signal.SIGTERM)
exit_code = process.wait()
stdout, _ = process.communicate()
self.assertEqual(exit_code, 123)
self.assertSubstr('sigterm', str(stdout))
class CustomRunnerTestCase(TaskipyTestCase):
def test_running_command_with_custom_runner(self):
py_project_toml = '''
[tool.taskipy.settings]
runner = "time"
[tool.taskipy.tasks]
print_with_python = "python -c 'print(1337)'"
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
_, _, stderr = self.run_task('print_with_python', cwd=cwd)
time_cmd_output_format = 'user'
self.assertSubstr(time_cmd_output_format, stderr)
def test_running_command_with_custom_runner_with_trailing_space(self):
py_project_toml = '''
[tool.taskipy.settings]
runner = "time "
[tool.taskipy.tasks]
print_with_python = "python -c 'print(1337)'"
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
_, _, stderr = self.run_task('print_with_python', cwd=cwd)
time_cmd_output_format = 'user'
self.assertSubstr(time_cmd_output_format, stderr)
def test_running_command_with_custom_runner_fails_if_custom_runner_is_not_string(self):
py_project_toml = '''
[tool.taskipy.settings]
runner = 55
[tool.taskipy.tasks]
print_with_python = "python -c 'print(1337)'"
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('print_with_python', cwd=cwd)
self.assertSubstr('invalid value: runner is not a string. please check [tool.taskipy.settings.runner]', stdout)
self.assertEqual(exit_code, 1)
class TaskFromChildTestCase(TaskipyTestCase):
def test_running_parent_pyproject_task_from_child_directory(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_from_child')
_, stdout, _ = self.run_task('print_current_dir_name', cwd=path.join(cwd, 'child_without_pyproject'))
self.assertSubstr('child_without_pyproject', stdout)
def test_find_nearest_pyproject_from_child_directory(self):
cwd = self.create_test_dir_from_fixture('project_with_tasks_from_child')
_, stdout, _ = self.run_task('hello', cwd=path.join(cwd, 'child_with_pyproject'))
self.assertSubstr('hello from child', stdout)
class UseVarsTestCase(TaskipyTestCase):
def test_use_vars_working(self):
py_project_toml = '''
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = { cmd = "echo hello {name:<10}:", use_vars = true }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('hello <NAME> :', stdout)
self.assertEqual(exit_code, 0)
def test_use_vars_no_param(self):
py_project_toml = '''
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = { cmd = "echo hello {name}" }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('hello {name}', stdout)
self.assertEqual(exit_code, 0)
def test_use_vars_param_disabled(self):
py_project_toml = '''
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = { cmd = "echo hello {name}", use_vars = false }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('hello {name}', stdout)
self.assertEqual(exit_code, 0)
def test_use_vars_str_task_no_param(self):
py_project_toml = '''
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = "echo hello {name}"
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('hello {name}', stdout)
self.assertEqual(exit_code, 0)
def test_use_vars_param_malformed(self):
py_project_toml = '''
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = { cmd = "echo hello {name}", use_vars = 1 }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('task\'s "use_vars" arg has to be bool', stdout)
self.assertEqual(exit_code, 1)
def test_use_vars_missing_var(self):
py_project_toml = '''
[tool.taskipy.tasks]
echo = { cmd = "echo hello {name}", use_vars = true }
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr("reason: 'name' variable expected in [pyproject.taskipy.variables", stdout)
self.assertEqual(exit_code, 1)
def test_use_vars_setting(self):
py_project_toml = '''
[tool.taskipy.settings]
use_vars = true
[tool.taskipy.variables]
name = "<NAME>"
[tool.taskipy.tasks]
echo = "echo hello {name:<10}:"
'''
cwd = self.create_test_dir_with_py_project_toml(py_project_toml)
exit_code, stdout, _ = self.run_task('echo', cwd=cwd)
self.assertSubstr('hello <NAME> :', stdout)
self.assertEqual(exit_code, 0)
|
11541860
|
import yaml
import caffe
import numpy as np
from multiprocessing import Process, Queue
from sbir_util.batch_manager import MemoryBlockManager
from image_proc import Transformer
from sample_util import *
class TripletSamplingLayer(caffe.Layer):
def setup(self, bottom, top):
"""Setup the TripletSamplingLayer."""
layer_params = yaml.load(self.param_str)
self.mini_batchsize = layer_params['batch_size']
self.layer_params = layer_params
# anchor
top[0].reshape(self.mini_batchsize, 1, 225, 225)
# pos
top[1].reshape(self.mini_batchsize, 1, 225, 225)
# neg
top[2].reshape(self.mini_batchsize, 1, 225, 225)
# weights blob: dummy, we don't need that, just fill it with 1.
top[3].reshape(self.mini_batchsize, 1)
def create_sample_fetcher(self):
self._blob_queue = Queue(10)
self._prefetch_process = TripletSamplingDataFetcher(self._blob_queue, self.layer_params)
self._prefetch_process.start()
def cleanup():
print('Terminating BlobFetcher')
self._prefetch_process.terminate()
self._prefetch_process.join()
import atexit
atexit.register(cleanup)
def forward(self, bottom, top):
blobs = self._blob_queue.get()
top[0].data[...] = blobs[0].astype(np.float32, copy=False)
top[1].data[...] = blobs[1].astype(np.float32, copy=False)
top[2].data[...] = blobs[2].astype(np.float32, copy=False)
top[3].data[...] = 1. # sample weights, we don't need that
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Donot reshape once started."""
pass
class TripletSamplingDataFetcher(Process):
def __init__(self, queue, layer_params):
"""Setup the TripletSamplingDataLayer."""
super(TripletSamplingDataFetcher, self).__init__()
self._queue = queue
mean = layer_params['mean']
self._phase = layer_params['phase']
self.sketch_transformer = Transformer(225, 1, mean, self._phase == "TRAIN")
self.anc_bm = MemoryBlockManager(layer_params['sketch_dir'])
self.pos_neg_bm = MemoryBlockManager(layer_params['image_dir'])
self.hard_ratio = layer_params['hard_ratio']
self.mini_batchsize = layer_params['batch_size']
self.load_triplets(layer_params['triplet_path'])
def load_triplets(self, triplet_path):
self.triplets, self.neg_list = load_triplets(triplet_path, self._phase)
def get_next_batch(self):
anc_batch = []; pos_batch = []; neg_batch = []
# sampling
anc_inds = self.anc_bm.pop_batch_inds_circular(self.mini_batchsize)
pos_inds, neg_inds = sample_triplets(anc_inds, self.triplets, self.neg_list, self.hard_ratio)
# fetch data
for (anc_id, pos_id, neg_id) in zip(anc_inds, pos_inds, neg_inds):
anc_batch.append(self.anc_bm.get_sample(anc_id).reshape((1, 256, 256)))
pos_batch.append(self.pos_neg_bm.get_sample(pos_id).reshape((1, 256, 256)))
neg_batch.append(self.pos_neg_bm.get_sample(neg_id).reshape((1, 256, 256)))
# apply transform
anc_batch = self.sketch_transformer.transform_all(anc_batch)
pos_batch = self.sketch_transformer.transform_all(pos_batch)
neg_batch = self.sketch_transformer.transform_all(neg_batch)
self._queue.put((anc_batch, pos_batch, neg_batch))
def run(self):
print('TripletSamplingDataFetcher started')
while True:
self.get_next_batch()
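# (Editor's sketch) Example param_str that setup() above expects, parsed with yaml.load;
# the keys match the lookups in the code, the values and paths here are hypothetical:
# batch_size: 64
# mean: 250.42
# phase: TRAIN
# sketch_dir: ./data/sketch_db
# image_dir: ./data/image_db
# hard_ratio: 0.75
# triplet_path: ./data/triplets.pkl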
|
11541890
|
class Thimbles:
def thimbleWithBall(self, swaps):
i = 1
for s in swaps:
a, b = map(int, s.split("-"))
if a == i:
i = b
elif b == i:
i = a
return i
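# A tiny usage sketch (editor's addition): the ball starts under thimble 1 and follows each swap,
# so Thimbles().thimbleWithBall(["1-2", "2-3"]) returns 3.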
|
11541926
|
from myjwt.vulnerabilities import confusion_rsa_hmac
jwt = (
"<KEY>"
"-A2JQ6xcAVucXRhZbdBbAM2DG8io_brP_ROAqYaNlvRVsztXoPHFz_e7D2K0q6f02RXeRwZJGOhy0K"
<KEY>"
<KEY>"
<KEY>"
)
# Header: {"typ": "JWT", "alg": "RS256"}
# Payload: {"login": "a"}
file = "public.pem"
# file is a path file of your public key
jwt = confusion_rsa_hmac(jwt, file)
# the same JWT will be printed, except the header alg (set to HS256) and the signature
print(jwt)
|
11542007
|
from src.xcode_project_reader import XcodeProjectReader
from helpers import path_helper
import unittest
class TestXcodeProjectReader(unittest.TestCase):
def test_targets(self):
project = XcodeProjectReader(path_helper.xcode_example_project())
output = list(project.targets())
expectation = ['Example', 'ExampleTests', 'ExampleUITests']
self.assertEqual(output, expectation)
|
11542017
|
import warnings
import numpy as np
from qiskit import Aer, execute, QuantumCircuit
from qiskit.circuit.library import (HGate, SdgGate, SGate, TGate,
XGate, YGate, ZGate)
from qiskit.extensions.unitary import UnitaryGate
from qiskit.providers.aer.noise import NoiseModel
from deltalanguage.data_types import UInt, Size
from . import IQuantumSimulator
from ..hal import Masks, Opcode, Shifts
# defining some custom gates that arise from twirling
class SxGate(UnitaryGate):
def __init__(self):
super().__init__(np.array([[0, 1], [1j, 0]]), label='SX')
class SyGate(UnitaryGate):
def __init__(self):
super().__init__(np.array([[0, 1j], [1, 0]]), label='SY')
class PiXY(UnitaryGate):
"""Pi-rotation with axis in x-y-plane gate class."""
def __init__(self, angle):
super().__init__(np.array([[0, -np.sin(angle) - 1j*np.cos(angle)],
[np.sin(angle) - 1j*np.cos(angle), 0]]),
label='PiXY')
class PiYZ(UnitaryGate):
"""Pi-rotation with axis in y-z-plane gate class."""
def __init__(self, angle):
super().__init__(np.array([[np.cos(angle), -1j*np.sin(angle)],
[1j*np.sin(angle), -1*np.cos(angle)]]),
label='PiYZ')
class PiZX(UnitaryGate):
"""Pi-rotation with axis in z-x-plane gate class."""
def __init__(self, angle):
super().__init__(np.array([[np.cos(angle), np.sin(angle)],
[np.sin(angle), -1*np.cos(angle)]]),
label='PiZX')
class RX(UnitaryGate):
""" rotation around x-axis with custom label for noise model"""
def __init__(self, angle):
super().__init__(np.array([[1j*np.cos(angle/2), np.sin(angle/2)],
[np.sin(angle/2), 1j*np.cos(angle/2)]]),
label='RX')
# Some gates that already exist in qiskit need to be redefined with a custom label
# so that they can be recognized by the noise model
class X(UnitaryGate):
""" rotation around x-axis with custom label for noise model"""
def __init__(self):
super().__init__((np.array([[0, 1], [1, 0]]) + 0j), label='X')
class RY(UnitaryGate):
""" rotation around y-axis with custom label for noise model"""
def __init__(self, angle):
super().__init__(np.array([[1j*np.cos(angle/2), -1j*np.sin(angle/2)],
[1j*np.sin(angle/2), 1j*np.cos(angle/2)]]),
label='RY')
class Y(UnitaryGate):
""" rotation around x-axis with custom label for noise model"""
def __init__(self):
super().__init__(np.array([[0, -1j], [1j, 0]]), label='Y')
class RZ(UnitaryGate):
""" rotation around z-axis with custom label for noise model"""
def __init__(self, angle):
super().__init__(1j*np.array([[np.exp(-1j*angle/2), 0],
[0, np.exp(1j*angle/2)]]), label='RZ')
class Z(UnitaryGate):
""" rotation around x-axis with custom label for noise model"""
def __init__(self):
super().__init__((np.array([[1, 0], [0, -1]]) + 0j), label='Z')
class SqrtXGate(UnitaryGate):
def __init__(self):
super().__init__(
np.array([[1j, 1], [1, 1j]])/np.sqrt(2), label='sqrt_x')
class QiskitQuantumSimulator(IQuantumSimulator):
"""Qiskit implementation of the IQuantumSimulator interface.
Parameters
----------
register_size : int
Size of the qubit register.
seed : int
Seed for both the random transpiler and the measurement sampling.
simulator_backend : AerBackend
Specifies which qiskit simulator backend to use, possibilities are:
QasmSimulator, StatevectorSimulator/UnitarySimulator
(for debugging).
noise_model : NoiseModel
Qiskit NoiseModel object to apply when running a circuit.
.. TODO::
Add an example of how to use it in a graph (without any runtime).
"""
def __init__(self,
register_size: int = 16,
seed: int = None,
simulator_backend=Aer.get_backend('qasm_simulator'),
noise_model: NoiseModel = None
):
self.circuit = None
self._simulator_backend = simulator_backend
np.random.seed(seed)
self._seed = seed
# defaulted to 16 because the bitcode status return
# has 16 bits assigned for measurement results.
self._qubit_register_size = register_size
# stores control qubits
self._control_qubit_indices = []
# assign a qiskit gate to each opcode
self._parameterised_gate_dict = {
Opcode['R'].value: RZ,
Opcode['RX'].value: RX,
Opcode['RY'].value: RY,
Opcode['RZ'].value: RZ,
Opcode['PIXY'].value: PiXY,
Opcode['PIYZ'].value: PiYZ,
Opcode['PIZX'].value: PiZX,
}
self._constant_gate_dict = {
Opcode['H'].value: HGate,
Opcode['S'].value: SGate,
Opcode['SQRT_X'].value: SqrtXGate,
Opcode['T'].value: TGate,
Opcode['X'].value: X,
Opcode['Y'].value: Y,
Opcode['Z'].value: Z,
Opcode['INVS'].value: SdgGate,
# consecutive S and X gate needed for RC
Opcode['SX'].value: SxGate,
# consecutive S and Y gate needed for RC
Opcode['SY'].value: SyGate,
}
if noise_model is not None:
assert isinstance(noise_model, NoiseModel)
self._noise_model = noise_model
else:
self._noise_model = NoiseModel()
def apply_gate(self, gate, qubit_index: int, parameter: float = None):
"""Receives command information and implements the gate on the
corresponding qubit.
Parameters
----------
gate : UnitaryGate
Qiskit gate to be applied.
qubit_index : int
Index of qubit for gate to be applied to.
parameter : float
Angle of gate if parametrised.
"""
if self.circuit is not None:
if len(self._control_qubit_indices) == 0: # single qubit gate
if parameter is not None:
self.circuit.append(gate(parameter), [qubit_index])
else:
self.circuit.append(gate(), [qubit_index])
else: # controlled gate
if qubit_index in self._control_qubit_indices:
raise ValueError(
f"Target qubit {qubit_index} already set-up as " +
"control qubit!"
)
control_number = len(self._control_qubit_indices)
if gate == X:
gate = XGate
elif gate == Y:
gate = YGate
elif gate == Z:
gate = ZGate
else:
warnings.warn("Noise not supported on cotrolled gates " +
"besides CX, CY and CZ")
gate_indices = self._control_qubit_indices + [qubit_index]
if parameter is not None:
controlled_gate = gate(parameter).control(control_number)
else:
controlled_gate = gate().control(control_number)
self.circuit.append(controlled_gate, gate_indices)
# reset control indices
self._control_qubit_indices = []
def accept_command(
self,
command: UInt(Size(32))
) -> UInt(Size(32)):
op = command >> Shifts.OPCODE.value
qubit_index = (command & Masks.QUBIT_INDEX.value)
if qubit_index + 1 > self._qubit_register_size:
raise ValueError(
f"Qubit index ({qubit_index}) greater than " +
f"register size ({self._qubit_register_size})!"
)
if op == Opcode["STATE_PREPARATION"].value:
self.circuit = None
self.circuit = QuantumCircuit(self._qubit_register_size)
elif op == Opcode["STATE_MEASURE"].value:
self.circuit.measure_all()
job = execute(self.circuit, backend=self._simulator_backend,
optimization_level=0,
basis_gates=self._noise_model.basis_gates,
noise_model=self._noise_model,
seed_simulator=np.random.randint(self._seed), shots=1)
result_dict = job.result().get_counts()
outcome_string = list(result_dict.keys())[0]
# Each measurement sent should have all valid flags.
# Therefore valid mask added to the 16bit measurement bitcode.
measurement_binary = Masks.VALIDS.value
# convert binary string to int,
# qubit index 0 is least significant bit in the binary expansion
measurement_binary += int(outcome_string, 2)
return measurement_binary
elif op == Opcode["CONTROL"].value:
# add qubit index to self.control_qubit_indices, store
# in memory until a gate is called, which is run controlled
# on these qubits.
if qubit_index in self._control_qubit_indices:
raise ValueError(
f"Qubit {qubit_index} already set-up as control qubit!"
)
if len(self._control_qubit_indices)+1 == self._qubit_register_size:
raise ValueError(
"Too many control qubits for register size of " +
"f{self._qubit_register_size}!"
)
self._control_qubit_indices += [qubit_index]
elif op in self._parameterised_gate_dict.keys():
angle = (command & Masks.ARG.value) >> Shifts.ARG.value
angle *= (2 * np.pi) / 1024
gate = self._parameterised_gate_dict[op]
self.apply_gate(gate, qubit_index, angle)
elif op in self._constant_gate_dict.keys():
gate = self._constant_gate_dict[op]
self.apply_gate(gate, qubit_index)
elif op == Opcode['ID'].value:
pass
else:
raise TypeError(f"{op} is not a recognised opcode!")
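# A minimal usage sketch (editor's addition); commands follow the HAL encoding parsed by
# accept_command() above (opcode shifted by Shifts.OPCODE, qubit index in the low bits):
# sim = QiskitQuantumSimulator(register_size=2, seed=42)
# sim.accept_command(Opcode["STATE_PREPARATION"].value << Shifts.OPCODE.value)
# sim.accept_command((Opcode["H"].value << Shifts.OPCODE.value) | 0)
# result = sim.accept_command(Opcode["STATE_MEASURE"].value << Shifts.OPCODE.value)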
|
11542042
|
import pytest
import isobar as iso
@pytest.fixture()
def dummy_timeline():
timeline = iso.Timeline(output_device=iso.io.DummyOutputDevice(), clock_source=iso.DummyClock())
timeline.stop_when_done = True
return timeline
|
11542072
|
import jwt
import json
from django.http import JsonResponse
from django.conf import settings
from user.models import User
def login_check(func):
def wrapper(self, request, *args, **kwargs):
try:
access_token = request.headers.get('Authorization', None)
if not access_token:
request.user = None
return func(self, request, *args, **kwargs)
payload = jwt.decode(access_token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
login_user = User.objects.get(id=payload['user_id'])
request.user = login_user
return func(self, request, *args, **kwargs)
except jwt.DecodeError:
return JsonResponse({'message' : 'INVALID_TOKEN'}, status=400)
except User.DoesNotExist:
return JsonResponse({'message' : 'INVALID_USER'}, status=401)
return wrapper
def login_decorator(func):
def wrapper(self, request, *args, **kwargs):
if 'Authorization' not in request.headers:
return JsonResponse({'message': 'NEED_LOGIN'}, status=401)
try:
access_token = request.headers['Authorization']
payload = jwt.decode(access_token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
login_user = User.objects.get(id=payload['user_id'])
request.user = login_user
return func(self, request, *args, **kwargs)
except jwt.DecodeError:
return JsonResponse({'message': 'INVALID_TOKEN'}, status=401)
except User.DoesNotExist:
return JsonResponse({'message': 'INVALID_USER'}, status=401)
return wrapper
def admin_decorator(func):
def wrapper(self, request, *args, **kwargs):
if 'Authorization' not in request.headers:
return JsonResponse({'message': 'NEED_LOGIN'}, status=401)
try:
access_token = request.headers['Authorization']
payload = jwt.decode(access_token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
login_user = User.objects.get(id=payload['user_id'])
if not login_user.type.name == 'admin':
return JsonResponse({'message' : 'ACCESS_DENIED'}, status=401)
request.user = login_user
return func(self, request, *args, **kwargs)
except jwt.DecodeError:
return JsonResponse({'message': 'INVALID_TOKEN'}, status=401)
except User.DoesNotExist:
return JsonResponse({'message': 'INVALID_USER'}, status=401)
return wrapper
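# A usage sketch (editor's addition): these decorators wrap Django class-based view methods,
# e.g. requiring a valid token before handling the request (view name is hypothetical):
#
# from django.views import View
#
# class ProfileView(View):
#     @login_decorator
#     def get(self, request):
#         return JsonResponse({'user_id': request.user.id}, status=200)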
|
11542080
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.plant import TemperingValve
log = logging.getLogger(__name__)
class TestTemperingValve(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_temperingvalve(self):
pyidf.validation_level = ValidationLevel.error
obj = TemperingValve()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_inlet_node_name = "node|Inlet Node Name"
obj.inlet_node_name = var_inlet_node_name
# node
var_outlet_node_name = "node|Outlet Node Name"
obj.outlet_node_name = var_outlet_node_name
# node
var_stream_2_source_node_name = "node|Stream 2 Source Node Name"
obj.stream_2_source_node_name = var_stream_2_source_node_name
# node
var_temperature_setpoint_node_name = "node|Temperature Setpoint Node Name"
obj.temperature_setpoint_node_name = var_temperature_setpoint_node_name
# node
var_pump_outlet_node_name = "node|Pump Outlet Node Name"
obj.pump_outlet_node_name = var_pump_outlet_node_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.temperingvalves[0].name, var_name)
self.assertEqual(idf2.temperingvalves[0].inlet_node_name, var_inlet_node_name)
self.assertEqual(idf2.temperingvalves[0].outlet_node_name, var_outlet_node_name)
self.assertEqual(idf2.temperingvalves[0].stream_2_source_node_name, var_stream_2_source_node_name)
self.assertEqual(idf2.temperingvalves[0].temperature_setpoint_node_name, var_temperature_setpoint_node_name)
self.assertEqual(idf2.temperingvalves[0].pump_outlet_node_name, var_pump_outlet_node_name)
|
11542141
|
import sys
def test():
from math import radians, degrees, pi
class Angle(object):
def __init__(self,rad):
self._rad = rad
@Property
def rad():
'''The angle in radians'''
def fget(self):
return self._rad
def fset(self,angle):
if isinstance(angle,Angle): angle = angle.rad
self._rad = float(angle)
@Property
def deg():
'''The angle in degrees'''
def fget(self):
return degrees(self._rad)
def fset(self,angle):
if isinstance(angle,Angle): angle = angle.deg
self._rad = radians(angle)
def almostEquals(x,y):
return abs(x-y) < 1e-9
a = Angle(pi/3)
assert a.rad == pi/3 and almostEquals(a.deg, 60)
a.rad = pi/4
assert a.rad == pi/4 and almostEquals(a.deg, 45)
a.deg = 30
assert a.rad == pi/6 and almostEquals(a.deg, 30)
print(Angle.rad.__doc__)
print(Angle.deg.__doc__)
def Property(function):
keys = 'fget', 'fset', 'fdel'
func_locals = {'doc':function.__doc__}
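# (Editor's note) sys.settrace installs probeFunc, which fires when the decorated
# function returns; at that point frame.f_locals holds the nested fget/fset/fdel
# definitions, which are captured into func_locals and passed to property() below.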
def probeFunc(frame, event, arg):
if event == 'return':
locals = frame.f_locals
func_locals.update(dict((k,locals.get(k)) for k in keys))
sys.settrace(None)
return probeFunc
sys.settrace(probeFunc)
function()
return property(**func_locals)
if __name__ == '__main__':
test()
|
11542167
|
from os.path import dirname, join
from pytest import fixture
from shutil import rmtree
from ..asset import Asset
from ..assetset import Assetset
from ..paths import Paths
paths = Paths()
fixtures_dir_path = join(dirname(__file__), 'fixtures', 'assets')
@fixture
def assetset() -> Assetset:
asset = Asset(
filename='acquired_asset',
source=f'file://{fixtures_dir_path}/text-asset'
)
try:
rmtree(str(paths.assets_path))
except FileNotFoundError:
pass
return Assetset([asset])
def test_acquire_downloads_files(assetset):
assetset.acquire()
assert (paths.assets_path / 'acquired_asset').exists()
|
11542169
|
import os
import unittest
from numpy.testing import assert_array_almost_equal
import scipy.io.matlab
import means
import means.examples
import numpy as np
from means.simulation import SolverException
MODELS = {'p53': means.examples.MODEL_P53}
class TestTrajectoriesMatch(unittest.TestCase):
def _read_data_from_matlab(self, matfile):
"""
Returns the trajectories from the matlab file provided as the `matfile` argument
:param matfile: a file.mat where the trajectory data is stored
:return:
"""
TRAJECTORIES_VARIABLE_NAME = 'trajectories'
TIMEPOINTS_VARIABLE_NAME = 'timepoints'
N_MOMENTS_VARIABLE_NAME = 'nMoments'
PARAMETERS_VARIABLE_NAME = 'parameters'
INITIAL_CONDITIONS_VARIABLE_NAME = 'init_val'
MODEL_VARIABLE_NAME = 'model_name'
CLOSURE_TYPE_VARIABLE_NAME = 'closure'
CLOSURE_MULTIVARIATE_VARIABLE_NAME = 'multivariate'
data = scipy.io.matlab.loadmat(matfile)
return {'trajectories': data[TRAJECTORIES_VARIABLE_NAME],
'n_moments': data[N_MOMENTS_VARIABLE_NAME],
# Reshape the `initial_conditions`, `parameters` and `timepoints` to be one-dimensional
'parameters': data[PARAMETERS_VARIABLE_NAME].reshape(-1),
'initial_conditions': data[INITIAL_CONDITIONS_VARIABLE_NAME].reshape(-1),
'timepoints': data[TIMEPOINTS_VARIABLE_NAME].reshape(-1),
# Scipy reads everything as arrays, even things that shouldn't be, thus [0]'s below
'model_name': data[MODEL_VARIABLE_NAME][0],
'closure': data[CLOSURE_TYPE_VARIABLE_NAME][0],
'closure_is_multivariate': data[CLOSURE_MULTIVARIATE_VARIABLE_NAME][0]}
def _compare_trajectories(self, our_trajectories, matlab_trajectories, only_the_first_n=None):
# Check that we have similar number of trajectories
self.assertEqual(len(our_trajectories), len(matlab_trajectories))
for i, trajectory in enumerate(our_trajectories):
if only_the_first_n is not None and i >= only_the_first_n:
break
matlab_trajectory = matlab_trajectories[i, :]
assert_array_almost_equal(trajectory.values, matlab_trajectory, decimal=4)
def _perform_test(self, matlab_filename):
#-- Parse the data from MATLAB -------------------------
data = self._read_data_from_matlab(matlab_filename)
timepoints = data['timepoints']
matlab_trajectories = data['trajectories']
max_order = data['n_moments'] # We use one more moment than MATLAB for the same thing
parameters = data['parameters']
initial_conditions = data['initial_conditions']
model_name = data['model_name']
closure = data['closure']
multivariate = data['closure_is_multivariate']
#-- Do the test ---------------------------------------
model = MODELS[model_name]
problem = means.approximation.MomentExpansionApproximation(model,
max_order=max_order,
closure=closure,
multivariate=multivariate).run()
# The test script sets maxh equivalent to 0.01 in matlab, so let's do it here as well
simulation = means.simulation.Simulation(problem, solver='ode15s', maxh=0.01)
results = simulation.simulate_system(parameters, initial_conditions, timepoints)
self._compare_trajectories(results, matlab_trajectories, problem.number_of_species)
def test_p53_3_moments_lognormal_multivariate(self):
self._perform_test(os.path.join(os.path.dirname(__file__), 'p53_3_moments_lognormal_multivariate.mat'))
class TestODE15SFailsWhereMatlabDoes(unittest.TestCase):
def test_lognormal_2_mom_fails_early(self):
problem = means.approximation.MomentExpansionApproximation(means.examples.MODEL_P53, 2, closure='log-normal')
problem = problem.run()
s = means.simulation.Simulation(problem, solver='ode15s', maxh=0.1)
try:
trajectories = s.simulate_system([90, 0.002, 1.7, 1.1, 0.93, 0.96, 0.01], [70, 30, 60],
np.arange(0, 40, 0.1))
except SolverException as e:
base_exception = e.base_exception
# Check that the exception occurred at a timepoint similar to the timepoint in MATLAB
self.assertAlmostEqual(base_exception.t, 17.35795, places=1)
else:
self.fail('ode15s was able to reach output without throwing an exception')
|
11542200
|
import pytest
import numpy as np
from ..posrich import posrich
import pkg_resources
PATH = pkg_resources.resource_filename(__name__, 'test_data/')
def test_posrich():
"Test positional enrichment"
# load data
X_list = open(PATH+'multiple.txt').read().splitlines()
X_err = 'AGT2HT9'
# test posrich single position
posrich_single = posrich(X_list, position=2, aminoacid='A')
assert np.array_equal(posrich_single, np.array([1.,0.,1.]))
# test posrich multiple positions
posrich_multiple = posrich(X_list, position=[2, 4], aminoacid=['A', 'K'])
assert np.array_equal(posrich_multiple[:,0], np.array([1.,0.,1.]))
assert np.array_equal(posrich_multiple[:,1], np.array([1.,0.,0.]))
# test ValueError (erroneous input single)
with pytest.raises(ValueError):
posrich_err = posrich(X_err, position=1, aminoacid='R')
# test ValueError (position / amino acid mismatch)
with pytest.raises(ValueError):
posrich_err = posrich(X_err, position=[1, 2], aminoacid=['R', 'A', 'K'])
# test ValueError (position / amino acid mismatch alphabetical)
with pytest.raises(ValueError):
posrich_err = posrich(X_err, position=[1, 2], aminoacid=['R', 'A'])
# test ValueError (erroneous function arguments)
with pytest.raises(ValueError):
posrich_err = posrich(X_err, position='R', aminoacid=1)
|
11542208
|
from .acquirer import Acquirer
from datetime import datetime
from urllib.parse import urlparse
import posixpath
import sites
class Instagram(Acquirer):
def __init__(self, colymer: sites.Colymer, instagram: sites.Instagram, collection: str):
super().__init__(colymer)
self.instagram = instagram
self.collection = collection
@staticmethod
def append_attachment(attachments, child_node):
url = urlparse(child_node['display_url'])
attachments.append({
'id': child_node['id'],
'filename': posixpath.basename(url.path),
'content_type': 'image/jpeg',
'original_url': child_node['display_url'],
'metadata': child_node['dimensions'],
'persist_info': {
'directly_transfer': True,
'path': url.path,
'referer': 'https://www.instagram.com/',
}
})
if child_node['is_video']:
url = urlparse(child_node['video_url'])
attachments.append({
'id': child_node['id'],
'filename': posixpath.basename(url.path),
'content_type': 'video/mp4',
'original_url': child_node['video_url'],
'metadata': child_node['dimensions'],
'persist_info': {
'directly_transfer': True,
'path': url.path,
'referer': 'https://www.instagram.com/',
}
})
def get_chain_id(self, user_id):
return 'instagram-user-{}-timeline'.format(user_id)
def acquire(self, cursor, min_id, user_id):
result = {
'top_id': None,
'bottom_id': None,
'bottom_cursor': None,
'has_next': True,
'less_than_min_id': False
}
data = self.instagram.owner_to_timeline_media(user_id, after=cursor)
result['bottom_cursor'] = data['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']
result['has_next'] = data['user']['edge_owner_to_timeline_media']['page_info']['has_next_page']
edges = data['user']['edge_owner_to_timeline_media']['edges']
for edge in edges:
node = edge['node']
if min_id is not None and int(node['id']) <= int(min_id):
result['less_than_min_id'] = True
break
attachments = []
if node['__typename'] == 'GraphSidecar':
for child_edge in node['edge_sidecar_to_children']['edges']:
Instagram.append_attachment(
attachments, child_edge['node'])
else:
Instagram.append_attachment(attachments, node)
self.colymer.post_article(self.collection, {
'author': {
'id': node['owner']['id'],
'name': node['owner']['username']
},
'content_type': 'text/plain',
'content': node['edge_media_to_caption']['edges'][0]['node']['text'] if node['edge_media_to_caption']['edges'] else '',
'title': node['shortcode'],
'id': node['id'],
'original_url': 'https://www.instagram.com/p/{}/'.format(node['shortcode']),
'time': datetime.fromtimestamp(node['taken_at_timestamp']).isoformat() + 'Z',
'attachments': attachments,
'metadata': {
'original_data': node
}
}, overwrite=False)
if result['top_id'] is None:
result['top_id'] = node['id']
result['bottom_id'] = node['id']
return result
|
11542224
|
import numpy as np
import math
import tensorflow as tf
import h5py
import sys
import os
######## OPTIONS #########
ver = 6 # Neural network version
table_ver = 6 # Table Version
hu = 25 # Number of hidden units in each hidden layer in network
saveEvery = 1000 # Epoch frequency of saving
totalEpochs = 3000 # Total number of training epochs
trainingDataFiles = "./TrainingData/HCAS_rect_TrainingData_v%d_pra%d_tau%02d.h5" # File format for training data
nnetFiles = "./networks/HCAS_rect_v%d_pra%d_tau%02d_%dHU.nnet" # File format for .nnet files
modelFiles = "./models/HCAS_rect_v%d_pra%d_tau%02d_%dHU.ckpt" # File format for TensorFlow checkpoint files
tbFiles = "./tensorboard/HCAS_rect_v%d_pra%d_tau%02d_%dHU"
##########################
# Custom tensorflow session. Sets up training with either a cpu, gpu, or multiple gpus
def get_session(gpu_ind,gpu_mem_frac=0.45):
"""Create a session that dynamically allocates memory."""
if gpu_ind[0]>-1:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(np.char.mod('%d', gpu_ind))
config = tf.ConfigProto(device_count = {'GPU': len(gpu_ind)})
config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac
session = tf.Session(config=config)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
session = tf.Session()
return session
# Function to compute network accuracy from true and predicted labels. However, this does not
# add the online costs that were used to generate the correct minimum cost index, so
# these accuracies are only an estimate
def custAcc(y_true,y_pred):
maxesPred = tf.argmax(y_pred,axis=1)
inds = tf.argmax(y_true,axis=1)
diff = tf.cast(tf.abs(inds-maxesPred),dtype='float32')
ones = tf.ones_like(diff,dtype='float32')
zeros= tf.zeros_like(diff,dtype='float32')
l = tf.where(diff<0.5,ones,zeros)
return tf.reduce_mean(l)
# Write the .nnet files
def writeNNet(params,fileName):
#Open the file we wish to write
with open(fileName,'w') as f2:
#####################
# First, we write the header lines:
# The first line written is just a line of text
# The second line gives the four values:
# Number of hidden layers in the networks
# Number of inputs to the networks
# Number of outputs from the networks
# Maximum size of any hidden layer
# The third line gives the sizes of each layer, including the input and output layers
# The fourth line specifies if the network is "symmetric", in which case the network was only
# trained on half of the state space and the other half is mirrored. This
# option was explored but not fruitfully, so this value is just set to false, 0.
# The fifth line specifies the minimum values each input can take.
# The sixth line specifies the maximum values each input can take.
# Inputs passed to the network are truncated to be between this range, since neural
# networks are not good at extrapolation
# The seventh line gives the mean value of each input and of the outputs.
# The eighth line gives the range of each input and of the outputs.
# These two lines are used to map raw inputs to the 0 mean, 1 range of the inputs and outputs
# used during training
# The ninth line begins the network weights and biases.
####################
f2.write("// Neural Network File Format by <NAME>, Stanford 2016\n")
#Extract the necessary information and write the header information
keys = sorted(params.keys())
keysW = keys[:int(len(keys)/2)]
keysb = keys[int(len(keys)/2):]
numLayers = len(keysW)
inputSize = params[keysW[0]].shape[0]
outputSize = params[keysb[-1]].shape[0]
maxLayerSize = inputSize
for key in keysW:
if params[key].shape[1]>maxLayerSize :
maxLayerSize = params[key].shape[1]
str = "%d,%d,%d,%d,\n" % (numLayers,inputSize,outputSize,maxLayerSize)
f2.write(str)
str = "%d," % inputSize
f2.write(str)
for key in keysW:
str = "%d," % params[key].shape[1]
f2.write(str)
f2.write("\n")
#Write Min, Max, Mean, and Range of each of the inputs and outputs for normalization
f2.write("0,\n") # Symmetric flag (unused, always 0)
f2.write(min_inputs + "\n") #Minimum Input Values
f2.write(max_inputs + "\n") #Maximum Input Values
f2.write(means + "\n") #Means of inputs for normalizations
f2.write(ranges + "\n") #Ranges of inputs for normalization
##################
# Write weights and biases of neural network
# First, the weights from the input layer to the first hidden layer are written
# Then, the biases of the first hidden layer are written
# The pattern is repeated by next writing the weights from the first hidden layer to the second hidden layer,
# followed by the biases of the second hidden layer.
##################
for ii in range(len(keysW)):
data = np.array(params[keysW[ii]]).T
for i in range(len(data)):
for j in range(int(np.size(data)/len(data))):
str = ""
if int(np.size(data)/len(data))==1:
str = "%.5e," % data[i] #Five digits written. More can be used, but that requires more space.
else:
str = "%.5e," % data[i][j]
f2.write(str)
f2.write("\n")
data = np.array(params[keysb[ii]]).T
for i in range(len(data)):
for j in range(int(np.size(data)/len(data))):
str = ""
if int(np.size(data)/len(data))==1:
str = "%.5e," % data[i] #Five digits written. More can be used, but that requires more space.
else:
str = "%.5e," % data[i][j]
f2.write(str)
f2.write("\n")
# Train the Tensorflow model given parameters
def run_model(session, predict, loss, Xd, yd,writer,vd,saveFile,saveTF,
epochs=1, batch_size=64,
training=None, save_every=1, test_size=1000):
# have tensorflow compute accuracy
accuracy = custAcc(y,predict)
tf.summary.scalar('accuracy',accuracy)
tf.summary.scalar('mean_loss',loss)
merged_summary = tf.summary.merge_all()
saver = tf.train.Saver()
for e in range(epochs):
# keep track of losses and accuracy
correct = 0
# shuffle indicies
train_indices = np.arange(Xd.shape[0])
np.random.shuffle(train_indices)
# make sure we iterate over the dataset once
for i in range(int(math.ceil(Xd.shape[0]/batch_size))):
# generate indicies for the batch
start_idx = (i*batch_size)%Xd.shape[0]
idx = train_indices[start_idx:np.minimum(start_idx+batch_size,Xd.shape[0])]
# create a feed dictionary for this batch
feed_dict = {X: Xd[idx,:],
y: yd[idx,:]
}
# have tensorflow perform a training step
session.run([training],feed_dict=feed_dict)
test_inds = np.random.choice(Xd.shape[0],test_size, replace=False)
# Save to summary folder; data can be viewed with TensorBoard
feed_dict = {X: Xd[test_inds,:],
y: yd[test_inds,:]
}
s = sess.run(merged_summary, feed_dict=feed_dict)
writer.add_summary(s,e+1)
# Save graph, write .nnet file
params = sess.run(vd)
writeNNet(params,saveFile)
saver.save(sess, saveTF)
# Save model at specified intervals
if (e+1) % save_every == 0:
params = sess.run(vd)
writeNNet(params,saveFile[:-5] + "_%03d.nnet"%(e+1))
saver.save(sess, saveTF[:-5] + "_%03d.ckpt"%(e+1))
# The previous RA should be given as a command line input
if len(sys.argv) > 2:
pra = int(sys.argv[1])
tau = int(sys.argv[2])
gpu = -1
if len(sys.argv)>3:
gpu = int(sys.argv[3])
print("Loading Data for HCAS, pra %02d, Network Version %d" % (pra, ver))
f = h5py.File(trainingDataFiles % (table_ver,pra,tau),'r')
X_train = np.array(f['X'])
Q = np.array(f['y'])
means = np.array(f['means'])
ranges=np.array(f['ranges'])
mins = np.array(f['min_inputs'])
maxes = np.array(f['max_inputs'])
min_inputs = ",".join(np.char.mod('%f', mins))
max_inputs = ",".join(np.char.mod('%f', maxes))
means = ",".join(np.char.mod('%f', means))
ranges = ",".join(np.char.mod('%f', ranges))
#goodInds = np.where(X_train[:,2]==0.0)[0]
#X_train = X_train[goodInds,:]
#X_train = X_train[:,[0,1,3]]
#Q = Q[goodInds,:]
#means = means[[0,1,3,4]]
#ranges = ranges[[0,1,3,4]]
#min_inputs = min_inputs[[0,1,3]]
#max_inputs = max_inputs[[0,1,3]]
N,numInputs = X_train.shape
N,numOut = Q.shape
print("Setting up Model")
# Asymmetric loss function
lossFactor = 40.0
def asymMSE(y_true, y_pred):
d = y_true-y_pred
maxes = tf.argmax(y_true,axis=1)
maxes_onehot = tf.one_hot(maxes,numOut)
others_onehot = maxes_onehot-1
d_opt = d*maxes_onehot
d_sub = d*others_onehot
a = lossFactor*(numOut-1)*(tf.square(d_opt)+tf.abs(d_opt))
b = tf.square(d_opt)
c = lossFactor*(tf.square(d_sub)+tf.abs(d_sub))
d = tf.square(d_sub)
loss = tf.where(d_sub>0,c,d) + tf.where(d_opt>0,a,b)
return tf.reduce_mean(loss)
# Define model architecture
X = tf.placeholder(tf.float32, [None, numInputs])
y = tf.placeholder(tf.float32, [None, numOut])
layer_sizes = [hu, hu, hu, hu, hu]
layers = np.concatenate(([numInputs],layer_sizes,[numOut]))
vd = {}
inputs = X
for i, (inLayer, outLayer) in enumerate(zip(layers[:-1],layers[1:])):
vd["W" + str(i)] = tf.get_variable("W"+str(i), shape=[inLayer, outLayer])
vd["b" + str(i)] = tf.get_variable("b"+str(i), shape=[outLayer])
if i < len(layers)-2:
inputs = tf.nn.relu(tf.matmul(inputs,vd["W" + str(i)]) + vd["b" + str(i)])
else:
# Don't use ReLU on output layer!
inputs = tf.matmul(inputs,vd["W" + str(i)]) + vd["b" + str(i)]
y_out = inputs
# define our loss
mean_loss = asymMSE(y,y_out)
# define our optimizer
optimizer = tf.train.AdamOptimizer(3e-4) # select optimizer and set learning rate
train_step = optimizer.minimize(mean_loss)
# Initialize session
sess = get_session([gpu], 0.45)
sess.run(tf.global_variables_initializer())
tb_file = tbFiles % (ver, pra, tau, hu)
writer = tf.summary.FileWriter(tb_file)
writer.add_graph(sess.graph)
nnet_file = nnetFiles % (ver, pra, tau, hu)
model_file = modelFiles % (ver, pra, tau, hu)
run_model(sess,y_out,mean_loss,X_train,Q,writer,vd,nnet_file,model_file,totalEpochs,2**9,train_step,saveEvery,1000)
## Train and write nnet files
#epoch= saveEvery
#while epoch <= totalEpochs:
# model.fit(X_train, Q, nb_epoch=saveEvery, batch_size=2**9,shuffle=True)
# saveFile = nnetFiles % (pra, ver,hu,epoch)
# saveNNet(model,saveFile,means,ranges,min_inputs,max_inputs)
# epoch += saveEvery
|
11542230
|
import math
import time
import torch
from utils.utils import print_speed
# -----------------------------
# Main training code for Ocean
# -----------------------------
def ocean_train(train_loader, model, optimizer, epoch, cur_lr, cfg, writer_dict, logger, device):
# unfix for FREEZE-OUT method
# model, optimizer = unfix_more(model, optimizer, epoch, cfg, cur_lr, logger)
# prepare
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
cls_losses_align = AverageMeter()
cls_losses_ori = AverageMeter()
reg_losses = AverageMeter()
end = time.time()
# switch to train mode
model.train()
model = model.to(device)
for iter, input in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# input and output/loss
label_cls = input[2].type(torch.FloatTensor) # BCE need float
template = input[0].to(device)
search = input[1].to(device)
label_cls = label_cls.to(device)
reg_label = input[3].float().to(device)
reg_weight = input[4].float().to(device)
cls_loss_ori, cls_loss_align, reg_loss = model(template, search, label_cls, reg_target=reg_label, reg_weight=reg_weight)
cls_loss_ori = torch.mean(cls_loss_ori)
reg_loss = torch.mean(reg_loss)
if cls_loss_align is not None:
cls_loss_align = torch.mean(cls_loss_align)
loss = cls_loss_ori + cls_loss_align + reg_loss # smaller reg loss is better for stable training (compared to 1.2 in SiamRPN series)
else: # we suggest readers perform an ablation on the loss trade-off weights when building a new module
cls_loss_align = 0
loss = cls_loss_ori + reg_loss
loss = torch.mean(loss)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
# torch.nn.utils.clip_grad_norm(model.parameters(), 10) # gradient clip
if is_valid_number(loss.item()):
optimizer.step()
# record loss
loss = loss.item()
losses.update(loss, template.size(0))
cls_loss_ori = cls_loss_ori.item()
cls_losses_ori.update(cls_loss_ori, template.size(0))
try:
cls_loss_align = cls_loss_align.item()
except:
cls_loss_align = 0
cls_losses_align.update(cls_loss_align, template.size(0))
reg_loss = reg_loss.item()
reg_losses.update(reg_loss, template.size(0))
batch_time.update(time.time() - end)
end = time.time()
if (iter + 1) % cfg.PRINT_FREQ == 0:
logger.info(
'Epoch: [{0}][{1}/{2}] lr: {lr:.7f}\t Batch Time: {batch_time.avg:.3f}s \t Data Time:{data_time.avg:.3f}s \t CLS_ORI Loss:{cls_loss_ori.avg:.5f} \t CLS_ALIGN Loss:{cls_loss_align.avg:.5f} \t REG Loss:{reg_loss.avg:.5f} \t Loss:{loss.avg:.5f}'.format(
epoch, iter + 1, len(train_loader), lr=cur_lr, batch_time=batch_time, data_time=data_time,
loss=losses, cls_loss_ori=cls_losses_ori, cls_loss_align=cls_losses_align, reg_loss=reg_losses))
print_speed((epoch - 1) * len(train_loader) + iter + 1, batch_time.avg,
cfg.OCEAN.TRAIN.END_EPOCH * len(train_loader), logger)
# write to tensorboard
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', loss, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
return model, writer_dict
# ------------------------------------------
# Main code for Ocean Plus training
# ------------------------------------------
def BNtoFixed(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
m.eval()
def oceanplus_train(train_loader, model, optimizer, epoch, cur_lr, cfg, writer_dict, logger, device):
# prepare
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
# switch to train mode
print('====> fix again <=====')
model.train()
try:
model.module.features.features.eval()
except:
model.module.features.eval()
try:
model.module.neck.eval()
model.module.neck.apply(BNtoFixed)
except:
pass
try:
model.module.connect_model.eval()
model.module.connect_model.apply(BNtoFixed)
except:
pass
try:
model.module.bbox_tower.eval()
model.module.bbox_tower.apply(BNtoFixed)
except:
pass
try:
model.module.features.features.apply(BNtoFixed)
except:
model.module.features.apply(BNtoFixed)
model.module.mask_model.train()
model = model.to(device)
for iter, input in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# input and output/loss
label_cls = input[2].type(torch.FloatTensor) # BCE need float
template = input[0].to(device)
search = input[1].to(device)
label_cls = label_cls.to(device)
reg_label = input[3].float().to(device)
reg_weight = input[4].float().to(device)
mask = input[6].float().to(device)
template_mask = input[-1].float().to(device)
mask_weight = input[7].float().to(device)
_, _, loss = model(template, search, label_cls, reg_target=reg_label, reg_weight=reg_weight,
mask=mask, mask_weight=mask_weight, template_mask=template_mask)
loss = torch.mean(loss)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
# torch.nn.utils.clip_grad_norm(model.parameters(), 10) # gradient clip
if is_valid_number(loss.item()):
torch.nn.utils.clip_grad_norm_(model.parameters(), 10) # gradient clip (applied before the update step)
optimizer.step()
# record loss
loss = loss.item()
losses.update(loss, template.size(0))
batch_time.update(time.time() - end)
end = time.time()
if (iter + 1) % cfg.PRINT_FREQ == 0:
logger.info(
'Epoch: [{0}][{1}/{2}] lr: {lr:.7f}\t Batch Time: {batch_time.avg:.3f}s \t Data Time:{data_time.avg:.3f}s \t MASK Loss:{mask_loss.avg:.5f}'.format(
epoch, iter + 1, len(train_loader), lr=cur_lr, batch_time=batch_time, data_time=data_time, mask_loss=losses))
print_speed((epoch - 1) * len(train_loader) + iter + 1, batch_time.avg,
cfg.FREEMASK.TRAIN.END_EPOCH * len(train_loader), logger)
# write to tensorboard
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', loss, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
return model, writer_dict
# ===========================================================
# Main code for SiamDW train
# ===========================================================
def siamdw_train(train_loader, model, optimizer, epoch, cur_lr, cfg, writer_dict, logger):
# unfix for FREEZE-OUT method
# model, optimizer = unfix_more(model, optimizer, epoch, cfg, cur_lr, logger) # you may try freeze-out
# prepare
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
# switch to train mode
model.train()
model = model.cuda()
for iter, input in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# input and output/loss
label_cls = input[2].type(torch.FloatTensor) # BCE need float
template = input[0].cuda()
search = input[1].cuda()
label_cls = label_cls.cuda()
loss = model(template, search, label_cls)
loss = torch.mean(loss)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 10) # gradient clip
if is_valid_number(loss.item()):
optimizer.step()
# record loss
loss = loss.item()
losses.update(loss, template.size(0))
batch_time.update(time.time() - end)
end = time.time()
if (iter + 1) % cfg.PRINT_FREQ == 0:
logger.info('Epoch: [{0}][{1}/{2}] lr: {lr:.7f}\t Batch Time: {batch_time.avg:.3f}s \t Data Time:{data_time.avg:.3f}s \t Loss:{loss.avg:.5f}'.format(
epoch, iter + 1, len(train_loader), lr=cur_lr, batch_time=batch_time, data_time=data_time, loss=losses))
print_speed((epoch - 1) * len(train_loader) + iter + 1, batch_time.avg, cfg.SIAMFC.TRAIN.END_EPOCH * len(train_loader), logger)
# write to tensorboard
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', loss, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
return model, writer_dict
def is_valid_number(x):
return not(math.isnan(x) or math.isinf(x) or x > 1e4)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
|
11542239
|
from ..cw_model import CWModel
class AccountingBatch(CWModel):
def __init__(self, json_dict=None):
self.thruDate = None # (String)
self.transactionsClosedDate = None # (String)
self.locationId = None # (Integer)
self.summarizeInvoices = None # (Integer)
self.includedInvoiceIds = None # (Integer[])
self.includedExpenseIds = None # (Integer[])
self.includedProductIds = None # (Integer[])
self.excludedInvoiceIds = None # (Integer[])
self.excludedExpenseIds = None # (Integer[])
self.excludedProductIds = None # (Integer[])
self.id = None # (Integer)
self.batchIdentifier = None # *(String(50))
self.exportInvoicesFlag = None # (Boolean)
self.exportExpensesFlag = None # (Boolean)
self.exportProductsFlag = None # (Boolean)
self.closedFlag = None # (Boolean)
self._info = None # (Metadata)
# initialize object with json dict
super().__init__(json_dict)
|
11542288
|
import datetime
from ming.datastore import DataStore
from ming import Session
from ming.orm.ormsession import ThreadLocalORMSession
bind = DataStore('mongodb://localhost:27017/orm_tutorial')
doc_session = Session(bind)
session = ThreadLocalORMSession(doc_session=doc_session)
from ming import schema
from ming.orm.mapped_class import MappedClass
from ming.orm.property import FieldProperty, ForeignIdProperty, RelationProperty
class WikiPage(MappedClass):
class __mongometa__:
session = session
name = 'pages'
_id = FieldProperty(schema.ObjectId)
author = FieldProperty(str)
title = FieldProperty(str)
tags = FieldProperty([str])
date = FieldProperty(datetime.datetime)
text = FieldProperty(str)
comments=RelationProperty('WikiComment')
class WikiComment(MappedClass):
class __mongometa__:
session = session
name = 'wiki_comment'
_id = FieldProperty(schema.ObjectId)
page_id = ForeignIdProperty('WikiPage')
text=FieldProperty(str, if_missing='')
page=RelationProperty('WikiPage')
MappedClass.compile_all()
session
def snippet1():
WikiPage.query.find().all()
wp = WikiPage(title='FirstPage',
text='This is my first page')
wp
session
session.flush()
session
session.clear()
session
def snippet2():
wp = WikiPage.query.get(title='FirstPage')
session
# Verify the IdentityMap keeps only one copy of the object
wp2 = WikiPage.query.get(title='FirstPage')
wp is wp2
# Modify the object in memory
wp.title = 'MyFirstPage'
# Notice that the object has been marked dirty
session
wp
session.flush()
# We can also delete objects
wp = WikiPage.query.get(title='MyFirstPage')
wp.delete()
session
# Rather than flushing, we'll keep the object
# around and just clear the session instead
session.clear()
def snippet3():
wp = WikiPage.query.get(title='MyFirstPage')
# Create some comments
WikiComment(page_id=wp._id,
text='A comment')
WikiComment(page_id=wp._id,
text='Another comment')
session.flush()
session.clear()
# Load the original page
wp = WikiPage.query.get(title='MyFirstPage')
session
# View its comments
wp.comments
session
wp.comments[0].page
wp.comments[0].page is wp
def snippet4():
wp = WikiPage.query.get(title='MyFirstPage')
results = WikiComment.query.find(dict(page_id=wp._id))
list(results)
def snippet5():
from ming.orm.base import mapper
m = mapper(WikiPage)
# m.doc_cls is the 'base' Ming document class
m.doc_cls
# Retrieve the 'base' Ming session
session.impl
|
11542365
|
class Solution:
def simplifyPath(self, path: str) -> str:
stack = []
for p in path.split('/'):
if p not in {'', '.', '..'}:
stack.append(p)
elif p == '..' and stack:
stack.pop()
return '/' + '/'.join(stack)
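# Worked example (illustrative, not part of the original snippet):
#   Solution().simplifyPath("/a/./b/../../c/") -> "/c"
# Empty and '.' segments are dropped, and each '..' pops the previous directory off the stack.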
|
11542409
|
import time,sqlite3
import pandas as pd
import sqlalchemy as sqla
from . import config as cfg
__version__ = '0.1.3'
class CypTyper:
'''This is really just a db interface. Typing is done in the db'''
def __init__(self,alleles,variants,dbFile,dbStore=False):
self.alleles = alleles
self.variants = variants
self.dbFile = dbFile
self.db = self._openDB(dbFile)
self.dbStore = dbStore
self._import2DB()
def __enter__(self):
return self
def __exit__(self,exc_type,exc_value,traceback):
if not self.dbStore:
metadata = sqla.MetaData(bind=self.db)
metadata.reflect()
uuids = list(self.alleles.index)
for table in ['variantTable','alleleTable']:
try:
t = metadata.tables[cfg.database[table]]
t.delete()\
.where(t.c.uuid.in_(uuids))\
.execute()
except (sqla.exc.OperationalError,sqlite3.OperationalError) as e:
raise Typer_Error(f'Unable to delete records in {self.dbFile}. DB error as follows:\n\n{e}')
def getSummary(self,table,failed=False):
uuids = tuple(self.alleles.index)
sql = f'SELECT * FROM {table}'
try:
result = pd.read_sql(sql,con=self.db)
if 'uuid' in result.columns: #full table
query = 'uuid in @uuids'
if not failed:
query += ' and status == "passed"'
return result.query(query) \
.iloc[:,1:] \
.sort_values(cfg.typer['sortColumns'])
else:
return result #need some better filtering here
except (sqla.exc.OperationalError,sqlite3.OperationalError) as e:
raise Typer_Error(f'Unable to execute query in {self.dbFile}. DB error as follows:\n\n{e}')
def _openDB(self,dbFile):
return sqla.create_engine(f'sqlite:///{dbFile}', echo=False)
def _import2DB(self):
tables = [cfg.database[t] for t in ['alleleTable','variantTable']]
support = cfg.database['supportField']
maxTries = cfg.database['maxTries']
for pydf,sqldf in zip([self.alleles,self.variants],tables):
pbaa = pydf.reset_index()\
.reindex(columns=list(cfg.tableMap[sqldf].values()))
pbaa.columns = list(cfg.tableMap[sqldf].keys())
#make sure support dtype is "string"
if support in pbaa.columns:
pbaa[support] = pbaa[support].astype(str)
tries = 0
while tries < maxTries:
try:
pbaa.set_index('uuid').to_sql(sqldf, con=self.db, if_exists='append')
break
#except sqlite3.OperationalError as e:
except sqla.exc.OperationalError as e:
tries += 1
print(f'WARNING: sqlite3 import error try #{tries} of {maxTries}')
if tries == maxTries:
raise Typer_Error(f'Unable to import {pydf.source.unique()} to {self.dbFile}')
else:
time.sleep(1)
return None
class Typer_Error(Exception):
pass
|
11542446
|
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import *
from keras import backend as K # needed for K.get_session() when exporting the SavedModel below
import tensorflow as tf
training_data_df = pd.read_csv("sales_data_training_scaled.csv")
X = training_data_df.drop('total_earnings', axis=1).values
Y = training_data_df[['total_earnings']].values
# Define the model
model = Sequential()
model.add(Dense(50, input_dim=9, activation='relu', name='layer_1'))
model.add(Dense(100, activation='relu', name='layer_2'))
model.add(Dense(50, activation='relu', name='layer_3'))
model.add(Dense(1, activation='linear', name='output_layer'))
model.compile(loss='mean_squared_error', optimizer='adam')
# Create a TensorBoard logger
logger = keras.callbacks.TensorBoard(
log_dir='logs',
histogram_freq=5,
write_graph=True
)
# Train the model
model.fit(
X,
Y,
epochs=50,
shuffle=True,
verbose=2,
callbacks=[logger]
)
# Load the separate test data set
test_data_df = pd.read_csv("sales_data_test_scaled.csv")
X_test = test_data_df.drop('total_earnings', axis=1).values
Y_test = test_data_df[['total_earnings']].values
test_error_rate = model.evaluate(X_test, Y_test, verbose=0)
print("The mean squared error (MSE) for the test data set is: {}".format(test_error_rate))
model_builder = tf.saved_model.builder.SavedModelBuilder("exported_model")
inputs = {
'input': tf.saved_model.utils.build_tensor_info(model.input)
}
outputs = {
'earnings': tf.saved_model.utils.build_tensor_info(model.output)
}
signature_def = tf.saved_model.signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)
model_builder.add_meta_graph_and_variables(
K.get_session(),
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def
}
)
model_builder.save()
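# Usage sketch (illustrative): the exported SavedModel in ./exported_model can be
# inspected or served outside this script, e.g. with the TensorFlow CLI:
#   saved_model_cli show --dir exported_model --all
# which lists the 'input'/'earnings' tensors declared in the signature above.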
|
11542457
|
from . import nsapiwrapper
from .objects import Nation, Region, World, WorldAssembly, Telegram, Cards, IndividualCards
class Nationstates:
def __init__(self, user_agent, version="11", ratelimit_sleep=True,
ratelimit_limit=40, ratelimit_timeframe=30, ratelimit_sleep_time=4,
ratelimit_maxsleeps=10, ratelimit_enabled=True, do_retry=True,
retry_sleep=5, max_retries=5, use_nsdict=True, use_session=True, threading_mode=True,
max_ongoing_requests=20, enable_beta=False):
self.api = nsapiwrapper.Api(user_agent, version=version,
ratelimit_sleep=ratelimit_sleep,
ratelimit_sleep_time=ratelimit_sleep_time,
ratelimit_max=ratelimit_limit,
ratelimit_within=ratelimit_timeframe,
ratelimit_maxsleeps=ratelimit_maxsleeps,
ratelimit_enabled=ratelimit_enabled,
use_session=use_session and not threading_mode)
self.do_retry = do_retry
self.retry_sleep = retry_sleep
self.max_retries = max_retries
self.use_nsdict = use_nsdict
self.enable_beta = enable_beta
def nation(self, nation_name, password=None, autologin=None):
"""Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin: (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation
"""
return Nation(nation_name, self, password=password, autologin=autologin)
def region(self, region_name):
"""Setup access to the Region API with the Nation object
:param region_name: Name of the region
:type region_name: str
:returns: Region Object based off region_name
:rtype: Region
"""
return Region(region_name, self)
def world(self):
"""Setup access to the World API with the Nation object
:returns: World Object
:rtype: World
"""
return World(self)
def wa(self, chamber):
"""Setup access to the World Assembly API with the WorldAssembly object
:param chamber: Chamber of the WA
:type chamber: str, int
:returns: WorldAssembly Object based off chamber
:rtype: WorldAssembly
"""
if isinstance(chamber, int):
chamber = str(chamber)
return WorldAssembly(chamber, self)
def telegram(self, client_key=None, tgid=None, key=None):
"""Create Telegram Templates which can be used to send telegrams
:param client_key: Client Key Nationstates Gave you
:param tgid: TGID from api template
:param key: Key from api Template
"""
return Telegram(self, client_key, tgid, key)
def cards(self):
"""General Card api
"""
return Cards(self)
def individual_cards(self, cardid=None, season=None):
"""Create Cards api for Individual cards, since they shard
:param cardid: Cards ID
:param season: season
"""
return IndividualCards(self, cardid=cardid, season=season)
@property
def user_agent(self):
return self.api.user_agent
@user_agent.setter
def user_agent(self, ua):
self.api.user_agent = ua
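# Usage sketch (illustrative; the user agent and nation/region names are placeholders):
#   api = Nationstates("example user agent / contact email")
#   nation = api.nation("testlandia")
#   region = api.region("the north pacific")
#   wa = api.wa(1)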
|
11542467
|
from .vgg16 import get_vgg
from .vgg16_deconv import get_vgg_deconv
from .utils import get_image, store_feature, visualize_layer
|
11542470
|
import matplotlib.pyplot as plt
import numpy as np
plt.switch_backend('Agg')
#
# Make image array with using matplotlib
#
def plot_to_buf(x: np.ndarray, align: bool = True) -> np.ndarray:
"""
Plot the given array and return the rendered figure as a channels-first image array
:param x: an array to be plotted
:param align: if True, fix the y-axis limits to [-1, 1]
:return: plotted image as a (3, H, W) array
"""
fig, ax = plt.subplots()
ax.plot(x)
if align:
ax.set_ylim([-1, 1])
fig.canvas.draw()
im = np.array(fig.canvas.renderer._renderer)
plt.clf()
plt.close('all')
return np.rollaxis(im[..., :3], 2)
def imshow_to_buf(x: np.ndarray) -> np.ndarray:
"""
Render the given 2-D array with imshow and return the figure as a channels-first image array
:param x: an array to be rendered (if 3-D, only the first slice is used)
:return: rendered image as a (3, H, W) array
"""
if len(x.shape) == 3:
x = x[0]
fig, ax = plt.subplots()
ax.imshow(x, aspect='auto')
fig.canvas.draw()
im = np.array(fig.canvas.renderer._renderer)
plt.clf()
plt.close('all')
return np.rollaxis(im[..., :3], 2)
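# Minimal usage sketch (illustrative): both helpers return channels-first (3, H, W)
# arrays, e.g. for logging to TensorBoard:
#   sig = np.sin(np.linspace(0, 4 * np.pi, 200))
#   chw_plot = plot_to_buf(sig)                     # line plot of a 1-D signal
#   chw_img = imshow_to_buf(np.random.rand(32, 32)) # heatmap of a 2-D array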
|
11542478
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def _radar_factory(num_vars):
theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
theta += np.pi/2
def unit_poly_verts(theta):
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
class RadarAxes(PolarAxes):
name = 'radar'
RESOLUTION = 1
def fill(self, *args, **kwargs):
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
# FIXME: markers at x[0], y[0] get doubled-up
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta * 180/np.pi, labels)
def _gen_axes_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def _gen_axes_spines(self):
spine_type = 'circle'
verts = unit_poly_verts(theta)
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def radar_graph(labels = [], values = [], optimum = []):
N = len(labels)
theta = _radar_factory(N)
max_val = max(max(optimum), max(values))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='radar')
ax.plot(theta, values, color='k')
ax.plot(theta, optimum, color='r')
ax.set_varlabels(labels)
#plt.show()
# plt.savefig("radar.png", dpi=100)
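# Usage sketch (illustrative values; labels, values and optimum must have equal length):
#   radar_graph(labels=['speed', 'power', 'skill'],
#               values=[3, 5, 2], optimum=[5, 5, 5])
#   plt.savefig("radar.png", dpi=100)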
|
11542486
|
import requests
import jwt
from django.conf import settings
from django.contrib.auth import get_user_model
from .models import IntegrationData
class IntegrationUtils():
@staticmethod
def getNNEProfile(subscription_id):
jwtToken = jwt.encode({'sub': subscription_id}, settings.INTEGRATION_JWT_SECRET, 'HS256')
r = requests.get(settings.INTEGRATION_PROFILE_URL, headers={'Authorization': 'Bearer ' + jwtToken})
return r.json()
def load_profile_all():
if not hasattr(settings, 'INTEGRATION_PROFILE_ALL_EDITIONS') or not settings.INTEGRATION_PROFILE_ALL_EDITIONS:
return
for edition in settings.INTEGRATION_PROFILE_ALL_EDITIONS:
params = {
'edition_id': edition,
'key': settings.INTEGRATION_PROFILE_ALL_SECRET
}
r = requests.get(settings.INTEGRATION_PROFILE_ALL_URL, params=params)
user_model = get_user_model()
if r.ok:
for profile in r.json().get('profiles'):
try:
user = user_model.objects.get(id=profile.get('sub_id'))
print "Added"
except user_model.DoesNotExist:
print "Skipped"
continue
save_integration_data(user, profile)
def save_integration_data(user, json):
if not hasattr(user, 'integration_data'):
user.integration_data = IntegrationData()
user.integration_data.city = json.get('city').get('name')
user.integration_data.edition = json.get('edition')
user.integration_data.formation_type = json.get('formation_type')
user.integration_data.participation_type = json.get('participation_type')
user.integration_data.region = json.get('region').get('name')
user.integration_data.rf = json.get('rf')
user.integration_data.school = json.get('school').get('name')
user.integration_data.track = json.get('track').get('name')
user.integration_data.num_stories_total = json.get('num_stories_total')
user.integration_data.num_stories_started = json.get('num_stories_started')
user.integration_data.num_stories_pending = json.get('num_stories_pending')
user.integration_data.num_stories_published = json.get('num_stories_published')
user.integration_data.save()
|
11542544
|
from typing import List, Union, Optional
from .cache_manager import CacheManager
from ..__init__ import Guild, UnavailableGuild
class GuildManager(CacheManager):
def __init__(
self, client, guilds: Optional[List[Union[Guild, UnavailableGuild]]] = None
):
if guilds is None:
guilds = []
super().__init__("guilds_cache")
self.client = client
self.available_guilds = [
guild for guild in guilds if not isinstance(guild, UnavailableGuild)
]
self.unavailable_guilds = [
guild for guild in guilds if isinstance(guild, UnavailableGuild)
]
self.guilds = guilds
def format_cache(self):
for guild in self.guilds:
self.guilds_cache[guild.id] = guild
return self.guilds_cache
async def fetch(
self,
guild_id: str,
*,
skip_cache: Optional[bool] = False,
with_counts: Optional[bool] = False,
):
if guild_id in self.cache and not skip_cache:
return self.cache[guild_id]
if with_counts:
return Guild(
self.client,
await self.client.http.get(f"/guilds/{guild_id}?with_counts=true"),
)
return Guild(self.client, await self.client.http.get(f"/guilds/{guild_id}"))
# TODO: This might not work...
|
11542584
|
from pyChemometrics.ChemometricsPCA import ChemometricsPCA
from pyChemometrics.ChemometricsScaler import ChemometricsScaler
from nPYc.objects._dataset import Dataset
import copy
def exploratoryAnalysisPCA(npycDataset, scaling=1, maxComponents=10, minQ2=0.05, withExclusions=False, **kwargs):
"""
Performs an exploratory analysis using PCA on the data contained in an :py:class:`~nPYc:objects.Dataset`.
:param Dataset npycDataset: Dataset to model
:param scaling: Choice of scaling.
:param int maxComponents: Maximum number of components to fit.
:param minQ2: Minimum % of improvement in Q2Y over the previous component required to add it.
:param Boolean withExclusions: If True, the PCA is fitted on the npycDataset after applying the feature and sample masks; if False, the PCA is performed on the whole dataset.
:return: Fitted PCA model
:rtype: ChemometricsPCA
"""
try:
if not isinstance(npycDataset, Dataset):
raise TypeError('npycDataset argument must be one of the nPYc dataset objects')
if not isinstance(scaling, (float, int)) or (scaling < 0 or scaling > 1):
raise TypeError('scaling must be a number between 0 and 1. Recommended values are '
'0 (mean centring), 1 (Unit Variance) and 0.5 (Pareto)')
if not isinstance(maxComponents, (float, int)) or maxComponents <= 0:
raise TypeError('maxComponents must be a positive number')
if not isinstance(minQ2, (float, int)):
raise TypeError('MinQ2 must be a number')
scaler_obj = ChemometricsScaler(scaling)
PCAmodel = ChemometricsPCA(ncomps=maxComponents, scaler=scaler_obj)
# Parse the data for the cases with withExclusions = True and False
if withExclusions:
npycDatasetmaskApplied = copy.deepcopy(npycDataset)
npycDatasetmaskApplied.applyMasks()
data = npycDatasetmaskApplied.intensityData
# generate hash
# samp_mask_hash = sha1(numpy.ascontiguousarray(npyc_dataset.sampleMask)).hexdigest()
# feat_mask_hash = sha1(numpy.ascontiguousarray(npyc_dataset.featureMask)).hexdigest()
# PCAmodel._npyc_hash = {'SampleMask': samp_mask_hash, 'FeatureMask': feat_mask_hash}
else:
data = npycDataset.intensityData
PCAmodel._npyc_dataset_shape = {'NumberSamples': data.shape[0], 'NumberFeatures': data.shape[1]}
# Do nothing else
PCAmodel.fit(data)
scree_cv = PCAmodel._screecv_optimize_ncomps(data, total_comps=maxComponents, stopping_condition=minQ2, **kwargs)
# After choosing number of components, re-initialize the model
PCAmodel.ncomps = scree_cv['Scree_n_components']
# Set the minimum number of components to 2
# TODO: fix plotScores to enable plotting of one component models
if PCAmodel.ncomps == 1:
scree_cv = PCAmodel._screecv_optimize_ncomps(data, total_comps=2, stopping_condition=-100000, **kwargs)
PCAmodel.ncomps = scree_cv['Scree_n_components']
PCAmodel.fit(data, **kwargs)
# Append the old scree plot to the object
PCAmodel.cvParameters = scree_cv
PCAmodel.cvParameters['total_comps'] = maxComponents
PCAmodel.cvParameters['stopping_condition'] = minQ2
# Cross-validation
PCAmodel.cross_validation(data, press_impute=False, **kwargs)
return PCAmodel
except TypeError as terr:
raise terr
except Exception as exp:
raise exp
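# Usage sketch (illustrative; `msData` stands for any already-loaded nPYc Dataset object):
#   pcaModel = exploratoryAnalysisPCA(msData, scaling=1, maxComponents=10,
#                                     minQ2=0.05, withExclusions=True)
#   print(pcaModel.ncomps, pcaModel.cvParameters['total_comps'])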
|
11542594
|
from six import itervalues
from flask_marshmallow import Schema
class Parameters(Schema):
class Meta:
ordered = True
def __init__(self, **kwargs):
kwargs["strict"] = kwargs.get("strict", True)
super(Parameters, self).__init__(**kwargs)
for required_field_name in getattr(self.Meta, 'required', []):
self.fields[required_field_name].required = True
for field in itervalues(self.fields):
if field.dump_only:
continue
if not field.metadata.get('location'):
field.metadata['location'] = self.LOCATION
def __contains__(self, field):
return field in self.fields
def make_instance(self, data):
# pylint: disable=unused-argument
"""
This is a no-op function which shadows ``ModelSchema.make_instance``
method (when inherited classes inherit from ``ModelSchema``). Thus, we
avoid a new instance creation because it is undesirable behaviour for
parameters (they can be used not only for saving new instances).
"""
return
class JSONParameters(Parameters):
LOCATION = 'json'
class QueryParameters(Parameters):
LOCATION = 'query'
class HeaderParameters(Parameters):
LOCATION = 'headers'
class FileParameters(Parameters):
LOCATION = 'files'
class FormParameters(Parameters):
LOCATION = 'form'
|
11542597
|
import pytest
from .data_providers import valid_customers
@pytest.mark.parametrize("customer", valid_customers, ids=[repr(x) for x in valid_customers])
def test_can_register_customer(app, customer):
old_ids = app.get_customer_ids()
app.register_new_customer(customer)
new_ids = app.get_customer_ids()
assert all([i in new_ids for i in old_ids])
assert len(new_ids) == len(old_ids) + 1
|
11542601
|
import tensorflow as tf
def variable_checkpoint_matcher(conf, vars, model_file=None, ignore_varname_firstag=False):
"""
For every variable in vars, take its name and look inside the
checkpoint for a variable whose name matches it, comparing path components from the end.
:param vars: iterable of tf.Variable objects to match
:return: dict mapping checkpoint variable names to the corresponding variables
"""
if model_file is None:
ckpt = tf.train.get_checkpoint_state(conf['output_dir'])
model_file = ckpt.model_checkpoint_path
print('variable checkpoint matcher using model_file:',model_file)
reader = tf.train.NewCheckpointReader(model_file)
var_to_shape_map = reader.get_variable_to_shape_map()
check_names = list(var_to_shape_map.keys())
vars = dict([(var.name.split(':')[0], var) for var in vars])
new_vars = {}
for varname in list(vars.keys()):
found = False
for ck_name in check_names:
ck_name_parts = ck_name.split('/')
varname_parts = varname.split('/')
if ignore_varname_firstag:
varname_parts = varname_parts[1:]
if varname_parts == ck_name_parts[-len(varname_parts):]:
new_vars[ck_name] = vars[varname]
found = True
# print("found {} in {}".format(varname, ck_name))
break
if not found:
raise ValueError("did not find variable {}".format(varname))
return new_vars
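# Usage sketch (illustrative; assumes the graph variables are already built and
# conf['output_dir'] points at a directory containing a TF checkpoint):
#   mapping = variable_checkpoint_matcher(conf, tf.global_variables())
#   saver = tf.train.Saver(var_list=mapping)
#   saver.restore(sess, tf.train.get_checkpoint_state(conf['output_dir']).model_checkpoint_path)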
|
11542610
|
import os.path
import textwrap
from typing import Set
desc_id = 0
def _next_desc_id():
global desc_id
desc_id += 1
return f"desc{desc_id}"
class DocWriter(object):
def __init__(self, base_path, name):
self.base_path = base_path
self.name = name
self.path = os.path.join(base_path, f"{self.name}.rst")
assert not os.path.isfile(self.path), self.path
self.doc = ""
self.literals = {}
self.toc = []
def sub_writer(self, name=None):
self.add_toc_item(name)
return DocWriter(self.base_path, name)
def add_header(self, text, underline):
self.doc += f"{text}\n{underline * len(text)}\n\n"
def add_anchor(self, aid):
self.doc += f".. _{aid}:\n\n"
return f":ref:`{aid}`"
def get_anchor(self, aid):
return f":ref:`{aid}`"
def add_paragraph(self, text, indent=""):
self.doc += textwrap.indent(text + "\n\n", indent)
def add_literal(self, name, text):
self.literals[name] = text
def add_unnamed_literal(self, text, indent=""):
lid = _next_desc_id()
self.add_literal(lid, text)
self.add_paragraph(f"|{lid}|", indent)
def add_code(self, language, code):
self.doc += f".. code-block:: {language}\n\n"
self.add_paragraph(code, " ")
def add_parsed_code(self, language, code):
self.doc += f".. parsed-literal::\n\n"
self.add_paragraph(code, " ")
def add_toc_item(self, item):
self.toc.append(item)
def write(self):
with open(self.path, "w") as f:
f.write(self.doc)
for name, text in self.literals.items():
f.write(f".. |{name}| raw:: html\n\n")
f.write(textwrap.indent(text, " "))
f.write("\n\n")
if self.toc:
f.write(".. toctree::\n :hidden:\n\n")
for t in self.toc:
f.write(f" {t}\n")
f.write("\n")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.write()
def shape_args_to_doc(doc: DocWriter, resource_type: str, input_shape, replacement_attributes: Set[str]):
_shape_args_to_doc(doc, replacement_attributes, input_shape, resource_type, set())
def _shape_args_to_doc(doc: DocWriter, replacement_attributes, shape, resource_type, history: Set[str]):
assert shape.type_name == "structure"
doc.add_header("Syntax", "*")
doc.add_header("JSON", "~")
doc.add_parsed_code("json", shape_args_to_json(shape, resource_type))
doc.add_header("YAML", "~")
doc.add_parsed_code("yaml", shape_args_to_yaml(shape, resource_type))
doc.add_header("Properties", "*")
for member_name, member_shape in shape.members.items():
doc.add_anchor(f"member_{shape.name}_{member_name}")
doc.add_header(member_name, "~")
doc.add_unnamed_literal(member_shape.documentation, " ")
required = "Yes" if shape.required_members else "No"
doc.add_paragraph(f"""*Required*: {required}""", " ")
type_name = member_shape.type_name
if type_name == "structure":
if member_shape.name in history:
type_name = doc.get_anchor(f"type_{member_shape.name}")
else:
with doc.sub_writer(f"type_{member_shape.name}") as sub_doc:
type_name = sub_doc.add_anchor(f"type_{member_shape.name}")
sub_doc.add_header(member_shape.name, "=")
_shape_args_to_doc(sub_doc, [], member_shape, None, history)
history.add(member_shape.name)
if type_name == "list":
if member_shape.member.type_name == "structure":
if member_shape.member.name in history:
type_name = doc.get_anchor(f"type_{member_shape.member.name}")
else:
with doc.sub_writer(f"type_{member_shape.member.name}") as sub_doc:
type_name = sub_doc.add_anchor(f"type_{member_shape.member.name}")
sub_doc.add_header(member_shape.member.name, "=")
_shape_args_to_doc(sub_doc, [], member_shape.member, None, history)
history.add(member_shape.member.name)
else:
type_name = member_shape.member.type_name
type_name = "List of " + type_name
doc.add_paragraph(f"""*Type*: {type_name}""", " ")
update_replace = "Replacement" if member_name in replacement_attributes else "No interruption"
doc.add_paragraph(f"""*Update requires*: {update_replace}""", " ")
def shape_args_to_json(shape, resource_type):
if resource_type:
prefix = f'{{\n "Type" : "{resource_type}",\n "Properties" : {{\n' \
' "ServiceToken" : {"Fn::ImportValue": "cfm-reslib"},\n'
indent = " "
suffix = '\n }\n}'
else:
prefix = '{\n'
indent = " "
suffix = '\n}'
members = ",\n".join(f'{indent}"{n}" : {t}' for n, t in _shape_properties(shape))
return prefix + members + suffix
def shape_args_to_yaml(shape, resource_type):
if resource_type:
result = f'Type: {resource_type}\nProperties :\n ServiceToken : !ImportValue cfm-reslib\n'
indent = " "
else:
result = ""
indent = ""
# TODO something not based on the string result of _shape_properties?
for n, t in _shape_properties(shape):
result += f"{indent}{n} :"
if t.startswith("["):
result += f"\n{indent} - {t.strip('[] .,')}\n"
elif t.startswith(":ref:"):
result += f"\n{indent} {t}\n"
else:
result += f" {t}\n"
return result
def _shape_properties(shape):
assert shape.type_name == "structure"
for member_name, member_shape in shape.members.items():
linked_name = f":ref:`member_{shape.name}_{member_name}`"
if member_shape.type_name == "structure":
yield linked_name, f":ref:`type_{member_shape.name}`"
elif member_shape.type_name == "list":
if member_shape.member.type_name == "structure":
yield linked_name, f"[ :ref:`type_{member_shape.member.name}`, ... ]"
else:
yield linked_name, f"[ {member_shape.member.type_name}, ... ]"
else:
yield linked_name, member_shape.type_name
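# Usage sketch (illustrative path and resource name):
#   with DocWriter("docs/source", "example_resource") as doc:
#       doc.add_header("ExampleResource", "=")
#       doc.add_paragraph("Hand-written description of the custom resource.")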
|
11542615
|
import astroid
from tools.pylint import LogNokwargsChecker
from tools.pylint.log_checker import LOGNOKWARGS_SYMBOL
def test_simple_logger_with_kwargs(pylint_test_linter):
"""Test that we can detect the usage of kwargs in a normal logging call
But that we also allow it for RotkehlchenLogsAdapter
"""
checker = LogNokwargsChecker(linter=pylint_test_linter)
# Check that simple loggers with kwargs raise the checker's error
for method_name in ('info', 'debug', 'error', 'warning'):
node = astroid.extract_node(f"""
import logging
logger = logging.getLogger(__name__)
logger.{method_name}('foo', a=1) #@
""")
checker.visit_call(node)
messages = checker.linter.release_messages()
assert len(messages) == 2
for m in messages:
assert m.msg_id == LOGNOKWARGS_SYMBOL
assert m.node == node
# But also check that if it's a RotkehlchenLogsAdapter there is no error
for method_name in ('info', 'debug', 'error', 'warning'):
node = astroid.extract_node(f"""
import logging
from rotkehlchen.logging import RotkehlchenLogsAdapter
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
log.{method_name}('foo', a=1) #@
""")
checker.visit_call(node)
messages = checker.linter.release_messages()
assert len(messages) == 0
def test_works_for_nodes_without_kwargs(pylint_test_linter):
"""Test that the custome checker also works for nodes without kwargs"""
checker = LogNokwargsChecker(linter=pylint_test_linter)
# Check that simple loggers with kwargs raise the checker's error
node = astroid.extract_node("""
import logging
logger = logging.getLogger(__name__)
logger.info('foo') #@
""")
checker.visit_call(node)
messages = checker.linter.release_messages()
assert len(messages) == 0
|
11542644
|
def rightTriangle(nRows):
for i in range(1,nRows+1):
print("*"*i)
def invertedTriangle(nRows):
nSpaces=0
nStars=2*nRows-1
for i in range(1,nRows+1):
print(' '*nSpaces+'*'*nStars)
nStars-=2
nSpaces+=1
def main():
choice=int(input("Enter 1 for right triangle -> \nEnter 2 for inverted triangle -> \n"))
assert choice==1 or choice==2
nRows=int(input("Enter number of rows -> "))
if choice==1:
rightTriangle(nRows)
else:
invertedTriangle(nRows)
if __name__=='__main__':
main()
|
11542654
|
import FWCore.ParameterSet.Config as cms
particleFlowTmpPtrs = cms.EDProducer("PFCandidateFwdPtrProducer",
src = cms.InputTag('particleFlowTmp')
)
|
11542662
|
from bs4 import BeautifulSoup
from terminaltables import SingleTable
import requests, re
from tkinter import *
def searchCopainsdavant(text, nom, city):
url = "http://copainsdavant.linternaute.com/s/?ty=1&prenom=%s&nom=%s&nomjf=&annee=&anneeDelta=&ville=%s"
name = nom
if " " in name:
nom = name.split(" ")[1]
prenom = name.split(" ")[0]
else:
prenom = ""
nom = name
data = requests.get(url % (prenom, nom, city)).content.decode('utf-8')
soup = BeautifulSoup(data, "html.parser")
nameList = soup.find_all("div", {"class": "grid_last"})
addresseList = soup.find_all("span", {"class": "app_list--result__search__place"})
urlList = soup.find_all("h3")
birthdayList = []
travailList = []
urlList2 = []
for url in urlList:
url = url.find("a")
urls = str(url)
href = re.search(r"/p/([a-zA-Z0-9_-]+)", urls).group()
urlList2.append(href)
for url in urlList2:
data = requests.get("http://copainsdavant.linternaute.com/%s" % (url)).content.decode('utf-8')
soup = BeautifulSoup(data, "html.parser")
birthdayList0 = soup.find_all("abbr", {"class": "bday"})
item = len(birthdayList0)
if item == 0:
birthdayList0.append("None")
for b in birthdayList0:
birthdayList.append(str(b))
travailList0 = soup.find_all("p", {"class": "title"})
item = len(travailList0)
if item == 0:
travailList0.append("None")
for t in travailList0:
travailList.append(str(t))
namesList2 = []
addressesList2 = []
birthdayList2 = []
travailList2 = []
for name in nameList:
name = name.find("a")
namesList2.append(name.string)
for addr in addresseList:
addressesList2.append(addr.string.strip())
for date in birthdayList:
date = date.replace("<abbr class=\"bday\" title=\"", "").replace("00:00:00\">", "- ").replace("</abbr>", "").replace("\">", "")
birthdayList2.append(date)
for travail in travailList:
travail = travail.replace("<p class=\"title\">", "").replace("</p>", "")
travailList2.append(travail)
regroup = zip(namesList2, addressesList2, birthdayList2, travailList2, urlList2)
title = " <NAME> "
TABLE_DATA = [
('Name', 'Adresse', 'Date', 'Work', 'url'),
]
count = 0
for info in regroup:
count += 1
name = info[0]
adresse = info[1]
adresse = adresse.split(" - ")[0]
dateBirthday = info[2]
try:
dateBirthday = dateBirthday.split(" - ")[1]
except:
pass
travail = info[3]
url = info[4]
infos = (name, adresse, dateBirthday, travail, url)
TABLE_DATA.append(infos)
if count > 0:
table_instance = SingleTable(TABLE_DATA, title)
text.insert(END,table_instance.table)
# labl = Label(self, text=table_instance.table, bg="black", fg="green",
# font=("comicsansms", 15, "bold"), relief=FLAT)
# labl.place(x=20, y=2)
# print(table_instance.table)
|
11542721
|
import random
import pytest
from .models import db, User, UserType, Friendship, Relation, MYSQL_URL
pytestmark = pytest.mark.asyncio
async def test_create(engine):
nickname = "test_create_{}".format(random.random())
u = await User.create(
bind=engine, timeout=10, nickname=nickname, age=42, type=UserType.USER
)
assert u.id is not None
assert u.nickname == nickname
assert u.type == UserType.USER
assert u.age == 42
u2 = await User.get(u.id, bind=engine, timeout=10)
assert u2.id == u.id
assert u2.nickname == nickname
assert u2.type == UserType.USER
assert u2.age == 42
assert u2 is not u
return u
async def test_create_from_instance(engine):
nickname = "test_create_from_instance_{}".format(random.random())
u = User(nickname="will-be-replaced", type=UserType.USER, age=42)
u.nickname = nickname
u.age = 21
await u.create(bind=engine, timeout=10)
assert u.id is not None
assert u.nickname == nickname
assert u.type == UserType.USER
assert u.age == 21
u2 = await User.get(u.id, bind=engine, timeout=10)
assert u2.id == u.id
assert u2.nickname == nickname
assert u2.type == UserType.USER
assert u2.age == 21
assert u2 is not u
return u
async def test_get(engine):
u1 = await test_create(engine)
u2 = await User.get(u1.id, bind=engine, timeout=10)
assert u1.id == u2.id
assert u1.nickname == u2.nickname
assert u1 is not u2
u3 = await engine.first(u1.query)
assert u1.id == u3.id
assert u1.nickname == u3.nickname
assert u1 is not u3
u4 = await test_create_from_instance(engine)
u5 = await engine.first(u4.query)
assert u4.id == u5.id
assert u4.nickname == u5.nickname
assert u4 is not u5
async def test_textual_sql(engine):
u1 = await test_create(engine)
u2 = await engine.first(
db.text("SELECT * FROM gino_users WHERE id = :uid")
.bindparams(uid=u1.id)
.columns(*User)
.execution_options(model=User)
)
assert isinstance(u2, User)
assert u1.id == u2.id
assert u1.nickname == u2.nickname
assert u1.type is u2.type
assert u1 is not u2
u2 = await engine.first(
db.text("SELECT * FROM gino_users WHERE id = :uid AND type = :utype")
.bindparams(db.bindparam("utype", type_=db.Enum(UserType)))
.bindparams(uid=u1.id, utype=UserType.USER,)
.columns(*User)
.execution_options(model=User)
)
assert isinstance(u2, User)
assert u1.id == u2.id
assert u1.nickname == u2.nickname
assert u1.type is u2.type
assert u1 is not u2
async def test_select(engine):
u = await test_create(engine)
name = await engine.scalar(User.select("nickname").where(User.id == u.id))
assert u.nickname == name
name = await engine.scalar(u.select("nickname"))
assert u.nickname == name
async def test_get_multiple_primary_key(engine):
u1 = await test_create(engine)
u2 = await test_create(engine)
await Friendship.create(bind=engine, my_id=u1.id, friend_id=u2.id)
with pytest.raises(ValueError, match="Incorrect number of values as primary key"):
await Friendship.get((u1.id,), bind=engine)
with pytest.raises(ValueError, match="Incorrect number of values as primary key"):
await Friendship.get(u1.id, bind=engine)
f = await Friendship.get((u1.id, u2.id), bind=engine)
assert f
assert f.my_id == u1.id
assert f.friend_id == u2.id
async def test_multiple_primary_key_order():
import gino
db1 = gino.Gino()
await db1.set_bind(MYSQL_URL)
class NameCard(db1.Model):
__tablename__ = "name_cards"
first_name = db1.Column(db1.Unicode(255), primary_key=True)
last_name = db1.Column(db1.Unicode(255), primary_key=True)
await db1.gino.create_all()
try:
await NameCard.create(first_name="first", last_name="last")
nc = await NameCard.get(("first", "last"))
assert nc.first_name == "first"
assert nc.last_name == "last"
with pytest.raises(ValueError, match="expected 2, got 3"):
await NameCard.get(dict(a=1, first_name="first", last_name="last"))
with pytest.raises(KeyError, match="first_name"):
await NameCard.get(dict(first="first", last_name="last"))
nc = await NameCard.get(dict(first_name="first", last_name="last"))
assert nc.first_name == "first"
assert nc.last_name == "last"
nc = await NameCard.get({0: "first", 1: "last"})
assert nc.first_name == "first"
assert nc.last_name == "last"
finally:
await db1.gino.drop_all()
await db1.pop_bind().close()
db2 = gino.Gino(MYSQL_URL)
await db2.set_bind(MYSQL_URL)
class NameCard(db2.Model):
__tablename__ = "name_cards"
last_name = db2.Column(db2.Unicode(255), primary_key=True)
first_name = db2.Column(db2.Unicode(255), primary_key=True)
await db2.gino.create_all()
try:
await NameCard.create(first_name="first", last_name="last")
nc = await NameCard.get(("last", "first"))
assert nc.first_name == "first"
assert nc.last_name == "last"
nc = await NameCard.get(dict(first_name="first", last_name="last"))
assert nc.first_name == "first"
assert nc.last_name == "last"
nc = await NameCard.get({1: "first", "last_name": "last"})
assert nc.first_name == "first"
assert nc.last_name == "last"
finally:
await db2.gino.drop_all()
await db2.pop_bind().close()
async def test_connection_as_bind(engine):
async with engine.acquire() as conn:
await test_get(conn)
async def test_update(engine, random_name):
u1 = await test_create(engine)
await u1.update(nickname=random_name).apply(bind=engine, timeout=10)
u2 = await User.get(u1.id, bind=engine)
assert u2.nickname == random_name
async def test_update_missing(engine, random_name):
from gino.exceptions import NoSuchRowError
u1 = await test_create(engine)
rq = u1.update(nickname=random_name)
await u1.delete(bind=engine)
with pytest.raises(NoSuchRowError):
await rq.apply(bind=engine, timeout=10)
async def test_update_multiple_primary_key(engine):
u1 = await test_create(engine)
u2 = await test_create(engine)
u3 = await test_create(engine)
await Friendship.create(bind=engine, my_id=u1.id, friend_id=u2.id)
f = await Friendship.get((u1.id, u2.id), bind=engine)
await f.update(my_id=u2.id, friend_id=u3.id).apply(bind=engine)
f2 = await Friendship.get((u2.id, u3.id), bind=engine)
assert f2
async def test_delete(engine):
u1 = await test_create(engine)
await u1.delete(bind=engine, timeout=10)
u2 = await User.get(u1.id, bind=engine)
assert not u2
async def test_delete_bind(bind):
u1 = await test_create(bind)
await u1.delete(timeout=10)
u2 = await User.get(u1.id)
assert not u2
async def test_delete_multiple_primary_key(engine):
u1 = await test_create(engine)
u2 = await test_create(engine)
f = await Friendship.create(bind=engine, my_id=u1.id, friend_id=u2.id)
await f.delete(bind=engine)
f2 = await Friendship.get((u1.id, u2.id), bind=engine)
assert not f2
async def test_string_primary_key(engine):
relations = ["Colleagues", "Friends", "Lovers"]
for r in relations:
await Relation.create(bind=engine, timeout=10, name=r)
r1 = await Relation.get(relations[0], bind=engine, timeout=10)
assert r1.name == relations[0]
async def test_lookup_287(bind):
from gino.exceptions import NoSuchRowError
class Game(db.Model):
__tablename__ = "games"
game_id = db.Column(db.String(32), unique=True)
channel_id = db.Column(db.String(1), default="A")
await Game.gino.create()
try:
game_1 = await Game.create(game_id="1", channel_id="X")
game_2 = await Game.create(game_id="2", channel_id="Y")
# ordinary update should be fine
uq = game_1.update(game_id="3")
with pytest.raises(TypeError, match="Model Game has no table, primary key"):
# but applying the updates to DB should fail
await uq.apply()
with pytest.raises(
LookupError, match="Instance-level CRUD operations not allowed"
):
await game_2.delete()
with pytest.raises(
LookupError, match="Instance-level CRUD operations not allowed"
):
await game_2.query.gino.all()
with pytest.raises(
LookupError, match="Instance-level CRUD operations not allowed"
):
await game_2.select("game_id")
# previous ordinary update still in effect
assert game_1.game_id == "3"
assert await Game.select("game_id").gino.all() == [("1",), ("2",)]
Game.lookup = lambda self: Game.game_id == self.game_id
with pytest.raises(NoSuchRowError):
await game_1.update(channel_id="Z").apply()
await game_2.update(channel_id="Z").apply()
assert await Game.select("channel_id").gino.all() == [("X",), ("Z",)]
finally:
await Game.gino.drop()
async def test_lookup_custom_name(bind):
class ModelWithCustomColumnNames(db.Model):
__tablename__ = "gino_test_custom_column_names"
id = db.Column("other", db.Integer(), primary_key=True)
field = db.Column(db.Text())
await ModelWithCustomColumnNames.gino.create()
try:
# create
m1 = await ModelWithCustomColumnNames.create(id=1, field="A")
m2 = await ModelWithCustomColumnNames.create(id=2, field="B")
# update
uq = m1.update(field="C")
await uq.apply()
# lookup
assert set(
tuple(x) for x in await ModelWithCustomColumnNames.select("id").gino.all()
) == {(1,), (2,)}
assert (await ModelWithCustomColumnNames.get(2)).field == "B"
assert (await ModelWithCustomColumnNames.get(1)).field == "C"
assert await ModelWithCustomColumnNames.get(3) is None
# delete
assert (
await ModelWithCustomColumnNames.delete.where(
ModelWithCustomColumnNames.id == 3
).gino.status()
)[0] == 0
assert (
await ModelWithCustomColumnNames.delete.where(
ModelWithCustomColumnNames.id == 2
).gino.status()
)[0] == 1
assert set(
tuple(x) for x in await ModelWithCustomColumnNames.select("id").gino.all()
) == {(1,)}
finally:
await ModelWithCustomColumnNames.gino.drop()
|
11542777
|
import pytest
import tarski.benchmarks.blocksworld
from tarski.fstrips.representation import is_quantifier_free
from tarski.syntax import *
from tests.common import tarskiworld
from tarski.syntax.transform.nnf import NNFTransformation
from tarski.syntax.transform.cnf import to_conjunctive_normal_form_clauses
from tarski.syntax.transform.prenex import to_prenex_negation_normal_form
from tarski.syntax.transform import CNFTransformation, QuantifierElimination, remove_quantifiers, \
QuantifierEliminationMode
from tarski.syntax.transform import NegatedBuiltinAbsorption
from tarski.syntax.transform.errors import TransformationError
def test_nnf_conjunction():
bw = tarski.benchmarks.blocksworld.generate_fstrips_bw_language()
_ = bw.get_sort('block')
_ = bw.get_sort('place')
loc = bw.get_function('loc')
_ = bw.get_predicate('clear')
b1, b2, b3, b4 = [bw.get_constant('b{}'.format(k)) for k in range(1, 5)]
_ = bw.get_constant('table')
phi = neg(land(loc(b1) != loc(b2), loc(b3) != loc(b4)))
result = NNFTransformation.rewrite(phi)
gamma = lor(neg(loc(b1) != loc(b2)), neg(loc(b3) != loc(b4)))
assert str(result.nnf) == str(gamma)
def test_nnf_double_negation():
bw = tarski.benchmarks.blocksworld.generate_fstrips_bw_language()
_ = bw.get_sort('block')
_ = bw.get_sort('place')
loc = bw.get_function('loc')
_ = bw.get_predicate('clear')
b1, b2, b3, b4 = [bw.get_constant('b{}'.format(k)) for k in range(1, 5)]
_ = bw.get_constant('table')
phi = neg(neg(loc(b1) == loc(b2)))
result = NNFTransformation.rewrite(phi)
gamma = loc(b1) == loc(b2)
assert str(result.nnf) == str(gamma)
def test_nnf_quantifier_flips():
bw = tarski.benchmarks.blocksworld.generate_fstrips_bw_language()
block = bw.get_sort('block')
loc = bw.get_function('loc')
b1, b2, b3, b4 = [bw.get_constant('b{}'.format(k)) for k in range(1, 5)]
x = bw.variable('x', block)
phi = neg(exists(x, loc(x) == loc(b2)))
result = NNFTransformation.rewrite(phi)
gamma = forall(x, neg(loc(x) == loc(b2)))
assert str(result.nnf) == str(gamma)
def test_nnf_lpl_page_321_antecedent():
tw = tarskiworld.create_small_world()
x = tw.variable('x', tw.Object)
y = tw.variable('y', tw.Object)
s = forall(x, neg(land(tw.Cube(x), exists(y, land(tw.Tet(x), tw.LeftOf(x, y))))))
result = NNFTransformation.rewrite(s)
gamma = forall(x, lor(neg(tw.Cube(x)), forall(y, lor(neg(tw.Tet(x)), neg(tw.LeftOf(x, y))))))
assert str(result.nnf) == str(gamma)
def test_prenex_idempotency():
bw = tarski.benchmarks.blocksworld.generate_fstrips_bw_language()
loc = bw.get_function('loc')
b1, b2, b3, b4 = [bw.get_constant('b{}'.format(k)) for k in range(1, 5)]
phi = loc(b1) == b2
assert str(to_prenex_negation_normal_form(bw, phi, do_copy=True)) == str(phi)
def test_prenex_lpl_page_321():
tw = tarskiworld.create_small_world()
x = tw.variable('x', tw.Object)
y = tw.variable('y', tw.Object)
s1 = exists(y, land(tw.Dodec(y), tw.BackOf(x, y)))
s2 = land(tw.Cube(x), exists(y, land(tw.Tet(y), tw.LeftOf(x, y))))
phi = forall(x, implies(s2, s1))
yp = tw.variable("y'", tw.Object)
gamma = NNFTransformation.rewrite(exists(yp, forall(
x, y, implies(land(tw.Cube(x), land(tw.Tet(y), tw.LeftOf(x, y))), land(tw.Dodec(yp), tw.BackOf(x, yp)))))).nnf
assert str(to_prenex_negation_normal_form(tw, phi, do_copy=True)) == str(gamma)
def test_quantifier_elimination_fails_due_to_no_constants():
tw = tarskiworld.create_small_world()
x = tw.variable('x', tw.Object)
y = tw.variable('y', tw.Object)
s1 = exists(y, land(tw.Dodec(y), tw.BackOf(x, y)))
s2 = land(tw.Cube(x), exists(y, land(tw.Tet(y), tw.LeftOf(x, y))))
phi = forall(x, implies(s2, s1))
with pytest.raises(TransformationError):
QuantifierElimination.rewrite(tw, phi, QuantifierEliminationMode.All)
def test_universal_elimination_works():
tw = tarskiworld.create_small_world()
x = tw.variable('x', tw.Object)
y = tw.variable('y', tw.Object)
_ = tw.constant('obj1', tw.Object)
_ = tw.constant('obj2', tw.Object)
_ = tw.constant('obj3', tw.Object)
s1 = exists(y, land(tw.Dodec(y), tw.BackOf(x, y)))
s2 = land(tw.Cube(x), exists(y, land(tw.Tet(y), tw.LeftOf(x, y))))
phi = forall(x, implies(s2, s1))
# print(str(phi))
result = remove_quantifiers(tw, phi, QuantifierEliminationMode.Forall)
result2 = remove_quantifiers(tw, result, QuantifierEliminationMode.Forall)
assert str(result) == str(result2)
def create_small_world_elements(numobjects=3):
lang = tarskiworld.create_small_world()
x, y = lang.variable('x', lang.Object), lang.variable('y', lang.Object)
_ = [lang.constant(f'obj{i}', lang.Object) for i in range(1, numobjects + 1)]
return lang, x, y
def test_existential_elimination1():
lang, x, y = create_small_world_elements(2)
obj1, obj2 = lang.get("obj1"), lang.get("obj2")
phi = exists(y, land(lang.Dodec(y), lang.BackOf(x, y)))
result = remove_quantifiers(lang, phi, QuantifierEliminationMode.Exists)
# We cannot guarantee in which order the expansion of the exists will be done, so we check for both possibilities:
assert result == (lang.Dodec(obj1) & lang.BackOf(x, obj1)) | (lang.Dodec(obj2) & lang.BackOf(x, obj2)) or \
result == (lang.Dodec(obj2) & lang.BackOf(x, obj2)) | (lang.Dodec(obj1) & lang.BackOf(x, obj1))
def test_existential_elimination2():
lang, x, y = create_small_world_elements(2)
s1 = exists(y, land(lang.Dodec(y), lang.BackOf(x, y)))
s2 = land(lang.Cube(x), exists(y, land(lang.Tet(y), lang.LeftOf(x, y))))
phi = forall(x, implies(s2, s1))
result = remove_quantifiers(lang, phi, QuantifierEliminationMode.All)
assert is_quantifier_free(result)
def test_builtin_negation_absorption():
bw = tarski.benchmarks.blocksworld.generate_fstrips_bw_language()
block = bw.get_sort('block')
_ = bw.get_sort('place')
loc = bw.get_function('loc')
_ = bw.get_predicate('clear')
b1, b2, b3, b4 = [bw.get_constant('b{}'.format(k)) for k in range(1, 5)]
_ = bw.get_constant('table')
_ = bw.variable('x', block)
phi = neg(loc(b1) == b2)
psi = loc(b1) != b2
r = NegatedBuiltinAbsorption.rewrite(bw, phi)
assert str(r.formula) == str(psi)
def test_cnf_conversion_easy():
tw = tarskiworld.create_small_world()
obj1 = tw.constant('obj1', tw.Object)
obj2 = tw.constant('obj2', tw.Object)
_ = tw.constant('obj3', tw.Object)
s1 = land(tw.Cube(obj1), neg(tw.Tet(obj2)))
s2 = land(tw.Cube(obj2), neg(tw.Tet(obj1)))
phi = lor(s1, s2)
c1 = lor(tw.Cube(obj1), tw.Cube(obj2))
c2 = lor(tw.Cube(obj1), neg(tw.Tet(obj1)))
c3 = lor(neg(tw.Tet(obj2)), tw.Cube(obj2))
c4 = lor(neg(tw.Tet(obj2)), neg(tw.Tet(obj1)))
psi = land(land(c1, c2), land(c3, c4))
result = CNFTransformation.rewrite(tw, phi)
assert str(result.cnf) == str(psi)
def test_cnf_conversion_complex():
lang, x, y = create_small_world_elements(2)
s1 = exists(y, land(lang.Dodec(y), lang.BackOf(x, y)))
s2 = land(lang.Cube(x), exists(y, land(lang.Tet(y), lang.LeftOf(x, y))))
phi = forall(x, implies(s2, s1))
result = remove_quantifiers(lang, phi, QuantifierEliminationMode.All)
assert len(to_conjunctive_normal_form_clauses(lang, result)) == 30
    # Now remove the quantifiers after transforming to PNNF
result = remove_quantifiers(lang, to_prenex_negation_normal_form(lang, phi), QuantifierEliminationMode.All)
assert len(to_conjunctive_normal_form_clauses(lang, result)) == 126
|
11542811
|
from datetime import datetime, date
from six import string_types
from .utils import valid_platform, platform_names, valid_operand_for_operator, valid_operand_list, \
valid_operand, valid_operator, valid_days, valid_bool, parse_date
from .constants import TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, \
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_BETWEEN, TAG_FILTER_OPERATOR_NOTEQ, TAG_FILTER_OPERATOR_NOTIN
from .exceptions import PushwooshFilterInvalidOperandException, PushwooshFilterInvalidOperatorException
class BaseFilter(object):
def union(self, other):
return UnionFilter(self, other)
def intersect(self, other):
return IntersectFilter(self, other)
def subtract(self, other):
return SubtractFilter(self, other)
class BaseOperatorFilter(BaseFilter):
operation_sign = None
def __init__(self, first_filter, second_filter):
self.first_filter = first_filter
self.second_filter = second_filter
def __str__(self):
return '(%s %s %s)' % (self.first_filter, self.operation_sign, self.second_filter)
class UnionFilter(BaseOperatorFilter):
operation_sign = '+'
class IntersectFilter(BaseOperatorFilter):
operation_sign = '*'
class SubtractFilter(BaseOperatorFilter):
operation_sign = '\\'
class ApplicationFilter(BaseFilter):
prefix = 'A'
def __init__(self, code, platforms=None):
self.code = code
if platforms is not None:
if not isinstance(platforms, list):
platforms = [platforms]
for platform in platforms:
if not valid_platform(platform):
raise TypeError
self.platforms = platforms
def __str__(self):
platforms_str = ''
if self.platforms is not None:
platforms_str = '", "'.join(platform_names(self.platforms))
platforms_str = ', ["%s"]' % platforms_str
return '%s("%s"%s)' % (self.prefix, self.code, platforms_str)
class ApplicationGroupFilter(ApplicationFilter):
prefix = 'G'
class BaseTagFilter(BaseFilter):
prefix = 'T'
operators = tuple()
value_types = tuple()
def __init__(self, tag_name, operator, operand):
self.semantic_validation(operator, operand)
self.tag_name = tag_name
self.operator = operator
self.operand = operand
def __str__(self):
return '%s("%s", %s, %s)' % (self.prefix, self.tag_name, self.operator, self._render_operand())
def semantic_validation(self, operator, operand):
if not valid_operator(operator, self.operators):
raise PushwooshFilterInvalidOperatorException('Invalid operator %s for %s' % (operator, self.__class__.__name__))
if not valid_operand_for_operator(operand, operator):
raise PushwooshFilterInvalidOperandException('Invalid operand type %s for operator %s' % (type(operand).__name__, operator))
if isinstance(operand, list) and not valid_operand_list(operand, self.value_types):
raise PushwooshFilterInvalidOperandException('Invalid operand list value for %s' % self.__class__.__name__)
if not isinstance(operand, list) and not valid_operand(operand, self.value_types):
raise PushwooshFilterInvalidOperandException('Invalid operand type %s for %s' % (type(operand).__name__, self.__class__.__name__))
if operator == TAG_FILTER_OPERATOR_BETWEEN and len(operand) != 2:
raise PushwooshFilterInvalidOperandException('Invalid operand len for operator %s' % operator)
if operator == TAG_FILTER_OPERATOR_IN and len(operand) == 0:
raise PushwooshFilterInvalidOperandException('Invalid operand len for operator %s' % operator)
def _render_operand(self):
if isinstance(self.operand, list):
return self._render_list_operand(self.operand)
elif isinstance(self.operand, int):
return self._render_int_operand(self.operand)
elif isinstance(self.operand, string_types) or isinstance(self.operand, datetime) or isinstance(self.operand, date):
return self._render_str_operand(self.operand)
raise NotImplementedError()
def _render_list_operand(self, operand):
result = []
for op in operand:
if isinstance(op, int):
result.append(self._render_int_operand(op))
elif isinstance(op, string_types):
result.append(self._render_str_operand(op))
return '[%s]' % ', '.join(result)
def _render_str_operand(self, operand):
return '"%s"' % operand
def _render_int_operand(self, operand):
return '%d' % operand
class ApplicationBaseTagFilter(BaseTagFilter):
prefix = 'AT'
def __init__(self, tag_name, operator, operand, code):
super(ApplicationBaseTagFilter, self).__init__(tag_name, operator, operand)
self.code = code
def __str__(self):
return '%s("%s", "%s", %s, %s)' % (self.prefix, self.code, self.tag_name, self.operator,
self._render_operand())
class IntegerTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTEQ, TAG_FILTER_OPERATOR_NOTIN)
value_types = (int,)
class StringTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTEQ, TAG_FILTER_OPERATOR_NOTIN)
value_types = (int, string_types,)
class ListTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_IN,)
value_types = (int, string_types,)
class DateTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTIN, TAG_FILTER_OPERATOR_NOTEQ)
value_types = (string_types, date, datetime)
def semantic_validation(self, operator, operand):
super(DateTagFilter, self).semantic_validation(operator, operand)
self.operand = parse_date(operand)
if not self.operand:
raise PushwooshFilterInvalidOperandException('Invalid date format')
class DaysTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTIN, TAG_FILTER_OPERATOR_NOTEQ)
value_types = (int,)
def semantic_validation(self, operator, operand):
super(DaysTagFilter, self).semantic_validation(operator, operand)
if not valid_days(operand):
raise PushwooshFilterInvalidOperandException('Days count must be greater than 0')
class BooleanTagFilter(BaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ,)
value_types = (int, string_types)
def semantic_validation(self, operator, operand):
super(BooleanTagFilter, self).semantic_validation(operator, operand)
if not valid_bool(operand):
raise PushwooshFilterInvalidOperandException('%s value must be 0, 1, "true" or "false"' % self.__class__.__name__)
# Application tag filters
class IntegerTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTEQ, TAG_FILTER_OPERATOR_NOTIN)
value_types = (int,)
class StringTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTEQ, TAG_FILTER_OPERATOR_NOTIN)
value_types = (int, string_types,)
class ListTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_IN,)
value_types = (int, string_types,)
class DateTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTIN, TAG_FILTER_OPERATOR_NOTEQ)
value_types = (string_types,)
def semantic_validation(self, operator, operand):
super(DateTagFilterByApplication, self).semantic_validation(operator, operand)
self.operand = parse_date(operand)
if not self.operand:
raise PushwooshFilterInvalidOperandException('Invalid date format')
class DaysTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_LTE, TAG_FILTER_OPERATOR_GTE, TAG_FILTER_OPERATOR_EQ, TAG_FILTER_OPERATOR_BETWEEN,
TAG_FILTER_OPERATOR_IN, TAG_FILTER_OPERATOR_NOTIN, TAG_FILTER_OPERATOR_NOTEQ)
value_types = (int,)
def semantic_validation(self, operator, operand):
super(DaysTagFilterByApplication, self).semantic_validation(operator, operand)
if not valid_days(operand):
raise PushwooshFilterInvalidOperandException('Days count must be greater than 0')
class BooleanTagFilterByApplication(ApplicationBaseTagFilter):
operators = (TAG_FILTER_OPERATOR_EQ,)
value_types = (int, string_types)
def semantic_validation(self, operator, operand):
super(BooleanTagFilterByApplication, self).semantic_validation(operator, operand)
if not valid_bool(operand):
raise PushwooshFilterInvalidOperandException('%s value must be 0, 1, "true" or "false"' % self.__class__.__name__)
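# --- Usage sketch (illustrative addition, not part of the original module) ---
# Filters render themselves to Pushwoosh filter-language strings via __str__ and
# compose with union()/intersect()/subtract(). The application code and tag name
# below are made-up placeholders; because this module uses relative imports it has
# to be run in its package context (e.g. ``python -m <package>.<module>``).
if __name__ == "__main__":
    app = ApplicationFilter("AAAAA-BBBBB")
    city = StringTagFilter("City", TAG_FILTER_OPERATOR_EQ, "Madrid")
    # Prints something like '(A("AAAAA-BBBBB") + T("City", EQ, "Madrid"))';
    # the exact operator token depends on the value of TAG_FILTER_OPERATOR_EQ.
    print(app.union(city))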
|
11542814
|
from typing import Optional
import operator
import traitlets
import ipywidgets
from ..types import Image, ImageCollection, Proxytype
from .layer import WorkflowsLayer
class LayerPicker(ipywidgets.HBox):
"""
Widget to pick a WorkflowsLayer from a map
In subclasses, set `_attr` to the trait name on WorkflowsLayer that you want mirrored into
the `value` trait of this class.
Attributes
----------
value: ImageCollection, None
The parametrized ImageCollection of the currently-selected layer.
"""
value = traitlets.Instance(klass=ImageCollection, allow_none=True, read_only=True)
_attr = "value"
def __init__(
self,
map=None,
default_layer: Optional[WorkflowsLayer] = None,
hide_deps_of: Optional[Proxytype] = None,
**kwargs,
):
"""
Construct a LayerPicker widget for a map.
Parameters
----------
map: ipyleaflet.Map
The map instance to pick from. Defaults to `wf.map`.
default_layer: WorkflowsLayer
The layer instance to have selected by default
hide_deps_of: Proxytype
Hide any layers from the dropdown that have this object in their ``.params``.
Mainly used by the Picker parameter widget to hide its own layer from the dropdown,
avoiding graft cycles.
"""
super().__init__(**kwargs)
if map is None:
# use wf.map as default
from . import map
# awkwardly handle MapApp without circularly importing it for an isinstance check
try:
map = map.map
except AttributeError:
pass
self._map = map
self._hide_deps_of = hide_deps_of
self._dropdown = ipywidgets.Dropdown(equals=operator.is_)
type_ = type(self).value.klass
if default_layer is not None:
if not isinstance(default_layer, WorkflowsLayer):
raise TypeError(
f"Default values for an {type(self).__name__} can only be WorkflowsLayer instances "
f"(the layer object returned by `.visualize`), not {default_layer!r}."
"Also note that this default value won't be synced when publishing."
)
value = getattr(default_layer, self._attr)
if not isinstance(value, type_):
raise TypeError(
f"Expected a default layer visualizing an {type_.name}, not an {type(value).__name__}. "
"Pick a different layer, or pick a different type for this widget, or remove a "
"reduction operation (like `.mosaic()`, `.mean('images')`) from the code that "
f"produces the layer {default_layer.name!r}"
)
self.set_trait("value", value)
default_layer.observe(self._picked_layer_value_changes, self._attr)
self._picked_layer = default_layer
map.observe(self._update_options, "layers")
self._dropdown.observe(self._layer_picked, "value")
self.children = [self._dropdown]
self._setting_options = False
self._update_options({})
def _update_options(self, change):
type_ = type(self).value.klass
options = [
(lyr.name, lyr)
for lyr in reversed(self._map.layers)
if isinstance(lyr, WorkflowsLayer)
and isinstance(getattr(lyr, self._attr), type_)
and all(p is not self._hide_deps_of for p in lyr.xyz_obj.params)
]
# when changing options, ipywidgets always just picks the first option.
# this is infuriatingly difficult to work around, so we set our own flag to ignore
# changes while this is happening.
self._setting_options = True
self._dropdown.options = options
self._setting_options = False
try:
self._dropdown.value = self._picked_layer
except traitlets.TraitError:
# the previously-picked layer doesn't exist anymore;
# we'd rather just have no value in that case
self._dropdown.value = None
self._picked_layer = None
self.set_trait("value", None)
def _layer_picked(self, change):
new_layer = change["new"]
if self._setting_options or new_layer is self._picked_layer:
return
if self._picked_layer is not None:
self._picked_layer.unobserve(self._picked_layer_value_changes, self._attr)
if new_layer is None:
self._picked_layer = None
self.set_trait("value", None)
else:
new_layer.observe(self._picked_layer_value_changes, self._attr)
self._picked_layer = new_layer
self.set_trait("value", getattr(new_layer, self._attr))
def unlink(self):
self._map.unobserve("layers", self._update_options)
self._dropdown.unobserve("value", self._layer_picked)
if self._picked_layer is not None:
self._picked_layer.unobserve(self._picked_layer_value_changes, self._attr)
self._picked_layer = None
def _picked_layer_value_changes(self, change):
self.set_trait("value", change["new"])
def _ipython_display_(self):
super()._ipython_display_()
class ImagePickerWidget(LayerPicker):
"""
Widget to pick a layer from the map, as an Image.
If selecting an ImageCollection layer, this gives it with its reduction applied.
Note you cannot change the selected layer programmatically,
only by using the widget.
"""
value = traitlets.Instance(klass=Image, allow_none=True, read_only=True)
_attr = "image_value"
class ImageCollectionPickerWidget(LayerPicker):
"""
Widget to pick a layer from the map, as an ImageCollection.
Only layers showing ImageCollections (not Images) will be shown.
Note you cannot change the selected layer programmatically,
only by using the widget.
"""
|
11542823
|
import torch
import torch.nn as nn
import torchtestcase
import unittest
from survae.tests.distributions.conditional import ConditionalDistributionTest
from survae.distributions import ConditionalMeanNormal, ConditionalMeanStdNormal, ConditionalNormal
class ConditionalMeanNormalTest(ConditionalDistributionTest):
def test_distribution_is_well_behaved(self):
batch_size = 16
size = 10
x = torch.randn(batch_size, size)
# Basic check
context = torch.randn(batch_size, 20)
distribution = ConditionalMeanNormal(net=nn.Linear(20,size))
self.assert_distribution_is_well_behaved(distribution, x, context, expected_shape=(batch_size, size))
# Check mean
mean = distribution.mean(context)
self.assert_tensor_is_good(mean, x.shape)
class ConditionalMeanStdNormalTest(ConditionalDistributionTest):
def test_distribution_is_well_behaved(self):
batch_size = 16
size = 10
x = torch.randn(batch_size, size)
# Basic check
context = torch.randn(batch_size, 20)
distribution = ConditionalMeanStdNormal(net=nn.Linear(20,size), scale_shape=(1,))
self.assert_distribution_is_well_behaved(distribution, x, context, expected_shape=(batch_size, size))
# Check mean
mean = distribution.mean(context)
self.assert_tensor_is_good(mean, x.shape)
class ConditionalNormalTest(ConditionalDistributionTest):
def test_distribution_is_well_behaved(self):
batch_size = 16
size = 10
x = torch.randn(batch_size, size)
# Basic check
context = torch.randn(batch_size, 20)
distribution = ConditionalNormal(net=nn.Linear(20,2*size))
self.assert_distribution_is_well_behaved(distribution, x, context, expected_shape=(batch_size, size))
# Check mean
mean = distribution.mean(context)
self.assert_tensor_is_good(mean, x.shape)
# Check mean_stddev
mean, stddev = distribution.mean_stddev(context)
self.assert_tensor_is_good(mean, x.shape)
self.assert_tensor_is_good(stddev, x.shape)
if __name__ == '__main__':
unittest.main()
|
11542843
|
import csv
import loremipsum
import random
import re
from ..loadxl import *
class Anonymizer(object):
"""Change email addresses and names consistently
"""
# From Colander. Not exhaustive, will not match .museum etc.
email_re = re.compile(r'(?i)[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}')
random_words = loremipsum._generator.words
def __init__(self):
self.mapped_emails = {}
self.mapped_names = {}
self.generated_emails = set()
self.generated_names = set()
def replace_emails(self, dictrows):
for row in dictrows:
for k, v in list(row.items()):
if v is None:
continue
new_value, num_subs = self.email_re.subn(
self._replace_emails, v)
row[k] = new_value
yield row
def replace_non_pi_names(self, dictrows):
for row in dictrows:
if row.get('job_title') != 'PI':
if 'first_name' in row:
row['first_name'] = random.choice(self.random_words).capitalize()
if 'last_name' in row:
row['last_name'] = self._random_name()
yield row
def _random_email(self):
for _ in range(1000):
generated = "%s.%s@%s.%s" % \
tuple(random.choice(self.random_words) for n in range(4))
if generated not in self.generated_emails:
self.generated_emails.add(generated)
return generated
raise AssertionError("Unable to find random email")
def _replace_emails(self, matchobj):
found = matchobj.group(0)
new, original = self.mapped_emails.get(found.lower(), (None, None))
if new is not None:
if found != original:
raise ValueError(
"Case mismatch for %s, %s" % (found, original))
return new
new = self._random_email()
self.mapped_emails[found.lower()] = (new, found)
return new
def _random_name(self):
for _ in range(1000):
if random.choice(range(4)):
generated = random.choice(self.random_words).capitalize()
else:
generated = "%s-%s" % \
tuple(random.choice(self.random_words).capitalize()
for n in range(2))
if generated not in self.generated_names:
self.generated_names.add(generated)
return generated
raise AssertionError("Unable to find random name")
def set_existing_key_value(**kw):
def component(dictrows):
for row in dictrows:
for k, v in kw.items():
if k in row:
row[k] = v
yield row
return component
def drop_rows_with_all_key_value(**kw):
def component(dictrows):
for row in dictrows:
if not all(row[k] == v if k in row else False for k, v in kw.items()):
yield row
return component
def extract_pipeline():
return [
skip_rows_with_all_falsey_value('test'),
skip_rows_with_all_key_value(test='skip'),
skip_rows_with_all_falsey_value('test'),
skip_rows_missing_all_keys('uuid'),
drop_rows_with_all_key_value(_skip=True),
]
def anon_pipeline():
anonymizer = Anonymizer()
return extract_pipeline() + [
set_existing_key_value(
fax='000-000-0000',
phone1='000-000-0000',
phone2='000-000-0000',
skype='skype',
google='google',
),
anonymizer.replace_emails,
anonymizer.replace_non_pi_names,
]
def run(pipeline, inpath, outpath):
for item_type in ORDER:
source = read_single_sheet(inpath, item_type)
fieldnames = [k for k in source.fieldnames if ':ignore' not in k]
with open(os.path.join(outpath, item_type + '.tsv'), 'wb') as out:
writer = csv.DictWriter(out, fieldnames, dialect='excel-tab', extrasaction='ignore')
writer.writeheader()
writer.writerows(combine(source, pipeline))
def main():
import argparse
parser = argparse.ArgumentParser(description='Extract test data set.')
parser.add_argument('--anonymize', '-a', action="store_true",
help="anonymize the data.")
parser.add_argument('inpath',
help="input zip file of excel sheets.")
parser.add_argument('outpath',
help="directory to write filtered tsv files to.")
args = parser.parse_args()
pipeline = anon_pipeline() if args.anonymize else extract_pipeline()
import pdb
import sys
import traceback
try:
run(pipeline, args.inpath, args.outpath)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == '__main__':
main()
|
11542850
|
from functools import lru_cache
from pathlib import Path
import pytest
XML_ROOT = Path(__file__).parent / "tests" / "xml"
@pytest.fixture()
@lru_cache()
def saml_request_minimal() -> str:
with (XML_ROOT / "min/request/sample_saml_request_minimal.xml").open("r") as f:
return f.read()
@pytest.fixture()
@lru_cache()
def sp_metadata_xml() -> str:
with (XML_ROOT / "metadata/sp_metadata.xml").open("r") as f:
return f.read()
|
11542909
|
import os
from typing import NamedTuple
dirname, _ = os.path.split(os.path.dirname(__file__))
class ConveRTModelConfig(NamedTuple):
num_embed_hidden: int = 512
feed_forward1_hidden: int = 2048
feed_forward2_hidden: int = 1024
num_attention_project: int = 64
vocab_size: int = 25000
num_encoder_layers: int = 6
dropout_rate: float = 0.0
n: int = 121
relative_attns: list = [3, 5, 48, 48, 48, 48]
num_attention_heads: int = 2
token_sequence_truncation: int = 60
class ConveRTTrainConfig(NamedTuple):
sp_model_path: str = os.path.join(dirname, "data/en.wiki.bpe.vs25000.model")
dataset_path: str = os.path.join(dirname, "data/sample-dataset.json")
test_dataset_path: str = "data/sample-dataset.json"
model_save_dir: str = "lightning_logs/checkpoints/"
log_dir: str = "lightning_logs"
device: str = "cpu"
use_data_paraller: bool = True
is_reddit: bool = True
train_batch_size: int = 64
test_batch_size: int = 256
split_size: int = 8
    learning_rate: float = 1e-3  # final learning rate, i.e. the value the lr is annealed to
    lr_warmup_start: float = 0.1  # lr at the start of the initial linear warmup section
    lr_warmup_end: float = 1.0  # lr at the end of the linear warmup section, where annealing begins
    warmup_batch: float = 10000  # number of batches to warm up linearly over
    final_batch: float = 1e8  # batch at which the learning rate is annealed down to learning_rate_end
learning_rate_end: float = 0.0001
epochs: int = 10
grad_norm_clip: float = 1.0
smoothing: float = 0.2
    l2_weight_decay: float = 1e-5  # note: weight decay is not the same as an L2 penalty when using Adam;
    # adding L2 regularization (or any Lagrange multiplier on the loss) directly is not advisable here
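# --- Usage sketch (illustrative addition, not part of the original module) ---
# Both configs are plain NamedTuples: build them with keyword overrides and derive
# variants immutably with ``_replace``. The default paths defined above may not
# exist on a given machine; the values below are placeholders.
if __name__ == "__main__":
    model_config = ConveRTModelConfig(num_encoder_layers=4)
    train_config = ConveRTTrainConfig(train_batch_size=32, device="cuda")
    debug_config = train_config._replace(epochs=1)
    print(model_config.num_embed_hidden, train_config.train_batch_size, debug_config.epochs)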
|
11542926
|
from marltoolbox.algos.lola.train_cg_tune_class_API import LOLAPGCG
from marltoolbox.algos.lola.train_exact_tune_class_API import LOLAExact
from marltoolbox.algos.lola.train_pg_tune_class_API import LOLAPGMatrice
|
11542932
|
from __future__ import unicode_literals
import os
from rbtools.api.errors import APIError
from rbtools.commands import Command, CommandError, Option
class Attach(Command):
"""Attach a file to a review request."""
name = 'attach'
author = 'The Review Board Project'
needs_api = True
args = '<review-request-id> <file>'
option_list = [
Option('--filename',
dest='filename',
default=None,
help='Custom filename for the file attachment.'),
Option('--caption',
dest='caption',
default=None,
help='Caption for the file attachment.'),
Command.server_options,
Command.repository_options,
]
def main(self, review_request_id, path_to_file):
try:
review_request = self.api_root.get_review_request(
review_request_id=review_request_id)
except APIError as e:
raise CommandError('Error getting review request %s: %s'
% (review_request_id, e))
try:
with open(path_to_file, 'rb') as f:
content = f.read()
except IOError:
raise CommandError('%s is not a valid file.' % path_to_file)
# Check if the user specified a custom filename, otherwise
# use the original filename.
filename = self.options.filename or os.path.basename(path_to_file)
try:
review_request.get_file_attachments().upload_attachment(
filename, content, self.options.caption)
except APIError as e:
raise CommandError('Error uploading file: %s' % e)
self.stdout.write('Uploaded %s to review request %s.'
% (path_to_file, review_request_id))
self.json.add('attached_file', path_to_file)
self.json.add('review_request', review_request_id)
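# --- Usage sketch (illustrative addition, not part of the original module) ---
# The command is invoked through the rbt command line; the review request id,
# paths and captions below are placeholders:
#   rbt attach 42 ./screenshot.png --caption "Before the fix"
#   rbt attach 42 ./build.log --filename build-log.txt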
|
11542943
|
from dataclasses import dataclass, field
from OnePy.sys_module.models.base_log import TradeLogBase
@dataclass
class StockTradeLog(TradeLogBase):
buy: float = None
sell: float = None
size: float = None
def generate(self):
sell_order_type = self._get_order_type(self.sell)
buy_order_type = self._get_order_type(self.buy)
per_comm_pct = self.env.recorder.per_comm_pct
per_comm = self.env.recorder.per_comm
self.entry_date = self.buy.trading_date
self.entry_price = self.buy.first_cur_price
self.entry_type = f'{buy_order_type} {self.buy.action_type.value}'
self.exit_price = self.sell.first_cur_price
self.exit_type = f'{sell_order_type} {self.sell.action_type.value}'
self.pl_points = (self.sell.first_cur_price -
self.buy.first_cur_price)*self._earn_short()
self.re_pnl = self.pl_points*self.size
if per_comm_pct:
self.commission = per_comm_pct*self.size*self.entry_price
else:
self.commission = per_comm*self.size/100
if self.env.execute_on_close_or_next_open == 'open':
self.exit_date = self.sell.signal.next_datetime
elif self.env.execute_on_close_or_next_open == 'close':
self.exit_date = self.sell.trading_date
return self
def settle_left_trade(self):
cur_price = self.env.feeds[self.buy.ticker].execute_price
buy_order_type = self._get_order_type(self.buy)
per_comm_pct = self.env.recorder.per_comm_pct
per_comm = self.env.recorder.per_comm
self.entry_date = self.buy.trading_date
self.entry_price = self.buy.first_cur_price
self.entry_type = f'{buy_order_type} {self.buy.action_type.value}'
self.exit_date = None
self.exit_price = None
self.exit_type = None
self.pl_points = (
cur_price - self.buy.first_cur_price)*self._earn_short()
self.re_pnl = self.pl_points*self.size
if per_comm_pct:
self.commission = per_comm_pct*self.size*self.entry_price
else:
self.commission = per_comm*self.size/100
return self
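# --- Worked example (illustrative addition, not part of the original module) ---
# With hypothetical numbers for a long trade (assuming _earn_short() returns 1),
# entry (buy) price 10.0, exit (sell) price 12.0, size 100, per_comm 1 and
# per_comm_pct unset:
#   pl_points  = (12.0 - 10.0) * 1   = 2.0
#   re_pnl     = 2.0 * 100           = 200.0
#   commission = 1 * 100 / 100       = 1.0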
|
11542987
|
from typing import Union, Tuple, Optional
from genomics_data_index.storage.model.QueryFeatureHGVS import QueryFeatureHGVS
from genomics_data_index.storage.model.QueryFeatureMutationSPDI import QueryFeatureMutationSPDI
class NucleotideMutationTranslater:
@classmethod
def convert_deletion(cls, deletion: Union[str, int]) -> int:
if isinstance(deletion, str):
if deletion.isdigit():
deletion = int(deletion)
else:
if not set(deletion).issubset({'A', 'T', 'C', 'G'}):
raise Exception('Deletion must either be an integer or a string with alphabet {A,T,C,G}'
f': {deletion}')
deletion = len(deletion)
elif isinstance(deletion, int):
if deletion < 0:
raise Exception(f'ref=[{deletion}] must be a non-negative integer')
else:
raise Exception(f'ref=[{deletion}] must be either a string or a non-negative integer')
return deletion
@classmethod
def from_spdi(cls, spdi: str, convert_deletion=True) -> Tuple[str, int, Union[int, str], str]:
if spdi is None:
raise Exception('Cannot parse value spdi=None')
values = spdi.split(':')
if len(values) != 4:
raise Exception(f'Incorrect number of items for spdi=[{spdi}]')
else:
position = int(values[1])
if convert_deletion:
deletion = cls.convert_deletion(values[2])
else:
deletion = values[2]
if deletion.isdigit():
deletion = int(deletion)
elif deletion == '':
raise Exception(f'deletion=[{deletion}] but convert_deletion is False')
if position < 0:
raise Exception(f'Position must be non-negative: {position}')
return str(values[0]), position, deletion, str(values[3])
@classmethod
def to_spdi(cls, sequence_name: str, position: int, ref: Union[str, int], alt: str,
convert_deletion: bool = True) -> str:
if position < 0:
raise Exception(f'Position must be non-negative: {position}')
if convert_deletion:
ref = cls.convert_deletion(ref)
return f'{sequence_name}:{position}:{ref}:{alt}'
@classmethod
def to_db_feature(cls, feature: QueryFeatureMutationSPDI) -> QueryFeatureMutationSPDI:
new_id = cls.to_spdi(feature.scope, feature.position, feature.ref, feature.alt)
return QueryFeatureMutationSPDI(new_id)
@classmethod
def to_hgvs_id(cls, sequence_name: str, gene: Optional[str], variant: str) -> Optional[str]:
return QueryFeatureHGVS.create(sequence_name=sequence_name,
gene=gene,
variant=variant).id
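# --- Usage sketch (illustrative addition, not part of the original module) ---
# An SPDI identifier is "<sequence>:<position>:<ref-or-deletion-length>:<alt>".
# The sequence name below is a placeholder; requires the genomics_data_index
# package to be importable.
if __name__ == "__main__":
    parsed = NucleotideMutationTranslater.from_spdi('ref:10:ATT:C')
    print(parsed)  # ('ref', 10, 3, 'C') -- the deleted bases are converted to a length
    print(NucleotideMutationTranslater.to_spdi(*parsed))  # 'ref:10:3:C'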
|
11543013
|
class Artista(object):
def __init__(self, **args):
self._id = args.get('id')
self._area = args.get('area')
self._tipoc = args.get('TipoC')
self._name= args.get('nombre')
self._sortname = args.get('nombreBus')
self._id2 = args.get('id2')
self._eScore = args.get('extScore')
    def __str__(self):
        return ('id: {}\n'
                'nombre: {}\n'
                'nombreBus: {}\n'
                'TipoCd: {}\n'
                'ExtScore: {}\n').format(self._id2, self._name, self._sortname,
                                         self._tipoc, self._eScore)
|
11543022
|
import argparse
from trapperkeeper import models
from trapperkeeper import config
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create schema on configured database.")
parser.add_argument("-c", "--config", default="/etc/trapperkeeper.yaml",
help="Path to config file.")
args = parser.parse_args()
config = config.Config.from_file(args.config, False)
db_engine = models.get_db_engine(config["database"])
models.Model.metadata.create_all(db_engine)
|
11543036
|
from team29.analizer.abstract.expression import Expression, TYPE
from team29.analizer.abstract import expression
from team29.analizer.reports import Nodo
from datetime import datetime
from team29.analizer.statement.expressions.primitive import Primitive
class Current(Expression):
def __init__(self, val, optStr, row, column) -> None:
super().__init__(row, column)
self.val = val
self.optStr = optStr
self.temp = val
if optStr != None:
self.temp += " " + optStr
def execute(self, environment):
try:
if self.val == "CURRENT_DATE":
value = datetime.now().strftime("%Y/%m/%d")
elif self.val == "CURRENT_TIME":
value = datetime.now().strftime("%H:%M:%S")
elif self.val == "TIMESTAMP":
if self.optStr == "now":
value = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
else:
value = self.optStr
else:
# ERROR
expression.list_errors.append(
"Error: 22007: Formato de fecha invalido " + str(self.str)
)
value = self.val
return Primitive(TYPE.STRING, value, self.temp, self.row, self.column)
except:
expression.list_errors.append(
"Error: P0001: Error en expresiones de fechas"
)
pass
def dot(self):
new = Nodo.Nodo(self.val)
return new
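# --- Usage sketch (illustrative addition, not part of the original module) ---
# The environment argument is not used for the CURRENT_DATE/CURRENT_TIME branches,
# so None is enough for a quick check; requires the team29 package to be importable.
if __name__ == "__main__":
    prim = Current("CURRENT_DATE", None, 0, 0).execute(None)
    print(type(prim).__name__)  # Primitive, wrapping today's date as 'YYYY/MM/DD'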
|
11543057
|
import math
import torch
import torch.nn as nn
from collections import OrderedDict
import ltr.models.lwl.label_encoder as seg_label_encoder
from ltr import model_constructor
import ltr.models.lwl.linear_filter as target_clf
import ltr.models.target_classifier.features as clf_features
import ltr.models.lwl.initializer as seg_initializer
import ltr.models.lwl.loss_residual_modules as loss_residual_modules
import ltr.models.lwl.decoder as lwtl_decoder
import ltr.models.backbone as backbones
import ltr.models.backbone.resnet_mrcnn as mrcnn_backbones
import ltr.models.meta.steepestdescent as steepestdescent
class LWTLBoxNet(nn.Module):
def __init__(self, feature_extractor, target_model, decoder, target_model_input_layer, decoder_input_layers,
label_encoder=None, box_label_encoder=None):
super().__init__()
self.feature_extractor = feature_extractor # Backbone feature extractor F
self.target_model = target_model # Target model and the few-shot learner
self.decoder = decoder # Segmentation Decoder
self.label_encoder = label_encoder # Few-shot label generator and weight predictor
self.target_model_input_layer = (target_model_input_layer,) if isinstance(target_model_input_layer,
str) else target_model_input_layer
self.decoder_input_layers = decoder_input_layers
self.output_layers = sorted(list(set(self.target_model_input_layer + self.decoder_input_layers)))
self.box_label_encoder = box_label_encoder
self.train_only_box_label_gen = True
def train(self, mode=True):
for x in self.feature_extractor.parameters():
x.requires_grad_(False)
self.feature_extractor.eval()
if mode:
for x in self.box_label_encoder.parameters():
x.requires_grad_(True)
self.box_label_encoder.train()
if self.train_only_box_label_gen:
for x in self.target_model.parameters():
x.requires_grad_(False)
self.target_model.eval()
for x in self.label_encoder.parameters():
x.requires_grad_(False)
self.label_encoder.eval()
for x in self.decoder.parameters():
x.requires_grad_(False)
self.decoder.eval()
else:
for x in self.target_model.parameters():
x.requires_grad_(True)
self.target_model.train()
for x in self.label_encoder.parameters():
x.requires_grad_(True)
self.label_encoder.train()
for x in self.decoder.parameters():
x.requires_grad_(True)
self.decoder.train()
else:
for x in self.target_model.parameters():
x.requires_grad_(False)
self.target_model.eval()
for x in self.label_encoder.parameters():
x.requires_grad_(False)
self.label_encoder.eval()
for x in self.decoder.parameters():
x.requires_grad_(False)
self.decoder.eval()
for x in self.box_label_encoder.parameters():
x.requires_grad_(False)
self.box_label_encoder.eval()
def forward(self, train_imgs, test_imgs, train_masks, test_masks, bb_train, num_refinement_iter=2):
assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'
assert train_masks.dim() == 4, 'Expect 4 dimensional masks'
num_sequences = train_imgs.shape[1]
num_train_frames = train_imgs.shape[0]
num_test_frames = test_imgs.shape[0]
# Extract backbone features
train_feat = self.extract_backbone_features(train_imgs.contiguous().view(-1, train_imgs.shape[-3], train_imgs.shape[-2], train_imgs.shape[-1]))
test_feat = self.extract_backbone_features(test_imgs.contiguous().view(-1, test_imgs.shape[-3], test_imgs.shape[-2], test_imgs.shape[-1]))
# Extract classification features
train_feat_clf = self.extract_classification_feat(train_feat) # seq*frames, channels, height, width
test_feat_clf = self.extract_classification_feat(test_feat) # seq*frames, channels, height, width
bb_mask_enc = self.box_label_encoder(bb_train, train_feat_clf)
box_mask_pred, decoder_feat = self.decoder(bb_mask_enc, test_feat, test_imgs.shape[-2:],
('layer4_dec', 'layer3_dec', 'layer2_dec', 'layer1_dec'))
mask_enc = self.label_encoder(box_mask_pred, train_feat_clf)
mask_enc_test = self.label_encoder(test_masks.contiguous(), test_feat_clf)
train_feat_clf = train_feat_clf.view(num_train_frames, num_sequences, *train_feat_clf.shape[-3:])
filter, filter_iter, _ = self.target_model.get_filter(train_feat_clf, *mask_enc)
test_feat_clf = test_feat_clf.view(num_test_frames, num_sequences, *test_feat_clf.shape[-3:])
target_scores = [self.target_model.classify(f, test_feat_clf) for f in filter_iter]
# target_scores = [s.unsqueeze(dim=2) for s in target_scores]
target_scores_last_iter = target_scores[-1]
mask_pred, decoder_feat = self.decoder(target_scores_last_iter, test_feat, test_imgs.shape[-2:],
('layer4_dec', 'layer3_dec', 'layer2_dec', 'layer1_dec'))
decoder_feat['mask_enc'] = target_scores_last_iter.view(-1, *target_scores_last_iter.shape[-3:])
if isinstance(mask_enc_test, (tuple, list)):
mask_enc_test = mask_enc_test[0]
return mask_pred, target_scores, mask_enc_test, box_mask_pred
def segment_target(self, target_filter, test_feat_tm, test_feat):
# Classification features
assert target_filter.dim() == 5 # seq, filters, ch, h, w
test_feat_tm = test_feat_tm.view(1, 1, *test_feat_tm.shape[-3:])
mask_encoding_pred = self.target_model.apply_target_model(target_filter, test_feat_tm)
mask_pred, decoder_feat = self.decoder(mask_encoding_pred, test_feat,
(test_feat_tm.shape[-2] * 16, test_feat_tm.shape[-1] * 16))
return mask_pred, None
def get_backbone_target_model_features(self, backbone_feat):
# Get the backbone feature block which is input to the target model
feat = OrderedDict({l: backbone_feat[l] for l in self.target_model_input_layer})
if len(self.target_model_input_layer) == 1:
return feat[self.target_model_input_layer[0]]
return feat
def extract_target_model_features(self, backbone_feat):
return self.target_model.extract_target_model_features(self.get_backbone_target_model_features(backbone_feat))
def extract_backbone_features(self, im, layers=None):
if layers is None:
layers = self.output_layers
return self.feature_extractor(im, layers)
@model_constructor
def steepest_descent_resnet50(filter_size=1, num_filters=1, optim_iter=3, optim_init_reg=0.01,
backbone_pretrained=False, clf_feat_blocks=1,
clf_feat_norm=True, final_conv=False,
out_feature_dim=512,
target_model_input_layer='layer3',
decoder_input_layers=("layer4", "layer3", "layer2", "layer1",),
detach_length=float('Inf'),
label_encoder_dims=(1, 1),
frozen_backbone_layers=(),
decoder_mdim=64, filter_groups=1,
use_bn_in_label_enc=True,
dilation_factors=None,
backbone_type='imagenet',
box_label_encoder_dims=(1, 1),
box_label_encoder_type='ResidualDS16FeatSWBoxCatMultiBlock',
use_gauss=False,
use_final_relu=True,
init_bn=1.0,
final_bn=False,
gauss_scale=0.25
):
# backbone feature extractor F
if backbone_type == 'imagenet':
backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)
elif backbone_type == 'mrcnn':
backbone_net = mrcnn_backbones.resnet50(pretrained=False, frozen_layers=frozen_backbone_layers)
else:
raise Exception
norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))
layer_channels = backbone_net.out_feature_channels()
# Extracts features input to the target model
target_model_feature_extractor = clf_features.residual_basic_block(
feature_dim=layer_channels[target_model_input_layer],
num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
final_conv=final_conv, norm_scale=norm_scale,
out_dim=out_feature_dim)
# Few-shot label generator and weight predictor
label_encoder = seg_label_encoder.ResidualDS16SW(layer_dims=label_encoder_dims + (num_filters,),
use_bn=use_bn_in_label_enc)
# Predicts initial target model parameters
initializer = seg_initializer.FilterInitializerZero(filter_size=filter_size, num_filters=num_filters,
feature_dim=out_feature_dim, filter_groups=filter_groups)
# Computes few-shot learning loss
residual_module = loss_residual_modules.LWTLResidual(init_filter_reg=optim_init_reg)
# Iteratively updates the target model parameters by minimizing the few-shot learning loss
optimizer = steepestdescent.GNSteepestDescent(residual_module=residual_module, num_iter=optim_iter,
detach_length=detach_length,
residual_batch_dim=1, compute_losses=True)
# Target model and Few-shot learner
target_model = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
filter_optimizer=optimizer, feature_extractor=target_model_feature_extractor,
filter_dilation_factors=dilation_factors)
# Decoder
decoder_input_layers_channels = {L: layer_channels[L] for L in decoder_input_layers}
decoder = lwtl_decoder.LWTLDecoder(num_filters, decoder_mdim, decoder_input_layers_channels, use_bn=True)
if box_label_encoder_type == 'ResidualDS16FeatSWBoxCatMultiBlock':
box_label_encoder = seg_label_encoder.ResidualDS16FeatSWBoxCatMultiBlock(feat_dim=out_feature_dim,
layer_dims=box_label_encoder_dims + (
num_filters,),
use_gauss=use_gauss,
use_final_relu=use_final_relu,
use_bn=use_bn_in_label_enc,
non_default_init=True, init_bn=init_bn,
gauss_scale=gauss_scale,
final_bn=final_bn)
else:
raise Exception
net = LWTLBoxNet(feature_extractor=backbone_net, target_model=target_model, decoder=decoder,
label_encoder=label_encoder,
target_model_input_layer=target_model_input_layer, decoder_input_layers=decoder_input_layers,
box_label_encoder=box_label_encoder)
return net
@model_constructor
def steepest_descent_resnet50_from_checkpoint(net=None, num_filters=1,
out_feature_dim=512,
target_model_input_layer='layer3',
box_label_encoder_dims=(1, 1),
use_bn_in_label_enc=True,
box_label_encoder_type='ResidualDS16FeatSWBoxCatMultiBlock',
use_gauss=False,
use_final_relu=True,
init_bn=1,
gauss_scale=0.25,
decoder_input_layers=("layer4", "layer3", "layer2", "layer1",),
final_bn=False):
if box_label_encoder_type == 'ResidualDS16FeatSWBoxCatMultiBlock':
box_label_encoder = seg_label_encoder.ResidualDS16FeatSWBoxCatMultiBlock(feat_dim=out_feature_dim,
layer_dims=box_label_encoder_dims + (
num_filters,),
use_gauss=use_gauss,
use_final_relu=use_final_relu,
use_bn=use_bn_in_label_enc,
non_default_init=True, init_bn=init_bn,
gauss_scale=gauss_scale,
final_bn=final_bn)
else:
raise Exception
net = LWTLBoxNet(feature_extractor=net.feature_extractor, target_model=net.target_model, decoder=net.decoder,
label_encoder=net.label_encoder, target_model_input_layer=target_model_input_layer,
decoder_input_layers=decoder_input_layers,
box_label_encoder=box_label_encoder)
return net
|
11543075
|
import cv2
import itertools
import math
import numpy as np
import tensorflow as tf
import time
from attacks.local_search_helper import LocalSearchHelper
class ParsimoniousAttack(object):
"""Parsimonious attack using local search algorithm"""
def __init__(self, model, args, **kwargs):
"""Initialize attack.
Args:
model: TensorFlow model
args: arguments
"""
# Hyperparameter setting
self.loss_func = args.loss_func
self.max_queries = args.max_queries
self.epsilon = args.epsilon
self.batch_size = args.batch_size
self.block_size = args.block_size
self.no_hier = args.no_hier
self.max_iters = args.max_iters
# Create helper
self.local_search = LocalSearchHelper(model, args)
def _perturb_image(self, image, noise):
"""Given an image and a noise, generate a perturbed image.
First, resize the noise with the size of the image.
Then, add the resized noise to the image.
Args:
image: numpy array of size [1, 299, 299, 3], an original image
noise: numpy array of size [1, 256, 256, 3], a noise
Returns:
            adv_image: numpy array of size [1, 299, 299, 3], a perturbed image
"""
adv_image = image + cv2.resize(noise[0, ...], (self.width, self.height), interpolation=cv2.INTER_NEAREST)
adv_image = np.clip(adv_image, 0., 1.)
return adv_image
def _split_block(self, upper_left, lower_right, block_size):
"""Split an image into a set of blocks.
Note that a block consists of [upper_left, lower_right, channel]
Args:
upper_left: [x, y], the coordinate of the upper left of an image
lower_right: [x, y], the coordinate of the lower right of an image
block_size: int, the size of a block
Returns:
blocks: list, the set of blocks
"""
blocks = []
xs = np.arange(upper_left[0], lower_right[0], block_size)
ys = np.arange(upper_left[1], lower_right[1], block_size)
for x, y in itertools.product(xs, ys):
for c in range(3):
blocks.append([[x, y], [x+block_size, y+block_size], c])
return blocks
def perturb(self, image, label, index, sess):
"""Perturb an image.
Args:
image: numpy array of size [1, 299, 299, 3], an original image
label: numpy array of size [1], the label of the image (or target label)
index: int, the index of the image
sess: TensorFlow session
Returns:
adv_image: numpy array of size [1, 299, 299, 3], an adversarial image
num_queries: int, the number of queries
success: bool, True if attack is successful
"""
# Set random seed by index for the reproducibility
np.random.seed(index)
# Class variables
self.width = image.shape[1]
self.height = image.shape[2]
# Local variables
adv_image = np.copy(image)
num_queries = 0
block_size = self.block_size
upper_left = [0, 0]
lower_right = [256, 256]
# Split an image into a set of blocks
blocks = self._split_block(upper_left, lower_right, block_size)
# Initialize a noise to -epsilon
noise = -self.epsilon*np.ones([1, 256, 256, 3], dtype=np.float32)
# Construct a batch
num_blocks = len(blocks)
batch_size = self.batch_size if self.batch_size > 0 else num_blocks
curr_order = np.random.permutation(num_blocks)
# Main loop
while True:
# Run batch
num_batches = int(math.ceil(num_blocks/batch_size))
for i in range(num_batches):
# Pick a mini-batch
bstart = i*batch_size
bend = min(bstart + batch_size, num_blocks)
blocks_batch = [blocks[curr_order[idx]] for idx in range(bstart, bend)]
# Run local search algorithm on the mini-batch
noise, queries, loss, success = self.local_search.perturb(
image, noise, label, sess, blocks_batch)
num_queries += queries
tf.logging.info("Block size: {}, batch: {}, loss: {:.4f}, num queries: {}".format(
block_size, i, loss, num_queries))
# If query count exceeds the maximum queries, then return False
if num_queries > self.max_queries:
return adv_image, num_queries, False
# Generate an adversarial image
adv_image = self._perturb_image(image, noise)
# If attack succeeds, return True
if success:
return adv_image, num_queries, True
            # If block size >= 2, then split the image into smaller blocks and reconstruct a batch
if not self.no_hier and block_size >= 2:
block_size //= 2
blocks = self._split_block(upper_left, lower_right, block_size)
num_blocks = len(blocks)
batch_size = self.batch_size if self.batch_size > 0 else num_blocks
curr_order = np.random.permutation(num_blocks)
# Otherwise, shuffle the order of the batch
else:
curr_order = np.random.permutation(num_blocks)
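# --- Usage sketch (illustrative addition, not part of the original module) ---
# _split_block tiles the noise canvas into per-channel square blocks and does not
# touch ``self``, so it can be exercised directly (the module-level imports,
# tensorflow and attacks.local_search_helper, still need to be resolvable).
# A 256x256 area with block size 128 gives 2 x 2 positions x 3 channels = 12 blocks.
if __name__ == "__main__":
    blocks = ParsimoniousAttack._split_block(None, [0, 0], [256, 256], 128)
    print(len(blocks))  # 12
    print(blocks[0])    # upper-left [0, 0], lower-right [128, 128], channel 0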
|
11543104
|
import random
import string
import smtplib
from django.db import models
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.auth.models import User
def generate_code():
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
class ResetCode(models.Model):
"""
Provide a code to reset a users forgotten details.
"""
code = models.CharField(max_length=8, default=generate_code)
account = models.ForeignKey(User)
def send_email(self):
if self.code and self.account:
subject = '{from_name} user account access code'.format(from_name=settings.EMAIL_FROM)
message = '''Dear {first} {last},\n
            You are receiving this email because you have asked to reset part of your account.\n
Please input the following code when asked: {code}\n
Thank you,\n
{from_name}'''.format(first=self.account.first_name,
last=self.account.last_name,
code=self.code,
from_name=settings.EMAIL_FROM)
try:
send_mail(subject,
message,
settings.EMAIL_HOST_USER,
(self.account.email,),
fail_silently=False)
except smtplib.SMTPException:
return False
return True
|
11543114
|
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
import scipy.io as sio
class nyuv2dataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.batch_size = opt.batch_size
self.root = opt.dataroot # path for nyu2.npy
self.nyu2 = np.load("{}/{}".format(self.root,"nyuv2.npy"),encoding = 'latin1').tolist()
splits = sio.loadmat("{}/{}".format(self.root,"splits.mat"))
self.indexes = [x[0] - 1 for x in splits["trainNdxs"]] if opt.phase == "train" else [x[0] -1 for x in splits["testNdxs"]]
self.num_labels = 41
self.ignore_label = 0
self.class_weights = None
def __getitem__(self, index):
index = self.indexes[index]
rgb_image = np.array(self.nyu2["rgb_images"][index],dtype=np.uint8)
depth_image = self.nyu2["depth_images"][index]
depth_image = np.expand_dims(depth_image,axis=2)
mask = np.array(self.nyu2["masks"][index],dtype=np.uint8)
rgb_image = transforms.ToTensor()(rgb_image)
depth_image = transforms.ToTensor()(depth_image)
mask = torch.from_numpy(mask)
mask = mask.type(torch.LongTensor)
return {'rgb_image': rgb_image, 'depth_image': depth_image, 'mask': mask, 'path': str(index)+".png"}
def __len__(self):
return len(self.indexes)
def name(self):
return 'nyuv2dataset'
|
11543124
|
import sys
import os
PATH = os.path.dirname(__file__)
sys.path.append(PATH)
thirdparties = ['numpy-groupies']
for name in thirdparties:
sys.path.append(os.path.join(PATH, '../../thirdparty/', name))
# Data Layer
from .Data import *
# Layers
from .FC import *
from .Conv import *
from .ConvT import *
from .BatchNorm import *
# Layers without learning
from .Pool import *
from .Dropout import *
from .Reshape import *
from .Crop import *
# Activate Layer
from .Sigmoid import *
from .ReLU import *
from .PReLU import *
from .SELU import *
from .Tanh import *
from .Softmax import *
# Multi IO Layer
from .Concat import *
from .Slice import *
from .Eltwise import *
# Cost Layer
from .MSE import *
from .CrossEntropy import *
from .SigmoidCrossEntropy import *
from .SoftmaxWithLoss import *
from .L1Loss import *
from .SmoothL1Loss import *
from .ContrastiveLoss import *
# Evaluation Layer (No Backward)
from .Accuracy import *
# Test Layer
from .MergeTest import *
from .SplitTest import *
# Operators
from ..operators import *
|
11543131
|
import unittest
from arangodb import six
from arangodb.orm.fields import UuidField
class UuidFieldTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_state(self):
uuid = UuidField()
self.assertEqual(uuid.text, None)
uuid.on_create(model_instance=None)
self.assertTrue(isinstance(uuid.text, six.string_types))
|
11543142
|
import os
''' Create a folder if it doesn't exist '''
def createDirectory(new_dir):
if not os.path.isdir(new_dir):
for p in xrange(2,len(new_dir.split("/"))+1):
tmp_string = "/".join(new_dir.split('/')[:p])
if not os.path.isdir(tmp_string):
try:
os.mkdir(tmp_string)
except:
print "error making dir:", tmp_string
def formatFileString(x):
if len(x) == 1:
return x+'0'
else:
return x
|
11543154
|
import time
import cv2
import numpy as np
from chainer import serializers, Variable
import chainer.functions as F
import argparse
from yolov2 import *
class AnimalPredictor:
def __init__(self):
# hyper parameters
weight_file = "./backup/yolov2_final_cpu.model"
self.n_classes = 10
self.n_boxes = 5
self.detection_thresh = 0.3
self.iou_thresh = 0.3
self.label_file = "./data/label.txt"
with open(self.label_file, "r") as f:
self.labels = f.read().strip().split("\n")
# load model
print("loading animal model...")
yolov2 = YOLOv2(n_classes=self.n_classes, n_boxes=self.n_boxes)
model = YOLOv2Predictor(yolov2)
serializers.load_hdf5(weight_file, model) # load saved model
model.predictor.train = False
model.predictor.finetune = False
self.model = model
def __call__(self, orig_img):
orig_input_height, orig_input_width, _ = orig_img.shape
#img = cv2.resize(orig_img, (640, 640))
img = reshape_to_yolo_size(orig_img)
input_height, input_width, _ = img.shape
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img, dtype=np.float32) / 255.0
img = img.transpose(2, 0, 1)
# forward
x_data = img[np.newaxis, :, :, :]
x = Variable(x_data)
x, y, w, h, conf, prob = self.model.predict(x)
# parse results
_, _, _, grid_h, grid_w = x.shape
x = F.reshape(x, (self.n_boxes, grid_h, grid_w)).data
y = F.reshape(y, (self.n_boxes, grid_h, grid_w)).data
w = F.reshape(w, (self.n_boxes, grid_h, grid_w)).data
h = F.reshape(h, (self.n_boxes, grid_h, grid_w)).data
conf = F.reshape(conf, (self.n_boxes, grid_h, grid_w)).data
prob = F.transpose(F.reshape(prob, (self.n_boxes, self.n_classes, grid_h, grid_w)), (1, 0, 2, 3)).data
detected_indices = (conf * prob).max(axis=0) > self.detection_thresh
results = []
for i in range(detected_indices.sum()):
results.append({
"label": self.labels[prob.transpose(1, 2, 3, 0)[detected_indices][i].argmax()],
"probs": prob.transpose(1, 2, 3, 0)[detected_indices][i],
"conf" : conf[detected_indices][i],
"objectness": conf[detected_indices][i] * prob.transpose(1, 2, 3, 0)[detected_indices][i].max(),
"box" : Box(
x[detected_indices][i]*orig_input_width,
y[detected_indices][i]*orig_input_height,
w[detected_indices][i]*orig_input_width,
h[detected_indices][i]*orig_input_height).crop_region(orig_input_height, orig_input_width)
})
# nms
nms_results = nms(results, self.iou_thresh)
return nms_results
if __name__ == "__main__":
# argument parse
    parser = argparse.ArgumentParser(description="Load the image at the given path and predict bounding boxes and classes")
    parser.add_argument('path', help="path to the image file")
args = parser.parse_args()
image_file = args.path
# read image
print("loading image...")
orig_img = cv2.imread(image_file)
predictor = AnimalPredictor()
nms_results = predictor(orig_img)
# draw result
for result in nms_results:
left, top = result["box"].int_left_top()
cv2.rectangle(
orig_img,
result["box"].int_left_top(), result["box"].int_right_bottom(),
(255, 0, 255),
3
)
text = '%s(%2d%%)' % (result["label"], result["probs"].max()*result["conf"]*100)
cv2.putText(orig_img, text, (left, top-6), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
print(text)
|
11543164
|
from django.apps import AppConfig
class SampleappoauthConfig(AppConfig):
name = 'SampleAppOAuth'
|
11543214
|
import collections
import datetime
import itertools
import logging
import os
import sys
from typing import List, Optional
import pandas as pd
import requests
from crowdkit.aggregation import BradleyTerry
from crowdkit.aggregation import MajorityVote
from toloka.client import Pool, Project, structure
from toloka.client import TolokaClient
from toloka.client.actions import ChangeOverlap
from toloka.client.collectors import AssignmentsAssessment
from toloka.client.conditions import AssessmentEvent
from toloka.client.exceptions import IncorrectActionsApiError
from toloka.client.task import Task
from toloka.streaming import AssignmentsObserver, Pipeline
from toloka.streaming.event import AssignmentEvent
logging.basicConfig(
format='%(levelname)s - %(asctime)s - %(name)s: %(message)s',
level=logging.INFO,
stream=sys.stdout
)
GITHUB_RAW = 'https://raw.githubusercontent.com'
GITHUB_BASE_PATH = 'Toloka/toloka-kit/main/examples/6.streaming_pipelines'
class VerificationDoneHandler:
"""verification_pool -> find_items_pool back using quality control rule"""
def __init__(self, client: TolokaClient, overlap_verification: int):
self.client = client
self.waiting = collections.defaultdict(list)
self.overlap_verification = overlap_verification
def __call__(self, events: List[AssignmentEvent]) -> None:
for event in events:
for task, solution in zip(event.assignment.tasks, event.assignment.solutions):
answer = (solution.output_values['result'], event.assignment.user_id)
self.waiting[task.input_values['assignment_id']].append(answer)
to_aggregate = []
for assignment_id, answers in self.waiting.items():
if len(answers) >= self.overlap_verification:
to_aggregate.extend((assignment_id, result, user_id) for result, user_id in answers)
if to_aggregate:
to_aggregate_df = pd.DataFrame(to_aggregate, columns=['task', 'label', 'performer'])
aggregated = MajorityVote().fit_predict(to_aggregate_df)
logging.info('Statuses to apply count: %s', collections.Counter(aggregated.values))
for assignment_id, result in aggregated.items():
try:
if result == 'Yes':
self.client.accept_assignment(assignment_id, 'Well done!')
else:
self.client.reject_assignment(assignment_id, 'Incorrect object.')
except IncorrectActionsApiError: # You could have accepted or rejected it in the UI.
logging.exception('Can\'t set status %s at %s', result, assignment_id)
del self.waiting[assignment_id]
logging.info('Waiting for verification count: %d', len(self.waiting))
class AcceptedItemsToComparison:
"""find_items_pool -> sbs_pool"""
def __init__(self, client: TolokaClient, sbs_pool: Pool, overlap_find_items: int, overlap_sbs: int):
self.client = client
self.waiting = collections.defaultdict(list)
self.sbs_pool = sbs_pool
self.overlap_find_items = overlap_find_items
self.overlap_sbs = overlap_sbs
def __call__(self, events: List[AssignmentEvent]) -> None:
for event in events:
for task, solution in zip(event.assignment.tasks, event.assignment.solutions):
self.waiting[task.input_values['image']].append(solution.output_values['found_link'])
to_sbs = [(image, found_links)
for image, found_links in self.waiting.items()
if len(found_links) >= self.overlap_find_items]
if to_sbs:
logging.info('Got images ready for SbS count: %d', len(to_sbs))
sbs_tasks = []
for image, found_links in to_sbs:
for left_link, right_link in itertools.combinations(found_links, 2):
input_values = {'image': image, 'left_link': left_link, 'right_link': right_link}
sbs_tasks.append(Task(pool_id=self.sbs_pool.id, overlap=self.overlap_sbs, input_values=input_values))
logging.info('SbS tasks to create count: %d', len(sbs_tasks))
self.client.create_tasks(sbs_tasks, open_pool=True)
for image, _ in to_sbs:
del self.waiting[image]
logging.info('Waiting for SbS count: %d', len(self.waiting))
class HandleSbS:
"""sbs_pool results aggregation"""
def __init__(self, client: TolokaClient, overlap_sbs: int):
self.overlap_sbs = overlap_sbs
self.client = client
self.waiting = collections.defaultdict(list)
self.scores_by_image = {}
def __call__(self, events: List[AssignmentEvent]) -> None:
for event in events:
for task, solution in zip(event.assignment.tasks, event.assignment.solutions):
answer = {'image': task.input_values['image'],
'performer': event.assignment.user_id,
'left': task.input_values['left_link'],
'right': task.input_values['right_link'],
'label': solution.output_values['result']}
self.waiting[task.input_values['image']].append(answer)
for image, answers in list(self.waiting.items()):
if len(answers) >= self.overlap_sbs:
scores = BradleyTerry(n_iter=100).fit_predict(pd.DataFrame(answers))
self.scores_by_image[image] = scores.sort_values(ascending=False)
del self.waiting[image]
logging.info('Waiting for SbS aggregation count: %d', len(self.waiting))
class FoundItemsHandler:
def __init__(self, client: TolokaClient, verification_pool: Pool, overlap_verification: int):
self.overlap_verification = overlap_verification
self.verification_pool = verification_pool
self.client = client
def __call__(self, events: List[AssignmentEvent]) -> None:
verification_tasks = [
Task(
pool_id=self.verification_pool.id,
unavailable_for=[event.assignment.user_id],
overlap=self.overlap_verification,
input_values={
'image': task.input_values['image'],
'found_link': solution.output_values['found_link'],
'assignment_id': event.assignment.id
},
)
for event in events
for task, solution in zip(event.assignment.tasks, event.assignment.solutions)
]
self.client.create_tasks(verification_tasks, open_pool=True)
logging.info('Verification tasks created count: %d', len(verification_tasks))
def _load_json_from_github(filename: str):
response = requests.get(os.path.join(GITHUB_RAW, GITHUB_BASE_PATH, filename))
response.raise_for_status()
return response.json()
def create_project(client: TolokaClient, filename: str) -> Project:
return client.create_project(_load_json_from_github(filename))
def create_pool(client: TolokaClient, filename: str, project_id: str, reward_per_assignment: float) -> Pool:
pool = structure(_load_json_from_github(filename), Pool)
pool.project_id = project_id
pool.reward_per_assignment = reward_per_assignment
pool.will_expire = datetime.datetime.now() + datetime.timedelta(days=3)
return client.create_pool(pool)
class FindItemsPipeline:
find_items_pool: Optional[Pool]
verification_pool: Optional[Pool]
sbs_pool: Optional[Pool]
def __init__(
self,
client: TolokaClient,
overlap_find_items=12, overlap_verification=3, overlap_sbs=3
):
self.client = client
self.overlap_find_items = overlap_find_items
self.overlap_verification = overlap_verification
self.overlap_sbs = overlap_sbs
self.find_items_pool = None
self.verification_pool = None
self.sbs_pool = None
self.pipeline = None
def init_pipeline(self) -> None:
find_items_project = create_project(self.client, 'find_items_project.json')
find_items_pool = create_pool(self.client, 'find_items_pool.json', find_items_project.id, 0.08)
verification_project = create_project(self.client, 'verification_project.json')
verification_pool = create_pool(self.client, 'verification_pool.json', verification_project.id, 0.02)
sbs_project = create_project(self.client, 'sbs_project.json')
sbs_pool = create_pool(self.client, 'sbs_pool.json', sbs_project.id, 0.04)
find_items_pool.quality_control.add_action(
collector=AssignmentsAssessment(),
conditions=[AssessmentEvent == AssessmentEvent.REJECT],
action=ChangeOverlap(delta=1, open_pool=True),
)
self.client.update_pool(find_items_pool.id, find_items_pool)
pipeline = Pipeline()
found_items_observer = pipeline.register(AssignmentsObserver(self.client, find_items_pool.id))
verification_observer = pipeline.register(AssignmentsObserver(self.client, verification_pool.id))
sbs_observer = pipeline.register(AssignmentsObserver(self.client, sbs_pool.id))
found_items_observer.on_submitted(FoundItemsHandler(self.client, verification_pool, self.overlap_verification))
found_items_observer.on_accepted(AcceptedItemsToComparison(self.client, sbs_pool, self.overlap_find_items, self.overlap_sbs))
verification_observer.on_accepted(VerificationDoneHandler(self.client, self.overlap_verification))
sbs_observer.on_accepted(HandleSbS(self.client, self.overlap_sbs))
images = [
'https://tlk.s3.yandex.net/wsdm2020/photos/8ca087fe33065d75327cafdb8720204b.jpg',
'https://tlk.s3.yandex.net/wsdm2020/photos/d0c9eb8737f48df5964d93b08ec0d758.jpg',
'https://tlk.s3.yandex.net/wsdm2020/photos/9245eed8aa1d1e6f5d5d39d00ab044c6.jpg',
'https://tlk.s3.yandex.net/wsdm2020/photos/0aff4fc1edbe6096a9a517092902627f.jpg',
'http://tolokaadmin.s3.yandex.net/demo/abb61898-c886-4e20-b7cd-c0d359ddbb9a',
]
tasks = [
Task(pool_id=find_items_pool.id, overlap=self.overlap_find_items, input_values={'image': image})
for image in images
]
self.client.create_tasks(tasks)
self.find_items_pool = find_items_pool
self.verification_pool = verification_pool
self.sbs_pool = sbs_pool
self.pipeline = pipeline
def run(self):
if self.pipeline is None:
raise RuntimeError('You need to call FindItemsPipeline.init_pipeline before FindItemsPipeline.run')
self.client.open_pool(self.find_items_pool.id)
return self.pipeline.run()
|
11543215
|
import logging
import typing
import random
import heapq
import time
import asyncio
import threading
from .actor import Actor
from .message import ActorMessage
from .state import ActorState, OUTBOX, EXPORT, ERROR, ERROR_NOTRY
from .storage import ActorLocalStorage
from .registery import ActorRegistery
from .builtin_actors.name import (
ACTOR_SYSTEM,
ACTOR_MESSAGE_FETCHER,
ACTOR_MESSAGE_ACKER,
ACTOR_MESSAGE_NOTIFY_SENDER,
ACTOR_STORAGE_COMPACTOR,
)
from .prometheus import metric_queue_op, ACTOR_QUEUE_INBOX_SIZE, ACTOR_QUEUE_OUTBOX_SIZE
LOG = logging.getLogger(__name__)
class ActorStorageState:
def __init__(self, storage, state):
self._storage = storage
self._state = state
def __getattr__(self, *args, **kwargs):
return getattr(self._state, *args, **kwargs)
def apply(self, type, **kwargs):
self._state.apply(type, **kwargs)
self._storage.append(type, **kwargs)
def apply_notify(self, **kwargs):
self.apply('notify', **kwargs)
def apply_inbox(self, **kwargs):
self.apply('inbox', **kwargs)
def apply_execute(self, **kwargs):
self.apply('execute', **kwargs)
def apply_outbox(self, **kwargs):
self.apply('outbox', **kwargs)
def apply_done(self, **kwargs):
self.apply('done', **kwargs)
def apply_complete(self, **kwargs):
self.apply('complete', **kwargs)
def apply_export(self, **kwargs):
self.apply('export', **kwargs)
def apply_acked(self, **kwargs):
self.apply('acked', **kwargs)
def apply_retry(self, **kwargs):
self.apply('retry', **kwargs)
def apply_restart(self, **kwargs):
self.apply('restart', **kwargs)
class ActorQueue:
def __init__(
self,
actor_name: str,
registery: ActorRegistery,
state: ActorState,
schedule_fetcher,
concurrency: int = 100,
max_retry_count: int = 1,
max_retry_time: int = 10 * 60,
fetcher_concurrency: int = 3,
):
self.actor_name = actor_name
self.registery = registery
self.state = state
self.schedule_fetcher = schedule_fetcher
self.inbox_lowsize = max(1, concurrency // 10)
self.inbox_highsize = max(3, concurrency // 3)
self.outbox_lowsize = max(10, concurrency)
self.outbox_highsize = max(30, concurrency * 3)
self.max_retry_count = max_retry_count
self.max_retry_time = max_retry_time
self.fetcher_concurrency = fetcher_concurrency
self.inbox = [] # [(priority, message)]
self.dst_outbox = {} # dst -> [(priority, message)]
self.dst_node_outbox = {} # dst_node -> dst -> [(priority, message)]
self.is_fetching = False
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, self.actor_name)
def stats(self):
dst_stats = []
for dst, v in self.dst_outbox.items():
if v:
dst_stats.append(dict(dst=dst, size=len(v)))
dst_stats = list(sorted(dst_stats, key=lambda x: x['size']))
dst_node_stats = []
for dst_node, d in self.dst_node_outbox.items():
for dst, v in d.items():
if v:
dst_node_stats.append(dict(dst_node=dst_node, dst=dst, size=len(v)))
dst_node_stats = list(sorted(dst_node_stats, key=lambda x: x['size']))
return dict(
name=self.actor_name,
inbox_lowsize=self.inbox_lowsize,
inbox_highsize=self.inbox_highsize,
outbox_lowsize=self.outbox_lowsize,
outbox_highsize=self.outbox_highsize,
inbox_size=self.inbox_size(),
outbox_size=self.outbox_size(),
is_fetching=self.is_fetching,
dst_outbox=dst_stats,
dst_node_outbox=dst_node_stats,
)
def inbox_size(self):
return len(self.inbox)
def outbox_size(self):
n = sum(len(x) for x in self.dst_outbox.values())
for box in self.dst_node_outbox.values():
n += sum(len(x) for x in box.values())
for message_ids in self.state.done_message_ids[self.actor_name].values():
n += len(message_ids)
return n
def is_inbox_empty(self):
return self.inbox_size() <= 0
def is_outbox_full(self):
return self.outbox_size() >= self.outbox_highsize
def execute_priority(self):
priority, message = self.inbox[0]
if priority is None:
priority = 100
return priority * (self.outbox_size() / self.outbox_highsize)
def op_notify(self, dst: str, src_node: str, available: bool):
self.state.apply_notify(dst=dst, src_node=src_node, available=available)
self.auto_schedule_fetcher()
def op_inbox(self, message: ActorMessage):
if message.is_expired():
LOG.warning(f'expired message {message}')
return
self.state.apply_inbox(message=message)
self.push_inbox(message)
def op_execute(self) -> ActorMessage:
while self.inbox:
priority, message = heapq.heappop(self.inbox)
if message.is_expired():
LOG.warning(f'expired message {message}')
self.state.apply_complete(message_id=message.id, status=ERROR_NOTRY)
continue
self.state.apply_execute(message_id=message.id)
self.auto_schedule_fetcher()
return message
return None
def op_outbox(self, message_id: str, outbox_messages: [ActorMessage]):
self.state.apply_outbox(message_id=message_id, outbox_messages=outbox_messages)
for x in outbox_messages:
self.push_outbox(x)
self.auto_schedule_fetcher()
def _export_box(self, result, box, retry_base_at):
priority, outbox_message = heapq.heappop(box)
if outbox_message.is_expired():
LOG.warning(f'expired outbox_message {outbox_message}')
self.state.apply_acked(outbox_message_id=outbox_message.id, status=ERROR_NOTRY)
else:
outbox_state = self.state.get_outbox_state(outbox_message.id)
if not outbox_state:
LOG.warning(f'outbox_message {outbox_message} not in state!')
return
executed_count = outbox_state['executed_count']
retry_at = retry_base_at + self.backoff_delay(executed_count)
self.state.apply_export(outbox_message_id=outbox_message.id, retry_at=retry_at)
result.append(outbox_message)
def op_export(self, dst, dst_node, maxsize) -> [ActorMessage]:
ret = []
retry_base_at = time.time() + self.max_retry_time
dst_box = self.dst_node_outbox.get(dst_node)
box = dst_box.get(dst) if dst_box else None
while len(ret) < maxsize and box:
self._export_box(ret, box, retry_base_at)
box = self.dst_outbox.get(dst)
while len(ret) < maxsize and box:
self._export_box(ret, box, retry_base_at)
self.auto_schedule_fetcher()
return ret
def op_done(self, message_id: str, status: str):
self.state.apply_done(message_id=message_id, status=status)
self.auto_schedule_fetcher()
def on_fetcher_done(self):
self.is_fetching = False
self.auto_schedule_fetcher()
def op_acked(self, outbox_message_id: str, status: str):
self.state.apply_acked(outbox_message_id=outbox_message_id, status=status)
self.auto_schedule_fetcher()
def push_inbox(self, message: ActorMessage):
heapq.heappush(self.inbox, (message.priority, message))
def push_outbox(self, outbox_message):
if outbox_message.dst_node:
outbox = self.dst_node_outbox.setdefault(outbox_message.dst_node, {})
outbox = outbox.setdefault(outbox_message.dst, [])
else:
outbox = self.dst_outbox.setdefault(outbox_message.dst, [])
heapq.heappush(outbox, (outbox_message.priority, outbox_message))
def outbox_info(self):
dst_info = []
dst_node_info = []
for dst, box in self.dst_outbox.items():
if box:
dst_info.append(dst)
for dst_node, dst_box in self.dst_node_outbox.items():
for dst, box in dst_box.items():
if box:
dst_node_info.append((dst_node, dst))
return dst_info, dst_node_info
def choice_available_upstream_list(self):
nodes = self.state.upstream.get(self.actor_name, set())
if len(nodes) <= self.fetcher_concurrency:
return nodes
return random.sample(nodes, self.fetcher_concurrency)
def auto_schedule_fetcher(self):
if self.is_fetching:
return
if self.outbox_size() > self.outbox_highsize:
return
if self.inbox_size() > self.inbox_lowsize:
return
upstream_list = self.choice_available_upstream_list()
if not upstream_list:
return
maxsize = self.inbox_highsize - self.inbox_size()
message = self.registery.create_message(
priority=0,
src=self.actor_name,
dst=ACTOR_MESSAGE_FETCHER,
dst_node=self.registery.current_node_name,
content=dict(
actor_name=self.actor_name,
upstream_list=list(upstream_list),
maxsize=maxsize,
),
)
self.schedule_fetcher(message)
self.is_fetching = True
def backoff_delay(self, executed_count):
# 8s, 64s, 8m, ...
random_seconds = random.randint(0, 8 * 1000) / 1000
return min(((8**executed_count) + random_seconds), self.max_retry_time)
def check_timeout_and_retry(self, now):
# TODO: check outbox message expired
retry_outbox_message_ids = []
error_notry_outbox_message_ids = []
for state in self.state.state.values():
if state['status'] != OUTBOX:
continue
for outbox_message_id, outbox_state in state['outbox_states'].items():
outbox_status = outbox_state['status']
retry_at = outbox_state.get('retry_at')
executed_count = outbox_state.get('executed_count')
if outbox_status == ERROR:
if retry_at and now > retry_at:
retry_outbox_message_ids.append(outbox_message_id)
elif outbox_status == EXPORT:
if now > retry_at:
outbox_message = self.state.get_outbox_message(outbox_message_id)
if executed_count > outbox_message.max_retry:
error_notry_outbox_message_ids.append(outbox_message_id)
else:
retry_outbox_message_ids.append(outbox_message_id)
for outbox_message_id in error_notry_outbox_message_ids:
self.op_acked(outbox_message_id, ERROR_NOTRY)
for outbox_message_id in retry_outbox_message_ids:
outbox_message = self.state.get_outbox_message(outbox_message_id)
if outbox_message.is_expired():
LOG.warning(f'expired outbox_message {outbox_message}')
self.state.apply_acked(outbox_message_id=outbox_message.id, status=ERROR_NOTRY)
else:
self.state.apply_retry(outbox_message_id=outbox_message.id)
self.push_outbox(outbox_message)
return len(error_notry_outbox_message_ids)
class ActorMessageQueue:
def __init__(
self,
registery: ActorRegistery,
actors: typing.Dict[str, Actor],
storage: ActorLocalStorage = None,
concurrency: int = 100,
max_retry_count: int = 1,
max_retry_time: int = 10 * 60,
max_complete_size: int = 128,
):
self.registery = registery
self.actors = actors
self.concurrency = concurrency
self.max_retry_count = max_retry_count
self.max_retry_time = max_retry_time
self.max_complete_size = max_complete_size
state = ActorState(max_complete_size=max_complete_size)
self.raw_state = state
if storage:
state = ActorStorageState(storage, state)
self.state = state
self.storage = storage
self.thread_actor_queues = {}
self.async_actor_queues = {}
self.lock = threading.Lock()
self.execute_condition = threading.Condition(self.lock)
self.is_notifing = False
self.is_compacting = False
def actor_queue(self, actor_name: str):
if actor_name not in self.actors:
raise ValueError(f'actor {actor_name} does not exist')
actor = self.actors[actor_name]
if actor.is_async:
q = self.async_actor_queues.get(actor_name)
else:
q = self.thread_actor_queues.get(actor_name)
if q is None:
concurrency = self.concurrency
if actor.is_async:
concurrency *= 3
q = ActorQueue(
actor_name=actor_name,
registery=self.registery,
state=self.state,
schedule_fetcher=self._op_inbox,
concurrency=concurrency,
max_retry_count=self.max_retry_count,
max_retry_time=self.max_retry_time,
)
if actor.is_async:
self.async_actor_queues[actor_name] = q
else:
self.thread_actor_queues[actor_name] = q
return q
def all_actor_queues(self) -> typing.List[ActorQueue]:
for actor_queues in [self.async_actor_queues, self.thread_actor_queues]:
yield from actor_queues.values()
def inbox_size(self):
return sum(x.inbox_size() for x in self.all_actor_queues())
def outbox_size(self):
return sum(x.outbox_size() for x in self.all_actor_queues())
def qsize(self):
return self.inbox_size() + self.outbox_size()
def stats(self):
with self.lock:
actor_stats = []
for actor in self.all_actor_queues():
if actor.inbox_size() or actor.outbox_size():
actor_stats.append(actor.stats())
actor_stats = list(sorted(actor_stats, key=lambda x: x['name']))
return dict(
is_compacting=self.is_compacting,
is_notifing=self.is_notifing,
inbox_size=self.inbox_size(),
outbox_size=self.outbox_size(),
concurrency=self.concurrency,
max_retry_count=self.max_retry_count,
max_retry_time=self.max_retry_time,
state=self.state.stats(),
actors=actor_stats,
)
def op_execute(self) -> ActorMessage:
"""
For executors
"""
with self.execute_condition:
while True:
msg = self._op_execute(self.thread_actor_queues)
if msg is not None:
metric_queue_op('execute', msg)
return msg
self.execute_condition.wait()
async def async_op_execute(self) -> ActorMessage:
while True:
with self.lock:
msg = self._op_execute(self.async_actor_queues)
if msg is not None:
metric_queue_op('execute', msg)
return msg
await asyncio.sleep(0.1)
def op_outbox(self, message_id: str, outbox_messages: [ActorMessage]):
"""
For executors
"""
with self.lock:
message = self.state.get_message(message_id)
if not message:
LOG.warning(f'message {message_id} does not exist')
return
self.actor_queue(message.dst).op_outbox(message_id, outbox_messages=outbox_messages)
for x in outbox_messages:
metric_queue_op('outbox', x)
def op_done(self, message_id: str, status: str):
"""
For executors
"""
with self.lock:
message = self.state.get_message(message_id)
self.actor_queue(message.dst).op_done(message_id, status=status)
if message.dst == ACTOR_MESSAGE_FETCHER:
self.actor_queue(message.src).on_fetcher_done()
if message.dst == ACTOR_MESSAGE_NOTIFY_SENDER:
self.is_notifing = False
if message.dst == ACTOR_STORAGE_COMPACTOR:
self.is_compacting = False
self.execute_condition.notify()
metric_queue_op('done', message)
def op_export(self, dst: str, dst_node: str, maxsize: int):
"""
For receiver (message exporter)
"""
with self.lock:
if dst == ACTOR_MESSAGE_ACKER:
ret = list(self._export_ack(dst_node, maxsize))
else:
ret = []
for actor_queue in self.all_actor_queues():
ret.extend(actor_queue.op_export(dst, dst_node, maxsize))
maxsize -= len(ret)
if maxsize <= 0:
break
self.execute_condition.notify(len(ret))
for x in ret:
metric_queue_op('export', x)
return ret
def op_notify(self, src_node: str, dst: str, available: bool):
"""
For upstream notify or message fetcher
"""
with self.lock:
self.actor_queue(dst).op_notify(dst=dst, src_node=src_node, available=available)
self.execute_condition.notify()
def op_inbox(self, message: ActorMessage):
"""
For message fetcher or receiver
"""
with self.lock:
self._op_inbox(message)
metric_queue_op('inbox', message)
def op_acked(self, outbox_message_id: str, status: str):
"""
For message fetcher
"""
with self.lock:
outbox_message = self.state.get_outbox_message(outbox_message_id)
message = self.state.get_message(outbox_message.parent_id)
self.actor_queue(message.dst).op_acked(outbox_message_id, status=status)
self.execute_condition.notify()
metric_queue_op('acked', outbox_message)
def op_tick(self, now: int):
"""
For message monitor
"""
with self.lock:
self._auto_schedule_notifier()
self._auto_schedule_compactor()
for actor_queue in self.all_actor_queues():
num_error_notry = actor_queue.check_timeout_and_retry(now)
if num_error_notry > 0:
self.execute_condition.notify(num_error_notry)
# TODO: fix fetcher not auto scheduled in actor queue
actor_queue.auto_schedule_fetcher()
ACTOR_QUEUE_INBOX_SIZE.labels(dst=actor_queue.actor_name)\
.set(actor_queue.inbox_size())
ACTOR_QUEUE_OUTBOX_SIZE.labels(dst=actor_queue.actor_name)\
.set(actor_queue.outbox_size())
def op_restart(self):
"""
For application
"""
with self.lock:
if self.storage:
self.storage.load(self.raw_state)
self.state.apply_restart()
for message in self.state.get_inbox_messages():
self.actor_queue(message.dst).push_inbox(message)
if message.dst == ACTOR_MESSAGE_NOTIFY_SENDER:
self.is_notifing = True
if message.dst == ACTOR_STORAGE_COMPACTOR:
self.is_compacting = True
if message.dst == ACTOR_MESSAGE_FETCHER:
self.actor_queue(message.dst).is_fetching = True
for message, outbox_messages in self.state.get_outbox_messages():
for outbox_message in outbox_messages:
self.actor_queue(message.dst).push_outbox(outbox_message)
def _op_inbox(self, message):
self.actor_queue(message.dst).op_inbox(message)
self.execute_condition.notify()
def _ack_of(self, message, status):
return self.registery.create_message(
id=message.id,
priority=0,
src=message.dst,
dst=ACTOR_MESSAGE_ACKER,
dst_node=message.src_node,
content=dict(status=status),
)
def _export_ack(self, src_node, maxsize):
message_and_status = []
for dst, data in self.state.done_message_ids.items():
for message_id in data.get(src_node, []):
status = self.state.state[message_id]['status']
message = self.state.get_message(message_id)
message_and_status.append((message, status))
maxsize -= 1
if maxsize <= 0:
break
for message, status in message_and_status:
self.state.apply_complete(message_id=message.id)
yield self._ack_of(message, status)
def _auto_schedule_notifier(self):
if self.is_notifing:
return
dst_info = set()
dst_node_info = set()
for actor_queue in self.all_actor_queues():
dst_s, dst_node_s = actor_queue.outbox_info()
dst_info.update(dst_s)
dst_node_info.update(dst_node_s)
for dst, dst_node_data in self.state.done_message_ids.items():
for dst_node, items in dst_node_data.items():
if items:
dst_node_info.add((dst_node, ACTOR_MESSAGE_ACKER))
if not dst_info and not dst_node_info:
return
dst_info = [dict(dst=dst) for dst in dst_info]
dst_node_info = [dict(dst=dst, dst_node=dst_node) for dst_node, dst in dst_node_info]
message_notifier = self.registery.create_message(
priority=0,
src=ACTOR_SYSTEM,
dst=ACTOR_MESSAGE_NOTIFY_SENDER,
dst_node=self.registery.current_node_name,
content=dict(dst_info=dst_info, dst_node_info=dst_node_info),
)
self.actor_queue(message_notifier.dst).op_inbox(message_notifier)
self.is_notifing = True
def _auto_schedule_compactor(self):
if self.storage is None:
return
if self.is_compacting:
return
if not self.storage.should_compact(self.raw_state):
return
message_compactor = self.registery.create_message(
priority=0,
src=ACTOR_SYSTEM,
dst=ACTOR_STORAGE_COMPACTOR,
dst_node=self.registery.current_node_name,
)
self.actor_queue(message_compactor.dst).op_inbox(message_compactor)
self.is_compacting = True
def prepare_compact(self):
with self.lock:
return self.storage.prepare_compact(self.raw_state)
def _op_execute(self, actor_queues):
min_priority, min_actor = None, None
for actor in actor_queues.values():
if actor.is_inbox_empty() or actor.is_outbox_full():
continue
priority = actor.execute_priority()
if min_priority is None or priority < min_priority:
min_priority = priority
min_actor = actor
if min_actor is not None:
return min_actor.op_execute()
return None
|
11543229
|
import requests
from bs4 import BeautifulSoup
from accounts.models import Major
def contains_filters(listed_filters, desired_filters=set(), excluded_filters=set()):
# ensure no excluded filters appear
for curr_filter in excluded_filters:
if curr_filter in listed_filters:
return False
# ensure at least one desired filter appears
for curr_filter in desired_filters:
if curr_filter in listed_filters:
return True
return False
def update_all_majors():
# scrapes majors from the official penn catalog of all programs
source = requests.get("https://catalog.upenn.edu/programs/").text
soup = BeautifulSoup(source, "lxml")
bachelor_filter = "filter_6"
master_filter = "filter_25"
phd_filter = "filter_7"
professional_filter = "filter_10"
minor_filter = "filter_26"
desired_filters = {bachelor_filter, master_filter, phd_filter, professional_filter}
excluded_filters = {minor_filter}
listed_majors = set()
# iterate through all list tags with "item" in the class (all programs)
for program in soup.find_all(
"li", class_=lambda value: value and value.startswith("item ")
):
curr_filter_list = program.attrs["class"]
# check if entry meets relevant desired and excluded filter criteria
if not contains_filters(
curr_filter_list,
desired_filters=desired_filters,
excluded_filters=excluded_filters,
):
continue
# grab the major name
major_name = program.find("span", class_="title").text
# identify degree type
if bachelor_filter in curr_filter_list:
curr_degree_type = Major.DEGREE_BACHELOR
elif master_filter in curr_filter_list:
curr_degree_type = Major.DEGREE_MASTER
elif phd_filter in curr_filter_list:
curr_degree_type = Major.DEGREE_PHD
else:
curr_degree_type = Major.DEGREE_PROFESSIONAL
# create new major entry if it does not already exist
Major.objects.update_or_create(
name=major_name, defaults={"degree_type": curr_degree_type}
)
# keep track of found majors
listed_majors.add(major_name)
# iterate through existing majors and set active/inactive status
for existing_major in Major.objects.all():
existing_major.is_active = existing_major.name in listed_majors
existing_major.save()
|
11543292
|
from rest_framework.authentication import SessionAuthentication
from rest_framework import permissions
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return  # Skip the CSRF check that SessionAuthentication normally enforces
class UserAccessPermission(permissions.BasePermission):
def has_permission(self, request, view): # type: ignore
user = request.user
access_granted = False
if hasattr(view, "permission_required"):
for permission in view.permission_required:
if user.has_perm(permission):
access_granted = True
else:
access_granted = True
if access_granted:
return user and user.is_authenticated
else:
return False
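# Hedged usage sketch (hypothetical view; the class and permission names below
# are illustrative only, not taken from this project):
#
#   from rest_framework.views import APIView
#
#   class ReportView(APIView):
#       authentication_classes = (CsrfExemptSessionAuthentication,)
#       permission_classes = (UserAccessPermission,)
#       # any one of these Django permissions grants access
#       permission_required = ("reports.view_report", "reports.change_report")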
|
11543381
|
import numpy as np
from astropy.coordinates import angles
k_Sun = 132749351440.0
def rv2coe(k, a, ecc, inc, raan, argp, nu):
"""Convierte elementos keplerianos a vectores r y v.
Parámetros
==========
k : float
Parámetro gravitacional (km^3 / s^2)
a : float
Semieje mayor (km)
ecc : float
Excentricidad
inc : float
Inclinación (rad)
raan : float
Ascensión recta del nodo ascendente (rad)
argp : float
Argumento del perigeo (rad)
nu : float
Anomalía verdadera (rad)
Devuelve
========
r, v : arrays
Vectores posición (km) y velocidad (km / s)
"""
p = a * (1 - ecc ** 2)
r_pqw = p * np.array([np.cos(nu) / (1 + ecc * np.cos(nu)),
np.sin(nu) / (1 + ecc * np.cos(nu)),
0])
v_pqw = np.sqrt(k / p) * np.array([-np.sin(nu),
ecc + np.cos(nu),
0])
r_ijk = transform(r_pqw, -argp, 'z')
r_ijk = transform(r_ijk, -inc, 'x')
r_ijk = transform(r_ijk, -raan, 'z')
v_ijk = transform(v_pqw, -argp, 'z')
v_ijk = transform(v_ijk, -inc, 'x')
v_ijk = transform(v_ijk, -raan, 'z')
return r_ijk, v_ijk
def rotate(vec, angle, axis):
"""Rotates the coordinate system around axis 1, 2 or 3 a CCW angle.
Parameters
----------
vec : array
Dimension 3 vector.
ax : int
Axis to be rotated.
angle : float
Angle of rotation (rad).
"""
assert vec.shape == (3,)
rot = np.eye(3)
if axis == 'x':
sl = slice(1, 3)
elif axis == 'y':
sl = slice(0, 3, 2)
elif axis == 'z':
sl = slice(0, 2)
rot[sl, sl] = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
return np.dot(rot, vec)
def transform(vector, angle, axis):
"""Rotates a coordinate system around axis a positive right-handed angle.
Notes
-----
This is a convenience function, equivalent to
`rotate(vec, -angle, axis)`.
Refer to the documentation of that function for further information.
"""
return rotate(vector, -angle, axis)
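# Hedged usage sketch (illustrative values, not from the original module):
# position/velocity of a roughly Earth-like heliocentric orbit at perihelion.
if __name__ == '__main__':
    r, v = rv2coe(k_Sun, a=1.496e8, ecc=0.0167, inc=0.0, raan=0.0, argp=0.0, nu=0.0)
    print(r)  # position vector (km)
    print(v)  # velocity vector (km / s)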
|
11543439
|
from tornado.web import url
from .handlers import *
urlpattern = [
url("/monitor/", MonitorHandler),
]
|
11543595
|
class Pipeline(object):
"""Common pipeline class fo all pipeline tasks."""
def __init__(self, source=None):
self.source = source
def __iter__(self):
return self.generator()
def generator(self):
"""Yields the pipeline data."""
while self.has_next():
try:
data = next(self.source) if self.source else {}
if self.filter(data):
yield self.map(data)
except StopIteration:
return
def __or__(self, other):
"""Allows to connect the pipeline task using | operator."""
if other is not None:
other.source = self.generator()
return other
else:
return self
def filter(self, data):
"""Overwrite to filter out the pipeline data."""
return True
def map(self, data):
"""Overwrite to map the pipeline data."""
return data
def has_next(self):
"""Overwrite to stop the generator in certain conditions."""
return True
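# Hedged usage sketch: the subclasses below are illustrative only and show how
# stages chain with the | operator (each stage pulls from the previous one,
# applies filter(), then map()).
if __name__ == '__main__':
    class Numbers(Pipeline):
        def __init__(self):
            super(Numbers, self).__init__(source=iter(range(5)))
    class Evens(Pipeline):
        def filter(self, data):
            return data % 2 == 0
    class Squared(Pipeline):
        def map(self, data):
            return data * data
    for value in Numbers() | Evens() | Squared():
        print(value)  # -> 0, 4, 16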
|
11543604
|
import logging
import os
from hydra.experimental.callback import Callback
from mpi4py import MPI
from detectron2.utils import comm as d2_comm
from detectron2.utils.logger import setup_logger
from tridet.utils.s3 import aws_credential_is_available, maybe_download_ckpt_from_url, sync_output_dir_s3
from tridet.utils.setup import setup_distributed
from tridet.utils.wandb import derive_output_dir_from_wandb_id, init_wandb, wandb_credential_is_available
LOG = logging.getLogger(__name__)
class SetupDistributedCallback(Callback):
"""
"""
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
world_size = MPI.COMM_WORLD.Get_size()
distributed = world_size > 1
if distributed:
rank = MPI.COMM_WORLD.Get_rank()
setup_distributed(world_size, rank)
def on_job_start(self, config, **kwargs): # pylint: disable=unused-argument
world_size = d2_comm.get_world_size()
rank = d2_comm.get_rank()
LOG.info("Rank of current process: {}. World size: {}".format(rank, world_size))
class WandbInitCallback(Callback):
"""If W&B is enabled, then
1) initialize W&B,
2) derive the path of output directory using W&B ID, and
3) set it as hydra working directory.
"""
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
if not config.WANDB.ENABLED:
return
if not wandb_credential_is_available():
LOG.warning(
"W&B credential must be defined in environment variables."
"Use `WANDB.ENABLED=False` to suppress this warning. "
"Skipping `WandbInitCallback`..."
)
return
init_wandb(config)
output_dir = derive_output_dir_from_wandb_id(config)
if output_dir:
config.hydra.run.dir = output_dir
class SyncOutputDirCallback(Callback):
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
if d2_comm.is_main_process():
output_dir = config.hydra.run.dir
else:
output_dir = None
output_dir = MPI.COMM_WORLD.bcast(output_dir, root=0)
if output_dir != config.hydra.run.dir:
LOG.warning("Hydra run dir is not synced. Overwriting from rank=0.")
config.hydra.run.dir = output_dir
class D2LoggerCallback(Callback):
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
rank = d2_comm.get_rank()
log_output_dir = os.path.join(config.hydra.run.dir, 'logs')
setup_logger(log_output_dir, distributed_rank=rank, name="hydra")
setup_logger(log_output_dir, distributed_rank=rank, name="detectron2", abbrev_name="d2")
setup_logger(log_output_dir, distributed_rank=rank, name="tridet")
setup_logger(log_output_dir, distributed_rank=rank, name="fvcore")
logging.getLogger('numba').setLevel(logging.ERROR) # too much logs
class CkptPathResolverCallback(Callback):
"""
If the checkpoint (`config.MODEL.CKPT`) is an S3 path, download it and replace the path with the
local path.
"""
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
if config.MODEL.CKPT:
new_ckpt_path = maybe_download_ckpt_from_url(config)
new_ckpt_path = os.path.abspath(new_ckpt_path)
config.MODEL.CKPT = new_ckpt_path
class SyncOutputS3BeforeEnd(Callback):
"""
"""
def on_run_start(self, config, **kwargs): # pylint: disable=unused-argument
if config.SYNC_OUTPUT_DIR_S3.ENABLED and not aws_credential_is_available():
raise ValueError(f"\n\nAWS credential must be set in environment variables (rank={d2_comm.get_rank()}).\n")
def on_run_end(self, config, **kwargs): # pylint: disable=unused-argument
"""
"""
if config.SYNC_OUTPUT_DIR_S3.ENABLED:
sync_output_dir_s3(config, output_dir=config.hydra.run.dir)
|
11543607
|
from datetime import datetime
from typing import Dict, List, Any, Iterator
from collections.abc import Iterable
import redis
from flask import (
Blueprint,
Response,
current_app,
jsonify,
redirect,
render_template,
request,
url_for,
get_template_attribute,
)
from werkzeug.utils import cached_property
from werkzeug.urls import url_quote_plus, url_unquote_plus
from .utils import (
_get_db_details,
_get_key_details,
_get_key_info,
VALUE_SETTER_FUNCS,
_decode_bytes,
_update_config,
_get_redis_conn_kwargs,
_get_current_user_redis_cli,
)
from .constant import BADGE_STYLE, INFO_GROUPS, CONFIG
module = Blueprint(
"redisboard",
__name__,
template_folder="templates/redisboard",
static_folder="static",
)
class RedisServer:
@cached_property
def connection(self) -> redis.Redis:
return redis.Redis(**_get_redis_conn_kwargs())
@property
def info(self) -> Dict:
pipe = self.connection.pipeline()
for part in INFO_GROUPS:
pipe.info(part)
results = pipe.execute()
return dict(zip(INFO_GROUPS, results))
@cached_property
def config_file(self) -> str:
return self.connection.info("Server").get("config_file")
def __getattr__(self, attr: Any) -> Any:
if attr in ("keyspace", "memory", "clients", "stats", "commandstats"):
return self.connection.info(attr)
@property
def databases(self) -> List:
return [item[2:] for item in self.keyspace.keys()]
def slowlog_get(self, limit: int = None) -> Iterator[Dict]:
try:
count = limit if limit else current_app.config["REDISBOARD_SLOWLOG_LEN"]
for slowlog in self.connection.slowlog_get(count):
yield dict(
id=slowlog["id"],
ts=datetime.fromtimestamp(slowlog["start_time"]),
duration=slowlog["duration"] // 1000,
command=_decode_bytes(slowlog["command"]),
)
except redis.exceptions.ConnectionError:
pass
server = RedisServer()
@module.context_processor
def inject_param() -> Dict:
return {"databases": server.databases}
@module.errorhandler(Exception)
def handle_exception(error: Exception) -> Response:
return jsonify({"code": 999, "error": str(error)})
@module.route("/")
def home() -> Response:
return redirect(url_for("redisboard.dashboard"))
@module.route("/dashboard/")
def dashboard() -> Response:
total_keys = 0
for k, v in server.keyspace.items():
total_keys += v["keys"]
used_memory = server.memory.get("used_memory_human")
connected_clients = server.clients.get("connected_clients")
return render_template(
"dashboard.html",
total_keys=total_keys,
used_memory=used_memory,
connected_clients=connected_clients,
)
@module.route("/dashboard_api/")
def dashboard_api() -> Response:
cmd_per_sec = server.stats.get("instantaneous_ops_per_sec")
memory = server.memory.get("used_memory") / 1024 / 1024
network_input = server.stats.get("instantaneous_input_kbps")
network_output = server.stats.get("instantaneous_output_kbps")
data = {
"cmd_per_sec": cmd_per_sec,
"memory": memory,
"network_input": network_input,
"network_output": network_output,
"time": datetime.now().strftime("%H:%M:%S"),
}
return jsonify({"code": 0, "data": data})
@module.route("/info/")
def info() -> Response:
return render_template(
"serverinfo.html",
basic_info=server.info,
keyspace=server.keyspace,
cmdstats=server.commandstats,
slowlog=server.slowlog_get(),
)
@module.route("/config/", methods=["GET", "POST"])
def config() -> Response:
conn = server.connection
if request.method == "POST":
value = ""
if "value" in request.form:
value = request.form.get("value")
elif "value[]" in request.form:
value = "".join(request.form.getlist("value[]"))
try:
conn.config_set(request.form.get("name"), value)
except Exception as e:
return jsonify({"code": 999, "error": str(e)})
return jsonify({"code": 0})
config_value = _update_config(CONFIG, conn.config_get())
return render_template("config.html", config_file=server.config_file, config=CONFIG)
@module.route("/db/")
@module.route("/db/<int:db>/")
def db_detail(db: int = 0) -> Response:
db_summary = server.keyspace.get(f"db{db}", dict())
cursor = request.args.get("cursor", type=int, default=0)
keypattern = request.args.get("keypattern", default="")
# when searching, use a larger page size
count = 1000 if keypattern else 30
key_details, next_cursor = _get_db_details(
server.connection, db, cursor=cursor, keypattern=keypattern, count=count
)
if cursor == 0:
# render index page
return render_template(
"database.html",
db_summary=db_summary,
key_details=key_details,
cursor=next_cursor,
db=db,
badge_style=BADGE_STYLE,
keypattern=keypattern,
)
macro = get_template_attribute("macros.html", "render_key_details")
html = macro(key_details, db, BADGE_STYLE)
url = ""
if next_cursor != 0:
url = url_for(
"redisboard.db_detail", db=db, cursor=next_cursor, keypattern=keypattern
)
return jsonify({"code": 0, "html": html, "data": url})
@module.route("/db/<int:db>/addkey", methods=["POST"])
def add_key(db: int) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
keyname = request.form.get("keyname")
type_ = request.form.get("type")
index = request.form.get("index")
value = request.form.get("value")
set_fn = VALUE_SETTER_FUNCS.get(type_)
set_fn(conn, keyname, index, value)
return redirect(url_for("redisboard.db_detail", db=db))
@module.route("/db/<int:db>/batchttl", methods=["POST"])
def batch_set_ttl(db: int) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
pipe = conn.pipeline()
keys = request.json.get("keys", [])
ttl = int(request.json.get("ttl", -1))
for key in keys:
if ttl <= 0:
pipe.persist(key)
else:
pipe.expire(key, ttl)
pipe.execute()
return jsonify({"code": 0, "data": url_for("redisboard.db_detail", db=db)})
@module.route("/db/<int:db>/batchdel", methods=["POST"])
def batch_delete_keys(db: int) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
keys = request.json.get("keys", [])
conn.delete(*keys)
return jsonify({"code": 0, "data": url_for("redisboard.db_detail", db=db)})
@module.route("/db/<int:db>/flush", methods=["DELETE"])
def db_flush(db: int) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
conn.flushdb()
return jsonify({"code": 0, "data": url_for("redisboard.db_detail", db=db)})
@module.route("/db/<int:db>/key/<key>/del", methods=["DELETE"])
def key_delete(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
key = url_unquote_plus(key)
conn.delete(key)
return jsonify({"code": 0, "data": url_for("redisboard.db_detail", db=db)})
@module.route("/db/<int:db>/<key>/rename", methods=["POST"])
def key_rename(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
key = url_unquote_plus(key)
new_name = request.form["keyname"]
conn.rename(key, new_name)
return jsonify(
{
"code": 0,
"data": url_for(
"redisboard.key_detail", db=db, key=url_quote_plus(new_name)
),
}
)
@module.route("/db/<int:db>/<key>/ttl", methods=["POST"])
def key_set_ttl(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
ttl = request.form.get("ttl", type=int)
if ttl <= 0:
conn.persist(ori_key)
else:
conn.expire(ori_key, ttl)
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/list_add", methods=["POST"])
def list_add_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
position = request.form.get("position", type=int)
if position == 0:
conn.lpush(ori_key, request.form["value"])
elif position == -1:
conn.rpush(ori_key, request.form["value"])
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/list_edit", methods=["POST"])
def list_edit_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
index = request.form.get("name", type=int)
conn.lset(ori_key, index, request.form.get("value"))
return jsonify({"code": 0})
@module.route("/db/<int:db>/<key>/list_rem", methods=["POST"])
def list_rem_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
count = request.form.get("count", type=int, default=1)
conn.lrem(ori_key, count, request.form["value"])
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/hash_add", methods=["POST"])
def hash_add_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
index = request.form.get("index", "")
exists = conn.hexists(ori_key, index)
if exists:
return jsonify({"code": 1, "error": "can`t add value to an exist key!"})
else:
conn.hset(ori_key, index, request.form.get("value"))
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/hash_edit", methods=["POST"])
def hash_edit_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
conn.hset(ori_key, request.form.get("name"), request.form.get("value"))
return jsonify({"code": 0})
@module.route("/db/<int:db>/<key>/hash_rem", methods=["POST"])
def hash_rem_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
conn.hdel(ori_key, request.form["index"])
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<db>/<key>/set_add", methods=["POST"])
def set_add_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
value = request.form.get("value", "")
value = [item.strip() for item in value.split(",")]
result = conn.sadd(ori_key, *value)
# TODO: report how many members were successfully added
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/set_rem", methods=["POST"])
def set_rem_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
value = request.form.get("value", "")
value = [item.strip() for item in value.split(",")]
conn.srem(ori_key, *value)
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/zset_edit", methods=["POST"])
def zset_edit_score(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
maps = {request.form.get("name"): request.form.get("value", type=float)}
conn.zadd(ori_key, maps)
return jsonify({"code": 0})
@module.route("/db/<int:db>/<key>/zset_add", methods=["POST"])
def zset_add_value(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
conn.zadd(
ori_key, {request.form.get("member"): request.form.get("score", type=float)}
)
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>/zset_rem", methods=["POST"])
def zset_rem_member(db: int, key: str) -> Response:
conn = server.connection
conn.execute_command("SELECT", db)
ori_key = url_unquote_plus(key)
member = request.form.get("member", "")
if member:
members = [item.strip() for item in member.split(",")]
conn.zrem(ori_key, *members)
else:
score_min, score_max = (
request.form.get("min", type=float),
request.form.get("max", type=float),
)
conn.zremrangebyscore(ori_key, score_min, score_max)
return jsonify(
{"code": 0, "data": url_for("redisboard.key_detail", db=db, key=key)}
)
@module.route("/db/<int:db>/<key>", methods=["GET", "POST"])
def key_detail(db: int, key: str) -> Response:
conn = server.connection
key = url_unquote_plus(key)
if request.method == "POST":
conn.set(key, request.form["value"])
key_details = _get_key_details(conn, db, key)
return render_template(
f"keydetail/{key_details['type']}.html", key_details=key_details, db=db
)
@module.route("/command/", methods=["GET", "POST"])
def command() -> Response:
client = _get_current_user_redis_cli()
if request.method == "GET":
return render_template("command.html")
command = request.form.get("command")
result = client.execute_command(command)
if isinstance(result, bytes):
result = result.decode()
elif isinstance(result, Iterable):
result = [r.decode() for r in result]
return jsonify({"code": 0, "data": result})
|
11543622
|
import itertools
import json
from pathlib import Path
from urllib.parse import urljoin
from flexget.utils.soup import get_soup
from loguru import logger
from ..schema.site_base import SiteBase, SignState, NetworkState, Work
from ..utils.net_utils import NetUtils
class NexusPHP(SiteBase):
def get_message(self, entry, config):
self.get_nexusphp_message(entry, config)
def get_details(self, entry, config):
self.get_details_base(entry, config, self.build_selector())
def build_selector(self):
selector = {
'user_id': 'userdetails.php\\?id=(\\d+)',
'detail_sources': {
'default': {
'link': '/userdetails.php?id={}',
'elements': {
'bar': '#info_block > tbody > tr > td > table > tbody > tr > td:nth-child(1) > span',
'table': '#outer table:last-child'
}
}
},
'details': {
'uploaded': {
'regex': ('(上[传傳]量|Uploaded).+?([\\d.]+ ?[ZEPTGMK]?i?B)', 2)
},
'downloaded': {
'regex': ('(下[载載]量|Downloaded).+?([\\d.]+ ?[ZEPTGMK]?i?B)', 2)
},
'share_ratio': {
'regex': ('(分享率|Ratio).*?(---|∞|Inf\\.|无限|無限|[\\d,.]+)', 2),
'handle': self.handle_share_ratio
},
'points': {
'regex': ('(魔力|Bonus).*?([\\d,.]+)', 2)
},
'join_date': {
'regex': ('(加入日期|注册日期|Join.date).*?(\\d{4}-\\d{2}-\\d{2})', 2),
},
'seeding': {
'regex': ('(当前活动|當前活動).*?(\\d+)', 2)
},
'leeching': {
'regex': ('(当前活动|當前活動).*?\\d+\\D+(\\d+)', 2)
},
'hr': {
'regex': 'H&R.*?(\\d+)'
}
}
}
return selector
def get_nexusphp_message(self, entry, config, messages_url='/messages.php?action=viewmailbox&box=1&unread=yes',
unread_elements_selector='td > img[alt*="Unread"]'):
message_url = urljoin(entry['url'], messages_url)
message_box_response = self._request(entry, 'get', message_url)
message_box_network_state = self.check_network_state(entry, message_url, message_box_response)
if message_box_network_state != NetworkState.SUCCEED:
entry.fail_with_prefix('Can not read message box! url:{}'.format(message_url))
return
unread_elements = get_soup(NetUtils.decode(message_box_response)).select(
unread_elements_selector)
failed = False
for unread_element in unread_elements:
td = unread_element.parent.nextSibling.nextSibling
title = td.text
href = td.a.get('href')
message_url = urljoin(message_url, href)
message_response = self._request(entry, 'get', message_url)
message_network_state = self.check_network_state(entry, message_url, message_response)
if message_network_state != NetworkState.SUCCEED:
message_body = 'Can not read message body!'
failed = True
else:
if body_element := get_soup(NetUtils.decode(message_response)).select_one('td[colspan*="2"]'):
message_body = body_element.text.strip()
else:
message_body = 'Can not find message body element!'
entry['messages'] = entry['messages'] + (f'\nTitle: {title}\nLink: {message_url}\n{message_body}')
if failed:
entry.fail_with_prefix('Can not read message body!')
def handle_share_ratio(self, value):
if value in ['---', '∞', 'Inf.', '无限', '無限']:
return '0'
else:
return value
class AttendanceHR(NexusPHP):
def build_workflow(self, entry, config):
return [
Work(
url='/attendance.php',
method='get',
succeed_regex=[
'这是您的第.*?次签到,已连续签到.*?天,本次签到获得.*?魔力值。|這是您的第.*次簽到,已連續簽到.*?天,本次簽到獲得.*?魔力值。',
'[签簽]到已得\\d+',
'您今天已经签到过了,请勿重复刷新。|您今天已經簽到過了,請勿重複刷新。'],
check_state=('final', SignState.SUCCEED),
is_base_content=True
)
]
class Attendance(AttendanceHR):
def build_selector(self):
selector = super(Attendance, self).build_selector()
NetUtils.dict_merge(selector, {
'details': {
'hr': None
}
})
return selector
class BakatestHR(NexusPHP):
def build_workflow(self, entry, config):
return [
Work(
url='/bakatest.php',
method='get',
succeed_regex='今天已经签过到了\\(已连续.*天签到\\)',
check_state=('sign_in', SignState.NO_SIGN_IN),
is_base_content=True
),
Work(
url='/bakatest.php',
method='question',
succeed_regex='连续.*天签到,获得.*点魔力值|今天已经签过到了\\(已连续.*天签到\\)',
fail_regex='回答错误,失去 1 魔力值,这道题还会再考一次',
)
]
def sign_in_by_question(self, entry, config, work, last_content=None):
question_element = get_soup(last_content).select_one('input[name="questionid"]')
if question_element:
question_id = question_element.get('value')
local_answer = None
question_file = Path.cwd().joinpath('nexusphp_question.json')
if question_file.is_file():
question_json = json.loads(question_file.read_text())
else:
question_json = {}
question_extend_file = Path(__file__).with_name('nexusphp_question.json')
if question_extend_file.is_file():
question_extend_json = json.loads(question_extend_file.read_text())
NetUtils.dict_merge(question_json, question_extend_json)
question_file.write_text(json.dumps(question_json))
question_extend_file.unlink()
site_question = question_json.get(entry['url'])
if site_question:
local_answer = site_question.get(question_id)
else:
question_json[entry['url']] = {}
choice_elements = get_soup(last_content).select('input[name="choice[]"]')
choices = []
for choice_element in choice_elements:
choices.append(choice_element.get('value', ''))
if choice_elements[0].get('type') == 'radio':
choice_range = 1
else:
choice_range = len(choices)
answer_list = []
for i in range(choice_range):
for arr in itertools.combinations(choices, i + 1):
if list(arr) not in answer_list:
answer_list.append(list(arr))
answer_list.reverse()
if local_answer and local_answer in choices and len(local_answer) <= choice_range:
answer_list.insert(0, local_answer)
times = 0
for answer in answer_list:
data = {'questionid': question_id, 'choice[]': answer, 'usercomment': '此刻心情:无', 'submit': '提交'}
response = self._request(entry, 'post', work.url, data=data)
state = self.check_sign_in_state(entry, work, response, NetUtils.decode(response))
if state == SignState.SUCCEED:
entry['result'] = f"{entry['result']} ( {times} attempts.)"
question_json[entry['url']][question_id] = answer
question_file.write_text(json.dumps(question_json))
logger.info(f"{entry['title']}, correct answer: {data}")
return
times += 1
entry.fail_with_prefix(SignState.SIGN_IN_FAILED.value.format('No answer.'))
class Bakatest(BakatestHR):
def build_selector(self):
selector = super(Bakatest, self).build_selector()
NetUtils.dict_merge(selector, {
'details': {
'hr': None
}
})
return selector
class VisitHR(NexusPHP):
SUCCEED_REGEX = '[欢歡]迎回[来來家]'
def build_workflow(self, entry, config):
return [
Work(
url='/',
method='get',
succeed_regex=self.SUCCEED_REGEX,
check_state=('final', SignState.SUCCEED),
is_base_content=True
)
]
class Visit(VisitHR):
def build_selector(self):
selector = super(Visit, self).build_selector()
NetUtils.dict_merge(selector, {
'details': {
'hr': None
}
})
return selector
|
11543646
|
class ModelFactory():
def __init__(self):
pass
@staticmethod
def get_model(model_type, dataset, in_channels=6, num_actions=6, width=300):
if dataset == "omniglot":
nm_channels = 112
channels = 256
size_of_representation = 2304
size_of_interpreter = 1008
if model_type == "ANML+AIM":
nm_channels = 112
channels = 256
size_of_representation = 2304
size_of_interpreter = 1008
return [
# =============== Separate network neuromodulation =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('nm_to_fc', [size_of_representation, size_of_interpreter]),
# =============== Prediction network ===============================
('conv1', [channels, 3, 3, 3, 1, 0]),
('bn1', [channels]),
('conv2', [channels, channels, 3, 3, 1, 0]),
('bn2', [channels]),
('conv3', [channels, channels, 3, 3, 1, 0]),
('bn3', [channels]),
('fc', [1000, size_of_representation // 2]),
('linear', [size_of_representation // 2, size_of_representation]),
('aim', [size_of_representation // 2, 128, 64, 128, size_of_representation // 2, 128])
]
elif model_type == "OML+AIM":
return [
# =============== slow weight =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('conv4_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn4_nm', [nm_channels]),
('conv5_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn5_nm', [nm_channels]),
('conv6_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn6_nm', [nm_channels]),
('nm_to_fc', [size_of_interpreter // 2, size_of_interpreter]),
# =============== fast weight =======================
('fc', [1000, size_of_interpreter // 2]),
('aim', [size_of_interpreter // 2, 128, 64, 128, size_of_interpreter // 2, 128]),
# [input_size, hidden_size, num_units, input_key_size, input_value_size, input_query_size]
]
elif dataset == "cifar100":
nm_channels = 112
channels = 256
size_of_representation = 1024
size_of_interpreter = 1792
if model_type == "ANML+AIM":
size_of_representation = 4096
size_of_interpreter = 1792
return [
# =============== slow weight =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('nm_to_fc', [size_of_representation, size_of_interpreter]),
# =============== fast weight =======================
('conv1', [channels, 3, 3, 3, 1, 0]),
('bn1', [channels]),
('conv2', [channels, channels, 3, 3, 1, 0]),
('bn2', [channels]),
('conv3', [channels, channels, 3, 3, 1, 0]),
('bn3', [channels]),
('fc', [100, size_of_representation // 4]),
('linear', [size_of_representation // 4, size_of_representation]),
('aim', [size_of_representation // 4, 128, 64, 128, size_of_representation // 4, 128]),
]
elif model_type == "OML+AIM":
return [
# =============== slow weight =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('conv4_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn4_nm', [nm_channels]),
('conv5_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn5_nm', [nm_channels]),
('conv6_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn6_nm', [nm_channels]),
('nm_to_fc', [size_of_interpreter // 2, size_of_interpreter]),
# =============== fast weight =======================
('fc', [100, size_of_interpreter // 2]),
('aim', [size_of_interpreter // 2, 128, 64, 128, size_of_interpreter // 2, 128]),
# [input_size, hidden_size, num_units, input_key_size, input_value_size, input_query_size]
]
elif dataset == "imagenet":
nm_channels = 112
channels = 256
size_of_representation = 2304
size_of_interpreter = 2800
if model_type == "ANML+AIM":
size_of_representation = 16384
size_of_interpreter = 7168
return [
# =============== slow weight =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('nm_to_fc', [size_of_representation, size_of_interpreter]),
# =============== fast weight =======================
('conv1', [channels, 3, 3, 3, 1, 0]),
('bn1', [channels]),
('conv2', [channels, channels, 3, 3, 1, 0]),
('bn2', [channels]),
('conv3', [channels, channels, 3, 3, 1, 0]),
('bn3', [channels]),
('fc', [84, size_of_representation // 16]),
('linear', [size_of_representation // 16, size_of_representation]),
('aim', [size_of_representation // 16, 128, 64, 128, size_of_representation // 16, 128]),
]
elif model_type == "OML+AIM":
return [
# =============== slow weight =======================
('conv1_nm', [nm_channels, 3, 3, 3, 1, 0]),
('bn1_nm', [nm_channels]),
('conv2_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn2_nm', [nm_channels]),
('conv3_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn3_nm', [nm_channels]),
('conv4_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn4_nm', [nm_channels]),
('conv5_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn5_nm', [nm_channels]),
('conv6_nm', [nm_channels, nm_channels, 3, 3, 1, 0]),
('bn6_nm', [nm_channels]),
('nm_to_fc', [size_of_interpreter // 2, size_of_interpreter]),
# =============== fast weight =======================
('fc', [84, size_of_interpreter // 2]),
('aim', [size_of_interpreter // 2, 128, 32, 128, size_of_interpreter // 2, 128]),
# [input_size, hidden_size, num_units, input_key_size, input_value_size, input_query_size]
]
else:
print("Unsupported model; either implement the model in model/ModelFactory or choose a different model")
assert (False)
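# Usage sketch (illustrative only): get_model returns a list of
# (layer_name, layer_params) tuples describing the network architecture.
if __name__ == "__main__":
layer_config = ModelFactory.get_model("ANML+AIM", "omniglot")
for layer_name, layer_params in layer_config:
print(layer_name, layer_params)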
|
11543656
|
from io import StringIO
from yglu.main import process
from .utils import outdent
def test_process_ok():
input = """
a: 1
b: 2
---
c: 3
"""
output = StringIO()
process(outdent(input), output)
assert output.getvalue() == "a: 1\nb: 2\n---\nc: 3\n"
def test_process_failure():
input = """
a: 1
b
"""
output = StringIO()
errors = []
process(input, output, None, errors)
assert len(errors) == 1
|
11543663
|
from appconf import AppConf
class CustomHolder(object):
pass
custom_holder = CustomHolder()
class TestConf(AppConf):
SIMPLE_VALUE = True
CONFIGURED_VALUE = 'wrong'
def configure_configured_value(self, value):
return 'correct'
def configure(self):
self.configured_data['CONFIGURE_METHOD_VALUE'] = True
return self.configured_data
class PrefixConf(TestConf):
class Meta:
prefix = 'prefix'
class YetAnotherPrefixConf(PrefixConf):
SIMPLE_VALUE = False
class Meta:
prefix = 'yetanother_prefix'
class SeparateConf(AppConf):
SEPARATE_VALUE = True
class Meta(PrefixConf.Meta):
pass
class SubclassConf(TestConf):
def configure(self):
self.configured_data['CONFIGURE_METHOD_VALUE2'] = False
return self.configured_data
class ProxyConf(TestConf):
class Meta:
proxy = True
class CustomHolderConf(AppConf):
SIMPLE_VALUE = True
class Meta:
holder = 'appconf.tests.models.custom_holder' # instead of django.conf.settings
prefix = 'custom_holder'
|
11543677
|
import os
import re
import sys
import json
import shutil
import logging
import zipfile
import requests
from datetime import datetime
import pandas as pd
from onesaitplatform.iotbroker import DigitalClient
from onesaitplatform.files import FileManager
import urllib3
urllib3.disable_warnings()
DATETIME_PATTERN = "%Y-%m-%dT%H:%M:%SZ"
DIGITAL_CLIENT_JOIN_MESSAGE = "Digital Client joining server"
DIGITAL_CLIENT_GET_ERROR_MESSAGE = "Not possible to get data from server with Digital Client: {}"
DIGITAL_CLIENT_JOIN_ERROR_MESSAGE = "Not possible to join server with Digital Client: {}"
DIGITAL_CLIENT_JOIN_SUCCESS_MESSAGE = "Digital Client joined server: {}"
FILE_MANAGER_GET_ERROR_MESSAGE = "Not possible to download with File Manager: {}"
TRAINING_SUCCESS_MESSAGE = "Training finished with metrics: {}"
logger = logging.getLogger('onesait.platform.model.BaseModelService')
logger.setLevel(logging.WARNING)
class AuditClient(object):
"""Client to API for audit in Platform"""
def __init__(self, protocol=None, host=None, port=None, token=None):
ERROR_MESSAGE = 'Mandatory attribute {} not specified'
if protocol is None:
raise AttributeError(ERROR_MESSAGE.format('protocol'))
if host is None:
raise AttributeError(ERROR_MESSAGE.format('host'))
if port is None:
raise AttributeError(ERROR_MESSAGE.format('port'))
if token is None:
raise AttributeError(ERROR_MESSAGE.format('token'))
port = str(port)
url = "{protocol}://{host}:{port}/controlpanel/api/audit/".format(
protocol=protocol, host=host, port=port
)
headers = {
'Authorization': token,
'Content-Type': 'application/json',
'accept': '*/*'
}
self.url = url
self.headers = headers
def report(
self, message=None, ontology=None, operation_type=None,
other_type=None, result_operation=None, type_=None
):
now = datetime.now()
date_formated = now.strftime(DATETIME_PATTERN)
data = [{
"formatedTimeStamp": date_formated,
"message": message,
"ontology": ontology,
"operationType": operation_type,
"otherType": other_type,
"resultOperation": result_operation,
"timeStamp": datetime.timestamp(now),
"type": type_
}]
response = requests.post(
self.url, headers=self.headers, json=data, timeout=5
)
return response.status_code, response.text
class Config(object):
"""Class that manages configuration"""
def __init__(self, parameters=None, file_path=None):
self.PLATFORM_HOST = None
self.PLATFORM_PORT = None
self.PLATFORM_DIGITAL_CLIENT = None
self.PLATFORM_DIGITAL_CLIENT_TOKEN = None
self.PLATFORM_DIGITAL_CLIENT_PROTOCOL = 'https'
self.PLATFORM_DIGITAL_CLIENT_AVOID_SSL_CERTIFICATE = True
self.PLATFORM_ONTOLOGY_MODELS = None
self.PLATFORM_USER_TOKEN = None
self.TMP_FOLDER = '/tmp/'
self.NAME = None
if parameters:
self.set_parameters(parameters)
if file_path:
with open(file_path, 'r') as filehandle:
parameters = json.load(filehandle)
self.set_parameters(parameters)
ERROR_MESSAGE = 'Mandatory attribute {} not specified'
if self.PLATFORM_HOST is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_HOST'))
if self.PLATFORM_PORT is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_PORT'))
if self.PLATFORM_DIGITAL_CLIENT is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_DIGITAL_CLIENT'))
if self.PLATFORM_DIGITAL_CLIENT_TOKEN is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_DIGITAL_CLIENT_TOKEN'))
if self.PLATFORM_DIGITAL_CLIENT_PROTOCOL is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_DIGITAL_CLIENT_PROTOCOL'))
if self.PLATFORM_ONTOLOGY_MODELS is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_ONTOLOGY_MODELS'))
if self.PLATFORM_USER_TOKEN is None:
raise AttributeError(ERROR_MESSAGE.format('PLATFORM_USER_TOKEN'))
if self.NAME is None:
raise AttributeError(ERROR_MESSAGE.format('NAME'))
def set_parameters(self, parameters):
for key, value in parameters.items():
self.__setattr__(key, value)
logger.info('Model Service parameters instantiated: {}'.format(parameters))
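# Illustrative minimal parameters dict for Config (values are placeholders; the
# keys mirror the mandatory attributes validated in __init__ above):
#
# Config(parameters={
# "PLATFORM_HOST": "platform.example.com",
# "PLATFORM_PORT": 443,
# "PLATFORM_DIGITAL_CLIENT": "ModelServiceClient",
# "PLATFORM_DIGITAL_CLIENT_TOKEN": "<digital-client-token>",
# "PLATFORM_ONTOLOGY_MODELS": "ModelsOntology",
# "PLATFORM_USER_TOKEN": "Bearer <user-token>",
# "NAME": "MyModelService",
# })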
class BaseModelService(object):
"""Base definition of a model service"""
def __init__(self, config=None):
"""Initializes clients to interact with Platform ontologies and file system"""
logger.info('Creating Model Service')
self.config = Config(parameters=config)
self.digital_client = self.create_digital_client()
self.file_manager = self.create_file_manager()
self.audit_client = self.create_audit_client()
self.reload_model()
def reload_model(self):
"""Reloads best model previously trained"""
logger.info('Searching best available model')
best_model_info = self.get_active_model()
if best_model_info:
logger.info('Best available model specifications: {}'.format(best_model_info))
self.load_model_from_file_system(model_info=best_model_info)
else:
logger.info('Models not found')
def create_digital_client(self):
"""Creates a digital client to communicate with Platform ontologies"""
host = self.config.PLATFORM_HOST
port = self.config.PLATFORM_PORT
iot_client = self.config.PLATFORM_DIGITAL_CLIENT
iot_client_token = self.config.PLATFORM_DIGITAL_CLIENT_TOKEN
protocol = self.config.PLATFORM_DIGITAL_CLIENT_PROTOCOL
avoid_ssl_certificate = self.config.PLATFORM_DIGITAL_CLIENT_AVOID_SSL_CERTIFICATE
digital_client = DigitalClient(
host=host, port=port, iot_client=iot_client, iot_client_token=iot_client_token
)
digital_client.protocol = protocol
digital_client.avoid_ssl_certificate = avoid_ssl_certificate
logger.info('Digital Client created: {}'.format(digital_client.to_json()))
return digital_client
def create_audit_client(self):
"""Creates a audit client to send logs to platform audit ontology"""
host = self.config.PLATFORM_HOST
port = self.config.PLATFORM_PORT
token = self.config.PLATFORM_USER_TOKEN
protocol = self.config.PLATFORM_DIGITAL_CLIENT_PROTOCOL
audit_client = AuditClient(
host=host, port=port, protocol=protocol, token=token
)
logger.info('Audit Client created: {}'.format(
[protocol, host, port, token]
))
return audit_client
def report(self, message=None, result=None):
"""Report event in audit"""
self.audit_client.report(
message=message, result_operation=result,
type_='GENERAL', operation_type='INDICATION'
)
def join_digital_client(self):
"""Digital client connects the server"""
if not self.digital_client.is_connected:
logger.info(DIGITAL_CLIENT_JOIN_MESSAGE)
ok_join, res_join = self.digital_client.join()
if not ok_join:
self.audit_client.report(
message=DIGITAL_CLIENT_JOIN_MESSAGE, result_operation='ERROR',
type_='IOTBROKER', operation_type='JOIN'
)
raise ConnectionError(
DIGITAL_CLIENT_JOIN_ERROR_MESSAGE.format(self.digital_client.to_json())
)
else:
logger.info(DIGITAL_CLIENT_JOIN_SUCCESS_MESSAGE.format(res_join))
self.audit_client.report(
message=DIGITAL_CLIENT_JOIN_MESSAGE, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='JOIN'
)
def create_file_manager(self):
"""Creates a file manager to interact with Platform file system"""
host=self.config.PLATFORM_HOST
protocol=self.config.PLATFORM_DIGITAL_CLIENT_PROTOCOL
user_token = self.config.PLATFORM_USER_TOKEN
file_manager = FileManager(
host=host, user_token=user_token
)
file_manager.protocol = protocol
logger.info('File Manager created: {}'.format(file_manager.to_json()))
return file_manager
def get_model_in_ontology(self, query=None):
"""Search model with specific query"""
self.join_digital_client()
ontology = self.config.PLATFORM_ONTOLOGY_MODELS
ok_query, res_query = self.digital_client.query(
ontology=ontology, query=query, query_type="SQL"
)
if not ok_query:
message = DIGITAL_CLIENT_GET_ERROR_MESSAGE.format(self.digital_client.to_json())
self.audit_client.report(
message=message, result_operation='ERROR',
type_='IOTBROKER', operation_type='QUERY'
)
raise ConnectionError(message)
else:
message = "Digital Client got models information"
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='QUERY'
)
self.digital_client.leave()
best_model = None
if len(res_query):
best_model = res_query[0][ontology]
return best_model
def get_model_by_id(self, model_id=None):
"""Search the model active in models ontology"""
ontology = self.config.PLATFORM_ONTOLOGY_MODELS
query = 'select * from {ontology} as c where c.{ontology}.id = "{model_id}"'.format(
ontology=ontology, model_id=model_id
)
model_info = self.get_model_in_ontology(query=query)
return model_info
def get_active_model(self):
"""Search the model active in models ontology"""
ontology = self.config.PLATFORM_ONTOLOGY_MODELS
query = 'select * from {ontology} as c where c.{ontology}.active = 1'.format(
ontology=ontology
)
model_info = self.get_model_in_ontology(query=query)
return model_info
def set_new_model_in_ontology(
self, name=None, version=None, description=None, metrics=None, model_file_id=None,
dataset_file_id=None, ontology_dataset=None, hyperparameters=None, extra_file_id=None,
pretrained_model_id=None
):
"""Set the new model in models ontology"""
def create_list_item(key, value):
"""Creates dict with key, value and dtype"""
if isinstance(value, int):
dtype = 'int'
elif isinstance(value, float):
dtype = 'float'
else:
dtype = 'string'
value = str(value)
item = {'name': key, 'value': value, 'dtype': dtype}
return item
self.join_digital_client()
model_info = {
self.config.PLATFORM_ONTOLOGY_MODELS: {
'asset': self.config.NAME,
'name': name,
'version': version,
'description': description,
'date': datetime.now().strftime(DATETIME_PATTERN),
'metrics': [create_list_item(key, value) for key, value in metrics.items()],
'hyperparameters': [create_list_item(key, value) for key, value in hyperparameters.items()],
'model_path': model_file_id,
'active': False
}
}
ontology = self.config.PLATFORM_ONTOLOGY_MODELS
if ontology_dataset is not None:
model_info[ontology]['ontology_dataset'] = ontology_dataset
if dataset_file_id is not None:
model_info[ontology]['dataset_path'] = dataset_file_id
if extra_file_id is not None:
model_info[ontology]['extra_path'] = extra_file_id
if pretrained_model_id is not None:
model_info[ontology]['pretrained_model_id'] = pretrained_model_id
model_id = '_'.join(
[
model_info[ontology]['date'],
str(model_info[ontology]['name']),
str(model_info[ontology]['version'])
]
)
model_id = model_id.replace('-', '_')
model_id = model_id.replace(':', '_')
model_info[ontology]['id'] = model_id
logger.info("Digital Client inserting model information: {}".format(model_info))
ok_query, res_query = self.digital_client.insert(self.config.PLATFORM_ONTOLOGY_MODELS, [model_info])
if not ok_query:
message = "Digital Client could not insert model information: {}".format(res_query)
self.audit_client.report(
message=message, result_operation='ERROR',
type_='IOTBROKER', operation_type='INSERT'
)
raise ConnectionError(message)
else:
message = "Digital Client inserted model information: {}".format(res_query)
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='INSERT'
)
self.digital_client.leave()
def create_tmp_folder_name(self, suffix=None):
"""Creates a name for a temporal folder"""
tmp_model_name = self.config.NAME + ' ' + datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
tmp_model_name = re.sub('[/\s:-]', '_', tmp_model_name)
tmp_model_folder = self.config.TMP_FOLDER + '/' + tmp_model_name
if suffix:
tmp_model_folder = tmp_model_folder + '_' + str(suffix)
tmp_model_folder = tmp_model_folder + '/'
return tmp_model_folder, tmp_model_name
def upload_model_to_file_system(self, model_folder=None, zip_path=None):
"""Zips all elements of model and uploads it to file system"""
with zipfile.ZipFile(zip_path, 'w') as zip_fh:
for filename in os.listdir(model_folder):
zip_fh.write(model_folder + '/' + filename, filename)
model_filename = os.path.basename(zip_path)
uploaded, info = self.file_manager.upload_file(model_filename, zip_path)
if not uploaded:
message = "Not possible to upload with File Manager: {}".format(
self.file_manager.to_json()
)
self.audit_client.report(
message=message, result_operation='ERROR',
type_='GENERAL', operation_type='INSERT'
)
raise ConnectionError(message)
else:
message = "File manager uploaded model: {}".format(info)
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='GENERAL', operation_type='INSERT'
)
saved_file_id = info['id']
return saved_file_id
def download_from_file_system(
self, file_id=None, local_folder=None, unzip=None
):
"""Download file from file repository to local folder"""
downloaded, info = self.file_manager.download_file(
file_id, filepath=local_folder
)
if not downloaded:
message = FILE_MANAGER_GET_ERROR_MESSAGE.format(self.file_manager.to_json())
self.audit_client.report(
message=message, result_operation='ERROR',
type_='GENERAL', operation_type='QUERY'
)
raise ConnectionError(message)
else:
message = "File manager downloaded file: {}".format(info)
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='GENERAL', operation_type='QUERY'
)
if unzip:
zip_path = info['name']
zip_obj = zipfile.ZipFile(zip_path)
files = zip_obj.namelist()
for file in files:
zip_obj.extract(file, local_folder)
return info['name']
def load_model_from_file_system(self, model_info=None):
"""Downloads a zip from File System and creates a model"""
file_id = model_info['model_path']
extra_file_id = None
if 'extra_path' in model_info:
extra_file_id = model_info['extra_path']
hyperparameters = {}
for hyperparameter in model_info['hyperparameters']:
name = hyperparameter['name']
value = hyperparameter['value']
dtype = hyperparameter['dtype']
if dtype == 'float':
value = float(value)
elif dtype == 'int':
value = int(value)
hyperparameters[name] = value
tmp_model_folder, _ = self.create_tmp_folder_name()
tmp_extra_folder, _ = self.create_tmp_folder_name(suffix='extra')
os.mkdir(tmp_model_folder)
os.mkdir(tmp_extra_folder)
self.download_from_file_system(
file_id=file_id, local_folder=tmp_model_folder, unzip=True
)
if extra_file_id:
self.download_from_file_system(
file_id=extra_file_id, local_folder=tmp_extra_folder, unzip=True
)
self.load_model(
model_path=tmp_model_folder, hyperparameters=hyperparameters,
extra_path=tmp_extra_folder
)
shutil.rmtree(tmp_model_folder)
shutil.rmtree(tmp_extra_folder)
def train_from_file_system(
self, name=None, version=None, description=None,
dataset_file_id=None, hyperparameters=None,
extra_file_id=None, pretrained_model_id=None
):
"""Trains a model given a file in file system"""
logger.info("Training model from file repository: {}".format(
[name, version, dataset_file_id, hyperparameters]
))
tmp_model_folder, model_name = self.create_tmp_folder_name()
tmp_extra_folder, _ = self.create_tmp_folder_name(suffix='extra')
tmp_pretrained_folder, _ = self.create_tmp_folder_name(suffix='pretrained')
os.mkdir(tmp_model_folder)
os.mkdir(tmp_extra_folder)
os.mkdir(tmp_pretrained_folder)
dataset_path = self.download_from_file_system(
file_id=dataset_file_id, local_folder=tmp_model_folder
)
if extra_file_id:
self.download_from_file_system(
file_id=extra_file_id, local_folder=tmp_extra_folder, unzip=True
)
if pretrained_model_id:
pretrained_model_info = self.get_model_by_id(model_id=pretrained_model_id)
model_path = pretrained_model_info['model_path']
self.download_from_file_system(
file_id=model_path, local_folder=tmp_pretrained_folder, unzip=True
)
logger.info("Training started with dataset {} and output folder {}".format(
dataset_path, tmp_model_folder
))
metrics = self.train(
dataset_path=dataset_path, hyperparameters=hyperparameters,
model_path=tmp_model_folder, extra_path=tmp_extra_folder,
pretrained_path=tmp_pretrained_folder
)
logger.info(TRAINING_SUCCESS_MESSAGE.format(metrics))
os.remove(dataset_path)
zip_path = self.config.TMP_FOLDER + '/' + model_name + '.zip'
model_file_id = self.upload_model_to_file_system(
model_folder=tmp_model_folder, zip_path=zip_path
)
shutil.rmtree(tmp_model_folder)
shutil.rmtree(tmp_extra_folder)
shutil.rmtree(tmp_pretrained_folder)
os.remove(zip_path)
self.set_new_model_in_ontology(
name=name, version=version, description=description, metrics=metrics,
model_file_id=model_file_id, dataset_file_id=dataset_file_id,
hyperparameters=hyperparameters, extra_file_id=extra_file_id,
pretrained_model_id=pretrained_model_id
)
def train_from_ontology(
self, name=None, version=None, description=None,
ontology_dataset=None, hyperparameters=None,
extra_file_id=None, pretrained_model_id=None
):
"""Trains a model given the content of an ontology"""
logger.info("Training model from ontology: {}".format(
[name, version, ontology_dataset, hyperparameters]
))
tmp_model_folder, model_name = self.create_tmp_folder_name()
tmp_extra_folder, _ = self.create_tmp_folder_name(suffix='extra')
tmp_pretrained_folder, _ = self.create_tmp_folder_name(suffix='pretrained')
os.mkdir(tmp_model_folder)
os.mkdir(tmp_extra_folder)
os.mkdir(tmp_pretrained_folder)
if extra_file_id:
self.download_from_file_system(
file_id=extra_file_id, local_folder=tmp_extra_folder, unzip=True
)
if pretrained_model_id:
pretrained_model_info = self.get_model_by_id(model_id=pretrained_model_id)
model_path = pretrained_model_info['model_path']
self.download_from_file_system(
file_id=model_path, local_folder=tmp_pretrained_folder, unzip=True
)
query = "db.{ontology}.find()".format(ontology=ontology_dataset)
query_type = 'NATIVE'
query_batch_size = 900
self.join_digital_client()
ok_query, res_query = self.digital_client.query_batch(
ontology_dataset, query, query_type, batch_size=query_batch_size
)
if not ok_query:
message = DIGITAL_CLIENT_GET_ERROR_MESSAGE.format(self.digital_client.to_json())
self.audit_client.report(
message=message, result_operation='ERROR',
type_='IOTBROKER', operation_type='BATCH'
)
raise ConnectionError(message)
else:
message = "Digital Client got dataset"
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='BATCH'
)
self.digital_client.leave()
dataset = pd.read_json(json.dumps(res_query))
dataset_path = tmp_model_folder + '/dataset.csv'
dataset.to_csv(dataset_path, sep='\t', index=False)
logger.info("Training started with dataset {} and output folder {}".format(
dataset_path, tmp_model_folder
))
metrics = self.train(
dataset_path=dataset_path, hyperparameters=hyperparameters,
model_path=tmp_model_folder, extra_path=tmp_extra_folder,
pretrained_path=tmp_pretrained_folder
)
logger.info(TRAINING_SUCCESS_MESSAGE.format(metrics))
os.remove(dataset_path)
zip_path = self.config.TMP_FOLDER + '/' + model_name + '.zip'
model_file_id = self.upload_model_to_file_system(
model_folder=tmp_model_folder, zip_path=zip_path
)
shutil.rmtree(tmp_model_folder)
shutil.rmtree(tmp_extra_folder)
shutil.rmtree(tmp_pretrained_folder)
self.set_new_model_in_ontology(
name=name, version=version, description=description, metrics=metrics,
model_file_id=model_file_id, ontology_dataset=ontology_dataset,
hyperparameters=hyperparameters, extra_file_id=extra_file_id,
pretrained_model_id=pretrained_model_id
)
def train_from_external_source(
self, name=None, version=None, description=None, hyperparameters=None,
extra_file_id=None, pretrained_model_id=None
):
"""Trains a model given am external source"""
logger.info("Training model from external source: {}".format(
[name, version, hyperparameters]
))
tmp_model_folder, model_name = self.create_tmp_folder_name()
tmp_extra_folder, _ = self.create_tmp_folder_name(suffix='extra')
tmp_pretrained_folder, _ = self.create_tmp_folder_name(suffix='pretrained')
os.mkdir(tmp_model_folder)
os.mkdir(tmp_extra_folder)
os.mkdir(tmp_pretrained_folder)
if extra_file_id:
self.download_from_file_system(
file_id=extra_file_id, local_folder=tmp_extra_folder, unzip=True
)
if pretrained_model_id:
pretrained_model_info = self.get_model_by_id(model_id=pretrained_model_id)
model_path = pretrained_model_info['model_path']
self.download_from_file_system(
file_id=model_path, local_folder=tmp_pretrained_folder, unzip=True
)
logger.info("Training started with external source and output folder {}".format(
tmp_model_folder
))
metrics = self.train(
hyperparameters=hyperparameters, model_path=tmp_model_folder, extra_path=tmp_extra_folder,
pretrained_path=tmp_pretrained_folder
)
logger.info(TRAINING_SUCCESS_MESSAGE.format(metrics))
zip_path = self.config.TMP_FOLDER + '/' + model_name + '.zip'
model_file_id = self.upload_model_to_file_system(
model_folder=tmp_model_folder, zip_path=zip_path
)
shutil.rmtree(tmp_model_folder)
shutil.rmtree(tmp_extra_folder)
shutil.rmtree(tmp_pretrained_folder)
os.remove(zip_path)
if not metrics:
metrics = {}
if not hyperparameters:
hyperparameters = {}
self.set_new_model_in_ontology(
name=name, version=version, description=description, metrics=metrics,
model_file_id=model_file_id, hyperparameters=hyperparameters,
extra_file_id=extra_file_id, pretrained_model_id=pretrained_model_id
)
def predict_from_ontology(
self, input_ontology=None, output_ontology=None
):
"""Predicts given input in a ontology"""
logger.info("Predicting from ontology {}".format(input_ontology))
query = "db.{ontology}.find()".format(ontology=input_ontology)
query_type = 'NATIVE'
query_batch_size = 900
self.join_digital_client()
ok_query, res_query = self.digital_client.query_batch(
input_ontology, query, query_type, batch_size=query_batch_size
)
if not ok_query:
message = DIGITAL_CLIENT_GET_ERROR_MESSAGE.format(self.digital_client.to_json())
self.audit_client.report(
message=message, result_operation='ERROR',
type_='IOTBROKER', operation_type='BATCH'
)
raise ConnectionError(message)
else:
message = "Digital Client got dataset"
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='BATCH'
)
self.digital_client.leave()
logger.info("Inference started with ontology {}".format(input_ontology))
predictions = self.predict(inputs=res_query)
logger.info("Inference finished")
if output_ontology is not None:
self.insert_dataset_in_ontology(
inputs=predictions, ontology=output_ontology
)
return predictions
def predict_from_file_system(
self, dataset_file_id=None, output_ontology=None
):
"""Predics given a file in file system"""
logger.info("Predicting from file repository: {}".format(dataset_file_id))
tmp_folder, _ = self.create_tmp_folder_name()
os.mkdir(tmp_folder)
dataset_path = self.download_from_file_system(
file_id=dataset_file_id, local_folder=tmp_folder
)
logger.info("Inference started with dataset {}".format(dataset_path))
predictions = self.predict(dataset_path=dataset_path)
logger.info("Inference finished")
shutil.rmtree(tmp_folder)
if output_ontology is not None:
self.insert_dataset_in_ontology(
inputs=predictions, ontology=output_ontology
)
return predictions
def insert_dataset_in_ontology(self, inputs=None, ontology=None):
"""Inserts predictions in ontology"""
self.join_digital_client()
logger.info("Digital Client inserting predictions")
ok_query, res_query = self.digital_client.insert(ontology, inputs)
if not ok_query:
message = "Digital Client could not insert predictions: {}".format(res_query)
self.audit_client.report(
message=message, result_operation='ERROR',
type_='IOTBROKER', operation_type='INSERT'
)
raise ConnectionError(message)
else:
message = "Digital Client inserted predictions: {}".format(res_query)
logger.info(message)
self.audit_client.report(
message=message, result_operation='SUCCESS',
type_='IOTBROKER', operation_type='INSERT'
)
self.digital_client.leave()
def load_model(self, model_path=None, hyperparameters=None, extra_path=None):
"""Loads the model given input files and/or folders"""
raise NotImplementedError
def train(
self, dataset_path=None, hyperparameters=None,
model_path=None, extra_path=None, pretrained_path=None):
"""Trains a model given a dataset"""
raise NotImplementedError
def predict(self, inputs=None, dataset_path=None):
"""Predicts given a model and an array of inputs or a dataset"""
raise NotImplementedError
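# Subclass sketch (illustrative): a concrete service overrides the three hooks
# below; names and signatures follow the abstract methods defined above.
class ExampleModelService(BaseModelService):
    def load_model(self, model_path=None, hyperparameters=None, extra_path=None):
        # e.g. deserialize the model files found under model_path
        self.model = None
    def train(
        self, dataset_path=None, hyperparameters=None,
        model_path=None, extra_path=None, pretrained_path=None):
        # fit a model, persist it into model_path, and return a metrics dict
        return {"accuracy": 0.0}
    def predict(self, inputs=None, dataset_path=None):
        # return one prediction per input row
        return []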
|
11543691
|
import nltk
def english_segmentation(sentence):
""""""
# Converted to lowercase letters
sentence = sentence.lower()
# Separate words and token
sentence = nltk.tokenize.word_tokenize(sentence)
# delete the token
english_punctuations = [",", ".", ":", ";", "?", "(", ")", "[", "]", '\'', '"', '=', '|',
"&", "!", "*", "@", "#", "$", "%", "|", "\\", "/", '{', '}']
sentence = [e for e in sentence if e not in english_punctuations]
return sentence
def get_bleu_score(hypothesis, reference):
hypothesis = english_segmentation(hypothesis)
reference = english_segmentation(reference)
weights = (0.25, 0.25, 0.25, 0.25)
bleu_score = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights)
return bleu_score
if __name__ == '__main__':
Ref = "It is the guiding principle which guarantees" \
" the military forces always being under the command of the Party."
Candidate = "It is the practical guide for the army" \
" always to heed the directions of the party."
print(get_bleu_score(Candidate, Ref))
|
11543764
|
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.utils.data import Dataset
from scipy.spatial.transform import Rotation
from utils import map_fn
class TUMMonoVOMultiDataset(Dataset):
def __init__(self, dataset_dirs, **kwargs):
if isinstance(dataset_dirs, list):
self.datasets = [TUMMonoVODataset(dataset_dir, **kwargs) for dataset_dir in dataset_dirs]
else:
self.datasets = [TUMMonoVODataset(dataset_dirs, **kwargs)]
def __getitem__(self, index):
for dataset in self.datasets:
l = len(dataset)
if index >= l:
index -= l
else:
return dataset.__getitem__(index)
return None
def __len__(self):
sum = 0
for dataset in self.datasets:
sum += len(dataset)
return sum
class TUMMonoVODataset(Dataset):
def __init__(self, dataset_dir, frame_count=2, target_image_size=(480, 640), max_length=None, dilation=1, only_keyframes=False, color_augmentation=True, scale_factor=1):
"""
Dataset implementation for TUMMonoVO. Requires the images to be rectified first. Support for depth maps is WIP.
:param dataset_dir: Folder of a single sequence (e.g. .../tummonovo/sequence_50). This folder should contain images/.
:param frame_count: Number of frames used per sample (excluding the keyframe). (Default=2)
:param target_image_size: Desired image size. (Default=(480, 640))
:param max_length: Crop dataset to given length. (Default=None)
:param dilation: Spacing between the different frames. (Default=1)
:param only_keyframes: Only use frames that were used as keyframes in DSO. Relies on depth maps -> WIP. (Default=False)
        :param color_augmentation: Use color jitter augmentation. (Default=True)
:param scale_factor: Scale poses for the sequence. Useful for DSO, which does not necessarily detect the correct world-scale. (Default=1)
"""
self.dataset_dir = Path(dataset_dir)
self.frame_count = frame_count
self.only_keyframes = only_keyframes
self.dilation = dilation
self.target_image_size = target_image_size
self.color_augmentation = color_augmentation
self.scale_factor = scale_factor
self._result = np.loadtxt(self.dataset_dir / "result.txt")
self._times = np.loadtxt(self.dataset_dir / "times.txt")
self._pcalib = self.invert_pcalib(np.loadtxt(self.dataset_dir / "pcalib.txt"))
self._image_index = self.build_image_index()
if self.only_keyframes:
self._keyframe_index = self.build_keyframe_index()
self.length = self._keyframe_index.shape[0]
else:
self.length = self._result.shape[0] - frame_count * dilation
if max_length is not None:
self.length = min(self.length, max_length)
self._offset = (frame_count // 2) * self.dilation
self._intrinsics, self._crop_box = self.compute_target_intrinsics()
self._intrinsics = format_intrinsics(self._intrinsics, self.target_image_size)
self._poses = self.build_poses()
self._depth = torch.zeros((1, target_image_size[0], target_image_size[1]), dtype=torch.float32)
if self.color_augmentation:
self.color_augmentation_transform = ColorJitterMulti(brightness=.2, contrast=.2, saturation=.2, hue=.1)
def preprocess_image(self, img: Image.Image, crop_box=None):
img = img.convert('RGB')
if crop_box:
img = img.crop(crop_box)
if self.target_image_size:
img = img.resize((self.target_image_size[1], self.target_image_size[0]), resample=Image.BILINEAR)
if self.color_augmentation:
img = self.color_augmentation_transform(img)
image_tensor = torch.tensor(np.array(img)).to(dtype=torch.float32)
image_tensor = self._pcalib[image_tensor.to(dtype=torch.long)]
image_tensor = image_tensor / 255 - .5
if len(image_tensor.shape) == 2:
image_tensor = torch.stack((image_tensor, image_tensor, image_tensor))
else:
image_tensor = image_tensor.permute(2, 0, 1)
del img
return image_tensor
def preprocess_depth(self, depth: Image.Image, crop_box=None):
if crop_box:
depth = depth.crop(crop_box)
if self.target_image_size:
if self.target_image_size[0] * 2 == depth.size[1]:
depth_tensor = torch.tensor(np.array(depth).astype(np.float32))
depth_tensor = torch.nn.functional.max_pool2d(depth_tensor.unsqueeze(0), kernel_size=2)
else:
depth = depth.resize((self.target_image_size[1], self.target_image_size[0]), resample=Image.BILINEAR)
depth_tensor = torch.tensor(np.array(depth).astype(np.float32)).unsqueeze(0)
depth_tensor[depth_tensor < 0] = 0
return depth_tensor
def __getitem__(self, index: int):
frame_count = self.frame_count
offset = self._offset
if self.color_augmentation:
self.color_augmentation_transform.fix_transform()
if self.only_keyframes:
index = self._keyframe_index[index] - offset
keyframe_intrinsics = self._intrinsics
keyframe = self.preprocess_image(self.open_image(index + offset), self._crop_box)
keyframe_pose = self._poses[index + offset]
keyframe_depth = self.open_depth(index + offset)
if keyframe_depth is None:
keyframe_depth = self._depth
else:
keyframe_depth = self.preprocess_depth(keyframe_depth, self._crop_box)
frames = [self.preprocess_image(self.open_image(index + i), self._crop_box) for i in range(0, (frame_count + 1) * self.dilation, self.dilation) if i != offset]
intrinsics = [self._intrinsics for _ in range(frame_count)]
poses = [self._poses[index + i] for i in range(0, (frame_count + 1) * self.dilation, self.dilation) if i != offset]
data = {
"keyframe": keyframe,
"keyframe_pose": keyframe_pose,
"keyframe_intrinsics": keyframe_intrinsics,
"frames": frames,
"poses": poses,
"intrinsics": intrinsics,
"sequence": torch.tensor([0]),
"image_id": torch.tensor([index + offset])
}
return data, keyframe_depth
def __len__(self) -> int:
return self.length
def build_image_index(self):
eps = 1e-5
current_index = 0
        image_index = np.zeros((self._result.shape[0]), dtype=int)
for i in range(self._result.shape[0]):
timestamp = self._result[i, 0]
while not timestamp <= self._times[current_index, 1] + eps:
current_index += 1
image_index[i] = current_index
return image_index
def build_keyframe_index(self):
keyframe_index = []
image_index_pos = 0
for p in sorted((self.dataset_dir / "images_depth").glob("*.exr")):
index = int(p.stem[:5])
while self._image_index[image_index_pos] < index:
image_index_pos += 1
index = image_index_pos
if not (index >= len(self._image_index) - (self.frame_count // 2 + 1) * self.dilation or index < (self.frame_count // 2) * self.dilation):
keyframe_index.append(index)
return np.array(keyframe_index)
def load_orig_intrinsics(self):
camera_file = self.dataset_dir / "camera.txt"
with open(camera_file) as f:
intrinsics_use_first_col = ord("0") <= ord(f.readline()[0]) <= ord("9")
if intrinsics_use_first_col:
intrinsics_v = np.loadtxt(camera_file, usecols=list(range(4)), max_rows=1)
else:
intrinsics_v = np.loadtxt(camera_file, usecols=[1, 2, 3, 4], max_rows=1)
        intrinsics = np.identity(4, dtype=float)
intrinsics[0, 0] = intrinsics_v[0]
intrinsics[1, 1] = intrinsics_v[1]
intrinsics[0, 2] = intrinsics_v[2]
intrinsics[1, 2] = intrinsics_v[3]
return intrinsics
def compute_target_intrinsics(self):
P_cam = self.load_orig_intrinsics()
orig_size = tuple(reversed(Image.open(self.dataset_dir / "images" / "00000.jpg").size))
P_cam[0, 0] *= orig_size[1]
P_cam[1, 1] *= orig_size[0]
P_cam[0, 2] *= orig_size[1]
P_cam[1, 2] *= orig_size[0]
r_orig = orig_size[0] / orig_size[1]
r_target = self.target_image_size[0] / self.target_image_size[1]
if r_orig >= r_target:
new_height = r_target * orig_size[1]
box = (0, (orig_size[0] - new_height) // 2, orig_size[1], orig_size[0] - (orig_size[0] - new_height) // 2)
c_x = P_cam[0, 2] / orig_size[1]
c_y = (P_cam[1, 2] - (orig_size[0] - new_height) / 2) / new_height
rescale = orig_size[1] / self.target_image_size[1]
else:
new_width = orig_size[0] / r_target
box = ((orig_size[1] - new_width) // 2, 0, orig_size[1] - (orig_size[1] - new_width) // 2, orig_size[0])
c_x = (P_cam[0, 2] - (orig_size[1] - new_width) / 2) / new_width
c_y = P_cam[1, 2] / orig_size[0]
rescale = orig_size[0] / self.target_image_size[0]
f_x = P_cam[0, 0] / self.target_image_size[1] / rescale
f_y = P_cam[1, 1] / self.target_image_size[0] / rescale
intrinsics = (f_x, f_y, c_x, c_y)
return intrinsics, box
def build_poses(self):
ts = torch.tensor(self._result[:, 1:4])
qs = torch.tensor(self._result[:, [7, 4, 5, 6]])
rs = torch.eye(4).unsqueeze(0).repeat(qs.shape[0], 1, 1)
rs[:, :3, :3] = torch.tensor(Rotation.from_quat(qs).as_matrix())
rs[:, :3, 3] = ts * self.scale_factor
poses = rs
return poses.to(torch.float32)
def open_image(self, index):
return Image.open(self.dataset_dir / "images" / f"{self._image_index[index]:05d}.jpg")
def open_depth(self, index):
p = self.dataset_dir / "images_depth" / f"{self._image_index[index]:05d}_d.exr"
if p.exists() and p.is_file():
return Image.fromarray(cv2.imread(str(p), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
else:
return None
def invert_pcalib(self, pcalib):
inv_pcalib = torch.zeros(256, dtype=torch.float32)
j = 0
for i in range(256):
while j < 255 and i + .5 > pcalib[j]:
j += 1
inv_pcalib[i] = j
return inv_pcalib
def format_intrinsics(intrinsics, target_image_size):
intrinsics_mat = torch.zeros(4, 4, dtype=torch.float32)
intrinsics_mat[0, 0] = intrinsics[0] * target_image_size[1]
intrinsics_mat[1, 1] = intrinsics[1] * target_image_size[0]
intrinsics_mat[0, 2] = intrinsics[2] * target_image_size[1]
intrinsics_mat[1, 2] = intrinsics[3] * target_image_size[0]
intrinsics_mat[2, 2] = 1
intrinsics_mat[3, 3] = 1
return intrinsics_mat
class ColorJitterMulti(torchvision.transforms.ColorJitter):
def fix_transform(self):
self.transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
def __call__(self, x):
return map_fn(x, self.transform)
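# Usage sketch (paths are placeholders; the sequence folder layout follows the
# loading code above: images/, result.txt, times.txt, pcalib.txt, camera.txt):
#
# dataset = TUMMonoVODataset("/data/tummonovo/sequence_50", frame_count=2)
# data, keyframe_depth = dataset[0]
# print(data["keyframe"].shape, data["keyframe_intrinsics"].shape)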
|
11543765
|
from django import template
from user.models import User_Info
from topic.models import Create_Topic
register = template.Library()
@register.simple_tag
def each_people_num(username):
    """Return the number of topics created by the given user."""
    info = User_Info.objects.get(username=username)
    all_topics = info.create_topic_set.all()
    return len(all_topics)
|
11543780
|
from enum import Enum, unique
from app.domain.common.models.value_object import value_object
@unique
class LevelName(Enum):
BLOCKED = "BLOCKED"
USER = "USER"
ADMINISTRATOR = "ADMINISTRATOR"
@value_object
class AccessLevel:
id: int
name: LevelName
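# Construction sketch (assuming the value_object decorator generates a
# dataclass-style __init__ from the annotations above):
#
# admin_level = AccessLevel(id=3, name=LevelName.ADMINISTRATOR)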
|
11543788
|
import sys, random
fin = open(sys.argv[1], 'r')
data = fin.readlines()
fin.close()
random.shuffle(data)
sys.stdout.writelines(data)
|
11543791
|
from petlib.ec import EcGroup
from genzkp import ZKProof, ZKEnv, ConstGen, Sec
def test_blog_post():
# Define an EC group
G = EcGroup(713)
    print(EcGroup.list_curves()[713])
order = G.order()
## Define the Zero-Knowledge proof statement
zk = ZKProof(G)
g, h = zk.get(ConstGen, ["g", "h"])
x, o = zk.get(Sec, ["x", "o"])
Cxo = zk.get(ConstGen, "Cxo")
zk.add_proof(Cxo, x*g + o*h)
## Render the proof statement in Latex
print(zk.render_proof_statement())
# A concrete Pedersen commitment
ec_g = G.generator()
ec_h = order.random() * ec_g
bn_x = order.random()
bn_o = order.random()
ec_Cxo = bn_x * ec_g + bn_o * ec_h
## Bind the concrete variables to the Proof
env = ZKEnv(zk)
env.g, env.h = ec_g, ec_h
env.Cxo = ec_Cxo
env.x = bn_x
env.o = bn_o
# Create the Non-Interactive Zero-Knowledge (NIZK) proof
sig = zk.build_proof(env.get())
# Execute the verification on the proof 'sig'
env = ZKEnv(zk)
env.g, env.h = ec_g, ec_h
env.Cxo = ec_Cxo
assert zk.verify_proof(env.get(), sig)
if __name__ == "__main__":
test_blog_post()
|
11543855
|
import csv
import os
import re
import requests
from pattern import web
def get_dom(url):
html = requests.get(url).text
dom = web.Element(html)
return dom
def get_taxons_from_crispr_db():
taxon_ids = []
if os.path.exists('taxon_ids.csv'):
with open('taxon_ids.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
taxon_ids.append(row[0])
return taxon_ids
url = "http://crispr.u-psud.fr/crispr/"
dom_homepage = get_dom(url)
container = dom_homepage('div[class="strainlist"]')[0]
for link in container('a'):
taxon_id = link.href.encode('ascii', 'ignore')[46:]
taxon_ids.append(taxon_id)
with open('taxon_ids.csv', 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([taxon_id])
return taxon_ids
def get_sequences(taxon_id):
url = ("http://crispr.u-psud.fr/cgi-bin/crispr/SpecieProperties.cgi?"
"Taxon_id=" + taxon_id)
dom = get_dom(url)
table = dom('table[class="primary_table"]')[1]
sequences = []
for sequence in table('tr'):
if not sequence('th'):
seq = {
'Taxon_id': taxon_id,
'RefSeq': sequence('td')[1].content.encode('ascii', 'ignore')
}
sequences.append(seq)
return sequences
def get_loc_ids(sequence):
url = ("http://crispr.u-psud.fr/crispr/CRISPRProperties.php?RefSeq="
+ sequence['RefSeq'])
dom = get_dom(url)
div = dom('div[class="rightcontent"]')[1]
table = div('table[class="primary_table"]')[0]
tbody = table('tbody')[0]
loc_ids = []
for crispr_id in tbody('tr'):
cell = crispr_id('td')[1]
crispr_id_value = (cell('font')[0]
.content
.encode('ascii', 'ignore')
.replace('<br/n/>', '')
.replace('\n', '')
.replace('\t', '')
.replace('<br />', ''))
loc_ids.append(crispr_id_value.split('_')[-1])
return loc_ids
def get_positions(sequence, loc_id):
params = {'Taxon': sequence['Taxon_id']}
crispr_id = sequence['RefSeq'] + "_" + loc_id
params['checked[]'] = crispr_id
r = requests.post(
"http://crispr.u-psud.fr/crispr/crispr.php",
data=params
)
source = web.Element(r.text)
print("Crispr id: " + crispr_id)
table = source('table[class="crispr"]')[0]
tr = table('tr')[3]
td = tr('td')
begin = re.search('(?<=</span>) +\d+', td[0].content)
begin = begin.group(0).split(' ')[-1]
end = re.search('(?<=</span>) +\d+', td[1].content)
end = end.group(0).split(' ')[-1]
return [begin, end]
def get_results():
print("Getting taxon ids...")
taxon_ids = get_taxons_from_crispr_db()
n = len(taxon_ids)
i = 0.
print("Getting positions...")
restart = False
var = False
if os.path.exists('last'):
with open('last', 'r') as f:
last = f.read()
[last_taxon_id, last_refseq, last_loc_id] = last.split(',')
restart = True
for taxon_id in taxon_ids:
print("{:.2%}".format(i / n))
print("Taxon id: " + taxon_id)
if restart and last_taxon_id == taxon_id:
var = True
continue
if var or not restart:
sequences = get_sequences(taxon_id)
for sequence in sequences:
loc_ids = get_loc_ids(sequence)
sequence['Loc_ids'] = loc_ids
for loc_id in sequence['Loc_ids']:
[begin, end] = get_positions(sequence, loc_id)
result = [sequence['RefSeq'], loc_id, begin, end]
with open('results.csv', 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(result)
with open('last', 'w') as f:
f.write(','.join((
sequence['Taxon_id'],
sequence['RefSeq'],
loc_id
)))
i += 1
get_results()
|
11543862
|
import sys
from abc import abstractmethod
import numpy as np
from sklearn.metrics import (accuracy_score, f1_score, log_loss, mean_absolute_error, mean_absolute_percentage_error,
mean_squared_error, mean_squared_log_error, precision_score, r2_score, roc_auc_score,
silhouette_score, roc_curve, auc)
from sklearn.preprocessing import OneHotEncoder
from fedot.core.data.data import InputData, OutputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.tasks import TaskTypesEnum
from fedot.core.pipelines.ts_wrappers import in_sample_ts_forecast
def from_maximised_metric(metric_func):
def wrapper(*args, **kwargs):
return -metric_func(*args, **kwargs)
return wrapper
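# Metrics that are naturally maximised (F1, ROC AUC, precision, accuracy and
# silhouette below) are wrapped with this decorator so that the optimiser can
# uniformly minimise every quality metric.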
class Metric:
output_mode = 'default'
default_value = 0
@classmethod
@abstractmethod
def get_value(cls, pipeline: Pipeline, reference_data: InputData,
validation_blocks: int) -> float:
""" Get metrics values based on pipeline and InputData for validation """
raise NotImplementedError()
@staticmethod
@abstractmethod
def metric(reference: InputData, predicted: OutputData) -> float:
raise NotImplementedError()
class QualityMetric:
max_penalty_part = 0.01
output_mode = 'default'
default_value = 0
@classmethod
def get_value(cls, pipeline: Pipeline, reference_data: InputData,
validation_blocks: int = None) -> float:
metric = cls.default_value
try:
if validation_blocks is None:
# Time series or regression classical hold-out validation
results, reference_data = cls._simple_prediction(pipeline, reference_data)
else:
# Perform time series in-sample validation
reference_data, results = cls._in_sample_prediction(pipeline, reference_data, validation_blocks)
metric = cls.metric(reference_data, results)
except Exception as ex:
print(f'Metric evaluation error: {ex}')
return metric
@classmethod
def _simple_prediction(cls, pipeline: Pipeline, reference_data: InputData):
""" Method prepares data for metric evaluation and perform simple validation """
results = pipeline.predict(reference_data, output_mode=cls.output_mode)
# Define conditions for target and predictions transforming
is_regression = reference_data.task.task_type == TaskTypesEnum.regression
is_multi_target = len(np.array(results.predict).shape) > 1
is_multi_target_regression = is_regression and is_multi_target
# Time series forecasting
is_ts_forecasting = reference_data.task.task_type == TaskTypesEnum.ts_forecasting
if is_ts_forecasting or is_multi_target_regression:
results, reference_data = cls.flatten_convert(results, reference_data)
return results, reference_data
@staticmethod
def flatten_convert(results, reference_data):
""" Transform target and predictions by converting them into
one-dimensional array
:param results: output from pipeline
:param reference_data: actual data for validation
"""
# Predictions convert into uni-variate array
forecast_values = np.ravel(np.array(results.predict))
results.predict = forecast_values
# Target convert into uni-variate array
target_values = np.ravel(np.array(reference_data.target))
reference_data.target = target_values
return results, reference_data
@classmethod
def get_value_with_penalty(cls, pipeline: Pipeline, reference_data: InputData,
validation_blocks: int = None) -> float:
quality_metric = cls.get_value(pipeline, reference_data)
structural_metric = StructuralComplexity.get_value(pipeline)
penalty = abs(structural_metric * quality_metric * cls.max_penalty_part)
metric_with_penalty = (quality_metric +
min(penalty, abs(quality_metric * cls.max_penalty_part)))
return metric_with_penalty
@staticmethod
def _in_sample_prediction(pipeline, data, validation_blocks):
""" Performs in-sample pipeline validation for time series prediction """
# Get number of validation blocks per each fold
horizon = data.task.task_params.forecast_length * validation_blocks
predicted_values = in_sample_ts_forecast(pipeline=pipeline,
input_data=data,
horizon=horizon)
# Clip actual data by the forecast horizon length
actual_values = data.target[-horizon:]
# Wrap target and prediction arrays into OutputData and InputData
results = OutputData(idx=np.arange(0, len(predicted_values)), features=predicted_values,
predict=predicted_values, task=data.task, target=predicted_values,
data_type=DataTypesEnum.ts)
reference_data = InputData(idx=np.arange(0, len(actual_values)), features=actual_values,
task=data.task, target=actual_values, data_type=DataTypesEnum.ts)
return reference_data, results
@staticmethod
@abstractmethod
def metric(reference: InputData, predicted: OutputData) -> float:
raise NotImplementedError()
class RMSE(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return mean_squared_error(y_true=reference.target,
y_pred=predicted.predict, squared=False)
class MSE(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return mean_squared_error(y_true=reference.target,
y_pred=predicted.predict, squared=True)
class MSLE(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return mean_squared_log_error(y_true=reference.target,
y_pred=predicted.predict)
class MAPE(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return mean_absolute_percentage_error(y_true=reference.target,
y_pred=predicted.predict)
class F1(QualityMetric):
default_value = 0
output_mode = 'labels'
@staticmethod
@from_maximised_metric
def metric(reference: InputData, predicted: OutputData) -> float:
n_classes = reference.num_classes
if n_classes > 2:
additional_params = {'average': 'weighted'}
else:
additional_params = {'average': 'micro'}
return f1_score(y_true=reference.target, y_pred=predicted.predict,
**additional_params)
class MAE(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return mean_absolute_error(y_true=reference.target, y_pred=predicted.predict)
class R2(QualityMetric):
default_value = 0
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return r2_score(y_true=reference.target, y_pred=predicted.predict)
class ROCAUC(QualityMetric):
default_value = 0.5
@staticmethod
@from_maximised_metric
def metric(reference: InputData, predicted: OutputData) -> float:
n_classes = reference.num_classes
if n_classes > 2:
additional_params = {'multi_class': 'ovr', 'average': 'macro'}
else:
additional_params = {}
score = round(roc_auc_score(y_score=predicted.predict,
y_true=reference.target,
**additional_params), 3)
return score
@staticmethod
def roc_curve(target: np.ndarray, predict: np.ndarray, pos_label=None):
return roc_curve(target, predict, pos_label=pos_label)
@classmethod
def auc(cls, fpr, tpr):
return auc(fpr, tpr)
class Precision(QualityMetric):
output_mode = 'labels'
@staticmethod
@from_maximised_metric
def metric(reference: InputData, predicted: OutputData) -> float:
return precision_score(y_true=reference.target, y_pred=predicted.predict)
class Logloss(QualityMetric):
default_value = sys.maxsize
@staticmethod
def metric(reference: InputData, predicted: OutputData) -> float:
return log_loss(y_true=reference.target, y_pred=predicted.predict)
class Accuracy(QualityMetric):
default_value = 0
output_mode = 'labels'
@staticmethod
@from_maximised_metric
def metric(reference: InputData, predicted: OutputData) -> float:
return accuracy_score(y_true=reference.target, y_pred=predicted.predict)
class Silhouette(QualityMetric):
default_value = 1
@staticmethod
@from_maximised_metric
def metric(reference: InputData, predicted: OutputData) -> float:
return silhouette_score(reference.features, labels=predicted.predict)
class StructuralComplexity(Metric):
@classmethod
def get_value(cls, pipeline: Pipeline, **args) -> float:
norm_constant = 30
return (pipeline.depth ** 2 + pipeline.length) / norm_constant
class NodeNum(Metric):
@classmethod
def get_value(cls, pipeline: Pipeline, **args) -> float:
norm_constant = 10
return pipeline.length / norm_constant
class ComputationTime(Metric):
@classmethod
def get_value(cls, pipeline: Pipeline, **args) -> float:
return pipeline.computation_time
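# Usage sketch (illustrative): every quality metric is evaluated through the
# same classmethod interface, e.g.
#
# value = RMSE.get_value(fitted_pipeline, validation_data)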
|
11543908
|
import inspect
from django.contrib.contenttypes.models import ContentType
from django.apps import apps
from django.core import serializers
from django.core.management import BaseCommand
from field_history.models import FieldHistory
from field_history.tracker import FieldHistoryTracker, get_serializer_name
class Command(BaseCommand):
help = "Adds initial FieldHistory objects"
def handle(self, *args, **options):
models = []
for model in apps.get_models():
for member in inspect.getmembers(model):
if isinstance(member[1], FieldHistoryTracker):
models.append((model, member[1].fields))
break
if models:
self.stdout.write('Creating initial field history for {} models\n'.format(len(models)))
for model_fields in models:
model = model_fields[0]
fields = model_fields[1]
for obj in model._default_manager.all():
for field in list(fields):
content_type = ContentType.objects.get_for_model(obj)
if not FieldHistory.objects.filter(
object_id=obj.pk,
content_type=content_type,
field_name=field).exists():
data = serializers.serialize(get_serializer_name(),
[obj],
fields=[field])
FieldHistory.objects.create(
object=obj,
field_name=field,
serialized_data=data,
)
else:
self.stdout.write('There are no models to create field history for.')
|
11543909
|
import {{ cookiecutter.app_module }}
import pytest
@pytest.fixture
def client():
"""Flask test client"""
{{ cookiecutter.app_module }}.app.testing = True
return {{ cookiecutter.app_module }}.app.test_client()
def test_version():
"""Package should have a version defined"""
version = getattr({{ cookiecutter.app_module }}, '__version__', None)
assert version is not None
def test_ui_index(client):
"""Should render the home page"""
resp = client.get('/')
assert b'<div id="root">' in resp.data
assert b'initApp' in resp.data
def test_api_echo(client):
"""Should echo the URL parameter"""
resp = client.get('/api/echo/test-value')
assert resp.get_json() == {'value': 'test-value'}
def test_api_echo_empty(client):
"""Should return a 404 error"""
resp = client.get('/api/echo')
assert resp.status_code == 404
|
11543934
|
import logging
from exchange.errors import *
from exchange.ticker import Ticker
from exchange.orderbook import *
from exchange.currency_pair import CurrencyPair
from exchange.exchange_base import ExchangeBase
from exchange.coinbene.coinbene import Coinbene
class ExchangeCoinbene(ExchangeBase):
"""
Coinbene
"""
NAME = 'Coinbene'
VERSION = 'v1'
URL = 'https://github.com/Coinbene/API-Documents/wiki/0.0.0-Coinbene-API-documents'
def __init__(self):
super().__init__(self.NAME, self.VERSION, self.URL)
self._bene = Coinbene()
def get_currency_pairs(self):
'''
Gets currency list supported by exchange
:return: supported currency pair list
:rtype: CurrencyPair[]
'''
currency_pairs = []
symbols = self._bene.get_symbols()
for symbol in symbols['symbol']:
market_currency = symbol['quoteAsset']
currency = symbol['baseAsset']
currency_pairs.append(
CurrencyPair(market_currency, currency))
return currency_pairs
def get_ticker(self, currency_pair):
'''
Gets last price
:param CurrencyPair currency_pair: currency pair
:return: ticker
:rtype: Ticker
'''
if currency_pair is None:
raise InvalidParamException('currency_pair is None')
symbol = currency_pair.currency + currency_pair.market_currency
ticker = self._bene.get_ticker(symbol)
timestamp = int(ticker['timestamp'])
price = float(ticker['ticker'][0]['last'])
return Ticker(currency_pair, price, timestamp)
def get_orderbook(self, currency_pair):
'''
Gets orderbook information
:param CurrencyPair currency_pair: currency pair
:return: orderbook
:rtype: Orderbook
'''
if currency_pair is None:
raise InvalidParamException('currency_pair is None')
symbol = currency_pair.currency + currency_pair.market_currency
orderbook = self._bene.get_orderbook(symbol, 50)
timestamp = orderbook['timestamp']
asks = []
for unit in orderbook['orderbook']['asks']:
price = float(unit['price'])
amount = float(unit['quantity'])
asks.append(OrderbookItem(price, amount))
bids = []
for unit in orderbook['orderbook']['bids']:
price = float(unit['price'])
amount = float(unit['quantity'])
bids.append(OrderbookItem(price, amount))
return Orderbook(currency_pair, asks, bids, timestamp)
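# --- Hedged usage sketch (not part of the original module); it needs live network
# access to Coinbene, and the "USDT"/"BTC" pair below is a hypothetical example ---
if __name__ == "__main__":
    exchange = ExchangeCoinbene()
    pairs = exchange.get_currency_pairs()
    print("supported pairs:", len(pairs))
    ticker = exchange.get_ticker(CurrencyPair("USDT", "BTC"))
    print("BTC/USDT ticker:", ticker)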
|
11543949
|
import urllib.request
import urllib
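# Note: USER_AGENT, WEB_BASE and FILE_BASE are referenced below but are not defined
# in this snippet; their original values are not shown, so they must be supplied.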
def extract(html):
content = ""
for l in html.splitlines():
if 'class="verse"' in l:
for c in l:
if c==' ' or (c >= u"\U00001200" and c <= u"\U0000135A"):
content += c
content = ' '.join(content.split())
return content
def save(book, chapter):
response = get(book, chapter)
if response[0]:
base = "./data/bbl/%02d-%02d" % (book,chapter)
with open(base, "w") as f:
f.write(response[1])
url = FILE_BASE + "%d/%d.mp3" % (book, chapter)
        urllib.request.urlretrieve(url, base + '.mp3')
return response[0]
def get(book, chapter):
b = "%02d"%book
url = WEB_BASE + b + "/" + str(chapter) + ".htm"
request = urllib.request.Request(url,headers={'User-Agent': USER_AGENT})
try:
response = urllib.request.urlopen(request)
html = response.read().decode(response.headers.get_content_charset())
if "404 Not Found" in html:
return (False, "")
else:
extracted = extract(html)
return (True, extracted)
    except Exception:
        # Treat network or decoding failures as a missing chapter.
        return (False, "")
def get_all():
    book = 1
    chapter = 1
    while True:
        chapter = 1
        if not get(book, chapter)[0]:
            print("Finished books")
            break
        while True:
            print("Requesting book %d chapter %d" % (book, chapter))
            exists = save(book, chapter)
            if not exists:
                print("Finished chapters for book %d" % book)
                break
            chapter += 1
        book += 1
|
11543962
|
import re
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from django.urls import reverse
from rest_framework import serializers
from reversion.models import Version
from apis_core.apis_labels.serializers import LabelSerializerLegacy as LabelSerializer
base_uri = getattr(settings, "APIS_BASE_URI", "http://apis.info")
if base_uri.endswith("/"):
base_uri = base_uri[:-1]
class CollectionSerializer(serializers.Serializer):
id = serializers.IntegerField()
name = serializers.CharField()
class VocabsSerializer(serializers.Serializer):
id = serializers.IntegerField()
name = serializers.CharField()
label = serializers.CharField()
class EntityUriSerializer(serializers.Serializer):
id = serializers.IntegerField()
uri = serializers.URLField()
class TextSerializer(serializers.Serializer):
id = serializers.IntegerField()
kind = VocabsSerializer()
text = serializers.CharField()
class EntitySerializer(serializers.Serializer):
id = serializers.IntegerField()
url = serializers.SerializerMethodField(method_name="add_url")
name = serializers.CharField()
start_date = serializers.DateField()
end_date = serializers.DateField()
uris = EntityUriSerializer(source="uri_set", many=True)
labels = LabelSerializer(source="label_set", many=True)
revisions = serializers.SerializerMethodField(method_name="add_revisions")
def add_revisions(self, obj):
ver = Version.objects.get_for_object(obj)
res = []
for v in ver:
usr_1 = getattr(v.revision, "user", None)
if usr_1 is not None:
usr_1 = usr_1.username
else:
usr_1 = "Not specified"
res.append(
{
"id": v.id,
"date_created": v.revision.date_created,
"user_created": usr_1,
}
)
return res
def add_relations(self, obj):
res = {}
mk = obj.__class__.__name__
for rel in ContentType.objects.filter(
app_label="apis_relations", model__icontains=mk.lower()
):
mk2 = re.match(r"{}([A-Za-z]+)".format(mk.lower()), rel.model)
reverse = False
if not mk2:
mk2 = re.match(r"([A-Za-z]+){}".format(mk.lower()), rel.model)
reverse = True
res["{}s".format(mk2.group(1))] = []
if mk2.group(1).lower() != mk.lower():
if self._only_published:
rel_qs = (
getattr(obj, "{}_set".format(rel.model)).all().filter_for_user()
)
else:
rel_qs = getattr(obj, "{}_set".format(rel.model)).all()
for rel2 in rel_qs:
res["{}s".format(mk2.group(1))].append(
RelationEntitySerializer(
rel2,
own_class=mk,
read_only=True,
context=self.context,
reverse=reverse,
).data
)
else:
for t in ["A", "B"]:
for rel2 in (
getattr(obj, "related_{}{}".format(mk.lower(), t))
.all()
.filter_for_user()
):
if t == "A":
ok = "{}B".format(mk.lower())
reverse = True
else:
ok = "{}A".format(mk.lower())
reverse = False
res["{}s".format(mk2.group(1))].append(
RelationEntitySerializer(
rel2,
own_class=ok,
read_only=True,
context=self.context,
reverse=reverse,
).data
)
return res
def add_entity_type(self, obj):
return str(obj.__class__.__name__)
def add_url(self, obj):
url = f"{base_uri}{reverse('GetEntityGenericRoot', kwargs={'pk': obj.pk})}"
return url
def __init__(
self, *args, depth_ent=1, only_published=True, add_texts=False, **kwargs
):
super(EntitySerializer, self).__init__(*args, **kwargs)
self._only_published = only_published
if type(self.instance) == QuerySet:
inst = self.instance[0]
else:
inst = self.instance
if inst is None:
return
for f in inst._meta.fields:
field_name = re.search(r"([A-Za-z]+)\'>", str(f.__class__)).group(1)
if field_name in [
"CharField",
"DateField",
"DateTimeField",
"IntegerField",
"FloatField",
]:
self.fields[f.name] = getattr(serializers, field_name)()
elif field_name in ["ForeignKey", "ManyToMany"]:
if str(f.related_model.__module__).endswith("apis_vocabularies.models"):
many = False
if f.many_to_many or f.one_to_many:
many = True
self.fields[f.name] = VocabsSerializer(many=many)
for f in inst._meta.many_to_many:
if f.name.endswith("relationtype_set"):
continue
elif f.name == "collection":
self.fields["collection"] = CollectionSerializer(many=True)
elif str(f.related_model.__module__).endswith("apis_vocabularies.models"):
self.fields[f.name] = VocabsSerializer(many=True)
self.fields["entity_type"] = serializers.SerializerMethodField(
method_name="add_entity_type"
)
if depth_ent == 1:
self.fields["relations"] = serializers.SerializerMethodField(
method_name="add_relations"
)
if add_texts:
self.fields["text"] = TextSerializer(many=True)
class RelationEntitySerializer(serializers.Serializer):
id = serializers.IntegerField()
start_date = serializers.DateField()
end_date = serializers.DateField()
start_date_written = serializers.DateField()
end_date_written = serializers.DateField()
relation_type = serializers.SerializerMethodField(method_name="add_relation_label")
annotation = serializers.SerializerMethodField(method_name="add_annotations")
revisions = serializers.SerializerMethodField(method_name="add_revisions")
def add_revisions(self, obj):
ver = Version.objects.get_for_object(obj)
res = []
for v in ver:
usr_1 = getattr(v.revision, "user", None)
if usr_1 is not None:
usr_1 = usr_1.username
else:
usr_1 = "Not specified"
res.append(
{
"id": v.id,
"date_created": v.revision.date_created,
"user_created": usr_1,
}
)
return res
def add_annotations(self, obj):
if "apis_highlighter" in settings.INSTALLED_APPS:
res = []
offs = 50
for an in obj.annotation_set.all():
r1 = dict()
r1["id"] = an.pk
r1["user"] = an.user_added.username
text = an.text.text
if offs < an.start:
s = an.start - offs
else:
s = 0
if offs + an.end < len(text):
e = an.end + offs
else:
e = len(text)
r1["annotation"] = text[an.start : an.end]
r1["text"] = text[s:e]
r1["text"] = "{}<annotation>{}</annotation>{}".format(
r1["text"][: an.start - s],
r1["text"][an.start - s : an.end - s],
r1["text"][an.end - s :],
)
r1["text"] = r1["text"].replace("\r\n", "<br/>")
r1["text"] = r1["text"].replace("\r", "<br/>")
r1["text"] = r1["text"].replace("\n", "<br/>")
r1["string_offset"] = "{}-{}".format(an.start, an.end)
# r1["text_url"] = self.context["request"].build_absolute_uri(
# reverse("apis_core:apis_api:text-detail", kwargs={"pk": an.text_id})
# )
r1[
"text_url"
] = f"{base_uri}{reverse('apis_core:apis_api:text-detail', kwargs={'pk': an.text_id})}"
res.append(r1)
return res
def add_entity(self, obj):
return EntitySerializer(
getattr(obj, "related_{}".format(self.entity_type)), depth_ent=0
).data
def add_relation_label(self, obj):
cm = obj.__class__.__name__
res_1 = dict()
res_1["id"] = obj.relation_type.pk
res_1[
"url"
] = f"{base_uri}{reverse('apis_core:apis_api:{}relation-detail'.format(cm).lower(), kwargs={'pk': obj.relation_type.pk},)}"
if self.reverse and len(obj.relation_type.label_reverse) > 0:
res_1["label"] = obj.relation_type.label_reverse
elif self.reverse:
res_1["label"] = "({})".format(obj.relation_type.label)
else:
res_1["label"] = obj.relation_type.label
return res_1
def __init__(self, *args, own_class=None, reverse=False, **kwargs):
super(RelationEntitySerializer, self).__init__(*args, **kwargs)
self.own_class = own_class
self.reverse = reverse
if self.instance is not None:
for f in self.instance._meta.fields:
if f.name.startswith("related_"):
mk2 = f.name.replace("related_", "")
if mk2.lower() != own_class.lower():
self.entity_type = mk2
self.fields["target"] = serializers.SerializerMethodField(
method_name="add_entity"
)
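# --- Hedged usage note (an assumption, not from the original module): an entity
# instance or queryset is typically passed straight to the serializer, e.g.
#   EntitySerializer(entity_instance, depth_ent=1, only_published=True).data
# where `entity_instance` is a hypothetical apis_entities object. ---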
|
11543992
|
import cv2
# Get the number of cameras available
def count_cameras():
    max_tested = 100
    for i in range(max_tested):
        temp_camera = cv2.VideoCapture(i)
        if temp_camera.isOpened():
            temp_camera.release()
            continue
        # The first index that fails to open marks the number of available cameras.
        return i
    return max_tested
print(count_cameras())
|
11543997
|
import numpy as np
import matplotlib.pyplot as plt
import csv
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
def npRelu(X):
return np.maximum(0,X)
def npSigmoid(X):
return 1 / (1 + np.exp(-1 * X))
def parainit(n_X, m):
W = np.random.randn(1, n_X)*0.01
b = 0.0
return W, b
def forward_prop(W, X,b, activation):
Z = np.dot(W, X) + b
if activation == "Relu":
A = npRelu(Z)
if activation == "Sigmoid":
A = npSigmoid(Z)
if activation == "TanH":
        A = np.tanh(Z)
cache = {"Z": Z}
pred = np.ceil(A-0.5)
return A, cache,pred
def cost(A, Y, activation, m):
if activation == "Sigmoid":
c = (-1/m)*(np.dot(Y,np.log(A).T)+np.dot((1-Y),np.log(1-A).T))
if activation == "Relu":
        c = (1 / (2 * m)) * np.sum(np.power((A - Y), 2))
return c
def update(W, A, b, X, Y, m, activation, lr):
    if activation == "Sigmoid":
        der = (A - Y)
    if activation == "Relu":
        der = A / A
    # Gradient step; X is passed explicitly instead of relying on the module-level global.
    W = W - lr * (1 / m) * np.dot(der, X.T)
    b = b - lr * (1 / m) * np.sum(der)
    return W, b
def learn(W, b, X, m, Y, num, lr, activation, show):
costs = []
for i in range(1, num):
A, f_cache, predict = forward_prop(W, X,b, activation)
costs.append(cost(A, Y, activation,m))
        W, b = update(W, A, b, X, Y, m, activation, lr)
if show and (i % 1000==0):
print("Cost is : ",costs[i-1])
            errors = np.sum(np.abs(predict - Y))
            accuracy = 100.0 * (1 - errors / m)
            print("Percentage Accuracy after", i, "iterations is: ", accuracy)
return W, b
# filename = 'nba.csv'
# raw_data = open(filename)
arr = pd.read_csv("bindata.csv", sep=',', header=None,error_bad_lines=False)
print(arr)
arr = np.array(arr)
# arr = float(np.loadtxt("Temp1.txt",delimiter=','))
X = arr[0:99,0:2]
X = X.T
max=np.max(X)
print(X.shape)
# scaler = MinMaxScaler(feature_range=(0, 1))
# X = scaler.fit_transform(X)
# print('X Shape : ', X.shape)
Y = arr[0:99,2]
Y = Y.T
print('Y Shape : ', Y)
n_X = X.shape[0]
m = X.shape[1]
W, b = parainit(n_X, m)
W, b = learn(W, b, X, m, Y, 10000, 0.012, activation="Sigmoid", show=True)
plt.xlabel("x1")
plt.ylabel("x2")
for i in range(0, 99):
    if Y[i]:
        plt.plot(X[0, i], X[1, i], 'go')
    else:
        plt.plot(X[0, i], X[1, i], 'r*')
print("W is ",W)
print("b is ",b)
w1 = W[0,0]
w2 = W[0,1]
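# Decision boundary (an explanatory note, not in the original): the points where
# w1*x1 + w2*x2 + b = 0, i.e. x2 = -(b + w1*x1) / w2, plotted below.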
xval = np.array(np.linspace(np.min(X[1,:]),np.max(X[1,:]),1000))
yval =-1* (b +W[0,0]*xval)/W[0,1]
plt.plot(xval, yval, 'b.')
plt.show()
|
11544093
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def general_weight_initialization(module: nn.Module):
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
if module.weight is not None:
nn.init.uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight)
# print("Initing linear")
if module.bias is not None:
nn.init.constant_(module.bias, 0)
class TimeFirstBatchNorm1d(nn.Module):
def __init__(self, dim, groups=None):
super().__init__()
self.groups = groups
self.bn = nn.BatchNorm1d(dim)
def forward(self, tensor):
_, length, dim = tensor.size()
if self.groups:
dim = dim // self.groups
tensor = tensor.view(-1, dim)
tensor = self.bn(tensor)
if self.groups:
return tensor.view(-1, length, self.groups, dim)
else:
return tensor.view(-1, length, dim)
class LambdaLayer(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, tensor):
return self.func(tensor)
def permute_tensor(permute):
def _permute_tensor(tensor):
return tensor.permute(*permute)
return _permute_tensor
def view_tensor(view):
def _view_tensor(tensor):
return tensor.view(*view)
return _view_tensor
class SEModule(nn.Module):
"""Squeeze-and-excite context gating
Adapted from https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
"""
def __init__(self, channels, reduction):
super().__init__()
self.fc1 = nn.Linear(channels, channels // reduction)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(channels // reduction, channels)
self.sigmoid = nn.Sigmoid()
self._init_weights()
def _init_weights(self):
for module in self.modules():
general_weight_initialization(module)
def forward(self, x):
module_input = x
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class BNSEModule(nn.Module):
"""Squeeze-and-excite context gating
Adapted from https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
"""
def __init__(self, channels, reduction):
super().__init__()
self.bn1 = nn.BatchNorm1d(channels)
self.fc1 = nn.Linear(channels, channels // reduction)
self.relu = nn.ReLU()
self.bn2 = nn.BatchNorm1d(channels // reduction)
self.fc2 = nn.Linear(channels // reduction, channels)
self.sigmoid = nn.Sigmoid()
self._init_weights()
def _init_weights(self):
for module in self.modules():
general_weight_initialization(module)
def forward(self, x):
module_input = x
x = self.bn1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.bn2(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class BNSE1dModule(nn.Module):
"""Squeeze-and-excite context gating
Adapted from https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
"""
def __init__(self, channels, reduction):
super().__init__()
self.bn1 = nn.BatchNorm1d(channels)
self.fc1 = nn.Conv1d(channels, channels // reduction, kernel_size=1)
self.relu = nn.ReLU()
self.bn2 = nn.BatchNorm1d(channels // reduction)
self.fc2 = nn.Conv1d(channels // reduction, channels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self._init_weights()
def _init_weights(self):
for module in self.modules():
general_weight_initialization(module)
def forward(self, x):
module_input = x
x = self.bn1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.bn2(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class NeXtVLAD(nn.Module):
"""NeXtVLAD layer implementation
Adapted from https://github.com/linrongc/youtube-8m/blob/master/nextvlad.py
"""
def __init__(self, num_clusters=64, dim=128, alpha=100.0,
groups: int = 8, expansion: int = 2,
normalize_input=True, p_drop=0.25, add_batchnorm=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
Parameter of initialization. Larger value is harder assignment.
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
"""
super().__init__()
assert dim % groups == 0, "`dim` must be divisible by `groups`"
assert expansion > 1
self.p_drop = p_drop
self.cluster_dropout = nn.Dropout2d(p_drop)
self.num_clusters = num_clusters
self.dim = dim
self.expansion = expansion
self.grouped_dim = dim * expansion // groups
self.groups = groups
self.alpha = alpha
self.normalize_input = normalize_input
self.add_batchnorm = add_batchnorm
self.expansion_mapper = nn.Linear(dim, dim * expansion)
if add_batchnorm:
self.soft_assignment_mapper = nn.Sequential(
nn.Linear(dim * expansion, num_clusters * groups, bias=False),
TimeFirstBatchNorm1d(num_clusters, groups=groups)
)
else:
self.soft_assignment_mapper = nn.Linear(
dim * expansion, num_clusters * groups, bias=True)
self.attention_mapper = nn.Linear(
dim * expansion, groups
)
# (n_clusters, dim / group)
self.centroids = nn.Parameter(
torch.rand(num_clusters, self.grouped_dim))
self.final_bn = nn.BatchNorm1d(num_clusters * self.grouped_dim)
self._init_params()
def _init_params(self):
for component in (self.soft_assignment_mapper, self.attention_mapper,
self.expansion_mapper):
for module in component.modules():
general_weight_initialization(module)
if self.add_batchnorm:
self.soft_assignment_mapper[0].weight = nn.Parameter(
(2.0 * self.alpha * self.centroids).repeat((self.groups, self.groups))
)
nn.init.constant_(self.soft_assignment_mapper[1].bn.weight, 1)
nn.init.constant_(self.soft_assignment_mapper[1].bn.bias, 0)
else:
self.soft_assignment_mapper.weight = nn.Parameter(
(2.0 * self.alpha * self.centroids).repeat((self.groups, self.groups))
)
self.soft_assignment_mapper.bias = nn.Parameter(
(- self.alpha * self.centroids.norm(dim=1)
).repeat((self.groups,))
)
def forward(self, x, masks=None):
"""NeXtVlad Adaptive Pooling
Arguments:
x {torch.Tensor} -- shape: (n_batch, len, dim)
Returns:
torch.Tensor -- shape (n_batch, n_cluster * dim / groups)
"""
if self.normalize_input:
x = F.normalize(x, p=2, dim=2) # across descriptor dim
# expansion
# shape: (n_batch, len, dim * expansion)
x = self.expansion_mapper(x)
# soft-assignment
# shape: (n_batch, len, n_cluster, groups)
soft_assign = self.soft_assignment_mapper(x).view(
x.size(0), x.size(1), self.num_clusters, self.groups
)
soft_assign = F.softmax(soft_assign, dim=2)
# attention
# shape: (n_batch, len, groups)
attention = torch.sigmoid(self.attention_mapper(x))
if masks is not None:
# shape: (n_batch, len, groups)
attention = attention * masks[:, :, None]
# (n_batch, len, n_cluster, groups, dim / groups)
activation = (
attention[:, :, None, :, None] *
soft_assign[:, :, :, :, None]
)
# calculate residuals to each clusters
# (n_batch, n_cluster, dim / groups)
second_term = (
activation.sum(dim=3).sum(dim=1) *
self.centroids[None, :, :]
)
# (n_batch, n_cluster, dim / groups)
first_term = (
# (n_batch, len, n_cluster, groups, dim / groups)
activation *
x.view(x.size(0), x.size(1), 1, self.groups, self.grouped_dim)
).sum(dim=3).sum(dim=1)
# vlad shape (n_batch, n_cluster, dim / groups)
vlad = first_term - second_term
vlad = F.normalize(vlad, p=2, dim=2) # intra-normalization
# flatten shape (n_batch, n_cluster * dim / groups)
vlad = vlad.view(x.size(0), -1) # flatten
# vlad = F.normalize(vlad, p=2, dim=1) # L2 normalize
vlad = self.final_bn(vlad)
if self.p_drop:
vlad = self.cluster_dropout(
vlad.view(x.size(0), self.num_clusters, self.grouped_dim, 1)
).view(x.size(0), -1)
return vlad
def test_nextvlad():
model = NeXtVLAD(
num_clusters=64, dim=128, alpha=100,
groups=8, expansion=2, normalize_input=True,
p_drop=0.25, add_batchnorm=True
)
# shape (n_batch, len, dim)
input_tensor = torch.rand(16, 300, 128)
# shape (n_batch, n_clusters * dim / groups)
output_tensor = model(input_tensor)
assert output_tensor.size() == (16, 64 * 2 * 128 // 8)
model = NeXtVLAD(
num_clusters=64, dim=128, alpha=100,
groups=8, expansion=2, normalize_input=True,
p_drop=0.25, add_batchnorm=False
)
# shape (n_batch, len, dim)
input_tensor = torch.rand(16, 300, 128)
# shape (n_batch, n_clusters * dim / groups)
output_tensor = model(input_tensor)
assert output_tensor.size() == (16, 64 * 2 * 128 // 8)
if __name__ == "__main__":
test_nextvlad()
|
11544100
|
import tensorflow as tf
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from IPython.terminal.debugger import set_trace as keyboard
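# Note: the functions below are methods of a larger training class (they reference
# self.FLAGS, self.model, self.sess, self.test, etc.) that is not part of this snippet.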
def build_train_op(self):
with tf.device('/cpu:0'):
min_queue_examples =20
num_preprocess_threads = 16
imagefile_batch,image_batch,grasp_batch = tf.train.shuffle_batch(
[self.imagefile,self.image,self.grasp],
batch_size=self.FLAGS.batch,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * self.FLAGS.batch,
min_after_dequeue=min_queue_examples)
with tf.device(self.arg_gpu):
pred = self.model(image_batch,self.FLAGS.batch,base='relu',train=True)
loss = self.loss_function(grasp_batch,pred)
loss2 = self.loss_function2(grasp_batch,pred)
lr = self.FLAGS.lr
optimizer = tf.train.AdamOptimizer(learning_rate=lr,
beta1=0.9,
beta2=0.999,
epsilon=1e-08,
use_locking=False,
name='Adam')
grads = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads)
self.truth_grasp = grasp_batch
self.pred = pred
self.imagefile_batch = imagefile_batch
self.train_op = train_op
self.loss = loss
self.loss2 = loss2
def loss_function(self,teach,predict):
"""
    The loss is the mean squared difference between all predicted and target values.
"""
loss = tf.reduce_mean(tf.squared_difference(teach,predict))
with tf.device('/cpu:0'):
tf.summary.scalar('loss', loss)
return loss
def loss_function2(self,teach,predict):
"""
    Loss 2 combines the distance between predicted and target grasp centres with the difference in grasp rotation angle.
"""
dx = teach[0] - predict[0]
dy = teach[1] - predict[1]
pt_distance = tf.sqrt(dx**2 + dy**2)
sin2rad_t = teach[2]
cos2rad_t = teach[3]
rad_t = tf.atan2(cos2rad_t,sin2rad_t) / 2
sin2rad_p = predict[2]
cos2rad_p = predict[3]
rad_p = tf.atan2(cos2rad_p,sin2rad_p) / 2
d_rad = rad_t - rad_p
w_t = teach[4]
w_p = predict[4]
dw = w_t-w_p
loss_distance_sum = tf.reduce_sum(pt_distance)
loss_rad_sum = tf.reduce_sum(tf.sqrt(d_rad**2))
loss_w_sum = tf.reduce_sum(tf.sqrt(dw**2))
loss2 = (loss_distance_sum+loss_rad_sum)/self.FLAGS.batch #Calculate mean
with tf.device('/cpu:0'):
tf.summary.scalar('loss2', loss2)
return loss2
def train(self):
epochs = self.FLAGS.epoch
loss_mva = None
plt.ion()
if not self.FLAGS.load == "":
step = self.FLAGS.load
else:
step = 0
for _ in range(epochs+1):
step += 1
_,loss,loss2,pred,truth_grasp,imagefile_batch = self.sess.run([self.train_op,self.loss,self.loss2,self.pred,self.truth_grasp,self.imagefile_batch])
if step%self.FLAGS.save == 0 and step != 0:
#Test Operation
self.test()
self.test_successrate()
self.save_checkpoint(step)
if loss_mva is None: loss_mva = loss
loss_mva = .9 * loss_mva + .1 * loss
print("Step"+str(step)+" loss = "+str(loss) + " loss2 = "+str(loss2)) + " loss_mva = "+str(loss_mva)
print("Image: " + str(imagefile_batch[0]))
print("pred[0] = "+str(pred[0]))
print("teach_grasp[0] = "+str(truth_grasp[0]))
image = Image.open(self.FLAGS.images+"/"+imagefile_batch[0])
image = self.draw_image_PIL_Rad(image,truth_grasp[0],'blue',radius=2,linethick=3)
image = self.draw_image_PIL_Rad(image,pred[0],'red',radius=2)
plt.imshow(np.asarray(image))
plt.title(str(imagefile_batch[0]))
plt.pause(.01)
plt.show()
plt.clf()
|