| id | content |
|---|---|
1624011
|
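# wptserve-style handler: reply with a CORS-enabled JSON success payload.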
def main(request, response):
headers = []
headers.append((b"Access-Control-Allow-Origin", b"*"))
return headers, b"{ \"result\": \"success\" }"
|
1624016
|
import operator
import os
import warnings
from functools import reduce
from rbnf.core.Optimize import optimize
from rbnf.core.ParserC import Literal, Atom as _Atom, State
from rbnf.core.Tokenizer import Tokenizer
from rbnf.edsl.core import _FnCodeStr
from rbnf.core import ParserC
from rbnf.edsl import *
from rbnf.edsl.rbnf_analyze import check_parsing_complete
from rbnf.auto_lexer import rbnf_lexer
from rbnf.std.common import recover_codes, Name, Str, Number
from Redy.Tools.PathLib import Path
from typing import Sequence
import typing
import builtins
__all__ = ['rbnf', 'build_language', 'Language']
seq: typing.Callable[[typing.Any], typing.Any]
exec("from linq import Flow as seq")  # exec keeps the import opaque to static analyzers; the annotation above declares its type
C = Literal.C
V = Literal.V
N = Literal.N
NC = Literal.NC
END = N('END')
PatternName = str
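# `opt` defers parser construction: calling the returned thunk builds the
# parser with `f()` and passes the result through `optimize`.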
def opt(f):
def call():
return optimize(f())
return call
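# `_Wild` behaves as an infinite set: membership is always True, so a
# wildcard import treats every pattern name as required.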
class _Wild:
def __contains__(self, item):
return True
def copy(self):
return self
def remove(self, item):
pass
class MetaState(State):
def __init__(self,
lang_implementation,
requires: typing.Union[_Wild, typing.Set[PatternName]],
filename=None):
super(MetaState, self).__init__(lang_implementation, filename)
self.requires = requires
rbnf = Language("RBNF")
@rbnf
class CodeItem(Parser):
@classmethod
def bnf(cls):
return _Atom.Any
@classmethod
def when(cls, tokens: Sequence[Tokenizer], state: State):
try:
token = tokens[state.end_index]
except IndexError:
return False
begin_sign: Tokenizer = state.ctx['sign']
return token.colno > begin_sign.colno
class RewriteCode(_FnCodeStr):
fn_args = "state",
fn_name = "rewrite"
class WhenCode(_FnCodeStr):
fn_args = "tokens", "state"
fn_name = "when"
class WithCode(_FnCodeStr):
fn_args = "tokens", "state"
fn_name = "fail_if"
class Guide(Parser):
out_cls = _FnCodeStr
@classmethod
def bnf(cls):
return optimize(
C(cls.__name__.lower()) @ "sign" + CodeItem.one_or_more @ "expr")
@classmethod
@auto_context
def rewrite(cls, state: State):
ctx = state.ctx
code_items: ParserC.Nested = ctx['expr']
first = code_items[0].item
code = recover_codes(each.item for each in code_items)
# noinspection PyArgumentList
return cls.out_cls(code, first.lineno, first.colno, state.filename,
state.data.namespace)
@rbnf
class With(Guide):
out_cls = WithCode
pass
@rbnf
class When(Guide):
out_cls = WhenCode
pass
@rbnf
class Rewrite(Guide):
out_cls = RewriteCode
@classmethod
def bnf(cls):
return optimize((C('rewrite') | C('->')) @ "sign" +
CodeItem.one_or_more @ "expr")
@rbnf
class Primitive(Parser):
@classmethod
def bnf(cls):
# @formatter:off
return optimize(
C('(') + Or @ "or_" + C(')')
| C('[') + Or @ "optional" + C(']')
| Name @ "name"
| Str @ "str")
# @formatter:on
@classmethod
def rewrite(cls, state: State):
get = state.ctx.get
or_: Parser = get('or_')
optional: Parser = get('optional')
name: Tokenizer = get('name')
str: Tokenizer = get('str')
@opt
def delay():
nonlocal name
if name:
name = name.value
if name == '_':
return ParserC.Atom.Any
return state.data.named_parsers[name]
if or_:
return or_()
if optional:
return optional().optional
if str:
value: builtins.str = str.value
if value.startswith("'"):
return C(value[1:-1])
                if value[1] != "'":
                    raise TypeError("Prefix must be exactly one character.")
prefix, value = value[0], value[2:-1]
                try:
                    prefix = cls.lang.prefix[prefix]
                except KeyError:
                    raise NameError(f"Prefix `{prefix}` not found!")
                # `prefix` already holds the looked-up value; indexing again would double-look-up
                return NC(prefix, value)
raise TypeError
return delay
@rbnf
class Trail(Parser):
@classmethod
def bnf(cls):
# @formatter:off
a = optimize(
C('~') @ "rev" + Primitive @ "atom"
| Primitive @ "atom" +
(C('+') @ "one_or_more"
| C('*') @ "zero_or_more"
| C('{') +
(Number(1, 2) @ "interval" | Name @ "guard") + C('}')).optional)
left_assign = optimize(Name @ "bind" + C("=")
| Name @ "bind" + C("<<") @ "is_seq") + a
right_assign = a + optimize(
(C('as') + Name @ "bind"
| C("to") + C('[') @ "is_seq" + Name @ "bind" + C("]")).optional)
return optimize(left_assign | right_assign)
# @formatter:on
@classmethod
@auto_context
def rewrite(cls, state: State):
rev: object
atom: ParserC.Parser
one_or_more: object
zero_or_more: object
interval: Sequence[Tokenizer]
bind: Tokenizer
is_seq: object
guard: Tokenizer
@opt
def delay():
nonlocal atom
atom = atom()
def ret():
if rev:
return ~atom
if one_or_more:
return atom.one_or_more
if zero_or_more:
return atom.unlimited
if interval:
                    if len(interval) == 1:
least = int(interval[0].value)
most = -1
else:
least, most = map(lambda _: int(_.value), interval)
return atom.repeat(least, most)
if guard:
guard_fn = state.data.namespace[guard.value]
guard_fn.name = guard.value
return _Atom.Guard(atom, guard_fn)
return atom
ret: Parser = ret()
if bind:
name: str = bind.value
if is_seq:
# noinspection PyTypeChecker
ret = ret >> name
else:
ret = ret @ name
return optimize(ret)
return delay
@rbnf
class And(Parser):
@classmethod
def bnf(cls):
return Trail.one_or_more @ "and_seq"
@classmethod
@auto_context
def rewrite(cls, state: State):
and_seq: ParserC.Nested
@opt
def delay():
return reduce(operator.add, [each() for each in and_seq])
return delay
@rbnf
class Or(Parser):
@classmethod
def bnf(cls):
return optimize(And @ "head" + (C('|') + (And >> "tail")).unlimited)
@classmethod
@auto_context
def rewrite(cls, state: State):
tail: ParserC.Nested
head: ParserC.Parser
@opt
def delay():
if not tail:
return head()
return reduce(operator.or_, [each() for each in tail], head())
return delay
@rbnf
class Import(Parser):
@classmethod
def bnf(cls):
# @formatter:off
return optimize(
((C("[") + Name @ "language" + C("]")).optional + C("import")
| C("pyimport") @ "python") + Name @ "head" +
(C('.') + (C('*') | Name >> "tail")).unlimited + C('.') + C('[') +
(C('*') | Name.unlimited @ "import_items") + C(']'))
# @formatter:on
@staticmethod
@auto_context
def rewrite(state: MetaState):
language: Tokenizer
head: Tokenizer
tail: typing.List[Tokenizer]
import_items: typing.List[Tokenizer]
python: Tokenizer
path_secs = [head.value, *(each.value for each in tail or ())]
if not import_items:
requires = _Wild()
else:
requires = {each.value for each in import_items}
if language or python:
if python:
warnings.warn(
"keyword `pyimport` is deprecated, "
"use [python] import instead.", DeprecationWarning)
else:
language = language.value
if language != "python":
# TODO: c/c++, .net, java
raise NotImplementedError(language)
lang: Language = state.data
from_item = ".".join(path_secs)
import_items = "*" if isinstance(
requires, _Wild) else "({})".format(', '.join(requires))
import_stmt = f"from {from_item} import {import_items}"
lang._backend_imported.append(import_stmt)
exec(import_stmt, lang.namespace)
else:
            # TODO: this implementation is wrong, but the correct one requires
            # separate ASTs and parsers. See `rbnf.std.compiler`; that one is
            # correct, though it is deprecated.
possible_paths = [Path('./', *path_secs)]
lang = state.data
ruiko_home = os.environ.get('RBNF_HOME')
if ruiko_home:
possible_paths.append(Path(ruiko_home, *path_secs))
for path in possible_paths:
filename = str(path)
                if not filename.lower().endswith('.rbnf'):
                    filename = filename + '.rbnf'
path = Path(filename)
if not path.exists():
continue
with path.open('r') as file:
state = MetaState(
rbnf.implementation,
requires=requires,
filename=str(path))
state.data = lang
_build_language(file.read(), state=state)
if not requires:
break
if requires and not isinstance(requires, _Wild):
raise ImportError(requires)
@rbnf
class Ignore(Parser):
@classmethod
def bnf(cls):
return optimize(
C("ignore") + C('[') + Name.one_or_more @ "names" + C(']'))
@classmethod
@auto_context
def rewrite(cls, state: State):
names: typing.List[Tokenizer]
lang: Language = state.data
lang.ignore(*(each.value for each in names))
@rbnf
class UParser(Parser):
@classmethod
def bnf(cls):
# @formatter:off
return optimize(Name @ "name" + C('::=') + C('|').optional +
Or @ "impl" + When.optional @ "when" +
With.optional @ "fail_if" +
Rewrite.optional @ "rewrite")
# @formatter:on
@classmethod
@auto_context
def rewrite(cls, state: MetaState):
name:...
impl: ParserC.Parser
when: When.Data
fail_if: With.Data
rewrite: With.Data
lang: Language = state.data
requires = state.requires
name = name.value
if name not in requires:
return
methods = {}
if when:
methods['when'] = when[0]
if fail_if:
methods['fail_if'] = fail_if[0]
if rewrite:
methods['rewrite'] = rewrite[0]
methods['bnf'] = lambda: impl()
lang(type(name, (Parser, ), methods))
state.requires.remove(name)
@rbnf
class ULexer(Parser):
@classmethod
def bnf(cls):
# @formatter:off
a = optimize(Name @ "name" + C('cast').optional @ "cast" +
(C('as') + Name @ "new_prefix").optional + C(':=') +
C('|').optional + Str.one_or_more @ "lexer_factors")
b = optimize(
V("keyword") @ "keyword" + Name @ "name" + C(':=') + C("|") +
Str.one_or_more @ "lexer_factors")
return a | b
# @formatter:on
@classmethod
@auto_context
def rewrite(cls, state: MetaState):
keyword:...
name:...
cast:...
new_prefix:...
lexer_factors:...
lang: Language = state.data
new_prefix = new_prefix
if keyword:
cast = True
requires = state.requires
name = name.value
if name not in requires:
return
def split_regex_and_constants(tk: Tokenizer):
v = tk.value
if v.startswith("R'"):
return "regex"
elif v.startswith("'"):
return "constants"
raise ValueError(
f"Unexpected prefixed string `{v}` at lineno {tk.lineno}, column {tk.colno}."
)
lexer_groups = seq(lexer_factors).group_by(split_regex_and_constants)._
regex, constants = (lexer_groups[each]
for each in ('regex', 'constants'))
regex = [each[2:-1] for each in seq(regex).map(lambda _: _.value)._]
constants = [
each[1:-1] for each in seq(constants).map(lambda _: _.value)._
]
methods = {'regex': lambda: regex, 'constants': lambda: constants}
if cast:
methods['cast'] = lambda: True
if new_prefix:
new_prefix = new_prefix.value
methods['prefix'] = lambda: new_prefix
lang(type(name, (Lexer, ), methods))
state.requires.remove(name)
@rbnf
class Statement(Parser):
@classmethod
def bnf(cls):
        return (Import @ "import_" | UParser @ "parser"
                | ULexer @ "lexer" | Ignore @ "ignore")
@rbnf
class Grammar(Parser):
@classmethod
def bnf(cls):
return optimize(END.unlimited + (Statement + END.unlimited).unlimited)
@classmethod
def rewrite(cls, state: MetaState):
return state.data
rbnf.build()
rbnf.lexer = rbnf_lexer.rbnf_lexing
def _find_nth(string: str, element, nth: int = 0):
    # Return the index of the (nth+1)-th occurrence of `element` in `string`,
    # e.g. _find_nth("abcabc", "a", 1) == 3.
    pos: int = string.index(element)
    while nth:
        pos = string.index(element, pos + 1)
        nth -= 1
    return pos
def _build_language(text: str, state):
tokens = tuple(rbnf.lexer(text))
Grammar.match(tokens, state)
check_parsing_complete(text, tokens, state)
def build_language(source_code: str, lang: Language, filename: str):
"""
lang: language object represents your language.
"""
state = MetaState(rbnf.implementation, requires=_Wild(), filename=filename)
state.data = lang
_build_language(source_code, state)
lang.build()
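# A minimal usage sketch (the grammar text and file name below are
# hypothetical): build a Language object from RBNF source.
#
#   my_lang = Language("MyLang")
#   with open("my_grammar.rbnf") as f:
#       build_language(f.read(), my_lang, "my_grammar.rbnf")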
|
1624128
|
import sys
sys.path.append("../")
from autogl.datasets import build_dataset_from_name
from autogl.solver.classifier.link_predictor import AutoLinkPredictor
from autogl.module.train.evaluation import Auc
import yaml
import random
import torch
import numpy as np
if __name__ == "__main__":
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(
"auto link prediction", formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--dataset",
default="cora",
type=str,
help="dataset to use",
choices=[
"cora",
"pubmed",
"citeseer",
"coauthor_cs",
"coauthor_physics",
"amazon_computers",
"amazon_photo",
],
)
parser.add_argument(
"--configs",
type=str,
default="../configs/lp_gcn_benchmark.yml",
help="config to use",
)
# following arguments will override parameters in the config file
parser.add_argument("--hpo", type=str, default="tpe", help="hpo methods")
parser.add_argument(
"--max_eval", type=int, default=50, help="max hpo evaluation times"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--device", default=0, type=int, help="GPU device")
args = parser.parse_args()
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
seed = args.seed
# set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
dataset = build_dataset_from_name(args.dataset)
    with open(args.configs, "r") as f:
        configs = yaml.load(f, Loader=yaml.FullLoader)
configs["hpo"]["name"] = args.hpo
configs["hpo"]["max_evals"] = args.max_eval
autoClassifier = AutoLinkPredictor.from_config(configs)
# train
autoClassifier.fit(
dataset,
time_limit=3600,
evaluation_method=[Auc],
seed=seed,
train_split=0.85,
val_split=0.05,
)
autoClassifier.get_leaderboard().show()
# test
predict_result = autoClassifier.predict_proba()
pos_edge_index, neg_edge_index = (
dataset[0].test_pos_edge_index,
dataset[0].test_neg_edge_index,
)
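    # Positive test edges come first in the concatenated prediction, so the
    # first pos_edge_index.size(1) labels are set to 1 and the rest stay 0.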
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E)
link_labels[: pos_edge_index.size(1)] = 1.0
print(
"test auc: %.4f"
% (Auc.evaluate(predict_result, link_labels.detach().cpu().numpy()))
)
|
1624157
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('..'))
import pydidyoumean
runningOnPython2 = sys.version_info[0] == 2
if runningOnPython2:
from cStringIO import StringIO
else:
from io import StringIO
class TestGeneral(unittest.TestCase):
def test_validateTestFolderContents(self):
        self.assertEqual(sorted(os.listdir(os.path.dirname(os.path.realpath(__file__)))),
                         ['basicTests.py', 'test_file_abc.txt', 'test_file_abcdef.txt', 'test_file_foo.txt'],
                         'Have you changed the contents of the test folder?')
self.assertTrue(os.path.exists(os.path.join('..', 'setup.py'))) # the setup.py in the root project folder is also used in these tests.
def test_levenshtein(self):
testData = (('abc', 'abc', 0), # format: (s1, s2, expectedDistance)
('abc', 'bc', 1),
('abc', 'ac', 1),
('abc', 'ab', 1),
('abc', 'a', 2),
('abc', 'b', 2),
('abc', 'c', 2),
('abc', 'xbc', 1),
('abc', 'axc', 1),
('abc', 'abx', 1),
('abc', 'abcd', 1),
('It was a bright cold day in April, and the clocks were striking thirteen.',
'It was a bright COld ay in April, xand the clocs were xstriking thrteen.',
7),
)
for s1, s2, expectedDistance in testData:
actualDistance = pydidyoumean.levenshtein(s1, s2)
self.assertEqual(expectedDistance, actualDistance,
'Expected distance between %r and %r was %s, but actually was %s.' % (s1, s2, expectedDistance, actualDistance))
            # reverse the argument order to check symmetry (Levenshtein distance is symmetric)
actualDistance = pydidyoumean.levenshtein(s2, s1)
self.assertEqual(expectedDistance, actualDistance,
'Expected distance between %r and %r was %s, but actually was %s.' % (s1, s2, expectedDistance, actualDistance))
def test_printFileSuggestion(self):
savedStdOut = sys.stdout
# basic test
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('test_file_ab.txt')
self.assertEqual(mystdout.getvalue(), 'Did you mean test_file_abc.txt?\n' if __debug__ else '')
# test message arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('test_file_ab.txt', message='How about %s?\n')
self.assertEqual(mystdout.getvalue(), 'How about test_file_abc.txt?\n' if __debug__ else '')
# test folder arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('test_file_ab.txt', folder=os.path.join('..', 'tests'))
self.assertEqual(mystdout.getvalue(), 'Did you mean test_file_abc.txt?\n' if __debug__ else '')
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('setup.py', folder='..', threshold=0, includeIdenticalFilename=True)
self.assertEqual(mystdout.getvalue(), 'Did you mean setup.py?\n' if __debug__ else '')
# test threshold arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('test_file_ab.txt', threshold=0)
self.assertEqual(mystdout.getvalue(), '')
# test includeIdenticalFilename arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printFileSuggestion('test_file_abc.txt', threshold=0, includeIdenticalFilename=True)
self.assertEqual(mystdout.getvalue(), 'Did you mean test_file_abc.txt?\n' if __debug__ else '')
sys.stdout = savedStdOut
def test_formatFileSuggestion(self):
# basic test
self.assertEqual(pydidyoumean.formatFileSuggestion('test_file_ab.txt'),
'Did you mean test_file_abc.txt?\n')
# test message arg
self.assertEqual(pydidyoumean.formatFileSuggestion('test_file_ab.txt', message='How about %s?\n'),
'How about test_file_abc.txt?\n')
# test folder arg
self.assertEqual(pydidyoumean.formatFileSuggestion('test_file_ab.txt', folder=os.path.join('..', 'tests')),
'Did you mean test_file_abc.txt?\n')
self.assertEqual(pydidyoumean.formatFileSuggestion('setup.py', folder='..', threshold=0, includeIdenticalFilename=True),
'Did you mean setup.py?\n')
# test threshold arg
self.assertEqual(pydidyoumean.formatFileSuggestion('test_file_ab.txt', threshold=0),
'')
# test includeIdenticalFilename arg
self.assertEqual(pydidyoumean.formatFileSuggestion('test_file_abc.txt', threshold=0, includeIdenticalFilename=True),
'Did you mean test_file_abc.txt?\n')
def test_suggestFile(self):
# basic test
self.assertEqual(pydidyoumean.suggestFile('test_file_ab.txt'),
'test_file_abc.txt')
# test folder arg
self.assertEqual(pydidyoumean.suggestFile('test_file_ab.txt', folder=os.path.join('..', 'tests')),
'test_file_abc.txt')
self.assertEqual(pydidyoumean.suggestFile('setup.py', folder='..', threshold=0, includeIdenticalFilename=True),
'setup.py')
# test threshold arg
self.assertEqual(pydidyoumean.suggestFile('test_file_ab.txt', threshold=0),
None)
# test includeIdenticalFilename arg
self.assertEqual(pydidyoumean.suggestFile('test_file_abc.txt', threshold=0, includeIdenticalFilename=True),
'test_file_abc.txt')
def test_suggestAllFiles(self):
# basic test
self.assertEqual(list(pydidyoumean.suggestAllFiles('test_file_ab.txt')),
['test_file_abc.txt'])
# test folder arg
self.assertEqual(list(pydidyoumean.suggestAllFiles('test_file_ab.txt', folder=os.path.join('..', 'tests'))),
['test_file_abc.txt'])
self.assertEqual(list(pydidyoumean.suggestAllFiles('setup.py', folder='..', threshold=0, includeIdenticalFilename=True)),
['setup.py'])
# test threshold arg
self.assertEqual(list(pydidyoumean.suggestAllFiles('test_file_ab.txt', threshold=0)),
[])
# test includeIdenticalFilename arg
self.assertEqual(list(pydidyoumean.suggestAllFiles('test_file_abc.txt', threshold=0, includeIdenticalFilename=True)),
['test_file_abc.txt'])
def test_printSuggestion(self):
savedStdOut = sys.stdout
# basic test
sys.stdout = mystdout = StringIO()
pydidyoumean.printSuggestion('ab', ['abc', 'abcdef', 'foo'])
self.assertEqual(mystdout.getvalue(), 'Did you mean abc?\n' if __debug__ else '')
# test message arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printSuggestion('ab', ['abc', 'abcdef', 'foo'], message='How about %s?\n')
self.assertEqual(mystdout.getvalue(), 'How about abc?\n' if __debug__ else '')
# test threshold arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printSuggestion('ab', ['abc', 'abcdef', 'foo'], threshold=0)
self.assertEqual(mystdout.getvalue(), '')
# test includeIdenticalName arg
sys.stdout = mystdout = StringIO()
pydidyoumean.printSuggestion('abc', ['abc', 'abcdef', 'foo'], threshold=0, includeIdenticalName=True)
self.assertEqual(mystdout.getvalue(), 'Did you mean abc?\n' if __debug__ else '')
sys.stdout = savedStdOut
def test_formatSuggestion(self):
# basic test
self.assertEqual(pydidyoumean.formatSuggestion('ab', ['abc', 'abcdef', 'foo']),
'Did you mean abc?\n')
# test message arg
self.assertEqual(pydidyoumean.formatSuggestion('ab', ['abc', 'abcdef', 'foo'], message='How about %s?\n'),
'How about abc?\n')
# test threshold arg
self.assertEqual(pydidyoumean.formatSuggestion('ab', ['abc', 'abcdef', 'foo'], threshold=0),
'')
# test includeIdenticalName arg
self.assertEqual(pydidyoumean.formatSuggestion('abc', ['abc', 'abcdef', 'foo'], threshold=0, includeIdenticalName=True),
'Did you mean abc?\n')
def test_suggest(self):
# basic test
self.assertEqual(pydidyoumean.suggest('ab', ['abc', 'abcdef', 'foo']),
'abc')
# test threshold arg
self.assertEqual(pydidyoumean.suggest('ab', ['abc', 'abcdef', 'foo'], threshold=0),
None)
# test includeIdenticalName arg
self.assertEqual(pydidyoumean.suggest('abc', ['abc', 'abcdef', 'foo'], threshold=0, includeIdenticalName=True),
'abc')
def test_suggestAll(self):
# basic test
self.assertEqual(list(pydidyoumean.suggestAll('ab', ['abc', 'abcdef', 'foo'])),
['abc'])
# test threshold arg
self.assertEqual(list(pydidyoumean.suggestAll('ab', ['abc', 'abcdef', 'foo'], threshold=0)),
[])
# test includeIdenticalName arg
self.assertEqual(list(pydidyoumean.suggestAll('abc', ['abc', 'abcdef', 'foo'], threshold=0, includeIdenticalName=True)),
['abc'])
if __name__ == '__main__':
unittest.main()
|
1624160
|
import sys
import gym
import pathlib2
import agents
import ignite.engine
import args as argument_parser
from utils import path_plot
from utils.step_generator import StepGenerator
from ignite.contrib.handlers import ProgressBar
from utils.lap_time_measure import LapTimeMeasure
from utils.state_recorder import StateRecorder
from utils.action_recorder import ActionRecorder
from environments import GazeboCircuitTurtlebotLidarEnv # must be imported to register in gym
def create_validation_engine(agent, environment):
def _run_single_simulation(engine, timestep=None):
transition = engine.state.batch
state, action, _, reward = transition
engine.state.total_reward += sum(reward.values())
engine.state.state = state
engine.state.action = action
engine.state.reward = reward
engine = ignite.engine.Engine(_run_single_simulation)
@engine.on(ignite.engine.Events.STARTED)
def initialize(engine):
engine.state.agent = agent
engine.state.environment = environment
        engine.state.max_reward = -sys.maxsize  # sys.maxint existed only in Python 2
@engine.on(ignite.engine.Events.EPOCH_STARTED)
def _reset(engine):
engine.state.total_reward = 0
engine.state.loss = 0.
@engine.on(ignite.engine.Events.EPOCH_COMPLETED)
def _reset_data(engine):
engine.state.dataloader.reset()
@engine.on(ignite.engine.Events.COMPLETED)
def close(_):
environment.close()
def _attach(plugin):
plugin.attach(engine)
engine.attach = _attach
return engine
def main(args):
env_kwargs = argument_parser.prepare_env_kwargs(args)
env = gym.make(args.environment_name, **env_kwargs)
state = env.reset()
agent = agents.get_agent(args.agent, env_name=args.environment_name,
network_architecture=args.value_estimator,
init_state=state, num_of_actions=env.action_space.n)
if args.agent in ['dqn', 'a2c']:
agent.load_weights(args.weights)
out_path = pathlib2.Path('/'.join(args.weights.split('/')[:-1]))
agent.eval()
else:
out_path = pathlib2.Path('out/{}'.format(args.agent))
agent.set_action_space(env_kwargs['action_space'])
evaluator = create_validation_engine(agent, env)
path_plotter = path_plot.Plotter(args.environment_name, out_path)
state_recorder = StateRecorder(args.environment_name, out_path)
action_recorder = ActionRecorder(args.environment_name, out_path, env_kwargs['action_space'])
evaluator.attach(state_recorder)
evaluator.attach(action_recorder)
evaluator.attach(path_plotter)
evaluator.attach(LapTimeMeasure(out_path, args.environment_name))
# evaluator.attach(ProgressBar(persist=False))
engine_state = evaluator.run(data=StepGenerator(env, agent, max_steps=args.max_steps,
break_if_collision=args.break_if_collision),
max_epochs=1000)
if __name__ == '__main__':
args = argument_parser.parse_eval_args()
main(args)
|
1624167
|
from django.db import models
from ..restaurants.models import Restaurants
class Foods(models.Model):
    CATEGORIES = (
        (0, 'ایرانی'),    # Iranian
        (1, 'فستفود'),    # fast food
        (2, 'نوشیدنی'),   # beverages
        (3, 'پیشغذا'),    # appetizers
    )
name = models.CharField(max_length=50)
restaurant = models.ForeignKey(Restaurants, related_name='foods', on_delete=models.CASCADE)
    category = models.IntegerField(choices=CATEGORIES, default=0)  # IntegerField takes no max_length
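# A minimal usage sketch (the restaurant instance is hypothetical):
#
#   Foods.objects.create(name='Kebab', restaurant=some_restaurant, category=1)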
|
1624197
|
from magma import *
from mantle import And
from .ring import Ring
__all__ = ['CascadedRing']
def CascadedRing(nlist, has_ce=False, **kwargs):
"""
n cascaded shift registers in a ring
"""
if has_ce:
ce = In(Bit)()
args = ['CE', ce]
else:
ce = 1
args = []
for i in range(len(nlist)):
n = nlist[i] - 1
        assert n < 32
ring = Ring(n, has_ce=True)
ring(CE=ce)
and2 = And(2)
and2(ce, ring.O[n-1])
ce = and2.O
args += ["O", ce]
return AnonymousCircuit(args)
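# A minimal usage sketch (the stage lengths below are hypothetical): each
# ring's carry gates the CE of the next stage via And(2).
#
#   counter = CascadedRing([4, 8, 16])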
|
1624199
|
import unittest
import torch
from torch import nn
from ner.active_heuristic import (
Random,
Uncertantiy,
KNNEmbeddings,
)
from . import utils
class TestActiveHeuristics(unittest.TestCase):
'''
Test cases for active_heuristics
'''
def test_random(self):
'''
        run test cases for the random heuristic
'''
# should not rely on vocabulary
heuristic = Random(vocab=None, tag_vocab=None)
dataset = utils.construct_sample_unlabeled_dataset()
# this is model independent, so setting the model
# to none should not change the output
result = heuristic.evaluate(
model=None,
dataset=dataset,
)
model_result = heuristic.evaluate(
model=utils.MockModel(),
dataset=dataset,
)
# result should be model agnostic
assert torch.all(result.eq(model_result))
        # make sure all the results are identical, positive,
        # between 0 and 1, and sum to 1
assert len(result) == len(dataset)
for i in range(1, len(result)):
assert result[i] >= 0.0
assert result[i] <= 1.0
assert result[i] == result[i - 1]
assert sum(result) == 1.0
def test_uncertain(self):
'''
test cases for uncertain based heuristic
'''
model = utils.MockModel()
dataset = utils.construct_sample_unlabeled_dataset()
vocab = utils.build_sample_vocab()
tag_vocab = utils.build_sample_tag_vocab()
heuristic = Uncertantiy(vocab=vocab, tag_vocab=tag_vocab)
result = heuristic.evaluate(
model=model,
dataset=dataset
)
assert len(result) == len(dataset)
# all result here should be equal to model.random_val
assert torch.all(torch.eq(result, 1.0 / len(result)))
def test_knn(self):
'''
test case for KNN based heuristic
'''
model = utils.MockModel()
dataset = utils.construct_sample_unlabeled_dataset()
vocab = utils.build_sample_vocab()
tag_vocab = utils.build_sample_tag_vocab()
heuristic = KNNEmbeddings(vocab=vocab, tag_vocab=tag_vocab)
heuristic.prepare(
model,
dataset,
)
dataset.remove((0, utils.SAMPLE_DATASET[0][0]))
output = heuristic.evaluate_with_labeled(
model=model,
dataset=dataset,
labeled_indexes=[0],
labeled_points=[(0,) + utils.SAMPLE_DATASET[0]],
)
assert len(output) == len(dataset)
print(output)
|
1624241
|
import unittest
import mock
from click.testing import CliRunner
import msword_cli
from .util import MockApp
from pywintypes import com_error
class TestListCommand(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
@mock.patch('msword_cli.WORD.Documents', Count=0)
@mock.patch('msword_cli.WORD')
def test_no_docs(self, mock_app, mock_docs):
''' Test listing docs with no docs open. '''
result = self.runner.invoke(msword_cli.docs)
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output, '\nNo open documents found.\n')
@mock.patch('msword_cli.WORD', spec_set=MockApp(['foo.docx']))
def test_one_doc(self, mock_app):
''' Test listing docs with one doc open. '''
result = self.runner.invoke(msword_cli.docs)
self.assertEqual(result.exit_code, 0)
# 3 header lines plus 1 doc = 4
self.assertEqual(len(result.output.split('\n')), 4)
@mock.patch('msword_cli.WORD', spec_set=MockApp(['foo.docx', 'bar.docx', 'baz.docx']))
def test_multiple_docs(self, mock_app):
''' Test listing docs with three docs open. '''
result = self.runner.invoke(msword_cli.docs)
self.assertEqual(result.exit_code, 0)
# 3 header lines plus 3 docs = 6
#self.assertEqual(len(result.output.split('\n')), 6) #TODO: fix this
#self.assertEqual(result.output, '')
@mock.patch('msword_cli.WORD', spec_set=MockApp(['foo.docx', 'bar.docx', 'baz.docx']))
class TestActivateCommand(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def test_activate(self, mock_app):
''' Test activate. '''
index = 2
result = self.runner.invoke(msword_cli.activate, [str(index)])
self.assertEqual(result.exit_code, 0)
        mock_app.Documents.Item.assert_called_with(index)  # assert_was_called_with is not a real Mock method
#self.assertEqual(mock_app.Documents[index].Activate.called, True)
def test_activate_bad_index(self, mock_app):
''' Test activate with bad index. '''
index = 6
mock_app.Documents.Item.side_effect = com_error
result = self.runner.invoke(msword_cli.activate, [str(index)])
self.assertEqual(result.exit_code, -1)
def test_activate_no_index(self, mock_app):
''' Test activate with no index. '''
result = self.runner.invoke(msword_cli.activate)
self.assertEqual(result.exit_code, 2)
|
1624284
|
import unittest
import os
import zserio
from testutils import getZserioApi, getApiDir
class PackedAutoArrayStructRecursionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "array_types.zs").packed_auto_array_struct_recursion
def testBitSizeOfLength1(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH1)
def testBitSizeOfLength2(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH2)
def testBitSizeOfLength3(self):
self._checkBitSizeOf(self.AUTO_ARRAY_LENGTH3)
def testInitializeOffsetsLength1(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH1)
def testInitializeOffsetsLength2(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH2)
def testInitializeOffsetsLength3(self):
self._checkInitializeOffsets(self.AUTO_ARRAY_LENGTH3)
def testReadLength1(self):
self._checkRead(self.AUTO_ARRAY_LENGTH1)
def testReadLength2(self):
self._checkRead(self.AUTO_ARRAY_LENGTH2)
def testReadLength3(self):
self._checkRead(self.AUTO_ARRAY_LENGTH3)
def testWriteReadLength1(self):
self._checkWriteRead(self.AUTO_ARRAY_LENGTH1)
def testWriteReadLength2(self):
self._checkWriteRead(self.AUTO_ARRAY_LENGTH2)
def testWriteReadLength3(self):
self._checkWriteRead(self.AUTO_ARRAY_LENGTH3)
def testWriteReadFileLength1(self):
self._checkWriteReadFile(self.AUTO_ARRAY_LENGTH1)
def testWriteReadFileLength2(self):
self._checkWriteReadFile(self.AUTO_ARRAY_LENGTH2)
def testWriteReadFileLength3(self):
self._checkWriteReadFile(self.AUTO_ARRAY_LENGTH3)
def _checkBitSizeOf(self, numElements):
packedAutoArrayRecursion = self._createPackedAutoArrayRecursion(numElements)
bitPosition = 2
autoArrayRecursionBitSize = (
PackedAutoArrayStructRecursionTest._calcPackedAutoArrayRecursionBitSize(numElements))
self.assertEqual(autoArrayRecursionBitSize, packedAutoArrayRecursion.bitsizeof(bitPosition))
def _checkInitializeOffsets(self, numElements):
packedAutoArrayRecursion = self._createPackedAutoArrayRecursion(numElements)
bitPosition = 2
expectedEndBitPosition = bitPosition + (
PackedAutoArrayStructRecursionTest._calcPackedAutoArrayRecursionBitSize(numElements))
self.assertEqual(expectedEndBitPosition, packedAutoArrayRecursion.initialize_offsets(bitPosition))
def _checkRead(self, numElements):
writer = zserio.BitStreamWriter()
self._writePackedAutoArrayRecursionToStream(writer, numElements)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
packedAutoArrayRecursion = self.api.PackedAutoArrayRecursion.from_reader(reader)
self._checkPackedAutoArrayRecursion(packedAutoArrayRecursion, numElements)
def _checkWriteRead(self, numElements):
packedAutoArrayRecursion = self._createPackedAutoArrayRecursion(numElements)
bitBuffer = zserio.serialize(packedAutoArrayRecursion)
self.assertEqual(packedAutoArrayRecursion.bitsizeof(), bitBuffer.bitsize)
self.assertEqual(packedAutoArrayRecursion.initialize_offsets(0), bitBuffer.bitsize)
readPackedAutoArrayRecursion = zserio.deserialize(self.api.PackedAutoArrayRecursion, bitBuffer)
self._checkPackedAutoArrayRecursion(readPackedAutoArrayRecursion, numElements)
def _checkWriteReadFile(self, numElements):
packedAutoArrayRecursion = self._createPackedAutoArrayRecursion(numElements)
filename = self.BLOB_NAME_BASE + str(numElements) + ".blob"
zserio.serialize_to_file(packedAutoArrayRecursion, filename)
readPackedAutoArrayRecursion = zserio.deserialize_from_file(self.api.PackedAutoArrayRecursion, filename)
self._checkPackedAutoArrayRecursion(readPackedAutoArrayRecursion, numElements)
def _createPackedAutoArrayRecursion(self, numElements):
autoArray = []
for i in range(1, numElements + 1):
element = self.api.PackedAutoArrayRecursion(i, [])
autoArray.append(element)
return self.api.PackedAutoArrayRecursion(0, autoArray)
def _checkPackedAutoArrayRecursion(self, packedAutoArrayRecursion, numElements):
self.assertEqual(0, packedAutoArrayRecursion.id)
autoArray = packedAutoArrayRecursion.packed_auto_array_recursion
self.assertEqual(numElements, len(autoArray))
for i in range(1, numElements + 1):
element = autoArray[i - 1]
self.assertEqual(i, element.id)
self.assertEqual(0, len(element.packed_auto_array_recursion))
@staticmethod
def _writePackedAutoArrayRecursionToStream(writer, numElements):
writer.write_bits(0, 8)
writer.write_varsize(numElements)
writer.write_bool(True)
maxBitNumber = 1
writer.write_bits(maxBitNumber, 6)
writer.write_bits(1, 8)
writer.write_varsize(0)
for _ in range(numElements - 1):
writer.write_signed_bits(1, maxBitNumber + 1)
writer.write_varsize(0)
@staticmethod
def _calcPackedAutoArrayRecursionBitSize(numElements):
bitSize = 8 # id
bitSize += 8 # varsize (length of auto array)
bitSize += 1 # packing descriptor: is_packed
if numElements > 1:
bitSize += 6 # packing descriptor: max_bit_number
bitSize += 8 + 8 # first element
bitSize += (numElements - 1) * (8 + 2) # all deltas
return bitSize
BLOB_NAME_BASE = os.path.join(getApiDir(os.path.dirname(__file__)), "packed_auto_array_struct_recursion_")
AUTO_ARRAY_LENGTH1 = 1
AUTO_ARRAY_LENGTH2 = 5
AUTO_ARRAY_LENGTH3 = 10
|
1624299
|
from dirty_models import BooleanField, EnumField, IntegerField, StringIdField
from enum import Enum
from . import BaseModelManager
from ..models import BaseModel, DateTimeField
from ..results import Result
class Stream(BaseModel):
"""
Connection stream model.
"""
class State(Enum):
"""
Connection states.
"""
OPENING = 'OPENING'
"""
Opening stream.
"""
PAIRING = 'PAIRING'
"""
Pairing WhatsappWeb with a phone.
"""
UNPAIRED = 'UNPAIRED'
"""
Unpaired WhatsappWeb with a phone. QR is available.
"""
UNPAIRED_IDLE = 'UNPAIRED_IDLE'
"""
Unpaired WhatsappWeb with a phone. QR is not available.
"""
CONNECTED = 'CONNECTED'
"""
WhatsappWeb is connected to a phone.
"""
TIMEOUT = 'TIMEOUT'
"""
WhatsappWeb connection to a phone is timeout.
"""
CONFLICT = 'CONFLICT'
"""
Other browser has initiated WhatsappWeb with same phone.
"""
UNLAUNCHED = 'UNLAUNCHED'
"""
WhatsappWeb application has not been launched.
"""
PROXYBLOCK = 'PROXYBLOCK'
"""
Proxy is blocking connection.
"""
        TOS_BLOCK = 'TOS_BLOCK'
        """
        Terms-of-service block (exact meaning unknown).
        """
        SMB_TOS_BLOCK = 'SMB_TOS_BLOCK'
        """
        Business (SMB) terms-of-service block (exact meaning unknown).
        """
class Stream(Enum):
DISCONNECTED = 'DISCONNECTED'
"""
Stream disconnected.
"""
SYNCING = 'SYNCING'
"""
Synchronizing data with phone.
"""
RESUMING = 'RESUMING'
"""
Resuming connection with phone.
"""
CONNECTED = 'CONNECTED'
"""
Connected to phone.
"""
    backoff_generation = IntegerField()
    """
    Backoff generation counter (exact meaning unknown).
    """
can_send = BooleanField()
"""
Whether it is able to send messages to phone.
"""
has_synced = BooleanField()
"""
Whether it is synchronized with phone.
"""
    is_incognito = BooleanField()
    """
    Whether it is running in an incognito tab.
    """
    launch_generation = IntegerField()
    """
    Launch generation counter (exact meaning unknown).
    """
launched = BooleanField()
"""
Whether it has been launched.
"""
    retry_timestamp = DateTimeField()
    """
    Retry timestamp (exact meaning unknown).
    """
state = EnumField(enum_class=State)
"""
Current stream connection state.
"""
    stream = EnumField(enum_class=Stream)
    """
    Current stream state.
    """
sync_tag = StringIdField()
"""
Last synchronizing tag.
"""
class StreamManager(BaseModelManager[Stream]):
MODEL_CLASS = Stream
def poke(self) -> Result[None]:
"""
        Refresh the ref field. Used to refresh the QR image when it expires.
"""
return self._execute_command('poke')
def takeover(self) -> Result[None]:
"""
        Refresh login. Used to take the session back when another browser has started a session.
"""
return self._execute_command('takeover')
def logout(self) -> Result[None]:
"""
        Log out of the currently logged-in session.
"""
return self._execute_command('logout')
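# A minimal usage sketch (how the manager is obtained is outside this file;
# `stream_manager` below is hypothetical):
#
#   stream_manager.poke()      # refresh the QR ref before it expires
#   stream_manager.takeover()  # reclaim the session from another browser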
|
1624315
|
import unittest
from katas.beta.multiples_2 import multiples
class MultiplesTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(multiples(49), 'Fizz')
def test_equals_2(self):
self.assertEqual(multiples(147), 'Fang')
def test_equals_3(self):
self.assertEqual(multiples(30), 'Foo')
def test_equals_4(self):
self.assertEqual(multiples(51), 'Far')
|
1624332
|
import json
import logging
from todos.makeresult import make_result
from todos import client, TODOS
from faunadb import query
def create(event, context):
data = json.loads(event['body'])
if 'text' not in data:
logging.error("Validation Failed")
raise Exception("Couldn't create the todo item.")
data = {
'text': data['text'],
'checked': False,
'createdAt': query.time('now'),
'updatedAt': query.time('now')
}
# write the todo to the database
created = client.query(query.create(TODOS, {'data': data}))
# create a response
response = {
"statusCode": 200,
"body": json.dumps(make_result(created))
}
return response
|
1624380
|
import numpy as np
import matplotlib.pyplot as plt
#import scikits.timeseries as ts
#import scikits.timeseries.lib.plotlib as tpl
import statsmodels.api as sm
from scipy import optimize  # fmin is used below; make the dependency explicit
#from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
#dta2 = ts.tsfromtxt(r'gspc_table.csv',
# datecols=0, skiprows=0, delimiter=',',names=True, freq='D')
#print dta2
aa=np.genfromtxt(r'gspc_table.csv', skip_header=0, delimiter=',', names=True)
cl = aa['Close']
ret = np.diff(np.log(cl))[-2000:]*1000.
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.1, 0.1, 0.1, 0.1])
ggres = ggmod.fit(start_params=np.array([-0.1, 0.1, 0.1, 0.0]),
maxiter=1000,method='bfgs')
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
use_rpy = False
if use_rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = ret - ret.mean(), include_mean=False)
f = r.formula('~arma(1,1) + ~garch(1, 1)')
fit = r.garchFit(f, data = ret)
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod0.nma = 1  # was ggmod.nma, evidently a copy-paste slip: ggmod0 is configured here
start_params = np.array([-0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, ret - ret.mean())[0], [0.01, 0.1, 0.1])
print(g11res)
llf = loglike_GARCH11(g11res, ret - ret.mean())
print(llf[0])
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 2
ggmod0.nma = 2  # was ggmod.nma, evidently a copy-paste slip: ggmod0 is configured here
start_params = np.array([-0.1,-0.1, 0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)#, method='ncg')
print('ggres0.params', ggres0.params)
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1,-0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
ggmod._start_params = start_params
ggres = ggmod.fit(start_params=start_params, maxiter=1000)#,method='bfgs')
print('ggres.params', ggres.params)
|
1624388
|
class PNSignalResult(object):
def __init__(self, result):
"""
Representation of signal server response
:param result: result of signal operation
"""
self.timetoken = result[2]
self._result = result
def __str__(self):
return "Signal success with timetoken %s" % self.timetoken
|
1624410
|
import click
import marshmallow as ma
from marshmallow import ValidationError
class MarshmallowClickMixin(click.ParamType):
def get_metavar(self, param):
return self.__class__.__name__
def convert(self, value, param, ctx, **kwargs):
try:
return self.deserialize(value, **kwargs)
except ma.exceptions.ValidationError as e:
raise click.exceptions.BadParameter(e, ctx=ctx, param=param)
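# A minimal usage sketch (the field and option below are hypothetical): mixing
# the class into a marshmallow field lets the field double as a click type.
#
#   class DurationField(ma.fields.TimeDelta, MarshmallowClickMixin):
#       pass
#
#   @click.option("--duration", type=DurationField())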
class FMValidationError(ValidationError):
"""
Custom validation error class.
It differs from the classic validation error by having two
attributes, according to the USEF 2015 reference implementation.
Subclasses of this error might adjust the `status` attribute accordingly.
"""
result = "Rejected"
status = "UNPROCESSABLE_ENTITY"
|
1624421
|
import threading  # FastAgent.play spawns threads; make the dependency explicit
import numpy as np  # np is used below and may not come from the star imports

from .agent import Agent
from ..network.network import *
from .mcts import *
from ..utils import *
MIN = -99999999
MAX = 99999999
score_5 = 5
score_4_live = 4.5
score_4_and_3_live = 4.3
score_4 = 4
score_double_3_live = 3.8
score_3_live = 3.5
score_3 = 3
score_double_2_live = 3
score_2_live = 2.5
score_2 = 2
class AI(Agent):
def __init__(self, color):
Agent.__init__(self, color)
def play(self, *args, **kwargs):
pass
class MCTSAgent(AI):
def __init__(self, conf, color, use_stochastic_policy, specify_model_ver=-1):
AI.__init__(self, color)
black_model_path = 'AlphaGomoku/network/model/model_b_' + str(conf['board_size'])
white_model_path = 'AlphaGomoku/network/model/model_w_' + str(conf['board_size'])
if specify_model_ver != -1:
black_model_path = black_model_path + '_ver_' + str(specify_model_ver)
white_model_path = white_model_path + '_ver_' + str(specify_model_ver)
black_model_path = black_model_path + '.h5'
white_model_path = white_model_path + '.h5'
conf.update(net_para_file=black_model_path)
black_net = Network(conf)
conf.update(net_para_file=white_model_path)
white_net = Network(conf)
self._mcts = MCTS(conf, black_net, white_net, color, use_stochastic_policy)
self._black_net = black_net
self._white_net = white_net
self._board_size = conf['board_size']
def play(self, obs, action, stone_num):
act_ind, pi, prior_prob, value = self._mcts.action(obs, action, stone_num)
act_cor = index2coordinate(act_ind, self._board_size)
return act_cor, pi, prior_prob, value
def set_self_play(self, is_self_play):
self._mcts.set_self_play(is_self_play)
def set_stochastic_policy(self, use_stochastic_policy):
self._mcts.set_stochastic_policy(use_stochastic_policy)
def reset_mcts(self):
self._mcts.reset()
@log
def train(self, obs, color, last_move, pi, z):
obs_b, obs_w = obs[0::2], obs[1::2]
color_b, color_w = color[0::2], color[1::2]
last_move_b, last_move_w = last_move[0::2], last_move[1::2]
pi_b, pi_w = pi[0::2], pi[1::2]
z_b, z_w = z[0::2], z[1::2]
loss_b = self._black_net.train(obs_b, color_b, last_move_b, pi_b, z_b)
loss_w = self._white_net.train(obs_w, color_w, last_move_w, pi_w, z_w)
return loss_b, loss_w
def save_model(self):
self._black_net.save_model()
self._white_net.save_model()
print('> model saved')
def load_model(self):
self._black_net.load_model()
self._white_net.load_model()
class FastAgent(AI):
def __init__(self, color, depth=1): # depth must be even
AI.__init__(self, color)
self._action_list = []
self._score_list = []
self._depth = depth
self._cut_count = 0
self._last_move_list = []
self._atk_def_ratio = 0.1
self._show_info = False
def play(self, obs, action, stone_num, *args):
self._action_list = []
self._score_list = []
if action is not None:
self._last_move_list.append(action)
size = obs.shape[0]
        if sum(sum(abs(obs))) == 0:  # if the AI plays black, the first move always goes to the board center
            pi = [0 for _ in range(size * size)]
            center = (size // 2, size // 2)
            pi[coordinate2index(center, size)] = 1
            self._last_move_list.append(center)
            return center, pi, None, None
pos_list = self.generate(obs, all=True)
if self._show_info:
print('position generated: ', pos_list)
alpha, beta = MIN, MAX
score_dict = dict()
thread_list = []
for i, j in pos_list:
new_obs = obs.copy()
new_obs[i][j] = self.color
target = self._get_thread_target(obs=new_obs, last_move=(i, j), alpha=alpha, beta=beta,
depth=self._depth - 1, score_dict=score_dict)
thr = threading.Thread(target=target, name='thread ' + str((i, j)))
thread_list.append(thr)
thr.start()
for thr in thread_list:
thr.join()
best_action_list = get_best_action_list(score_dict)
if self._show_info:
print('best action list:', best_action_list, ' score = ', score_dict[best_action_list[0]])
ind = np.random.choice([i for i in range(len(best_action_list))])
action = best_action_list[ind]
pi = [0 for _ in range(size * size)]
pi[coordinate2index(action, size)] = 1
self._last_move_list.append(action)
return action, pi, best_action_list, score_dict
def _get_thread_target(self, obs, last_move, alpha, beta, depth, score_dict):
def _min():
_beta = beta
self._last_move_list.append(last_move)
if depth == 0:
score_atk, score_def = self.evaluate(obs)
self._last_move_list.pop()
                # With only a one-ply search the AI must still be taught to defend
                # live threes and rush fours, so the opponent's scores for those
                # shapes are boosted manually here.
if score_def < score_3_live:
if score_atk > score_def:
score = score_atk - self._atk_def_ratio * score_def
else:
score = -score_def + self._atk_def_ratio * score_atk
else:
if score_def == score_3_live:
if score_atk >= score_4:
score = score_atk - self._atk_def_ratio * score_def
else:
score = -score_4
else:
                        # prevent the AI from giving up when the opponent already has a live four
if score_def >= score_4_live:
score = score_5 if score_atk == score_5 else -score_5
else:
score = score_5 if score_atk == score_5 else -score_4_live
x, y = int(last_move[0]), int(last_move[1])
score_dict[(x, y)] = score
if self._show_info:
print((x, y), 'atk=', score_atk, 'def=', score_def, 'total=', score)
return score
pos_list = self.generate(obs)
for i, j in pos_list:
obs[i][j] = -self.color
value = self._max(obs, (i, j), alpha, _beta, depth - 1)
if value < _beta:
_beta = value
obs[i][j] = 0
if alpha > _beta:
break
# this indicates that the parent node (belongs to max layer) will select a node with value
# no less than alpha, however, the value of child selected in this node (belongs to min layer)
# will no more than beta <= alpha, so there is no need to search this node
self._last_move_list.pop()
x, y = int(last_move[0]), int(last_move[1])
score_dict[(x, y)] = _beta
self._action_list.append((x, y))
return _min
# if an obs is in max layer, then the agent is supposed to select the action with max score
# alpha represents the lower bound of the value of this node
def _max(self, obs, last_move, alpha, beta, depth):
self._last_move_list.append(last_move)
if depth == 0:
score_atk, score_def = self.evaluate(obs)
self._last_move_list.pop()
score = score_atk if score_atk > score_def else -score_def
return score
pos_list = self.generate(obs)
for i, j in pos_list:
obs[i][j] = self.color
value = self._min(obs, (i, j), alpha, beta, depth - 1)
if value > alpha:
alpha = value
obs[i][j] = 0
if alpha > beta:
break
self._last_move_list.pop()
return alpha
# if an obs is in min layer, then the agent is supposed to select the action with min scores
# beta represents the upper bound of the value of this node
def _min(self, obs, last_move, alpha, beta, depth):
self._last_move_list.append(last_move)
if depth == 0:
score_atk, score_def = self.evaluate(obs)
self._last_move_list.pop()
score = score_atk if score_atk > score_def else -score_def
return score
pos_list = self.generate(obs)
for i, j in pos_list:
obs[i][j] = -self.color
value = self._max(obs, (i, j), alpha, beta, depth - 1)
# print((i, j), value)
if value < beta:
beta = value
obs[i][j] = 0
if alpha > beta:
break
# this indicates that the parent node (belongs to max layer) will select a node with value
# no less than alpha, however, the value of child selected in this node (belongs to min layer)
# will no more than beta <= alpha, so there is no need to search this node
self._last_move_list.pop()
return beta
def evaluate(self, obs):
pos_ind = np.where(obs)
pos_set = [(pos_ind[0][i], pos_ind[1][i]) for i in range(len(pos_ind[0]))]
score_atk, score_def = 0, 0
for x, y in pos_set:
c = obs[x][y]
pt_score = self.evaluate_point(obs, (x, y))
if c != self.color:
score_def = max(score_def, pt_score)
else:
score_atk = max(score_atk, pt_score)
return score_atk, score_def
def evaluate_point(self, obs, pos):
i, j = pos[0], pos[1]
color = obs[i][j]
dir_set = [(1, 0), (0, 1), (1, 1), (1, -1)]
max_count = 0
max_consecutive_count = 0
max_score = 0
for dir in dir_set:
score = 0
count_1, count_2 = 1, 1
consecutive_count_1, consecutive_count_2 = 1, 1
space_1, space_2 = 0, 0
block_1, block_2 = 0, 0
consecutive_flag = True
for k in range(1, 5):
if i + k * dir[0] in range(0, 15) and j + k * dir[1] in range(0, 15):
if obs[i + k * dir[0]][j + k * dir[1]] == color:
if space_1 == 2:
break
count_1 += 1
if consecutive_flag:
consecutive_count_1 += 1
if obs[i + k * dir[0]][j + k * dir[1]] == -color:
block_1 = 1
break
if obs[i + k * dir[0]][j + k * dir[1]] == 0:
space_1 += 1
consecutive_flag = False
if space_1 == 3:
break
else:
block_1 = 1
break
consecutive_flag = True
for k in range(1, 5):
if i - k * dir[0] in range(0, 15) and j - k * dir[1] in range(0, 15):
if obs[i - k * dir[0]][j - k * dir[1]] == color:
if space_2 == 2:
break
count_2 += 1
if consecutive_flag:
consecutive_count_2 += 1
if obs[i - k * dir[0]][j - k * dir[1]] == -color:
block_2 = 1
break
if obs[i - k * dir[0]][j - k * dir[1]] == 0:
space_2 += 1
consecutive_flag = False
if space_2 == 3:
break
else:
block_2 = 1
break
# there are several cases:
# 1. ooox: block=1, space=0, count=consecutive_count
# 2. ooo__: block=0, space=2, count=consecutive_count
# 3. ooo_x: block=1, space=1, count=consecutive_count
# 4. oo_ox: block=1, space=1, count>consecutive_count
count = max(count_1 + consecutive_count_2, count_2 + consecutive_count_1) - 1
consecutive_count = consecutive_count_1 + consecutive_count_2 - 1
if consecutive_count >= 5:
return score_5
if count == 4:
if consecutive_count == 4: # ??oooo??
if space_1 >= 1 and space_2 >= 1: # ?_oooo_?
score = score_4_live
else:
if space_1 == 0 and space_2 == 0: # xoooox
pass
else: # xoooo_
score = score_4
else:
if consecutive_count == 3: # ??ooo_o??
score = score_4
else: # (consecutive_count == 2) ??oo_oo??
score = score_4
if count == 3:
if consecutive_count == 3: # ??ooo??
if space_1 >= 1 and space_2 >= 1: # ?_ooo_?
score = score_3_live
else:
if space_1 == 0 and space_2 == 0: # xooox
pass
else: # xooo_
score = score_3
else: # (consecutive_count == 2) ??oo_o??
if consecutive_count_1 == 2:
if space_1 >= 1 and space_2 >= 2: # ?_oo_o_?
score = score_3_live
else:
if space_1 == 0 and space_2 == 1: # xoo_ox
pass
else:
score = score_3
else: # (consecutive_count_2 == 2)
if space_2 >= 1 and space_1 >= 2: # ?_o_oo_?
score = score_3_live
else:
if space_1 == 1 and space_2 == 0: # xo_oox
pass
else:
score = score_3
if count == 2:
if consecutive_count == 2: # ??oo??
if space_1 <= 1 and space_2 <= 1: # x?oo?x
pass
else:
if space_1 == 0 or space_2 == 0: # xoo__?
if space_1 == 3 or space_2 == 3: # xoo___
score = score_2
else:
pass
else: # ?__oo_??
score = score_2_live
else: # ??o_o??
if space_1 + space_2 < 3:
pass
else:
if count_1 == 2:
if space_2 == 0: # (space_1 == 3) __o_ox
score = score_2
else:
score = score_2_live
else: # (count_2 == 2)
if space_1 == 0: # (space_2 == 3) xo_o__
score = score_2
else:
score = score_2_live
# bonus
if max_score == score_2_live and score == score_2_live:
score = score_double_2_live
if max_score == score_3_live and score == score_3_live:
score = score_double_3_live
if max_score == score_4 and score == score_3_live:
score = score_4_and_3_live
if max_score == score_3_live and score == score_4:
score = score_4_and_3_live
if count > max_count:
max_count = count
if consecutive_count > max_consecutive_count:
max_consecutive_count = consecutive_count
if score > max_score:
max_score = score
return max_score
def generate(self, obs, all=False):
good_pts = []
good_scores = []
pts = []
scores = []
dir_set = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]
if all:
indices = np.where(obs)
check_list = [(indices[0][i], indices[1][i]) for i in range(len(indices[0]))]
else:
if len(self._last_move_list) > 7:
check_list = self._last_move_list[-7:]
else:
check_list = self._last_move_list
for x0, y0 in check_list:
for dir in dir_set:
if x0 + dir[0] in range(0, 15) and y0 + dir[1] in range(0, 15):
pos = (x0 + dir[0], y0 + dir[1])
if obs[pos[0]][pos[1]] == 0 and pos not in pts:
obs[pos[0]][pos[1]] = self.color
score_atk = self.evaluate_point(obs, pos)
obs[pos[0]][pos[1]] = -self.color
score_def = self.evaluate_point(obs, pos)
score = max(score_atk, score_def)
if score >= score_3_live:
good_pts.append(pos)
good_scores.append(score)
if score_atk == score_5:
break
pts.append(pos)
scores.append(score)
obs[pos[0]][pos[1]] = 0
if len(good_pts) > 0 and max(good_scores) >= score_4:
# print('good')
pts = good_pts
scores = good_scores
lst = np.array([pts, scores])
pts = lst[:, lst[1].argsort()][0]
pos_list = list(pts)
pos_list.reverse()
return pos_list
def get_best_action_list(score_dict):
best_action_list = []
max_score = MIN
for key in score_dict:
if max_score < score_dict[key]:
best_action_list = [key]
max_score = score_dict[key]
elif max_score == score_dict[key]:
best_action_list.append(key)
return best_action_list
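# For example, get_best_action_list({(7, 7): 4.5, (7, 8): 4.5, (0, 0): 2.0})
# returns [(7, 7), (7, 8)]: every action tied at the maximum score.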
def print_score_dict(score_dict):
for key in score_dict:
print(str(key) + ': ' + str(score_dict[key]))
|
1624428
|
from revscoring.features import wikitext as wikitext_features
from revscoring.features import revision_oriented
from revscoring.features.modifiers import sub
from revscoring.languages import english
from . import mediawiki, wikipedia, wikitext
local_wiki = [
revision_oriented.revision.comment_matches(
r"copy|edit|npov|wp:?el",
name="enwiki.revision.comment.has_known_word"
),
revision_oriented.revision.comment_matches(
r"\[\[WP:AES\|←\]\]",
name="enwiki.revision.comment.is_aes"
),
sub(
wikitext_features.revision.template_names_matching(r"^cite"),
wikitext_features.revision.parent.template_names_matching(r"^cite"),
name="enwiki.revision.diff.cite_templates_added"
)
]
badwords = [
english.badwords.revision.diff.match_delta_sum,
english.badwords.revision.diff.match_delta_increase,
english.badwords.revision.diff.match_delta_decrease,
english.badwords.revision.diff.match_prop_delta_sum,
english.badwords.revision.diff.match_prop_delta_increase,
english.badwords.revision.diff.match_prop_delta_decrease
]
informals = [
english.informals.revision.diff.match_delta_sum,
english.informals.revision.diff.match_delta_increase,
english.informals.revision.diff.match_delta_decrease,
english.informals.revision.diff.match_prop_delta_sum,
english.informals.revision.diff.match_prop_delta_increase,
english.informals.revision.diff.match_prop_delta_decrease
]
dict_words = [
english.dictionary.revision.diff.dict_word_delta_sum,
english.dictionary.revision.diff.dict_word_delta_increase,
english.dictionary.revision.diff.dict_word_delta_decrease,
english.dictionary.revision.diff.dict_word_prop_delta_sum,
english.dictionary.revision.diff.dict_word_prop_delta_increase,
english.dictionary.revision.diff.dict_word_prop_delta_decrease,
english.dictionary.revision.diff.non_dict_word_delta_sum,
english.dictionary.revision.diff.non_dict_word_delta_increase,
english.dictionary.revision.diff.non_dict_word_delta_decrease,
english.dictionary.revision.diff.non_dict_word_prop_delta_sum,
english.dictionary.revision.diff.non_dict_word_prop_delta_increase,
english.dictionary.revision.diff.non_dict_word_prop_delta_decrease
]
damaging = wikipedia.page + \
wikitext.parent + wikitext.diff + mediawiki.user_rights + \
mediawiki.protected_user + mediawiki.comment + \
badwords + informals + dict_words
"Damaging Features"
reverted = damaging
goodfaith = damaging
|
1624439
|
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from numpy.testing import assert_array_equal
from mygrad import Tensor
from tests.custom_strategies import tensors, valid_constant_arg
real_types = (
hnp.integer_dtypes() | hnp.unsigned_integer_dtypes() | hnp.floating_dtypes()
)
@given(
tensor=tensors(dtype=real_types),
dest_type=real_types,
data=st.data(),
)
def test_astype(tensor: Tensor, dest_type: np.dtype, data: st.DataObject):
tensor = +tensor # give tensor a creator
constant = data.draw(valid_constant_arg(dest_type), label="constant")
new_tensor = tensor.astype(dest_type, constant=constant)
expected_tensor = Tensor(tensor, dtype=dest_type, constant=constant)
assert new_tensor is not tensor
assert new_tensor.constant is expected_tensor.constant
assert tensor.creator is not None
assert new_tensor.creator is None
assert new_tensor.dtype == dest_type
assert new_tensor.shape == tensor.shape
assert new_tensor.data is not tensor.data
assert_array_equal(new_tensor.data, expected_tensor.data)
@settings(max_examples=30)
@pytest.mark.parametrize(
"type_strategy",
[hnp.integer_dtypes(), hnp.unsigned_integer_dtypes(), hnp.floating_dtypes()],
)
@given(data=st.data())
def test_upcast_roundtrip(type_strategy, data: st.DataObject):
thin, wide = data.draw(
st.tuples(type_strategy, type_strategy).map(
lambda x: sorted(x, key=lambda y: np.dtype(y).itemsize)
)
)
orig_tensor = data.draw(
hnp.arrays(
dtype=thin,
shape=hnp.array_shapes(),
elements=hnp.from_dtype(thin).filter(np.isfinite),
).map(Tensor)
)
roundtripped_tensor = orig_tensor.astype(wide).astype(thin)
assert_array_equal(orig_tensor, roundtripped_tensor)
@pytest.mark.parametrize("src_constant", [True, False])
@pytest.mark.parametrize("dst_constant", [None, "match"])
@pytest.mark.parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"])
def test_nocopy(src_constant: bool, dst_constant, casting):
x = Tensor([1.0, 2.0], constant=src_constant)
if dst_constant == "match":
dst_constant = src_constant
y = x.astype(x.dtype, copy=False, casting=casting, constant=dst_constant)
assert y is x
|
1624460
|
from __future__ import print_function
import os
from csv import DictReader
import click
from openelex import models
from openelex import COUNTRY_DIR
FIXTURE_DIR = os.path.join(COUNTRY_DIR, 'fixtures')
COLLECTIONS = ['office', 'party',]
UPSERT_FIELDS = {
'party': ('abbrev',),
'office': ('state', 'name', 'district',),
}
def _get_document_class(collection):
return getattr(models, collection.capitalize())
def _get_fixture_filename(collection, fmt='csv'):
return os.path.join(FIXTURE_DIR, "%s.%s" % (collection, fmt))
@click.command(name='load_metadata.run', help="Populate metadata in database "
"from fixture files")
@click.option('--collection', help='Collection where metadata will be loaded. E.g. "office"')
@click.option('--filename', help="Filename of fixture file. Optional. If omitted "
"the default filename will be calculated based on the collection name.")
@click.option('--database', default='openelex',
help="Database where data will be loaded. Optional. Default is openelex")
@click.option('--clear', is_flag=True,
help="Delete all records in collection before loading")
def run(collection, filename=None, database='openelex', clear=False):
"""
Populate metadata in MongoDB from fixture files.
"""
if collection not in COLLECTIONS:
raise ValueError("Unknown collection '%s'." % (collection))
fmt = 'csv'
if filename is None:
filename = _get_fixture_filename(collection, fmt)
num_created = 0
count = 0
doc_cls = _get_document_class(collection)
with open(filename, 'r') as f:
# Only delete old data if we ask for it and if we can open the new
# file.
if clear:
print("Clearing all existing records.\n")
doc_cls.objects.delete()
reader = DictReader(f)
for row in reader:
            # This might not be the most efficient way to do an upsert based
            # on natural keys, but it's the clearest and probably fast enough.
o, created = doc_cls.objects.get_or_create(**row)
count += 1
if created:
num_created += 1
msg = "Imported %d records.\n" % (num_created)
if (num_created < count):
msg = msg + "%d records already in database.\n" % (count - num_created)
print(msg)
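# A hypothetical invocation sketch (command name and fixture layout are
# assumptions, not part of this module): with a fixtures/office.csv in place,
# something like
#
#   openelex load_metadata.run --collection office --clear
#
# would wipe the `office` collection and re-import every row via get_or_create.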
|
1624467
|
import pathlib
import pytest
from mock import MagicMock
import boto3
from moto import mock_sqs
from . import DummyPlugin
TEST_DIR = pathlib.Path(__file__).parent.absolute()
@pytest.fixture
def aws_env(monkeypatch):
monkeypatch.setenv("AWS_DEFAULT_REGION", "us-west-2")
@pytest.fixture
def test_dir():
return TEST_DIR
@pytest.fixture
def test_db_path(test_dir):
return str(test_dir / "test_tiles.db")
@pytest.fixture
def dummy_plugin_name():
from odc.stats.plugins import register
name = "dummy-plugin"
register(name, DummyPlugin)
return name
@pytest.fixture
def sqs_message():
response = {
"ResponseMetadata": {
"RequestId": "45ff2253-2bfe-5395-9f14-7af67a6b8f27",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "45ff2253-2bfe-5395-9f14-7af67a6b8f27",
"date": "Tue, 16 Feb 2021 04:51:33 GMT",
"content-type": "text/xml",
"content-length": "215",
},
"RetryAttempts": 0,
}
}
msg = MagicMock()
msg.delete = lambda: response
msg.change_visibility = lambda VisibilityTimeout=0: response
msg.body = ""
return msg
@pytest.fixture
def sqs_queue_by_name(aws_env):
qname = "test-sqs"
with mock_sqs():
sqs = boto3.resource("sqs")
sqs.create_queue(QueueName=qname)
yield qname
|
1624487
|
defines = {
"output": "../lib/resty/openssl/x509/csr.lua",
"output_test": "../t/openssl/x509/csr.t",
"type": "X509_REQ",
"has_sign_verify": True,
"sample": "test.csr",
"sample_signature_nid": 65,
"sample_signature_name": "RSA-SHA1",
"fields":
[
{
"field": "subject_name",
"type": "x509.name",
"dup": True,
"sample_printable": "C=US/CN=example.com/L=Los Angeles/O=SSL Support/OU=SSL Support/ST=California",
},
{
"field": "pubkey",
"type": "pkey",
"sample_printable": '''-----BEGIN PUBLIC KEY-----
<KEY>
-----END PUBLIC KEY-----
''',
},
{
"field": "version",
"type": "number",
"sample_printable": 1,
"set_converter":
'''
-- Note: this is defined by standards (X.509 et al) to be one less than the certificate version.
-- So a version 3 certificate will return 2 and a version 1 certificate will return 0.
toset = toset - 1
''',
"get_converter":
'''
got = tonumber(got) + 1
''',
},
################## extensions ######################
{
"field": "subject_alt_name",
"type": "x509.altname",
"dup": True,
"extension_nid": "subjectAltName",
"sample_printable": 'DNS=example.com',
"get_converter": '''
-- Note: here we only free the stack itself not elements
-- since there seems no way to increase ref count for a GENERAL_NAME
-- we left the elements referenced by the new-dup'ed stack
local got_ref = got
ffi_gc(got_ref, stack_lib.gc_of("GENERAL_NAME"))
got = ffi_cast("GENERAL_NAMES*", got_ref)''',
},
]
}
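# Note: this table appears to drive code generation for the lua-resty-openssl
# x509.csr module and its tests; the *_converter strings above are Lua snippets
# spliced around the generated getters/setters (e.g. version is stored
# zero-based internally, so the converters subtract/add 1).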
|
1624610
|
import numpy as np
import os
from bolero.behavior_search import BlackBoxSearch
from bolero.representation import ConstantBehavior
from bolero.optimizer import NoOptimizer
from bolero.utils.testing import assert_pickle
from nose.tools import assert_false, assert_true, assert_raises_regexp
from numpy.testing import assert_array_equal
def test_black_box_search_requires_optimizer():
class NoOptimizerSubclass(object):
pass
bs = BlackBoxSearch(ConstantBehavior(), NoOptimizerSubclass())
assert_raises_regexp(TypeError, "expects instance of Optimizer",
bs.init, 5, 5)
def test_black_box_search_from_dicts():
beh = {"type": "bolero.representation.ConstantBehavior"}
opt = {"type": "bolero.optimizer.NoOptimizer"}
bs = BlackBoxSearch(beh, opt)
bs.init(5, 5)
# NoOptimizer should be initialized with the parameters from the behavior
assert_array_equal(bs.behavior.get_params(), bs.optimizer.initial_params)
def test_black_box_search_protocol():
n_inputs, n_outputs = 5, 5
bs = BlackBoxSearch(ConstantBehavior(), NoOptimizer())
bs.init(n_inputs, n_outputs)
assert_false(bs.is_behavior_learning_done())
beh = bs.get_next_behavior()
inputs = np.zeros(n_inputs)
beh.set_inputs(inputs)
outputs = np.empty(n_outputs)
beh.get_outputs(outputs)
bs.set_evaluation_feedback(np.array([0.0]))
def test_save_black_box_search():
bs = BlackBoxSearch(ConstantBehavior(), NoOptimizer())
bs.init(5, 5)
assert_pickle("BlackBoxSearch", bs)
path = "." + os.sep
bs.write_results(path)
bs.get_behavior_from_results(path)
filename = path + "BlackBoxSearch.pickle"
assert_true(os.path.exists(filename))
if os.path.exists(filename):
os.remove(filename)
|
1624641
|
import time
import shutil
import dlib
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import transforms
import dnnlib
import legacy
from configs import GENERATOR_CONFIGS
from dlib_utils.face_alignment import image_align
from dlib_utils.landmarks_detector import LandmarksDetector
from torch_utils.misc import copy_params_and_buffers
from pivot_tuning_inversion.utils.ImagesDataset import ImagesDataset, ImageLatentsDataset
from pivot_tuning_inversion.training.coaches.multi_id_coach import MultiIDCoach
class FaceLandmarksDetector:
"""Dlib landmarks detector wrapper
"""
def __init__(
self,
model_path='pretrained/shape_predictor_68_face_landmarks.dat',
tmp_dir='tmp'
):
self.detector = LandmarksDetector(model_path)
self.timestamp = int(time.time())
self.tmp_src = f'{tmp_dir}/{self.timestamp}_src.png'
self.tmp_align = f'{tmp_dir}/{self.timestamp}_align.png'
def __call__(self, imgpath):
shutil.copy(imgpath, self.tmp_src)
try:
face_landmarks = list(self.detector.get_landmarks(self.tmp_src))[0]
assert isinstance(face_landmarks, list)
assert len(face_landmarks) == 68
image_align(self.tmp_src, self.tmp_align, face_landmarks)
        except Exception:  # fall back to the original image when no landmarks are detected
im = PIL.Image.open(self.tmp_src)
im.save(self.tmp_align)
return PIL.Image.open(self.tmp_align).convert('RGB')
class VGGFeatExtractor():
"""VGG16 backbone wrapper
"""
def __init__(self, device):
self.device = device
self.url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
with dnnlib.util.open_url(self.url) as f:
self.module = torch.jit.load(f).eval().to(device)
def __call__(self, img): # PIL
img = self._preprocess(img, self.device)
feat = self.module(img)
return feat # (1, 1000)
def _preprocess(self, img, device):
img = img.resize((256,256), PIL.Image.LANCZOS)
img = np.array(img, dtype=np.uint8)
img = torch.tensor(img.transpose([2,0,1])).unsqueeze(dim=0)
return img.to(device)
class Generator():
"""StyleGAN2 generator wrapper
"""
def __init__(self, ckpt, device):
with dnnlib.util.open_url(ckpt) as f:
old_G = legacy.load_network_pkl(f)['G_ema'].requires_grad_(False).to(device)
resolution = old_G.img_resolution
generator_config = GENERATOR_CONFIGS(resolution=resolution)
self.G_kwargs = generator_config.G_kwargs
self.common_kwargs = generator_config.common_kwargs
self.G = dnnlib.util.construct_class_by_name(**self.G_kwargs, **self.common_kwargs).eval().requires_grad_(False).to(device)
copy_params_and_buffers(old_G, self.G, require_all=False)
del old_G
G = self.G
self.style_layers = [
f'G.synthesis.b{feat_size}.{layer}.affine'
for feat_size in [pow(2,x) for x in range(2, int(np.log2(resolution))+1)]
for layer in ['conv0', 'conv1', 'torgb']]
        del self.style_layers[0]  # the first synthesis block has no conv0 layer
        scope = locals()
        # resolve each dotted layer path string (e.g. 'G.synthesis.b4.conv1.affine')
        # to the actual affine module
        self.to_stylespace = {layer: eval(layer, scope) for layer in self.style_layers}
w_idx_lst = generator_config.w_idx_lst
assert len(self.style_layers) == len(w_idx_lst)
self.to_w_idx = {self.style_layers[i]:w_idx_lst[i] for i in range(len(self.style_layers))}
def mapping(self, z, truncation_psi=0.7, truncation_cutoff=None, skip_w_avg_update=False):
'''random z -> latent w
'''
return self.G.mapping(
z,
None,
truncation_psi=truncation_psi,
truncation_cutoff=truncation_cutoff,
skip_w_avg_update=skip_w_avg_update
)
def mapping_stylespace(self, latent):
'''latent w -> style s
resolution | w_idx | # conv | # torgb | indices
4 | 0 | 1 | 1 | 0-1
8 | 1 | 2 | 1 | 1-3
16 | 3 | 2 | 1 | 3-5
32 | 5 | 2 | 1 | 5-7
64 | 7 | 2 | 1 | 7-9
128 | 9 | 2 | 1 | 9-11
256 | 11 | 2 | 1 | 11-13 # for 256 resolution
512 | 13 | 2 | 1 | 13-15 # for 512 resolution
1024 | 15 | 2 | 1 | 15-17 # for 1024 resolution
'''
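        # Worked example from the table above (assuming a 256x256 generator):
        # b4 contributes conv1/torgb at w indices 0 and 1, b8 contributes
        # conv0/conv1/torgb at indices 1, 2 and 3, continuing up to index 13.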
styles = dict()
for layer in self.style_layers:
module = self.to_stylespace.get(layer)
w_idx = self.to_w_idx.get(layer)
styles[layer] = module(latent.unbind(dim=1)[w_idx])
return styles
def synthesis_from_stylespace(self, latent, styles):
'''style s -> generated image
modulated conv2d, synthesis layer.weight, noise
forward after styles = affine(w)
'''
return self.G.synthesis(latent, styles=styles, noise_mode='const')
def synthesis(self, latent):
'''latent w -> generated image
'''
return self.G.synthesis(latent, noise_mode='const')
class e4eEncoder:
    '''e4e encoder wrapper
    target PIL images -> latent w
    '''
def __init__(self, device):
self.device = device
def __call__(self, target_pils):
dataset = ImagesDataset(
target_pils,
self.device,
transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
coach = MultiIDCoach(dataloader, device=self.device)
latents = list()
for fname, image in dataloader:
latents.append(coach.get_e4e_inversion(image))
latents = torch.cat(latents)
return latents
class PivotTuning:
    '''pivot tuning inversion
    latent pivots + target images -> fine-tuned generator
    mode
        - 'w' : use latent (w) pivot
        - 's' : use style (s) pivot
    '''
def __init__(self, device, G, mode='w'):
assert mode in ['w', 's']
self.device = device
self.G = G
self.mode = mode
self.resolution = G.img_resolution
def __call__(self, latent, target_pils):
dataset = ImageLatentsDataset(
target_pils,
latent,
self.device,
transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])],),
self.resolution,
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
coach = MultiIDCoach(
dataloader,
device=self.device,
generator=self.G,
mode=self.mode
)
# run coach by self.mode
new_G = coach.train_from_latent()
return new_G
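# A minimal end-to-end sketch (paths and checkpoint names are assumptions):
#
#   detector = FaceLandmarksDetector('pretrained/shape_predictor_68_face_landmarks.dat')
#   aligned = [detector('inputs/face.png')]                  # align the input face
#   latents = e4eEncoder(device='cuda')(aligned)             # images -> w pivots
#   generator = Generator('pretrained/stylegan2.pkl', device='cuda')
#   new_G = PivotTuning(device='cuda', G=generator.G, mode='w')(latents, aligned)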
|
1624646
|
from flask import jsonify, request
from meltano.api.api_blueprint import APIBlueprint
from meltano.api.models import db
from meltano.api.security import block_if_api_auth_required
from .embeds_helper import EmbedsHelper, InvalidEmbedToken
embedsBP = APIBlueprint("embeds", __name__, require_authentication=False)
@embedsBP.errorhandler(InvalidEmbedToken)
def _handle(ex):
return (
jsonify(
{
"error": True,
"code": f"No matching resource found or this resource is no longer public.",
}
),
400,
)
@embedsBP.route("/embed/<token>", methods=["GET"])
def get_embed(token):
today = request.args.get("today", None)
embeds_helper = EmbedsHelper()
response_data = embeds_helper.get_embed_from_token(db.session, token, today=today)
return jsonify(response_data)
@embedsBP.route("/embed", methods=["POST"])
@block_if_api_auth_required
def embed():
post_data = request.get_json()
resource_id = post_data["resource_id"]
resource_type = post_data["resource_type"]
today = post_data.get("today", None)
embeds_helper = EmbedsHelper()
response_data = embeds_helper.generate_embed_snippet(
db.session, resource_id, resource_type, today=today
)
return jsonify(response_data)
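# A hypothetical request sketch (field values are placeholders):
#
#   POST /embed   body: {"resource_id": "...", "resource_type": "report"}
#       -> returns a generated embed snippet for the resource
#   GET  /embed/<token>?today=2021-01-01
#       -> resolves the token to the embed's data; unknown tokens yield a 400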
|
1624671
|
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from .. import evaluator, metrics
from ..configuration import *
from .doc2vec_train_doc_prediction import doc2vec_prediction_model
from .doc2vec_train_doc_prediction import DocPredictionDataset
class DocPredictionEval(evaluator.Evaluator):
def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(DocPredictionEval, self).__init__(checkpoints_dir=log_dir,
output_path=os.path.join(log_dir,
dataset.type),
dataset=dataset,
singular_monitored_session_config=config,
infinite_loop=True)
self.best_loss = -1
def model(self, input_vectors, input_gene, input_variation, output_label, batch_size,
embedding_size=EMBEDDINGS_SIZE,
output_classes=9):
logits, targets = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
output_label, batch_size,
is_training=False, embedding_size=embedding_size,
output_classes=output_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
self.global_step = training_util.get_or_create_global_step()
global_step_increase = tf.assign_add(self.global_step, 1)
self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
trainable=False)
self.accumulated_loss = tf.assign_add(self.accumulated_loss, tf.reduce_sum(loss))
self.prediction = tf.nn.softmax(logits)
self.metrics = metrics.single_label(self.prediction, targets, moving_average=False)
steps = tf.cast(global_step_increase, dtype=tf.float32)
tf.summary.scalar('loss', self.accumulated_loss / (steps * batch_size))
return None
def create_graph(self, dataset_tensor, batch_size):
input_vectors, input_gene, input_variation, output_label = dataset_tensor
self.batch_size = batch_size
return self.model(input_vectors, input_gene, input_variation, output_label, batch_size)
def step(self, session, graph_data, summary_op):
self.num_steps, self.final_metrics, self.final_loss, summary = \
session.run([self.global_step, self.metrics, self.accumulated_loss, summary_op])
return summary
def after_create_session(self, session, coord):
super(DocPredictionEval, self).after_create_session(session, coord)
def end(self, session):
super(DocPredictionEval, self).end(session)
cm = self.final_metrics['confusion_matrix']
data_size = self.num_steps * self.batch_size
loss = self.final_loss / data_size
print('Loss: {}'.format(loss))
print('Confusion matrix:')
for r in cm:
print('\t'.join([str(x) for x in r]))
if self.best_loss < 0 or loss < self.best_loss:
self.best_loss = loss
self.copy_checkpoint_as_best()
class DocPredictionInference(evaluator.Evaluator):
def __init__(self, dataset, log_dir=DIR_D2V_DOC_LOGDIR):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(DocPredictionInference, self).__init__(checkpoints_dir=log_dir,
output_path=os.path.join(log_dir,
dataset.type),
dataset=dataset,
singular_monitored_session_config=config,
infinite_loop=False)
def model(self, input_vectors, input_gene, input_variation, batch_size,
embedding_size=EMBEDDINGS_SIZE, output_classes=9):
self.global_step = training_util.get_or_create_global_step()
logits, _ = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
None, batch_size,
is_training=False, embedding_size=embedding_size,
output_classes=output_classes)
global_step_increase = tf.assign_add(self.global_step, 1)
with tf.control_dependencies([global_step_increase]):
self.prediction = tf.nn.softmax(logits)
return self.prediction
def end(self, session):
pass
def create_graph(self, dataset_tensor, batch_size):
input_vectors, input_gene, input_variation, _ = dataset_tensor
return self.model(input_vectors, input_gene, input_variation, batch_size)
def after_create_session(self, session, coord):
super(DocPredictionInference, self).after_create_session(session, coord)
print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
def step(self, session, graph_data, summary_op):
step, predictions = session.run([self.global_step, self.prediction])
predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # smooth so mistakes are penalized less
        total = np.sum(predictions)
        predictions = [p / total for p in predictions]
print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
return None
if __name__ == '__main__':
import logging
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) > 1 and sys.argv[1] == 'val':
# get validation error
evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='val'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'test':
        # get test set predictions
evaluator = DocPredictionInference(dataset=DocPredictionDataset(type='stage2_test'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR, 'val'))
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'train':
        # get training set error
evaluator = DocPredictionEval(dataset=DocPredictionDataset(type='train'),
log_dir=os.path.join(DIR_D2V_DOC_LOGDIR))
evaluator.run()
|
1624673
|
import os
from kluctl.tests.test_base import DeploymentTestBase
from kluctl.utils.yaml_utils import yaml_load_file
cur_dir = os.path.dirname(__file__)
class TestTemplating(DeploymentTestBase):
def get_jinja2_vars(self):
return {
'a': 'a1',
'b': 'b1',
'include_var': 'd1',
}
def test_deployment_yml(self):
with self.build_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as (d, c):
self.assertEqual(len(d.includes), 2)
self.assertListEqual(d.conf['tags'], ['a1', 'a2'])
def test_include_var(self):
with self.build_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as (d, c):
self.assertEqual(d.includes[0].dir, os.path.join(cur_dir, 'test_deployment', 'd1'))
def test_not_rendered_kustomize_resource(self):
with self.render_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'd1/k1/not-rendered.yml'))
self.assertEqual(y['a'], '{{ a }}')
def test_rendered_kustomize_resource(self):
with self.render_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'd1/k1/rendered.yml'))
self.assertEqual(y['a'], 'a1')
def test_load_template(self):
with self.render_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'd1/k1/rendered.yml'))
self.assertEqual(y['b'], 'test a1')
self.assertEqual(y['c'], 'test a1')
def test_rendered_kustomization_yml(self):
with self.render_deployment(os.path.join('templating', 'test_deployment'), self.get_jinja2_vars(), {'a': 'a2'}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'd1/k1/kustomization.yml'))
self.assertListEqual(y['resources'], ['b1'])
def test_import_no_context(self):
with self.render_deployment(os.path.join('templating', 'test_import'), self.get_jinja2_vars(), {}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'k1/rendered.yml'))
self.assertEqual(y['a'], 'a1')
def test_get_var(self):
with self.render_deployment(os.path.join('templating', 'test_utils'), self.get_jinja2_vars(), {}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'k1/get_var.yml'))
self.assertEqual(y['test1'], 'default')
self.assertEqual(y['test2'], 'default')
self.assertEqual(y['test3'], 'a')
def test_vars(self):
with self.render_deployment(os.path.join('templating', 'test_vars'), self.get_jinja2_vars(), {}) as c:
y = yaml_load_file(os.path.join(c.tmpdir, 'k1/test.yml'))
self.assertEqual(y['test1'], 'v1')
self.assertEqual(y['test2'], 'f1')
self.assertEqual(y['test3'], 'v1')
self.assertEqual(y['test4'], 'b')
|
1624687
|
from click.testing import CliRunner
from mock import Mock, patch
from sigopt.cli import cli
class TestClusterCreateCli(object):
def test_cluster_create(self):
services = Mock()
runner = CliRunner()
with \
runner.isolated_filesystem(), \
patch('sigopt.orchestrate.controller.OrchestrateServiceBag', return_value=services):
open("cluster.yml", "w").close()
result = runner.invoke(cli, ["cluster", "create"])
assert result.exit_code == 0
|
1624706
|
import math
from collections import OrderedDict
from functools import partial
import torch.nn as nn
import torch
from torch.nn import Module
from src.models.tresnet.layers.anti_aliasing import AntiAliasDownsampleLayer
from .layers.avg_pool import FastGlobalAvgPool2d
from src.models.tresnet.layers.space_to_depth import SpaceToDepthModule
from .layers.squeeze_and_excite import SEModule
from inplace_abn import InPlaceABN
from inplace_abn import ABN
def InplacABN_to_ABN(module: nn.Module) -> nn.Module:
    # convert all InPlaceABN layers to bit-accurate ABN layers.
if isinstance(module, InPlaceABN):
module_new = ABN(module.num_features, activation=module.activation,
activation_param=module.activation_param)
for key in module.state_dict():
module_new.state_dict()[key].copy_(module.state_dict()[key])
module_new.training = module.training
module_new.weight.data = module_new.weight.abs() + module_new.eps
return module_new
for name, child in reversed(module._modules.items()):
new_child = InplacABN_to_ABN(child)
if new_child != child:
module._modules[name] = new_child
return module
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv3x3_depth(planes, stride=1):
return nn.Conv2d(planes, planes, groups=planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv2d(ni, nf, stride):
return nn.Sequential(
nn.Conv2d(ni, nf, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(nf),
nn.ReLU(inplace=True)
)
def conv2d_ABN(ni, nf, stride, activation="leaky_relu", kernel_size=3, activation_param=1e-2, groups=1):
    activation_param = 1e-6  # NOTE: hard-coded override; the passed-in activation_param is ignored
return nn.Sequential(
nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups,
bias=False),
InPlaceABN(num_features=nf, activation=activation, activation_param=activation_param)
)
class BasicBlock(Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, anti_alias_layer=None):
super(BasicBlock, self).__init__()
if stride == 1:
self.conv1 = conv2d_ABN(inplanes, planes, stride=1, activation_param=1e-3)
else:
if anti_alias_layer is None:
self.conv1 = conv2d_ABN(inplanes, planes, stride=2, activation_param=1e-3)
else:
self.conv1 = nn.Sequential(conv2d_ABN(inplanes, planes, stride=1, activation_param=1e-3),
anti_alias_layer(channels=planes, filt_size=3, stride=2))
self.conv2 = conv2d_ABN(planes, planes, stride=1, activation="identity")
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
reduce_layer_planes = max(planes * self.expansion // 4, 64)
self.se = SEModule(channels=planes * self.expansion, reduction_channels=reduce_layer_planes) if \
use_se else None
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None: out = self.se(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, anti_alias_layer=None):
super(Bottleneck, self).__init__()
self.conv1 = conv2d_ABN(inplanes, planes, kernel_size=1, stride=1, activation="leaky_relu",
activation_param=1e-3)
if stride == 1:
self.conv2 = conv2d_ABN(planes, planes, kernel_size=3, stride=1, activation="leaky_relu",
activation_param=1e-3)
else:
if anti_alias_layer is None:
self.conv2 = conv2d_ABN(planes, planes, kernel_size=3, stride=2, activation="leaky_relu",
activation_param=1e-3)
else:
self.conv2 = nn.Sequential(conv2d_ABN(planes, planes, kernel_size=3, stride=1,
activation="leaky_relu", activation_param=1e-3),
anti_alias_layer(channels=planes, filt_size=3, stride=2))
self.conv3 = conv2d_ABN(planes, planes * self.expansion, kernel_size=1, stride=1,
activation="identity")
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
reduce_layer_planes = max(planes * self.expansion // 8, 64)
self.se = SEModule(planes, reduce_layer_planes) if use_se else None
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None: out = self.se(out)
out = self.conv3(out)
out = out + residual # no inplace
out = self.relu(out)
return out
class TResNetV2(Module):
def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, remove_model_jit=False):
super(TResNetV2, self).__init__()
## body
self.inplanes = int(int(64 * width_factor + 4) / 8) * 8
self.planes = int(int(64 * width_factor + 4) / 8) * 8
SpaceToDepth = SpaceToDepthModule(remove_model_jit=remove_model_jit)
conv1 = conv2d_ABN(in_chans * 16, self.planes, stride=1, kernel_size=3)
anti_alias_layer = partial(AntiAliasDownsampleLayer, remove_aa_jit=remove_model_jit)
global_pool_layer = FastGlobalAvgPool2d(flatten=True)
layer1 = self._make_layer(Bottleneck, self.planes, layers[0], stride=1, use_se=True,
anti_alias_layer=anti_alias_layer) # 56x56
layer2 = self._make_layer(Bottleneck, self.planes * 2, layers[1], stride=2, use_se=True,
anti_alias_layer=anti_alias_layer) # 28x28
layer3 = self._make_layer(Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True,
anti_alias_layer=anti_alias_layer) # 14x14
layer4 = self._make_layer(Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False,
anti_alias_layer=anti_alias_layer) # 7x7
self.body = nn.Sequential(OrderedDict([
('SpaceToDepth', SpaceToDepth),
('conv1', conv1),
('layer1', layer1),
('layer2', layer2),
('layer3', layer3),
('layer4', layer4)]))
# default head
self.num_features = (self.planes * 8) * Bottleneck.expansion
        fc = nn.Linear(self.num_features, num_classes)
self.global_pool = nn.Sequential(OrderedDict([('global_pool_layer', global_pool_layer)]))
self.head = nn.Sequential(OrderedDict([('fc', fc)]))
self.embeddings = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InPlaceABN):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
        # zero-init the last BN weight in each residual block (standard ResNet trick)
for m in self.modules():
if isinstance(m, BasicBlock):
m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero
if isinstance(m, Bottleneck):
m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero
if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01)
def _make_layer(self, block, planes, blocks, stride=1, use_se=True, anti_alias_layer=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
layers = []
if stride == 2:
# avg pooling before 1x1 conv
layers.append(
nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
layers += [
conv2d_ABN(self.inplanes, planes * block.expansion, kernel_size=1, stride=1,
activation="identity")]
downsample = nn.Sequential(*layers)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_se=use_se,
anti_alias_layer=anti_alias_layer))
self.inplanes = planes * block.expansion
for i in range(1, blocks): layers.append(
block(self.inplanes, planes, use_se=use_se, anti_alias_layer=anti_alias_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.body(x)
# self.embeddings = self.global_pool(x)
logits = self.head(self.global_pool(x))
return logits
def TResnetL_V2(model_params):
"""Constructs a large TResnet model.
"""
in_chans = 3
num_classes = model_params['num_classes']
remove_model_jit = False
layers_list = [3, 4, 23, 3]
width_factor = 1.0
model = TResNetV2(layers=layers_list, num_classes=num_classes, in_chans=in_chans,
width_factor=width_factor, remove_model_jit=remove_model_jit)
return model
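# A minimal construction sketch (assumes inplace_abn and its CUDA ops are
# installed; input is a standard image batch, which SpaceToDepth expands to
# in_chans*16 channels before conv1):
#
#   model = TResnetL_V2({'num_classes': 1000}).eval()
#   logits = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)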
|
1624775
|
from delorean import Delorean
from pytz import all_timezones
class UtcDatetime(object):
"""Class for manage datetimes. The timezone is always internally in UTC. Its used for unify serialization
and ease complexity of manage datetimes with timezones. Always use this class for datetime management"""
def __init__(self, the_datetime, the_timezone):
"""the_datetime must be a datetime.datetime
the_timezone is a String identifing timezone: EX: 'Europe/Madrid' 'UTC' See: all_timezones"""
the_timezone = self._get_timezone_parameter(the_timezone)
self._delorean = Delorean(datetime=the_datetime, timezone=the_timezone).shift("UTC")
        self._delorean.truncate('second')  # Truncate to seconds; that is the precision we serialize at
@staticmethod
def get_all_timezones():
"""All timezones available"""
return all_timezones
@staticmethod
def get_current_utc_datetime():
"""Always call this method to get the current datetime.
Return UrcDateTime"""
delorean = Delorean()
return UtcDatetime(delorean.datetime, "UTC")
def datetime_in_timezone(self, the_timezone):
"""Gets the UTC timezone """
the_timezone = self._get_timezone_parameter(the_timezone)
tmp_delorean = Delorean(datetime=self._delorean.datetime)
return tmp_delorean.shift(the_timezone).datetime
def advance_in_time(self, periodicity):
"""Give us the future UtcDatetime traveling in time adding periodicity to self date"""
if periodicity.period == "YEAR":
return UtcDatetime(self._delorean.next_year(periodicity.frequency).datetime, "UTC")
elif periodicity.period == "MONTH":
return UtcDatetime(self._delorean.next_month(periodicity.frequency).datetime, "UTC")
elif periodicity.period == "DAY":
return UtcDatetime(self._delorean.next_day(periodicity.frequency).datetime, "UTC")
def back_in_time(self, periodicity):
"""Give us the past UtcDatetime traveling in time substracting periodicity to self date"""
if periodicity.period == "YEAR":
return UtcDatetime(self._delorean.last_year(periodicity.frequency).datetime, "UTC")
elif periodicity.period == "MONTH":
return UtcDatetime(self._delorean.last_month(periodicity.frequency).datetime, "UTC")
elif periodicity.period == "DAY":
return UtcDatetime(self._delorean.last_day(periodicity.frequency).datetime, "UTC")
@property
def to_iso8601(self):
return self.datetime_utc.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
@property
def datetime_utc(self):
"""datetime object in UTC"""
return self._delorean.datetime
@property
def date_utc(self):
"""date object in UTC"""
return self._delorean.date
@staticmethod
def deserialize(data):
"""deserialize model"""
if data is None:
return None
return UtcDatetime(data, "UTC")
def serialize(self):
"""serialize model
NOTE: It serialize for pymongo datetime compatibility not for a string based interface like REST"""
return self._delorean.datetime
def __eq__(self, other):
"""equal method"""
if self is other:
return True
return isinstance(other, self.__class__) \
and self._delorean == other._delorean
def __ne__(self, other):
"""not equal method"""
return not self.__eq__(other)
def __lt__(self, other):
"""< operation"""
return self._delorean.datetime < other._delorean.datetime
def __le__(self, other):
"""<= operation"""
return self._delorean.datetime <= other._delorean.datetime
def __gt__(self, other):
"""> operation"""
return self._delorean.datetime > other._delorean.datetime
def __ge__(self, other):
""">= operation"""
return self._delorean.datetime >= other._delorean.datetime
def __sub__(self, other):
"""Returns a timedelta"""
return self._delorean.datetime - other._delorean.datetime
def _get_timezone_parameter(self, the_timezone):
"""Gets a valid timezone parameter or raise"""
if the_timezone == "PST": # Very common
return "PST8PDT"
if the_timezone not in self.get_all_timezones():
raise ValueError("%s is not a valid timezone" % the_timezone)
return the_timezone
def __repr__(self):
return "%s %s" % (self._delorean.naive(), self._delorean.timezone())
|
1624794
|
from __future__ import (
annotations,
)
from collections.abc import (
MutableSet,
)
from typing import (
Any,
Callable,
Generic,
Iterable,
Iterator,
Optional,
TypeVar,
get_args,
)
from minos.common import (
DataDecoder,
DataEncoder,
DeclarativeModel,
ModelType,
SchemaDecoder,
SchemaEncoder,
)
from .actions import (
Action,
)
T = TypeVar("T")
class IncrementalSet(DeclarativeModel, MutableSet, Generic[T]):
"""Incremental Set class."""
data: set[T]
def __init__(self, data: Optional[Iterable[T]] = None, *args, **kwargs):
if data is None:
data = set()
elif not isinstance(data, set):
data = {value_obj for value_obj in data}
super().__init__(data, *args, **kwargs)
def add(self, value_object: T) -> None:
"""Add a value object.
:param value_object: The value object to be added.
:return: This method does not return anything.
"""
self.data.add(value_object)
def discard(self, value_object: T) -> None:
"""Remove a value object.
:param value_object: The value object to be added.
:return: This method does not return anything.
"""
self.data.discard(value_object)
def __contains__(self, value_object: T) -> bool:
return value_object in self.data
def __len__(self) -> int:
return len(self.data)
def __iter__(self) -> Iterator[T]:
yield from self.data
def __eq__(self, other: T) -> bool:
if isinstance(other, IncrementalSet):
return super().__eq__(other)
return set(self) == other
def diff(self, another: IncrementalSet[T]) -> IncrementalSetDiff:
"""Compute the difference between self and another entity set.
:param another: Another entity set instance.
:return: The difference between both entity sets.
"""
return IncrementalSetDiff.from_difference(self, another)
@property
def data_cls(self) -> Optional[type]:
"""Get data class if available.
:return: A model type.
"""
args = get_args(self.type_hints["data"])
return args[0]
# noinspection PyMethodParameters
@classmethod
def encode_schema(cls, encoder: SchemaEncoder, target: Any, **kwargs) -> Any:
"""Encode schema with the given encoder.
:param encoder: The encoder instance.
:param target: An optional pre-encoded schema.
:return: The encoded schema of the instance.
"""
schema = encoder.build(target.type_hints["data"], **kwargs)
return schema | {"logicalType": cls.classname}
@classmethod
def decode_schema(cls, decoder: SchemaDecoder, target: Any, **kwargs) -> ModelType:
"""Decode schema with the given encoder.
:param decoder: The decoder instance.
:param target: The schema to be decoded.
:return: The decoded schema as a type.
"""
decoded = decoder.build(target, **kwargs)
return ModelType.from_model(cls[get_args(decoded)[-1]])
@staticmethod
def encode_data(encoder: DataEncoder, target: Any, **kwargs) -> Any:
"""Encode data with the given encoder.
:param encoder: The encoder instance.
:param target: An optional pre-encoded data.
:return: The encoded data of the instance.
"""
return encoder.build(target["data"], **kwargs)
@classmethod
def decode_data(cls, decoder: DataDecoder, target: Any, type_: ModelType, **kwargs) -> IncrementalSet:
"""Decode data with the given decoder.
:param decoder: The decoder instance.
:param target: The data to be decoded.
:param type_: The data type.
:return: A decoded instance.
"""
decoded = decoder.build(target, type_.type_hints["data"], **kwargs)
return cls(decoded, additional_type_hints=type_.type_hints)
IncrementalSetDiffEntry = ModelType.build("SetDiffEntry", {"action": Action, "entity": Any})
class IncrementalSetDiff(DeclarativeModel):
"""Incremental Set Diff class."""
diffs: list[IncrementalSetDiffEntry]
@classmethod
def from_difference(
cls, new: IncrementalSet[T], old: IncrementalSet[T], get_fn: Optional[Callable[[T], Any]] = None
) -> IncrementalSetDiff:
"""Build a new instance from two entity sets.
:param new: The new entity set.
:param old: The old entity set.
:param get_fn: Optional function to get entries from the set by identifier.
:return: The difference between new and old.
"""
differences = cls._diff(new, old, get_fn)
return cls(differences)
@staticmethod
def _diff(new: IncrementalSet[T], old: IncrementalSet[T], get_fn) -> list[IncrementalSetDiffEntry]:
result = list()
for value in new - old:
entry = IncrementalSetDiffEntry(Action.CREATE, value)
result.append(entry)
for value in old - new:
entry = IncrementalSetDiffEntry(Action.DELETE, value)
result.append(entry)
if get_fn is not None:
            for value in old & new:
                # look each common entry up by identifier so in-place updates are detected
                if value == get_fn(value.uuid):
                    continue
continue
entry = IncrementalSetDiffEntry(Action.UPDATE, value)
result.append(entry)
return result
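# A minimal diff sketch (assumes the minos runtime can build IncrementalSet[int]):
#
#   old, new = IncrementalSet({1, 2, 3}), IncrementalSet({2, 3, 4})
#   diff = new.diff(old)   # entries: (Action.CREATE, 4) and (Action.DELETE, 1)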
|
1624822
|
import pytest
@pytest.mark.nightly
@pytest.mark.github_only
def test_run_stub_test():
"""
Empty test just to make sure our nightly job has something to do
"""
pass
|
1624831
|
import ray
from mprl.utility_services.cloud_storage import connect_storage_client
from mprl.utility_services.worker import ConsoleManagerInterface
from mprl.utility_services.lock_server.lock_client_interface import LockServerInterface
from mprl.utility_services.payoff_table import PayoffTable, PolicySpec
import logging
import time
import dill
logger = logging.getLogger(__name__)
import copy
from termcolor import colored
def _check_consecutive_numbers(int_list, should_start_at=None):
if len(int_list) == 0:
return True
if should_start_at is not None and int_list[0] != should_start_at:
return False
prev_num = int_list[0]
for elem in int_list[1:]:
if elem != prev_num + 1:
return False
prev_num = elem
return True
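# e.g. _check_consecutive_numbers([0, 1, 2], should_start_at=0) -> True
#      _check_consecutive_numbers([0, 2, 3], should_start_at=0) -> False (gap after 0)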
def _check_only_latest_policies_are_active(policies_active_states):
should_all_be_active_now = False
for is_active in policies_active_states:
if is_active and not should_all_be_active_now:
should_all_be_active_now = True
if should_all_be_active_now and not is_active:
return False
return True
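# e.g. [False, False, True, True] -> True (only the newest policies are active)
#      [True, False] -> False (a finished policy follows an active one)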
@ray.remote(num_cpus=0)
class LivePolicyPayoffTracker(object):
def __init__(self,
minio_bucket,
manager_host,
manager_port,
lock_server_host,
lock_server_port,
worker_id,
policy_class_name,
policy_config_key,
provide_payoff_barrier_sync=False):
worker_id = f"live_pop_tracker_{worker_id[worker_id.find('pid'):]}"
self._storage_client = connect_storage_client()
self._minio_bucket = minio_bucket
self._manager_interface = ConsoleManagerInterface(server_host=manager_host,
port=manager_port,
worker_id=worker_id,
storage_client=self._storage_client,
minio_bucket_name=self._minio_bucket)
self._lock_interface = LockServerInterface(server_host=lock_server_host,
port=lock_server_port,
worker_id=worker_id)
self._policy_class_name = policy_class_name
self._policy_config_key = policy_config_key
self._claimed_policy_num = None
self._claim_new_active_policy()
assert self._claimed_policy_num is not None
self._locally_cached_matchup_results = {}
self._provide_payoff_barrier_sync = provide_payoff_barrier_sync
if self._provide_payoff_barrier_sync:
self._wait_at_payoff_table_barrier_fn, self._leave_barrier_group_fn = self._lock_interface.join_barrier_group(
barrier_name="pt_barrier",
member_name=str(self._claimed_policy_num),
grace_period_for_others_to_join_s=20.0)
else:
self._wait_at_payoff_table_barrier_fn = None
self._leave_barrier_group_fn = None
@ray.method(num_return_vals=1)
def wait_at_barrier_for_other_learners(self):
assert self._provide_payoff_barrier_sync
self._wait_at_payoff_table_barrier_fn()
return True
@ray.method(num_return_vals=1)
def set_latest_key_for_claimed_policy(self, new_key, request_locks_checkpoint_with_name=None):
assert self._claimed_policy_num is not None
prefix = f"policy {self._claimed_policy_num} latest key: "
new_lock = prefix + new_key
policy_key_locks = self._lock_interface.get_all_items(filter_by_string=prefix)
if len(policy_key_locks) > 0:
assert len(policy_key_locks) == 1
old_lock = policy_key_locks[0]
assert self._lock_interface.replace_item(old_item=old_lock,
new_item=new_lock,
new_item_remains_after_disconnect=True,
request_locks_checkpoint_with_name=request_locks_checkpoint_with_name)
print(colored(f"Policy {self._claimed_policy_num}: Set new latest key for claimed policy (replaced old one): \"{new_lock}\"", 'green'))
else:
assert self._lock_interface.try_to_reserve_item(item_name=new_lock,
remain_after_worker_disconnect=True,
request_locks_checkpoint_with_name=request_locks_checkpoint_with_name)
print(colored(f"Policy {self._claimed_policy_num}: Set new latest key for claimed policy: \"{new_lock}\"", "green"))
return True
@ray.method(num_return_vals=1)
def set_claimed_policy_as_finished(self):
old_lock = f"policy_status: {self._claimed_policy_num} active"
new_lock = f"policy_status: {self._claimed_policy_num} finished"
assert self._lock_interface.replace_item(old_item=old_lock,
new_item=new_lock,
new_item_remains_after_disconnect=True)
print(colored(f"Policy {self._claimed_policy_num}: Set claimed policy as finished: \"{new_lock}\"", "green"))
if self._leave_barrier_group_fn is not None:
self._leave_barrier_group_fn()
return True
@ray.method(num_return_vals=2)
def get_live_payoff_table_dill_pickled(self, first_wait_for_n_seconds=None):
if first_wait_for_n_seconds is not None:
time.sleep(first_wait_for_n_seconds)
base_payoff_table, _ = self._manager_interface.get_latest_payoff_table(infinite_retry_on_error=False)
if base_payoff_table is None:
base_payoff_table = PayoffTable()
base_payoff_table: PayoffTable = base_payoff_table
active_policy_numbers, finished_policy_numbers, total_policy_numbers = self.get_active_and_finished_policy_numbers()
assert len(active_policy_numbers) + len(finished_policy_numbers) == total_policy_numbers
are_all_lower_policies_finished = len(active_policy_numbers) == 0
print(colored(f"Policy {self._claimed_policy_num}: There are {total_policy_numbers} policies below this learner. "
f"(Active policies below {self._claimed_policy_num} are {active_policy_numbers}. "
f"Frozen policies below {self._claimed_policy_num} are {finished_policy_numbers}).", "white"))
if total_policy_numbers == 0:
return None, are_all_lower_policies_finished
assert base_payoff_table.size() <= len(finished_policy_numbers) or base_payoff_table.size() == 1
missing_policy_nums = list(range(base_payoff_table.size(), total_policy_numbers))
for missing_policy_num in missing_policy_nums:
missing_key = self._get_latest_key_for_policy_number(policy_num=missing_policy_num)
if missing_key is None:
time.sleep(5)
missing_key = self._get_latest_key_for_policy_number(policy_num=missing_policy_num)
if missing_key is not None:
base_payoff_table.add_policy(new_policy_key=missing_key,
new_policy_class_name=self._policy_class_name,
new_policy_config_file_key=self._policy_config_key,
new_policy_tags=['locally_tracked'])
required_evals_observed = set()
required_evals_finalized = set()
while True:
matchup_order = base_payoff_table.get_eval_matchup_order()
if matchup_order is None:
break
if matchup_order not in required_evals_finalized:
as_policy_key, against_policy_key = matchup_order
payoff, games_played = self._check_eval_cache(as_policy_key=as_policy_key, against_policy_key=against_policy_key)
if payoff is None:
payoff, games_played = self._manager_interface.request_eval_result(
as_policy_key=as_policy_key,
as_policy_config_key=self._policy_config_key,
as_policy_class_name=self._policy_class_name,
against_policy_key=against_policy_key,
against_policy_config_key=self._policy_config_key,
against_policy_class_name=self._policy_class_name,
perform_eval_if_not_cached=matchup_order not in required_evals_observed,
infinite_retry_on_error=False)
if payoff is not None and matchup_order not in required_evals_observed:
print(f"{colored(f'Policy {self._claimed_policy_num}: !!!! GOT A CACHE HIT FROM THE MANAGER !!!!','yellow')}\n"
f"{colored(f'for {as_policy_key} vs {against_policy_key}', 'yellow')}")
if payoff is None and matchup_order in required_evals_observed:
print(colored(f"Policy {self._claimed_policy_num}: Waiting to get eval result for {as_policy_key} vs {against_policy_key}", "yellow"))
time.sleep(2)
if payoff is not None:
self._add_to_eval_cache_if_not_already_entered(as_policy_key=as_policy_key, against_policy_key=against_policy_key,
payoff=payoff, games_played=games_played)
base_payoff_table.add_eval_result(as_policy_key=as_policy_key,
against_policy_key=against_policy_key,
payoff=payoff,
games_played=games_played)
required_evals_finalized.add(matchup_order)
required_evals_observed.add(matchup_order)
assert len(required_evals_observed) >= len(required_evals_finalized)
assert base_payoff_table.get_num_pending_policies() == 0, f"amount is {base_payoff_table.get_num_pending_policies()}"
assert base_payoff_table.size() == total_policy_numbers
return base_payoff_table.to_dill(), are_all_lower_policies_finished
@ray.method(num_return_vals=1)
def are_all_lower_policies_finished(self):
active_policy_numbers, finished_policy_numbers, total_policy_numbers = self.get_active_and_finished_policy_numbers()
assert len(active_policy_numbers) + len(finished_policy_numbers) == total_policy_numbers
return len(active_policy_numbers) == 0
@ray.method(num_return_vals=1)
def get_claimed_policy_num(self):
return self._claimed_policy_num
def get_active_and_finished_policy_numbers(self):
start_time = time.time()
while True:
policy_status_locks = self._lock_interface.get_all_items(filter_by_string="policy_status: ")
if len(policy_status_locks) == 0:
return [], [], 0
_, all_policy_numbers, all_policy_statuses = map(list, zip(*[item.split(" ") for item in policy_status_locks]))
assert all(stat == "active" or stat == "finished" for stat in all_policy_statuses)
num_policies_to_consider = self._claimed_policy_num if self._claimed_policy_num is not None else len(all_policy_numbers)
policy_numbers = [None] * num_policies_to_consider
policies_active_states = [None] * num_policies_to_consider
for policy_num, policy_status in zip(all_policy_numbers, all_policy_statuses):
policy_num = int(policy_num)
if self._claimed_policy_num is None or policy_num < self._claimed_policy_num:
policy_numbers[policy_num] = policy_num
policies_active_states[policy_num] = (policy_status == "active")
if not all(p is not None for p in policy_numbers):
if time.time() - start_time > 60:
raise ValueError(colored(f"policy_numbers (some are None): {policy_numbers}", "red"))
print(colored(f"policy_numbers (some are None), trying again: {policy_numbers}", "red"))
time.sleep(0.5)
continue
assert all(p is not None for p in policies_active_states)
assert _check_consecutive_numbers(int_list=policy_numbers, should_start_at=0), f"policy_numbers is {policy_numbers}, all policy status locks are {policy_status_locks}"
assert _check_only_latest_policies_are_active(policies_active_states=policies_active_states)
break
active_policy_numbers = []
finished_policy_numbers = []
for i, policy_number in enumerate(policy_numbers):
if policies_active_states[i]:
active_policy_numbers.append(policy_number)
else:
finished_policy_numbers.append(policy_number)
total_policy_numbers = len(policy_numbers)
return active_policy_numbers, finished_policy_numbers, total_policy_numbers
def _claim_new_active_policy(self):
if self._claimed_policy_num is not None:
raise ValueError(f"This interface has already claimed policy {self._claimed_policy_num}")
_, _, total_policy_numbers = self.get_active_and_finished_policy_numbers()
claimed_policy_key = self._lock_interface.try_to_reserve_item_from_list(
possible_item_names_in_order_of_highest_priority_first=[f"policy_status: {i} active" for i in range(total_policy_numbers, total_policy_numbers+100)])
claimed_policy_num = int(claimed_policy_key.replace('policy_status: ','').replace(' active',''))
assert claimed_policy_num is not None
print(colored(f"Claimed Policy {claimed_policy_num}", "green"))
self._claimed_policy_num = claimed_policy_num
return claimed_policy_num
def _get_latest_key_for_policy_number(self, policy_num):
prefix = f"policy {policy_num} latest key: "
policy_key_locks = self._lock_interface.get_all_items(filter_by_string=prefix)
if len(policy_key_locks) == 0:
return None
assert len(policy_key_locks) == 1
policy_key = policy_key_locks[0][len(prefix):]
return policy_key
def _check_eval_cache(self, as_policy_key, against_policy_key):
payoff, games_played = None, None
try:
payoff, games_played = self._locally_cached_matchup_results[as_policy_key][against_policy_key]
print(colored(f"Eval Cache Hit for \"{as_policy_key}\" vs \"{against_policy_key}\"", "green"))
except KeyError:
try:
payoff, games_played = self._locally_cached_matchup_results[against_policy_key][as_policy_key]
payoff = -payoff
print(colored(f"Eval Cache Hit for \"{against_policy_key}\" vs \"{as_policy_key}\"", "green"))
except KeyError:
pass
return payoff, games_played
def _add_to_eval_cache_if_not_already_entered(self, as_policy_key, against_policy_key, payoff, games_played):
old_payoff, _ = self._check_eval_cache(as_policy_key=as_policy_key, against_policy_key=against_policy_key)
if old_payoff is not None:
return
if as_policy_key not in self._locally_cached_matchup_results:
self._locally_cached_matchup_results[as_policy_key] = {}
if against_policy_key not in self._locally_cached_matchup_results[as_policy_key]:
self._locally_cached_matchup_results[as_policy_key][against_policy_key] = (payoff, games_played)
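# A hypothetical instantiation sketch (hosts/ports and names are placeholders;
# requires a running manager, lock server and MinIO bucket):
#
#   tracker = LivePolicyPayoffTracker.remote(
#       minio_bucket="payoffs", manager_host="localhost", manager_port=2727,
#       lock_server_host="localhost", lock_server_port=2828,
#       worker_id="learner_pid123", policy_class_name="SACPolicy",
#       policy_config_key="configs/sac.yaml")
#   policy_num = ray.get(tracker.get_claimed_policy_num.remote())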
|
1624832
|
import os, sys
import numpy as np
import torch.backends.cudnn as cudnn
import torch
from tqdm import tqdm
import argparse
import cv2
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pixielib.pixie import PIXIE
from pixielib.visualizer import Visualizer
from pixielib.datasets.hand_datasets import TestData
from pixielib.utils import util
from pixielib.utils.config import cfg as pixie_cfg
def main(args):
savefolder = args.savefolder
device = args.device
os.makedirs(savefolder, exist_ok=True)
# check env
if not torch.cuda.is_available():
print('CUDA is not available! use CPU instead')
else:
cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.enabled = True
# load test images
    assert not args.iscrop, 'currently no hand detector is available, please crop the hand first and set iscrop to False'
testdata = TestData(args.inputpath, iscrop=args.iscrop)
#-- run PIXIE
pixie_cfg.model.use_tex = args.useTex
pixie = PIXIE(config = pixie_cfg, device=device)
visualizer = Visualizer(render_size=args.render_size, config = pixie_cfg, device=device, part='hand', rasterizer_type=args.rasterizer_type)
for i, batch in enumerate(tqdm(testdata, dynamic_ncols=True)):
util.move_dict_to_device(batch, device)
batch['image'] = batch['image'].unsqueeze(0)
name = batch['name']
data = {
'hand': batch
}
        param_dict = pixie.encode(data, threthold=True, keep_local=False)  # 'threthold' (sic) matches the PIXIE encode() keyword
codedict = param_dict['hand']
opdict = pixie.decode(codedict, param_type='hand')
opdict['albedo'] = visualizer.tex_flame2smplx(opdict['albedo'])
if args.saveObj or args.saveParam or args.savePred or args.saveImages:
os.makedirs(os.path.join(savefolder, name), exist_ok=True)
# -- save results
if args.saveVis:
visdict = visualizer.render_results(opdict, data['hand']['image'], overlay=True)
cv2.imwrite(os.path.join(savefolder, f'{name}_vis.jpg'), visualizer.visualize_grid(visdict, size=args.render_size))
if args.saveObj:
visualizer.save_obj(os.path.join(savefolder, name, f'{name}.obj'), opdict)
if args.saveParam:
util.save_pkl(os.path.join(savefolder, name, f'{name}_param.pkl'), codedict)
if args.savePred:
util.save_pkl(os.path.join(savefolder, name, f'{name}_prediction.pkl'), opdict)
        if args.saveImages:
            if not args.saveVis:
                # visdict is otherwise only rendered under --saveVis
                visdict = visualizer.render_results(opdict, data['hand']['image'], overlay=True)
            for vis_name in visdict.keys():
cv2.imwrite(os.path.join(savefolder, name, f'{name}_{vis_name}.jpg'), util.tensor2image(visdict[vis_name][0]))
print(f'-- please check the results in {savefolder}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PIXIE')
parser.add_argument('-i', '--inputpath', default='TestSamples/hand', type=str,
help='path to the test data, can be image folder, image path, image list, video')
parser.add_argument('-s', '--savefolder', default='TestSamples/hand/results', type=str,
                        help='path to the output directory, where results (obj, txt files) will be stored.')
parser.add_argument('--device', default='cuda:0', type=str,
help='set device, cpu for using cpu' )
# process test images
parser.add_argument('--iscrop', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to crop input image, only support False now' )
# rendering option
parser.add_argument('--render_size', default=224, type=int,
help='image size of renderings' )
parser.add_argument('--rasterizer_type', default='standard', type=str,
help='rasterizer type: pytorch3d or standard rasterizer' )
# save
parser.add_argument('--useTex', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to use FLAME texture model to generate uv texture map, \
set it to True only if you downloaded texture model' )
parser.add_argument('--uvtex_type', default='SMPLX', type=str,
help='texture type to save, can be SMPLX or FLAME')
parser.add_argument('--saveVis', default=True, type=lambda x: x.lower() in ['true', '1'],
help='whether to save visualization of output' )
parser.add_argument('--saveObj', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save outputs as .obj, \
Note that saving objs could be slow' )
parser.add_argument('--saveParam', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save parameters as pkl file' )
parser.add_argument('--savePred', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save smplx prediction as pkl file' )
    parser.add_argument('--saveImages', default=False, type=lambda x: x.lower() in ['true', '1'],
                        help='whether to save visualization output as separate images')
main(parser.parse_args())
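# Example invocation (script name and sample paths are assumptions; images must
# already be cropped to the hand since --iscrop is unsupported):
#
#   python demo_fit_hand.py -i TestSamples/hand -s TestSamples/hand/results --saveObj True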
|
1624835
|
import sys, struct, random, string, meterpreter_bindings
# A stack of this stuff was stolen from the Python Meterpreter. We should look
# to find a nice way of sharing this across the two without the duplication.
#
# START OF COPY PASTE
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_COMMAND_ID = TLV_META_TYPE_UINT | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_HEADERS = TLV_META_TYPE_STRING | 441
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 442
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
NULL_BYTE = '\x00'
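# Python 2 compatibility shims: this copy targets Python 2, where `str` is the
# byte type, so `is_bytes` deliberately tests against `str` and `bytes()` wraps `str()`.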
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
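# Example (sketch): pack a request-id TLV; the result is a big-endian
# (length, type) header followed by the NULL-terminated string value:
#   tlv_pack(TLV_TYPE_REQUEST_ID, '12345')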
def packet_enum_tlvs(pkt, tlv_type = None):
offset = 0
while (offset < len(pkt)):
tlv = struct.unpack('>II', pkt[offset:offset+8])
if (tlv_type == None) or ((tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type):
val = pkt[offset+8:(offset+8+(tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type':tlv[1], 'length':tlv[0], 'value':val}
offset += tlv[0]
    return  # generator simply ends; raising StopIteration here is a RuntimeError under PEP 479 (Python 3.7+)
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
def packet_get_tlv_default(pkt, tlv_type, default):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {'value': default}
return tlv
# END OF COPY PASTE
def validate_binding(required):
"""Makes sure that the current set of bindings that is available
in Meterpreter's bindings list contains that required by the caller.
This function returns the correct binding name to call."""
# assume all core commands are valid
if required < 1000:
required = 'meterpreter_core'
else:
required = 'command_{0}'.format(required)
if not required in set(dir(meterpreter_bindings)):
raise Exception('Missing bindings: {0} (is a dependent extension not yet loaded?)'.format(required))
return required
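# For example (assuming the relevant bindings have been loaded):
#   validate_binding(1)    -> 'meterpreter_core'  (core command ids are < 1000)
#   validate_binding(1001) -> 'command_1001'      (extension command id)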
def invoke_meterpreter(command_id, is_local, tlv = ""):
binding = validate_binding(command_id)
header = struct.pack('>I', PACKET_TYPE_REQUEST)
header += tlv_pack(TLV_TYPE_COMMAND_ID, command_id)
header += tlv_pack(TLV_TYPE_REQUEST_ID, 0)
# add a leading 4-byte "zero" for the xor-key, 16 byte null guid, 4 byte encryption flag
req = '\x00' * 24
req += struct.pack('>I', len(header) + len(tlv) + 4)
req += header + tlv
return getattr(meterpreter_bindings, binding)(is_local, req)
def rnd_string(n):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(n))
|
1624845
|
from veroviz._common import *
from veroviz._validation import valCreateArcsFromLocSeq
from veroviz._validation import valCreateArcsFromNodeSeq
from veroviz._createEntitiesFromList import privCreateArcsFromLocSeq
def createArcsFromLocSeq(locSeq=None, initArcs=None, startArc=1, objectID=None, leafletColor=config['VRV_DEFAULT_LEAFLETARCCOLOR'], leafletWeight=config['VRV_DEFAULT_LEAFLETARCWEIGHT'], leafletStyle=config['VRV_DEFAULT_LEAFLETARCSTYLE'], leafletOpacity=config['VRV_DEFAULT_LEAFLETARCOPACITY'], leafletCurveType=config['VRV_DEFAULT_ARCCURVETYPE'], leafletCurvature=config['VRV_DEFAULT_ARCCURVATURE'], useArrows=True, cesiumColor=config['VRV_DEFAULT_CESIUMPATHCOLOR'], cesiumWeight=config['VRV_DEFAULT_CESIUMPATHWEIGHT'], cesiumStyle=config['VRV_DEFAULT_CESIUMPATHSTYLE'], cesiumOpacity=config['VRV_DEFAULT_CESIUMPATHOPACITY'], popupText=None):
"""
Create an "arcs" dataframe from an ordered list of coordinates.
Parameters
----------
locSeq: list of lists, Required, default as None
        An ordered list of locations that will be converted into an :ref:`Arcs` dataframe. The list should be formatted as [[lat1, lon1], [lat2, lon2], ..., [latn, lonn]].
initArcs: :ref:`Arcs`, Optional, default as None
An :ref:`Arcs` dataframe. If provided, the arcs to be created will be appended to this dataframe.
startArc: int, Optional, default as 1
Specifies the starting index number for the arcs. This will be reflected in the `odID` column of the resulting :ref:`Arcs` dataframe. If `startArc` is less than the minimum value of the `odID` found in the dataframe specified by `initArcs`, the value of `startArc` will be ignored in favor of the smallest integer greater than the maximum existing `odID` value.
objectID: int/string, Optional, default as None
A descriptive name or index for a particular vehicle or object (e.g., 'truck 1', or 'red car').
leafletColor: string, Optional, default as "orange"
The color of the arcs when displayed in Leaflet. See :ref:`Leaflet style` for a list of available colors.
leafletWeight: int, Optional, default as 3
The pixel width of the arcs when displayed in Leaflet.
leafletStyle: string, Optional, default as 'solid'
The line style of the arcs when displayed in Leaflet. Valid options are 'solid', 'dotted', and 'dashed'. See :ref:`Leaflet style` for more information.
leafletOpacity: float in [0, 1], Optional, default as 0.8
The opacity of the arcs when displayed in Leaflet. Valid values are in the range from 0 (invisible) to 1 (no transparency).
leafletCurveType: string, Optional, default as 'straight'
        The type of curve to be shown on the Leaflet map for :ref:`Arcs` dataframes (curves will not be applied to :ref:`Assignments` dataframes). The options are 'Bezier', 'greatcircle', and 'straight'. If 'Bezier' is provided, leafletCurvature is also required. If 'greatcircle' is provided, the arc follows the curvature of the Earth.
leafletCurvature: float in (-90, 90), Conditional, default as 45
If leafletCurveType is 'Bezier', then leafletCurvature is required; otherwise this argument will not be used. The curvature specifies the angle between a straight line connecting the two nodes and the curved arc emanating from those two nodes. Therefore, this value should be in the open interval (-90, 90), although values in the (-45, 45) range tend to work best.
useArrows: bool, Optional, default as True
Indicates whether arrows should be shown on the arcs when displayed in Leaflet.
cesiumColor: string, Optional, default as "orange"
The color of the arcs when displayed in Cesium. See :ref:`Cesium Style` for a list of available colors.
cesiumWeight: int, Optional, default as 3
The pixel width of the arcs when displayed in Cesium.
cesiumStyle: string, Optional, default as 'solid'
The line style of the arcs when displayed in Cesium. Valid options are 'solid', 'dotted', and 'dashed'. See :ref:`Cesium Style` for more information.
cesiumOpacity: float in [0, 1], Optional, default as 0.8
The opacity of the arcs when displayed in Cesium. Valid values are in the range from 0 (invisible) to 1 (no transparency).
popupText: string, Optional, default as None
Text (or HTML) that will be displayed when a user clicks on the arc in either Leaflet or Cesium.
Return
------
:ref:`Arcs`
An Arcs dataframe
Examples
--------
First import veroviz and check if it is the latest version
>>> import veroviz as vrv
>>> vrv.checkVersion()
Generate arcs from a given ordered list of coordinates:
>>> arcs = vrv.createArcsFromLocSeq(
... locSeq=[
... [42.1325, -78.2134],
... [42.5341, -78.3252],
... [42.3424, -78.6424]
... ])
>>> arcs
Display the arcs on a Leaflet map:
>>> vrv.createLeaflet(arcs=arcs)
This example includes all of the available function arguments.
>>> arcs = vrv.createArcsFromLocSeq(
... locSeq = [[42.1325, -78.2134],
... [42.5341, -78.3252],
... [42.3424, -78.6424]],
... initArcs = None,
... startArc = 1,
... objectID = 'car',
... leafletColor = 'orange',
... leafletWeight = 5,
... leafletStyle = 'dashed',
... leafletOpacity = 0.6,
... useArrows = False,
... cesiumColor = 'orange',
... cesiumWeight = 5,
... cesiumStyle = 'dashed',
... cesiumOpacity = 0.6,
... popupText = 'car route')
>>> vrv.createLeaflet(arcs=arcs)
"""
# validation
[valFlag, errorMsg, warningMsg] = valCreateArcsFromLocSeq(locSeq, initArcs, startArc, objectID, leafletColor, leafletWeight, leafletStyle, leafletOpacity, leafletCurveType, leafletCurvature, useArrows, cesiumColor, cesiumWeight, cesiumStyle, cesiumOpacity)
if (not valFlag):
print (errorMsg)
return
elif (config['VRV_SETTING_SHOWWARNINGMESSAGE'] and warningMsg != ""):
print (warningMsg)
arcs = privCreateArcsFromLocSeq(locSeq, initArcs, startArc, objectID, leafletColor, leafletWeight, leafletStyle, leafletOpacity, leafletCurveType, leafletCurvature, useArrows, cesiumColor, cesiumWeight, cesiumStyle, cesiumOpacity, popupText)
return arcs
def createArcsFromNodeSeq(nodeSeq=None, nodes=None, initArcs=None, startArc=1, objectID=None, leafletColor=config['VRV_DEFAULT_LEAFLETARCCOLOR'], leafletWeight=config['VRV_DEFAULT_LEAFLETARCWEIGHT'], leafletStyle=config['VRV_DEFAULT_LEAFLETARCSTYLE'], leafletOpacity=config['VRV_DEFAULT_LEAFLETARCOPACITY'], leafletCurveType=config['VRV_DEFAULT_ARCCURVETYPE'], leafletCurvature=config['VRV_DEFAULT_ARCCURVATURE'], useArrows=True, cesiumColor=config['VRV_DEFAULT_CESIUMPATHCOLOR'], cesiumWeight=config['VRV_DEFAULT_CESIUMPATHWEIGHT'], cesiumStyle=config['VRV_DEFAULT_CESIUMPATHSTYLE'], cesiumOpacity=config['VRV_DEFAULT_CESIUMPATHOPACITY'], popupText=None):
"""
Create an "arcs" dataframe from an ordered list of node IDs. The "nodes" dataframe from which these node IDs are drawn must also be specified.
Parameters
----------
nodeSeq: list, Required
An ordered list of node IDs. These IDs must be included in the `id` column of the :ref:`Nodes` dataframe specified in the `nodes` input argument to this function. The format for `nodeSeq` is [node_id_1, node_id_2, ...].
nodes: :ref:`Nodes`, Required
A :ref:`Nodes` dataframe, which must contain the individual node IDs specified in the `nodeSeq` input argument.
initArcs: :ref:`Arcs`, Optional, default as None
An :ref:`Arcs` dataframe. If provided, the arcs to be created will be appended to this dataframe.
startArc: int, Optional, default as 1
Specifies the starting index number for the arcs. This will be reflected in the `odID` column of the resulting :ref:`Arcs` dataframe. If `startArc` is less than the minimum value of the `odID` found in the dataframe specified by `initArcs`, the value of `startArc` will be ignored in favor of the smallest integer greater than the maximum existing `odID` value.
objectID: int/string, Optional, default as None
A descriptive name or index for a particular vehicle or object (e.g., 'truck 1', or 'red car').
leafletColor: string, Optional, default as "orange"
The color of the arcs when displayed in Leaflet. See :ref:`Leaflet style` for a list of available colors.
leafletWeight: int, Optional, default as 3
The pixel width of the arcs when displayed in Leaflet.
leafletStyle: string, Optional, default as 'solid'
The line style of the arcs when displayed in Leaflet. Valid options are 'solid', 'dotted', and 'dashed'. See :ref:`Leaflet style` for more information.
leafletOpacity: float in [0, 1], Optional, default as 0.8
The opacity of the arcs when displayed in Leaflet. Valid values are in the range from 0 (invisible) to 1 (no transparency).
leafletCurveType: string, Optional, default as 'straight'
        The type of curve to be shown on the Leaflet map for :ref:`Arcs` dataframes (curves will not be applied to :ref:`Assignments` dataframes). The options are 'Bezier', 'greatcircle', and 'straight'. If 'Bezier' is provided, leafletCurvature is also required. If 'greatcircle' is provided, the arc follows the curvature of the Earth.
leafletCurvature: float in (-90, 90), Conditional, default as 45
If leafletCurveType is 'Bezier', then leafletCurvature is required; otherwise this argument will not be used. The curvature specifies the angle between a straight line connecting the two nodes and the curved arc emanating from those two nodes. Therefore, this value should be in the open interval (-90, 90), although values in the (-45, 45) range tend to work best.
useArrows: bool, Optional, default as True
Indicates whether arrows should be shown on the arcs when displayed in Leaflet.
cesiumColor: string, Optional, default as "orange"
The color of the arcs when displayed in Cesium. See :ref:`Cesium Style` for a list of available colors.
cesiumWeight: int, Optional, default as 3
The pixel width of the arcs when displayed in Cesium.
cesiumStyle: string, Optional, default as 'solid'
The line style of the arcs when displayed in Cesium. Valid options are 'solid', 'dotted', and 'dashed'. See :ref:`Cesium Style` for more information.
cesiumOpacity: float in [0, 1], Optional, default as 0.8
The opacity of the arcs when displayed in Cesium. Valid values are in the range from 0 (invisible) to 1 (no transparency).
popupText: string, Optional, default as None
Text (or HTML) that will be displayed when a user clicks on the arc in either Leaflet or Cesium.
Return
------
:ref:`Arcs`
An Arcs dataframe
Examples
--------
First import veroviz and check if it is the latest version
>>> import veroviz as vrv
>>> vrv.checkVersion()
Create a collection of 20 nodes:
>>> myNodes = vrv.generateNodes(
... numNodes = 20,
... nodeType = 'depot',
... nodeDistrib = 'normal',
... nodeDistribArgs = {
... 'center' : [42.30, 78.00],
... 'stdDev' : 1000
... })
>>> myNodes
Generate arcs from nodes 2-to-15 and from 15-to-8. These node IDs are found in the `id` column of the given Nodes dataframe.
>>> myArcs = vrv.createArcsFromNodeSeq(
... nodeSeq = [2, 15, 8],
... nodes = myNodes)
>>> myArcs
Display the nodes and arcs on a Leaflet map:
>>> myMap = vrv.createLeaflet(arcs=myArcs, nodes=myNodes)
>>> myMap
This example includes all of the available function arguments:
>>> moreArcs = vrv.createArcsFromNodeSeq(
... nodeSeq = [3, 16, 9],
... nodes = myNodes,
... initArcs = myArcs,
... startArc = 7,
... objectID = 'car',
... leafletColor = 'cadetblue',
... leafletWeight = 3,
... leafletStyle = 'dotted',
... leafletOpacity = 0.8,
... useArrows = False,
... cesiumColor = 'cadetblue',
... cesiumWeight = 3,
... cesiumStyle = 'dotted',
... cesiumOpacity = 0.8,
... popupText = 'car route')
>>> moreArcs
Display the nodes and arcs on a Leaflet map:
>>> vrv.createLeaflet(mapObject=myMap, arcs = moreArcs)
"""
# validation
[valFlag, errorMsg, warningMsg] = valCreateArcsFromNodeSeq(nodeSeq, nodes, initArcs, startArc, objectID, leafletColor, leafletWeight, leafletStyle, leafletOpacity, leafletCurveType, leafletCurvature, useArrows, cesiumColor, cesiumWeight, cesiumStyle, cesiumOpacity)
if (not valFlag):
print (errorMsg)
return
elif (config['VRV_SETTING_SHOWWARNINGMESSAGE'] and warningMsg != ""):
print (warningMsg)
locSeq = []
for i in range(len(nodeSeq)):
locSeq.append([
nodes.loc[nodes['id'] == nodeSeq[i]]['lat'].values[0],
nodes.loc[nodes['id'] == nodeSeq[i]]['lon'].values[0],
])
arcs = privCreateArcsFromLocSeq(locSeq, initArcs, startArc, objectID, leafletColor, leafletWeight, leafletStyle, leafletOpacity, leafletCurveType, leafletCurvature, useArrows, cesiumColor, cesiumWeight, cesiumStyle, cesiumOpacity, popupText)
return arcs
|
1624963
|
import pytest
from skil.utils.io import serialize_config, deserialize_config
from skil import Experiment
import os
import sys
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock as mock
MOCK_CONFIG = {
'experiment_id': 'foo',
'experiment_name': 'bar',
'workspace_id': 'baz'
}
@mock.patch('skil.Skil')
def test_experiment_json_serde(Skil):
file_name = "./mock.json"
serialize_config(MOCK_CONFIG, file_name, 'json')
config = deserialize_config(file_name)
assert config == MOCK_CONFIG
exp = Experiment.load(file_name, Skil())
assert exp.id == 'foo'
os.remove(file_name)
@mock.patch('skil.Skil')
def test_experiment_yaml_serde(Skil):
file_name = "./mock.yml"
serialize_config(MOCK_CONFIG, file_name, 'yaml')
config = deserialize_config(file_name)
assert config == MOCK_CONFIG
exp = Experiment.load(file_name, Skil())
assert exp.id == 'foo'
os.remove(file_name)
@mock.patch('skil.Skil')
def test_failed_serde(Skil):
file_name = "./mock.fail"
with pytest.raises(Exception):
serialize_config(MOCK_CONFIG, file_name, 'foo')
with open(file_name, 'w') as f:
f.write('foobar')
with pytest.raises(Exception):
conf = deserialize_config(file_name)
os.remove(file_name)
if __name__ == '__main__':
pytest.main([__file__])
|
1624964
|
class Solution:
    def reverseWords(self, s: str) -> str:
        # Split on single spaces and drop the empty strings produced by
        # repeated spaces.
        listOfWords = [word.strip() for word in s.strip().split(' ') if word != '']

        # Reverse the word list in place with two converging indices.
        def helper(left, right):
            if left < right:
                listOfWords[left], listOfWords[right] = listOfWords[right], listOfWords[left]
                helper(left + 1, right - 1)

        helper(0, len(listOfWords) - 1)
        return ' '.join(listOfWords)
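# Quick sanity check (illustrative):
if __name__ == '__main__':
    assert Solution().reverseWords("  the sky  is blue ") == "blue is sky the"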
|
1624966
|
from flask_caching import Cache
cache = Cache()
# Add some basic rate limiting.
# https://flask-limiter.readthedocs.io/en/stable/
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
limiter = Limiter(key_func=get_remote_address)
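# Minimal wiring sketch (assumes a Flask application object named `app`):
#   cache.init_app(app)
#   limiter.init_app(app)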
|
1624993
|
from .. import ccllib as lib
from ..core import check
from ..background import omega_x
from .massdef import MassDef, MassDef200m
import numpy as np
class HaloBias(object):
""" This class enables the calculation of halo bias functions.
We currently assume that all halo bias functions can be written
as functions that depend on M only through sigma_M (where
sigma_M^2 is the overdensity variance on spheres with a
radius given by the Lagrangian radius for mass M).
All sub-classes implementing specific parametrizations
can therefore be simply created by replacing this class'
    `_get_bsigma` method.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
mass_def (:class:`~pyccl.halos.massdef.MassDef`): a mass
definition object that fixes
the mass definition used by this halo bias
parametrization.
mass_def_strict (bool): if False, consistency of the mass
definition will be ignored.
"""
name = "default"
def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
cosmo.compute_sigma()
self.mass_def_strict = mass_def_strict
if mass_def is not None:
if self._check_mdef(mass_def):
raise ValueError("Halo bias " + self.name +
" is not compatible with mass definition" +
" Delta = %s, " % (mass_def.Delta) +
" rho = " + mass_def.rho_type)
self.mdef = mass_def
else:
self._default_mdef()
self._setup(cosmo)
def _default_mdef(self):
""" Assigns a default mass definition for this object if
none is passed at initialization.
"""
self.mdef = MassDef('fof', 'matter')
def _setup(self, cosmo):
""" Use this function to initialize any internal attributes
of this object. This function is called at the very end of the
constructor call.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
"""
pass
def _check_mdef_strict(self, mdef):
return False
def _check_mdef(self, mdef):
""" Return False if the input mass definition agrees with
the definitions for which this parametrization
works. True otherwise. This function gets called at the
start of the constructor call.
Args:
mdef (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
Returns:
bool: True if the mass definition is not compatible with
this parametrization. False otherwise.
"""
if self.mass_def_strict:
return self._check_mdef_strict(mdef)
return False
def _get_consistent_mass(self, cosmo, M, a, mdef_other):
""" Transform a halo mass with a given mass definition into
the corresponding mass definition that was used to initialize
this object.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
M (float or array_like): halo mass in units of M_sun.
a (float): scale factor.
mdef_other (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
Returns:
            float or array_like: base-10 logarithm of the mass according
            to this object's mass definition.
"""
if mdef_other is not None:
M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef)
else:
M_use = M
return np.log10(M_use)
def _get_Delta_m(self, cosmo, a):
""" For SO-based mass definitions, this returns the corresponding
value of Delta for a rho_matter-based definition. This is useful
mostly for the Tinker mass functions, which are defined for any
SO mass in general, but explicitly only for Delta_matter.
"""
delta = self.mdef.get_Delta(cosmo, a)
if self.mdef.rho_type == 'matter':
return delta
else:
om_this = omega_x(cosmo, a, self.mdef.rho_type)
om_matt = omega_x(cosmo, a, 'matter')
return delta * om_this / om_matt
def get_halo_bias(self, cosmo, M, a, mdef_other=None):
""" Returns the halo bias for input parameters.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
M (float or array_like): halo mass in units of M_sun.
a (float): scale factor.
mdef_other (:class:`~pyccl.halos.massdef.MassDef`):
the mass definition object that defines M.
Returns:
float or array_like: halo bias.
"""
M_use = np.atleast_1d(M)
logM = self._get_consistent_mass(cosmo, M_use,
a, mdef_other)
# sigma(M)
status = 0
sigM, status = lib.sigM_vec(cosmo.cosmo, a, logM,
len(logM), status)
check(status)
b = self._get_bsigma(cosmo, sigM, a)
if np.ndim(M) == 0:
b = b[0]
return b
def _get_bsigma(self, cosmo, sigM, a):
""" Get the halo bias as a function of sigmaM.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
sigM (float or array_like): standard deviation in the
overdensity field on the scale of this halo.
a (float): scale factor.
Returns:
float or array_like: f(sigma_M) function.
"""
raise NotImplementedError("Use one of the non-default "
"HaloBias classes")
class HaloBiasSheth99(HaloBias):
""" Implements halo bias described in 1999MNRAS.308..119S
This parametrization is only valid for 'fof' masses.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
mass_def (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
this parametrization accepts FoF masses only.
If `None`, FoF masses will be used.
mass_def_strict (bool): if False, consistency of the mass
definition will be ignored.
use_delta_c_fit (bool): if True, use delta_crit given by
the fit of Nakamura & Suto 1997. Otherwise use
delta_crit = 1.68647.
"""
name = "Sheth99"
def __init__(self, cosmo, mass_def=None,
mass_def_strict=True,
use_delta_c_fit=False):
self.use_delta_c_fit = use_delta_c_fit
super(HaloBiasSheth99, self).__init__(cosmo,
mass_def,
mass_def_strict)
def _default_mdef(self):
self.mdef = MassDef('fof', 'matter')
def _setup(self, cosmo):
self.p = 0.3
self.a = 0.707
def _check_mdef_strict(self, mdef):
if self.mass_def_strict:
if mdef.Delta != 'fof':
return True
return False
def _get_bsigma(self, cosmo, sigM, a):
if self.use_delta_c_fit:
status = 0
delta_c, status = lib.dc_NakamuraSuto(cosmo.cosmo, a, status)
check(status)
else:
delta_c = 1.68647
nu = delta_c / sigM
anu2 = self.a * nu**2
return 1. + (anu2 - 1. + 2. * self.p / (1. + anu2**self.p))/delta_c
class HaloBiasSheth01(HaloBias):
""" Implements halo bias described in arXiv:astro-ph/9907024.
This parametrization is only valid for 'fof' masses.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
mass_def (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
this parametrization accepts FoF masses only.
If `None`, FoF masses will be used.
mass_def_strict (bool): if False, consistency of the mass
definition will be ignored.
"""
name = "Sheth01"
def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
super(HaloBiasSheth01, self).__init__(cosmo,
mass_def,
mass_def_strict)
def _default_mdef(self):
self.mdef = MassDef('fof', 'matter')
def _setup(self, cosmo):
self.a = 0.707
self.sqrta = 0.84083292038
self.b = 0.5
self.c = 0.6
self.dc = 1.68647
def _check_mdef_strict(self, mdef):
if mdef.Delta != 'fof':
return True
return False
def _get_bsigma(self, cosmo, sigM, a):
nu = self.dc/sigM
anu2 = self.a * nu**2
anu2c = anu2**self.c
t1 = self.b * (1.0 - self.c) * (1.0 - 0.5 * self.c)
return 1. + (self.sqrta * anu2 * (1 + self.b / anu2c) -
anu2c / (anu2c + t1)) / (self.sqrta * self.dc)
class HaloBiasBhattacharya11(HaloBias):
""" Implements halo bias described in arXiv:1005.2239.
This parametrization is only valid for 'fof' masses.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
mass_def (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
this parametrization accepts FoF masses only.
If `None`, FoF masses will be used.
mass_def_strict (bool): if False, consistency of the mass
definition will be ignored.
"""
name = "Bhattacharya11"
def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
super(HaloBiasBhattacharya11, self).__init__(cosmo,
mass_def,
mass_def_strict)
def _default_mdef(self):
self.mdef = MassDef('fof', 'matter')
def _setup(self, cosmo):
self.a = 0.788
self.az = 0.01
self.p = 0.807
self.q = 1.795
self.dc = 1.68647
def _check_mdef_strict(self, mdef):
if mdef.Delta != 'fof':
return True
return False
def _get_bsigma(self, cosmo, sigM, a):
nu = self.dc / sigM
a = self.a * a**self.az
anu2 = a * nu**2
return 1. + (anu2 - self.q + 2*self.p / (1 + anu2**self.p)) / self.dc
class HaloBiasTinker10(HaloBias):
""" Implements halo bias described in arXiv:1001.3162.
Args:
cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object.
mass_def (:class:`~pyccl.halos.massdef.MassDef`):
a mass definition object.
this parametrization accepts SO masses with
200 < Delta < 3200 with respect to the matter density.
If `None`, Delta = 200 (matter) will be used.
mass_def_strict (bool): if False, consistency of the mass
definition will be ignored.
"""
name = "Tinker10"
def __init__(self, cosmo, mass_def=None, mass_def_strict=True):
super(HaloBiasTinker10, self).__init__(cosmo,
mass_def,
mass_def_strict)
def _default_mdef(self):
self.mdef = MassDef200m()
def _AC(self, ld):
xp = np.exp(-(4./ld)**4.)
A = 1.0 + 0.24 * ld * xp
C = 0.019 + 0.107 * ld + 0.19*xp
return A, C
def _a(self, ld):
return 0.44 * ld - 0.88
def _setup(self, cosmo):
self.B = 0.183
self.b = 1.5
self.c = 2.4
self.dc = 1.68647
def _check_mdef_strict(self, mdef):
if mdef.Delta == 'fof':
return True
return False
def _get_bsigma(self, cosmo, sigM, a):
nu = self.dc / sigM
ld = np.log10(self._get_Delta_m(cosmo, a))
A, C = self._AC(ld)
aa = self._a(ld)
nupa = nu**aa
return 1. - A * nupa / (nupa + self.dc**aa) + \
self.B * nu**self.b + C * nu**self.c
def halo_bias_from_name(name):
""" Returns halo bias subclass from name string
Args:
name (string): a halo bias name
Returns:
HaloBias subclass corresponding to the input name.
"""
bias_functions = {c.name: c for c in HaloBias.__subclasses__()}
if name in bias_functions:
return bias_functions[name]
else:
raise ValueError("Halo bias parametrization %s not implemented")
|
1625042
|
import importlib.resources
import pathlib
import typing
import attr
import click
import pytest
import _pytest
import plotman.errors
import plotman.job
import plotman.plotters
import plotman.plotters.bladebit
import plotman.plotters.chianetwork
import plotman.plotters.madmax
import plotman._tests.resources
@pytest.fixture(name="line_decoder")
def line_decoder_fixture() -> typing.Iterator[plotman.plotters.LineDecoder]:
decoder = plotman.plotters.LineDecoder()
yield decoder
# assert decoder.buffer == ""
def test_decoder_single_chunk(line_decoder: plotman.plotters.LineDecoder) -> None:
lines = line_decoder.update(b"abc\n123\n\xc3\xa4\xc3\xab\xc3\xaf\n")
assert lines == ["abc", "123", "äëï"]
def test_decoder_individual_byte_chunks(
line_decoder: plotman.plotters.LineDecoder,
) -> None:
lines = []
for byte in b"abc\n123\n\xc3\xa4\xc3\xab\xc3\xaf\n":
lines.extend(line_decoder.update(bytes([byte])))
assert lines == ["abc", "123", "äëï"]
def test_decoder_partial_line_with_final(
line_decoder: plotman.plotters.LineDecoder,
) -> None:
lines = []
lines.extend(line_decoder.update(b"abc\n123\n\xc3\xa4\xc3\xab"))
lines.extend(line_decoder.update(b"\xc3\xaf", final=True))
assert lines == ["abc", "123", "äëï"]
def test_decoder_partial_line_without_final(
line_decoder: plotman.plotters.LineDecoder,
) -> None:
lines = []
lines.extend(line_decoder.update(b"abc\n123\n\xc3\xa4\xc3\xab"))
lines.extend(line_decoder.update(b"\xc3\xaf"))
assert lines == ["abc", "123"]
@pytest.mark.parametrize(
argnames=["resource_name", "correct_plotter"],
argvalues=[
["chianetwork.plot.log", plotman.plotters.chianetwork.Plotter],
["madmax.plot.log", plotman.plotters.madmax.Plotter],
],
)
def test_plotter_identifies_log(
resource_name: str,
correct_plotter: typing.Type[plotman.plotters.Plotter],
) -> None:
with importlib.resources.open_text(
package=plotman._tests.resources,
resource=resource_name,
encoding="utf-8",
) as f:
plotter = plotman.plotters.get_plotter_from_log(lines=f)
assert plotter == correct_plotter
def test_plotter_not_identified() -> None:
with pytest.raises(plotman.errors.UnableToIdentifyPlotterFromLogError):
plotman.plotters.get_plotter_from_log(lines=["a", "b"])
@attr.frozen
class CommandLineExample:
line: typing.List[str]
plotter: typing.Optional[typing.Type[plotman.plotters.Plotter]]
parsed: typing.Optional[plotman.job.ParsedChiaPlotsCreateCommand] = None
cwd: str = ""
default_bladebit_arguments = dict(
sorted(
{
"threads": None,
"count": 1,
"farmer_key": None,
"pool_key": None,
"pool_contract": None,
"warm_start": False,
"plot_id": None,
"memo": None,
"show_memo": False,
"verbose": False,
"no_numa": False,
"no_cpu_affinity": False,
"out_dir": pathlib.PosixPath("."),
}.items()
)
)
default_chia_network_arguments = dict(
sorted(
{
"size": 32,
"override_k": False,
"num": 1,
"buffer": 3389,
"num_threads": 2,
"buckets": 128,
"alt_fingerprint": None,
"pool_contract_address": None,
"farmer_public_key": None,
"pool_public_key": None,
"tmp_dir": ".",
"tmp2_dir": None,
"final_dir": ".",
"plotid": None,
"memo": None,
"nobitfield": False,
"exclude_final_dir": False,
}.items()
)
)
default_madmax_arguments = dict(
sorted(
{
"size": 32,
"count": 1,
"threads": 4,
"buckets": 256,
"buckets3": 256,
"tmpdir": pathlib.PosixPath("."),
"tmpdir2": None,
"finaldir": pathlib.PosixPath("."),
"waitforcopy": False,
"poolkey": None,
"contract": None,
"farmerkey": None,
"tmptoggle": None,
"rmulti2": 1,
}.items()
)
)
bladebit_command_line_examples: typing.List[CommandLineExample] = [
CommandLineExample(
line=["bladebit"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_bladebit_arguments},
),
),
CommandLineExample(
line=["bladebit", "-h"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_bladebit_arguments},
),
),
CommandLineExample(
line=["bladebit", "--help"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_bladebit_arguments},
),
),
CommandLineExample(
line=["bladebit", "--invalid-option"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=click.NoSuchOption("--invalid-option"),
help=False,
parameters={},
),
),
CommandLineExample(
line=["bladebit", "--pool-contract", "xch123abc", "--farmer-key", "abc123"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_bladebit_arguments,
"pool_contract": "xch123abc",
"farmer_key": "abc123",
},
),
),
CommandLineExample(
line=["here/there/bladebit"],
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_bladebit_arguments},
),
),
CommandLineExample(
line=[
"bladebit",
"final/dir",
],
cwd="/cwd",
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_bladebit_arguments,
"out_dir": pathlib.Path("/", "cwd", "final", "dir"),
},
),
),
CommandLineExample(
line=plotman.plotters.bladebit.create_command_line(
options=plotman.plotters.bladebit.Options(),
tmpdir="",
tmp2dir=None,
dstdir="/farm/dst/dir",
farmer_public_key=None,
pool_public_key=None,
pool_contract_address=None,
),
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_bladebit_arguments,
"verbose": True,
"out_dir": pathlib.Path("/farm/dst/dir"),
},
),
),
CommandLineExample(
line=plotman.plotters.bladebit.create_command_line(
options=plotman.plotters.bladebit.Options(),
tmpdir="/farm/tmp/dir",
tmp2dir="/farm/tmp2/dir",
dstdir="/farm/dst/dir",
farmer_public_key="farmerpublickey",
pool_public_key="poolpublickey",
pool_contract_address="poolcontractaddress",
),
plotter=plotman.plotters.bladebit.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_bladebit_arguments,
"farmer_key": "farmerpublickey",
"pool_key": "poolpublickey",
"pool_contract": "poolcontractaddress",
"verbose": True,
"out_dir": pathlib.Path("/farm/dst/dir"),
},
),
),
]
chianetwork_command_line_examples: typing.List[CommandLineExample] = [
CommandLineExample(
line=["python", "chia", "plots", "create"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "-k", "32"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments, "size": 32},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "-k32"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments, "size": 32},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "--size", "32"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments, "size": 32},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "--size=32"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments, "size": 32},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "--size32"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=click.NoSuchOption("--size32"),
help=False,
parameters={},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "-h"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_chia_network_arguments},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "--help"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_chia_network_arguments},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "-k", "32", "--help"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_chia_network_arguments},
),
),
CommandLineExample(
line=["python", "chia", "plots", "create", "--invalid-option"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=click.NoSuchOption("--invalid-option"),
help=False,
parameters={},
),
),
CommandLineExample(
line=[
"python",
"chia",
"plots",
"create",
"--pool_contract_address",
"xch123abc",
"--farmer_public_key",
"abc123",
],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_chia_network_arguments,
"pool_contract_address": "xch123abc",
"farmer_public_key": "abc123",
},
),
),
# macOS system python
CommandLineExample(
line=["Python", "chia", "plots", "create"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_chia_network_arguments},
),
),
# binary installer
CommandLineExample(
line=["chia", "plots", "create", "--final_dir", "/blue/red"],
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_chia_network_arguments,
"final_dir": "/blue/red",
},
),
),
CommandLineExample(
line=[
"python",
"chia",
"plots",
"create",
"--final_dir",
"final/dir",
"--tmp_dir",
"tmp/dir",
"--tmp2_dir",
"tmp2/dir",
],
cwd="/cwd",
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_chia_network_arguments,
"final_dir": "/cwd/final/dir",
"tmp_dir": "/cwd/tmp/dir",
"tmp2_dir": "/cwd/tmp2/dir",
},
),
),
CommandLineExample(
line=plotman.plotters.chianetwork.create_command_line(
options=plotman.plotters.chianetwork.Options(),
tmpdir="/farm/tmp/dir",
tmp2dir=None,
dstdir="/farm/dst/dir",
farmer_public_key=None,
pool_public_key=None,
pool_contract_address=None,
),
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_chia_network_arguments,
"final_dir": "/farm/dst/dir",
"tmp_dir": "/farm/tmp/dir",
},
),
),
CommandLineExample(
line=plotman.plotters.chianetwork.create_command_line(
options=plotman.plotters.chianetwork.Options(
e=True,
x=True,
),
tmpdir="/farm/tmp/dir",
tmp2dir="/farm/tmp2/dir",
dstdir="/farm/dst/dir",
farmer_public_key="farmerpublickey",
pool_public_key="poolpublickey",
pool_contract_address="poolcontractaddress",
),
plotter=plotman.plotters.chianetwork.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_chia_network_arguments,
"exclude_final_dir": True,
"nobitfield": True,
"farmer_public_key": "farmerpublickey",
"pool_public_key": "poolpublickey",
"pool_contract_address": "poolcontractaddress",
"final_dir": "/farm/dst/dir",
"tmp_dir": "/farm/tmp/dir",
"tmp2_dir": "/farm/tmp2/dir",
},
),
),
]
madmax_command_line_examples: typing.List[CommandLineExample] = [
CommandLineExample(
line=["chia_plot"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_madmax_arguments},
),
),
CommandLineExample(
line=["chia_plot", "-h"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_madmax_arguments},
),
),
CommandLineExample(
line=["chia_plot", "--help"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=True,
parameters={**default_madmax_arguments},
),
),
CommandLineExample(
line=["chia_plot", "--invalid-option"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=click.NoSuchOption("--invalid-option"),
help=False,
parameters={},
),
),
CommandLineExample(
line=["chia_plot", "--contract", "xch123abc", "--farmerkey", "abc123"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_madmax_arguments,
"contract": "xch123abc",
"farmerkey": "abc123",
},
),
),
CommandLineExample(
line=["here/there/chia_plot"],
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={**default_madmax_arguments},
),
),
CommandLineExample(
line=[
"chia_plot",
"--finaldir",
"final/dir",
"--tmpdir",
"tmp/dir",
"--tmpdir2",
"tmp/dir2",
],
cwd="/cwd",
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_madmax_arguments,
"finaldir": pathlib.Path("/", "cwd", "final", "dir"),
"tmpdir": pathlib.Path("/", "cwd", "tmp", "dir"),
"tmpdir2": pathlib.Path("/", "cwd", "tmp", "dir2"),
},
),
),
CommandLineExample(
line=plotman.plotters.madmax.create_command_line(
options=plotman.plotters.madmax.Options(),
tmpdir="/farm/tmp/dir",
tmp2dir=None,
dstdir="/farm/dst/dir",
farmer_public_key=None,
pool_public_key=None,
pool_contract_address=None,
),
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_madmax_arguments,
"finaldir": pathlib.Path("/farm/dst/dir"),
"tmpdir": pathlib.Path("/farm/tmp/dir"),
},
),
),
CommandLineExample(
line=plotman.plotters.madmax.create_command_line(
options=plotman.plotters.madmax.Options(),
tmpdir="/farm/tmp/dir",
tmp2dir="/farm/tmp2/dir",
dstdir="/farm/dst/dir",
farmer_public_key="farmerpublickey",
pool_public_key="poolpublickey",
pool_contract_address="poolcontractaddress",
),
plotter=plotman.plotters.madmax.Plotter,
parsed=plotman.job.ParsedChiaPlotsCreateCommand(
error=None,
help=False,
parameters={
**default_madmax_arguments,
"farmerkey": "farmerpublickey",
"poolkey": "poolpublickey",
"contract": "poolcontractaddress",
"finaldir": pathlib.Path("/farm/dst/dir"),
"tmpdir": pathlib.Path("/farm/tmp/dir"),
"tmpdir2": pathlib.Path("/farm/tmp2/dir"),
},
),
),
]
command_line_examples: typing.List[CommandLineExample] = [
*bladebit_command_line_examples,
*chianetwork_command_line_examples,
*madmax_command_line_examples,
]
not_command_line_examples: typing.List[CommandLineExample] = [
CommandLineExample(line=["something/else"], plotter=None),
CommandLineExample(line=["another"], plotter=None),
CommandLineExample(line=["some/chia/not"], plotter=None),
CommandLineExample(line=["chia", "other"], plotter=None),
CommandLineExample(line=["chia_plot/blue"], plotter=None),
CommandLineExample(line=[], plotter=None, parsed=None),
]
@pytest.fixture(
name="command_line_example",
params=command_line_examples,
ids=lambda param: repr(param.line),
)
def command_line_example_fixture(
request: _pytest.fixtures.SubRequest,
) -> typing.Iterator[CommandLineExample]:
return request.param # type: ignore[no-any-return]
@pytest.fixture(
name="not_command_line_example",
params=not_command_line_examples,
ids=lambda param: repr(param.line),
)
def not_command_line_example_fixture(
request: _pytest.fixtures.SubRequest,
) -> typing.Iterator[CommandLineExample]:
return request.param # type: ignore[no-any-return]
def test_plotter_identifies_command_line(
command_line_example: CommandLineExample,
) -> None:
plotter = plotman.plotters.get_plotter_from_command_line(
command_line=command_line_example.line,
)
assert plotter == command_line_example.plotter
def test_plotter_fails_to_identify_command_line(
not_command_line_example: CommandLineExample,
) -> None:
with pytest.raises(plotman.plotters.UnableToIdentifyCommandLineError):
plotman.plotters.get_plotter_from_command_line(
command_line=not_command_line_example.line,
)
def test_is_plotting_command_line(command_line_example: CommandLineExample) -> None:
assert plotman.plotters.is_plotting_command_line(
command_line=command_line_example.line,
)
def test_is_not_plotting_command_line(
not_command_line_example: CommandLineExample,
) -> None:
assert not plotman.plotters.is_plotting_command_line(
command_line=not_command_line_example.line,
)
def test_command_line_parsed_correctly(
command_line_example: CommandLineExample,
) -> None:
assert command_line_example.plotter is not None
plotter = command_line_example.plotter()
plotter.parse_command_line(
command_line=command_line_example.line,
cwd=command_line_example.cwd,
)
assert plotter.parsed_command_line == command_line_example.parsed
|
1625049
|
import sys, string, json, time
import subprocess
import benchClient
regionAMIs = {
"us-east-1": "ami-0313472d535df8cfd",
"us-east-2": "ami-0eb2012f23aeda71a",
"us-west-1": "ami-01312f0ad647427de",
"us-west-2": "ami-01240c2bd44b025ee",
}
'''
regionAMIs = {
"us-east-1": "ami-034fd6e677d79ebb5",
"us-east-2": "ami-0f972c03390d700fb",
"us-west-1": "ami-063ff9879f81ae1c1",
"us-west-2": "ami-0fb4b87cd10b60c95",
}
'''
filename = "../system.config"
print("Starting cluster...")
f_config = open(filename, "r")
sysConfig = json.load(f_config)
f_config.close()
# US-EAST-1
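# Note: the `export` below only affects its own short-lived subshell, which is
# why AWS_DEFAULT_REGION is also set inline in every aws command that follows
# (the same applies to the us-east-2 export further down).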
cmd = "export AWS_DEFAULT_REGION=us-east-1"
process = subprocess.Popen(cmd, shell=True)
process.wait()
cmd = ('export AWS_DEFAULT_REGION=us-east-1; aws ec2 run-instances --image-id %s --count 1 --instance-type c5.large --key-name DoryKeyPair --placement "{\\\"AvailabilityZone\\\": \\\"us-east-1b\\\"}" --security-groups DoryGroup') % (regionAMIs["us-east-1"])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
east12Config = json.loads(out)
east12IDs = [instance["InstanceId"] for instance in east12Config["Instances"]]
cmd = ('export AWS_DEFAULT_REGION=us-east-1; aws ec2 run-instances --image-id %s --count 5 --instance-type r5n.4xlarge --key-name DoryKeyPair --placement "{\\\"AvailabilityZone\\\": \\\"us-east-1b\\\"}" --security-groups DoryGroup') % (regionAMIs["us-east-1"])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
east11Config = json.loads(out)
east11IDs = [instance["InstanceId"] for instance in east11Config["Instances"]]
time.sleep(30)
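# Note: fixed sleeps are fragile; `aws ec2 wait instance-running --instance-ids ...`
# would block until the instances actually report running (behavior kept as-is here).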
# first is master, next are servers 1, 3, 5, 7
server1Addrs = []
for i in range(len(east11IDs)):
cmd = ('export AWS_DEFAULT_REGION=us-east-1; aws ec2 describe-instances --instance-ids "%s"') % (east11IDs[i])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
c = json.loads(out)
server1Addrs.append(c["Reservations"][0]["Instances"][0]["PublicIpAddress"])
# dory client, baseline client
clientAddrs = []
for i in range(len(east12IDs)):
cmd = ('export AWS_DEFAULT_REGION=us-east-1; aws ec2 describe-instances --instance-ids "%s"') % (east12IDs[i])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
c = json.loads(out)
clientAddrs.append(c["Reservations"][0]["Instances"][0]["PublicIpAddress"])
print("Created all us-east-1 instances")
# US-EAST-2
cmd = "export AWS_DEFAULT_REGION=us-east-2"
process = subprocess.Popen(cmd, shell=True)
process.wait()
cmd = ('export AWS_DEFAULT_REGION=us-east-2; aws ec2 run-instances --image-id %s --count 4 --instance-type r5n.4xlarge --key-name DoryKeyPair --placement "{\\\"AvailabilityZone\\\": \\\"us-east-2b\\\"}" --security-groups DoryGroup') % (regionAMIs["us-east-2"])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
east2Config = json.loads(out)
east2IDs = [instance["InstanceId"] for instance in east2Config["Instances"]]
time.sleep(30)
# servers 2, 4, 6, 8
server2Addrs = []
for i in range(len(east2IDs)):
cmd = ('export AWS_DEFAULT_REGION=us-east-2; aws ec2 describe-instances --instance-ids "%s"') % (east2IDs[i])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
c = json.loads(out)
server2Addrs.append(c["Reservations"][0]["Instances"][0]["PublicIpAddress"])
print("Created all us-east-2 instances")
# US-WEST-1
cmd = ('export AWS_DEFAULT_REGION=us-west-1; aws ec2 run-instances --image-id %s --count 1 --instance-type c5.large --key-name DoryKeyPair --placement "{\\\"AvailabilityZone\\\": \\\"us-west-1b\\\"}" --security-groups DoryGroup') % (regionAMIs["us-west-1"])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
west1Config = json.loads(out)
west1IDs = [instance["InstanceId"] for instance in west1Config["Instances"]]
time.sleep(30)
cmd = ('export AWS_DEFAULT_REGION=us-west-1; aws ec2 describe-instances --instance-ids "%s"') % (west1IDs[0])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
c = json.loads(out)
baselineClientAddr = (c["Reservations"][0]["Instances"][0]["PublicIpAddress"])
print("Created all us-west-1 instances")
# US-WEST-2
cmd = ('export AWS_DEFAULT_REGION=us-west-2; aws ec2 run-instances --image-id %s --count 1 --instance-type r5n.4xlarge --key-name DoryKeyPair --placement "{\\\"AvailabilityZone\\\": \\\"us-west-2b\\\"}" --security-groups DoryGroup') % (regionAMIs["us-west-2"])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
west2Config = json.loads(out)
west2IDs = [instance["InstanceId"] for instance in west2Config["Instances"]]
time.sleep(30)
cmd = ('export AWS_DEFAULT_REGION=us-west-2; aws ec2 describe-instances --instance-ids "%s"') % (west2IDs[0])
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = process.stdout.read()
c = json.loads(out)
baselineServerAddr = (c["Reservations"][0]["Instances"][0]["PublicIpAddress"])
print("Created all us-west-2 instances")
sysConfig["MasterAddr"] = server1Addrs[0]
sysConfig["MasterID"] = east11IDs[0]
sysConfig["ClientAddrs"] = [clientAddrs[0]]
sysConfig["ClientIDs"] = [east12IDs[0]]
for i in range(len(sysConfig["Servers"])):
if i % 2 == 0:
sysConfig["Servers"][i]["Addr"] = server1Addrs[int(i/2 + 1)]
sysConfig["Servers"][i]["ID"] = east11IDs[int(i/2 + 1)]
else:
sysConfig["Servers"][i]["Addr"] = server2Addrs[int(i/2)]
sysConfig["Servers"][i]["ID"] = east2IDs[int(i/2)]
sysConfig["BaselineServerAddr"] = baselineServerAddr
sysConfig["BaselineServerID"] = west2IDs[0]
sysConfig["BaselineClientAddr"] = baselineClientAddr
sysConfig["BaselineClientID"] = west1IDs[0]
sysConfig["SSHKeyPath"] = "~/.ssh/dory.pem"
sysConfigBlob = json.dumps(sysConfig)
f_config = open(filename, "w")
f_config.write(sysConfigBlob)
f_config.close()
sshKeyPath = sysConfig["SSHKeyPath"]
replicaPorts = [server["Port"] for server in sysConfig["Servers"]]
replicas = [server["Addr"] for server in sysConfig["Servers"]]
masterConfig = {
"MasterAddr": sysConfig["MasterAddr"],
"MasterPort": sysConfig["MasterPort"],
"Addr": replicas,
"Port": replicaPorts,
"CertFile": sysConfig["MasterCertFile"],
"KeyFile": sysConfig["MasterKeyFile"],
"OutDir": sysConfig["OutDir"]
}
masterConfigBlob = json.dumps(masterConfig)
print("Copying config files to instances")
with open("../src/config/master.config", "w") as f:
f.write(masterConfigBlob)
# Wait for all instances to be fully started
time.sleep(60)
if sysConfig["MasterAddr"] != "127.0.0.1":
cmd = ("scp -i %s -o StrictHostKeyChecking=no ../src/config/master.config ec2-user@%s:~/dory/src/config/master.config") % (sshKeyPath, sysConfig["MasterAddr"])
process = subprocess.Popen(cmd, shell=True)
process.wait()
for i in range(len(sysConfig["Servers"])):
serverConfig = {
"Addr": sysConfig["Servers"][i]["Addr"],
"Port": sysConfig["Servers"][i]["Port"],
"CertFile": sysConfig["Servers"][i]["CertFile"],
"KeyFile": sysConfig["Servers"][i]["KeyFile"],
"OutDir": sysConfig["OutDir"],
"ClientMaskKey": sysConfig["ClientMaskKey"],
"ClientMacKey": sysConfig["ClientMacKey"]
}
serverNum = i + 1
serverConfigBlob = json.dumps(serverConfig)
with open(("../src/config/server%d.config") % (serverNum), "w") as f:
f.write(serverConfigBlob)
if sysConfig["Servers"][i]["Addr"] != "127.0.0.1":
cmd = ("scp -i %s -o StrictHostKeyChecking=no ../src/config/server%d.config ec2-user@%s:~/dory/src/config/server%d.config") % (sshKeyPath, serverNum, sysConfig["Servers"][i]["Addr"], serverNum)
process = subprocess.Popen(cmd, shell=True)
process.wait()
clientConfig = {
"MasterAddr": sysConfig["MasterAddr"],
"MasterPort": sysConfig["MasterPort"],
"Addr": replicas,
"Port": replicaPorts,
"MaskKey": sysConfig["ClientMaskKey"],
"MacKey": sysConfig["ClientMacKey"]
}
clientConfigBlob = json.dumps(clientConfig)
with open("../src/config/client.config", "w") as f:
f.write(clientConfigBlob)
for i in range(len(sysConfig["ClientAddrs"])):
if sysConfig["ClientAddrs"][i] != "127.0.0.1":
cmd = ("scp -i %s -o StrictHostKeyChecking=no ../src/config/client.config ec2-user@%s:~/dory/src/config/client.config") % (sshKeyPath, sysConfig["ClientAddrs"][i])
process = subprocess.Popen(cmd, shell=True)
process.wait()
print("Cluster setup done.")
print("--- Check that none of the scp commands above failed (SSH connection on port 22 was refused) ---")
print("--- If one or more scp commands failed, teardown the cluster and start a new one ---")
|
1625078
|
from __future__ import absolute_import, division, print_function, unicode_literals
from . import BaseSignal
class Signal(BaseSignal):
""" Ranking signal based on the URL presence in DMOZ
"""
def get_value(self, document, url_metadata):
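        # 1.0 if either the full URL or the query-stripped URL carries a DMOZ
        # title, else 0.0 (`0.0 or x` evaluates to x, so the second lookup only
        # matters when the first fails).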
return (
float(bool(url_metadata["url"].dmoz_title)) or
float(bool(url_metadata["url_without_query"].dmoz_title))
)
|
1625080
|
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
_io = [
# SYS clock 100 MHz (input) signal. The sys_clk_p and sys_clk_n
# signals are the PCI Express reference clock.
#set_property PACKAGE_PIN B6 [get_ports sys_clk_p]
("clk100", 0, Pins("B6"), IOStandard("LVCMOS33")),
#set_property PACKAGE_PIN V14 [get_ports {status_leds[3]}]
#set_property PACKAGE_PIN V13 [get_ports {status_leds[2]}]
#set_property PACKAGE_PIN V11 [get_ports {status_leds[1]}]
#set_property PACKAGE_PIN V12 [get_ports {status_leds[0]}]
#set_property IOSTANDARD LVCMOS33 [get_ports {status_leds[3]}]
#set_property IOSTANDARD LVCMOS33 [get_ports {status_leds[2]}]
#set_property IOSTANDARD LVCMOS33 [get_ports {status_leds[1]}]
#set_property IOSTANDARD LVCMOS33 [get_ports {status_leds[0]}]
#set_property PULLUP true [get_ports {status_leds[3]}]
#set_property PULLUP true [get_ports {status_leds[2]}]
#set_property PULLUP true [get_ports {status_leds[1]}]
#set_property PULLUP true [get_ports {status_leds[0]}]
#set_property DRIVE 8 [get_ports {status_leds[3]}]
#set_property DRIVE 8 [get_ports {status_leds[2]}]
#set_property DRIVE 8 [get_ports {status_leds[1]}]
#set_property DRIVE 8 [get_ports {status_leds[0]}]
("user_led", 0, Pins("V12"), IOStandard("LVCMOS33"), Drive(8), Misc("PULLUP")),
("user_led", 1, Pins("V11"), IOStandard("LVCMOS33"), Drive(8), Misc("PULLUP")),
("user_led", 2, Pins("V13"), IOStandard("LVCMOS33"), Drive(8), Misc("PULLUP")),
("user_led", 3, Pins("V14"), IOStandard("LVCMOS33"), Drive(8), Misc("PULLUP")),
## Serial input/output
## Available on NanoEVB only!
#set_property IOSTANDARD LVCMOS33 [get_ports RxD]
#set_property IOSTANDARD LVCMOS33 [get_ports TxD]
#set_property PACKAGE_PIN V17 [get_ports RxD]
#set_property PACKAGE_PIN V16 [get_ports TxD]
#set_property PULLUP true [get_ports RxD]
#set_property OFFCHIP_TERM NONE [get_ports TxD]
("serial", 0,
Subsignal("tx", Pins("V16")), # MCU_RX
Subsignal("rx", Pins("V17")), # MCU_TX
IOStandard("LVCMOS33"),
),
## SYS reset (input) signal. The sys_reset_n signal is generated
## by the PCI Express interface (PERST#).
#set_property PACKAGE_PIN A10 [get_ports sys_rst_n]
#set_property IOSTANDARD LVCMOS33 [get_ports sys_rst_n]
#set_property PULLDOWN true [get_ports sys_rst_n]
## PCIe x1 link
#set_property PACKAGE_PIN G4 [get_ports pcie_mgt_rxp]
#set_property PACKAGE_PIN G3 [get_ports pcie_mgt_rxn]
#set_property PACKAGE_PIN B2 [get_ports pcie_mgt_txp]
#set_property PACKAGE_PIN B1 [get_ports pcie_mgt_txn]
("pcie_x1", 0,
Subsignal("rst_n", Pins("A10"), IOStandard("LVCMOS33"), Misc("PULLDOWN")),
Subsignal("clk_p", Pins("D6")),
Subsignal("clk_n", Pins("D5")),
Subsignal("rx_p", Pins("G4")),
Subsignal("rx_n", Pins("G3")),
Subsignal("tx_p", Pins("B2")),
Subsignal("tx_n", Pins("B1"))
),
## clkreq_l is active low clock request for M.2 card to
## request PCI Express reference clock
#set_property PACKAGE_PIN A9 [get_ports clkreq_l]
#set_property IOSTANDARD LVCMOS33 [get_ports clkreq_l]
#set_property PULLDOWN true [get_ports clkreq_l]
## High-speed configuration so FPGA is up in time to negotiate with PCIe root complex
#set_property BITSTREAM.CONFIG.CONFIGRATE 33 [current_design]
#set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]
#set_property CONFIG_MODE SPIx4 [current_design]
#set_property BITSTREAM.CONFIG.SPI_FALL_EDGE YES [current_design]
#set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]
]
class Platform(XilinxPlatform):
name = "picoevb"
default_clk_name = "clk100"
default_clk_period = 10.0
# From https://www.xilinx.com/support/documentation/user_guides/ug470_7Series_Config.pdf
    # 17536096 bits == 2192012 bytes == 0x21728c -- therefore round up to 0x220000
gateware_size = 0x220000
# ???
# FIXME: Create a "spi flash module" object in the same way we have SDRAM
# module objects.
spiflash_read_dummy_bits = 10
spiflash_clock_div = 4
spiflash_total_size = int((256/8)*1024*1024) # 256Mbit
spiflash_page_size = 256
spiflash_sector_size = 0x10000
spiflash_model = "n25q128"
def __init__(self, toolchain="vivado", programmer="vivado"):
XilinxPlatform.__init__(self, "xc7a50t-csg325-2", _io,
toolchain=toolchain)
self.add_platform_command(
"set_property CONFIG_VOLTAGE 1.5 [current_design]")
self.add_platform_command(
"set_property CFGBVS GND [current_design]")
self.add_platform_command(
"set_property BITSTREAM.CONFIG.CONFIGRATE 22 [current_design]")
self.add_platform_command(
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 1 [current_design]")
self.toolchain.bitstream_commands = [
"set_property CONFIG_VOLTAGE 1.5 [current_design]",
"set_property CFGBVS GND [current_design]",
"set_property BITSTREAM.CONFIG.CONFIGRATE 22 [current_design]",
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 1 [current_design]",
]
self.toolchain.additional_commands = \
["write_cfgmem -verbose -force -format bin -interface spix1 -size 64 "
"-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
self.programmer = programmer
self.add_platform_command("""
create_clock -name pcie_phy_clk -period 10.0 [get_pins {{pcie_phy/pcie_support_i/pcie_i/inst/inst/gt_top_i/pipe_wrapper_i/pipe_lane[0].gt_wrapper_i/gtp_channel.gtpe2_channel_i/TXOUTCLK}}]
""")
def create_programmer(self):
if self.programmer == "vivado":
return VivadoProgrammer(flash_part="n25q128-3.3v-spi-x1_x2_x4")
else:
raise ValueError("{} programmer is not supported"
.format(self.programmer))
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
|
1625115
|
import torch
from .FC import FC
class MLP(torch.nn.Module):
def __init__(self, in_size, hidden_size, out_size, dropout_r = 0., use_relu = True):
super(MLP, self).__init__()
self.fc = FC(in_size = in_size, out_size = hidden_size, dropout_r = dropout_r, use_relu = use_relu)
self.linear = torch.nn.Linear(hidden_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
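# Usage sketch (illustrative, not part of the original module; assumes the
# package-relative FC layer imported above):
#     mlp = MLP(in_size=512, hidden_size=1024, out_size=10, dropout_r=0.1)
#     out = mlp(torch.randn(32, 512))  # out.shape == torch.Size([32, 10])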
|
1625184
|
import logging
from pajbot.managers.handler import HandlerManager
from pajbot.modules import BaseModule, ModuleSetting
from pajbot.modules.chat_alerts import ChatAlertModule
log = logging.getLogger(__name__)
class LiveAlertModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Live Alert"
DESCRIPTION = "Prints a message in chat when the streamer goes live"
CATEGORY = "Feature"
ENABLED_DEFAULT = False
PARENT_MODULE = ChatAlertModule
SETTINGS = [
ModuleSetting(
key="live_message",
label="Message to post when streamer goes live | Available arguments: {streamer}, {game}, {title}",
type="text",
required=True,
placeholder="{streamer} is now live! PogChamp Streaming {game}: {title}",
default="{streamer} is now live! PogChamp Streaming {game}: {title}",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="extra_message",
label="Extra message to post after the initial live message is posted. Leave empty to disable | Available arguments: {streamer}",
type="text",
required=False,
placeholder="@{streamer} TWEET THAT YOU'RE LIVE OMGScoots",
default="",
constraints={"max_str_len": 400},
),
]
def __init__(self, bot):
super().__init__(bot)
def on_stream_start(self, **rest):
live_chat_message = self.settings["live_message"]
streamer = self.bot.streamer_display
game = self.bot.stream_manager.game
title = self.bot.stream_manager.title
self.bot.say(live_chat_message.format(streamer=streamer, game=game, title=title))
if self.settings["extra_message"] != "":
self.bot.say(self.settings["extra_message"].format(streamer=streamer))
def enable(self, bot):
HandlerManager.add_handler("on_stream_start", self.on_stream_start)
def disable(self, bot):
HandlerManager.remove_handler("on_stream_start", self.on_stream_start)
|
1625236
|
import tensorflow as tf
class EnvSummaryLogger:
"""
Helper class to summarize all environments at the same time on the same plots.
"""
def __init__(self, sess, summary_dirs):
self.sess = sess
self.summary_writer = [tf.summary.FileWriter(summary_dirs[i], self.sess.graph)
for i in range(len(summary_dirs))]
self.summary_placeholders = {}
self.summary_ops = {}
self.env_summary_tags = ['reward', 'episode_length']
self.init_summaries()
def init_summaries(self):
"""
Create the summary part of the graph
:return:
"""
with tf.variable_scope('env-train-summaries'):
for tag in self.env_summary_tags:
self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag)
self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag])
def add_summary_all(self, step, summaries_arr_dict=None, summaries_merged=None):
for i in range(len(summaries_arr_dict)):
if summaries_arr_dict[i]['reward'] != -1:
self.add_summary(i, step, summaries_arr_dict[i], summaries_merged)
def add_summary(self, id, step, summaries_dict=None, summaries_merged=None):
"""
Add the summaries to tensorboard
:param step:
:param summaries_dict:
:param summaries_merged:
:return:
"""
if summaries_dict is not None:
summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()],
{self.summary_placeholders[tag]: value for tag, value in
summaries_dict.items()})
for summary in summary_list:
self.summary_writer[id].add_summary(summary, step)
self.summary_writer[id].flush()
if summaries_merged is not None:
self.summary_writer[id].add_summary(summaries_merged, step)
self.summary_writer[id].flush()
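# Usage sketch (illustrative, not part of the original module; TF1.x graph/session
# style, directory names are placeholders):
#     sess = tf.Session()
#     logger = EnvSummaryLogger(sess, ["./summaries/env0", "./summaries/env1"])
#     logger.add_summary_all(step=10, summaries_arr_dict=[
#         {'reward': 1.5, 'episode_length': 200},
#         {'reward': -1, 'episode_length': 0},  # entries with reward == -1 are skipped
#     ])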
|
1625257
|
from keras.layers import Input, Dense
from keras.models import Model
InputTensor = Input(shape=(100,))
H1 = Dense(10, activation='relu')( InputTensor)
H2 = Dense(20, activation='relu')(H1)
Output = Dense(1, activation='sigmoid')(H2)  # sigmoid: softmax over a single unit would always output 1.0
model = Model(inputs=InputTensor, outputs=Output)
model.summary()
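# Illustrative continuation (not part of the original snippet): compile the model
# and fit it on random data to exercise the (batch, 100) -> (batch, 1) shapes.
import numpy as np
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
x_dummy = np.random.rand(32, 100)                 # 32 samples, 100 features each
y_dummy = np.random.randint(0, 2, size=(32, 1))   # binary targets
model.fit(x_dummy, y_dummy, epochs=1, batch_size=8, verbose=0)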
|
1625259
|
import functools
import ipaddress
import json
import logging
import re
import operator
import testinfra
import time
from typing import Optional, Dict
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import pytest
LOGGER = logging.getLogger(__name__)
def retry(operation, times=1, wait=1, error_msg=None, name="default"):
last_assert = None
for idx in range(times):
try:
res = operation()
except AssertionError as exc:
last_assert = str(exc)
LOGGER.info("[%s] Attempt %d/%d failed: %s", name, idx, times, str(exc))
time.sleep(wait)
else:
LOGGER.info("[%s] Attempt %d/%d succeeded", name, idx, times)
return res
else:
if error_msg is None:
error_msg = (
"Failed to run operation '{name}' after {attempts} attempts "
"(waited {total}s in total)"
).format(name=name, attempts=times, total=times * wait)
if last_assert:
error_msg = error_msg + ": " + last_assert
pytest.fail(error_msg)
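# Usage sketch (illustrative; `check_pods_status` is a hypothetical helper):
#     retry(
#         lambda: check_pods_status(host, "kube-system"),
#         times=10, wait=5, name="checking pod status",
#     )
# The callable is re-run on AssertionError up to `times` times, sleeping `wait`
# seconds between attempts, and the test fails if it never succeeds.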
def write_string(host, dest, contents):
return host.run("cat > {} << EOF\n{}\nEOF".format(dest, contents))
def get_ip_from_cidr(host, cidr):
network = ipaddress.IPv4Network(cidr)
with host.sudo():
ip_info = host.check_output("ip a | grep 'inet '")
for line in ip_info.splitlines():
match = re.match(r"inet (?P<ip>[0-9]+(?:\.[0-9]+){3})/[0-9]+ ", line.strip())
assert match is not None, "Unexpected format: {}".format(line.strip())
candidate = match.group("ip")
if ipaddress.IPv4Address(candidate) in network:
return candidate
return None
def get_node_name(nodename, ssh_config=None):
"""Get a node name (from SSH config)."""
if ssh_config is not None:
node = testinfra.get_host(nodename, ssh_config=ssh_config)
return get_grain(node, "id")
return nodename
# Source: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 503),
method_whitelist=frozenset(["GET", "POST"]),
session=None,
):
"""Configure a `requests.session` for retry on error.
    By default, this helper performs 3 retries with an exponential sleep
    interval between requests, and only retries internal server errors (500)
    and service unavailable errors (503).
Arguments:
retries: The number of retries to perform before giving up
backoff_factor: The sleep interval between requests computed as
{backoff factor} * (2 ^ ({number retries} - 1))
status_forcelist: HTTP status codes that we should force a retry on
method_whitelist: uppercased HTTP methods that we should retry
session: Used to create a session
Returns:
A `requests.Session` object configured for retry.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
method_whitelist=method_whitelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
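# Illustrative schedule with the defaults above (retries=3, backoff_factor=0.3):
# urllib3 sleeps roughly backoff_factor * (2 ** (retry_number - 1)) seconds between
# successive retries, i.e. about 0.3s, 0.6s and 1.2s, and only 500/503 responses to
# GET/POST requests are retried.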
def kubectl_exec(host, command, pod, kubeconfig="/etc/kubernetes/admin.conf", **kwargs):
"""Grab the return code from a `kubectl exec`"""
kube_args = ["--kubeconfig", kubeconfig]
if kwargs.get("container"):
kube_args.extend(["-c", kwargs.get("container")])
if kwargs.get("namespace"):
kube_args.extend(["-n", kwargs.get("namespace")])
kubectl_cmd_tplt = "kubectl exec {} {} -- {}"
with host.sudo():
output = host.run(
kubectl_cmd_tplt.format(pod, " ".join(kube_args), " ".join(command))
)
return output
def run_salt_command(host, command, ssh_config):
"""Run a command inside the salt-master container."""
pod = "salt-master-{}".format(get_node_name("bootstrap", ssh_config))
output = kubectl_exec(
host, command, pod, container="salt-master", namespace="kube-system"
)
assert output.exit_status == 0, "command {} failed with: \nout: {}\nerr: {}".format(
command, output.stdout, output.stderr
)
return output
def get_dict_element(data, path, delimiter="."):
"""
Traverse a dict using a 'delimiter' on a target string.
getitem(a, b) returns the value of a at index b
"""
return functools.reduce(
operator.getitem,
(int(k) if k.isdigit() else k for k in path.split(delimiter)),
data,
)
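# Example (illustrative): get_dict_element({"a": {"b": [10, 20]}}, "a.b.1") == 20
# Digit-only path components are treated as list indices.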
def set_dict_element(data, path, value, delimiter="."):
"""
Traverse a nested dict using a delimiter on a target string
and replace the value of a key
"""
current = data
elements = [int(k) if k.isdigit() else k for k in path.split(delimiter)]
for element in elements[:-1]:
if isinstance(element, int):
current = current[element] if len(current) > element else []
else:
current = current.setdefault(element, {})
current[elements[-1]] = value
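# Example (illustrative):
#     d = {"a": {"b": [10, 20]}}
#     set_dict_element(d, "a.b.1", 99)  # d becomes {"a": {"b": [10, 99]}}
# Missing intermediate string keys are created via setdefault.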
def get_grain(host, key):
with host.sudo():
output = host.check_output(
'salt-call --local --out=json grains.get "{}"'.format(key)
)
grain = json.loads(output)["local"]
return grain
def get_pillar(host, key, local=False):
with host.sudo():
output = host.check_output(
'salt-call {} --out=json pillar.get "{}"'.format(
"--local" if local else "", key
)
)
return json.loads(output)["local"]
class BaseAPIError(Exception):
"""Some error occurred when using a `BaseAPI` subclass."""
class BaseAPI:
ERROR_CLS = BaseAPIError
def __init__(self, endpoint):
self.endpoint = endpoint.rstrip("/")
self.session = requests_retry_session()
def request(self, method, route, **kwargs):
kwargs.setdefault("verify", False)
try:
response = self.session.request(
method, f"{self.endpoint}/{route.lstrip('/')}", **kwargs
)
response.raise_for_status()
except requests.exceptions.RequestException as exc:
raise self.ERROR_CLS(exc)
try:
return response.json()
except ValueError as exc:
raise self.ERROR_CLS(exc)
class PrometheusApiError(BaseAPIError):
pass
class PrometheusApi(BaseAPI):
ERROR_CLS = PrometheusApiError
def query(self, metric_name, **query_matchers):
matchers = [
'{}="{}"'.format(key, value) for key, value in query_matchers.items()
]
query_string = metric_name + "{" + ",".join(matchers) + "}"
return self.request("GET", "api/v1/query", params={"query": query_string})
def get_alerts(self, **kwargs):
return self.request("GET", "api/v1/alerts", **kwargs)
def get_rules(self, **kwargs):
return self.request("GET", "api/v1/rules", **kwargs)
def find_rules(self, name=None, group=None, labels=None, **kwargs):
if not labels:
labels = {}
rules = []
response = self.get_rules(**kwargs)
for rule_group in response.get("data", {}).get("groups", []):
group_name = rule_group.get("name")
if group in (group_name, None):
for rule in rule_group.get("rules", []):
if name in (rule.get("name"), None):
if labels.items() <= rule.get("labels", {}).items():
rule["group"] = group_name
rules.append(rule)
return rules
def get_targets(self, **kwargs):
return self.request("GET", "api/v1/targets", **kwargs)
class GrafanaAPIError(BaseAPIError):
pass
class GrafanaAPI(BaseAPI):
ERROR_CLS = GrafanaAPIError
def get_admin_stats(self):
# FIXME: this user should not exist... but it's helpful in tests O:)
return self.request("GET", "api/admin/stats", auth=("admin", "admin"))
def get_dashboards(self):
return self.request(
"GET", "api/search", auth=("admin", "admin"), params={"type": "dash-db"}
)
|
1625277
|
import os
import torch
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
import h5py
from .upna_preprocess import *
from .utils import *
from bingham_distribution import BinghamDistribution
def make_hdf5_file(config, image_transform):
dataset_path = config["preprocess_path"]
csv_train = dataset_path + "/train/input.csv"
csv_test = dataset_path + "/test/input.csv"
biterion = config["biterion"]
if os.path.isfile(csv_train) and os.path.isfile(csv_test):
test_frame = pd.read_csv(csv_test)
train_frame = pd.read_csv(csv_train)
else:
preprocess = UpnaHeadPoseDataPreprocess(config)
test_frame = preprocess.frame_test
train_frame = preprocess.frame_train
train = UpnaHeadPoseSplitSet(dataset_path + "/train",
train_frame, image_transform)
test = UpnaHeadPoseSplitSet(dataset_path + "/test",
test_frame, image_transform)
img_shape = train[0]["image"].shape
label_shape = train[0]["pose"].shape[-1]
f = h5py.File(dataset_path + "/dataset.hdf5", "w")
f.create_dataset("train_img", (
len(train), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("train_label", (len(train), label_shape))
f.create_dataset("test_img", (
len(test), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("test_label", (len(test), label_shape))
for i, data in enumerate(train):
f["train_img"][i, :, :, :] = train[i]["image"]
f["train_label"][i, :] = train[i]["pose"]
print("train", i)
for i, data in enumerate(test):
f["test_img"][i, :, :, :] = test[i]["image"]
f["test_label"][i, :] = test[i]["pose"]
print("test", i)
class UpnaHeadPoseTrainTest():
"""
Stores a training and test set for the UPNA Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
information ex. See configs/upna_train.yaml. The dataset_path stores
        the location of the original downloaded dataset. The
        preprocess_path is where the processed images and poses will be stored.
        image_transforms: A list of composed pytorch transforms to be applied
to a PIL image
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.full_load(fp)  # yaml.load() without a Loader argument is rejected by PyYAML >= 6
if not os.path.isfile(config["preprocess_path"] + "/dataset.hdf5"):
            make_hdf5_file(config, image_transform)  # pass the loaded config dict, not the path
f = h5py.File(config["preprocess_path"] + "/dataset.hdf5", 'r')
noise = config["euler_noise"]
quat_noise = config["quat_noise"]
biterion = config["biterion"]
self.train = UpnaHDF5(f.get('train_img'), f.get('train_label'),
biterion, noise, quat_noise)
self.test = UpnaHDF5(f.get('test_img'), f.get('test_label'), biterion,
noise, quat_noise)
class UpnaHDF5(Dataset):
"""
Loads UPNA dataset from a HDF5 dataset and applies transformations to
biterion or quaternion form and adds noise to the labels.
biterion: format of the pose. if true, biterion. if false, quaternion.
euler_noise: the standard deviation of the Gaussian distribution that we
sample noise from
quat_noise: the Z of a bingham distribution that we sample noise from
"""
def __init__(self, images, labels, biterion, euler_noise, quat_noise):
self.images = images
self.labels = labels
self.biterion = biterion
if euler_noise:
s = np.random.normal(0, euler_noise, 3 * len(self.labels))
self.euler_noise = []
for i in range(len(self.labels)):
self.euler_noise.append([s[i * 3], s[i * 3 + 1], s[i * 3 + 2]])
else:
self.euler_noise = None
if quat_noise:
quat_noise = [float(quat_noise[0]), float(quat_noise[1]),
float(quat_noise[2]), 0]
bd = BinghamDistribution(np.identity(4), np.array(quat_noise))
samples = bd.random_samples(len(labels))
perm = [3, 0, 1, 2]
re_samples = samples[:, perm]
self.quat_noise = quaternion.as_quat_array(re_samples)
else:
self.quat_noise = []
def __getitem__(self, idx):
image = torch.from_numpy(self.images[idx, :, :, :]).float()
if self.euler_noise:
pose = np.array([self.labels[idx][0] + self.euler_noise[idx][0],
self.labels[idx][1] + self.euler_noise[idx][1],
self.labels[idx][2] + self.euler_noise[idx][2]])
else:
pose = self.labels[idx, :]
if len(self.quat_noise) != 0:
w, x, y, z = convert_euler_to_quaternion(pose[0], pose[1], pose[2])
quat_pose = quaternion.quaternion(w, x, y, z)
res = quaternion.as_float_array(quat_pose * self.quat_noise[idx])
roll, pitch, yaw = quaternion_to_euler(res[0], res[1], res[2],
res[3])
pose = np.array(
[math.degrees(roll), math.degrees(pitch), math.degrees(yaw)])
if self.biterion:
sample = {'image': image,
'pose': torch.from_numpy(pose)}
else:
sample = {'image': image,
'pose': convert_euler_to_quaternion(pose[0],
pose[1],
pose[2])}
return sample
def __len__(self):
return self.images.shape[0]
class UpnaHeadPoseSplitSet(Dataset):
def __init__(self, dataset_path, frame, image_transform):
"""
Stores a training or test set for the UPNA Head Pose Dataset
Parameters:
        dataset_path: the location where processed images and poses will be stored.
        frame: the CSV frame that stores the poses
        image_transforms: A list of composed pytorch transforms to be applied to a PIL image
"""
self.frame = frame
self.image_transform = image_transform
self.dataset_path = dataset_path
def __len__(self):
return len(self.frame)
def __getitem__(self, idx):
name = self.frame.iloc[idx, 0]
frame_index = idx
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
        head_pose = self.frame.iloc[frame_index, 1:4].to_numpy()  # .as_matrix() was removed in pandas 1.0
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': head_pose}
return sample
# TODO: GET RID OF THIS- REDUNDANT. except for the images field. need to incorporate that elsewhere...
class UpnaHeadPoseDataset(Dataset):
"""
Stores a test set for the UPNA Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
information ex. See configs/upna_train.yaml. The dataset_path stores
        the location of the original downloaded dataset. The
        preprocess_path is where the processed images and poses will be stored.
        image_transforms: (optional) A list of composed pytorch transforms to
be applied to a PIL image
images: (optional) Can provide a list of image names and a dataset will
be constructed with those images
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.full_load(fp)  # yaml.load() without a Loader argument is rejected by PyYAML >= 6
self.dataset_path = config["preprocess_path"] + "/test"
self.csv_path = self.dataset_path + "/input.csv"
self.user = config["user"]
self.video = config["video"]
if os.path.isfile(self.csv_path):
self.frame = pd.read_csv(self.csv_path)
else:
self.frame = UpnaHeadPoseDataPreprocess(config_file).frame_test
self.image_transform = image_transform
self.images = self._generate_file_names()
def __len__(self):
return len(self.images)
def _generate_file_names(self):
"""
        From the configured user number (e.g. "07") and video number (e.g. "03"),
        generate the list of corresponding frame file names.
Returns:
names: a list of file names.
"""
names = []
for i in range(1, 300):
string_name = "User_{}/user_{}_video_{}_frame{}.jpg".format(
self.user, self.user, self.video, i)
names.append(string_name)
return names
def __getitem__(self, idx):
name = self.images[idx]
frame_index = get_frame_index(name, self.frame)
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
        head_pose = self.frame.iloc[frame_index, 1:4].to_numpy()  # .as_matrix() was removed in pandas 1.0
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': torch.from_numpy(
convert_euler_to_quaternion(head_pose[0], head_pose[1],
head_pose[2]))}
return sample
|
1625279
|
from agnes.algos import A2C, PPO, PPORND
from agnes.nns import MLP, CNN, RNN, RNNCNN, GRUCNN, LSTMCNN
from agnes.runners import Single, DistributedMPI, CompetitiveRunner
from agnes.common import TensorboardLogger, StandardLogger, CsvLogger, log
from agnes.common import make_env, make_vec_env
|
1625321
|
import numpy as np
import tensorflow as tf
__author__ = '<NAME>'
def print_metrics_dict(metrics):
for name, val in metrics.items():
print('--------------', name, '--------------')
if isinstance(val, tf.Tensor):
val = val.numpy()
if name == 'confusion':
print(np.array2string(val, separator=', ', precision=2))
else:
print(val)
|
1625327
|
import operator
import re
from datetime import date, datetime, timedelta
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Layout, Submit
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.timezone import make_aware
from django.utils.translation import gettext as _
from django_select2.forms import Select2MultipleWidget
from dynamic_preferences.forms import PreferenceForm
from guardian.shortcuts import assign_perm, get_objects_for_user, get_users_with_perms, remove_perm
from recurrence.forms import RecurrenceField
from ephios.core import signup
from ephios.core.dynamic_preferences_registry import event_type_preference_registry
from ephios.core.models import Event, EventType, LocalParticipation, Shift, UserProfile
from ephios.core.widgets import MultiUserProfileWidget
from ephios.extra.crispy import AbortLink
from ephios.extra.permissions import get_groups_with_perms
from ephios.extra.widgets import ColorInput, CustomDateInput, CustomTimeInput
from ephios.modellogging.log import add_log_recorder, update_log
from ephios.modellogging.recorders import (
DerivedFieldsLogRecorder,
InstanceActionType,
PermissionLogRecorder,
)
class EventForm(forms.ModelForm):
visible_for = forms.ModelMultipleChoiceField(
queryset=Group.objects.none(),
label=_("Visible for"),
help_text=_(
"Select groups which the event shall be visible for. Regardless, the event will be visible for users that already signed up."
),
widget=Select2MultipleWidget,
required=False,
)
responsible_users = forms.ModelMultipleChoiceField(
queryset=UserProfile.objects.all(),
required=False,
label=_("Responsible persons"),
widget=MultiUserProfileWidget,
)
responsible_groups = forms.ModelMultipleChoiceField(
queryset=Group.objects.all(),
required=False,
label=_("Responsible groups"),
widget=Select2MultipleWidget,
)
class Meta:
model = Event
fields = ["title", "description", "location"]
def __init__(self, **kwargs):
user = kwargs.pop("user")
can_publish_for_groups = get_objects_for_user(user, "publish_event_for_group", klass=Group)
if (event := kwargs.get("instance", None)) is not None:
self.eventtype = event.type
responsible_users = get_users_with_perms(
event, only_with_perms_in=["change_event"], with_group_users=False
)
responsible_groups = get_groups_with_perms(event, only_with_perms_in=["change_event"])
visible_for = get_groups_with_perms(event, only_with_perms_in=["view_event"]).exclude(
id__in=responsible_groups
)
self.locked_visible_for_groups = set(visible_for.exclude(id__in=can_publish_for_groups))
kwargs["initial"] = {
"visible_for": visible_for.filter(id__in=can_publish_for_groups),
"responsible_users": responsible_users,
"responsible_groups": responsible_groups,
**kwargs.get("initial", {}),
}
else:
self.eventtype = kwargs.pop("eventtype")
kwargs["initial"] = {
"responsible_users": self.eventtype.preferences.get("responsible_users")
or get_user_model().objects.filter(pk=user.pk),
"responsible_groups": self.eventtype.preferences.get("responsible_groups"),
"visible_for": self.eventtype.preferences.get("visible_for")
or get_objects_for_user(user, "publish_event_for_group", klass=Group),
}
self.locked_visible_for_groups = set()
super().__init__(**kwargs)
self.fields["visible_for"].queryset = can_publish_for_groups
self.fields["visible_for"].disabled = not can_publish_for_groups
if self.locked_visible_for_groups:
self.fields["visible_for"].help_text = _(
"Select groups which the event shall be visible for. "
"This event is also visible for <b>{groups}</b>, "
"but you don't have the permission to change visibility "
"for those groups."
).format(groups=", ".join(group.name for group in self.locked_visible_for_groups))
def save(self, commit=True):
self.instance.type = self.eventtype
event: Event = super().save(commit=commit)
add_log_recorder(event, PermissionLogRecorder("view_event", _("Visible for")))
add_log_recorder(event, PermissionLogRecorder("change_event", _("Responsibles")))
# delete existing permissions
# (better implement https://github.com/django-guardian/django-guardian/issues/654)
for group in get_groups_with_perms(
event, only_with_perms_in=["view_event", "change_event"]
):
remove_perm("view_event", group, event)
remove_perm("change_event", group, event)
for user in get_users_with_perms(event, only_with_perms_in=["view_event", "change_event"]):
remove_perm("view_event", user, event)
remove_perm("change_event", user, event)
# assign designated permissions
assign_perm(
"view_event",
Group.objects.filter(
Q(id__in=self.cleaned_data["visible_for"])
| Q(id__in=self.cleaned_data["responsible_groups"])
| Q(id__in=(g.id for g in self.locked_visible_for_groups))
),
event,
)
assign_perm("change_event", self.cleaned_data["responsible_groups"], event)
assign_perm("change_event", self.cleaned_data["responsible_users"], event)
# Assign view_event to responsible users and to non-responsible users
# that already have some sort of participation for the event
# (-> they saw and interacted with it)
# We can't just do users that aren't included by group permissions,
# as they might get removed from that group.
assign_perm(
"view_event",
UserProfile.objects.filter(
Q(pk__in=self.cleaned_data["responsible_users"])
| Q(
pk__in=LocalParticipation.objects.filter(
shift_id__in=event.shifts.all()
).values_list("user", flat=True)
)
),
event,
)
update_log(event, InstanceActionType.CHANGE)
return event
class ShiftForm(forms.ModelForm):
date = forms.DateField(widget=CustomDateInput, label=_("Date"))
meeting_time = forms.TimeField(widget=CustomTimeInput, label=_("Meeting time"))
start_time = forms.TimeField(widget=CustomTimeInput, label=_("Start time"))
end_time = forms.TimeField(widget=CustomTimeInput, label=_("End time"))
field_order = ["date", "meeting_time", "start_time", "end_time", "signup_method_slug"]
class Meta:
model = Shift
fields = ["meeting_time", "start_time", "end_time", "signup_method_slug"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
signup_methods = list(signup.enabled_signup_methods())
if self.instance and (method_slug := self.instance.signup_method_slug):
if method_slug not in map(operator.attrgetter("slug"), signup_methods):
signup_methods.append(self.instance.signup_method)
self.fields["signup_method_slug"].widget = forms.Select(
choices=((method.slug, method.verbose_name) for method in signup_methods)
)
        # this recorder may cause db queries, so it's not added on Shift init, but only here in the form
# pylint: disable=undefined-variable
add_log_recorder(
self.instance,
DerivedFieldsLogRecorder(
lambda shift: method.get_signup_info() if (method := shift.signup_method) else {}
),
)
def clean(self):
cleaned_data = super().clean()
if {"date", "meeting_time", "start_time", "end_time"} <= set(cleaned_data.keys()):
cleaned_data["meeting_time"] = make_aware(
datetime.combine(cleaned_data["date"], cleaned_data["meeting_time"])
)
cleaned_data["start_time"] = make_aware(
datetime.combine(cleaned_data["date"], cleaned_data["start_time"])
)
cleaned_data["end_time"] = make_aware(
datetime.combine(self.cleaned_data["date"], cleaned_data["end_time"])
)
if self.cleaned_data["end_time"] <= self.cleaned_data["start_time"]:
cleaned_data["end_time"] = cleaned_data["end_time"] + timedelta(days=1)
if not cleaned_data["meeting_time"] <= cleaned_data["start_time"]:
raise ValidationError(_("Meeting time must not be after start time!"))
return cleaned_data
class EventDuplicationForm(forms.Form):
start_date = forms.DateField(
widget=CustomDateInput,
        initial=date.today,  # pass the callable so the default is evaluated when the form is built
help_text=_(
"This date will be used as the start date for recurring events that you create below, e.g. daily events will be created from this date onwards."
),
label=_("Start date"),
)
recurrence = RecurrenceField(required=False)
class EventTypeForm(forms.ModelForm):
class Meta:
model = EventType
fields = ["title", "can_grant_qualification", "color"]
widgets = {"color": ColorInput()}
def clean_color(self):
regex = re.compile(r"#[a-fA-F\d]{6}")
if not regex.match(self.cleaned_data["color"]):
raise ValidationError(_("You need to enter a valid color"))
return self.cleaned_data["color"]
class EventTypePreferenceForm(PreferenceForm):
registry = event_type_preference_registry
class BaseEventPluginFormMixin:
@property
def heading(self):
raise NotImplementedError
def render(self):
try:
self.helper.form_tag = False
except AttributeError:
self.helper = FormHelper(self)
self.helper.form_tag = False
return render_to_string("core/fragments/event_plugin_form.html", context={"form": self})
def is_function_active(self):
"""
When building forms for additional features, return whether that feature is enabled for the forms event instance.
With the default template, if this is True, the collapse is expanded on page load.
"""
return False
class EventNotificationForm(forms.Form):
NEW_EVENT = "new"
REMINDER = "remind"
PARTICIPANTS = "participants"
action = forms.ChoiceField(
choices=[
(NEW_EVENT, _("Send notification about new event to everyone")),
(REMINDER, _("Send reminder to everyone that is not participating")),
(PARTICIPANTS, _("Send a message to all participants")),
],
widget=forms.RadioSelect,
label=False,
)
mail_content = forms.CharField(required=False, widget=forms.Textarea, label=_("Mail content"))
def __init__(self, *args, **kwargs):
self.event = kwargs.pop("event")
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(
Field("action"),
Field("mail_content"),
FormActions(
Submit("submit", _("Send"), css_class="float-end"),
AbortLink(href=self.event.get_absolute_url()),
),
)
def clean(self):
if (
self.cleaned_data.get("action") == self.PARTICIPANTS
and not self.cleaned_data["mail_content"]
):
raise ValidationError(_("You cannot send an empty mail."))
return super().clean()
|
1625332
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import yaml
import math
import os
from skopt.plots import plot_objective
from fbprophet.plot import add_changepoints_to_plot
# Set some matplotlib parameters
mpl.rcParams['figure.figsize'] = (20, 15)
cfg = yaml.full_load(open(os.getcwd() + "/config.yml", 'r'))
def visualize_silhouette_plot(k_range, silhouette_scores, optimal_k, save_fig=False):
'''
    Plot the average silhouette score for all samples at different values of k. Use this to determine the optimal
    number of clusters (k). The optimal k is the one that maximizes the average Silhouette Score over the range of k provided.
:param k_range: Range of k explored
:param silhouette_scores: Average Silhouette Score corresponding to values in k range
:param optimal_k: The value of k that has the highest average Silhouette Score
:param save_fig: Flag indicating whether to save the figure
'''
# Plot the average Silhouette Score vs. k
axes = plt.subplot()
axes.plot(k_range, silhouette_scores)
# Set plot axis labels, title, and subtitle.
axes.set_xlabel("k (# of clusters)", labelpad=10, size=15)
axes.set_ylabel("Average Silhouette Score", labelpad=10, size=15)
axes.set_xticks(k_range, minor=False)
axes.axvline(x=optimal_k, linestyle='--')
axes.set_title("Silhouette Plot", fontsize=25)
axes.text(0.5, 0.92, "Average Silhouette Score over a range of k-values", size=15, ha='center')
# Save the image
if save_fig:
file_path = cfg['PATHS']['DATA_VISUALIZATIONS'] + 'silhouette_plot_' + \
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png'
plt.savefig(file_path)
return
def plot_model_evaluation(forecast_df, model_name, metrics, save_dir=None, figsize=(20,13), save_fig=False, train_date=''):
'''
Plot model's predictions on training and test sets, along with key performance metrics.
:param forecast_df: DataFrame consisting of predicted and ground truth consumption values
:param model_name: model identifier
:param metrics: key performance metrics
:param figsize: size of matplotlib figure
:param train_date: string representing date model was trained
'''
fig = plt.figure(figsize=figsize)
fig.suptitle(model_name + ' Forecast', fontsize=20)
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)
# Plot training performance
forecast_df[pd.notnull(forecast_df["model"])][["gt", "model"]].plot(color=["black", "green"], title="Training Set Predictions",
grid=True, ax=ax1)
ax1.set(xlabel=None)
# Plot test performance
if "test_pred" in forecast_df.columns:
forecast_df[pd.isnull(forecast_df["model"])][["gt", "forecast", "test_pred"]].plot(color=["black", "red", "yellow"],
title="Test Set Forecast", grid=True, ax=ax2)
else:
forecast_df[pd.isnull(forecast_df["model"])][["gt", "forecast"]].plot(color=["black", "red"],
title="Test Set Forecast", grid=True, ax=ax2)
ax2.fill_between(x=forecast_df.index, y1=forecast_df['pred_int_low'], y2=forecast_df['pred_int_up'], color='b', alpha=0.2)
ax2.fill_between(x=forecast_df.index, y1=forecast_df['conf_int_low'], y2=forecast_df['conf_int_up'], color='b', alpha=0.3)
ax2.set(xlabel=None)
# Plot residuals
forecast_df[["residuals", "error"]].plot(ax=ax3, color=["green", "red"], title="Residuals", grid=True)
ax3.set(xlabel=None)
# Plot residuals distribution
forecast_df[["residuals", "error"]].plot(ax=ax4, color=["green", "red"], kind='kde',
title="Residuals Distribution", grid=True)
ax4.set(ylabel=None)
print("Training --> Residuals mean:", np.round(metrics['residuals_mean']), " | std:", np.round(metrics['residuals_std']))
print("Test --> Error mean:", np.round(metrics['error_mean']), " | std:", np.round(metrics['error_std']),
" | mae:", np.round(metrics['MAE']), " | mape:", np.round(metrics['MAPE'] * 100), "% | mse:", np.round(metrics['MSE']),
" | rmse:", np.round(metrics['RMSE']))
if save_fig:
save_dir = cfg['PATHS']['FORECAST_VISUALIZATIONS'] if save_dir is None else save_dir
plt.savefig(save_dir + '/' + model_name + '_eval_' +
train_date + '.png')
return
def correlation_matrix(dataset, save_fig=False):
'''
Produces a correlation matrix for a dataset
:param dataset: A DataFrame
:save_fig: Flag indicating whether to save the figure
'''
corr_mat = dataset.corr()
mask = np.triu(np.ones_like(corr_mat, dtype=bool)) # Generate mask for upper right triangle
cmap = sns.diverging_palette(230, 20, as_cmap=True) # Custom diverging colour map
fig, axes = plt.subplots()
sns.heatmap(corr_mat, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}) # Draw a heatmap with mask and correct aspect ratio
axes.set_title('Correlation Matrix', fontsize=20)
plt.tight_layout(pad=1.2)
if save_fig:
plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'correlation_matrix' +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
return
def client_box_plot(client_df, save_fig=False):
'''
Produces a box plot for all features in the dataset
:param client_df: A DataFrame indexed by client identifier
:param save_fig: Flag indicating whether to save the figure
'''
cat_feats = [f for f in cfg['DATA']['CATEGORICAL_FEATS'] if f in client_df.columns]
bool_feats = [f for f in cfg['DATA']['BOOLEAN_FEATS'] if f in client_df.columns]
feats = cat_feats + bool_feats
n_rows = math.floor(math.sqrt(len(feats)))
n_cols = math.ceil(math.sqrt(len(feats)))
fig, axes = plt.subplots(n_rows, n_cols)
idx = 0
for i in range(n_rows):
for j in range(n_cols):
sns.boxplot(x=client_df[feats[idx]], y=client_df['CONS_0m_AGO'], palette="Set2", ax=axes[i, j])
axes[i, j].set_yscale('log')
axes[i, j].set_title(feats[idx], fontsize=14)
axes[i, j].set_xticklabels(axes[i, j].get_xticklabels(), rotation=45, ha='right')
if idx < len(feats) - 1:
idx += 1
else:
break
fig.suptitle('Box Plots for consumption in recent month grouped by categorical variables', fontsize=20, y=0.99)
fig.tight_layout(pad=1, rect=(0,0,1,0.95))
if save_fig:
plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'client_box_plot' +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
return
def client_cmptn_by_rc_violin_plot(client_df, save_fig=False):
'''
Produces a violin plot for consumption by client in the most recent month stratified by rate class
:param client_df: A DataFrame indexed by client identifier
:param save_fig: Flag indicating whether to save the figure
'''
fig, axes = plt.subplots()
sns.violinplot(x=client_df['CONS_0m_AGO'], y=client_df['RATE_CLASS'], palette="Set2", scale='area', orient='h',
linewidth=0.2, ax=axes)
axes.set_yticklabels(axes.get_yticklabels(), fontsize=12)
axes.set_xlabel('Consumption in last month [m^3]', fontsize=20, labelpad=10)
axes.set_ylabel('Rate Class', fontsize=20, labelpad=10)
fig.suptitle('Violin plot for consumption in recent month grouped by rate class', fontsize=30)
fig.tight_layout(pad=1, rect=(0,0.05,1,0.95))
if save_fig:
plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'violin_plot_rate_class' +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
return
def visualize_client_dataset_stats(client_df, save_fig=False):
'''
Obtain general statistics for features in the client dataset and create a summary figure
:param client_df: A DataFrame indexed by client identifier
:param save_fig: Flag indicating whether to save the figure
'''
cat_feats = [f for f in cfg['DATA']['CATEGORICAL_FEATS'] if f in client_df.columns]
bool_feats = [f for f in cfg['DATA']['BOOLEAN_FEATS'] if f in client_df.columns]
num_feats = [f for f in cfg['DATA']['NUMERICAL_FEATS'] if f in client_df.columns]
feats = cat_feats + bool_feats + num_feats
n_feats = len(feats)
n_rows = math.floor(math.sqrt(n_feats))
n_cols = math.ceil(math.sqrt(n_feats))
fig, axes = plt.subplots(n_rows, n_cols)
idx = 0
for i in range(n_rows):
for j in range(n_cols):
if feats[idx] in num_feats:
sns.kdeplot(data=client_df, x=feats[idx], palette="Set2", ax=axes[i, j])
mean = client_df[feats[idx]].mean()
median = client_df[feats[idx]].median()
std = client_df[feats[idx]].std()
axes[i, j].axvline(mean, color='r', linestyle='-', linewidth=0.8, label='mean=' + '{:.1e}'.format(mean))
axes[i, j].axvline(median, color='g', linestyle='-', linewidth=0.8, label='median=' + '{:.1e}'.format(median))
axes[i, j].axvline(mean - std, color='r', linestyle='--', linewidth=0.8, label='+/- std' + '{:.1e}'.format(std))
axes[i, j].axvline(mean + std, color='r', linestyle='--', linewidth=0.8)
axes[i, j].legend(fontsize=8)
axes[i, j].set_title(feats[idx], fontsize=14)
else:
mode = client_df[feats[idx]].mode()
sns.countplot(data=client_df, x=feats[idx], ax=axes[i, j], palette='Set3')
axes[i, j].set_xticklabels(axes[i, j].get_xticklabels(), rotation=45, ha='right')
axes[i, j].text(0.6, 0.9, 'mode=' + str(mode[0]), transform=axes[i, j].transAxes, fontsize=8)
axes[i, j].set_title(feats[idx], fontsize=14)
if idx < n_feats - 1:
idx += 1
else:
break
fig.suptitle('General statistics for client data', fontsize=20, y=0.99)
fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
if save_fig:
plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'client_general_visualization' +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
return
def produce_data_visualizations(preprocessed_path=None, client_path=None):
'''
Produces a series of data visualizations for client data and preprocessed consumption data.
:param preprocessed_path: Path of preprocessed data CSV
:param client_path: Path of client data CSV
'''
    if preprocessed_path is None:
        preprocessed_path = cfg['PATHS']['PREPROCESSED_DATA']
    preprocessed_df = pd.read_csv(preprocessed_path)
    if client_path is None:
        client_path = cfg['PATHS']['CLIENT_DATA']
    client_df = pd.read_csv(client_path)
plt.clf()
correlation_matrix(preprocessed_df, save_fig=True)
plt.clf()
client_box_plot(client_df, save_fig=True)
plt.clf()
visualize_client_dataset_stats(client_df, save_fig=True)
return
def plot_bayesian_hparam_opt(model_name, hparam_names, search_results, save_fig=False):
'''
Plot all 2D hyperparameter comparisons from the logs of a Bayesian hyperparameter optimization.
:param model_name: Name of the model
:param hparam_names: List of hyperparameter identifiers
:param search_results: The object resulting from a Bayesian hyperparameter optimization with the skopt package
:param save_fig:
:return:
'''
# Abbreviate hyperparameters to improve plot readability
axis_labels = hparam_names.copy()
for i in range(len(axis_labels)):
if len(axis_labels[i]) >= 12:
axis_labels[i] = axis_labels[i][:4] + '...' + axis_labels[i][-4:]
# Plot
axes = plot_objective(result=search_results, dimensions=axis_labels)
# Create a title
fig = plt.gcf()
fig.suptitle('Bayesian Hyperparameter\n Optimization for ' + model_name, fontsize=15, x=0.65, y=0.97)
# Indicate which hyperparameter abbreviations correspond with which hyperparameter
hparam_abbrs_text = ''
for i in range(len(hparam_names)):
hparam_abbrs_text += axis_labels[i] + ':\n'
fig.text(0.50, 0.8, hparam_abbrs_text, fontsize=10, style='italic', color='mediumblue')
hparam_names_text = ''
for i in range(len(hparam_names)):
hparam_names_text += hparam_names[i] + '\n'
fig.text(0.65, 0.8, hparam_names_text, fontsize=10, color='darkblue')
fig.tight_layout()
if save_fig:
plt.savefig(cfg['PATHS']['EXPERIMENT_VISUALIZATIONS'] + 'Bayesian_opt_' + model_name + '_' +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
def plot_prophet_components(prophet_model, forecast, save_dir=None, train_date=''):
'''
Plot Prophet model's forecast components. This plot visualizes trend, yearly seasonality, weekly seasonality,
holiday effects
:param prophet_model: Fitted Prophet model
:param forecast: A forecast from a Prophet model
:param train_date: string representing date model was trained
'''
fig = prophet_model.plot_components(forecast)
fig.suptitle('Prophet Model Components', fontsize=15)
fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
save_dir = cfg['PATHS']['INTERPRETABILITY_VISUALIZATIONS'] if save_dir is None else save_dir
plt.savefig(save_dir + 'Prophet_components' +
train_date + '.png')
return
def plot_prophet_forecast(prophet_model, prophet_pred, save_dir=None, train_date=''):
'''
Plot Prophet model's forecast using the Prophet API, including changepoints
:param prophet_model: Fitted Prophet model
:param prophet_pred: A forecast from a Prophet model (result of a prophet.predict() call)
'''
fig = prophet_model.plot(prophet_pred)
ax = fig.gca()
add_changepoints_to_plot(ax, prophet_model, prophet_pred)
ax = fig.gca()
ax.set_xlabel('Date')
ax.set_ylabel('Consumption [m^3]')
fig.suptitle('Prophet Model Forecast', fontsize=15)
fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
save_dir = cfg['PATHS']['FORECAST_VISUALIZATIONS'] if save_dir is None else save_dir
plt.savefig(save_dir + 'Prophet_API_forecast' +
train_date + '.png')
return
|
1625365
|
from bluedot import MockBlueDot
from time import sleep, time
mbd = MockBlueDot(auto_start_server = False)
def pressed(pos):
print("Pressed: x={} y={} angle={} distance={} middle={} top={} bottom={} left={} right={} time={}".format(pos.x, pos.y, pos.angle, pos.distance, pos.middle, pos.top, pos.bottom, pos.left, pos.right, time()))
def released():
print("Released: x={} y={}".format(mbd.position.x, mbd.position.y))
print()
def moved(pos):
print("Moved: x={} y={}".format(pos.x, pos.y))
mbd.when_pressed = pressed
mbd.when_released = released
mbd.when_moved = moved
mbd.start()
#launch a mock app
mbd.launch_mock_app()
try:
while True:
sleep(1)
finally:
mbd.mock_client_disconnected()
mbd.stop()
|
1625385
|
# Split the input string at uppercase letters, print each chunk, then print the
# number of chunks. A sentinel capital "A" is appended so the final chunk is flushed.
st = input("Enter the string \n")
st = st + "A"
t = 0  # start index of the current chunk
c = 0  # number of chunks printed
l = len(st)
for i in range(1, l):
    if ord(st[i]) >= 65 and ord(st[i]) <= 90:  # st[i] is an uppercase ASCII letter
        print(st[t:i])
        t = i
        c += 1
print(c)
|
1625395
|
from odoo import fields, models
class PosConfig(models.Model):
_inherit = "pos.config"
include_discount_in_prices = fields.Boolean(
string="Include Discount in Prices",
help="If box is unchecked the displayed prices will not include discounts",
)
|
1625425
|
import datetime as dt
import os
import time
from absl import app
from absl import flags
import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner
from budget_accounting import BudgetAccountant
from dataclasses import dataclass
from private_beam import DPEngine, BeamOperations, LocalOperations, DataExtractors, AggregateParams, Metrics
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', None, 'Input/output directory')
flags.DEFINE_string('input_file', None,
'The file with the data, it should be in data_dir')
# False means the run based on LocalOperations(), i.e. w/o any frameworks, true local run with Apache Beam framework
flags.DEFINE_bool('use_beam', False, 'Use beam or local pipeline operations')
@dataclass
class MovieView:
user_id: int
movie_id: int
rating: int
    date: dt.datetime
def parse_line(line, movie_id):
# this line has format "user_id,rating,date"
split_parts = line.split(',')
user_id = int(split_parts[0])
rating = int(split_parts[1])
date = dt.datetime.strptime(split_parts[2], '%Y-%m-%d')
return MovieView(user_id, movie_id, rating, date)
class ParseFile(beam.DoFn):
def __init__(self):
self.movie_id = -1
def process(self, line):
if line[-1] == ':': # this line has format "movie_id:"
self.movie_id = int(line[:-1])
return
# this line has format "user_id,rating,date"
yield parse_line(line, self.movie_id)
def parse_file(filename): # used for the local run
res = []
for line in open(filename):
line = line.strip()
if line[-1] == ':':
movie_id = int(line[:-1])
else:
res.append(parse_line(line, movie_id))
return res
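# Expected input file layout (illustrative values): a "movie_id:" line starts a
# block, and the "user_id,rating,date" lines that follow belong to that movie.
#   101:
#   7,5,2005-09-06
#   8,3,2005-05-13
#   102:
#   7,4,2005-10-19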
def get_netflix_dataset(pipeline, use_beam):
filename = os.path.join(FLAGS.data_dir, FLAGS.input_file)
if use_beam:
return pipeline | beam.io.ReadFromText(filename) | beam.ParDo(ParseFile())
return parse_file(filename)
def write_to_local_file(col, filename): # used for the local run
if col is None:
return
with open(filename, 'w') as out:
out.write('\n'.join(map(str, col)))
def calc_rating(pipeline, use_beam):
movie_views = get_netflix_dataset(pipeline, use_beam)
data_extractors = DataExtractors(
partition_extractor=lambda mv: mv.movie_id,
privacy_id_extractor=lambda mv: mv.user_id,
value_extractor=lambda mv: mv.rating)
params = AggregateParams(
max_partitions_contributed=2,
max_contributions_per_partition=1,
low=1,
high=5,
metrics=[Metrics.PRIVACY_ID_COUNT, Metrics.COUNT, Metrics.MEAN],
preagg_partition_selection=True,
# public_partitions = list(range(1, 40)) # uncomment this line for using public partitions.
)
budget_accountant = BudgetAccountant(eps=1, delta=1e-6)
ops = BeamOperations() if use_beam else LocalOperations()
dpe = DPEngine(budget_accountant, ops)
dp_result = dpe.aggregate(movie_views, params, data_extractors)
budget_accountant.compute_budgets()
# Print DP aggregation reports
reports = dpe._report_generators
print(f'There were {len(reports)} computations')
for i, report in enumerate(reports):
print(f'Computation {i}:')
print(report.report())
return dp_result
def compute_on_beam(private_outfile):
runner = fn_api_runner.FnApiRunner() # local runner
with beam.Pipeline(runner=runner) as pipeline:
private = calc_rating(pipeline, use_beam=True)
private | 'private data save' >> beam.io.WriteToText(private_outfile)
def compute_locally(private_outfile):
private = calc_rating(pipeline=None, use_beam=False)
write_to_local_file(private, private_outfile)
def main(unused_argv):
private_outfile = os.path.join(FLAGS.data_dir, 'dp_aggregation_result')
starttime = time.time()
if FLAGS.use_beam:
compute_on_beam(private_outfile)
else:
compute_locally(private_outfile)
print(f'DP aggregation running time {time.time() - starttime} seconds')
return 0
if __name__ == '__main__':
app.run(main)
|
1625458
|
from coldtype import *
from coldtype.fx.skia import phototype
audio = __sibling__("media/68.wav")
midi = Programs.Midi(__sibling__("media/68.mid"), text=0, bpm=151)
midi.hide()
@animation(timeline=midi.t, bg=hsl(0.4, 0.8, l=0.2), render_bg=1, audio=audio)
def drumsolo(f):
d = midi.t[0].fifve(f.i)
lk1 = {
"O": d([36, 38], 5, 50),
"M": d([42, 62, 63], 3, 20),
"U": d([60, 61, 64], 3, 10),
"H": d([81, 93, 94, 98], 3, 10),
"R": d([49, 50, 65, 71], 3, 20),
"D": d([72, 73, 74], 3, 50),
"S": d([52, 54, 86], 3, 50),
"P": d([51], 3, 350)
}
return (Glyphwise("DRUM\nSHOP", lambda g:
[Style(Font.MutatorSans(), 350),
dict(
wdth=lk1.get(g.c, 0),
wght=0.25*lk1.get(g.c, 0))])
.track(40, v=1)
.xalign(f.a.r, th=0)
.align(f.a.r, th=0)
.f(1)
.pen()
.ch(phototype(f.a.r,
blur=2, cut=190, cutw=25,
fill=hsl(0.35, 0.8, l=0.75))))
release = drumsolo.export("h264",
audio=__sibling__("media/68.wav"),
vf="eq=brightness=0.0:saturation=1.5",
loops=2)
|
1625475
|
from numbers import Real
from typing import Optional
import numpy as np
import mygrad._utils.graph_tracking as _tracking
from mygrad.operation_base import Operation
from mygrad.tensor_base import Tensor, asarray
from mygrad.typing import ArrayLike
class MarginRanking(Operation):
def __call__(self, x1, x2, y, margin):
"""Computes the margin ranking loss between ``x1``
and ``x2``.
Parameters
----------
x1 : mygrad.Tensor, shape=(N,) or (N, D)
x2 : mygrad.Tensor, shape=(N,) or (N, D)
y : numpy.ndarray
margin : float
Returns
-------
numpy.ndarray, shape=()
"""
self.variables = (x1, x2)
x1 = x1.data
x2 = x2.data
self.y = y
M = margin - self.y * (x1 - x2)
not_thresh = M <= 0
loss = M
loss[not_thresh] = 0.0
if _tracking.TRACK_GRAPH:
self._grad = np.ones_like(M)
self._grad[not_thresh] = 0.0
self._grad /= M.size
return np.mean(loss)
def backward_var(self, grad, index, **kwargs):
sign = -self.y if index == 0 else self.y
return grad * (sign * self._grad)
def margin_ranking_loss(
x1: ArrayLike,
x2: ArrayLike,
y: ArrayLike,
margin: float,
*,
constant: Optional[bool] = None
) -> Tensor:
r"""Computes the margin average margin ranking loss.
Equivalent to::
>>> import mygrad as mg
>>> mg.mean(mg.maximum(0, margin - y * (x1 - x2)))
Parameters
----------
x1 : ArrayLike, shape=(N,) or (N, D)
A batch of scores or descriptors to compare against those in `x2`
x2 : ArrayLike, shape=(N,) or (N, D)
A batch of scores or descriptors to compare against those in `x1`
y : Union[int, ArrayLike], scalar or shape=(N,)
1 or -1. Specifies whether the margin is compared against `(x1 - x2)`
or `(x2 - x1)`, for each of the N comparisons.
margin : float
A non-negative value to be used as the margin for the loss.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor, shape=()
The mean margin ranking loss.
"""
if not 0 < x1.ndim < 3:
raise ValueError("`x1` must have shape (N,) or (N, D)")
if not x1.shape == x2.shape:
raise ValueError("`x1` and `x2` must have the same shape")
if not np.issubdtype(x1.dtype, np.floating):
raise TypeError("`x1` must contain floats")
if not np.issubdtype(x2.dtype, np.floating):
raise TypeError("`x2` must contain floats")
if not isinstance(margin, Real) or margin < 0:
raise ValueError("`margin` must be a non-negative scalar")
y = asarray(y)
if y.size == 1:
y = np.array(y.item())
if not y.ndim == 0 and not (y.ndim == 1 and len(y) == len(x1)):
raise ValueError("`y` must be a scalar or shape-(N,) array of ones")
if y.ndim:
if x1.ndim == 2:
y = y.reshape(-1, 1)
return Tensor._op(MarginRanking, x1, x2, op_args=(y, margin), constant=constant)
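if __name__ == "__main__":
    # Illustrative check (not part of the original module): the loss equals
    # mean(maximum(0, margin - y * (x1 - x2))), shown here with plain numpy.
    _x1 = np.array([0.8, 0.2, 0.5])
    _x2 = np.array([0.1, 0.9, 0.5])
    _y = np.array([1.0, 1.0, -1.0])
    print(np.mean(np.maximum(0.0, 0.1 - _y * (_x1 - _x2))))  # ~0.3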
|
1625479
|
from xml.sax import *
from xml.sax.handler import ContentHandler
from typing import List, Dict, Callable
import sys
import re
import click
class Element:
"""Just like DOM element except it only knows about ancestors"""
# XML tag name
# name: str
# parent element
# parent: Element
# attributes. 'Attributes' class doesn't seem to exist
# attrs: object
# tags captured at this context
# tags: Dict[str,object]
def __init__(self, parent: 'Element', name: str, attrs):
self.name = name
self.attrs = attrs
self.parent = parent
# start with a copy of parents, and we modify it with ours
self.tags = dict() # type: Dict[str,object]
self.tags = parent.tags.copy() if parent else dict()
def __str__(self):
return "%s %s" % (self.name, self.tags)
class TagMatcher:
"""Matches to an attribute of an XML element and captures its value as a 'tag' """
# XML tag name to match
# element: str
# XML attribute name to match
# attr: str
# Name of the variable to capture
# var: str
def __init__(self, element: str, attr: str, var: str):
self.element = element
self.attr = attr
self.var = var
def matches(self, e: Element) -> str:
return e.attrs.get(self.attr) if self.element == e.name or self.element == "*" else None
@staticmethod
def parse(spec: str) -> 'TagMatcher':
"""Parse a string like foo/@bar={zot}"""
m = re.match(r"(\w+|\*)/@([a-zA-Z_\-]+)={(\w+)}", spec)
if m:
return TagMatcher(m.group(1), m.group(2), m.group(3))
else:
raise click.BadParameter("Invalid tag spec: %s" % spec)
class SaxParser(ContentHandler):
"""
Parse XML in a streaming manner, capturing attribute values as specified by TagMatchers
"""
# represents the current element
context = None # type: Element
# matchers: List[TagMatcher]
# receiver: Callable[[Element],None]
def __init__(self, matchers: List[TagMatcher], receiver: Callable[[Element], None]):
super().__init__()
self.matchers = matchers
self.receiver = receiver
def startElement(self, tag, attrs):
self.context = Element(self.context, tag, attrs)
# match tags at this element
for m in self.matchers:
v = m.matches(self.context)
            if v is not None:
self.context.tags[m.var] = v
# yield is more Pythonesque but because this is called from SAX parser
# I'm assuming that won't work
self.receiver(self.context)
def endElement(self, tag):
self.context = self.context.parent
def parse(self, body):
p = make_parser()
p.setContentHandler(self)
p.parse(body)
# Scaffold JUnit parser
# python -m launchable.utils.sax < result.xml
if __name__ == "__main__":
def print_test_case(e: Element):
if e.name == "testcase":
print(e)
SaxParser([
TagMatcher.parse("testcase/@name={testcaseName}"),
TagMatcher.parse("testsuite/@timestamp={timestamp}")
], print_test_case).parse(sys.stdin)
|
1625501
|
import logging
import time, datetime
from thespian.test import *
from thespian.actors import *
askTimeout = datetime.timedelta(seconds=5)
class Whale(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, tuple):
self.send(sender, msg[1] * msg[0])
class Shrimp(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, tuple):
self.send(sender, msg[1])
class TestFuncSimpleActorOperations(object):
def testCreateActorSystem(self, asys):
pass
def testSimpleActor(self, asys):
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
def testSimpleActorAskOneHello(self, asys):
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (1, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout)
assert r == 'hello' * 1
def testSimpleActorAskFiveHello(self, asys):
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (5, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout)
assert r == 'hellohellohellohellohello'
assert r == 'hello' * 5
def testSimpleActorAsk50K(self, asys):
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (10*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout)
assert r == 'hello' * 10 * 1024
def testSimpleActorAsk500K(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (100*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout)
assert r == 'hello' * 100 * 1024
def testSimpleActorAsk5M(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (1024*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout)
assert r == 'hello' * 1024 * 1024
def testSimpleActorAsk10M(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (2*1024*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout * 2)
assert r == 'hello' * 2 * 1024 * 1024
def testSimpleActorAsk20M(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (4*1024*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout * 2)
assert r == 'hello' * 4 * 1024 * 1024
def testSimpleActorAsk25M(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (5*1024*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout * 2)
assert r == 'hello' * 5 * 1024 * 1024
def testSimpleActorAsk50M(self, asys):
actor_system_unsupported(asys, 'multiprocUDPBase')
whale = asys.createActor(Whale)
shrimp = asys.createActor(Shrimp)
testdata = (10*1024*1024, 'hello')
r = asys.ask(shrimp, testdata, askTimeout)
assert r == 'hello'
r = asys.ask(whale, testdata, askTimeout * 4)
assert r == 'hello' * 10 * 1024 * 1024
if __name__ == "__main__":
message = 'helloworld'
fmt = 'Expected size = %d (%.2f MiB), receive size = %s %s, elapsed = %s, throughput = %.2f bytes/s (%.2f MiB/s)'
asys = ActorSystem('multiprocTCPBase')
try:
whale = asys.createActor(Whale)
for scale in [ 1, 10,
10 * 1024,
100 * 1024,
1024 * 1024,
2 * 1024 * 1024,
4 * 1024 * 1024,
5 * 1024 * 1024 ]:
testdata = (scale, message)
max_delay = datetime.timedelta(seconds = 2, # minimum
microseconds=scale * 20)
tstart = datetime.datetime.now()
r = asys.ask(whale, testdata, max_delay)
tend = datetime.datetime.now()
elapsed = tend - tstart
bytesrx = len(r) if r else 0
bytesttl = len(message) * 1.0 + bytesrx
print(fmt % (scale * len(message),
scale * 1.0 * len(message) / 1024 / 1024,
len(r) if r else str(r),
'ok' if bytesrx == scale * len(message)
else 'MISMATCH',
str(elapsed),
bytesttl / elapsed.total_seconds(),
bytesttl / 1024 / 1024 / elapsed.total_seconds()))
finally:
asys.shutdown()
|
1625502
|
import shamirsecret
from unittest import TestCase
# Basic tests for this module. Should output nothing on success.
class TestShamirSecret(TestCase):
def test_math(self):
# first check some of the math...
self.assertTrue(shamirsecret._multiply_polynomials([1,3,4],[4,5]) == [4,9,31,20])
self.assertTrue(shamirsecret._full_lagrange([2,4,5],[14,30,32]) == [43, 168, 150])
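        # As the expected values suggest, _multiply_polynomials combines
        # coefficients over GF(256): field multiply, then XOR the partial
        # products, e.g. for [1,3,4]*[4,5]:
        # 1*4=4, (1*5)^(3*4)=5^12=9, (3*5)^(4*4)=15^16=31, 4*5=20.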
def test_shamirsecret(self):
s = shamirsecret.ShamirSecret(2,'hello')
a=s.compute_share(1)
b=s.compute_share(2)
c=s.compute_share(3)
# should be able to recover from any two...
t = shamirsecret.ShamirSecret(2)
t.recover_secretdata([a,b])
t = shamirsecret.ShamirSecret(2)
t.recover_secretdata([a,c])
t = shamirsecret.ShamirSecret(2)
t.recover_secretdata([b,c])
# ... or even all three!
t = shamirsecret.ShamirSecret(2)
t.recover_secretdata([a,b,c])
# Try a few examples generated by tss.py
#'\x02\x06'
#'\x04\xb4'
shares = []
        shares.append((2, bytearray.fromhex("06")))
        shares.append((4, bytearray.fromhex("b4")))
u = shamirsecret.ShamirSecret(2)
u.recover_secretdata(shares)
self.assertTrue(u.secretdata == 'h')
#'\x03\x1f'
#'\x04\xdc'
#'\x05\xf1'
#'\x06\x86'
#'\x07\xab'
#'\x08\x1b'
shares = []
        shares.append((3, bytearray.fromhex("1f")))
        shares.append((4, bytearray.fromhex("dc")))
        shares.append((5, bytearray.fromhex("f1")))
        shares.append((6, bytearray.fromhex("86")))
        shares.append((7, bytearray.fromhex("ab")))
        shares.append((8, bytearray.fromhex("1b")))
u = shamirsecret.ShamirSecret(2)
u.recover_secretdata(shares)
self.assertTrue(u.secretdata == 'h')
# Try the test from the intro code comment
# create a new object with some secret...
mysecret = shamirsecret.ShamirSecret(2, 'my shared secret')
# get shares out of it...
a = mysecret.compute_share(4)
b = mysecret.compute_share(6)
c = mysecret.compute_share(1)
d = mysecret.compute_share(2)
# Recover the secret value
newsecret = shamirsecret.ShamirSecret(2)
newsecret.recover_secretdata([a,b,c]) # note, two would do...
# d should be okay...
self.assertTrue(newsecret.is_valid_share(d))
# change a byte
        d[1][3] = (d[1][3] + 1) % 256
# but not now...
self.assertTrue(newsecret.is_valid_share(d) is False)
|
1625526
|
from numpy.random.mtrand import RandomState
import numpy as np
from .abstract import Agent
epsilon_greedy_args = {
'epsilon': 0.01,
'random_seed': np.random.randint(2 ** 31 - 1),
    # Select an action that is guaranteed to differ from the action
    # that would have been selected had the epsilon-greedy policy
    # not been applied.
    'epsilon_pure_new': True,
    # In the epsilon case, bias selection towards the worst actions.
    'epsilon_select_worse': False,
}
class EpsilonGreedy(Agent):
def __init__(self, config, agent):
super(EpsilonGreedy, self).__init__(config)
self.agent = agent
self.rng = RandomState(self.config.random_seed)
def train(self, observation, action, reward, done = False):
self.agent.train(observation, action, reward, done)
def act(self, observation, reward, done):
greedy_action = self.agent.act(observation, reward, done)
if self.rng.choice([True, False], p = [self.config.epsilon, 1.0 - self.config.epsilon]):
if self.config.epsilon_select_worse:
product_probas = greedy_action['ps-a']
product_probas = (1.0 - product_probas) # Inversion of probabilities.
else:
product_probas = np.ones(self.config.num_products)
if self.config.epsilon_pure_new:
product_probas[greedy_action['a']] = 0.0
product_probas = product_probas / np.sum(product_probas)
epsilon_action = self.rng.choice(
self.config.num_products,
p = product_probas
)
return {
**super().act(observation, reward, done),
**{
'a': epsilon_action,
'ps': self.config.epsilon * product_probas[epsilon_action],
'ps-a': self.config.epsilon * product_probas,
'greedy': False,
'h0': greedy_action['a']
}
}
else:
return {
**greedy_action,
'greedy': True,
'ps': (1.0 - self.config.epsilon) * greedy_action['ps'],
'ps-a': (1.0 - self.config.epsilon) * greedy_action['ps-a'],
}
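    # Note: the returned 'ps' is the joint probability of branch and action:
    # epsilon * p_explore(a) when exploring, and (1 - epsilon) * ps_greedy(a)
    # when the greedy action is kept.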
def reset(self):
self.agent.reset()
|
1625537
|
import time
ZERO = 0
def f1(n):
"Arbitrary test function."
i = 0
x = 1
while i<n:
j = 0 #ZERO
while j<=i:
j = j + 1
x = x + (i&j)
i = i + 1
return x
try:
import pypyjit
except ImportError:
print "No jit"
else:
pypyjit.enable(f1.func_code)
res = f1(2117)
print res
N = 5
start = time.time()
for i in range(N):
assert f1(2117) == res
end = time.time()
print '%d iterations, time per iteration: %s' % (N, (end-start)/N)
|
1625550
|
from org.transcrypt.stubs.browser import __pragma__
from org import threejs as three
def pad_wrap(min, max, val):
if val < min:
return max
if val > max:
return min
return val
XWRAP = 0
XNWRAP = 0
YWRAP = 0
YNWRAP = 0
def set_limits(x: float, y: float):
    global XWRAP, XNWRAP, YWRAP, YNWRAP
XWRAP = int(x)
XNWRAP = -1 * XWRAP
YWRAP = int(y)
YNWRAP = -1 * YWRAP
def wrap(obj: three.Object3d):
x, y, z = obj.position.x, obj.position.y, obj.position.z
x = pad_wrap(XNWRAP, XWRAP, x)
y = pad_wrap(YNWRAP, YWRAP, y)
obj.position.set(x, y, z)
def clamp(val, low, high):
return max(min(val, high), low)
def sign(val):
if val > 0:
return 1
if val < 0:
return -1
return 0
def now():
"""absolute time in decimal seconds"""
d = __new__(Date)
return d.getTime() / 1000.0
def set_element(id, value):
document.getElementById(id).innerHTML = value
class AABB:
def __init__(self, width, height, center):
self.hw = width / 2.0
        self.hh = height / 2.0
self.position = center
def contains(self, item):
x = self.position.x
y = self.position.y
h = self.hh
w = self.hw
return item.x > x - w and item.x < x + w and item.y > y - h and item.y < y + h
def update(self, pos):
self.position = pos
class FPSCounter:
def __init__(self, hud_element):
self.frames = [.1]
for n in range(99):
self.frames.append(.1)
self.next_frame = 0
self.average = 0
self.visible = True
self.element = hud_element
def update(self, t):
self.frames[self.next_frame] = t
self.next_frame += 1
if self.next_frame > 99:
self.next_frame = 0
        total = 0
        for n in range(100):
            total += self.frames[n]
self.average = total * 10
if self.visible:
# @todo: need a string formatting option to print out decimal MS
self.element.innerHTML = "{} fps".format(int(1000 / self.average))
def advance(cr, value):
"""used by coroutines for updating without 'gsend' everywhere"""
__pragma__('gsend')
cr.send(value)
__pragma__('nogsend')
def coroutine(loop, callback):
callback_fn = callback if callback is not None else lambda a: a
def coroutine_generator():
alive = True
result = None
while alive:
next_value = yield
alive, result = loop(next_value)
yield result
yield callback_fn(result)
cr = coroutine_generator()
cr.advance = lambda a: advance(cr, a)
return cr
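# Usage sketch: coroutine(loop, cb) builds a generator driven via .advance(x);
# values sent in are passed to `loop`, which returns (alive, result), and once
# `loop` reports alive == False the callback receives the final result.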
def timer(duration, loop, callback):
expires_at = now() + duration
loop_fn = loop if loop is not None else lambda a: (True, a)
callback_fn = callback if callback is not None else lambda a: a
def timer_coroutine():
alive = True
result = None
while alive:
next_value = yield
alive, result = loop_fn(next_value)
alive = alive and now() < expires_at
yield result
yield callback_fn(result)
tc = timer_coroutine()
tc.advance = lambda a: advance(tc, a)
return tc
|
1625564
|
import math
import torch
import torch.nn as nn
from .ocean import Ocean_
from .oceanplus import OceanPlus_
from .oceanTRT import OceanTRT_
from .siamfc import SiamFC_
from .connect import box_tower, AdjustLayer, AlignHead, Corr_Up, MultiDiCorr, OceanCorr
from .backbones import ResNet50, ResNet22W
from .mask import MMS, MSS
from .modules import MultiFeatureBase
import os
import sys
from collections import OrderedDict
sys.path.append('../lib')
import models.online.classifier.features as clf_features
import models.online.classifier.initializer as clf_initializer
import models.online.classifier.optimizer as clf_optimizer
import models.online.classifier.linear_filter as target_clf
from online import TensorList, load_network
class Ocean(Ocean_):
def __init__(self, align=False, online=False):
super(Ocean, self).__init__()
self.features = ResNet50(used_layers=[3], online=online) # in param
self.neck = AdjustLayer(in_channels=1024, out_channels=256)
self.connect_model = box_tower(inchannels=256, outchannels=256, towernum=4)
self.align_head = AlignHead(256, 256) if align else None
class OceanTRT(OceanTRT_):
def __init__(self, online=False, align=False):
super(OceanTRT, self).__init__()
self.features = ResNet50(used_layers=[3], online=online) # in param
self.neck = AdjustLayer(in_channels=1024, out_channels=256)
self.connect_model0 = MultiDiCorr(inchannels=256, outchannels=256)
self.connect_model1 = box_tower(inchannels=256, outchannels=256, towernum=4)
self.connect_model2 = OceanCorr()
class OceanPlus(OceanPlus_):
def __init__(self, online=False, mms=False):
super(OceanPlus, self).__init__()
self.features = ResNet50(used_layers=[3], online=online) # in param
self.neck = AdjustLayer(in_channels=1024, out_channels=256)
self.connect_model = box_tower(inchannels=256, outchannels=256, towernum=4)
if mms:
self.mask_model = MMS()
else:
self.mask_model = MSS()
#class OceanPlusTRT(OceanPlusTRT_):
# def __init__(self, online=False):
# super(OceanPlusTRT, self).__init__()
# self.features = ResNet50(used_layers=[3], online=online) # in param
# self.neck = AdjustLayer(in_channels=1024, out_channels=256)
# self.connect_model0 = MultiDiCorr(inchannels=256, outchannels=256)
# self.connect_model1 = box_tower(inchannels=256, outchannels=256, towernum=4)
# self.connect_model2 = OceanCorr()
# self.mask_model = MultiRefineTRT(addCorr=True, mulOradd='add')
# ------------------------------
# SiamDW in CVPR2019
# ------------------------------
class SiamDW(SiamFC_):
def __init__(self, **kwargs):
"""
only SiamDW here
"""
super(SiamDW, self).__init__(**kwargs)
self.features = ResNet22W()
self.connect_model = Corr_Up()
# ================================
# Some functions for online model
# ================================
class OninleRes18(MultiFeatureBase):
"""
args:
output_layers: List of layers to output.
net_path: Relative or absolute net path (default should be fine).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_layers = ['layer3']
def initialize(self, siam_net):
self.net = siam_net
self.layer_stride = {'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'classification': 16, 'fc': None}
self.layer_dim = {'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'classification': 256,'fc': None}
if isinstance(self.pool_stride, int) and self.pool_stride == 1:
self.pool_stride = [1]*len(self.output_layers)
def free_memory(self):
if hasattr(self, 'net'):
del self.net
if hasattr(self, 'iou_predictor'):
del self.iou_predictor
if hasattr(self, 'iounet_backbone_features'):
del self.iounet_backbone_features
if hasattr(self, 'iounet_features'):
del self.iounet_features
def dim(self):
return TensorList([self.layer_dim[l] for l in self.output_layers])
def stride(self):
return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])
def extract(self, im: torch.Tensor):
# im = im/255 # remove this for siam_net
# im -= self.mean
# im /= self.std
im = im.cuda()
with torch.no_grad():
output_features = self.net.extract_for_online(im)
return TensorList([output_features])
class NetWrapper:
"""Used for wrapping networks in pytracking.
Network modules and functions can be accessed directly as if they were members of this class."""
_rec_iter=0
def __init__(self, net_path, use_gpu=True):
self.net_path = net_path
self.use_gpu = use_gpu
self.net = None
def __getattr__(self, name):
if self._rec_iter > 0:
self._rec_iter = 0
return None
self._rec_iter += 1
try:
ret_val = getattr(self.net, name)
except Exception as e:
self._rec_iter = 0
raise e
self._rec_iter = 0
return ret_val
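    # _rec_iter bounds the recursion: if 'net' itself is missing, looking up
    # self.net inside __getattr__ would re-enter __getattr__; the counter
    # makes the second entry return None instead of recursing forever.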
def load_network(self):
self.net = load_network(self.net_path)
if self.use_gpu:
self.cuda()
self.eval()
def initialize(self):
self.load_network()
class NetWithBackbone(NetWrapper):
"""Wraps a network with a common backbone.
Assumes the network have a 'extract_backbone_features(image)' function."""
def initialize(self, siam_net):
super().initialize()
self._mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
self._std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
self.net = siam_net
def preprocess_image(self, im: torch.Tensor):
"""Normalize the image with the mean and standard deviation used by the network."""
im = im/255
im -= self._mean
im /= self._std
im = im.cuda()
return im
def extract_backbone(self, im: torch.Tensor):
"""Extract backbone features from the network.
Expects a float tensor image with pixel range [0, 255]."""
im = self.preprocess_image(im)
return self.net.extract_for_online(im)
class ONLINEnet(nn.Module):
"""The ONLINE network.
args:
feature_extractor: Backbone feature extractor network. Must return a dict of feature maps
classifier: Target classification module.
bb_regressor: Bounding box regression module.
classification_layer: Name of the backbone feature layer to use for classification.
bb_regressor_layer: Names of the backbone layers to use for bounding box regression.
train_feature_extractor: Whether feature extractor should be trained or not."""
def __init__(self, feature_extractor, classifier, classification_layer, train_feature_extractor=True):
super().__init__()
self.feature_extractor = feature_extractor
self.classifier = classifier
self.classification_layer = [classification_layer] if isinstance(classification_layer, str) else classification_layer
# self.output_layers = sorted(list(set(self.classification_layer + self.bb_regressor_layer)))
self.output_layers = sorted(list(set(self.classification_layer)))
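        # ImageNet channel mean/std, shaped (1, C, 1, 1) for NCHW broadcasting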
self._mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
self._std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
if not train_feature_extractor:
for p in self.feature_extractor.parameters():
p.requires_grad_(False)
def forward(self, train_imgs, test_imgs, train_bb, test_proposals, *args, **kwargs):
"""Runs the ONLINE network the way it is applied during training.
The forward function is ONLY used for training. Call the individual functions during tracking.
args:
train_imgs: Train image samples (images, sequences, 3, H, W).
test_imgs: Test image samples (images, sequences, 3, H, W).
            train_bb: Target boxes (x,y,w,h) for the train images. Dims (images, sequences, 4).
test_proposals: Proposal boxes to use for the IoUNet (bb_regressor) module.
*args, **kwargs: These are passed to the classifier module.
returns:
test_scores: Classification scores on the test samples.
iou_pred: Predicted IoU scores for the test_proposals."""
assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'
# Extract backbone features
train_feat = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:]))
test_feat = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]))
# Classification features
train_feat_clf = self.get_backbone_clf_feat(train_feat)
test_feat_clf = self.get_backbone_clf_feat(test_feat)
# Run classifier module
target_scores = self.classifier(train_feat_clf, test_feat_clf, train_bb, *args, **kwargs)
# Get bb_regressor features
# train_feat_iou = self.get_backbone_bbreg_feat(train_feat)
# test_feat_iou = self.get_backbone_bbreg_feat(test_feat)
# Run the IoUNet module
# iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou, train_bb, test_proposals)
# return target_scores, iou_pred
return target_scores
# def get_backbone_clf_feat(self, backbone_feat):
# # feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer})
# feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer}) # zzp
# if len(self.classification_layer) == 1:
# return feat[self.classification_layer[0]]
# return feat
def get_backbone_clf_feat(self, backbone_feat): # zzp
return backbone_feat
# def get_backbone_bbreg_feat(self, backbone_feat):
# return [backbone_feat[l] for l in self.bb_regressor_layer]
def extract_classification_feat(self, backbone_feat):
return self.classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat))
def extract_backbone_features(self, im, layers=None):
if layers is None:
layers = self.output_layers
return self.feature_extractor.extract_for_online(im) # zzp
def extract_features(self, im, layers=None):
if layers is None:
# layers = self.bb_regressor_layer + ['classification']
layers = ['classification']
if 'classification' not in layers:
return self.feature_extractor(im, layers)
backbone_layers = sorted(list(set([l for l in layers + self.classification_layer if l != 'classification'])))
all_feat = self.feature_extractor(im, backbone_layers)
all_feat['classification'] = self.extract_classification_feat(all_feat)
return OrderedDict({l: all_feat[l] for l in layers})
def preprocess_image(self, im: torch.Tensor):
"""Normalize the image with the mean and standard deviation used by the network."""
im = im/255
im -= self._mean
im /= self._std
im = im.cuda()
return im
def extract_backbone(self, im: torch.Tensor):
"""Extract backbone features from the network.
Expects a float tensor image with pixel range [0, 255]."""
im = self.preprocess_image(im)
return self.extract_backbone_features(im)
def ONLINEnet50(filter_size=4, optim_iter=5, optim_init_step=0.9, optim_init_reg=0.1,
classification_layer='layer3', feat_stride=16, clf_feat_blocks=0,
clf_feat_norm=True, init_filter_norm=False, final_conv=True,
out_feature_dim=512, init_gauss_sigma=0.85, num_dist_bins=100, bin_displacement=0.1,
mask_init_factor=3.0, score_act='relu', act_param=None, target_mask_act='sigmoid',
detach_length=float('Inf'), backbone=None):
# Backbone
backbone_net = backbone # siamnet
# Feature normalization
norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))
# Classifier features
clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
final_conv=final_conv, norm_scale=norm_scale,
out_dim=out_feature_dim)
# Initializer for the classifier
initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
feature_dim=out_feature_dim)
# Optimizer for the classifier
optimizer = clf_optimizer.ONLINESteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
init_step_length=optim_init_step,
init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
num_dist_bins=num_dist_bins,
bin_displacement=bin_displacement,
mask_init_factor=mask_init_factor,
score_act=score_act, act_param=act_param, mask_act=target_mask_act,
detach_length=detach_length)
# The classifier module
classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)
# ONLINE network
net = ONLINEnet(feature_extractor=backbone_net, classifier=classifier, classification_layer=classification_layer)
return net
|
1625579
|
import pytest
@pytest.mark.parametrize(
"url",
[
"https://example.com",
],
)
def test_valid_url(new_command, url):
"""Test that valid URLs are accepted."""
assert new_command.validate_url(url)
@pytest.mark.parametrize(
"url",
[
"not a URL!", # Free text.
],
)
def test_invalid_url(new_command, url):
"""Test that invalid URLs are rejected."""
with pytest.raises(ValueError):
new_command.validate_url(url)
|
1625598
|
import os
import shutil
import tarfile
import tempfile
import lnt.server.config
from lnt.util import logger
class Instance(object):
"""
Wrapper object for representing an LNT instance.
"""
@staticmethod
def frompath(path):
"""
        frompath(path) -> Instance
Load an LNT instance from the given instance specifier. The instance
path can be one of:
* The directory containing the instance.
* The instance config file.
* A tarball containing an instance.
"""
# Accept paths to config files, or to directories containing 'lnt.cfg'.
tmpdir = None
if os.path.isdir(path):
config_path = os.path.join(path, 'lnt.cfg')
elif tarfile.is_tarfile(path):
# Accept paths to tar/tgz etc. files, which we automatically unpack
# into a temporary directory.
tmpdir = tempfile.mkdtemp(suffix='lnt')
logger.info("extracting input tarfile %r to %r" % (path, tmpdir))
tf = tarfile.open(path)
tf.extractall(tmpdir)
# Find the LNT instance inside the tar file. Support tarballs that
# either contain the instance directly, or contain a single
# subdirectory which is the instance.
if os.path.exists(os.path.join(tmpdir, "lnt.cfg")):
config_path = os.path.join(tmpdir, "lnt.cfg")
else:
filenames = os.listdir(tmpdir)
if len(filenames) != 1:
raise Exception("Unable to find LNT instance "
"inside tarfile")
config_path = os.path.join(tmpdir, filenames[0], "lnt.cfg")
else:
config_path = path
if not config_path or not os.path.exists(config_path):
raise Exception("Invalid config: %r" % config_path)
config_data = {}
with open(config_path) as f:
exec(f.read(), config_data)
config = lnt.server.config.Config.from_data(config_path, config_data)
return Instance(config_path, config, tmpdir)
def __init__(self, config_path, config, tmpdir=None):
self.config_path = config_path
self.config = config
self.tmpdir = tmpdir
self.databases = dict()
for name in self.config.get_database_names():
self.databases[name] = self.config.get_database(name)
def __del__(self):
# If we have a temporary dir, clean it up now.
if self.tmpdir is not None:
shutil.rmtree(self.tmpdir)
def get_database(self, name):
return self.databases.get(name, None)
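# Usage sketch (hypothetical paths):
#   inst = Instance.frompath("/srv/lnt/instance")       # directory with lnt.cfg
#   inst = Instance.frompath("/tmp/instance.tar.gz")    # tarball, auto-extracted
#   db = inst.get_database("default")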
|
1625604
|
import mpylib
import opcode
import ulogging
ulogging.basicConfig(level=ulogging.DEBUG)
opcode.config.MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE = 1
op = mpylib.get_opcode_ns()
with open("testout.mpy", "wb") as f:
mpy = mpylib.MPYWriter(f)
mpy.write_header(
mpylib.MPY_VERSION,
mpylib.MICROPY_PY_BUILTINS_STR_UNICODE | mpylib.MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
31
)
co = mpylib.CodeType()
co.mpy_stacksize = 2
co.mpy_excstacksize = 0
co.co_flags = 0
co.co_argcount = 0
co.co_kwonlyargcount = 0
co.mpy_def_pos_args = 0
co.co_lnotab = b'\x00\x00'
co.co_cellvars = ()
bc = mpylib.Bytecode()
bc.add(op.LOAD_NAME, "print")
bc.load_int(-65)
bc.add(op.LOAD_CONST_OBJ, "string")
bc.add(op.CALL_FUNCTION, 2, 0)
bc.add(op.POP_TOP)
bc.add(op.LOAD_CONST_NONE)
bc.add(op.RETURN_VALUE)
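    # The bytecode above roughly corresponds to the module body:
    #   print(-65, "string")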
co.co_code = bc.get_bc()
co.co_names = bc.co_names
co.mpy_consts = bc.co_consts
co.co_name = "<module>"
co.co_filename = "testmpy.py"
co.mpy_codeobjs = ()
mpy.write_code(co)
|
1625656
|
from .api import nets
from .api import detect
from .api import draw
from .api import crop
from .api import show
|
1625668
|
from django.test import TestCase
from moneyed import Money
from .. import enums, models
from .factories import WalletFactory, WalletTrxFactory
class TransactionManagerTests(TestCase):
def test_compute_balances(self):
wallet_obj = WalletFactory.create()
wallet_obj1 = WalletFactory.create()
# INCOMING FINALIZED -> Supposed to count
# 200
trxs = WalletTrxFactory.create_batch(
wallet=wallet_obj,
size=2,
amount=Money(100, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.FINALIZED) for x in trxs]
# INCOMING PENDING -> NOT Supposed to count
# 0
trxs = WalletTrxFactory.create_batch(
wallet=wallet_obj1,
size=2,
amount=Money(100, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
# OUTGOING FINALIZED -> Supposed to count
# -200
trxs = WalletTrxFactory.create_batch(
size=2,
amount=-Money(100, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.FINALIZED) for x in trxs]
# OUTGOING PENDING -> Supposed to count
# -20
trxs = WalletTrxFactory.create_batch(
size=2,
amount=-Money(10, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
# INCOMING CANCELED -> NOT Supposed to count
# 0
trxs = WalletTrxFactory.create_batch(
wallet=wallet_obj1,
size=2,
amount=Money(200, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.FINALIZED) for x in trxs]
[x.set_status(enums.TrxStatus.CANCELLATION) for x in trxs]
# OUTGOING CANCELED -> NOT Supposed to count
# 0
trxs = WalletTrxFactory.create_batch(
wallet=wallet_obj,
size=2,
amount=-Money(200, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.CANCELLATION) for x in trxs]
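        # Countable: 2 incoming finalized (+200), 2 outgoing finalized (-200)
        # and 2 outgoing pending (-20) -> 6 transactions, net balance -20.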
result = models.WalletTransaction.objects.countable()
self.assertEqual(result.count(), 6)
system_balance = result.balance()
self.assertEqual(system_balance, Money(-20, wallet_obj.currency))
balance = wallet_obj.transactions.balance()
balance1 = wallet_obj1.transactions.balance()
self.assertEqual(balance, Money(200, wallet_obj.currency))
self.assertEqual(balance1, Money(0, wallet_obj1.currency))
def test_filter_transaction_by_status(self):
wallet_obj = WalletFactory.create()
trxs = WalletTrxFactory.create_batch(
size=3,
amount=Money(100, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
trxs = WalletTrxFactory.create_batch(
size=4,
amount=Money(50, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.FINALIZED) for x in trxs]
trxs = WalletTrxFactory.create_batch(
size=5,
amount=Money(33, wallet_obj.currency)
)
[x.set_status(enums.TrxStatus.PENDING) for x in trxs]
[x.set_status(enums.TrxStatus.FINALIZED) for x in trxs]
[x.set_status(enums.TrxStatus.CANCELLATION) for x in trxs]
result = models.WalletTransaction.objects.by_status(status=None)
self.assertEqual(result.count(), 12)
status = enums.TrxStatus.PENDING
result = models.WalletTransaction.objects.by_status(status=status)
self.assertEqual(result.count(), 3)
status = enums.TrxStatus.FINALIZED
result = models.WalletTransaction.objects.by_status(status=status)
self.assertEqual(result.count(), 4)
status = enums.TrxStatus.CANCELLATION
result = models.WalletTransaction.objects.by_status(status=status)
self.assertEqual(result.count(), 5)
|
1625699
|
def test_get_node_metrics(CLIENT):
def _assert(response):
# e.g. docs/api_reponses/rpc_metrics.json
assert isinstance(response, list)
assert len(response) > 300
_assert(CLIENT.get_node_metrics())
def test_get_node_metric(CLIENT):
def _assert(response):
# e.g. docs/api_reponses/rest_metrics.json
assert isinstance(response, list)
assert len(response) == 1
_assert(CLIENT.get_node_metric("mem_deploy_gossiper"))
def test_get_node_peers(CLIENT):
def _assert(response):
# e.g. docs/api_reponses/rpc_info_get_peers.json
assert isinstance(response, list)
_assert(CLIENT.get_node_peers())
def test_get_node_status(CLIENT):
def _assert(response):
# e.g. docs/api_reponses/rpc_info_get_status.json
assert isinstance(response, dict)
_assert(CLIENT.get_node_status())
def test_get_validator_changes(CLIENT):
def _assert(response):
# e.g. docs/api_reponses/rpc_info_get_status.json
assert isinstance(response, list)
_assert(CLIENT.get_validator_changes())
|
1625712
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SslSid(A10BaseClass):
"""Class Description::
SSL session ID persistence.
Class ssl-sid supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param dont_honor_conn_rules: {"default": 0, "optional": true, "type": "number", "description": "Do not observe connection rate rules", "format": "flag"}
:param name: {"description": "SSL session ID persistence template name", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}
:param timeout: {"description": "Persistence timeout (in minutes)", "format": "number", "default": 5, "optional": true, "maximum": 2000, "minimum": 1, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/template/persist/ssl-sid/{name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "name"]
self.b_key = "ssl-sid"
self.a10_url="/axapi/v3/slb/template/persist/ssl-sid/{name}"
self.DeviceProxy = ""
self.dont_honor_conn_rules = ""
self.name = ""
self.timeout = ""
self.uuid = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
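# Example: SslSid(name="persist1", timeout=10) builds a wrapper whose fields
# map onto /axapi/v3/slb/template/persist/ssl-sid/persist1 (name is required).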
|
1625809
|
from ally import *
from xml.etree.ElementTree import tostring
## These values are from Ally Invest API Applications page.
CONSUMER_KEY = "CONSUMER KEY"
CONSUMER_SECRET = "CONSUMER SECRET"
OAUTH_TOKEN = "OAUTH TOKEN"
OAUTH_SECRET = "OAUTH TOKEN SECRET"
if __name__ == "__main__":
ally = AllyAPI(OAUTH_SECRET, OAUTH_TOKEN, CONSUMER_KEY, response_format="json")
print(ally.get_member_profile())
print(ally.get_status())
print(ally.get_quote("AAPL"))
print(ally.get_quote(["AAPL", "MSFT", "XLNX", "NXPI"]))
print(ally.news_search("AAPL"))
print(ally.news_search(["AAPL", "MSFT", "XLNX", "NXPI"]))
##NOTE: this is the preferred way to get quotes! The response classes are a little
## easier to work with than the JSON.
quote_request = QuotesRequest(symbols=['SND', 'PRU', 'HMC'])
response = quote_request.execute(ally)
print(response.get_raw_data())
    ticker_list = ["SND", "PRU", "HMC"]  # hypothetical list; any symbols work
    quote_request = QuotesRequest(symbols=ticker_list)
response = quote_request.execute(ally)
for quote in response.get_quotes():
# process quote data
print(quote)
pass
accounts_balances_request = AccountsBalancesRequest()
accounts_balances_response = accounts_balances_request.execute(ally)
print(accounts_balances_response.get_raw_data())
    # Placing orders -- note that these must use XML as FIXML is passed to the calls
    account = "00000000"  # a string avoids the invalid leading-zero int literal
# buy one share of intel at $50 for account number 00000000, print the results
print(tostring(ally.order_common_stock("INTC", 1, ORDER_TYPE.LIMIT, account,
SIDE.BUY, TIME_IN_FORCE.DAY, 50), 'utf-8', method="xml"))
# sell one share of Apple at market price for account number 00000000, print the results
print(tostring(ally.order_common_stock("AAPL", 1, ORDER_TYPE.MARKET, account, SIDE.SELL),
'utf-8', method="xml"))
|
1625857
|
from mnist import MNIST
import numpy as np
import math
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score
from collections import Counter
import matplotlib.pyplot as plt
import time
# --------------------------------------------------------
# Global Variables
training_size = 3000
validation_size = 1000
testing_size = 1000
# --------------------------------------------------------
"""
Predicts the instance labels using the top k neighbours
"""
def predict(neighbours, k):
top_k = [Counter(x[:k]) for x in neighbours]
predicted_labels = [x.most_common(1)[0][0] for x in top_k]
return predicted_labels
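# e.g. for one test point whose sorted neighbour labels start [3, 3, 7, ...],
# k = 3 gives Counter({3: 2, 7: 1}) and the predicted label 3.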
# --------------------------------------------------------
"""
Finds the optimal value of k using cross validation.
The value of k with minimum error is the optimal one
"""
def find_k(neighbours, real_validation_labels, similarity_measure):
k_values = []
error_values = []
real_validation_labels = list(real_validation_labels)
"""
    It's a convention to try k from 1 to sqrt(N), where N is the size of the training data
"""
    for k in range(1, math.ceil(math.sqrt(training_size)) + 1):
        predicted_labels = predict(neighbours, k)
# check accuracy
acc = accuracy_score(real_validation_labels, predicted_labels)
k_values.append(k)
error_values.append(1 - acc)
if similarity_measure == 1:
s = "Cosine Similarity"
else:
s = "Euclidean Distance"
k = k_values[np.argmin(error_values)]
""" Plotting the Validation Error Curve """
plt.ylabel('Validation Error', fontsize=14)
plt.xlabel('K', fontsize=14)
plt.title("Validation Error Curve using %s" % s, fontsize=16, color='green')
plt.plot(k_values, error_values, 'bo--')
figure = plt.gcf() # get current figure
figure.set_size_inches(13, 7)
plt.savefig("Validation Error Curve using %s.png" % s, dpi=300)
plt.clf()
"""
The value of K which gave minimum validation error is the optimal value of k
"""
return k_values[np.argmin(error_values)]
# --------------------------------------------------------
def knn(train_images, test_images, train_labels, similarity_measure):
if similarity_measure == 1:
# compute cosine similarity
v = [[np.dot(x, y)/(np.linalg.norm(x) * np.linalg.norm(y)) for y in train_images] for x in test_images]
# v = cosine_similarity(test_images, train_images)
r = True
else:
# compute euclidean distance
v = [[np.sum((x - y) ** 2) for y in train_images] for x in test_images]
r = False
# append labels
v = [[(x[i], train_labels[i]) for i in range(len(x))] for x in v]
# sort in descending order
[x.sort(key=lambda y: y[0], reverse=r) for x in v]
# get all neighbours
neighbours = [[n for similarity, n in x] for x in v]
return neighbours
# --------------------------------------------------------
"""
Note: This is an experiment in which the optimal value of K is first determined
using cross validation and then used to classify the test images. The experiment
is run twice: once with Cosine Similarity and once with Euclidean Distance.
"""
def run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels,
similarity_measure):
"""
First finding the optimal value of K using validation images
and then using it to classify test images
"""
if similarity_measure == 1:
s = "Cosine Similarity"
else:
s = "Euclidean Distance"
print("------------------------------------------")
print("Running Experiment using %s" % s)
print("------------------------------------------")
print("Finding Optimal Value of K ...")
neighbours_labels = knn(train_images, validation_images, train_labels, similarity_measure)
k = find_k(neighbours_labels, validation_labels, similarity_measure)
print("Optimal Value of K using Cross Validation is: %d" % k)
print("Classifying Test Images ...")
    start_time = time.perf_counter()
neighbours_labels = knn(train_images, test_images, train_labels, similarity_measure)
predicted_labels = predict(neighbours_labels, k)
print("Prediction Time: %.2f seconds" % (time.clock() - start_time))
print("Test Images Classified!")
accuracy = accuracy_score(test_labels, predicted_labels) * 100
print("KNN with k = %d" % k)
print("Accuracy: %f" % accuracy, "%")
print("---------------------\n")
# --------------------------------------------------------
def main():
# load data
data = MNIST('samples')
train_images, train_labels = data.load_training()
test_images, test_labels = data.load_testing()
validation_images = np.array(train_images[training_size:training_size + validation_size])
validation_labels = np.array(train_labels[training_size:training_size + validation_size])
train_images = np.array(train_images[:training_size])
train_labels = np.array(train_labels[:training_size])
test_images = np.array(test_images[:testing_size])
test_labels = np.array(test_labels[:testing_size])
"""Rescaling Data"""
train_images = train_images/255
test_images = test_images/255
validation_images = validation_images/255
""" run knn with cosine similarity as similarity measure """
run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels, 1)
""" run knn with euclidean distance as similarity measure """
run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels, 2)
# --------------------------------------------------------
# get things rolling
main()
|
1625882
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from bitstring import BitArray
from armulator.armv6.configurations import arch_version
class Smull(AbstractOpcode):
def __init__(self, setflags, m, d_hi, d_lo, n):
super(Smull, self).__init__()
self.setflags = setflags
self.m = m
self.d_hi = d_hi
self.d_lo = d_lo
self.n = n
def execute(self, processor):
if processor.condition_passed():
result = processor.registers.get(self.n).int * processor.registers.get(self.m).int
f_result = BitArray(int=result, length=64)
processor.registers.set(self.d_hi, f_result[0:32])
processor.registers.set(self.d_lo, f_result[32:])
if self.setflags:
processor.registers.cpsr.set_n(f_result[0])
processor.registers.cpsr.set_z(not f_result.any(True))
if arch_version() == 4:
                    processor.registers.cpsr.set_c(False)  # unknown
                    processor.registers.cpsr.set_v(False)  # unknown
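        # e.g. n = -2, m = 3 -> result = -6 = 0xFFFFFFFFFFFFFFFA (64-bit),
        # so d_hi receives 0xFFFFFFFF and d_lo receives 0xFFFFFFFA.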
|
1625887
|
import datetime
import getpass
import json
import sys
import mgear
import mgear.core.icon as ico
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from mgear import shifter
from mgear.core import transform, node, attribute, applyop, pyqt, utils, curve
from mgear.vendor.Qt import QtCore, QtWidgets
from pymel import versions
from pymel.core import datatypes
from mgear.core import string
from mgear.simpleRig import simpleRigUI as srUI
CTL_TAG_ATTR = "is_simple_rig_ctl"
RIG_ROOT = "rig"
if sys.version_info[0] == 2:
string_types = (basestring, )
else:
string_types = (str, )
# driven attr ===========================================
def _driven_attr(dagNode):
"""message attribute to store a list of object affected
by the root or pivot
Args:
dagNode (PyNode): dagNode
Returns:
Attr: Attribute
"""
if not dagNode.hasAttr("drivenElements"):
dagNode.addAttr("drivenElements", attributeType='message', multi=True)
return dagNode.attr("drivenElements")
def _add_to_driven_attr(dagNode, driven):
"""add one or more elements to the driven list
    should check it is not already in another driven attr and remove it from the others
Args:
dagNode (PyNode): dagNode with the attribute
driven (PyNode): driven elements
"""
d_attr = _driven_attr(dagNode)
if not isinstance(driven, list):
driven = [driven]
for d in driven:
if not _is_valid_ctl(d):
_remove_from_driven_attr(d)
ni = _get_driven_attr_next_available_index(d_attr)
pm.connectAttr(d.message,
d_attr.attr("drivenElements[{}]".format(str(ni))))
else:
pm.displayWarning("{} is a simple rig control and can't be "
" driven by another control".format(d))
def _remove_from_driven_attr(driven):
"""Remove one or more elements to the driven attr
Args:
driven (list of dagNode): Driven elements
"""
if not isinstance(driven, list):
driven = [driven]
for x in driven:
for o in x.message.connections(p=True):
if "drivenElements" in o.name():
pm.disconnectAttr(x.message, o)
def _get_from_driven_attr(dagNode):
"""Return a list of all elements in the driven attr as PyNodes
Args:
dagNode (PyNode): Driver dagNode
Returns:
TYPE: Description
"""
d_attr = _driven_attr(dagNode)
return d_attr.inputs()
def _get_driven_attr_next_available_index(d_attr):
"""Get the next available index for the drivenElements attr
Args:
d_attr (attr): driven attribute
Returns:
int: next available index
"""
return attribute.get_next_available_index(d_attr)
# creators ===========================================
def _create_control(name,
t,
radio,
parent=None,
icon="circle",
side="C",
indx=0,
color=17,
driven=None,
sets_config=None):
"""Crete control
Args:
name (str): Name of the control
t (matrix): transform matrix
radio (double): Size Radio
parent (dagNode, optional): Parent Control
icon (str, optional): Icon shape
side (str, optional): Side. Can be C, L or R
indx (int, optional): Index
        color (int, optional): Color
driven (None, optional): Driven elements
sets_config (None, optional): Groups/sets where the new control will be
added
Returns:
dagNode: New control
"""
name = _validate_name(name)
def _set_name(extension):
if side:
fullName = "{}_{}{}_{}".format(name, side, str(indx), extension)
i = 0
while pm.ls(fullName):
i += 1
fullName = "{}_{}{}_{}".format(name, side, str(i), extension)
else:
fullName = "{}_{}".format(name, extension)
return fullName
npo = pm.createNode('transform', n=_set_name("npo"))
npo.setTransformation(t)
if parent:
pm.parent(npo, parent)
ctl = ico.create(npo,
_set_name("ctl"),
t,
color,
icon=icon,
w=radio * 2,
h=radio * 2,
d=radio * 2)
attribute.addAttribute(ctl, "conf_icon", "string", icon)
attribute.addAttribute(ctl, "conf_sets", "string", sets_config)
attribute.addAttribute(ctl, "conf_radio", "float", radio, keyable=False)
attribute.addAttribute(ctl, "conf_color", "long", color, keyable=False)
attribute.addAttribute(ctl, CTL_TAG_ATTR, "bool", True, keyable=False)
attribute.addAttribute(ctl, "edit_mode", "bool", False, keyable=False)
pm.parent(ctl, npo)
attribute.setKeyableAttributes(ctl)
if driven:
if not isinstance(driven, list):
driven = [driven]
_add_to_driven_attr(ctl, driven)
_update_driven(ctl)
grp = _get_sets_grp()
grp.add(ctl)
if sets_config:
for ef in _extra_sets(sets_config):
ef.add(ctl)
return ctl
def _create_base_structure(rigName):
"""Create base structure
Args:
rigName (str): Rig name
Returns:
dagNode: rig root
"""
rig = pm.createNode('transform', n=rigName)
attribute.addAttribute(rig, "is_rig", "bool", True, keyable=False)
attribute.addAttribute(rig, "is_simple_rig", "bool", True, keyable=False)
attribute.addAttribute(rig, "geoUnselectable", "bool", True)
attribute.addAttribute(rig, "rig_name", "string", rigName)
attribute.addAttribute(rig, "user", "string", getpass.getuser())
attribute.addAttribute(rig, "date", "string", str(datetime.datetime.now()))
attribute.addAttribute(rig,
"maya_version",
"string",
str(pm.mel.eval("getApplicationVersionAsFloat")))
attribute.addAttribute(rig, "gear_version", "string", mgear.getVersion())
attribute.addAttribute(rig, "ctl_vis", "bool", True)
attribute.addAttribute(rig, "jnt_vis", "bool", False)
attribute.addAttribute(rig, "quickselA", "string", "")
attribute.addAttribute(rig, "quickselB", "string", "")
attribute.addAttribute(rig, "quickselC", "string", "")
attribute.addAttribute(rig, "quickselD", "string", "")
attribute.addAttribute(rig, "quickselE", "string", "")
attribute.addAttribute(rig, "quickselF", "string", "")
attribute.addAttribute(rig, "synoptic", "string", "")
attribute.addAttribute(rig, "comments", "string", "")
rig.addAttr("rigGroups", at='message', m=1)
rig.addAttr("rigPoses", at='message', m=1)
rig.addAttr("rigCtlTags", at='message', m=1)
# Create sets
meshList = []
ctlList = []
ctlSet = pm.sets(ctlList, n="{}_controllers_grp".format(rigName))
deformersSet = pm.sets(meshList, n="{}_deformers_grp".format(rigName))
compGroup = pm.sets(meshList, n="{}_componentsRoots_grp".format(rigName))
rigSets = pm.sets([ctlSet, deformersSet, compGroup],
n="rig_sets_grp")
pm.connectAttr(rigSets.attr("message"),
"{}.rigGroups[0]".format(rigName))
pm.connectAttr(ctlSet.attr("message"),
"{}.rigGroups[2]".format(rigName))
pm.connectAttr(deformersSet.attr("message"),
"{}.rigGroups[3]".format(rigName))
pm.connectAttr(compGroup.attr("message"),
"{}.rigGroups[4]".format(rigName))
return rig
@utils.one_undo
def _create_simple_rig_root(rigName=RIG_ROOT,
selection=None,
world_ctl=True,
sets_config=None,
ctl_wcm=False,
fix_radio=False,
radio_val=100,
gl_shape="square",
w_shape="circle"):
"""Create the simple rig root
create the simple rig root
have the attr: is_simple_rig and is_rig
should not create if there is a another simple rig root
should have synoptic attr. (synoptic configuration in UI)
use World_ctl should be optional
Args:
rigName (str, optional): Rig Name
selection (dagNode list, optional): Elements selected to be included
in the rig
world_ctl (bool, optional): if True, will create world_ctl
sets_config (None, optional): Groups to include the ctl
        ctl_wcm (bool, optional): If True, the world_ctl will be placed in the
scene world center
fix_radio (bool, optional): If True, will use a fix radio value,
instead of the bbox radio
radio_val (int, optional): Fix value for Radio
gl_shape (str, optional): Global and local control shape
w_shape (str, optional): World control shape
Returns:
dagNode: local control
"""
# check if there is another rig root in the scene
rig_models = _get_simple_rig_root()
if rig_models:
pm.displayWarning("Simple rig root already exist in the "
"scene: {}".format(str(rig_models)))
return
if not selection:
if pm.selected():
selection = pm.selected()
else:
pm.displayWarning("Selection is needed to create the root")
return
volCenter, radio, bb = _get_branch_bbox_data(selection)
if fix_radio:
radio = radio_val
rig = _create_base_structure(rigName)
if ctl_wcm:
t = datatypes.Matrix()
else:
t = transform.getTransformFromPos(volCenter)
# configure selectable geo
connect_selectable(rig, selection)
ctt = None
# create world ctl
if world_ctl:
world_ctl = _create_control("world",
t,
radio * 1.5,
parent=rig,
icon=w_shape,
side=None,
indx=0,
color=13,
driven=None,
sets_config=sets_config)
if versions.current() >= 201650:
ctt = node.add_controller_tag(world_ctl, None)
_connect_tag_to_rig(rig, ctt)
else:
world_ctl = rig
# create global ctl
global_ctl = _create_control("global",
t,
radio * 1.1,
parent=world_ctl,
icon=gl_shape,
side="C",
indx=0,
color=17,
driven=None,
sets_config=sets_config)
if versions.current() >= 201650:
ctt = node.add_controller_tag(global_ctl, ctt)
_connect_tag_to_rig(rig, ctt)
# create local ctl
local_ctl = _create_control("local",
t,
radio,
parent=global_ctl,
icon=gl_shape,
side="C",
indx=0,
color=17,
driven=selection,
sets_config=sets_config)
if versions.current() >= 201650:
ctt = node.add_controller_tag(local_ctl, ctt)
_connect_tag_to_rig(rig, ctt)
return local_ctl
@utils.one_undo
def _create_custom_pivot(name,
side,
icon,
yZero,
selection=None,
parent=None,
sets_config=None):
"""Create a custom pivot control
Args:
name (str): Custompivot control name
side (str): Side. can be C, L or R
icon (str): Control shape
yZero (bool): If True, the control will be placed in the lowest
position of the bbox
selection (list of dagNode, optional): Elements affected by the
custom pivot
parent (dagNode, optional): Parent of the custom pivot. Should be
another ctl
sets_config (str, optional): Sets to add the controls
Returns:
TYPE: Description
"""
# should have an options in UI and store as attr for rebuild
# -side
# -Control Shape
# -Place in base or place in BBOX center
if not selection:
if pm.selected():
selection = pm.selected()
else:
pm.displayWarning("Selection is needed to create the root")
return
if not parent:
if selection and _is_valid_ctl(selection[-1]):
parent = selection[-1]
selection = selection[:-1]
else:
pm.displayWarning("The latest selected element should be a CTL. "
"PARENT is needed!")
return
    # handle the 3rd state for yZero;
    # this state triggers placing the pivot at the world center
    world_center = False
    if yZero > 1:
        yZero = True
        world_center = True
volCenter, radio, bb = _get_branch_bbox_data(selection, yZero)
if volCenter:
        if world_center:
t = datatypes.Matrix()
else:
t = transform.getTransformFromPos(volCenter)
ctl = _create_control(name,
t,
radio,
parent,
icon,
side,
indx=0,
color=14,
driven=selection,
sets_config=sets_config)
# add ctl tag
if versions.current() >= 201650:
parentTag = pm.PyNode(pm.controller(parent, q=True)[0])
ctt = node.add_controller_tag(ctl, parentTag)
_connect_tag_to_rig(ctl.getParent(-1), ctt)
return ctl
# Getters ===========================================
def _get_simple_rig_root():
"""get the root from the scene.
If there is more than one It will return none and print warning
Returns:
dagNode: Rig root
"""
rig_models = [item for item in pm.ls(transforms=True)
if _is_simple_rig_root(item)]
if rig_models:
return rig_models[0]
def connect_selectable(rig, selection):
"""Configure selectable geo
Args:
rig (dagNode): rig root with geo Unselectable attr
selection (list): List of object to connect
"""
# configure selectable geo
for e in selection:
pm.connectAttr(rig.geoUnselectable,
e.attr("overrideEnabled"),
force=True)
e.attr("overrideDisplayType").set(2)
def _get_children(dagNode):
"""Get all children node
Args:
dagNode (PyNode): dagNode to get the childrens
Returns:
list of dagNode: children dagNodes
"""
children = dagNode.listRelatives(allDescendents=True,
type="transform")
return children
def _get_bbox_data(obj=None, yZero=True, *args):
"""Calculate the bounding box data
Args:
obj (None, optional): The object to calculate the bounding box
        yZero (bool, optional): If true, sets the height to the lowest point
*args: Maya dummy
Returns:
        multi: volume center position vector, radius and bounding box (bbox)
"""
volCenter = False
if not obj:
obj = pm.selected()[0]
shapes = pm.listRelatives(obj, ad=True, s=True)
shapes = [shp for shp in shapes if shp.type() == "mesh"]
if shapes:
bb = pm.polyEvaluate(shapes, b=True)
volCenter = [(axis[0] + axis[1]) / 2 for axis in bb]
if yZero:
volCenter[1] = bb[1][0]
radio = max([bb[0][1] - bb[0][0], bb[2][1] - bb[2][0]]) / 1.7
return volCenter, radio, bb
return volCenter, None, None
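# Note: dividing by 1.7 makes the radius ~0.59x the largest horizontal extent,
# i.e. slightly more than half the bounding box, so controls enclose the geo.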
def _get_branch_bbox_data(selection=None, yZero=True, *args):
"""Get the bounding box from a hierachy branch
Args:
selection (None, optional): Description
yZero (bool, optional): Description
*args: Description
Returns:
        multi: absolute center, absolute radius and absolute bbox
"""
absBB = None
absCenter = None
absRadio = 0.5
bbox_elements = []
if not isinstance(selection, list):
selection = [selection]
for e in selection:
bbox_elements.append(e)
for c in _get_children(e):
if c.getShapes():
bbox_elements.append(c)
for e in bbox_elements:
if not _is_valid_ctl(e):
bbCenter, bbRadio, bb = _get_bbox_data(e)
if bbCenter:
if not absBB:
absBB = bb
else:
absBB = [[min(bb[0][0], absBB[0][0]),
max(bb[0][1], absBB[0][1])],
[min(bb[1][0], absBB[1][0]),
max(bb[1][1], absBB[1][1])],
[min(bb[2][0], absBB[2][0]),
max(bb[2][1], absBB[2][1])]]
absCenter = [(axis[0] + axis[1]) / 2 for axis in absBB]
absRadio = max([absBB[0][1] - absBB[0][0],
absBB[2][1] - absBB[2][0]]) / 1.7
    # set the center on the floor
if yZero:
absCenter[1] = absBB[1][0]
return absCenter, absRadio, absBB
# Build and IO ===========================================
def _collect_configuration_from_rig():
"""Collects the configuration from the rig and create a dictionary with it
Returns:
dict: Configuration dictionary
"""
rig_conf_dict = {}
ctl_settings = {}
# get root and name
rig_root = _get_simple_rig_root()
    # get controls list in hierarchical order
descendents = reversed(rig_root.listRelatives(allDescendents=True,
type="transform"))
ctl_list = [d for d in descendents if d.hasAttr("is_simple_rig_ctl")]
ctl_names_list = []
# get setting for each ctl
for c in ctl_list:
# settings
if not c.edit_mode.get() and _is_in_npo(c):
ctl_name = c.name()
ctl_names_list.append(ctl_name)
conf_icon = c.conf_icon.get()
# back compatible:
if c.hasAttr("conf_sets"):
conf_sets = c.conf_sets.get()
else:
conf_sets = ""
conf_radio = c.conf_radio.get()
conf_color = c.conf_color.get()
ctl_color = curve.get_color(c)
if len(ctl_name.split("_")) == 2:
ctl_side = None
ctl_index = 0
else:
ctl_side = ctl_name.split("_")[-2][0]
ctl_index = ctl_name.split("_")[-2][1:]
ctl_short_name = ctl_name.split("_")[0]
ctl_parent = c.getParent(2).name()
m = c.getMatrix(worldSpace=True)
ctl_transform = m.get()
# driven list
driven_list = [n.name() for n in _get_from_driven_attr(c)]
else:
pm.displayWarning("Configuration can not be collected for Ctl in "
"edit pivot mode or not reset SRT "
"Finish edit pivot for or reset "
"SRT: {}".format(c))
return None
shps = curve.collect_curve_data(c)
conf_ctl_dict = {"conf_icon": conf_icon,
"conf_radio": conf_radio,
"conf_color": conf_color,
"ctl_color": ctl_color,
"ctl_side": ctl_side,
"ctl_shapes": shps,
"ctl_index": ctl_index,
"ctl_parent": ctl_parent,
"ctl_transform": ctl_transform,
"ctl_short_name": ctl_short_name,
"driven_list": driven_list,
"sets_list": conf_sets}
ctl_settings[ctl_name] = conf_ctl_dict
rig_conf_dict["ctl_list"] = ctl_names_list
rig_conf_dict["ctl_settings"] = ctl_settings
rig_conf_dict["root_name"] = rig_root.name()
return rig_conf_dict
# @utils.one_undo
def _build_rig_from_model(dagNode,
rigName=RIG_ROOT,
suffix="geoRoot",
sets_config=None,
ctl_wcm=False,
fix_radio=False,
radio_val=100,
gl_shape="square",
world_ctl=True,
w_shape="circle"):
"""Build a rig from a model structure.
using suffix keyword from a given model build a rig.
Args:
dagNode (dagNode): model root node
rigName (str, optional): Name of the rig
suffix (str, optional): suffix to check inside the model structure
            in order to identify the custom pivots
sets_config (str, optional): list of sets in string separated by ","
        ctl_wcm (bool, optional): If True, the world_ctl will be placed in the
scene world center
fix_radio (bool, optional): If True, will use a fix radio value,
instead of the bbox radio
radio_val (int, optional): Fix value for Radio
gl_shape (str, optional): Global and local control shape
world_ctl (bool, optional): if True, will create world_ctl
w_shape (str, optional): World control shape
sets_config (None, optional): Groups to include the ctl
"""
suf = "_{}".format(string.removeInvalidCharacter(suffix))
pm.displayInfo("Searching elements using suffix: {}".format(suf))
parent_dict = {}
local_ctl = _create_simple_rig_root(rigName,
sets_config=sets_config,
ctl_wcm=ctl_wcm,
fix_radio=fix_radio,
radio_val=radio_val,
gl_shape=gl_shape,
world_ctl=world_ctl,
w_shape=w_shape)
if local_ctl:
descendents = reversed(dagNode.listRelatives(allDescendents=True,
type="transform"))
suff_list = suffix.split(",")
for d in descendents:
if list(filter(d.name().endswith, suff_list)) != []:
name = d.name().replace(suf, "")
if d.getParent().name() in parent_dict:
parent = parent_dict[d.getParent().name()]
else:
parent = local_ctl
print(d)
ctl = _create_custom_pivot(name,
"C",
"circle",
True,
selection=d,
parent=parent,
sets_config=sets_config)
parent_dict[d.name()] = ctl
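# Usage sketch (hypothetical node names): auto-build a rig from a model
# whose custom pivot groups end with "_pivot":
#   root = pm.PyNode("geo_root")
#   _build_rig_from_model(root, rigName="rig", suffix="pivot")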
def _build_rig_from_configuration(configDict):
"""Buiold rig from configuration
Args:
configDict (dict): The configuration dictionary
"""
rig = _create_base_structure(configDict["root_name"])
for c in configDict["ctl_list"]:
ctl_conf = configDict["ctl_settings"][c]
driven = []
for drv in ctl_conf["driven_list"]:
obj = pm.ls(drv)
if obj:
driven.append(obj[0])
else:
pm.displayWarning("Driven object {}: "
"Can't be found.".format(drv))
t = datatypes.Matrix(ctl_conf["ctl_transform"])
_create_control(ctl_conf["ctl_short_name"],
t,
ctl_conf["conf_radio"],
ctl_conf["ctl_parent"],
ctl_conf["conf_icon"],
ctl_conf["ctl_side"],
indx=ctl_conf["ctl_index"],
color=ctl_conf["ctl_color"],
driven=driven,
sets_config=ctl_conf["sets_list"])
curve.update_curve_from_data(ctl_conf["ctl_shapes"])
connect_selectable(rig, driven)
def export_configuration(filePath=None):
"""Export configuration to json file
Args:
filePath (str, optional): Path to save the file
"""
rig_conf_dict = _collect_configuration_from_rig()
data_string = json.dumps(rig_conf_dict, indent=4, sort_keys=True)
if not filePath:
startDir = pm.workspace(q=True, rootDirectory=True)
filePath = pm.fileDialog2(
dialogStyle=2,
fileMode=0,
startingDirectory=startDir,
fileFilter='Simple Rig Configuration .src (*%s)' % ".src")
if not filePath:
return
if not isinstance(filePath, string_types):
filePath = filePath[0]
f = open(filePath, 'w')
f.write(data_string)
f.close()
def import_configuration(filePath=None):
"""Import configuration from filePath
Args:
filePath (str, optional): File path to the configuration json file
"""
if not filePath:
startDir = pm.workspace(q=True, rootDirectory=True)
filePath = pm.fileDialog2(
dialogStyle=2,
fileMode=1,
startingDirectory=startDir,
fileFilter='Simple Rig Configuration .src (*%s)' % ".src")
if not filePath:
return
if not isinstance(filePath, string_types):
filePath = filePath[0]
configDict = json.load(open(filePath))
_build_rig_from_configuration(configDict)
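# Round-trip sketch (hypothetical path): the exported .src file is plain
# JSON, so the same rig can be rebuilt later or in another scene:
#   export_configuration("/projects/assets/myRig.src")
#   import_configuration("/projects/assets/myRig.src")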
# Convert to SHIFTER ===========================================
def _shifter_init_guide(name, worldCtl=False):
"""Initialize shifter guide
Args:
name (str): Name for the rig
worldCtl (bool, optional): if True, will set the guide to use world_ctl
Returns:
        shifter.guide.Rig: The initialized guide
"""
guide = shifter.guide.Rig()
guide.initialHierarchy()
model = guide.model
    # set the attributes for the guide root
model.rig_name.set(name)
model.worldCtl.set(worldCtl)
return guide
def _shifter_control_component(name,
side,
indx,
t,
guide,
parent=None,
grps=""):
"""creates shifter control_01 component and sets the correct settings
Args:
name (str): Name of the component
side (str): side
indx (int): index
t (matrix): Transform Matrix
guide (guide): Shifter guide object
parent (dagNode, optional): Parent
grps (str, optional): groups
Returns:
        dagNode: The component guide root
"""
comp_guide = guide.getComponentGuide("control_01")
if parent is None:
parent = guide.model
if not isinstance(parent, str):
parent = pm.PyNode(parent)
comp_guide.draw(parent)
comp_guide.rename(comp_guide.root, name, side, indx)
root = comp_guide.root
# set the attributes for component
root.setMatrix(t, worldSpace=True)
root.neutralRotation.set(False)
root.joint.set(True)
root.ctlGrp.set(grps)
return root
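# Usage sketch (hypothetical names): draw a control_01 guide component at
# the identity transform under the guide model:
#   t = datatypes.Matrix()
#   root = _shifter_control_component("spine", "C", 0, t, guide)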
def convert_to_shifter_guide():
"""Convert the configuration to a shifter guide.
Returns:
        tuple: guide and configuration dictionary, or (None, None) on failure
"""
# get configuration dict
configDict = _collect_configuration_from_rig()
if configDict:
# Create the guide
root_name = configDict["root_name"]
if "world_ctl" in configDict["ctl_list"]:
worldCtl = True
            # we assume the world_ctl is always the first in the list
configDict["ctl_list"] = configDict["ctl_list"][1:]
else:
worldCtl = False
guide = _shifter_init_guide(root_name, worldCtl)
        # dict to store the parent relation from the original rig to the guide
parentRelation = {}
if worldCtl:
parentRelation["world_ctl"] = guide.model
else:
first_ctl = configDict["ctl_list"][0]
p = configDict["ctl_settings"][first_ctl]["ctl_parent"]
parentRelation[p] = guide.model
# create components
for c in configDict["ctl_list"]:
ctl_conf = configDict["ctl_settings"][c]
t = datatypes.Matrix(ctl_conf["ctl_transform"])
# we need to parse the grps list in order to extract the first grp
# without sub groups. Shifter doesn't support this feature yet
grps = ctl_conf["sets_list"]
grps = [g.split(".")[-1] for g in grps.split(",")][0]
root = _shifter_control_component(
ctl_conf["ctl_short_name"],
ctl_conf["ctl_side"],
int(ctl_conf["ctl_index"]),
t,
guide,
parent=parentRelation[ctl_conf["ctl_parent"]],
grps=grps)
parentRelation[c] = root
return guide, configDict
else:
return None, None
# @utils.one_undo
def convert_to_shifter_rig():
"""Convert simple rig to Shifter rig
It will create the guide and build the rig from configuration
skinning automatic base on driven attr
"""
simple_rig_root = _get_simple_rig_root()
if simple_rig_root:
guide, configDict = convert_to_shifter_guide()
if guide:
# ensure the objects are removed from the original rig
for c in configDict["ctl_list"]:
ctl_conf = configDict["ctl_settings"][c]
for d in ctl_conf["driven_list"]:
driven = pm.ls(d)
if driven and driven[0].getParent(-1).hasAttr(
"is_simple_rig"):
pm.displayWarning("{}: cut for old rig hierarchy"
"to avoid delete it when delete "
"the old rig!!")
pm.parent(driven, w=True)
# delete original rig
pm.delete(simple_rig_root)
# build guide
pm.select(guide.model)
rig = shifter.Rig()
rig.buildFromSelection()
rig.model.jnt_vis.set(0)
attribute.addAttribute(rig.model, "geoUnselectable", "bool", True)
# skin driven to new rig and apply control shapes
driven = None
for c in configDict["ctl_list"]:
ctl_conf = configDict["ctl_settings"][c]
for d in ctl_conf["driven_list"]:
driven = pm.ls(d)
jnt = pm.ls(c.replace("ctl", "0_jnt"))
connect_selectable(rig.model, [driven[0]])
if driven and jnt:
try:
pm.skinCluster(jnt[0],
driven[0],
tsb=True,
nw=2,
n='{}_skinCluster'.format(d))
except RuntimeError:
pm.displayWarning("Automatic skinning, can't be "
"created for"
" {}. Skipped.".format(d))
curve.update_curve_from_data(ctl_conf["ctl_shapes"])
# ensure geo root is child of rig root
if driven:
pm.parent(driven[0].getParent(-1), rig.model)
else:
pm.displayWarning("The guide can not be extracted. Check log!")
else:
pm.displayWarning("No simple root to convert!")
# Edit ===========================================
def _remove_element_from_ctl(ctl, dagNode):
"""Remove element from a rig control
Args:
ctl (dagNode): Control to remove the element
dagNode (dagNode): Element to be removed
"""
# Check the ctl is reset
if not _is_in_npo(ctl):
pm.displayWarning("{}: have SRT values. Reset, before edit "
"elements".format(ctl))
return
# get affected by pivot
driven = _get_from_driven_attr(ctl)
    # if dagNode is affected by the pivot, disconnect it
if dagNode in driven:
_disconnect_driven(dagNode)
_remove_from_driven_attr(dagNode)
_update_driven(ctl)
else:
        pm.displayWarning(
            "{} is not connected to the ctl {}".format(dagNode,
                                                       ctl))
def _add_element_to_ctl(ctl, dagNode):
"""Add element to control
Args:
ctl (dagNode): Control to add element
dagNode (dagNode): Element to add to the control
"""
# ensure the element is not yet in pivot
driven = _get_from_driven_attr(ctl)
# Check the ctl is reset
if not _is_in_npo(ctl):
pm.displayWarning("{}: have SRT values. Reset, before edit "
"elements".format(ctl))
return
    # if dagNode is not yet affected by the pivot, add it
if dagNode not in driven:
        # move/add the selected elements to the new pivot
_add_to_driven_attr(ctl, dagNode)
_update_driven(ctl)
def _delete_pivot(dagNode):
"""Remove custom pivot control
    It will move all dependent elements and children pivots to its parent
    element, or to the root if there is no parent pivot
Args:
dagNode (PyNode): Control to be removed
"""
if _is_valid_ctl(dagNode):
# get children pivots
# Check the ctl is reset
if not _is_in_npo(dagNode):
pm.displayWarning("{}: have SRT values. Reset, before edit "
"elements".format(dagNode))
return
children = dagNode.listRelatives(type="transform")
if children:
pm.parent(children, dagNode.getParent(2))
# clean connections
for d in _get_from_driven_attr(dagNode):
_disconnect_driven(d)
# delete pivot
pm.delete(dagNode.getParent())
pm.select(clear=True)
def _parent_pivot(pivot, parent):
"""Reparent pivot to another pivot or root
    Avoid parenting under world_ctl or local_C0_ctl
Args:
pivot (dagNode): Custom pivot control
parent (dagNode): New parent
"""
    # check if the parent is a valid pivot
if _is_valid_ctl(parent):
if _is_valid_ctl(pivot):
# Check the ctl is reset
            if not _is_in_npo(pivot):
                pm.displayWarning("{}: has SRT values. Reset them before "
                                  "editing elements".format(pivot))
                return
npo = pivot.getParent()
pm.parent(npo, parent)
# re-connect controller tag
pivotTag = pm.PyNode(pm.controller(pivot, q=True)[0])
node.controller_tag_connect(pivotTag, parent)
pm.select(clear=True)
else:
pm.displayWarning("The selected Pivot: {} is not a "
"valid simple rig ctl.".format(parent.name()))
else:
pm.displayWarning("The selected parent: {} is not a "
"valid simple rig ctl.".format(parent.name()))
def _edit_pivot_position(ctl):
"""Edit control pivot
    Set the pivot in editable mode.
    Check that it is in neutral pose first.
Args:
ctl (dagNode): Pivot to edit
"""
if not _is_in_npo(ctl):
pm.displayWarning("The control: {} should be in reset"
" position".format(ctl.name()))
return
if not ctl.attr("edit_mode").get():
        # move children to the parent
children = ctl.listRelatives(type="transform")
if children:
pm.parent(children, ctl.getParent())
# disconnect the driven elements
driven = _get_from_driven_attr(ctl)
ctl.attr("edit_mode").set(True)
for d in driven:
# first try to disconnect
_disconnect_driven(d)
pm.select(ctl)
else:
pm.displayWarning("The control: {} Is already in"
" Edit pivot Mode".format(ctl.name()))
return
def _consolidate_pivot_position(ctl):
"""Consolidate the pivot position after editing
Args:
ctl (dagNode): control to consolidate the new pivot position
"""
if ctl.attr("edit_mode").get():
# unparent the children
# rig = pm.PyNode(RIG_ROOT)
rig = _get_simple_rig_root()
npo = ctl.getParent()
children = npo.listRelatives(type="transform")
pm.parent(children, rig)
# filter out the ctl
children = [c for c in children if c != ctl]
        # set the npo to its position
transform.matchWorldTransform(ctl, npo)
pm.parent(ctl, npo)
        # reparent children
if children:
pm.parent(children, ctl)
# re-connect/update driven elements
_update_driven(ctl)
ctl.attr("edit_mode").set(False)
pm.select(ctl)
else:
pm.displayWarning("The control: {} Is NOT in"
" Edit pivot Mode".format(ctl.name()))
@utils.one_undo
def _delete_rig():
"""Delete the rig
Delete the rig and clean all connections on the geometry
"""
rig = _get_simple_rig_root()
if rig:
confirm = pm.confirmDialog(title='Confirm Delete Simple Rig',
message='Are you sure?',
button=['Yes', 'No'],
defaultButton='Yes',
cancelButton='No',
dismissString='No')
if confirm == "Yes":
children = rig.listRelatives(allDescendents=True,
type="transform")
to_delete = []
not_npo = []
for c in children:
if _is_valid_ctl(c):
if _is_in_npo(c):
to_delete.append(c)
else:
not_npo.append(c.name())
if not_npo:
pm.displayWarning("Please set all the controls to reset "
"position before delete rig. The following"
" controls are not "
"reset:{}".format(str(not_npo)))
return
for c in to_delete:
_delete_pivot(c)
pm.delete(rig)
else:
pm.displayWarning("No rig found to delete!")
# utils ===========================================
def _connect_tag_to_rig(rig, ctt):
"""Connect control tag
"""
ni = attribute.get_next_available_index(rig.rigCtlTags)
pm.connectAttr(ctt.message,
rig.attr("rigCtlTags[{}]".format(str(ni))))
def _validate_name(name):
"""Check and correct bad name formating
Args:
name (str): Name
Returns:
str: Corrected Name
"""
return string.removeInvalidCharacter(name)
def _is_valid_ctl(dagNode):
"""Check if the dagNode is a simple rig ctl
Args:
dagNode (PyNode): Control to check
Returns:
        bool: True if it has the expected tag attr
"""
return dagNode.hasAttr(CTL_TAG_ATTR)
def _is_simple_rig_root(dagNode):
"""Check if the dagNode is a simple rig ctl
Args:
dagNode (PyNode): Control to check
Returns:
        bool: True if it is the simple rig root
"""
return dagNode.hasAttr("is_simple_rig")
def _is_in_npo(dagNode):
"""check if the SRT is reset
SRT = Scale, Rotation, Translation
Args:
dagNode (PyNode): control to check
Returns:
bool: neutral pose status
"""
trAxis = ["tx", "ty", "tz", "rx", "ry", "rz"]
sAxis = ["sx", "sy", "sz"]
npo_status = True
for axis in trAxis:
val = dagNode.attr(axis).get()
if val != 0.0:
npo_status = False
pm.displayWarning("{}.{} is not neutral! Value is {}, "
"but should be {}".format(dagNode.name(),
axis,
str(val),
"0.0"))
for axis in sAxis:
val = dagNode.attr(axis).get()
if val != 1.0:
npo_status = False
pm.displayWarning("{}.{} is not neutral! Value is {}, "
"but should be {}".format(dagNode.name(),
axis,
str(val),
"1.0"))
return npo_status
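# Example: a control with all translate/rotate channels at 0.0 and all
# scale channels at 1.0 is in neutral pose; _is_in_npo returns False (and
# warns) for e.g. tx=0.5 or sx=1.2.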
# groups ==============================================
def _get_sets_grp(grpName="controllers_grp"):
"""Get set group
Args:
grpName (str, optional): group name
Returns:
PyNode: Set
"""
rig = _get_simple_rig_root()
sets = rig.listConnections(type="objectSet")
controllersGrp = None
for oSet in sets:
if grpName in oSet.name():
controllersGrp = oSet
return controllersGrp
def _extra_sets(sets_config):
"""Configure the extra sets from string
    e.g.: sets_config = "animSets.basic.test,animSets.facial"
Args:
sets_config (str): extra sets configuration
Returns:
list: extra sets list
"""
sets_grp = _get_sets_grp("sets_grp")
sets_list = sets_config.split(",")
last_sets_list = []
for s in sets_list:
set_fullname = ".".join([sets_grp.name(), s])
parent_set = None
# ss is the subset
for ss in set_fullname.split("."):
if pm.ls(ss):
parent_set = pm.ls(ss)[0]
else:
child_set = pm.sets(None, n=ss)
if parent_set:
parent_set.add(child_set)
parent_set = child_set
last_sets_list.append(parent_set)
return last_sets_list
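# Example: sets_config = "animSets.basic.test,animSets.facial" creates (or
# reuses) the set chains animSets > basic > test and animSets > facial
# under the sets group, and returns the leaf sets [test, facial].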
# Connect ===========================================
def _connect_driven(driver, driven):
"""Connect the driven element with multiply matrix
    Before connecting, check if the driven is valid,
    i.e. only elements not under geoRoot.
Args:
driver (PyNode): Driver control
driven (PyNode): Driven control
"""
if _is_valid_ctl(driven):
pm.displayWarning("{} can't not be driven or connected to a ctl, "
"because is a simple rig control".format(driven))
return
# Check the ctl is reset
    if not _is_in_npo(driver):
        pm.displayWarning("{}: has SRT values. Reset them before "
                          "connecting elements".format(driver))
    # connect the message of the matrix mul nodes to the driven,
    # so later it is easy to delete them
mOperatorNodes = "mOperatorNodes"
if not driven.hasAttr(mOperatorNodes):
driven.addAttr(mOperatorNodes, attributeType='message', multi=True)
mOp_attr = driven.attr(mOperatorNodes)
m = driven.worldMatrix.get()
im = driver.worldMatrix.get().inverse()
mul_node0 = applyop.gear_mulmatrix_op(im,
driver.worldMatrix)
mul_node1 = applyop.gear_mulmatrix_op(m,
mul_node0.output)
mul_node2 = applyop.gear_mulmatrix_op(mul_node1.output,
driven.parentInverseMatrix)
dm_node = node.createDecomposeMatrixNode(mul_node2.output)
pm.connectAttr(dm_node.outputTranslate, driven.t)
pm.connectAttr(dm_node.outputRotate, driven.r)
pm.connectAttr(dm_node.outputScale, driven.s)
pm.connectAttr(dm_node.outputShear, driven.shear)
pm.connectAttr(mul_node0.message,
mOp_attr.attr("{}[0]".format(mOperatorNodes)))
pm.connectAttr(mul_node1.message,
mOp_attr.attr("{}[1]".format(mOperatorNodes)))
pm.connectAttr(mul_node2.message,
mOp_attr.attr("{}[2]".format(mOperatorNodes)))
pm.connectAttr(dm_node.message,
mOp_attr.attr("{}[3]".format(mOperatorNodes)))
def _disconnect_driven(driven):
"""Disconnect driven control
delete the matrix mult nodes
Args:
driven (PyNode): Driven control to disconnect
"""
mOperatorNodes = "mOperatorNodes"
if driven.hasAttr(mOperatorNodes):
pm.delete(driven.attr(mOperatorNodes).inputs())
# @utils.one_undo
def _update_driven(driver):
"""Update the driven connections using the driver drivenElements attr
Args:
driver (PyNode): Driver control
"""
driven = _get_from_driven_attr(driver)
for d in driven:
# first try to disconnect
_disconnect_driven(d)
# Connect
_connect_driven(driver, d)
####################################
# Simple Rig dialog
####################################
class simpleRigUI(QtWidgets.QMainWindow, srUI.Ui_MainWindow):
"""UI dialog
"""
def __init__(self, parent=None):
super(simpleRigUI, self).__init__(parent)
self.setupUi(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.installEventFilter(self)
def keyPressEvent(self, event):
if not event.key() == QtCore.Qt.Key_Escape:
super(simpleRigUI, self).keyPressEvent(event)
class simpleRigTool(MayaQWidgetDockableMixin, QtWidgets.QDialog):
valueChanged = QtCore.Signal(int)
def __init__(self, parent=None):
self.toolName = "SimpleRigTool"
super(simpleRigTool, self).__init__(parent)
self.srUIInst = simpleRigUI()
self.setup_simpleRigWindow()
self.create_layout()
self.create_connections()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.installEventFilter(self)
def keyPressEvent(self, event):
if not event.key() == QtCore.Qt.Key_Escape:
super(simpleRigTool, self).keyPressEvent(event)
def setup_simpleRigWindow(self):
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle("Simple Rig")
self.resize(280, 260)
def create_layout(self):
self.sr_layout = QtWidgets.QVBoxLayout()
self.sr_layout.addWidget(self.srUIInst)
self.setLayout(self.sr_layout)
def create_connections(self):
self.srUIInst.createRoot_pushButton.clicked.connect(self.create_root)
self.srUIInst.createCtl_pushButton.clicked.connect(self.create_ctl)
self.srUIInst.selectAffected_pushButton.clicked.connect(
self.select_affected)
self.srUIInst.reParentPivot_pushButton.clicked.connect(
self.parent_pivot)
self.srUIInst.add_pushButton.clicked.connect(self.add_to_ctl)
self.srUIInst.remove_pushButton.clicked.connect(self.remove_from_ctl)
self.srUIInst.editPivot_pushButton.clicked.connect(self.edit_pivot)
self.srUIInst.setPivot_pushButton.clicked.connect(self.set_pivot)
self.srUIInst.autoRig_pushButton.clicked.connect(self.auto_rig)
# Menus
self.srUIInst.deletePivot_action.triggered.connect(self.delete_pivot)
self.srUIInst.deleteRig_action.triggered.connect(self.delete_rig)
self.srUIInst.autoBuild_action.triggered.connect(self.auto_rig)
self.srUIInst.export_action.triggered.connect(self.export_config)
self.srUIInst.import_action.triggered.connect(self.import_config)
# Shifter
self.srUIInst.convertToShifterRig_action.triggered.connect(
self.shifter_rig)
self.srUIInst.createShifterGuide_action.triggered.connect(
self.shifter_guide)
# Misc
self.srUIInst.rootName_lineEdit.textChanged.connect(
self.rootName_text_changed)
self.srUIInst.createCtl_lineEdit.textChanged.connect(
self.ctlName_text_changed)
# ==============================================
# Slots ========================================
# ==============================================
def shifter_rig(self):
convert_to_shifter_rig()
def shifter_guide(self):
convert_to_shifter_guide()
def rootName_text_changed(self):
name = _validate_name(self.srUIInst.rootName_lineEdit.text())
self.srUIInst.rootName_lineEdit.setText(name)
def ctlName_text_changed(self):
name = _validate_name(self.srUIInst.createCtl_lineEdit.text())
self.srUIInst.createCtl_lineEdit.setText(name)
def create_root(self):
name = self.srUIInst.rootName_lineEdit.text()
sets_config = self.srUIInst.extraSets_lineEdit.text()
ctl_wcm = self.srUIInst.worldCenter_checkBox.isChecked()
fix_radio = self.srUIInst.fixSize_checkBox.isChecked()
radio_val = self.srUIInst.fixSize_doubleSpinBox.value()
iconIdx = self.srUIInst.mainCtlShape_comboBox.currentIndex()
icon = ["square", "circle"][iconIdx]
w_ctl = self.srUIInst.worldCtl_checkBox.isChecked()
iconIdx = self.srUIInst.worldCtlShape_comboBox.currentIndex()
w_icon = ["circle", "sphere"][iconIdx]
_create_simple_rig_root(name,
sets_config=sets_config,
ctl_wcm=ctl_wcm,
fix_radio=fix_radio,
radio_val=radio_val,
gl_shape=icon,
world_ctl=w_ctl,
w_shape=w_icon)
def create_ctl(self):
name = self.srUIInst.createCtl_lineEdit.text()
if name:
sideIdx = self.srUIInst.side_comboBox.currentIndex()
side = ["C", "L", "R"][sideIdx]
iconIdx = self.srUIInst.shape_comboBox.currentIndex()
icon = ["circle", "cube"][iconIdx]
position = self.srUIInst.position_comboBox.currentIndex()
sets_config = self.srUIInst.extraSets_lineEdit.text()
_create_custom_pivot(
name, side, icon, yZero=position, sets_config=sets_config)
else:
pm.displayWarning("Name is not valid")
# @utils.one_undo
def select_affected(self):
oSel = pm.selected()
if oSel:
ctl = oSel[0]
pm.select(_get_from_driven_attr(ctl))
# @utils.one_undo
def parent_pivot(self):
oSel = pm.selected()
if oSel and len(oSel) >= 2:
for c in oSel[:-1]:
_parent_pivot(c, oSel[-1])
# @utils.one_undo
def add_to_ctl(self):
oSel = pm.selected()
if oSel and len(oSel) >= 2:
for e in oSel[:-1]:
_add_element_to_ctl(oSel[-1], e)
# @utils.one_undo
def remove_from_ctl(self):
oSel = pm.selected()
if oSel and len(oSel) >= 2:
for e in oSel[:-1]:
_remove_element_from_ctl(oSel[-1], e)
# @utils.one_undo
def delete_pivot(self):
for d in pm.selected():
_delete_pivot(d)
# @utils.one_undo
def edit_pivot(self):
oSel = pm.selected()
if oSel and len(oSel) == 1:
_edit_pivot_position(oSel[0])
else:
pm.displayWarning("Please select one ctl")
# @utils.one_undo
def set_pivot(self):
oSel = pm.selected()
if oSel and len(oSel) == 1:
_consolidate_pivot_position(oSel[0])
else:
pm.displayWarning("Please select one ctl")
# @utils.one_undo
def delete_rig(self):
_delete_rig()
# @utils.one_undo
def auto_rig(self):
oSel = pm.selected()
if oSel and len(oSel) == 1:
suffix = self.srUIInst.autoBuild_lineEdit.text()
name = self.srUIInst.rootName_lineEdit.text()
sets_config = self.srUIInst.extraSets_lineEdit.text()
ctl_wcm = self.srUIInst.worldCenter_checkBox.isChecked()
fix_radio = self.srUIInst.fixSize_checkBox.isChecked()
radio_val = self.srUIInst.fixSize_doubleSpinBox.value()
iconIdx = self.srUIInst.mainCtlShape_comboBox.currentIndex()
icon = ["square", "circle"][iconIdx]
w_ctl = self.srUIInst.worldCtl_checkBox.isChecked()
iconIdx = self.srUIInst.worldCtlShape_comboBox.currentIndex()
w_icon = ["circle", "sphere"][iconIdx]
_build_rig_from_model(oSel[0],
name,
suffix,
sets_config,
ctl_wcm=ctl_wcm,
fix_radio=fix_radio,
radio_val=radio_val,
gl_shape=icon,
world_ctl=w_ctl,
w_shape=w_icon)
else:
pm.displayWarning("Please select root of the model")
def export_config(self):
export_configuration()
def import_config(self):
import_configuration()
def openSimpleRigUI(*args):
pyqt.showDialog(simpleRigTool, dockable=True)
####################################
if __name__ == "__main__":
openSimpleRigUI()
|
1625898
|
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
from shared.xml_classes.common_messages.v2_g_ci_common_types import (
EvsestatusType,
ListOfRootCertificateIdsType,
MeterInfoType,
RationalNumberType,
ReceiptType,
V2GrequestType,
V2GresponseType,
ProcessingType,
)
__NAMESPACE__ = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class DynamicEvpptcontrolModeType:
class Meta:
name = "Dynamic_EVPPTControlModeType"
@dataclass
class DynamicSmdtcontrolModeType:
class Meta:
name = "Dynamic_SMDTControlModeType"
@dataclass
class EimAreqAuthorizationModeType:
class Meta:
name = "EIM_AReqAuthorizationModeType"
@dataclass
class EimAsresAuthorizationModeType:
class Meta:
name = "EIM_ASResAuthorizationModeType"
@dataclass
class EmaidlistType:
class Meta:
name = "EMAIDListType"
emaid: List[str] = field(
default_factory=list,
metadata={
"name": "EMAID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 8,
"max_length": 255,
}
)
@dataclass
class PriceLevelScheduleEntryType:
duration: Optional[int] = field(
default=None,
metadata={
"name": "Duration",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
price_level: Optional[int] = field(
default=None,
metadata={
"name": "PriceLevel",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class PriceScheduleType:
time_anchor: Optional[int] = field(
default=None,
metadata={
"name": "TimeAnchor",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
price_schedule_id: Optional[int] = field(
default=None,
metadata={
"name": "PriceScheduleID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 1,
"max_inclusive": 4294967295,
}
)
price_schedule_description: Optional[str] = field(
default=None,
metadata={
"name": "PriceScheduleDescription",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 160,
}
)
@dataclass
class ScheduledSmdtcontrolModeType:
class Meta:
name = "Scheduled_SMDTControlModeType"
selected_schedule_tuple_id: Optional[int] = field(
default=None,
metadata={
"name": "SelectedScheduleTupleID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 1,
"max_inclusive": 4294967295,
}
)
@dataclass
class SelectedServiceType:
service_id: Optional[int] = field(
default=None,
metadata={
"name": "ServiceID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
parameter_set_id: Optional[int] = field(
default=None,
metadata={
"name": "ParameterSetID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class ServiceIdlistType:
class Meta:
name = "ServiceIDListType"
service_id: List[int] = field(
default_factory=list,
metadata={
"name": "ServiceID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 16,
}
)
@dataclass
class ServiceType:
service_id: Optional[int] = field(
default=None,
metadata={
"name": "ServiceID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
free_service: Optional[bool] = field(
default=None,
metadata={
"name": "FreeService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class SubCertificatesType:
certificate: List[bytes] = field(
default_factory=list,
metadata={
"name": "Certificate",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 3,
"max_length": 1600,
"format": "base64",
}
)
@dataclass
class SupportedProvidersListType:
provider_id: List[str] = field(
default_factory=list,
metadata={
"name": "ProviderID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 128,
"max_length": 80,
}
)
@dataclass
class TargetPositionType:
target_offset_x: Optional[int] = field(
default=None,
metadata={
"name": "TargetOffsetX",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
target_offset_y: Optional[int] = field(
default=None,
metadata={
"name": "TargetOffsetY",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
class AuthorizationType(Enum):
EIM = "EIM"
PN_C = "PnC"
class ChannelSelectionType(Enum):
CHARGE = "Charge"
DISCHARGE = "Discharge"
class ChargeProgressType(Enum):
START = "Start"
STOP = "Stop"
STANDBY = "Standby"
SCHEDULE_RENEGOTIATION = "ScheduleRenegotiation"
class ChargingSessionType(Enum):
PAUSE = "Pause"
TERMINATE = "Terminate"
SERVICE_RENEGOTIATION = "ServiceRenegotiation"
class EcdhCurveType(Enum):
SECP521 = "SECP521"
X448 = "X448"
class EvCheckInStatusType(Enum):
CHECK_IN = "CheckIn"
PROCESSING = "Processing"
COMPLETED = "Completed"
class EvCheckOutStatusType(Enum):
CHECK_OUT = "CheckOut"
PROCESSING = "Processing"
COMPLETED = "Completed"
class EvseCheckOutStatusType(Enum):
SCHEDULED = "Scheduled"
COMPLETED = "Completed"
class ParkingMethodType(Enum):
AUTO_PARKING = "AutoParking"
MVGUIDE_MANUAL = "MVGuideManual"
MANUAL = "Manual"
class PowerToleranceAcceptanceType(Enum):
POWER_TOLERANCE_NOT_CONFIRMED = "PowerToleranceNotConfirmed"
POWER_TOLERANCE_CONFIRMED = "PowerToleranceConfirmed"
@dataclass
class AdditionalServiceType:
service_name: Optional[str] = field(
default=None,
metadata={
"name": "ServiceName",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 80,
}
)
service_fee: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "ServiceFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class AuthorizationResType(V2GresponseType):
evseprocessing: Optional[ProcessingType] = field(
default=None,
metadata={
"name": "EVSEProcessing",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class AuthorizationSetupReqType(V2GrequestType):
pass
@dataclass
class CertificateChainType:
certificate: Optional[bytes] = field(
default=None,
metadata={
"name": "Certificate",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 1600,
"format": "base64",
}
)
sub_certificates: Optional[SubCertificatesType] = field(
default=None,
metadata={
"name": "SubCertificates",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ContractCertificateChainType:
certificate: Optional[bytes] = field(
default=None,
metadata={
"name": "Certificate",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 1600,
"format": "base64",
}
)
sub_certificates: Optional[SubCertificatesType] = field(
default=None,
metadata={
"name": "SubCertificates",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class DynamicSereqControlModeType:
class Meta:
name = "Dynamic_SEReqControlModeType"
departure_time: Optional[int] = field(
default=None,
metadata={
"name": "DepartureTime",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
minimum_soc: Optional[int] = field(
default=None,
metadata={
"name": "MinimumSOC",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_inclusive": 0,
"max_inclusive": 100,
}
)
target_soc: Optional[int] = field(
default=None,
metadata={
"name": "TargetSOC",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_inclusive": 0,
"max_inclusive": 100,
}
)
evtarget_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVTargetEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evmaximum_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMaximumEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evminimum_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMinimumEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evmaximum_v2_xenergy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMaximumV2XEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evminimum_v2_xenergy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMinimumV2XEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class EvpowerScheduleEntryType:
class Meta:
name = "EVPowerScheduleEntryType"
duration: Optional[int] = field(
default=None,
metadata={
"name": "Duration",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
power: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "Power",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class EvpriceRuleType:
class Meta:
name = "EVPriceRuleType"
energy_fee: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EnergyFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
power_range_start: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "PowerRangeStart",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class MeteringConfirmationResType(V2GresponseType):
pass
@dataclass
class OverstayRuleType:
overstay_rule_description: Optional[str] = field(
default=None,
metadata={
"name": "OverstayRuleDescription",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 160,
}
)
start_time: Optional[int] = field(
default=None,
metadata={
"name": "StartTime",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
overstay_fee: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "OverstayFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
overstay_fee_period: Optional[int] = field(
default=None,
metadata={
"name": "OverstayFeePeriod",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class ParameterType:
bool_value: Optional[bool] = field(
default=None,
metadata={
"name": "boolValue",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
byte_value: Optional[int] = field(
default=None,
metadata={
"name": "byteValue",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
short_value: Optional[int] = field(
default=None,
metadata={
"name": "shortValue",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
int_value: Optional[int] = field(
default=None,
metadata={
"name": "intValue",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
rational_number: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "rationalNumber",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
finite_string: Optional[str] = field(
default=None,
metadata={
"name": "finiteString",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 80,
}
)
name: Optional[str] = field(
default=None,
metadata={
"name": "Name",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 80,
}
)
@dataclass
class PnCAsresAuthorizationModeType:
class Meta:
name = "PnC_ASResAuthorizationModeType"
gen_challenge: Optional[bytes] = field(
default=None,
metadata={
"name": "GenChallenge",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"length": 16,
"format": "base64",
}
)
supported_providers: Optional[SupportedProvidersListType] = field(
default=None,
metadata={
"name": "SupportedProviders",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class PowerDeliveryResType(V2GresponseType):
evsestatus: Optional[EvsestatusType] = field(
default=None,
metadata={
"name": "EVSEStatus",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class PowerScheduleEntryType:
duration: Optional[int] = field(
default=None,
metadata={
"name": "Duration",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
power: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "Power",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
power_l2: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "Power_L2",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
power_l3: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "Power_L3",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class PriceLevelScheduleEntryListType:
price_level_schedule_entry: List[PriceLevelScheduleEntryType] = field(
default_factory=list,
metadata={
"name": "PriceLevelScheduleEntry",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 1024,
}
)
@dataclass
class PriceRuleType:
energy_fee: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EnergyFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
parking_fee: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "ParkingFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
parking_fee_period: Optional[int] = field(
default=None,
metadata={
"name": "ParkingFeePeriod",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
carbon_dioxide_emission: Optional[int] = field(
default=None,
metadata={
"name": "CarbonDioxideEmission",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
renewable_generation_percentage: Optional[int] = field(
default=None,
metadata={
"name": "RenewableGenerationPercentage",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
power_range_start: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "PowerRangeStart",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class ScheduledEvpptcontrolModeType:
class Meta:
name = "Scheduled_EVPPTControlModeType"
selected_schedule_tuple_id: Optional[int] = field(
default=None,
metadata={
"name": "SelectedScheduleTupleID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 1,
"max_inclusive": 4294967295,
}
)
power_tolerance_acceptance: Optional[PowerToleranceAcceptanceType] = field(
default=None,
metadata={
"name": "PowerToleranceAcceptance",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class SelectedServiceListType:
selected_service: List[SelectedServiceType] = field(
default_factory=list,
metadata={
"name": "SelectedService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 16,
}
)
@dataclass
class ServiceDetailReqType(V2GrequestType):
service_id: Optional[int] = field(
default=None,
metadata={
"name": "ServiceID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class ServiceDiscoveryReqType(V2GrequestType):
supported_service_ids: Optional[ServiceIdlistType] = field(
default=None,
metadata={
"name": "SupportedServiceIDs",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ServiceListType:
service: List[ServiceType] = field(
default_factory=list,
metadata={
"name": "Service",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 8,
}
)
@dataclass
class ServiceSelectionResType(V2GresponseType):
pass
@dataclass
class SessionSetupReqType(V2GrequestType):
evccid: Optional[str] = field(
default=None,
metadata={
"name": "EVCCID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 255,
}
)
@dataclass
class SessionSetupResType(V2GresponseType):
evseid: Optional[str] = field(
default=None,
metadata={
"name": "EVSEID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 255,
}
)
@dataclass
class SessionStopReqType(V2GrequestType):
charging_session: Optional[ChargingSessionType] = field(
default=None,
metadata={
"name": "ChargingSession",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evtermination_code: Optional[str] = field(
default=None,
metadata={
"name": "EVTerminationCode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 80,
}
)
evtermination_explanation: Optional[str] = field(
default=None,
metadata={
"name": "EVTerminationExplanation",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 160,
}
)
@dataclass
class SessionStopResType(V2GresponseType):
pass
@dataclass
class SignedCertificateChainType:
certificate: Optional[bytes] = field(
default=None,
metadata={
"name": "Certificate",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 1600,
"format": "base64",
}
)
sub_certificates: Optional[SubCertificatesType] = field(
default=None,
metadata={
"name": "SubCertificates",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class SignedMeteringDataType:
session_id: Optional[bytes] = field(
default=None,
metadata={
"name": "SessionID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"length": 8,
"format": "base16",
}
)
meter_info: Optional[MeterInfoType] = field(
default=None,
metadata={
"name": "MeterInfo",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
receipt: Optional[ReceiptType] = field(
default=None,
metadata={
"name": "Receipt",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
dynamic_smdtcontrol_mode: Optional[DynamicSmdtcontrolModeType] = field(
default=None,
metadata={
"name": "Dynamic_SMDTControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
scheduled_smdtcontrol_mode: Optional[ScheduledSmdtcontrolModeType] = field(
default=None,
metadata={
"name": "Scheduled_SMDTControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class TaxRuleType:
tax_rule_id: Optional[int] = field(
default=None,
metadata={
"name": "TaxRuleID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 1,
"max_inclusive": 4294967295,
}
)
tax_rule_name: Optional[str] = field(
default=None,
metadata={
"name": "TaxRuleName",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"max_length": 80,
}
)
tax_rate: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "TaxRate",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
tax_included_in_price: Optional[bool] = field(
default=None,
metadata={
"name": "TaxIncludedInPrice",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
applies_to_energy_fee: Optional[bool] = field(
default=None,
metadata={
"name": "AppliesToEnergyFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
applies_to_parking_fee: Optional[bool] = field(
default=None,
metadata={
"name": "AppliesToParkingFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
applies_to_overstay_fee: Optional[bool] = field(
default=None,
metadata={
"name": "AppliesToOverstayFee",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
applies_minimum_maximum_cost: Optional[bool] = field(
default=None,
metadata={
"name": "AppliesMinimumMaximumCost",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class VehicleCheckInReqType(V2GrequestType):
evcheck_in_status: Optional[EvCheckInStatusType] = field(
default=None,
metadata={
"name": "EVCheckInStatus",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
parking_method: Optional[ParkingMethodType] = field(
default=None,
metadata={
"name": "ParkingMethod",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
vehicle_frame: Optional[int] = field(
default=None,
metadata={
"name": "VehicleFrame",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
device_offset: Optional[int] = field(
default=None,
metadata={
"name": "DeviceOffset",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
vehicle_travel: Optional[int] = field(
default=None,
metadata={
"name": "VehicleTravel",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class VehicleCheckInResType(V2GresponseType):
parking_space: Optional[int] = field(
default=None,
metadata={
"name": "ParkingSpace",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
device_location: Optional[int] = field(
default=None,
metadata={
"name": "DeviceLocation",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
target_distance: Optional[int] = field(
default=None,
metadata={
"name": "TargetDistance",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class VehicleCheckOutReqType(V2GrequestType):
evcheck_out_status: Optional[EvCheckOutStatusType] = field(
default=None,
metadata={
"name": "EVCheckOutStatus",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
check_out_time: Optional[int] = field(
default=None,
metadata={
"name": "CheckOutTime",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class VehicleCheckOutResType(V2GresponseType):
evsecheck_out_status: Optional[EvseCheckOutStatusType] = field(
default=None,
metadata={
"name": "EVSECheckOutStatus",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class AdditionalServiceListType:
additional_service: List[AdditionalServiceType] = field(
default_factory=list,
metadata={
"name": "AdditionalService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 5,
}
)
@dataclass
class AuthorizationRes(AuthorizationResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class AuthorizationSetupReq(AuthorizationSetupReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class AuthorizationSetupResType(V2GresponseType):
authorization_services: List[AuthorizationType] = field(
default_factory=list,
metadata={
"name": "AuthorizationServices",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 2,
}
)
certificate_installation_service: Optional[bool] = field(
default=None,
metadata={
"name": "CertificateInstallationService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
eim_asres_authorization_mode: Optional[EimAsresAuthorizationModeType] = field(
default=None,
metadata={
"name": "EIM_ASResAuthorizationMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
pn_c_asres_authorization_mode: Optional[PnCAsresAuthorizationModeType] = field(
default=None,
metadata={
"name": "PnC_ASResAuthorizationMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class CertificateInstallationReqType(V2GrequestType):
oemprovisioning_certificate_chain: Optional[SignedCertificateChainType] = field(
default=None,
metadata={
"name": "OEMProvisioningCertificateChain",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
list_of_root_certificate_ids: Optional[ListOfRootCertificateIdsType] = field(
default=None,
metadata={
"name": "ListOfRootCertificateIDs",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
maximum_contract_certificate_chains: Optional[int] = field(
default=None,
metadata={
"name": "MaximumContractCertificateChains",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
prioritized_emaids: Optional[EmaidlistType] = field(
default=None,
metadata={
"name": "PrioritizedEMAIDs",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class EvpowerProfileEntryListType:
class Meta:
name = "EVPowerProfileEntryListType"
evpower_profile_entry: List[PowerScheduleEntryType] = field(
default_factory=list,
metadata={
"name": "EVPowerProfileEntry",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 2048,
}
)
@dataclass
class EvpowerScheduleEntryListType:
class Meta:
name = "EVPowerScheduleEntryListType"
evpower_schedule_entry: List[EvpowerScheduleEntryType] = field(
default_factory=list,
metadata={
"name": "EVPowerScheduleEntry",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 1024,
}
)
@dataclass
class EvpriceRuleStackType:
class Meta:
name = "EVPriceRuleStackType"
duration: Optional[int] = field(
default=None,
metadata={
"name": "Duration",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evprice_rule: List[EvpriceRuleType] = field(
default_factory=list,
metadata={
"name": "EVPriceRule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 8,
}
)
@dataclass
class MeteringConfirmationReqType(V2GrequestType):
signed_metering_data: Optional[SignedMeteringDataType] = field(
default=None,
metadata={
"name": "SignedMeteringData",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class MeteringConfirmationRes(MeteringConfirmationResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class OverstayRuleListType:
overstay_time_threshold: Optional[int] = field(
default=None,
metadata={
"name": "OverstayTimeThreshold",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
overstay_power_threshold: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "OverstayPowerThreshold",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
overstay_rule: List[OverstayRuleType] = field(
default_factory=list,
metadata={
"name": "OverstayRule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 5,
}
)
@dataclass
class ParameterSetType:
parameter_set_id: Optional[int] = field(
default=None,
metadata={
"name": "ParameterSetID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
parameter: List[ParameterType] = field(
default_factory=list,
metadata={
"name": "Parameter",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 32,
}
)
@dataclass
class PnCAreqAuthorizationModeType:
class Meta:
name = "PnC_AReqAuthorizationModeType"
gen_challenge: Optional[bytes] = field(
default=None,
metadata={
"name": "GenChallenge",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"length": 16,
"format": "base64",
}
)
contract_certificate_chain: Optional[ContractCertificateChainType] = field(
default=None,
metadata={
"name": "ContractCertificateChain",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class PowerDeliveryRes(PowerDeliveryResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class PowerScheduleEntryListType:
power_schedule_entry: List[PowerScheduleEntryType] = field(
default_factory=list,
metadata={
"name": "PowerScheduleEntry",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 1024,
}
)
@dataclass
class PriceLevelScheduleType(PriceScheduleType):
number_of_price_levels: Optional[int] = field(
default=None,
metadata={
"name": "NumberOfPriceLevels",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
price_level_schedule_entries: Optional[PriceLevelScheduleEntryListType] = field(
default=None,
metadata={
"name": "PriceLevelScheduleEntries",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class PriceRuleStackType:
duration: Optional[int] = field(
default=None,
metadata={
"name": "Duration",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
price_rule: List[PriceRuleType] = field(
default_factory=list,
metadata={
"name": "PriceRule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 8,
}
)
@dataclass
class ServiceDetailReq(ServiceDetailReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ServiceDiscoveryReq(ServiceDiscoveryReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ServiceDiscoveryResType(V2GresponseType):
service_renegotiation_supported: Optional[bool] = field(
default=None,
metadata={
"name": "ServiceRenegotiationSupported",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
energy_transfer_service_list: Optional[ServiceListType] = field(
default=None,
metadata={
"name": "EnergyTransferServiceList",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
vaslist: Optional[ServiceListType] = field(
default=None,
metadata={
"name": "VASList",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ServiceSelectionReqType(V2GrequestType):
selected_energy_transfer_service: Optional[SelectedServiceType] = field(
default=None,
metadata={
"name": "SelectedEnergyTransferService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
selected_vaslist: Optional[SelectedServiceListType] = field(
default=None,
metadata={
"name": "SelectedVASList",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ServiceSelectionRes(ServiceSelectionResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SessionSetupReq(SessionSetupReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SessionSetupRes(SessionSetupResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SessionStopReq(SessionStopReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SessionStopRes(SessionStopResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SignedInstallationDataType:
contract_certificate_chain: Optional[ContractCertificateChainType] = field(
default=None,
metadata={
"name": "ContractCertificateChain",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
ecdhcurve: Optional[EcdhCurveType] = field(
default=None,
metadata={
"name": "ECDHCurve",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
dhpublic_key: Optional[bytes] = field(
default=None,
metadata={
"name": "DHPublicKey",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"length": 133,
"format": "base64",
}
)
secp521_encrypted_private_key: Optional[bytes] = field(
default=None,
metadata={
"name": "SECP521_EncryptedPrivateKey",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"length": 94,
"format": "base64",
}
)
x448_encrypted_private_key: Optional[bytes] = field(
default=None,
metadata={
"name": "X448_EncryptedPrivateKey",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"length": 84,
"format": "base64",
}
)
tpm_encrypted_private_key: Optional[bytes] = field(
default=None,
metadata={
"name": "TPM_EncryptedPrivateKey",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"length": 206,
"format": "base64",
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class SignedMeteringData(SignedMeteringDataType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class TaxRuleListType:
tax_rule: List[TaxRuleType] = field(
default_factory=list,
metadata={
"name": "TaxRule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 10,
}
)
@dataclass
class VehicleCheckInReq(VehicleCheckInReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class VehicleCheckInRes(VehicleCheckInResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class VehicleCheckOutReq(VehicleCheckOutReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class VehicleCheckOutRes(VehicleCheckOutResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class AuthorizationReqType(V2GrequestType):
selected_authorization_service: Optional[AuthorizationType] = field(
default=None,
metadata={
"name": "SelectedAuthorizationService",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
eim_areq_authorization_mode: Optional[EimAreqAuthorizationModeType] = field(
default=None,
metadata={
"name": "EIM_AReqAuthorizationMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
pn_c_areq_authorization_mode: Optional[PnCAreqAuthorizationModeType] = field(
default=None,
metadata={
"name": "PnC_AReqAuthorizationMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class AuthorizationSetupRes(AuthorizationSetupResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class CertificateInstallationReq(CertificateInstallationReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class CertificateInstallationResType(V2GresponseType):
evseprocessing: Optional[ProcessingType] = field(
default=None,
metadata={
"name": "EVSEProcessing",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
cpscertificate_chain: Optional[CertificateChainType] = field(
default=None,
metadata={
"name": "CPSCertificateChain",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
signed_installation_data: Optional[SignedInstallationDataType] = field(
default=None,
metadata={
"name": "SignedInstallationData",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
remaining_contract_certificate_chains: Optional[int] = field(
default=None,
metadata={
"name": "RemainingContractCertificateChains",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class EvpowerProfileType:
class Meta:
name = "EVPowerProfileType"
time_anchor: Optional[int] = field(
default=None,
metadata={
"name": "TimeAnchor",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
dynamic_evpptcontrol_mode: Optional[DynamicEvpptcontrolModeType] = field(
default=None,
metadata={
"name": "Dynamic_EVPPTControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
scheduled_evpptcontrol_mode: Optional[ScheduledEvpptcontrolModeType] = field(
default=None,
metadata={
"name": "Scheduled_EVPPTControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evpower_profile_entries: Optional[EvpowerProfileEntryListType] = field(
default=None,
metadata={
"name": "EVPowerProfileEntries",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class EvpowerScheduleType:
class Meta:
name = "EVPowerScheduleType"
time_anchor: Optional[int] = field(
default=None,
metadata={
"name": "TimeAnchor",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evpower_schedule_entries: Optional[EvpowerScheduleEntryListType] = field(
default=None,
metadata={
"name": "EVPowerScheduleEntries",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class EvpriceRuleStackListType:
class Meta:
name = "EVPriceRuleStackListType"
evprice_rule_stack: List[EvpriceRuleStackType] = field(
default_factory=list,
metadata={
"name": "EVPriceRuleStack",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 1024,
}
)
@dataclass
class MeteringConfirmationReq(MeteringConfirmationReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class PowerScheduleType:
time_anchor: Optional[int] = field(
default=None,
metadata={
"name": "TimeAnchor",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
available_energy: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "AvailableEnergy",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
power_tolerance: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "PowerTolerance",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
power_schedule_entries: Optional[PowerScheduleEntryListType] = field(
default=None,
metadata={
"name": "PowerScheduleEntries",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class PriceRuleStackListType:
price_rule_stack: List[PriceRuleStackType] = field(
default_factory=list,
metadata={
"name": "PriceRuleStack",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 1024,
}
)
@dataclass
class ServiceDiscoveryRes(ServiceDiscoveryResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ServiceParameterListType:
parameter_set: List[ParameterSetType] = field(
default_factory=list,
metadata={
"name": "ParameterSet",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 32,
}
)
@dataclass
class ServiceSelectionReq(ServiceSelectionReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class SignedInstallationData(SignedInstallationDataType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class AbsolutePriceScheduleType(PriceScheduleType):
currency: Optional[str] = field(
default=None,
metadata={
"name": "Currency",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 3,
}
)
language: Optional[str] = field(
default=None,
metadata={
"name": "Language",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 3,
}
)
price_algorithm: Optional[str] = field(
default=None,
metadata={
"name": "PriceAlgorithm",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 255,
}
)
minimum_cost: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "MinimumCost",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
maximum_cost: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "MaximumCost",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
tax_rules: Optional[TaxRuleListType] = field(
default=None,
metadata={
"name": "TaxRules",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
price_rule_stacks: Optional[PriceRuleStackListType] = field(
default=None,
metadata={
"name": "PriceRuleStacks",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
overstay_rules: Optional[OverstayRuleListType] = field(
default=None,
metadata={
"name": "OverstayRules",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
additional_selected_services: Optional[AdditionalServiceListType] = field(
default=None,
metadata={
"name": "AdditionalSelectedServices",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
id: Optional[str] = field(
default=None,
metadata={
"name": "Id",
"type": "Attribute",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class AuthorizationReq(AuthorizationReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class CertificateInstallationRes(CertificateInstallationResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class EvabsolutePriceScheduleType:
class Meta:
name = "EVAbsolutePriceScheduleType"
time_anchor: Optional[int] = field(
default=None,
metadata={
"name": "TimeAnchor",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
currency: Optional[str] = field(
default=None,
metadata={
"name": "Currency",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 3,
}
)
price_algorithm: Optional[str] = field(
default=None,
metadata={
"name": "PriceAlgorithm",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"max_length": 255,
}
)
evprice_rule_stacks: Optional[EvpriceRuleStackListType] = field(
default=None,
metadata={
"name": "EVPriceRuleStacks",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class PowerDeliveryReqType(V2GrequestType):
evprocessing: Optional[ProcessingType] = field(
default=None,
metadata={
"name": "EVProcessing",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
charge_progress: Optional[ChargeProgressType] = field(
default=None,
metadata={
"name": "ChargeProgress",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evpower_profile: Optional[EvpowerProfileType] = field(
default=None,
metadata={
"name": "EVPowerProfile",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
bpt_channel_selection: Optional[ChannelSelectionType] = field(
default=None,
metadata={
"name": "BPT_ChannelSelection",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ServiceDetailResType(V2GresponseType):
service_id: Optional[int] = field(
default=None,
metadata={
"name": "ServiceID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
service_parameter_list: Optional[ServiceParameterListType] = field(
default=None,
metadata={
"name": "ServiceParameterList",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class ChargingScheduleType:
power_schedule: Optional[PowerScheduleType] = field(
default=None,
metadata={
"name": "PowerSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
absolute_price_schedule: Optional[AbsolutePriceScheduleType] = field(
default=None,
metadata={
"name": "AbsolutePriceSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
price_level_schedule: Optional[PriceLevelScheduleType] = field(
default=None,
metadata={
"name": "PriceLevelSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class DynamicSeresControlModeType:
class Meta:
name = "Dynamic_SEResControlModeType"
departure_time: Optional[int] = field(
default=None,
metadata={
"name": "DepartureTime",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
minimum_soc: Optional[int] = field(
default=None,
metadata={
"name": "MinimumSOC",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_inclusive": 0,
"max_inclusive": 100,
}
)
target_soc: Optional[int] = field(
default=None,
metadata={
"name": "TargetSOC",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_inclusive": 0,
"max_inclusive": 100,
}
)
absolute_price_schedule: Optional[AbsolutePriceScheduleType] = field(
default=None,
metadata={
"name": "AbsolutePriceSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
price_level_schedule: Optional[PriceLevelScheduleType] = field(
default=None,
metadata={
"name": "PriceLevelSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class EvenergyOfferType:
class Meta:
name = "EVEnergyOfferType"
evpower_schedule: Optional[EvpowerScheduleType] = field(
default=None,
metadata={
"name": "EVPowerSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
evabsolute_price_schedule: Optional[EvabsolutePriceScheduleType] = field(
default=None,
metadata={
"name": "EVAbsolutePriceSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
@dataclass
class PowerDeliveryReq(PowerDeliveryReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ServiceDetailRes(ServiceDetailResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ScheduleTupleType:
schedule_tuple_id: Optional[int] = field(
default=None,
metadata={
"name": "ScheduleTupleID",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 1,
"max_inclusive": 4294967295,
}
)
charging_schedule: Optional[ChargingScheduleType] = field(
default=None,
metadata={
"name": "ChargingSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
discharging_schedule: Optional[ChargingScheduleType] = field(
default=None,
metadata={
"name": "DischargingSchedule",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ScheduledSereqControlModeType:
class Meta:
name = "Scheduled_SEReqControlModeType"
departure_time: Optional[int] = field(
default=None,
metadata={
"name": "DepartureTime",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evtarget_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVTargetEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evmaximum_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMaximumEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evminimum_energy_request: Optional[RationalNumberType] = field(
default=None,
metadata={
"name": "EVMinimumEnergyRequest",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
evenergy_offer: Optional[EvenergyOfferType] = field(
default=None,
metadata={
"name": "EVEnergyOffer",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ScheduleExchangeReqType(V2GrequestType):
maximum_supporting_points: Optional[int] = field(
default=None,
metadata={
"name": "MaximumSupportingPoints",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
"min_inclusive": 12,
"max_inclusive": 1024,
}
)
dynamic_sereq_control_mode: Optional[DynamicSereqControlModeType] = field(
default=None,
metadata={
"name": "Dynamic_SEReqControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
scheduled_sereq_control_mode: Optional[ScheduledSereqControlModeType] = field(
default=None,
metadata={
"name": "Scheduled_SEReqControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ScheduledSeresControlModeType:
class Meta:
name = "Scheduled_SEResControlModeType"
schedule_tuple: List[ScheduleTupleType] = field(
default_factory=list,
metadata={
"name": "ScheduleTuple",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"min_occurs": 1,
"max_occurs": 3,
}
)
@dataclass
class ScheduleExchangeReq(ScheduleExchangeReqType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
@dataclass
class ScheduleExchangeResType(V2GresponseType):
evseprocessing: Optional[ProcessingType] = field(
default=None,
metadata={
"name": "EVSEProcessing",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
"required": True,
}
)
go_to_pause: Optional[bool] = field(
default=None,
metadata={
"name": "GoToPause",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
dynamic_seres_control_mode: Optional[DynamicSeresControlModeType] = field(
default=None,
metadata={
"name": "Dynamic_SEResControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
scheduled_seres_control_mode: Optional[ScheduledSeresControlModeType] = field(
default=None,
metadata={
"name": "Scheduled_SEResControlMode",
"type": "Element",
"namespace": "urn:iso:std:iso:15118:-20:CommonMessages",
}
)
@dataclass
class ScheduleExchangeRes(ScheduleExchangeResType):
class Meta:
namespace = "urn:iso:std:iso:15118:-20:CommonMessages"
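# Illustrative usage sketch, not part of the generated bindings: the field
# metadata above follows the xsdata dataclass convention, so instances can
# typically be round-tripped through XML with xsdata's serializer and parser.
# xsdata itself is an assumed dependency here; only ScheduleExchangeRes is
# taken from this module.
if __name__ == "__main__":
    from xsdata.formats.dataclass.parsers import XmlParser
    from xsdata.formats.dataclass.serializers import XmlSerializer

    # Build a response with a single optional field set and render it to XML.
    res = ScheduleExchangeRes(go_to_pause=True)
    xml = XmlSerializer().render(res)
    # Parse the XML back into the same dataclass type.
    parsed = XmlParser().from_string(xml, ScheduleExchangeRes)
    print(parsed.go_to_pause)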
|
1625900
|
import importlib
from datetime import timedelta
from functools import wraps
from types import ModuleType
from unittest import mock
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import strawberry_django_jwt.object_types
from strawberry_django_jwt import exceptions, utils
from strawberry_django_jwt.object_types import TokenPayloadType
from strawberry_django_jwt.settings import jwt_settings
from tests.decorators import OverrideJwtSettings
from tests.testcases import AsyncTestCase, TestCase
def reload_import(imp: ModuleType):
def wrap(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
importlib.reload(imp)
return fn(*args, **kwargs)
return wrapper
return wrap
class JWTPayloadTests(TestCase):
@mock.patch(
"django.contrib.auth.models.User.get_username",
return_value=mock.Mock(pk="test"),
)
def test_foreign_key_pk(self, *args):
payload = utils.jwt_payload(self.user)
username = jwt_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER(payload)
self.assertEqual(username, "test")
@OverrideJwtSettings(JWT_AUDIENCE="test")
@reload_import(strawberry_django_jwt.utils.object_types)
def test_audience(self):
payload = utils.jwt_payload(self.user)
self.assertEqual(payload.aud, "test")
@OverrideJwtSettings(JWT_ISSUER="test")
@reload_import(strawberry_django_jwt.utils.object_types)
def test_issuer(self):
payload = utils.jwt_payload(self.user)
self.assertEqual(payload.iss, "test")
class AsymmetricAlgorithmsTests(TestCase):
def test_rsa_jwt(self):
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend(),
)
public_key = private_key.public_key()
payload = utils.jwt_payload(self.user)
with OverrideJwtSettings(
JWT_PUBLIC_KEY=public_key,
JWT_PRIVATE_KEY=private_key,
JWT_ALGORITHM="RS256",
):
token = utils.jwt_encode(payload)
decoded = utils.jwt_decode(token)
self.assertEqual(payload, decoded)
class GetHTTPAuthorizationHeaderTests(TestCase):
def test_get_authorization_header(self):
headers = {
jwt_settings.JWT_AUTH_HEADER_NAME: f"{jwt_settings.JWT_AUTH_HEADER_PREFIX} {self.token}",
}
request = self.request_factory.get("/", **headers)
authorization_header = utils.get_http_authorization(request)
self.assertEqual(authorization_header, self.token)
def test_invalid_header_prefix(self):
headers = {
jwt_settings.JWT_AUTH_HEADER_NAME: "INVALID token",
}
request = self.request_factory.get("/", **headers)
authorization_header = utils.get_http_authorization(request)
self.assertIsNone(authorization_header)
def test_get_authorization_cookie(self):
headers = {
jwt_settings.JWT_AUTH_HEADER_NAME: f"{jwt_settings.JWT_AUTH_HEADER_PREFIX} {self.token}",
}
request = self.request_factory.get("/", **headers)
request.COOKIES[jwt_settings.JWT_COOKIE_NAME] = self.token
authorization_cookie = utils.get_http_authorization(request)
self.assertEqual(authorization_cookie, self.token)
class GetCredentialsTests(TestCase):
@OverrideJwtSettings(JWT_ALLOW_ARGUMENT=True)
def test_argument_allowed(self):
kwargs = {
jwt_settings.JWT_ARGUMENT_NAME: self.token,
}
request = self.request_factory.get("/")
credentials = utils.get_credentials(request, **kwargs)
self.assertEqual(credentials, self.token)
@OverrideJwtSettings(JWT_ALLOW_ARGUMENT=True)
def test_input_argument(self):
kwargs = {
"input": {
jwt_settings.JWT_ARGUMENT_NAME: self.token,
},
}
request = self.request_factory.get("/")
credentials = utils.get_credentials(request, **kwargs)
self.assertEqual(credentials, self.token)
@OverrideJwtSettings(JWT_ALLOW_ARGUMENT=True)
def test_missing_argument(self):
request = self.request_factory.get("/")
credentials = utils.get_credentials(request)
self.assertIsNone(credentials)
class GetPayloadTests(TestCase):
@OverrideJwtSettings(
JWT_VERIFY_EXPIRATION=True, JWT_EXPIRATION_DELTA=timedelta(seconds=-1)
)
def test_expired_signature(self):
payload = utils.jwt_payload(self.user)
token = utils.jwt_encode(payload)
with self.assertRaises(exceptions.JSONWebTokenExpired):
utils.get_payload(token)
def test_decode_audience_missing(self):
payload = utils.jwt_payload(self.user)
token = utils.jwt_encode(payload)
with OverrideJwtSettings(JWT_AUDIENCE="test"):
with self.assertRaises(exceptions.JSONWebTokenError):
utils.get_payload(token)
def test_decode_error(self):
with self.assertRaises(exceptions.JSONWebTokenError):
utils.get_payload("invalid")
class GetUserByNaturalKeyTests(TestCase):
def test_user_does_not_exists(self):
user = utils.get_user_by_natural_key(0)
self.assertIsNone(user)
class GetUserByPayloadTests(TestCase):
def test_user_by_invalid_payload(self):
with self.assertRaises(exceptions.JSONWebTokenError):
utils.get_user_by_payload(TokenPayloadType())
@mock.patch(
"django.contrib.auth.models.User.is_active",
new_callable=mock.PropertyMock,
return_value=False,
)
def test_user_disabled_by_payload(self, *args):
payload = utils.jwt_payload(self.user)
with self.assertRaises(exceptions.JSONWebTokenError):
utils.get_user_by_payload(payload)
class GetUserByNaturalKeyTestsAsync(AsyncTestCase):
async def test_user_does_not_exists_async(self):
user = await utils.get_user_by_natural_key_async(0)
self.assertIsNone(user)
class GetUserByPayloadTestsAsync(AsyncTestCase):
async def test_user_by_invalid_payload_async(self):
with self.assertRaises(exceptions.JSONWebTokenError):
await utils.get_user_by_payload_async(TokenPayloadType())
async def test_user_disabled_by_payload_async(self):
payload = utils.jwt_payload(self.user)
with mock.patch(
"django.contrib.auth.models.User.is_active",
new_callable=mock.PropertyMock,
return_value=False,
):
with self.assertRaises(exceptions.JSONWebTokenError):
await utils.get_user_by_payload_async(payload)
|
1625909
|
import numpy as np
import scipy
from ._simsig_tools import _check_list,_rand_uniform
from ._generator_base import generator_base
#------------------------------------------------------------------------------------
__all__=['harmonics','Harmonics']
#------------------------------------------------------------------------------------
_SIGNAL_PARAMETER_DEFAULT = {'amp':1, 'f0':1, 'delta_f':0, 'delay':0,'phase0':0,'callback': None}
_SYSTEM_PARAMETER_DEFAULT = {'fs':10, 'length':512}
#------------------------------------------------------------------------------------
def harmonics(amplitude = [1],
f0 = [1],
delta_f = [0],
delay = [0],
phase0 = [0],
callback = [None],
fs=10,
length=512,
snr_db = None):
'''
Harmonic signal generation.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signals.
* f0: 1d ndarray,
initial frequency (carried frequency).
* delta_f: 1d ndarray,
delta_f frequency (frequency band).
* delay: 1d ndarray,
signal delay.
* phase0: 1d ndarray,
        initial phase.
* callback: 1d ndarray,
callback for special operations on signals.
* fs: float,
is the sampling frequency.
* length: int,
is the signal length;
* snr_db: float,
        signal-to-noise ratio in dB.
Returns:
-------------
* signal: 1d ndarray (complex),
harmonic signal.
Notes
---------
* Fs and N are the system parameters.
* Simulate harmonic (actually frequency modulated signal)
in the following form:
..math::
s = sum{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
where:
* i = 0,.., are the signals number in superposition
(actually the number of the set initial frequencies(f0));
* a_i is the amplitude;
* f_0_i is the initial frequency;
* tau_i is the signal delay;
* Delta f_i is the frequency band (from f_0 to f_0+Delta_f);
* varphi_0_i is the initial phase
* f_i is the modulation callback;
* t is the time (up to N/fs);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* noises are the gaussian white noises.
Example
-----------
import dsatools.generator
from dsatools.generator import callbacks
import dsatools.utilits as ut
#Example1----------------------------------------
signal = dsatools.generator.harmonics()
ut.probe(signal)
#Example2----------------------------------------
signal = dsatools.generator.harmonics(amplitude=[1],
f0=[1,2,3],
delta_f=[0.3],
delay=[0],
phase0=[0],
callback=[None],
fs=10,
length=512,
snr_db=None,)
ut.probe(signal)
#Example3----------------------------------------
cb1 = callbacks.harmonic_modulataion(amp_am=0.5,freq_am=9.5,phase_shift=0)
cb2 = callbacks.harmonic_modulataion(amp_am=0.7,freq_am=8.2,phase_shift=0)
signal = dsatools.generator.harmonics(amplitude=[1,1,0.4,0.3],
f0=[1,2,3,4],
delta_f=[0.2,1.3,],
delay =[0,0,0,4],
phase0=[0,1.2],
callback=[cb1,None,cb2],
fs=10,
length=512,
snr_db=20,)
ut.probe(signal)
'''
signal = Harmonics(fs, length)
signal.set_signal_parameters(amplitude = amplitude,
f0 = f0,
delta_f = delta_f,
delay = delay,
phase0 = phase0,
callback = callback,)
return signal.get_signal(snr_db = snr_db)
#------------------------------------------------------------------------------------
class Harmonics(generator_base):
'''
Harmonic signal generation.
    Attributes
----------------
> system_parameters = {fs, length},
* fs: float,
is the sampling frequency.
* length: int,
is the signal length.
> signal_parameters = list of
{amp,f0,delta_f,delay,phase0,callback},
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
        initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Methods
-----------
* set_system_parameters;
* get_system_parameters;
* set_signal_parameters;
* add_signal_parameters;
* print_signal_parameters;
* get_signal.
Notes
---------
* Fs and N are the system parameters.
* Simulate harmonic (actually frequency modulated signal)
in the following form:
..math::
s = sum{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
where:
* i = 0,.., are the signals number in superposition
(actually the number of the set initial frequencies(f0));
* a_i is the amplitude;
* f_0_i is the initial frequency;
* tau_i is the signal delay;
* Delta f_i is the frequency band (from f_0 to f_0+Delta_f);
* varphi_0_i is the initial phase
* f_i is the modulation callback;
* t is the time (up to N/fs);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* noises are the gaussian white noises.
Example
-----------
import dsatools.generator
from dsatools.generator import callbacks
import dsatools.utilits as ut
cb1 = callbacks.harmonic_modulataion(amp_am=0.1,freq_am=0.5,phase_shift=0)
callbacks.probe_modulation(cb1,512)
cb2 = callbacks.pulse_modulataion(200,400)
callbacks.probe_modulation(cb2,512)
signal1 = dsatools.generator.Harmonics()
signal1.get_system_parameters()
signal1.set_signal_parameters(amplitude=[1,0.5],
f0=[1,2,3],
delta_f=[0.4,0.1],
delay=[0],
phase0=[0],
callback=[cb1,cb2],)
sig1 = signal1.get_signal(snr_db = 200)
ut.probe(sig1)
'''
#@override
def __init__(self,
fs = _SYSTEM_PARAMETER_DEFAULT['fs'],
length = _SYSTEM_PARAMETER_DEFAULT['length']
):
self._signal_parameters_dict_default = _SIGNAL_PARAMETER_DEFAULT.copy()
self._system_parameters_dict_default = _SYSTEM_PARAMETER_DEFAULT.copy()
self.set_system_parameters(fs, length)
self.set_signal_parameters_dict_default()
#------------------------------------------------------------------------------------
#@override
def set_system_parameters(self,
fs=_SYSTEM_PARAMETER_DEFAULT['fs'],
                              length = _SYSTEM_PARAMETER_DEFAULT['length']):
'''
Set system parameters.
Parameters
-------------
* fs: float,
is the sampling frequency.
* length: int,
is the length of signal.
'''
self._system_parameters['fs'] = fs
self._system_parameters['length'] = length
#------------------------------------------------------------------------------------
#@override
def make_signal_parameters_dict(self,
amplitude = _SIGNAL_PARAMETER_DEFAULT['amp'],
f0 = _SIGNAL_PARAMETER_DEFAULT['f0'],
delta_f = _SIGNAL_PARAMETER_DEFAULT['delta_f'],
delay = _SIGNAL_PARAMETER_DEFAULT['delay'],
phase0 = _SIGNAL_PARAMETER_DEFAULT['phase0'],
callback = _SIGNAL_PARAMETER_DEFAULT['callback']):
'''
Make the signal parameters dictionary.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
            initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Returns
----------
* signal_parameters_dict: dict,
signal parameters dictionary.
'''
signal_parameters_dict = self.get_signal_parameters_dict_default()
signal_parameters_dict['amp'] = amplitude
signal_parameters_dict['f0'] = f0
signal_parameters_dict['delta_f'] = delta_f
signal_parameters_dict['delay'] = delay
signal_parameters_dict['phase0'] = phase0
signal_parameters_dict['callback'] = callback
return signal_parameters_dict
#------------------------------------------------------------------------------------
#@override
def add_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Add signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
            initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
# main array - f0
f0 = _check_list(f0,-1)
len_list = len(f0) #required length for all other arrays
amplitude = _check_list(amplitude, len_list, 'last')
delta_f = _check_list(delta_f, len_list, 0)
delay = _check_list(delay, len_list, 0)
phase0 = _check_list(phase0, len_list, 0)
callback = _check_list(callback, len_list, 'None')
dict2add = []
for (amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_) in \
zip(amplitude,
f0,
delta_f,
delay,
phase0,
callback):
dict2add += [self.make_signal_parameters_dict(amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_)]
self.add_signal_parameters_dicts(dict2add)
#------------------------------------------------------------------------------------
#@override
def set_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Set signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
            initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
self.clear_signal_parameters()
self.add_signal_parameters(amplitude,
f0,
delta_f,
delay,
phase0,
callback)
#------------------------------------------------------------------------------------
#@override
def add_random_signal_parameters(self,
n_of_params = 1,
amplitude_range = [0,_SIGNAL_PARAMETER_DEFAULT['amp']],
f0_range = [0,_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f_range = [0,_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay_range = [0,_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0_range = [0,_SIGNAL_PARAMETER_DEFAULT['phase0']]):
'''
Add random uniformly distributed signal_parameters.
Parameters
-------------
* n_of_params: int,
            number of parameters.
* amplitude_range: [float,float],
ranges of amplitudes.
* f0_range: [float,float],
ranges of the initial frequencies
(carried frequencies).
* delta_f_range: [float,float],
ranges of the delta_f frequencies
(frequency bands).
* delay_range: [float,float],
ranges of the signal delays.
* phase0_range: [float,float],
            ranges of the initial phases.
Notes
-------
        * Callbacks are not applied by this function.
'''
scale_float = _SCALE_TO_FLOAT_
amplitude = _rand_uniform(amplitude_range, n_of_params, scale_float)
f0 = _rand_uniform(f0_range, n_of_params, scale_float)
delta_f = _rand_uniform(delta_f_range, n_of_params, scale_float)
delay = _rand_uniform(delay_range, n_of_params, scale_float)
phase0 = _rand_uniform(phase0_range, n_of_params, scale_float)
self.add_signal_parameters(amplitude,
f0,
delta_f,
delay,
phase0,
callback = n_of_params * [None])
#------------------------------------------------------------------------------------
#@override
def _sim_one_sig(self, sig_param):
'''
Simulate one harmonic (actually frequency modulated signal).
Parameters
-----------
* sig_param: dict,
            dictionary of signal parameters, which includes
(a,f_0,\Delta f,\tau,phi0,callback).
Returns
-----------
* sig: 1d ndarray (complex),
simulated signal.
Notes
---------
* Fs and N are system parameters.
* In harmonic signal \tau and \varphi_0/2/pi
          play the same role.
* If callback is not None: s = callback(s)
(format of callback = f(x)),
          if callback is None, it is not applied.
* Signal in form:
..math::
s = f(a*exp[j2pi(f_0(t-tau)+Delta_f(t-tau)^2/(N/fs))+j varphi_0]),
where:
* a is the amplitude;
* f_0 is the initial frequency;
* tau is the signal delay;
* Delta_f is the frequency band
(from f_0 to f_0+\Delta f);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* t is the time (up to N/fs);
* varphi_0 is the initial phase
* f modulation callback.
'''
fs = self._system_parameters['fs']
N = self._system_parameters['length']
f0 = sig_param['f0']
incF = sig_param['delta_f']
tau = sig_param['delay']
phi0 = sig_param['phase0']
A = sig_param['amp']
callback = sig_param['callback']
t = np.arange(N)/fs - tau
Tm = N/fs
sig = A*np.exp(2j*np.pi*( f0*t + incF*np.square(t)/2/Tm )+ phi0*1j )
        sig = np.asarray(sig, dtype=complex)
if (callback in ['None', None]):
return sig
elif type(callback ) is not list:
callback = list([callback])
for callback_i in callback:
sig = callback_i(sig)
return sig
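#------------------------------------------------------------------------------------
# Illustrative reference, not part of the original API: a direct evaluation of
# the single-component formula documented above, with the same parameter
# meanings as in _sim_one_sig. The function name is introduced here only for
# illustration; it can be used to sanity-check a single component.
def _reference_single_harmonic(amp=1.0, f0=1.0, delta_f=0.0, delay=0.0,
                               phase0=0.0, fs=10.0, length=512):
    t = np.arange(length) / fs - delay   # time grid shifted by the delay
    tm = length / fs                     # observation interval N/fs
    return amp * np.exp(2j * np.pi * (f0 * t + delta_f * np.square(t) / (2 * tm))
                        + 1j * phase0)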
|
1625966
|
from utils import tk_dynamic as tkd, tk_utils, autocompletion
from utils.item_name_lists import NO_UNIQUE_MAP
import tkinter as tk
from tkinter import ttk
import time
class Drops(tkd.Frame):
def __init__(self, main_frame, parent, **kw):
tkd.Frame.__init__(self, parent, **kw)
self.drops = dict()
self.main_frame = main_frame
self._make_widgets()
def _make_widgets(self):
tkd.Label(self, text='Drops', font='helvetica 14').pack()
lf = tkd.Frame(self)
lf.pack(expand=1, fill=tk.BOTH)
scrollbar = ttk.Scrollbar(lf, orient=tk.VERTICAL)
self.m = tkd.Text(lf, height=8, width=23, yscrollcommand=scrollbar.set, font='courier 11', wrap=tk.WORD, state=tk.DISABLED, cursor='', exportselection=1, name='droplist', borderwidth=2)
self.m.bind('<Double-Button-1>', lambda _: (self.main_frame.img_panel.focus_force(), self.add_drop()))
self.m.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=(1, 2), padx=1)
scrollbar.config(command=self.m.yview)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y, pady=(2, 1), padx=0)
def add_drop(self):
drop = autocompletion.acbox(enable=True, title='Add drop', unid_mode=self.main_frame.autocompletion_unids, add_to_last_run=self.main_frame.add_to_last_run)
if not drop or drop['input'] == '':
return
if drop['item_name'] is not None:
for i, item in enumerate(self.main_frame.grail_tab.grail):
if self.main_frame.autocompletion_unids:
base = ' '.join(drop['item_name'].split(' ')[:-1])
drop['Rarity'] = drop['item_name'].split(' ')[-1].replace('(', '').replace(')', '')
if base in NO_UNIQUE_MAP:
drop['TC'] = NO_UNIQUE_MAP[base].get('TC', '')
drop['Item Class'] = NO_UNIQUE_MAP[base].get('Item Class', '')
break
if base == item['Base Item']:
drop['TC'] = item.get('TC', '')
drop['Item Class'] = item.get('Item Class', '')
break
if item['Item'] == drop['item_name']:
prefix = ''
drop['Grailer'] = 'False'
drop['Eth Grailer'] = ''
if item.get('Found', False) is False:
if self.main_frame.auto_upload_herokuapp:
resp = self.main_frame.grail_tab.upload_to_herokuapp(
upd_dict={item['Item']: True},
show_confirm=False,
pop_up_msg="Congrats, a new drop! Add it to grail?\n\nHerokuapp login info:",
pop_up_title="Grail item")
else:
resp = tk_utils.mbox(msg="Congrats, a new drop! Add it to local grail?", title="Grail item")
if resp is not None:
self.main_frame.grail_tab.update_grail_from_index(i)
prefix += '(*)'
drop['Grailer'] = 'True'
if drop.get('eth', False) is True:
drop['Eth Grailer'] = 'False'
if drop.get('eth', False) is True and item.get('FoundEth', False) is False:
if self.main_frame.auto_upload_herokuapp:
resp = self.main_frame.grail_tab.upload_to_herokuapp(
eth_dict={item['Item']: True},
show_confirm=False,
pop_up_msg="Congrats, a new eth drop! Add it to eth grail?\n\nHerokuapp login info:",
pop_up_title="Eth grail item")
else:
resp = tk_utils.mbox(msg="Congrats, a new eth drop! Add it to local eth grail?", title="Eth grail item")
if resp is not None:
self.main_frame.grail_tab.grail[i].update({'FoundEth': True})
prefix += '(*)'
drop['Eth Grailer'] = 'True'
drop['input'] = (prefix + ' ' + drop['input']).strip()
drop['TC'] = item.get('TC', '')
drop['QLVL'] = item.get('QLVL', '')
drop['Item Class'] = item.get('Item Class', '')
drop['Rarity'] = item.get('Rarity', '')
break
last_run = drop.pop('last_run', False)
run_no = len(self.main_frame.timer_tab.laps)
if self.main_frame.timer_tab.is_running:
run_no += 1
if last_run and run_no > 0:
run_no -= 1
self.main_frame.add_to_last_run = last_run
drop['Real time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
drop['Profile'] = self.main_frame.active_profile
self.drops.setdefault(str(run_no), []).append(drop)
self.display_drop(drop=drop, run_no=run_no)
def display_drop(self, drop, run_no):
line = 'Run %s: %s' % (run_no, drop['input'].strip())
if self.m.get('1.0', tk.END) != '\n':
line = '\n' + line
self.m.config(state=tk.NORMAL)
self.m.insert(tk.END, line)
self.m.yview_moveto(1)
self.m.config(state=tk.DISABLED)
def delete_selected_drops(self):
if self.focus_get()._name == 'droplist':
cur_row = self.m.get('insert linestart', 'insert lineend+1c').strip()
resp = tk_utils.mbox(msg='Do you want to delete the row:\n%s' % cur_row, title='Warning')
if resp is True:
sep = cur_row.find(':')
run_no = cur_row[:sep].replace('Run ', '')
drop = cur_row[sep+2:]
try:
self.drops[run_no].remove(next(d for d in self.drops[run_no] if d['input'] == drop))
self.m.config(state=tk.NORMAL)
self.m.delete('insert linestart', 'insert lineend+1c')
self.m.config(state=tk.DISABLED)
except StopIteration:
pass
self.main_frame.img_panel.focus_force()
def save_state(self):
return dict(drops=self.drops)
def load_from_state(self, state):
self.m.config(state=tk.NORMAL)
self.m.delete(1.0, tk.END)
self.m.config(state=tk.DISABLED)
self.drops = state.get('drops', dict())
for k, v in self.drops.items():
for i in range(len(v)):
if not isinstance(v[i], dict):
self.drops[k][i] = {'item_name': None, 'input': v[i], 'extra': ''}
for run in sorted(self.drops.keys(), key=lambda x: int(x)):
for drop in self.drops[run]:
self.display_drop(drop=drop, run_no=run)
def reset_session(self):
self.drops = dict()
self.m.config(state=tk.NORMAL)
self.m.delete(1.0, tk.END)
self.m.config(state=tk.DISABLED)
|
1626010
|
import os
SECRET_KEY = 'amplitude-test'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_SAVE_EVERY_REQUEST = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
ROOT_URLCONF = 'tests.urls'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'amplitude',
'tests',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'amplitude.middleware.SessionInfo',
'amplitude.middleware.SendPageViewEvent',
]
AMPLITUDE_API_KEY = 'abc123'
AMPLITUDE_INCLUDE_USER_DATA = True
AMPLITUDE_INCLUDE_GROUP_DATA = True
|
1626039
|
description = 'Virtual cryostat'
group = 'optional'
devices = dict(
T = device('nicos.devices.generic.DeviceAlias'),
Ts = device('nicos.devices.generic.DeviceAlias'),
T_cryo = device('nicos.devices.generic.VirtualRealTemperature',
description = 'A virtual (but realistic) temperature controller',
abslimits = (2, 1000),
warnlimits = (0, 325),
ramp = 60,
unit = 'K',
jitter = 0,
precision = 0.1,
window = 30.0,
lowlevel = True,
),
T_sample = device('nicos.devices.generic.ReadonlyParamDevice',
parameter = 'sample',
device = 'T_cryo',
description = 'Temperature of virtual sample',
lowlevel = True,
),
)
alias_config = {
'T': {'T_cryo': 100},
'Ts': {'T_sample': 100},
}
startupcode = """
AddEnvironment(T, Ts)
"""
|
1626046
|
import logging
from flask import Blueprint, request
from followthemoney import model
from followthemoney.compare import compare
from aleph.settings import MAX_EXPAND_ENTITIES
from aleph.model import Judgement
from aleph.logic.profiles import get_profile, decide_pairwise
from aleph.logic.expand import entity_tags, expand_proxies
from aleph.queues import queue_task, OP_UPDATE_ENTITY
from aleph.search import MatchQuery, QueryParser
from aleph.views.serializers import ProfileSerializer, SimilarSerializer
from aleph.views.context import tag_request
from aleph.views.util import obj_or_404, jsonify, parse_request, get_session_id
from aleph.views.util import get_index_entity, get_db_collection
from aleph.views.util import require
blueprint = Blueprint("profiles_api", __name__)
log = logging.getLogger(__name__)
@blueprint.route("/api/2/profiles/<profile_id>", methods=["GET"])
def view(profile_id):
"""
---
get:
summary: Retrieve a profile
description: >-
Get a profile with constituent items and the merged pseudo entity.
parameters:
- in: path
name: profile_id
required: true
schema:
type: string
responses:
'200':
description: OK
content:
application/json:
schema:
$ref: '#/components/schemas/Profile'
tags:
- Profile
"""
profile = obj_or_404(get_profile(profile_id, authz=request.authz))
require(request.authz.can(profile.get("collection_id"), request.authz.READ))
return ProfileSerializer.jsonify(profile)
@blueprint.route("/api/2/profiles/<profile_id>/tags", methods=["GET"])
def tags(profile_id):
"""
---
get:
summary: Get profile tags
description: >-
Get tags for the profile with id `profile_id`.
parameters:
- in: path
name: profile_id
required: true
schema:
type: string
responses:
'200':
description: OK
content:
application/json:
schema:
type: object
allOf:
- $ref: '#/components/schemas/QueryResponse'
properties:
results:
type: array
items:
$ref: '#/components/schemas/EntityTag'
tags:
- Profile
"""
profile = obj_or_404(get_profile(profile_id, authz=request.authz))
require(request.authz.can(profile.get("collection_id"), request.authz.READ))
tag_request(collection_id=profile.get("collection_id"))
results = entity_tags(profile["merged"], request.authz)
return jsonify({"status": "ok", "total": len(results), "results": results})
@blueprint.route("/api/2/profiles/<profile_id>/similar", methods=["GET"])
def similar(profile_id):
"""
---
get:
summary: Get similar entities
description: >
Get a list of similar entities to the profile with id `profile_id`
parameters:
- in: path
name: profile_id
required: true
schema:
type: string
- in: query
name: 'filter:schema'
schema:
items:
type: string
type: array
- in: query
name: 'filter:schemata'
schema:
items:
type: string
type: array
responses:
'200':
description: Returns a list of entities
content:
application/json:
schema:
$ref: '#/components/schemas/EntitiesResponse'
tags:
- Profile
"""
# enable_cache()
profile = obj_or_404(get_profile(profile_id, authz=request.authz))
require(request.authz.can(profile.get("collection_id"), request.authz.READ))
tag_request(collection_id=profile.get("collection_id"))
exclude = [item["entity_id"] for item in profile["items"]]
result = MatchQuery.handle(request, entity=profile["merged"], exclude=exclude)
entities = list(result.results)
result.results = []
for obj in entities:
item = {
"score": compare(model, profile["merged"], obj),
"judgement": Judgement.NO_JUDGEMENT,
"collection_id": profile.get("collection_id"),
"entity": obj,
}
result.results.append(item)
return SimilarSerializer.jsonify_result(result)
@blueprint.route("/api/2/profiles/<profile_id>/expand", methods=["GET"])
def expand(profile_id):
"""
---
get:
summary: Expand the profile to get its adjacent entities
description: >-
Get the property-wise list of entities adjacent to the entities that
are part of the profile `profile_id`.
parameters:
- in: path
name: profile_id
required: true
schema:
type: string
- description: properties to filter on
in: query
name: 'filter:property'
schema:
type: string
- in: query
description: number of entities to return per property
name: limit
schema:
type: number
responses:
'200':
description: OK
content:
application/json:
schema:
type: object
allOf:
- $ref: '#/components/schemas/QueryResponse'
properties:
results:
type: array
items:
$ref: '#/components/schemas/EntityExpand'
tags:
- Profile
"""
profile = obj_or_404(get_profile(profile_id, authz=request.authz))
require(request.authz.can(profile.get("collection_id"), request.authz.READ))
tag_request(collection_id=profile.get("collection_id"))
parser = QueryParser(request.args, request.authz, max_limit=MAX_EXPAND_ENTITIES)
properties = parser.filters.get("property")
results = expand_proxies(
profile.get("proxies"),
properties=properties,
authz=request.authz,
limit=parser.limit,
)
result = {
"status": "ok",
"total": sum(result["count"] for result in results),
"results": results,
}
return jsonify(result)
@blueprint.route("/api/2/profiles/_pairwise", methods=["POST"])
def pairwise():
"""
---
post:
summary: Make a pairwise judgement between an entity and a match.
description: >
This lets a user decide if they think a given xref match is a true or
false match. Implicitly, this might create or alter a profile in the
        collection used by `entity_id`.
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/Pairwise'
responses:
'200':
content:
application/json:
schema:
properties:
status:
description: accepted
type: string
profile_id:
description: profile_id for `entity`.
type: string
type: object
description: Accepted
tags:
- Profile
"""
data = parse_request("Pairwise")
entity = get_index_entity(data.get("entity_id"))
collection = get_db_collection(entity["collection_id"], request.authz.WRITE)
match = get_index_entity(data.get("match_id"))
match_collection = get_db_collection(match["collection_id"])
profile = decide_pairwise(
collection,
entity,
match_collection,
match,
judgement=data.get("judgement"),
authz=request.authz,
)
job_id = get_session_id()
queue_task(collection, OP_UPDATE_ENTITY, job_id=job_id, entity_id=entity.get("id"))
profile_id = profile.id if profile is not None else None
return jsonify({"status": "ok", "profile_id": profile_id}, status=200)
|
1626049
|
from pandas_datareader import DataReader
import numpy as np
import pandas as pd
import datetime
# Grab time series data for 5-year history for the stock (here MSFT)
# and for S&P-500 Index
start_date = datetime.datetime.now() - datetime.timedelta(days=1826)
end_date = datetime.date.today()
stock = 'MSFT'
index = '^GSPC'
df = DataReader(stock,'yahoo', start_date, end_date)
dfb = DataReader(index,'yahoo', start_date, end_date)
# create a time-series of monthly data points
rts = df.resample('M').last()
rbts = dfb.resample('M').last()
dfsm = pd.DataFrame({'s_adjclose' : rts['Adj Close'],
'b_adjclose' : rbts['Adj Close']},
index=rts.index)
# compute returns
dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
dfsm[['s_adjclose','b_adjclose']].shift(1) -1
dfsm = dfsm.dropna()
covmat = np.cov(dfsm["s_returns"],dfsm["b_returns"])
# calculate measures now
beta = covmat[0,1]/covmat[1,1]
alpha= np.mean(dfsm["s_returns"])-beta*np.mean(dfsm["b_returns"])
# r_squared = 1. - SS_res/SS_tot
ypred = alpha + beta * dfsm["b_returns"]
SS_res = np.sum(np.power(ypred-dfsm["s_returns"],2))
SS_tot = covmat[0,0]*(len(dfsm)-1) # SS_tot is sample_variance*(n-1)
r_squared = 1. - SS_res/SS_tot
# 5-year volatility and 1-year momentum
volatility = np.sqrt(covmat[0,0])
momentum = np.prod(1+dfsm["s_returns"].tail(12).values) -1
# annualize the numbers
prd = 12. # used monthly returns; 12 periods to annualize
alpha = alpha*prd
volatility = volatility*np.sqrt(prd)
print (f'beta = {beta}')
print (f'alpha = {alpha}')
print (f'r_squared = {r_squared}')
print (f'volatility = {volatility}')
print (f'momentum = {momentum}')
volume = df.Volume
volume = volume.tail(60).mean()
print (volume)
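# Illustrative cross-check, not part of the original script: the same
# beta/alpha/R^2 computation expressed as a reusable helper (hypothetical
# name), so the logic above can be verified on synthetic returns without
# hitting the Yahoo endpoint.
def capm_stats(stock_returns, index_returns):
    stock_returns = np.asarray(stock_returns)
    index_returns = np.asarray(index_returns)
    cov = np.cov(stock_returns, index_returns)
    beta_ = cov[0, 1] / cov[1, 1]
    alpha_ = np.mean(stock_returns) - beta_ * np.mean(index_returns)
    pred = alpha_ + beta_ * index_returns
    ss_res = np.sum((pred - stock_returns) ** 2)
    ss_tot = cov[0, 0] * (len(stock_returns) - 1)
    return beta_, alpha_, 1.0 - ss_res / ss_tot

# Synthetic sanity check: returns built with a known beta of 1.5.
rng = np.random.default_rng(0)
b_synth = rng.normal(0, 0.04, 60)
s_synth = 1.5 * b_synth + rng.normal(0, 0.01, 60)
print(capm_stats(s_synth, b_synth))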
|
1626071
|
from django.conf.urls import url
from . import views, views_api
# NOTE: file/folder/hyperlink objects can be referred to as 'item', but only if
# ObjectPermissionMixin is used in the view
app_name = 'filesfolders'
urls_ui = [
url(
regex=r'^(?P<project>[0-9a-f-]+)$',
view=views.ProjectFileView.as_view(),
name='list',
),
url(
regex=r'^folder/(?P<folder>[0-9a-f-]+)$',
view=views.ProjectFileView.as_view(),
name='list',
),
url(
regex=r'^upload/(?P<project>[0-9a-f-]+)$',
view=views.FileCreateView.as_view(),
name='file_create',
),
url(
regex=r'^upload/in/(?P<folder>[0-9a-f-]+)$',
view=views.FileCreateView.as_view(),
name='file_create',
),
url(
regex=r'^update/(?P<item>[0-9a-f-]+)$',
view=views.FileUpdateView.as_view(),
name='file_update',
),
url(
regex=r'^delete/(?P<item>[0-9a-f-]+)$',
view=views.FileDeleteView.as_view(),
name='file_delete',
),
url(
regex=r'^download/(?P<file>[0-9a-f-]+)/(?P<file_name>[^\0/]+)$',
view=views.FileServeView.as_view(),
name='file_serve',
),
url(
regex=r'^download/(?P<secret>[\w\-]+)/(?P<file_name>[^\0/]+)$',
view=views.FileServePublicView.as_view(),
name='file_serve_public',
),
url(
regex=r'^link/(?P<file>[0-9a-f-]+)$',
view=views.FilePublicLinkView.as_view(),
name='file_public_link',
),
url(
regex=r'^folder/add/(?P<project>[0-9a-f-]+)$',
view=views.FolderCreateView.as_view(),
name='folder_create',
),
url(
regex=r'^folder/add/in/(?P<folder>[0-9a-f-]+)$',
view=views.FolderCreateView.as_view(),
name='folder_create',
),
url(
regex=r'^folder/update/(?P<item>[0-9a-f-]+)$',
view=views.FolderUpdateView.as_view(),
name='folder_update',
),
url(
        regex=r'^folder/delete/(?P<item>[0-9a-f-]+)$',
view=views.FolderDeleteView.as_view(),
name='folder_delete',
),
url(
regex=r'^link/add/(?P<project>[0-9a-f-]+)$',
view=views.HyperLinkCreateView.as_view(),
name='hyperlink_create',
),
url(
regex=r'^link/add/in/(?P<folder>[0-9a-f-]+)$',
view=views.HyperLinkCreateView.as_view(),
name='hyperlink_create',
),
url(
regex=r'^link/update/(?P<item>[0-9a-f-]+)$',
view=views.HyperLinkUpdateView.as_view(),
name='hyperlink_update',
),
url(
regex=r'^link/delete/(?P<item>[0-9a-f-]+)$',
view=views.HyperLinkDeleteView.as_view(),
name='hyperlink_delete',
),
url(
regex=r'^batch/(?P<project>[0-9a-f-]+)$',
view=views.BatchEditView.as_view(),
name='batch_edit',
),
url(
regex=r'^batch/in/(?P<folder>[0-9a-f-]+)$',
view=views.BatchEditView.as_view(),
name='batch_edit',
),
]
urls_api = [
url(
regex=r'^api/folder/list-create/(?P<project>[0-9a-f-]+)$',
view=views_api.FolderListCreateAPIView.as_view(),
name='api_folder_list_create',
),
url(
regex=r'^api/folder/retrieve-update-destroy/(?P<folder>[0-9a-f-]+)$',
view=views_api.FolderRetrieveUpdateDestroyAPIView.as_view(),
name='api_folder_retrieve_update_destroy',
),
url(
regex=r'^api/file/list-create/(?P<project>[0-9a-f-]+)$',
view=views_api.FileListCreateAPIView.as_view(),
name='api_file_list_create',
),
url(
regex=r'^api/file/retrieve-update-destroy/(?P<file>[0-9a-f-]+)$',
view=views_api.FileRetrieveUpdateDestroyAPIView.as_view(),
name='api_file_retrieve_update_destroy',
),
url(
regex=r'^api/file/serve/(?P<file>[0-9a-f-]+)$',
view=views_api.FileServeAPIView.as_view(),
name='api_file_serve',
),
url(
regex=r'^api/hyperlink/list-create/(?P<project>[0-9a-f-]+)$',
view=views_api.HyperLinkListCreateAPIView.as_view(),
name='api_hyperlink_list_create',
),
url(
regex=r'^api/hyperlink/retrieve-update-destroy/(?P<hyperlink>[0-9a-f-]+)$',
view=views_api.HyperLinkRetrieveUpdateDestroyAPIView.as_view(),
name='api_hyperlink_retrieve_update_destroy',
),
]
urlpatterns = urls_ui + urls_api
|
1626087
|
import os
import cv2
import sys
import pdb
import six
import glob
import time
import torch
import random
import pandas
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
# import pyarrow as pa
from PIL import Image
import torch.utils.data as data
import matplotlib.pyplot as plt
from utils import video_augmentation
from torch.utils.data.sampler import Sampler
sys.path.append("..")
class BaseFeeder(data.Dataset):
def __init__(self, prefix, gloss_dict, drop_ratio=1, num_gloss=-1, mode="train", transform_mode=True,
datatype="lmdb"):
self.mode = mode
self.ng = num_gloss
self.prefix = prefix
self.dict = gloss_dict
self.data_type = datatype
self.feat_prefix = f"{prefix}/features/fullFrame-256x256px/{mode}"
self.transform_mode = "train" if transform_mode else "test"
self.inputs_list = np.load(f"./preprocess/phoenix2014/{mode}_info.npy", allow_pickle=True).item()
        # self.inputs_list = np.load(f"{prefix}/annotations/manual/{mode}.corpus.npy", allow_pickle=True).item()
# self.inputs_list = dict([*filter(lambda x: isinstance(x[0], str) or x[0] < 10, self.inputs_list.items())])
print(mode, len(self))
self.data_aug = self.transform()
print("")
def __getitem__(self, idx):
if self.data_type == "video":
input_data, label, fi = self.read_video(idx)
input_data, label = self.normalize(input_data, label)
# input_data, label = self.normalize(input_data, label, fi['fileid'])
return input_data, torch.LongTensor(label), self.inputs_list[idx]['original_info']
elif self.data_type == "lmdb":
input_data, label, fi = self.read_lmdb(idx)
input_data, label = self.normalize(input_data, label)
return input_data, torch.LongTensor(label), self.inputs_list[idx]['original_info']
else:
input_data, label = self.read_features(idx)
return input_data, label, self.inputs_list[idx]['original_info']
def read_video(self, index, num_glosses=-1):
# load file info
fi = self.inputs_list[index]
img_folder = os.path.join(self.prefix, "features/fullFrame-256x256px/" + fi['folder'])
img_list = sorted(glob.glob(img_folder))
label_list = []
for phase in fi['label'].split(" "):
if phase == '':
continue
if phase in self.dict.keys():
label_list.append(self.dict[phase][0])
return [cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB) for img_path in img_list], label_list, fi
def read_features(self, index):
# load file info
fi = self.inputs_list[index]
data = np.load(f"./features/{self.mode}/{fi['fileid']}_features.npy", allow_pickle=True).item()
return data['features'], data['label']
def normalize(self, video, label, file_id=None):
video, label = self.data_aug(video, label, file_id)
video = video.float() / 127.5 - 1
return video, label
def transform(self):
if self.transform_mode == "train":
print("Apply training transform.")
return video_augmentation.Compose([
# video_augmentation.CenterCrop(224),
# video_augmentation.WERAugment('/lustre/wangtao/current_exp/exp/baseline/boundary.npy'),
video_augmentation.RandomCrop(224),
video_augmentation.RandomHorizontalFlip(0.5),
video_augmentation.ToTensor(),
video_augmentation.TemporalRescale(0.2),
# video_augmentation.Resize(0.5),
])
else:
print("Apply testing transform.")
return video_augmentation.Compose([
video_augmentation.CenterCrop(224),
# video_augmentation.Resize(0.5),
video_augmentation.ToTensor(),
])
    def byte_to_img(self, byteflow):
        # local import: pyarrow is only needed for the lmdb data path
        import pyarrow as pa
        unpacked = pa.deserialize(byteflow)
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
return img
@staticmethod
def collate_fn(batch):
batch = [item for item in sorted(batch, key=lambda x: len(x[0]), reverse=True)]
video, label, info = list(zip(*batch))
if len(video[0].shape) > 3:
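            # Raw-video batch (each item is a (T, H, W, C) clip): pad every clip by
            # repeating its first and last frames so all clips reach the longest length
            # plus a fixed margin, with the padded length kept divisible by 4 (assumed
            # to match the temporal stride of the downstream convolutions).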
max_len = len(video[0])
video_length = torch.LongTensor([np.ceil(len(vid) / 4.0) * 4 + 12 for vid in video])
left_pad = 6
right_pad = int(np.ceil(max_len / 4.0)) * 4 - max_len + 6
max_len = max_len + left_pad + right_pad
padded_video = [torch.cat(
(
vid[0][None].expand(left_pad, -1, -1, -1),
vid,
vid[-1][None].expand(max_len - len(vid) - left_pad, -1, -1, -1),
)
, dim=0)
for vid in video]
padded_video = torch.stack(padded_video)
else:
max_len = len(video[0])
video_length = torch.LongTensor([len(vid) for vid in video])
padded_video = [torch.cat(
(
vid,
vid[-1][None].expand(max_len - len(vid), -1),
)
, dim=0)
for vid in video]
padded_video = torch.stack(padded_video).permute(0, 2, 1)
label_length = torch.LongTensor([len(lab) for lab in label])
if max(label_length) == 0:
return padded_video, video_length, [], [], info
else:
padded_label = []
for lab in label:
padded_label.extend(lab)
padded_label = torch.LongTensor(padded_label)
return padded_video, video_length, padded_label, label_length, info
def __len__(self):
return len(self.inputs_list) - 1
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
if __name__ == "__main__":
feeder = BaseFeeder()
dataloader = torch.utils.data.DataLoader(
dataset=feeder,
batch_size=1,
shuffle=True,
drop_last=True,
num_workers=0,
)
for data in dataloader:
pdb.set_trace()
|
1626116
|
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
import tree_filter_cuda as _C
class _Refine(Function):
@staticmethod
def forward(ctx, feature_in, edge_weight, sorted_index, sorted_parent, sorted_child):
feature_out, feature_aggr, feature_aggr_up, weight_sum, weight_sum_up, =\
_C.refine_forward(feature_in, edge_weight, sorted_index, sorted_parent, sorted_child)
ctx.save_for_backward(feature_in, edge_weight, sorted_index, sorted_parent,
sorted_child, feature_out, feature_aggr, feature_aggr_up, weight_sum,
weight_sum_up)
return feature_out
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
feature_in, edge_weight, sorted_index, sorted_parent,\
sorted_child, feature_out, feature_aggr, feature_aggr_up, weight_sum,\
            weight_sum_up, = ctx.saved_tensors
grad_feature = _C.refine_backward_feature(feature_in, edge_weight, sorted_index,
sorted_parent, sorted_child, feature_out, feature_aggr, feature_aggr_up,
weight_sum, weight_sum_up, grad_output)
grad_weight = _C.refine_backward_weight(feature_in, edge_weight, sorted_index,
sorted_parent, sorted_child, feature_out, feature_aggr, feature_aggr_up,
weight_sum, weight_sum_up, grad_output)
return grad_feature, grad_weight, None, None, None
refine = _Refine.apply
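# Hedged usage note (not part of the original source): `refine` dispatches to the
# compiled `tree_filter_cuda` extension, so all five arguments must be CUDA tensors
# produced by the matching tree-construction steps, e.g.
#   feature_out = refine(feature_in, edge_weight, sorted_index, sorted_parent, sorted_child)
# where the tensor shapes are whatever the surrounding tree-filter pipeline provides.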
|
1626134
|
import asyncio
import json
from pyee import AsyncIOEventEmitter
import websockets
from .__version__ import __version__
from .libs.utils.util import ObnizUtil
class ObnizConnection:
def __init__(self, id, options):
self.id = id
self.socket = None
self.socket_local = None
self.debugprint = False
self.debugprint_binary = False
self.bufferd_amound_warn_bytes = 10 * 1000 * 1000 # 10M bytes
self.emitter = AsyncIOEventEmitter()
self.onopen = None
self.onconnect = None
self.onclose = None
self.on_connect_called = False
self.send_pool = None
self._sendQueueTimer = None
self._wait_for_local_connect_ready_timer = None
self.hw = None
self.connection_state = 'closed' # closed/connecting/connected/closing
self._connection_retry_count = 0
if options is None:
options = {}
self.options = {
"auto_connect": options.get("auto_connect") is not False,
"access_token": options.get("access_token"),
"obniz_server": options.get("obniz_server", "wss://obniz.io"),
"reset_obniz_on_ws_disconnection": options.get(
"reset_obniz_on_ws_disconnection"
)
is not False,
}
if self.options["auto_connect"]:
self.wsconnect()
# prompt(filled, callback) {
# obnizid = prompt('Please enter obniz id', filled)
# if (obnizid) {
# callback(obnizid)
# }
# }
# static get version() {
# packageJson = require('../package.json')
# return packageJson.version
# }
def ws_on_open(self):
self.print_debug("ws connected")
self._connection_retry_count = 0
if self.onopen is not None:
self.onopen(self)
def ws_on_message(self, data):
obj = json.loads(data)
if type(obj) is list:
for o in obj:
self.notify_to_module(o)
else:
# invalid json
pass
def ws_on_close(self):
self.print_debug("closed")
self.close()
# self.emitter.emit('closed')
if self.onclose and self.on_connect_called:
self.onclose(self)
self.on_connect_called = False
self._reconnect()
def connect_wait(self, callback, option={}):
timeout = option.get("timeout")
if self.on_connect_called:
callback(True)
return
def cb(connected):
nonlocal callback
if callback:
callback(connected)
            # do not invoke the callback more than once
callback = None
self.emitter.once("connected", lambda: cb(True))
if not self.options["auto_connect"]:
self.emitter.once("closed", lambda: cb(False))
if timeout:
asyncio.get_event_loop().call_later(timeout, lambda: cb(False))
self.connect()
def _reconnect(self):
self._connection_retry_count += 1
try_after = 1000
if self._connection_retry_count > 15:
try_after = (self._connection_retry_count - 15) * 1000
limit = 60 * 1000
if try_after > limit:
try_after = limit
if self.options["auto_connect"]:
# setTimeout(() => {
# self.wsconnect() // always connect to mainserver if ws lost
# }, tryAfter)
pass
# wsOnError(event) {
# // console.error(event)
# }
# wsOnUnexpectedResponse(req, res) {
# if (res && res.statusCode == 404) {
# self.print_debug('obniz not online')
# } else {
# self.print_debug('invalid server response ' + res ? res.statusCode : '')
# }
# self.clearSocket(self.socket)
# delete self.socket
# self._reconnect()
# }
    def wsconnect(self, desired_server=None):
server = self.options["obniz_server"]
if desired_server:
server = desired_server
if self.socket and self.socket.open:
self.close()
url = server + "/obniz/{}/ws/1".format(self.id)
query = ["obnizpy=" + __version__]
if self.options["access_token"]:
query.append("access_token=" + self.options["access_token"])
url += "?" + "&".join(query)
self.print_debug("connecting to " + url)
self.connection_state = 'connecting'
async def connecting():
async with websockets.connect(url) as websocket:
self.socket = websocket
self.ws_on_open()
while True:
try:
data = await websocket.recv()
self.ws_on_message(data)
except websockets.exceptions.ConnectionClosed as e:
                        # a closed connection here is normal when the server issued a ws redirect
                        if self.connection_state == 'connected':
self.error(e)
self.ws_on_close()
break
except Exception as e:
self.error(e)
break
asyncio.ensure_future(connecting())
# _connectLocal(host) {
# const url = 'ws://' + host
# self.print_debug('local connect to ' + url)
# ws
# if (self.isNode) {
# const wsClient = require('ws')
# ws = new wsClient(url)
# ws.on('open', () => {
# self.print_debug('connected to ' + url)
# self._callOnConnect()
# })
# ws.on('message', data => {
# self.print_debug('recvd via local')
# self.wsOnMessage(data)
# })
# ws.on('close', event => {
# console.log('local websocket closed')
# self._disconnectLocal()
# })
# ws.on('error', err => {
# console.error('local websocket error.', err)
# self._disconnectLocal()
# })
# ws.on('unexpected-response', event => {
# console.log('local websocket closed')
# self._disconnectLocal()
# })
# } else {
# ws = new WebSocket(url)
# ws.binaryType = 'arraybuffer'
# ws.onopen = () => {
# self.print_debug('connected to ' + url)
# self._callOnConnect()
# }
# ws.onmessage = event => {
# self.print_debug('recvd via local')
# self.wsOnMessage(event.data)
# }
# ws.onclose = event => {
# console.log('local websocket closed')
# self._disconnectLocal()
# }
# ws.onerror = err => {
# console.log('local websocket error.', err)
# self._disconnectLocal()
# }
# }
# self.socket_local = ws
# }
def _disconnect_local(self):
if self.socket_local:
            if self.socket_local.open:
self.socket_local.close()
self.clear_socket(self.socket_local)
self.socket_local = None
if self._wait_for_local_connect_ready_timer:
# clearTimeout(self._wait_for_local_connect_ready_timer)
self._wait_for_local_connect_ready_timer = None
            # should still call: only the local connection was lost while waiting
self._call_on_connect()
def clear_socket(self, socket):
if socket is None:
return
# send queue
if self._sendQueueTimer:
del self._sendQueue
# clearTimeout(self._sendQueueTimer)
self._sendQueueTimer = None
# unbind
# if (self.isNode) {
# shouldRemoveObservers = [
# 'open',
# 'message',
# 'close',
# 'error',
# 'unexpected-response',
# ]
# for (i = 0 i < shouldRemoveObservers.length i++) {
# socket.removeAllListeners(shouldRemoveObservers[i])
# }
# } else {
# socket.onopen = null
# socket.onmessage = null
# socket.onclose = null
# socket.onerror = null
# }
def connect(self):
if self.socket and self.socket.open:
return
self.wsconnect()
def close(self):
# self._drainQueued()
self._disconnect_local()
if self.socket:
if self.socket.open:
# Connecting & Connected
self.connection_state = 'closing'
self.socket.close(1000, "close")
self.clear_socket(self.socket)
self.socket = None
self.connection_state = 'closed'
def _call_on_connect(self):
should_call = True
if self._wait_for_local_connect_ready_timer:
# obniz.js has wait local_connect
# clearTimeout(self._wait_for_local_connect_ready_timer)
self._wait_for_local_connect_ready_timer = None
else:
# obniz.js hasn't wait local_connect
            if self.socket_local and self.socket_local.open:
# delayed connect
should_call = False
else:
# local_connect is not used
pass
self.connection_state = 'connected'
self.emitter.emit("connected")
if should_call:
if self.onconnect:
try:
if asyncio.iscoroutinefunction(self.onconnect):
asyncio.ensure_future(self.onconnect(self))
else:
self.onconnect(self)
except Exception as e:
self.error(e)
self.on_connect_called = True
def print_debug(self, str):
if self.debugprint or self.debugprint_binary:
print("Obniz: " + str)
def send(self, obj, options=None):
if obj is None:
print("obnizpy. didnt send ", obj)
return
if type(obj) is list:
for o in obj:
self.send(o)
return
if self.send_pool is not None:
self.send_pool.append(obj)
return
send_data = ObnizUtil.json_dumps([obj])
if self.debugprint:
self.print_debug("send: " + send_data)
# queue sending
# self._drainQueued()
self._send_routed(send_data)
def _send_routed(self, data):
        if self.socket_local and self.socket_local.open and type(data) is not str:
self.print_debug("send via local")
self.socket_local.send(data)
if self.socket_local.buffered_amount > self.bufferd_amound_warn_bytes:
self.warning(
"over " + self.socket_local.buffered_amount + " bytes queued"
)
return
if self.socket and self.socket.open:
asyncio.ensure_future(self.socket.send(data))
# if self.socket.buffered_amount > self.bufferd_amound_warn_bytes:
# self.warning(
# f'over {self.socket.buffered_amount} bytes queued'
# )
return
# def _drainQueued(self):
# if self._sendQueue is None:
# return
# expect_size = 0
# for q in self._sendQueue:
# expect_size += q.length
# filled = 0
    # # TODO: check whether Uint8 is actually required here
# # send_data = new Uint8Array(expectSize)
# send_data = [None] * expect_size
# for q in self._sendQueue:
# for i, d in enumerate(q):
# send_data[filled + i] = d
# filled += q.length
# self._send_routed(send_data)
# del self._sendQueue
# # clearTimeout(self._sendQueueTimer)
# # self._sendQueueTimer = null
def _prepare_components(self):
pass
def notify_to_module(self, obj):
if self.debugprint:
self.print_debug(json.dumps(obj))
if "ws" in obj:
self.handle_ws_command(obj["ws"])
return
if "system" in obj:
self.handle_system_command(obj["system"])
return
# _canConnectToInsecure() {
# if (self.isNode) {
# return True
# } else {
# return location.protocol != 'https:'
# }
# }
def handle_ws_command(self, ws_obj):
if "ready" in ws_obj:
self.firmware_ver = ws_obj["obniz"]["firmware"]
self.hw = ws_obj["obniz"]["hw"]
if self.options["reset_obniz_on_ws_disconnection"]:
self.reset_on_disconnect(True)
# if (
# ws_obj.get('local_connect', {}).get('ip') and
# self.wscommand and
# self.options.local_connect and
# self._canConnectToInsecure()
# ):
# self._connectLocal(ws_obj.local_connect.ip)
# # self._waitForLocalConnectReadyTimer = setTimeout(() => {
# # self._callOnConnect()
# # }, 3000)
# else:
self._call_on_connect()
if "redirect" in ws_obj:
server = ws_obj["redirect"]
self.print_debug("WS connection changed to " + server)
            # close current ws immediately
self.socket.close(1000, "close")
self.clear_socket(self.socket)
self.socket = None
# connect to new server
self.wsconnect(server)
def handle_system_command(self, ws_obj):
pass
# static get WSCommand() {
# return WSCommand
# }
# binary2Json(binary) {
# data = new Uint8Array(binary)
# json = []
# while (data !== null) {
# const frame = WSCommand.dequeueOne(data)
# if (!frame) break
# obj = {}
# for (i = 0 i < self.wscommands.length i++) {
# const command = self.wscommands[i]
# if (command.module === frame.module) {
# command.notifyFromBinary(obj, frame.func, frame.payload)
# break
# }
# }
# json.push(obj)
# data = frame.next
# }
# return json
# }
def warning(self, msg):
print("warning:" + str(msg))
def error(self, msg):
print("error:" + str(msg))
|
1626167
|
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Import the deployment specific modules
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import MultiStepDeployment
cls = get_driver(Provider.EXOSCALE)
driver = cls('api key', 'api secret key')
image = driver.list_images()[0]
size = driver.list_sizes()[0]
# Define the scripts that you want to run during deployment
script = ScriptDeployment('/bin/date')
msd = MultiStepDeployment([script])
node = driver.deploy_node(name='test', image=image, size=size,
ssh_key='~/.ssh/id_rsa_test',
ex_keyname='test-keypair',
deploy=msd)
# The stdout of the deployment can be checked on the `script` object
pprint(script.stdout)
|
1626168
|
import unittest
from unittest.mock import Mock, MagicMock, patch, call
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.operations import SymmOp
from bsym.interface.pymatgen import ( unique_symmetry_operations_as_vectors_from_structure,
space_group_from_structure,
parse_site_distribution,
unique_structure_substitutions,
new_structure_from_substitution,
configuration_space_from_structure,
space_group_symbol_from_structure,
configuration_space_from_molecule,
structure_cartesian_coordinates_mapping,
molecule_cartesian_coordinates_mapping )
from itertools import permutations
from bsym import SymmetryOperation, Configuration, SpaceGroup, PointGroup, ConfigurationSpace
class TestPymatgenInterface( unittest.TestCase ):
def setUp( self ):
# construct a pymatgen Structure instance using the site fractional coordinates
# face-centered cubic lattice
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.5, 0.5, 0.0 ],
[ 0.0, 0.5, 0.5 ],
[ 0.5, 0.0, 0.5 ] ] )
atom_list = [ 'Li' ] * len( coords )
lattice = Lattice.from_parameters( a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90 )
self.structure = Structure( lattice, atom_list, coords )
# construct a pymatgen Molecule instance
# square molecule (D4h)
m_coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0 ],
[ 1.0, 1.0, 0.0 ] ] )
molecule = Molecule( atom_list, m_coords )
molecule = Molecule( molecule.species, molecule.cart_coords - molecule.center_of_mass )
self.molecule = molecule
def test_new_structure_from_substitution( self ):
substitution_index = [ 2,3 ]
new_species_list = [ 'Mg', 'Fe' ]
s_new = new_structure_from_substitution( self.structure, substitution_index, new_species_list )
self.assertEqual( s_new[2].species_string, 'Mg' )
self.assertEqual( s_new[3].species_string, 'Fe' )
def test_new_structure_from_substitution_raises_ValueError_with_oversize_index( self ):
substitution_index = [ 0, 1, 2, 3, 4 ]
new_species_list = [ 'Mg', 'Fe' ]
with self.assertRaises( ValueError ):
new_structure_from_substitution( self.structure, substitution_index, new_species_list )
def test_new_structure_from_substitution_raises_ValueError_with_invalid_index( self ):
substitution_index = [ 2, 4 ]
new_species_list = [ 'Mg', 'Fe' ]
with self.assertRaises( ValueError ):
new_structure_from_substitution( self.structure, substitution_index, new_species_list )
def test_parse_site_distribution( self ):
site_distribution = { 'Mg': 1, 'Li': 3 }
n, d = parse_site_distribution( site_distribution )
for k, v in n.items():
self.assertEqual( site_distribution[ d[ k ] ], v )
def test_structure_cartesian_coordinates_mapping( self ):
mock_symmop = Mock( spec=SymmOp )
new_coords = np.array( [ [ 0.5, 0.5, 0.5 ] ] )
mock_symmop.operate_multi = Mock( return_value=new_coords )
self.structure.lattice.get_cartesian_coords = Mock( return_value=np.array( [ [ 2.0, 2.0, 2.0 ] ] ) )
mapped_coords = structure_cartesian_coordinates_mapping( self.structure, mock_symmop )
np.testing.assert_array_equal( mapped_coords, np.array( [ [ 2.0, 2.0, 2.0 ] ] ) )
np.testing.assert_array_equal( mock_symmop.operate_multi.call_args[0][0], self.structure.frac_coords )
def test_molecule_cartesian_coordinates_mapping( self ):
mock_symmop = Mock( spec=SymmOp )
        new_coords = np.array( [ [ 0.5, 0.5, 0.5 ] ] )
mock_symmop.operate_multi = Mock( return_value=new_coords )
mapped_coords = molecule_cartesian_coordinates_mapping( self.molecule, mock_symmop )
np.testing.assert_array_equal( mapped_coords, new_coords )
np.testing.assert_array_equal( mock_symmop.operate_multi.call_args[0][0], self.molecule.cart_coords )
if __name__ == '__main__':
unittest.main()
|
1626195
|
from google.cloud import bigquery
import os, re
import ftplib
# writeable part of the filesystem for Cloud Functions instance
gc_write_dir = "/tmp"
def get_file_ftp(host, path_to_file, ftp_configuration):
"""
    Copy a file from FTP via the ftp://*host*/*path_to_file* link into the working directory.
    Returns the full path to the downloaded file.
"""
# Construct FTP object and get the file on a server
with ftplib.FTP(host, user=ftp_configuration["user"], passwd=ftp_configuration["psswd"]) as ftp:
filename = re.findall("[^/]*$", path_to_file)[0]
with open(filename, "wb") as wf:
ftp.retrbinary("RETR " + filename, wf.write)
file_location = gc_write_dir + "/" + filename
print("File " + path_to_file + " has got successfully.")
return file_location
def give_file_gbq(path_to_file, bq_configuration):
"""
    Upload the file at *path_to_file* to a BigQuery table using *bq_configuration* settings.
"""
# construct Client object with the path to the table in which data will be stored
client = bigquery.Client(project=bq_configuration["project_id"])
dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table_id"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.source_format = bq_configuration["source_format"].upper()
job_config.write_disposition = bq_configuration["write_disposition"]
if bq_configuration["source_format"].upper() == "CSV":
job_config.field_delimiter = bq_configuration["delimiter"]
job_config.skip_leading_rows = 1
job_config.autodetect = True
# upload the file to BigQuery table
with open(path_to_file, "rb") as source_file:
job = client.load_table_from_file(source_file, table_ref, location=bq_configuration["location"],
job_config=job_config)
job.result()
print("The Job " + job.job_id + " in status " + job.state + " for table " + bq_configuration["project_id"] + "." +
bq_configuration["dataset_id"] + "." + bq_configuration["table_id"] + ".")
os.remove(path_to_file)
def ftp(request):
"""
Function to execute.
"""
try:
# get POST data from Flask.request object
request_json = request.get_json()
ftp_configuration = request_json["ftp"]
bq_configuration = request_json["bq"]
if not bq_configuration.get("location"):
bq_configuration["location"] = "US"
bq_configuration["write_disposition"] = "WRITE_TRUNCATE"
host = re.sub("ftp://", "", re.findall("ftp://[^/]*", ftp_configuration["path_to_file"])[0])
path_to_file = re.sub("/$", "", re.sub("ftp://" + host + "/", "", ftp_configuration["path_to_file"]))
except Exception as error:
print("An error occured with POST request data.")
print(str(error))
raise SystemExit
# go to writable directory
os.chdir(gc_write_dir)
# get the file from FTP
try:
ftp_file = get_file_ftp(host, path_to_file, ftp_configuration)
except Exception as error:
print("An error occured trying to get file from ftp.")
print(str(error))
raise SystemExit
# upload the file to BigQuery
try:
give_file_gbq(ftp_file, bq_configuration)
except Exception as error:
print("An error occured trying to upload file to Google BigQuery.")
print(str(error))
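# Hedged example of the POST payload this function expects (the values are illustrative
# placeholders inferred from the keys read above, not taken from any official docs):
#
#   {
#     "ftp": {"path_to_file": "ftp://example.com/exports/data.csv",
#             "user": "ftp_user", "psswd": "ftp_password"},
#     "bq": {"project_id": "my-project", "dataset_id": "my_dataset",
#            "table_id": "my_table", "source_format": "CSV",
#            "delimiter": ",", "location": "US"}
#   }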
|
1626211
|
class FacepyError(Exception):
"""Base class for exceptions raised by Facepy."""
class FacebookError(FacepyError):
"""Exception for Facebook errors."""
def __init__(self, message=None, code=None, error_data=None, error_subcode=None,
is_transient=None, error_user_title=None, error_user_msg=None,
fbtrace_id=None):
self.message = message
self.code = code
self.error_data = error_data
self.error_subcode = error_subcode
self.is_transient = is_transient
self.error_user_title = error_user_title
self.error_user_msg = error_user_msg
self.fbtrace_id = fbtrace_id
if self.code:
message = '[%s] %s' % (self.code, self.message)
super(FacebookError, self).__init__(message)
class OAuthError(FacebookError):
"""Exception for Facebook errors specifically related to OAuth."""
class HTTPError(FacepyError):
"""Exception for transport errors."""
class SignedRequestError(FacepyError):
"""Exception for invalid signed requests."""
class InternalFacebookError(FacebookError):
"""Exception for Facebook internal server error."""
|
1626244
|
import logging
import os.path
import sys
import time
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
sys.path.append(os.path.join(dirname, '..'))
from cassandra.cluster import Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.query import SimpleStatement
log = logging.getLogger()
log.setLevel('INFO')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
supported_reactors = [AsyncoreConnection]
try:
from cassandra.io.libevreactor import LibevConnection
supported_reactors.append(LibevConnection)
except ImportError as exc:
log.warning("Not benchmarking libev reactor: %s" % (exc,))
KEYSPACE = "testkeyspace"
TABLE = "testtable"
NUM_QUERIES = 10000
def setup():
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
rows = session.execute("SELECT keyspace_name FROM system.schema_keyspaces")
if KEYSPACE in [row[0] for row in rows]:
log.debug("dropping existing keyspace...")
session.execute("DROP KEYSPACE " + KEYSPACE)
log.debug("Creating keyspace...")
session.execute("""
CREATE KEYSPACE %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""" % KEYSPACE)
log.debug("Setting keyspace...")
session.set_keyspace(KEYSPACE)
log.debug("Creating table...")
session.execute("""
CREATE TABLE %s (
thekey text,
col1 text,
col2 text,
PRIMARY KEY (thekey, col1)
)
""" % TABLE)
def teardown():
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.execute("DROP KEYSPACE " + KEYSPACE)
def benchmark(run_fn):
for conn_class in supported_reactors:
setup()
log.info("==== %s ====" % (conn_class.__name__,))
cluster = Cluster(['127.0.0.1'])
cluster.connection_class = conn_class
session = cluster.connect(KEYSPACE)
log.debug("Sleeping for two seconds...")
time.sleep(2.0)
query = SimpleStatement("""
INSERT INTO {table} (thekey, col1, col2)
VALUES (%(key)s, %(a)s, %(b)s)
""".format(table=TABLE))
values = {'key': 'key', 'a': 'a', 'b': 'b'}
log.debug("Beginning inserts...")
start = time.time()
try:
run_fn(session, query, values, NUM_QUERIES)
end = time.time()
finally:
teardown()
total = end - start
log.info("Total time: %0.2fs" % total)
log.info("Average throughput: %0.2f/sec" % (NUM_QUERIES / total))
|
1626259
|
import datetime
import urllib.request, urllib.parse, urllib.error
from http.cookiejar import CookieJar
import os
import re
from .central import CentralBase
from ..utils import utils
class CSLWeekly(CentralBase):
def __init__(self, name, storage):
CentralBase.__init__(self, name, storage)
self.baseurl = 'http://www.egazette.nic.in/Digital.aspx'
self.search_endp = 'Digital.aspx'
self.result_table = 'GV_Content_Detail'
self.gazette_js = 'window.open\(\'(?P<href>[^\']+)'
self.partnum = '30'
def get_post_data(self, tags, dateobj):
postdata = []
for tag in tags:
name = None
value = None
if tag.name == 'input':
name = tag.get('name')
value = tag.get('value')
t = tag.get('type')
if t == 'image':
continue
if name == 'btnSubmit':
value = 'Submit'
elif tag.name == 'select':
name = tag.get('name')
if name == 'ddlYear':
value = '%d' % dateobj.year
elif name == 'ddlCategory':
value = self.gztype
elif name == 'ddlPartSection':
value = self.partnum
if name:
if value == None:
value = ''
postdata.append((name, value))
return postdata
def get_category_postdata(self, postdata):
newdata = []
for k, v in postdata:
if k == '__EVENTTARGET':
v = 'ddlCategory'
elif k == 'ddlPartSection':
v = 'Select Part & Section'
elif k == 'ddlYear':
v = '2020'
newdata.append((k, v))
return newdata
def get_part_postdata(self, postdata):
newdata = []
for k, v in postdata:
if k == '__EVENTTARGET':
v = 'ddlPartSection'
elif k == 'ddlYear':
v = '2020'
newdata.append((k, v))
return newdata
def get_search_results(self, search_url, dateobj, cookiejar):
response = self.download_url(search_url, savecookies = cookiejar, loadcookies=cookiejar)
postdata = self.get_form_data(response.webpage, dateobj)
if postdata == None:
return None
postdata = self.get_category_postdata(postdata)
response = self.download_url(search_url, savecookies = cookiejar, \
referer = search_url, \
loadcookies = cookiejar, postdata = postdata)
postdata = self.get_form_data(response.webpage, dateobj)
postdata = self.get_part_postdata(postdata)
response = self.download_url(search_url, savecookies = cookiejar, \
referer = search_url, \
loadcookies = cookiejar, postdata = postdata)
postdata = self.get_form_data(response.webpage, dateobj)
response = self.download_url(search_url, savecookies = cookiejar, \
referer = search_url, \
loadcookies = cookiejar, postdata = postdata)
postdata = self.get_form_data(response.webpage, dateobj)
response = self.download_url(search_url, savecookies = cookiejar, \
referer = search_url, \
loadcookies = cookiejar, postdata = postdata)
return response
def sync(self, fromdate, todate, event):
newdownloads = []
while fromdate <= todate:
if event.is_set():
self.logger.warning('Exiting prematurely as timer event is set')
break
dateobj = fromdate.date()
lastdate = datetime.datetime(fromdate.year, 12, 31)
if todate < lastdate:
lastdate = todate
lastdate = lastdate.date()
self.logger.info('Dates: %s to %s', dateobj, lastdate)
dls = self.download_dates(self.name, dateobj, lastdate)
self.logger.info('Got %d gazettes between %s and %s' % (len(dls), dateobj, lastdate))
newdownloads.extend(dls)
fromdate = datetime.datetime(fromdate.year + 1, 1, 1)
return newdownloads
def download_dates(self, relpath, fromdate, todate):
dls = []
cookiejar = CookieJar()
response = self.download_url(self.baseurl, savecookies = cookiejar, loadcookies = cookiejar)
if not response:
            self.logger.warning('Could not fetch %s for the day %s', self.baseurl, fromdate)
return dls
curr_url = response.response_url
search_url = urllib.parse.urljoin(curr_url, self.search_endp)
response = self.get_search_results(search_url, fromdate, cookiejar)
pagenum = 1
while response != None and response.webpage != None:
metainfos, nextpage = self.parse_search_results(response.webpage, \
fromdate, pagenum)
metainfos = self.filter_by_date(metainfos, fromdate, todate)
postdata = self.get_form_data(response.webpage, fromdate)
relurls = self.download_metainfos(relpath, metainfos, search_url, \
postdata, cookiejar)
dls.extend(relurls)
if nextpage:
pagenum += 1
self.logger.info('Going to page %d for date %s', pagenum, fromdate)
response = self.download_nextpage(nextpage, search_url, postdata, cookiejar)
else:
break
return dls
def download_gazette(self, relpath, search_url, postdata, \
metainfo, cookiejar):
response = self.download_url(search_url, postdata = postdata, \
loadcookies= cookiejar)
if not response or not response.webpage:
self.logger.warning('Could not get the page for %s' % metainfo)
return None
webpage = response.webpage.decode('utf-8', 'ignore')
reobj = re.search(self.gazette_js, webpage)
if not reobj:
self.logger.warning('Could not get url link for %s' % metainfo)
return None
href = reobj.groupdict()['href']
gzurl = urllib.parse.urljoin(search_url, href)
        reobj = re.search(r'(?P<gzid>[^/]+)\.pdf$', href)
if not reobj:
self.logger.warning('Could not get gazette id in %s' % href)
return None
gzid = reobj.groupdict()['gzid']
metainfo['gazetteid'] = gzid
relurl = os.path.join(relpath, metainfo.get_date().__str__(), gzid)
if self.save_gazette(relurl, gzurl, metainfo):
return relurl
return None
def filter_by_date(self, metainfos, fromdate, todate):
minfos = []
for metainfo in metainfos:
dateobj = metainfo.get_date()
if dateobj and dateobj >= fromdate and dateobj <= todate:
minfos.append(metainfo)
return minfos
def process_result_row(self, tr, metainfos, dateobj, order):
metainfo = utils.MetaInfo()
metainfos.append(metainfo)
metainfo.set_gztype(self.gztype)
i = 0
for td in tr.find_all('td'):
if len(order) > i:
col = order[i]
txt = utils.get_tag_contents(td)
if txt:
txt = txt.strip()
if col == 'ministry':
metainfo.set_ministry(txt)
elif col == 'subject':
metainfo.set_subject(txt)
elif col == 'download':
inp = td.find('input')
if inp:
name = inp.get('name')
if name:
metainfo[col] = name
else:
link = td.find('a')
if link:
metainfo[col] = link
elif col == 'gazetteid':
metainfo[col] = txt
elif col == 'issuedate':
dateobj = None
try:
dateobj = utils.to_dateobj(txt)
except:
self.logger.warning('Unable to get date from %s', txt)
if dateobj:
metainfo.set_date(dateobj)
i += 1
def find_next_page(self, tr, curr_page):
classes = tr.get('class')
if classes and 'pager' in classes:
prev_page = None
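            # The results pager lists pages in blocks; the trailing '...' link advances
            # to the next block and is only followed when the current page is the last
            # of its block (curr_page % 10 == 0), otherwise the link for curr_page + 1
            # is returned below.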
for td in tr.find_all('td'):
link = td.find('a')
txt = utils.get_tag_contents(td)
if txt:
txt = txt.strip()
self.logger.debug('%s %s %s', txt, curr_page, link)
if txt == '...' and curr_page % 10 == 0 and link \
and prev_page == curr_page:
return link
try:
page_no = int(txt)
prev_page = page_no
except:
page_no = None
if page_no == curr_page + 1 and link:
return link
return None
class CSLExtraordinary(CSLWeekly):
def __init__(self, name, storage):
CSLWeekly.__init__(self, name, storage)
self.gztype = 'Extra Ordinary'
self.partnum = '31'
|
1626260
|
import sys
print 'Count:', len(sys.argv)
print 'Type:', type(sys.argv)
for arg in sys.argv:
print 'Argument:', arg
|
1626273
|
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import silhouette_score
import tslearn.clustering as clust
from scipy import signal
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gippy import GeoImage
import gippy.algorithms as alg
import re
from os import listdir, walk
def calculate_indices(filepath, asset_dict, indices):
''' Create image files for indices
:param filepath (str): Full path to directory containing satellite scenes in default structure created
by sat-search load --download
:param asset_dict (dict): Keys = asset (band) names in scene files (e.g. 'B01', 'B02'); Values = value names
corresponding to keys (e.g. 'red', 'nir')
:param indices (list): Which indices to generate? Options include any index included in gippy.alg.indices
:return: None (writes files to disk)
'''
subdirs = [x[0] for x in walk(filepath)]
subdirs = subdirs[1:len(subdirs)]
for folder in subdirs:
# Filepath points to folder of geotiffs of Sentinel 2 time-series of bands 4 (red) and 8 (nir)
files = [folder + '/' + f for f in listdir(folder) if not f.startswith('.')]
# Asset (band) names
pattern = '[^_.]+(?=\.[^_.]*$)'
bands = [re.search(pattern, f).group(0) for f in files]
# Match band names
bands = [asset_dict.get(band, band) for band in bands]
img = GeoImage.open(filenames=files, bandnames=bands, nodata=0)
for ind in indices:
alg.indices(img, products=[ind], filename=folder + '/index_' + ind + '.tif')
img = None
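# Hedged usage note (the directory layout and band mapping below are illustrative
# assumptions, not taken from the original source):
#
#   calculate_indices('/data/sentinel2_scenes',
#                     asset_dict={'B04': 'red', 'B08': 'nir'},
#                     indices=['ndvi'])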
def apply_savgol(x, value, window, poly):
""" Perform Savgol signal smoothing on time-series in dataframe group object (x)
Parameters
----------
x: (pd.DataFrame.groupby) Grouped dataframe object
window (int): smoothing window - pass to signal.savgol_filter 'window_length' param
poly (int): polynomial order used to fit samples - pass to signal.savgol_filter 'polyorder' param
value (str): Name of value (variable) to smooth
Returns
-------
x: "Smoothed" time-series
"""
x[value] = signal.savgol_filter(x[value], window_length=window, polyorder=poly)
return x
class TimeSeriesSample:
def __init__(self, time_series_df, n_samples, ts_var, seed):
# Take random `n_samples of pixels from time-series dataframe
self.ts_var = ts_var
self.group = time_series_df.groupby(['lc', 'pixel', 'array_index'])
self.arranged_group = np.arange(self.group.ngroups)
# Ensure same pixels are sampled each time function is run when same `n_samples` parameter is supplied
np.random.seed(seed)
np.random.shuffle(self.arranged_group)
# Take the random sample
self.sample = time_series_df[self.group.ngroup().isin(self.arranged_group[:n_samples])]
if self.sample['date'].dtype != 'O':
self.sample['date'] = self.sample['date'].dt.strftime('%Y-%m-%d')
self.sample_dates = self.sample['date'].unique()
self.tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
self.dataset = None
def smooth(self, window=7, poly=3):
# Perform Savgol signal smoothing to each time-series
self.sample = self.sample.groupby(['lc', 'pixel', 'array_index']).apply(apply_savgol, self.ts_var, window, poly)
self.tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
return self
    @property
def ts_dataset(self):
#tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
self.dataset = to_time_series_dataset(self.tslist)
return self.dataset
def cluster_time_series(ts_sample, cluster_alg, n_clusters, cluster_metric, score=False):
# Dataframe to store cluster results
clust_df = pd.DataFrame(ts_sample.tslist.tolist(), index=ts_sample.tslist.index).reset_index()
clust_df.columns.values[3:] = ts_sample.sample_dates
# Fit model
if cluster_alg == "GAKM":
km = clust.GlobalAlignmentKernelKMeans(n_clusters=n_clusters)
if cluster_alg == "TSKM":
km = clust.TimeSeriesKMeans(n_clusters=n_clusters, metric=cluster_metric)
# Add predicted cluster labels to cluster results dataframe
labels = km.fit_predict(ts_sample.ts_dataset)
clust_df['cluster'] = labels
if score:
s = silhouette_score(ts_sample.ts_dataset, labels)
return clust_df, s
return clust_df
def cluster_grid_search(parameter_grid):
''' Perform grid search on cluster_ndvi_ts parameters
:param parameter_grid: (dict) parameter grid containing all parameter values to explore
:return: 1) dictionary with cluster labels and silhouette scores 2) dataframe with parameter combinations
and corresponding silhouette score
'''
# List of all possible parameter combinations
d = []
for vals in itertools.product(*parameter_grid.values()):
d.append(dict(zip(parameter_grid, vals)))
# Convert to data frame; use to store silhouette scores
df = pd.DataFrame(d)
df = df.drop(['ts_sample'], axis=1)
# Perform grid search
output = {'clusters': [], 'scores': []}
for values in itertools.product(*parameter_grid.values()):
# Run clustering function on all combinations of parameters in parameter grid
clusters, score = cluster_time_series(**dict(zip(parameter_grid, values)))
# 'clusters' = dataframes with cluster results; scores = silhouette scores of corresponding cluster results
output['clusters'].append(clusters)
output['scores'].append(score)
# Add silhouette scores to dataframe
df['sil_score'] = output['scores']
return output, df
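# Hedged example of a parameter grid for cluster_grid_search (values are illustrative;
# `ts_sample` is assumed to be a TimeSeriesSample built as above):
#
#   parameter_grid = {'ts_sample': [ts_sample],
#                     'cluster_alg': ['TSKM'],
#                     'n_clusters': [3, 4, 5],
#                     'cluster_metric': ['dtw', 'softdtw'],
#                     'score': [True]}
#   output, results_df = cluster_grid_search(parameter_grid)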
def cluster_mean_quantiles(df):
'''Calculate mean and 10th, 90th percentile for each cluster at all dates in time series
:param df: dataframe output from `cluster_ndvi_ts`
:return: two dataframes: one for mean time-series per-cluster, one for quantile time-series per-cluster
'''
# Columns with ndvi values
cols = df.columns[3:-1]
# Cluster means at each time-step
m = df.groupby('cluster', as_index=False)[cols].mean().T.reset_index()
m = m.iloc[1:]
m.rename(columns={'index':'date'}, inplace=True)
m.set_index('date', drop=True, inplace=True)
m.index = pd.to_datetime(m.index)
# Cluster 10th and 90th percentile at each time-step
q = df.groupby('cluster', as_index=False)[cols].quantile([.1, 0.9]).T.reset_index()
q.rename(columns={'index':'date'}, inplace=True)
q.set_index('date', drop=True, inplace=True)
q.index = pd.to_datetime(q.index)
return m, q
def plot_clusters(obj, index=None, fill=True, title=None, save=False, filename=None):
if type(obj) is dict:
cluster_df = obj['clusters'][index]
else:
cluster_df = obj
# Get cluster means and 10th, 90th quantiles
m, q = cluster_mean_quantiles(cluster_df)
# Plot cluster results
nclusts = len(cluster_df.cluster.unique())
color = iter(plt.cm.Set2(np.linspace(0, 1, nclusts)))
fig = plt.figure(figsize=(10, 8))
cnt = 0
for i in range(0, nclusts):
# Plot mean time-series for each cluster
c = next(color)
plt.plot(m.index, m[i], 'k', color=c)
# Fill 10th and 90th quantile time-series of each cluster
if fill:
plt.fill_between(m.index, q.iloc[:, [cnt]].values.flatten(), q.iloc[:, [cnt+1]].values.flatten(),
alpha=0.5, edgecolor=c, facecolor=c)
cnt += 2
# Legend and title
plt.legend(loc='upper left')
plt.title(title)
# Axis labels
ax = fig.add_subplot(111)
ax.set_xlabel('Date')
ax.set_ylabel('NDVI')
if save:
pattern = '.png'
if not pattern in filename:
raise ValueError('File type should be .png')
fig.savefig(filename)
|
1626276
|
import numpy as np
import sys
import time
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GroupKFold
from sklearn.base import BaseEstimator
from scipy.linalg import cholesky, solve_triangular
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from ml_dft.kernel_functions import RBFKernel, MaternKernel
import os
import warnings
def get_alpha_add(n_basis, n_grid, delta, v):
alpha_add = np.pi * ((np.arange(n_basis / 2) / (n_grid * delta))**2 + v**2) / v
alpha_add = np.repeat(alpha_add, 2)
return alpha_add
class MultivariateGaussianProcessCV(BaseEstimator):
def __init__(self, krr_param_grid=None, cv_type=None, cv_nfolds=5, cv_groups=None,
cv_shuffles=1, n_components=None, single_combo=True,
verbose=0, copy_X=True, v=None, n_basis=None, n_grid=None, delta=None,
id=1, cleanup=True, kernel=None, squared_dist=False, kernel_params=None,
delta_learning=False, mae=False, replace_fit=True):
self.krr_param_grid = krr_param_grid
self.verbose = verbose
self.cv_nfolds = cv_nfolds
self.cv_type = cv_type
self.cv_groups = cv_groups
self.cv_shuffles = cv_shuffles
self.n_components = n_components
self.single_combo = single_combo
self.copy_X = copy_X
self.n_grid = n_grid
self.delta = delta
self.n_basis = n_basis
self.id = id
self.cleanup = cleanup
self.kernel = kernel
self.squared_dist = squared_dist
self.device = None
self.replace_fit = replace_fit
self.delta_learning = delta_learning
self.mae = mae
if self.kernel is None:
self.kernel = RBFKernel()
elif self.kernel == 'rbf':
self.kernel = RBFKernel(**kernel_params)
elif self.kernel == 'matern':
self.kernel = MaternKernel(**kernel_params)
        if self.krr_param_grid is not None and 'v' in self.krr_param_grid and not single_combo:
raise ValueError('Can only add to alpha if single_combo=True')
def score(self, y_true, y_pred):
return np.mean((y_true - y_pred) ** 2)
def fit(self, X, y, labels=None, dist=None, importance_weights=None, cv_indices=None,
dist_savename=None):
t = time.time()
if y.ndim < 2:
y = y.reshape(-1, 1)
if self.n_components is not None:
if self.verbose > 0:
elapsed = time.time() - t
print('PCA [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
self.pca = PCA(n_components=self.n_components, svd_solver='arpack')
y_ = self.pca.fit_transform(y)
if self.verbose > 0:
print('Lost %.1f%% information ' % (self.pca.noise_variance_) +
'[%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
elapsed = time.time() - t
else:
y_ = y
if labels is not None:
raise RuntimeError('Not implemented.')
if cv_indices is None:
cv_indices = np.arange(X.shape[0])
if self.cv_type is None:
kfold = RepeatedKFold(n_splits=self.cv_nfolds, n_repeats=self.cv_shuffles)
cv_folds = kfold.split(X[cv_indices])
n_cv_folds = kfold.get_n_splits()
elif self.cv_type == 'iter':
cv_folds = self.cv_groups
n_cv_folds = len(self.cv_groups)
elif self.cv_type == 'group':
groups = self.cv_groups
if self.cv_nfolds is None:
self.cv_nfolds = len(np.unique(groups))
kfold = GroupKFold(n_splits=self.cv_nfolds)
cv_folds = kfold.split(X[cv_indices], y[cv_indices], groups)
n_cv_folds = kfold.get_n_splits()
else:
raise Exception('Cross-validation type not supported')
add_train_inds = np.setdiff1d(np.arange(X.shape[0]), cv_indices)
cv_folds = list(cv_folds)
cv_folds = [(np.concatenate((train_fold, add_train_inds)), test_fold) for train_fold, test_fold in cv_folds]
if self.verbose > 0:
elapsed = time.time() - t
print('Computing distance matrix [%dmin %dsec]' % (
int(elapsed / 60), int(elapsed % 60)))
sys.stdout.flush()
if dist is None:
dist = euclidean_distances(X, None, squared=self.squared_dist)
if dist_savename is not None:
if self.verbose > 0:
print('Saving distance matrix to file:', dist_savename)
np.save(dist_savename, dist)
if importance_weights is None:
self.krr_param_grid['lambda'] = [0]
importance_weights = np.ones((X.shape[0], ))
importance_weights = importance_weights**(0.5)
errors = []
if 'v' in self.krr_param_grid:
for fold_i, (train_i, test_i) in enumerate(cv_folds):
fold_errors = np.empty((len(self.krr_param_grid['v']),
len(self.krr_param_grid['gamma']),
1,
len(self.krr_param_grid['alpha']), y_.shape[1]))
if self.verbose > 0:
elapsed = time.time() - t
print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
n_cv_folds,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
for v_i, v in enumerate(self.krr_param_grid['v']):
for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
iw = importance_weights**lamb
iw = iw[:, None]
K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
K_train *= np.outer(iw[train_i], iw[train_i])
K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
if self.verbose > 0:
sys.stdout.write('.')
sys.stdout.flush()
for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
if self.verbose > 0:
sys.stdout.write(',')
sys.stdout.flush()
for y_i in np.arange(y_.shape[1]):
K_train_ = K_train.copy()
alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
K_train_.flat[::K_train_.shape[0] + 1] += alpha * alpha_add[y_i]
try:
L_ = cholesky(K_train_, lower=True)
x = solve_triangular(L_, y_[train_i, y_i], lower=True)
dual_coef_ = solve_triangular(L_.T, x)
pred_mean = np.dot(K_test, dual_coef_)
if self.mae:
e = np.mean(np.abs(pred_mean - y_[test_i, y_i]), 0)
else:
e = np.mean((pred_mean - y_[test_i, y_i]) ** 2, 0)
except np.linalg.LinAlgError:
e = np.inf
fold_errors[v_i, gamma_i, 0, alpha_i, y_i] = e
if self.verbose > 0:
sys.stdout.write('\n')
sys.stdout.flush()
errors.append(fold_errors)
errors = np.array(errors)
errors = np.mean(errors, 0) # average over folds
else:
for fold_i, (train_i, test_i) in enumerate(cv_folds):
fold_errors = np.empty((len(self.krr_param_grid['gamma']),
len(self.krr_param_grid['lambda']),
len(self.krr_param_grid['alpha']), y_.shape[1]))
if self.verbose > 0:
elapsed = time.time() - t
print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
n_cv_folds,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
if self.verbose > 0:
sys.stdout.write('.')
sys.stdout.flush()
for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
iw = importance_weights**lamb
iw = iw[:, None]
K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
K_train *= np.outer(iw[train_i], iw[train_i])
K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
if self.verbose > 0:
sys.stdout.write(',')
sys.stdout.flush()
K_train_ = K_train.copy()
K_train_.flat[::K_train_.shape[0] + 1] += alpha
try:
L_ = cholesky(K_train_, lower=True)
x = solve_triangular(L_, iw[train_i] * y_[train_i], lower=True)
dual_coef_ = iw[train_i] * solve_triangular(L_.T, x)
pred_mean = np.dot(K_test, dual_coef_)
if self.mae:
e = np.mean(np.abs(pred_mean - y_[test_i]) * importance_weights[test_i, None]**2, 0)
else:
e = np.mean(((pred_mean - y_[test_i]) ** 2) * importance_weights[test_i, None]**2, 0)
except np.linalg.LinAlgError:
e = np.inf
fold_errors[gamma_i, lamb_i, alpha_i] = e
if self.verbose > 0:
sys.stdout.write('\n')
sys.stdout.flush()
errors.append(fold_errors)
errors = np.array(errors)
errors = np.mean(errors, 0) # average over folds
self.dual_coefs_ = np.empty((y_.shape[1], X.shape[0]))
self.alphas_ = np.empty(y_.shape[1])
self.lambdas_ = np.empty(y_.shape[1])
self.gammas_ = np.empty(y_.shape[1])
if self.verbose > 0:
elapsed = time.time() - t
print('Refit [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
print_count = 0
if not self.single_combo:
for i in range(y_.shape[1]):
min_params = np.argsort(errors[:, :, :, i], axis=None)
# lin_alg_errors = 0
                gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
                                                            errors.shape[:3])
gamma = self.krr_param_grid['gamma'][gamma_i]
lamb = self.krr_param_grid['lambda'][lamb_i]
alpha = self.krr_param_grid['alpha'][alpha_i]
self.alphas_[i] = alpha
self.gammas_[i] = gamma
self.lambdas_[i] = lamb
if (gamma_i in (0, len(self.krr_param_grid['gamma']) - 1) or
lamb_i in (0, len(self.krr_param_grid['lambda']) - 1) or
alpha_i in (0, len(self.krr_param_grid['alpha']) - 1)):
if print_count <= 200:
fmtstr = '%d: gamma=%g\talpha=%g\tlambda=%g\terror=%g\tmean=%g'
print(fmtstr % (i, gamma, alpha, lamb,
errors[gamma_i, lamb_i, alpha_i, i],
errors[gamma_i, lamb_i, alpha_i, i] /
np.mean(np.abs(y_[:, i]))))
print_count += 1
else:
errors = np.mean(errors, -1) # average over outputs
if self.verbose > 1:
print('CV errors:')
print(errors)
print('Alpha params:')
print(self.krr_param_grid['alpha'])
print('Gamma params:')
print(self.krr_param_grid['gamma'])
print('Lambda params:')
print(self.krr_param_grid['lambda'])
if self.verbose > 0:
print('Min error: ', np.min(errors))
# print np.log(errors)
# plt.imshow(np.log(errors))
# plt.xticks(range(10), map('{:.1e}'.format, list(self.krr_param_grid['alpha'])))
# plt.yticks(range(10), map('{:.1e}'.format, list(self.krr_param_grid['gamma'])))
# plt.xlabel('alpha')
# plt.ylabel('gamma')
# plt.colorbar()
# plt.show()
min_params = np.argsort(errors, axis=None)
if 'v' in self.krr_param_grid:
v_i, gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
errors.shape)
else:
gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
errors.shape)
if 'v' in self.krr_param_grid:
v = self.krr_param_grid['v'][v_i]
print('v=', v)
gamma = self.krr_param_grid['gamma'][gamma_i]
alpha = self.krr_param_grid['alpha'][alpha_i]
lamb = self.krr_param_grid['lambda'][lamb_i]
if 'v' in self.krr_param_grid:
if v == self.krr_param_grid['v'][0]:
print('v at lower edge.')
if v == self.krr_param_grid['v'][-1]:
print('v at upper edge.')
if len(self.krr_param_grid['gamma']) > 1:
if gamma == self.krr_param_grid['gamma'][0]:
print('Gamma at lower edge.')
if gamma == self.krr_param_grid['gamma'][-1]:
print('Gamma at upper edge.')
if len(self.krr_param_grid['alpha']) > 1:
if alpha == self.krr_param_grid['alpha'][0]:
print('Alpha at lower edge.')
if alpha == self.krr_param_grid['alpha'][-1]:
print('Alpha at upper edge.')
if len(self.krr_param_grid['lambda']) > 1:
if lamb == self.krr_param_grid['lambda'][0]:
print('Lambda at lower edge.')
if lamb == self.krr_param_grid['lambda'][-1]:
print('Lambda at upper edge.')
self.alphas_[:] = alpha
self.gammas_[:] = gamma
self.lambdas_[:] = lamb
if 'v' in self.krr_param_grid:
alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
self.alphas_ *= alpha_add
combos = list(zip(self.alphas_, self.gammas_, self.lambdas_))
n_unique_combos = len(set(combos))
self.L_fit_ = [None] * n_unique_combos
for i, (alpha, gamma, lamb) in enumerate(set(combos)):
if self.verbose > 0:
elapsed = time.time() - t
print('Parameter combinations ' +
'%d of %d [%dmin %dsec]' % (i + 1, n_unique_combos,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
y_list = [i for i in range(y_.shape[1]) if
self.alphas_[i] == alpha and self.gammas_[i] == gamma and self.lambdas_[i] == lamb]
iw = importance_weights**lamb
iw = iw[:, None]
K = self.kernel.apply_to_dist(dist, gamma=gamma)
K *= np.outer(iw, iw)
# np.exp(K, K)
while True:
K.flat[::K.shape[0] + 1] += alpha - (alpha / 10)
try:
if self.verbose > 0:
print('trying cholesky decomposition, alpha', alpha)
L_ = cholesky(K, lower=True)
self.L_fit_[i] = L_
x = solve_triangular(L_, iw * y_[:, y_list], lower=True)
# x = solve_triangular(L_, y_[:, y_list], lower=True)
dual_coef_ = solve_triangular(L_.T, x)
self.dual_coefs_[y_list] = iw.T * dual_coef_.T.copy()
break
except np.linalg.LinAlgError:
if self.verbose > 0:
print('LinalgError, increasing alpha')
alpha *= 10
self.alphas_[0] = alpha
if self.copy_X:
self.X_fit_ = X.copy()
self.y_fit_ = y.copy()
else:
self.X_fit_ = X
self.y_fit_ = y
self.errors = errors
if self.verbose > 0:
elapsed = time.time() - t
print('Done [%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
sys.stdout.flush()
def add_sample(self, x, y):
""" Adds a sample to the kernel matrix via an efficient update to the model
Args:
x : The sample to be added
"""
n = self.X_fit_.shape[0]
print('n', n)
if self.verbose > 1:
print("adding training datapoint")
self.X_fit_ = np.concatenate((self.X_fit_, x), axis=0)
if self.verbose > 1:
print("adding training label")
self.y_fit_ = np.concatenate((self.y_fit_, y), axis=0)
        L_k = np.empty((len(self.L_fit_), n + 1, n + 1))
self.dual_coefs_ = np.empty((self.dual_coefs_.shape[0], n + 1))
print(L_k.shape)
for i, gamma in enumerate(np.unique(self.gammas_)):
alpha = self.alphas_[i]
if self.verbose > 1:
print('Calculating kernel entries for new point')
dist = euclidean_distances(x, self.X_fit_, squared=self.squared_dist)
k = self.kernel.apply_to_dist(dist, gamma=gamma).T
# print('n', n)
k1 = k[:n]
k2 = k[n:] + alpha
if self.verbose > 1:
print('Updating Cholesky factor')
L_k[i, :n, :n] = self.L_fit_[i]
L_k[i, :n, -1:] = 0
L_k[i, -1:, :n] = solve_triangular(self.L_fit_[i], k1, lower=True).T
# print('k2', k2)
# print('dotprod', np.dot(L_k[i, -1:, :n], L_k[i, -1:, :n].T))
# print('var', k2 - np.dot(L_k[i, -1:, :n], L_k[i, -1:, :n].T))
L_k[i, -1:, -1:] = np.sqrt(k2 - np.dot(L_k[i, -1:, :n], L_k[i, -1:, :n].T))
self.L_fit_ = L_k
if self.verbose > 1:
print('Updating dual_coefs')
v = solve_triangular(L_k[i], self.y_fit_, lower=True)
self.dual_coefs_[i] = solve_triangular(L_k[i].T, v).T
def predict(self, X, verbose=None, variance=False, dist=None):
t = time.time()
if verbose is None:
verbose = self.verbose
y_ = np.empty(shape=(X.shape[0], len(self.alphas_)))
if verbose > 0:
elapsed = time.time() - t
print('Computing distance matrix [%dmin %dsec]' % (
int(elapsed / 60), int(elapsed % 60)))
sys.stdout.flush()
if dist is None:
if X.shape == self.X_fit_.shape and np.allclose(X, self.X_fit_):
dist = euclidean_distances(self.X_fit_, squared=self.squared_dist)
else:
dist = euclidean_distances(X, self.X_fit_, squared=self.squared_dist)
if variance:
if verbose > 0:
elapsed = time.time() - t
print('Test distances [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
dist_test = euclidean_distances(X, X, squared=self.squared_dist)
pred_var = np.zeros((X.shape[0],))
for i, gamma in enumerate(np.unique(self.gammas_)):
if verbose > 0:
print('Gamma %d of %d [%dmin %dsec]' % (i + 1,
len(np.unique(self.gammas_)), int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
y_list = [i for i in range(len(self.gammas_)) if
self.gammas_[i] == gamma]
K = self.kernel.apply_to_dist(dist, gamma=gamma)
y_[:, y_list] = np.dot(K, self.dual_coefs_[y_list].T)
if variance:
K_test = self.kernel.apply_to_dist(dist_test, gamma=gamma)
V = solve_triangular(self.L_fit_[i], K.T, lower=True)
# v = np.dot(K, np.dot(self.L_fit_[i], K.T))
v = np.sum(V * V, axis=0)
pred_var = K_test.flat[::X.shape[0] + 1] - v
if self.n_components is not None:
y = self.pca.inverse_transform(y_)
else:
y = y_
if y.shape[1] == 1:
y = y.flatten()
if verbose > 0:
elapsed = time.time() - t
print('Done [%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
sys.stdout.flush()
if variance:
return y, pred_var
else:
return y
def save(self, filename):
np.save(filename + '_alphas', self.alphas_)
np.save(filename + '_dual_coefs', self.dual_coefs_)
np.save(filename + '_gammas', self.gammas_)
np.save(filename + '_lambdas', self.lambdas_)
if not os.path.exists(filename + '_X_fit.npy') or self.replace_fit:
np.save(filename + '_X_fit', self.X_fit_)
np.save(filename + '_y_fit', self.y_fit_)
# np.save(filename + '_L_fit', self.L_fit_)
np.save(filename + '_errors', self.errors)
np.save(filename + '_kernel', self.kernel)
def load(self, filename):
self.alphas_ = np.load(filename + '_alphas.npy', allow_pickle=True)
self.dual_coefs_ = np.load(filename + '_dual_coefs.npy', allow_pickle=True)
self.gammas_ = np.load(filename + '_gammas.npy', allow_pickle=True)
self.X_fit_ = np.load(filename + '_X_fit.npy', allow_pickle=True)
# self.L_fit_ = np.load(filename + '_L_fit.npy', allow_pickle=True)
self.errors = np.load(filename + '_errors.npy', allow_pickle=True)
self.kernel = np.load(filename + '_kernel.npy', allow_pickle=True)[()]
if os.path.exists(filename + '_y_fit.npy'):
self.y_fit_ = np.load(filename + '_y_fit.npy', allow_pickle=True)
else:
warnings.warn('No labels file found, not adding labels to model')
if os.path.exists(filename + '_lambdas.npy'):
self.lambdas_ = np.load(filename + '_lambdas.npy', allow_pickle=True)
else:
warnings.warn('No lambdas file found, not adding importance weights to model')
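# Hedged usage sketch (synthetic data; the grid values are illustrative assumptions only):
#
#   import numpy as np
#   X = np.random.rand(50, 8)
#   y = np.random.rand(50, 3)
#   model = MultivariateGaussianProcessCV(
#       krr_param_grid={'alpha': [1e-6, 1e-3, 1e-1],
#                       'gamma': [0.01, 0.1, 1.0],
#                       'lambda': [0]},
#       cv_nfolds=3)
#   model.fit(X, y)
#   y_pred = model.predict(X)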
|
1626278
|
import pytest
import textwrap
from ansible_builder.steps import AdditionalBuildSteps
@pytest.mark.parametrize('verb', ['prepend', 'append'])
def test_additional_build_steps(verb):
additional_build_steps = {
'prepend': ["RUN echo This is the prepend test", "RUN whoami"],
'append': textwrap.dedent("""
RUN echo This is the append test
RUN whoami
""")
}
steps = AdditionalBuildSteps(additional_build_steps[verb])
assert len(list(steps)) == 2
|