| id | content |
|---|---|
426014
|
from rest_framework import viewsets
from api.suids.serializers import SuidSerializer
from api.base import ShareViewSet
from share.models import SourceUniqueIdentifier
class SuidViewSet(ShareViewSet, viewsets.ReadOnlyModelViewSet):
serializer_class = SuidSerializer
ordering = ('id', )
def get_queryset(self):
return SourceUniqueIdentifier.objects.all()
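# Hedged wiring sketch (hypothetical urls module; the 'suids' prefix and basename
# are assumptions, not taken from this file):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'suids', SuidViewSet, basename='suid')
#     urlpatterns = router.urls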
|
426015
|
from __future__ import absolute_import,print_function,unicode_literals
_E='.env'
_D='always'
_C=True
_B=False
_A=None
import io,logging,os,re,shutil,sys,tempfile
from collections import OrderedDict
from contextlib import contextmanager
from .compat import IS_TYPE_CHECKING,PY2,StringIO,to_env
from .parser import Binding,parse_stream
logger=logging.getLogger(__name__)
if IS_TYPE_CHECKING:
from typing import Dict,Iterator,Match,Optional,Pattern,Union,Text,IO,Tuple
if sys.version_info>=(3,6):_PathLike=os.PathLike
else:_PathLike=Text
if sys.version_info>=(3,0):_StringIO=StringIO
else:_StringIO=StringIO[Text]
__posix_variable=re.compile('\n \\$\\{\n (?P<name>[^\\}:]*)\n (?::-\n (?P<default>[^\\}]*)\n )?\n \\}\n ',re.VERBOSE)
def with_warn_for_invalid_lines(mappings):
for A in mappings:
if A.error:logger.warning('Python-dotenv could not parse statement starting at line %s',A.original.line)
yield A
class DotEnv:
def __init__(A,dotenv_path,verbose=_B,encoding=_A,interpolate=_C):A.dotenv_path=dotenv_path;A._dict=_A;A.verbose=verbose;A.encoding=encoding;A.interpolate=interpolate
@contextmanager
def _get_stream(self):
A=self
if isinstance(A.dotenv_path,StringIO):yield A.dotenv_path
elif os.path.isfile(A.dotenv_path):
with io.open(A.dotenv_path,encoding=A.encoding)as B:yield B
else:
if A.verbose:logger.info('Python-dotenv could not find configuration file %s.',A.dotenv_path or _E)
yield StringIO('')
def dict(A):
if A._dict:return A._dict
B=OrderedDict(A.parse());A._dict=resolve_nested_variables(B)if A.interpolate else B;return A._dict
def parse(B):
with B._get_stream()as C:
for A in with_warn_for_invalid_lines(parse_stream(C)):
if A.key is not _A:yield(A.key,A.value)
def set_as_environment_variables(C,override=_B):
for (A,B) in C.dict().items():
if A in os.environ and not override:continue
if B is not _A:os.environ[to_env(A)]=to_env(B)
return _C
def get(A,key):
B=key;C=A.dict()
if B in C:return C[B]
if A.verbose:logger.warning('Key %s not found in %s.',B,A.dotenv_path)
return _A
def get_key(dotenv_path,key_to_get):return DotEnv(dotenv_path,verbose=_C).get(key_to_get)
@contextmanager
def rewrite(path):
try:
with tempfile.NamedTemporaryFile(mode='w+',delete=_B)as A:
with io.open(path)as B:yield(B,A)
except BaseException:
if os.path.isfile(A.name):os.unlink(A.name)
raise
else:shutil.move(A.name,path)
def set_key(dotenv_path,key_to_set,value_to_set,quote_mode=_D):
K='"';E=quote_mode;C=dotenv_path;B=key_to_set;A=value_to_set;A=A.strip("'").strip(K)
if not os.path.exists(C):logger.warning("Can't write to %s - it doesn't exist.",C);return _A,B,A
if' 'in A:E=_D
if E==_D:F='"{}"'.format(A.replace(K,'\\"'))
else:F=A
G='{}={}\n'.format(B,F)
with rewrite(C)as(J,D):
H=_B
for I in with_warn_for_invalid_lines(parse_stream(J)):
if I.key==B:D.write(G);H=_C
else:D.write(I.original.string)
if not H:D.write(G)
return _C,B,A
def unset_key(dotenv_path,key_to_unset,quote_mode=_D):
B=dotenv_path;A=key_to_unset
if not os.path.exists(B):logger.warning("Can't delete from %s - it doesn't exist.",B);return _A,A
C=_B
with rewrite(B)as(E,F):
for D in with_warn_for_invalid_lines(parse_stream(E)):
if D.key==A:C=_C
else:F.write(D.original.string)
if not C:logger.warning("Key %s not removed from %s - key doesn't exist.",A,B);return _A,A
return C,A
def resolve_nested_variables(values):
def C(name,default):A=default;A=A if A is not _A else'';C=os.getenv(name,B.get(name,A));return C
def D(match):A=match.groupdict();return C(name=A['name'],default=A['default'])
B={}
for (E,A) in values.items():B[E]=__posix_variable.sub(D,A)if A is not _A else _A
return B
def _walk_to_root(path):
A=path
if not os.path.exists(A):raise IOError('Starting path not found')
if os.path.isfile(A):A=os.path.dirname(A)
C=_A;B=os.path.abspath(A)
while C!=B:yield B;D=os.path.abspath(os.path.join(B,os.path.pardir));C,B=B,D
def find_dotenv(filename=_E,raise_error_if_not_found=_B,usecwd=_B):
H='.py'
def E():B='__file__';A=__import__('__main__',_A,_A,fromlist=[B]);return not hasattr(A,B)
if usecwd or E()or getattr(sys,'frozen',_B):B=os.getcwd()
else:
A=sys._getframe()
if PY2 and not __file__.endswith(H):C=__file__.rsplit('.',1)[0]+H
else:C=__file__
while A.f_code.co_filename==C:assert A.f_back is not _A;A=A.f_back
F=A.f_code.co_filename;B=os.path.dirname(os.path.abspath(F))
for G in _walk_to_root(B):
D=os.path.join(G,filename)
if os.path.isfile(D):return D
if raise_error_if_not_found:raise IOError('File not found')
return''
def load_dotenv(dotenv_path=_A,stream=_A,verbose=_B,override=_B,interpolate=_C,**A):B=dotenv_path or stream or find_dotenv();return DotEnv(B,verbose=verbose,interpolate=interpolate,**A).set_as_environment_variables(override=override)
def dotenv_values(dotenv_path=_A,stream=_A,verbose=_B,interpolate=_C,**A):B=dotenv_path or stream or find_dotenv();return DotEnv(B,verbose=verbose,interpolate=interpolate,**A).dict()
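# Hedged usage sketch (not part of the minified python-dotenv module above; a
# missing .env file simply yields an empty mapping):
if __name__ == "__main__":
    load_dotenv()              # export entries from a discovered .env into os.environ
    print(dotenv_values())     # or read the entries as a dict without touching os.environ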
|
426058
|
from lazypredict.Supervised import LazyClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis,
)
from sklearn.model_selection import train_test_split
from sklearn import datasets
def main():
# Load data
data = datasets.load_breast_cancer()
X, y = data.data, data.target
# Split in training and testing data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Lazy Classifier configuration with SPECIFIC classifier algorithms
clf = LazyClassifier(
verbose=0,
ignore_warnings=True,
custom_metric=None,
classifiers=[
RandomForestClassifier,
LinearDiscriminantAnalysis,
ExtraTreesClassifier,
QuadraticDiscriminantAnalysis,
SGDClassifier,
],
)
# Training and testing evaluation
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
print(models)
if __name__ == "__main__":
main()
|
426090
|
import unittest
import sys
from conveyor.multinb import Pipeline
class TestPipelineRun(unittest.TestCase):
def setUp(self):
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
def test_pipeline_additions(self):
data_processing1 = Pipeline()
data_processing1.add_notebook(filename="conveyor/examples/tests/load_data.ipynb", carry_vars=['df'])
data_processing1.add_notebook(filename="conveyor/examples/tests/process_data.ipynb",
carry_vars=['magic_number'],start_cell_idx=3)
def transform_magic(from_state):
to_state = dict()
to_state['transformed_magic_number'] = -1 * from_state['magic_number']
return to_state
data_processing1.add_transform(transform_magic)
assert len(data_processing1.steps) == 3
def test_pipeline_output_exists(self):
data_processing2 = Pipeline()
data_processing2.add_notebook(filename="conveyor/examples/tests/load_data.ipynb", carry_vars=['df'])
data_processing2.add_notebook(filename="conveyor/examples/tests/process_data.ipynb",
carry_vars=['magic_number'],start_cell_idx=3)
def transform_magic(from_state):
to_state = dict()
to_state['transformed_magic_number'] = -1 * from_state['magic_number']
return to_state
data_processing2.add_transform(transform_magic)
output1 = data_processing2.run()
assert output1 is not None
def test_pipeline_output_accurate(self):
data_processing3 = Pipeline()
data_processing3.add_notebook(filename="conveyor/examples/tests/load_data.ipynb", carry_vars=['df'])
data_processing3.add_notebook(filename="conveyor/examples/tests/process_data.ipynb",
carry_vars=['magic_number'],start_cell_idx=3)
def transform_magic(from_state):
to_state = dict()
to_state['transformed_magic_number'] = -1 * from_state['magic_number']
return to_state
data_processing3.add_transform(transform_magic)
output2 = data_processing3.run()
assert output2[-1]['result']['transformed_magic_number'] == -30
def test_pipeline_output_stages(self):
data_processing4 = Pipeline()
data_processing4.add_notebook(filename="conveyor/examples/tests/load_data.ipynb", carry_vars=['df'])
data_processing4.add_notebook(filename="conveyor/examples/tests/process_data.ipynb",
carry_vars=['magic_number'],start_cell_idx=3)
def transform_magic(from_state):
to_state = dict()
to_state['transformed_magic_number'] = -1 * from_state['magic_number']
return to_state
data_processing4.add_transform(transform_magic)
output3 = data_processing4.run()
assert len(output3) == 3
|
426121
|
from __future__ import unicode_literals, print_function
import unittest
import boto3
import sys
import os
import uuid
import json
from os.path import dirname, join
from moto import mock_dynamodb2
from todo.api.create import create
from todo.api.delete import delete, handler
from todo.api.get import get_one
from dbconfig import init
class TestDeleteAPI(unittest.TestCase):
@mock_dynamodb2
def test_delete_function(self):
client, table = init()
item = {'item': 'I need to finish this test!', 'completed': True}
todo = create(client, '1', item, 'todo_test', ['completed', 'item'])
delete(client, '1', todo['todoId'], table.table_name)
todo_from_get = get_one(client, '1', todo['todoId'], table.table_name)
# Verify it's an empty dict
assert not todo_from_get
@mock_dynamodb2
def test_delete_handler(self):
client, table = init()
item = {'item': 'I need to finish this test!', 'completed': True}
todo = create(client, '1', item, 'todo_test', ['completed', 'item'])
event = {
'queryStringParameters': {
'id': todo['todoId']
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': '1'
}
}
}
}
handler(event, {})
todo_from_get = get_one(client, '1', todo['todoId'], table.table_name)
# Verify it's an empty dict
assert not todo_from_get
if __name__ == '__main__':
unittest.main()
|
426145
|
def make_cmd_params(command, nodes, env, sourcehash):
inputs = {}
outputs = []
files = [] #to be monitored
refs = []
output_refs = []
params = {
"lineno": command["cmd"]["lineno"],
"source": command["cmd"]["source"],
"sourcehash": sourcehash,
"refs": refs,
"output_refs": output_refs,
"inputs": inputs,
"outputs": outputs,
"files": files,
}
for noderef in command["noderefs"]:
if noderef["type"] == "file":
name = noderef["value"]
if name not in files:
files.append(name)
refs.append(noderef)
elif noderef["type"] == "doc" and noderef["index"] == -1:
refs.append(None)
elif noderef["type"] in ("doc", "variable"):
node = nodes[noderef["type"]][noderef["index"]]
name = node["name"]
inputs[name] = noderef["type"]
refs.append(name)
elif noderef["type"] == "env":
envname = nodes["env"][noderef["index"]]["name"]
refs.append({"type": "env", "value": env[envname]})
elif noderef["type"] == "varexp":
subrefs = []
for subnoderef in noderef["noderefs"]:
if subnoderef["type"] == "variable":
node = nodes["variable"][subnoderef["index"]]
name = node["name"]
subrefs.append(name)
inputs[name] = "variable"
elif subnoderef["type"] == "env":
envname = nodes["env"][subnoderef["index"]]["name"]
subrefs.append("$" + envname)
ref = {"type": "varexp", "value": noderef["value"], "refs": subrefs}
refs.append(ref)
else:
raise ValueError(command["cmd"]["source"], noderef["type"])
for output in command["outputs"]:
type_ = output["type"]
noderef = output["noderef"]
assert noderef["type"] == "doc"
if noderef["index"] == -1:
output_refs.append({"type": type_, "name": None})
else:
node = nodes["doc"][noderef["index"]]
name = node["name"]
if name not in outputs:
outputs.append(name)
output_refs.append({"type": type_, "name": name})
capture = command.get("capture", None)
if capture is not None:
assert capture["type"] == "context"
type_ = "capture"
if capture["index"] == -1:
output_refs.append({"type": type_, "name": None})
else:
node = nodes["context"][capture["index"]]
name = node["name"]
outputs.append(name)
output_refs.append({"type": type_, "name": name})
pragma = command.get("pragma", None)
if pragma is not None:
params["pragma"] = pragma
params["command"] = command["parsed"]
return params
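# Hedged usage sketch (hand-built minimal structures inferred from the code above;
# the output "type" value and the node layout are assumptions, the real dicts come
# from the surrounding parser):
if __name__ == "__main__":
    nodes = {"doc": [{"name": "result"}], "variable": [], "env": [], "context": []}
    command = {
        "cmd": {"lineno": 1, "source": "cat input.txt > result"},
        "noderefs": [{"type": "file", "value": "input.txt"}],
        "outputs": [{"type": "stdout", "noderef": {"type": "doc", "index": 0}}],
        "parsed": ["cat", "input.txt"],
    }
    params = make_cmd_params(command, nodes, env={}, sourcehash="dummyhash")
    print(params["files"], params["outputs"])  # ['input.txt'] ['result']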
|
426180
|
import functools
import time
from typing import Optional
def on_interval(seconds: int): # in seconds
def wrapper(fn):
_last_called_time: Optional[float] = None
@functools.wraps(fn)
def inner(*args, **kwargs):
nonlocal _last_called_time
now = time.time()
if not _last_called_time or _last_called_time + seconds < now:
_last_called_time = now
return fn(*args, **kwargs)
return inner
return wrapper
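# Hedged usage sketch (not part of the original module): throttle a function so
# that repeated calls within the interval are skipped.
if __name__ == "__main__":
    @on_interval(seconds=2)
    def heartbeat():
        print("tick", time.time())

    heartbeat()      # runs
    heartbeat()      # skipped: called again within 2 seconds
    time.sleep(2)
    heartbeat()      # runs again after the interval has elapsed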
|
426215
|
import logging
logger = logging.getLogger('awx.main.migrations')
def migrate_to_multi_cred(app, schema_editor):
Job = app.get_model('main', 'Job')
JobTemplate = app.get_model('main', 'JobTemplate')
ct = 0
for cls in (Job, JobTemplate):
for j in cls.objects.iterator():
if j.credential:
ct += 1
logger.debug('Migrating cred %s to %s %s multi-cred relation.', j.credential_id, cls, j.id)
j.credentials.add(j.credential)
if j.vault_credential:
ct += 1
logger.debug('Migrating cred %s to %s %s multi-cred relation.', j.vault_credential_id, cls, j.id)
j.credentials.add(j.vault_credential)
for cred in j.extra_credentials.all():
ct += 1
logger.debug('Migrating cred %s to %s %s multi-cred relation.', cred.id, cls, j.id)
j.credentials.add(cred)
if ct:
logger.info('Finished migrating %s credentials to multi-cred', ct)
def migrate_back_from_multi_cred(app, schema_editor):
Job = app.get_model('main', 'Job')
JobTemplate = app.get_model('main', 'JobTemplate')
CredentialType = app.get_model('main', 'CredentialType')
vault_credtype = CredentialType.objects.get(kind='vault')
ssh_credtype = CredentialType.objects.get(kind='ssh')
ct = 0
for cls in (Job, JobTemplate):
for j in cls.objects.iterator():
for cred in j.credentials.iterator():
changed = False
if cred.credential_type_id == vault_credtype.id:
changed = True
ct += 1
logger.debug('Reverse migrating vault cred %s for %s %s', cred.id, cls, j.id)
j.vault_credential = cred
elif cred.credential_type_id == ssh_credtype.id:
changed = True
ct += 1
logger.debug('Reverse migrating ssh cred %s for %s %s', cred.id, cls, j.id)
j.credential = cred
else:
changed = True
ct += 1
logger.debug('Reverse migrating cloud cred %s for %s %s', cred.id, cls, j.id)
j.extra_credentials.add(cred)
if changed:
j.save()
if ct:
logger.info('Finished reverse migrating %s credentials from multi-cred', ct)
def migrate_workflow_cred(app, schema_editor):
WorkflowJobTemplateNode = app.get_model('main', 'WorkflowJobTemplateNode')
WorkflowJobNode = app.get_model('main', 'WorkflowJobNode')
ct = 0
for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
for node in cls.objects.iterator():
if node.credential:
logger.debug('Migrating prompted credential %s for %s %s', node.credential_id, cls, node.id)
ct += 1
node.credentials.add(node.credential)
if ct:
logger.info('Finished migrating total of %s workflow prompted credentials', ct)
def migrate_workflow_cred_reverse(app, schema_editor):
WorkflowJobTemplateNode = app.get_model('main', 'WorkflowJobTemplateNode')
WorkflowJobNode = app.get_model('main', 'WorkflowJobNode')
ct = 0
for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
for node in cls.objects.iterator():
cred = node.credentials.first()
if cred:
node.credential = cred
logger.debug('Reverse migrating prompted credential %s for %s %s', node.credential_id, cls, node.id)
ct += 1
node.save(update_fields=['credential'])
if ct:
logger.info('Finished reverse migrating total of %s workflow prompted credentials', ct)
def migrate_inventory_source_cred(app, schema_editor):
InventoryUpdate = app.get_model('main', 'InventoryUpdate')
InventorySource = app.get_model('main', 'InventorySource')
ct = 0
for cls in (InventoryUpdate, InventorySource):
for obj in cls.objects.iterator():
if obj.credential:
ct += 1
logger.debug('Migrating credential %s for %s %s', obj.credential_id, cls, obj.id)
obj.credentials.add(obj.credential)
if ct:
logger.info('Finished migrating %s inventory source credentials to multi-cred', ct)
def migrate_inventory_source_cred_reverse(app, schema_editor):
InventoryUpdate = app.get_model('main', 'InventoryUpdate')
InventorySource = app.get_model('main', 'InventorySource')
ct = 0
for cls in (InventoryUpdate, InventorySource):
for obj in cls.objects.iterator():
cred = obj.credentials.first()
if cred:
ct += 1
logger.debug('Reverse migrating credential %s for %s %s', cred.id, cls, obj.id)
obj.credential = cred
obj.save()
if ct:
logger.info('Finished reverse migrating %s inventory source credentials from multi-cred', ct)
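# Hedged wiring sketch (kept as a comment; the dependency name is hypothetical).
# These functions are the kind typically wired into a Django migration via RunPython:
#
#     from django.db import migrations
#
#     class Migration(migrations.Migration):
#         dependencies = [('main', '0xxx_previous_migration')]
#         operations = [
#             migrations.RunPython(migrate_to_multi_cred, migrate_back_from_multi_cred),
#             migrations.RunPython(migrate_workflow_cred, migrate_workflow_cred_reverse),
#             migrations.RunPython(migrate_inventory_source_cred, migrate_inventory_source_cred_reverse),
#         ]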
|
426218
|
from __future__ import print_function
import boto3
import os
S3 = boto3.client("s3")
BUCKET = "asi-lens-test-data"
def check_report_bom(name):
"""Check whether the report has Byte Order Marks
Parameters
----------
name : str
Filename of the report.
"""
with open(name, "r") as f:
report_json = f.read()
if report_json.count("\\ufeff"):
print(
" WARNING: {} Byte-order-Marks found in report! This might"
" provoke failures on codeship. Have you checked that the csv"
" is encoded in UTF8 without BOM?".format(
report_json.count("\\ufeff")
)
)
if __name__ == "__main__":
from test_regression import datasets
for dataset in datasets:
report_name = dataset.replace(".csv", ".json")
report_path = os.path.join("generated_reports", report_name)
check_report_bom(report_path)
S3.upload_file(
report_path, Bucket=BUCKET, Key="output/{}".format(report_name)
)
|
426233
|
class SchemaAlreadyRegistered(Exception):
"""Error raised while trying to register a Schema that has already
been registered
"""
def __init__(self, name):
"""
Args:
name (str): schema name
"""
super(SchemaAlreadyRegistered, self).__init__("Schema \"%s\" has already been registered in mapper" % name)
class SchemaNotRegistered(Exception):
"""Error raised while trying to use a Schema that has not
been registered
"""
def __init__(self, name):
"""
Args:
name (str): schema name
"""
super(SchemaNotRegistered, self).__init__("Schema \"%s\" has not been registered in mapper" % name)
class MissingMapping(Exception):
"""Error raised when no mapper has been defined for a class
while dumping an instance of that class.
"""
def __init__(self, type):
"""
Args:
type (type): a type
"""
message = "Missing mapping for object of type %s" % type
super(MissingMapping, self).__init__(message)
self.type = type
class InvalidDocument(Exception):
"""Error raised when validating a document.
It's composed of all the errors detected.
"""
def __init__(self, errors):
super(InvalidDocument, self).__init__()
self.errors = errors
def __len__(self):
"""Returns errors count
Returns:
int
"""
return len(self.errors)
def __getitem__(self, index):
"""Returns error at index
Args:
index (int): index
Returns:
Exception
"""
return self.errors[index]
class ValidationError(Exception):
"""Base class for validation errors"""
def __init__(self, message, path):
"""
Args:
message (str): error message
path (list): path of the invalid data
"""
super(ValidationError, self).__init__(message)
self.path = path
class InvalidType(ValidationError):
"""Error raised by `Type` validator"""
def __init__(self, invalid, expected, path):
"""
Args:
invalid (type): invalid type received
expected (type): type expected
path (list): path of the invalid data
"""
message = "Invalid type, got %s instead of %s" % (invalid, expected)
super(InvalidType, self).__init__(message, path)
self.invalid = invalid
self.expected = expected
class NotEqual(ValidationError):
"""Error raised by `Equal` validator"""
def __init__(self, invalid, expected, path):
"""
Args:
invalid (object): invalid value
expected (object): expected value
path (list): path of the invalid data
"""
message = "Invalid value, got %s instead of %s" % (invalid, expected)
super(NotEqual, self).__init__(message, path)
self.invalid = invalid
self.expected = expected
class InvalidMatch(ValidationError):
"""Error raised by `Match` validator"""
def __init__(self, invalid, regexp, path):
"""
Args:
invalid (str): invalid value
regexp (regexp): a regexp
path (list): path of the invalid data
"""
message = "Value \"%s\" does not match pattern \"%s\"" % (invalid, regexp.pattern)
super(InvalidMatch, self).__init__(message, path)
self.invalid = invalid
self.regexp = regexp
class InvalidIn(ValidationError):
"""Error raised by `In` validator"""
def __init__(self, invalid, expected, path):
"""
Args:
invalid (str): invalid value
expected (list): list of expected values
path (list): path of the invalid data
"""
message = "Value \"%s\" is not in %s" % (invalid, expected)
super(InvalidIn, self).__init__(message, path)
self.invalid = invalid
self.expected = expected
class InvalidLength(ValidationError):
"""Error raised by `Length` validator"""
def __init__(self, length, min, max, path):
"""
Args:
length (int): received length
min (int): minimum length
max (int): maximum length
path (list): path of the invalid data
"""
message = "Got %s items, should have been between %s and %s" % (length, min, max)
super(InvalidLength, self).__init__(message, path)
self.max = max
self.min = min
self.length = length
class InvalidRange(ValidationError):
"""Error raised by `Between` validator"""
def __init__(self, value, min, max, path):
"""
Args:
value (int): value received
min (int): minimum value
max (int): maximum value
path (list): path of the invalid data
"""
message = "%s is not between %s and %s" % (value, min, max)
super(InvalidRange, self).__init__(message, path)
self.max = max
self.min = min
self.value = value
class InvalidURL(ValidationError):
"""Error raised by `URL` validator"""
def __init__(self, invalid, path):
"""
Args:
invalid (str): invalid URL
path (list): path of the invalid data
"""
message = "Invalid URL : %s" % (invalid)
super(InvalidURL, self).__init__(message, path)
self.invalid = invalid
class InvalidDateTimeFormat(ValidationError):
"""Error raised by `DateTimeFormat` validator"""
def __init__(self, value, format, path):
"""
Args:
value (str): invalid datetime string
format (str): format used to parse datetime
path (list): path of the invalid data
"""
message = "Date value \"%s\" can't be parsed with format \"%s\"" % (value, format)
super(InvalidDateTimeFormat, self).__init__(message, path)
self.value = value
self.format = format
class NotNone(ValidationError):
"""Error raised by `IsNone` validator"""
def __init__(self, invalid, path):
"""
Args:
invalid (str): invalid value
path (list): path of the invalid data
"""
message = "Value is not None : %s" % (invalid)
super(NotNone, self).__init__(message, path)
self.invalid = invalid
class MissingPolymorphicKey(ValidationError):
"""Error raised if Polymorphic object do not contain a type key"""
def __init__(self, key, path):
"""
Args:
key (str): polymorphic key
path (list): path of the invalid data
"""
message = "Polymorphic document does not contain the \"%s\" key " % key
super(MissingPolymorphicKey, self).__init__(message, path)
self.key = key
class InvalidPolymorphicType(ValidationError):
"""Error raised by a polymorphic field when it does not have a
mapping for the data it tries to validate
"""
def __init__(self, invalid_type, supported_types, path):
"""
Args:
invalid_type (str): invalid type received
supported_types (list): valid types supported
path (list): path of the invalid data
"""
message = "Invalid polymorphic document \"%s\" is not supported only \"%s\" " % (invalid_type, supported_types)
super(InvalidPolymorphicType, self).__init__(message, path)
self.invalid_type = invalid_type
self.supported_types = supported_types
class MissingKey(ValidationError):
"""Error raised when a key is missing from data"""
def __init__(self, key, path):
"""
Args:
key (str): missing key
path (list): path of the missing data
"""
message = "Document does not contain the \"%s\" key" % key
super(MissingKey, self).__init__(message, path)
self.key = key
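# Hedged usage sketch (not part of the original module; the validators that raise
# these errors live elsewhere in the library):
if __name__ == "__main__":
    doc_error = InvalidDocument([
        InvalidType(invalid=str, expected=int, path=["age"]),
        MissingKey("name", path=[]),
    ])
    print(len(doc_error))                       # 2
    for i in range(len(doc_error)):             # __getitem__ gives access by index
        print(doc_error[i].args[0], doc_error[i].path)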
|
426238
|
import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
eps = 1e-12
class CFVQA(nn.Module):
"""
Wraps another model
    The original model must return a dictionary containing the 'logits' key (predictions before softmax)
Returns:
- logits_vq: the original predictions of the model, i.e., NIE
- logits_q: the predictions from the question-only branch
- logits_v: the predictions from the vision-only branch
- logits_all: the predictions from the ensemble model
- logits_cfvqa: the predictions based on CF-VQA, i.e., TIE
=> Use `logits_all`, `logits_q` and `logits_v` for the loss
"""
def __init__(self, model, output_size, classif_q, classif_v, fusion_mode, end_classif=True, is_va=True):
super().__init__()
self.net = model
self.end_classif = end_classif
assert fusion_mode in ['rubi', 'hm', 'sum'], "Fusion mode should be rubi/hm/sum."
self.fusion_mode = fusion_mode
self.is_va = is_va and (not fusion_mode=='rubi') # RUBi does not consider V->A
# Q->A branch
self.q_1 = MLP(**classif_q)
if self.end_classif: # default: True (following RUBi)
self.q_2 = nn.Linear(output_size, output_size)
# V->A branch
if self.is_va: # default: True (containing V->A)
self.v_1 = MLP(**classif_v)
if self.end_classif: # default: True (following RUBi)
self.v_2 = nn.Linear(output_size, output_size)
self.constant = nn.Parameter(torch.tensor(0.0))
def forward(self, batch):
out = {}
# model prediction
net_out = self.net(batch)
logits = net_out['logits']
# Q->A branch
q_embedding = net_out['q_emb'] # N * q_emb
q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate
q_pred = self.q_1(q_embedding)
# V->A branch
if self.is_va:
v_embedding = net_out['v_emb'] # N * v_emb
v_embedding = grad_mul_const(v_embedding, 0.0) # don't backpropagate
v_pred = self.v_1(v_embedding)
else:
v_pred = None
# both q, k and v are the facts
z_qkv = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=True, v_fact=True) # te
# q is the fact while k and v are the counterfactuals
z_q = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=False, v_fact=False) # nie
logits_cfvqa = z_qkv - z_q
if self.end_classif:
q_out = self.q_2(q_pred)
if self.is_va:
v_out = self.v_2(v_pred)
else:
q_out = q_pred
if self.is_va:
v_out = v_pred
out['logits_all'] = z_qkv # for optimization
out['logits_vq'] = logits # predictions of the original VQ branch, i.e., NIE
out['logits_cfvqa'] = logits_cfvqa # predictions of CFVQA, i.e., TIE
out['logits_q'] = q_out # for optimization
if self.is_va:
out['logits_v'] = v_out # for optimization
if self.is_va:
out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), v_pred.clone().detach(), q_fact=True, k_fact=False, v_fact=False) # tie
else:
out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), None, q_fact=True, k_fact=False, v_fact=False) # tie
return out
def process_answers(self, out, key=''):
out = self.net.process_answers(out, key='_all')
out = self.net.process_answers(out, key='_vq')
out = self.net.process_answers(out, key='_cfvqa')
out = self.net.process_answers(out, key='_q')
if self.is_va:
out = self.net.process_answers(out, key='_v')
return out
def fusion(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
z_k, z_q, z_v = self.transform(z_k, z_q, z_v, q_fact, k_fact, v_fact)
if self.fusion_mode == 'rubi':
z = z_k * torch.sigmoid(z_q)
elif self.fusion_mode == 'hm':
if self.is_va:
z = z_k * z_q * z_v
else:
z = z_k * z_q
z = torch.log(z + eps) - torch.log1p(z)
elif self.fusion_mode == 'sum':
if self.is_va:
z = z_k + z_q + z_v
else:
z = z_k + z_q
z = torch.log(torch.sigmoid(z) + eps)
return z
def transform(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
if not k_fact:
z_k = self.constant * torch.ones_like(z_k).cuda()
if not q_fact:
z_q = self.constant * torch.ones_like(z_q).cuda()
if self.is_va:
if not v_fact:
z_v = self.constant * torch.ones_like(z_v).cuda()
if self.fusion_mode == 'hm':
z_k = torch.sigmoid(z_k)
z_q = torch.sigmoid(z_q)
if self.is_va:
z_v = torch.sigmoid(z_v)
return z_k, z_q, z_v
|
426263
|
from abc import ABC, abstractmethod
import threading, uuid, datetime, traceback
class Task(threading.Thread, ABC):
def __init__(self):
self.running: bool = False
self._on_finish_events = []
self._on_error_events = []
self._on_start_events = []
self._id = str(uuid.uuid4())
self.start_date = datetime.datetime.now()
self.end_date = None
self.last_update_date = None
self._progress = 0
self.status = "PENDING"
self.canceled = False
threading.Thread.__init__(self)
def id(self):
return self._id
def append_on_finish_event(self, callback):
self._on_finish_events += [callback]
def append_on_error_event(self, callback):
self._on_error_events += [callback]
def append_on_start_event(self, callback):
self._on_start_events += [callback]
def start(self):
super().start()
def run(self):
for event in self._on_start_events:
event()
self.status = "RUNNING"
self.running = True
if self.canceled:
self.status = "CANCELED"
for event in self._on_error_events:
event()
self.running = False
self.end_date = datetime.datetime.now()
else:
try:
self.handle()
self.status = "DONE"
for event in self._on_finish_events:
event()
except:
self.status = "ERROR"
for event in self._on_error_events:
event()
finally:
self.running = False
self.end_date = datetime.datetime.now()
def type(self):
return self.__class__.__name__
"""
The argument the task (exemple: the environement name, the build information, etc...)
"""
@abstractmethod
def argument(self):
pass
"""
The handler
"""
@abstractmethod
def handle(self):
pass
"""
The description of what the task do
"""
@abstractmethod
def description(self):
pass
@abstractmethod
def get_message(self) -> str:
pass
@abstractmethod
def get_progress(self) -> int:
pass
@abstractmethod
def set_progress(self, progress: int):
pass
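# Hedged usage sketch (hypothetical subclass, not part of the original module):
if __name__ == "__main__":
    import time

    class SleepTask(Task):
        def argument(self):
            return {}
        def description(self):
            return "Sleep briefly, then report completion"
        def handle(self):
            time.sleep(0.1)
            self.set_progress(100)
        def get_message(self) -> str:
            return self.status
        def get_progress(self) -> int:
            return self._progress
        def set_progress(self, progress: int):
            self._progress = progress

    task = SleepTask()
    task.append_on_finish_event(lambda: print("finished", task.id()))
    task.start()
    task.join()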
|
426300
|
import logging
from ..analyzer.timewindowanalyzer import TimeWindowAnalyzer
###
L = logging.getLogger(__name__)
###
class TimeSeriesPredictor(TimeWindowAnalyzer):
'''
Trained model based time window analyzer, which collects
data into time series and predicts certain value.
'''
ConfigDefaults = {
'path': '',
'predicted_attribute': 'predicted'
}
def __init__(
self, app, pipeline, model, matrix_id=None,
dtype=[('value', 'f8'), ('predicted', 'f8'), ('count', 'i8')],
columns=15, analyze_on_clock=False, resolution=60, start_time=None,
clock_driven=False, id=None, config=None):
super().__init__(
app, pipeline, matrix_id=matrix_id, dtype=dtype,
columns=columns, analyze_on_clock=analyze_on_clock,
resolution=resolution, start_time=start_time,
clock_driven=clock_driven, id=id, config=config
)
self.Model = model
self.PredictedAttribute = self.Config['predicted_attribute']
self.initialize_window()
def initialize_window(self):
'''
Specific initialization if needed.
'''
pass
def enrich(self, context, event, predicted):
'''
Enriches event with predicted value, override if needed.
'''
event[self.PredictedAttribute] = predicted
def assign(self, *args):
'''
Record predicted values into time window matrix (or anywhere) if needed.
'''
pass
def process(self, context, event):
if self.predicate(context, event):
sample, column = self.evaluate(context, event)
else:
return event
if sample is not None:
transformed_sample = self.Model.transform(sample)
predicted = self.Model.predict(transformed_sample)
self.enrich(context, event, predicted)
self.assign(predicted, column)
else:
self.enrich(context, event, None)
return event
def alarm(self, *args):
'''
Compare real value and predicted and raise an alarm.
'''
pass
|
426310
|
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
def get_house_prices_and_rooms():
# getting the data
interesting_columns = ['house_price', 'number_of_rooms']
houses_df = pd.read_csv('data/HousingData.csv')[interesting_columns]
# getting data without outliers
number_of_rooms = houses_df['number_of_rooms']
house_prices_normal = houses_df['house_price']
# adding an outlier
house_prices_with_outliers = house_prices_normal.copy()
house_prices_with_outliers.loc[3] = 500
return number_of_rooms, house_prices_normal, house_prices_with_outliers
def plot_house_prices_and_rooms():
number_of_rooms, house_prices_normal, house_prices_with_outliers = get_house_prices_and_rooms()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 4))
pd.concat([number_of_rooms, house_prices_normal], axis=1).plot(kind='scatter',
y='number_of_rooms',
x='house_price',
ax=axes[0],
title='Nr of rooms and house prices')
pd.concat([number_of_rooms, house_prices_with_outliers], axis=1).plot(kind='scatter',
y='number_of_rooms',
x='house_price',
ax=axes[1],
title='Nr of rooms and house prices (with an outlier)')
plt.show()
def get_heights_data_metric():
return pd.DataFrame({
'ages': [2, 4, 4, 6, 8, 9, 12, 14],
'heights': [120, 125, 127, 135, 140, 139, 170, 210]
})
def get_heights_data_freedom_units():
return pd.DataFrame({
'ages': [2, 4, 4, 6, 8, 9, 12, 14],
'heights': [3.93700787, 4.10104987, 4.16666667, 4.42913386, 4.59317585,
4.56036745, 5.57742782, 6.88976378]
})
def _make_square(df, i):
x_mean = df.ages.mean()
y_mean = df.heights.mean()
x = df.iloc[i].ages
y = df.iloc[i].heights
alpha = .1
if x > x_mean and y > y_mean:
plt.fill_betweenx(x1=x_mean, x2=x, y=(y_mean, y), alpha=alpha,
color='b')
elif x < x_mean and y < y_mean:
plt.fill_betweenx(x1=x, x2=x_mean, y=(y, y_mean), alpha=alpha,
color='b')
elif x < x_mean and y > y_mean:
plt.fill_betweenx(x1=x, x2=x_mean, y=(y_mean, y), alpha=alpha,
color='r')
else:
plt.fill_betweenx(x1=x_mean, x2=x, y=(y, y_mean), alpha=alpha,
color='r')
def quick_scatterplot(df, plot_center=False, plot_squares=None):
df.plot(kind='scatter', x='ages', y='heights', figsize=(12, 6))
if plot_center:
plt.scatter(df.ages.mean(), df.heights.mean(), color='k', marker='+',
s=250, )
if plot_squares:
if plot_squares == 'all':
for i in range(len(df)):
_make_square(df, i)
else:
_make_square(df, plot_squares)
plt.show()
def get_data_for_spearman():
np.random.seed(100)
x = np.linspace(-100, 100)
a = pd.Series(x ** 3)[3::] / 100000
a.index = a.index * 10
a = a + a.index / 100
return a.reset_index().rename(columns={'index': 'a', 0: 'b'})
import math
def generate_correlated_data(n_points, corr):
xx = np.array([0, 1])
yy = np.array([0, 1])
means = [xx.mean(), yy.mean()]
stds = [xx.std() / 3, yy.std() / 3]
covs = [[stds[0]**2 , stds[0]*stds[1]*corr],
[stds[0]*stds[1]*corr, stds[1]**2]]
m = pd.DataFrame(np.random.multivariate_normal(means, covs, n_points))
m.columns = ['a', 'b']
return m
def plot_scatter(df, color, figsize=None):
if not figsize:
figsize=(8, 8)
f, ax = plt.subplots(figsize=figsize)
label = 'Corr: % 0.2f' % (df[df.columns[0]].corr(df[df.columns[1]]))
# ax = df.plot(kind='scatter', x=df.columns[0], y=df.columns[1], label=label, figsize=(8, 8), color=color)
    ax = df.plot(kind='scatter', x=df.columns[0], y=df.columns[1], label=label, figsize=figsize, color=color, ax=ax)
# plt.ylim([0, 1.2])
# plt.xlim([0, 1.2])
return ax.get_figure()
def scatter_plot(df, ax, color, figsize=None):
    # this should not exist, but it is needed for something
if not figsize:
figsize = (8, 8)
return df.plot(kind='scatter',
x=df.columns[0],
y=df.columns[1],
label='Corr: % 0.2f' % (df[df.columns[0]].corr(df[df.columns[1]])),
figsize=figsize,
color=color, ax=ax)
def multiple_from_angle(x):
return math.tan(math.radians(x))
def generate_example(corr, slope, n_points=500, color='b'):
df = generate_correlated_data(n_points, corr)
# plot_scatter(df, color=color)
# removing trend
df['b'] = df['b'] - df['a']
# plot_scatter(df, color=color)
# adding trend back in
multiple = multiple_from_angle(slope)
df['b'] = df['b'] + multiple * df['a']
return df
def plot_correlated_distrs():
f, ax = plt.subplots(figsize=(10, 6))
med_up = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * .4]).T
slight_up = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * .2]).T
tiny_up = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * 0.05]).T
tiny_down = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * -0.05]).T
slight_down = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * -.2]).T
bit_down = pd.DataFrame([np.linspace(0, 1, 500), np.linspace(0, 1, 500) * -.4]).T
scatter_plot(med_up, ax=ax, color='#003d66')
scatter_plot(slight_up, ax=ax, color='#66c2ff')
scatter_plot(tiny_up, ax=ax, color='#99d6ff')
scatter_plot(tiny_down, ax=ax, color='#ffcccc')
scatter_plot(slight_down, ax=ax, color='#ff9999')
scatter_plot(bit_down, ax=ax, color='#800000')
plt.axhline(0, ls='--', c='grey')
plt.ylim([-.5,.5])
plt.show()
def plot_correlation_bars():
df1 = generate_correlated_data(n_points=400, corr=.85)
df2 = generate_correlated_data(n_points=400, corr=.98)
f, ax = plt.subplots(figsize=(8, 8))
pd.Series(np.linspace(0, 1, 100) + 0.2, index=np.linspace(0, 1, 100) ).plot(ax=ax, color='blue')
pd.Series(np.linspace(0, 1, 100) - 0.2, index=np.linspace(0, 1, 100) ).plot(ax=ax, color='blue')
pd.Series(np.linspace(0, 1, 100) + 0.1, index=np.linspace(0, 1, 100) ).plot(ax=ax, color='orange')
pd.Series(np.linspace(0, 1, 100) - 0.1, index=np.linspace(0, 1, 100) ).plot(ax=ax, color='orange')
scatter_plot(df1, ax, 'blue')
scatter_plot(df2, ax, 'orange')
plt.show()
def plot_angled_correlations():
    # some datasets for the sake of laziness
df1 = pd.read_csv('data/df_45.csv')
df2 = pd.read_csv('data/df_25.csv')
f, ax = plt.subplots(figsize=(8, 8))
scatter_plot(df1, ax, 'blue')
scatter_plot(df2, ax, 'orange')
plt.show()
def plot_positive_and_negative():
df1 = generate_example(corr=.95, slope=35)
df2 = generate_example(corr=.95, slope=-25)
f, ax = plt.subplots(figsize=(8, 8))
scatter_plot(df1, ax, 'blue')
scatter_plot(df2, ax, 'orange')
plt.ylim([-1, 1])
plt.show()
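# Hedged usage sketch (not part of the original module; assumes an interactive
# matplotlib backend is available):
if __name__ == "__main__":
    df = generate_correlated_data(n_points=300, corr=0.8)
    plot_scatter(df, color='b')
    plt.show()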
|
426320
|
from core.case.base import TestCaseBase, TestType
from core.case.decorator import case, data_provider
from core.config.setting import TestSettingBase
from core.resource.pool import ResourcePool
from core.result.reporter import StepResult
@case(priority=1, test_type=TestType.SANITY)
class HelloWorldTest(TestCaseBase):
def collect_resource(self, pool: ResourcePool):
self.reporter.add_step_group("Collect Resources")
self.ap = pool.collect_device("AP", 1)[0]
self.reporter.end_step_group()
def setup(self):
self.reporter.add(StepResult.INFO, self.ap.name)
self.reporter.add(StepResult.INFO, "This is setup step")
def test(self):
self.reporter.add(StepResult.INFO, f"Test Setting {self.setting.case_setting1}")
self.reporter.add(StepResult.PASS, "This Passed step")
self.reporter.add_step_group("Step group1")
self.reporter.add(StepResult.FAIL, "This is Failed Step")
self.reporter.end_step_group()
def cleanup(self):
self.reporter.add(StepResult.INFO, "This is clean up step")
class HelloWorldTestSetting(TestSettingBase):
case_setting1 = "setting1"
case_setting2 = 20
|
426343
|
import pyaudio
import time
import threading
import os
import sys
import select
import numpy as np
import audio_streamer
import effector
import graceful_killer as kl
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
PB_FILE = './frozen_model/frozen.pb'
UFF_FILE = './frozen_model/frozen.uff'
ENGINE_FILE = './tensorrt_engine_fp'
DEVICE_NAME = 'DUO-CAPTURE'
"""
Provides guitar effector functionality using AI
"""
class Sharingan():
TIMEOUT_SEC = 0.5*1024.0*1.0/44100.0
def __init__(self):
self.input = np.zeros([1,1,1024])
self.output = np.zeros([1,1,1024])
self.effector = effector.Effector(PB_FILE, UFF_FILE, ENGINE_FILE)
self.enabled = True
self.quit = False
self.input_lock = threading.Lock()
self.output_condition = threading.Condition(threading.Lock())
self.output_condition.state = False # True: notify called, otherwise False
self.killer = kl.GracefulKiller()
def start(self):
"""
        Start sharingan. Has to be run from the main thread,
        since I can't touch CUDA from another thread somehow.
"""
self.effector.create_engine()
self.effector.initialize_engine()
# warm up
for i in range(10):
self.effector.effect(np.zeros([1, 1, 1024]))
audio = audio_streamer.AudioStreamer(input_device_name=DEVICE_NAME, output_device_name=DEVICE_NAME)
audio.open_device(self, self.audio_arrived)
audio.start_streaming()
self.print_instruction()
while audio.is_streaming() and not self.should_quit():
            # Once audio data has arrived, it should be stored in self.input
self.input_lock.acquire()
local_input = None
if(self.input is not None):
local_input = np.copy(self.input)
self.input_lock.release()
if(local_input is not None):
# We give the data to effector to modulate sound data
# Send signal to audio callback function
if(self.enabled):
local_output = self.effector.effect(local_input)
else:
local_output = local_input
self.output_condition.acquire()
self.output_condition.state = False
self.output = np.copy(local_output)
self.output_condition.state = True
self.output_condition.notify()
self.output_condition.release()
            # Receive keyboard input
self.handle_input()
print("quitting")
audio.stop_streaming()
audio.close_device()
print("sharingan finished")
def audio_arrived(self, context, seq, in_data):
if(self.quit):
return in_data
self.input_lock.acquire()
# store data that will be processed by main thread
context.input = np.copy(in_data)
self.input_lock.release()
self.output_condition.acquire()
self.output_condition.state = False
        # wait for the main thread to finish processing
self.output_condition.wait(self.TIMEOUT_SEC)
# if timeout, use in_data to play, otherwise use processed data
is_timeout = self.output_condition.state == False
final_output = None
if(is_timeout):
print("timeout")
final_output = in_data
else:
final_output = np.copy(context.output)
self.output_condition.release()
# return data to play
return final_output
def should_quit(self):
if(self.quit):
return True
if(self.killer.kill_now):
return True
return False
def print_instruction(self):
print("input 'q' to quit")
print(" 'e' to enable effector(default)")
print(" 'd' to disable effector")
def handle_input(self):
ch = None
if select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []):
ch = sys.stdin.read(1)
if ch is None:
return
elif ch == 'q' or ch == 'Q':
print("quit")
self.quit = True
elif ch == 'e' or ch == 'E':
self.enabled = True
print("effector enabled")
        elif ch == 'd' or ch == 'D':
self.enabled = False
print("effector disabled")
if __name__ == "__main__":
sharingan = Sharingan()
sharingan.start()
|
426377
|
from __future__ import annotations
import asyncio
from datetime import datetime, timedelta
from typing import Any, Literal, Optional
import httpx
from ._httpx_args import merge_with_default_httpx_args
from ._lib.utils import remove_none
from ._resources import (
AdminEvents,
AttachedResources,
Authentication,
Clients,
ClientScopes,
Groups,
Roles,
Sessions,
Users,
)
class KeycloakAdmin:
"""Base class for Keycloak Admin API endpoints.
It handles the ``access_token`` and guarantees it being valid when using the
``get_access_token`` method or accessing a protected Keycloak resource.
"""
__keycloak_resources: AttachedResources = [
("roles", Roles),
("client_scopes", ClientScopes),
("users", Users),
("clients", Clients),
("admin_events", AdminEvents),
("authentication", Authentication),
("groups", Groups),
("sessions", Sessions),
]
roles: Roles
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_roles_resource"""
client_scopes: ClientScopes
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_client_scopes_resource"""
users: Users
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_users_resource"""
clients: Clients
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_clients_resource"""
admin_events: AdminEvents
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_realms_admin_resource"""
authentication: Authentication
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_authentication_management_resource"""
groups: Groups
"""https://www.keycloak.org/docs-api/15.0/rest-api/index.html#_groups_resource"""
sessions: Sessions
leeway: int
"""A token will be considered as expired seconds before its actual expiry controlled by this value."""
def __init__(
self,
server_url: str,
grant_type: Literal["client_credentials", "password"],
username: Optional[str] = None,
password: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
realm: str = "master",
leeway: int = 10,
httpx_args={},
):
"""Initialize ``KeycloakAdmin`` with either client or user credentials.
Should not be used directly. The usage
of ``with_client_credentials`` or ``with_password`` should be
preferred.
"""
allowed_grant_types = ["client_credentials", "password"]
if not any([grant_type == allowed for allowed in allowed_grant_types]):
raise Exception(f"'grant_type' needs to be in '{allowed_grant_types}'")
self._realm = realm
self._username = username
self._client_id = client_id
self._client_secret = client_secret
self._password = password
self._server_url = server_url
self._grant_type = grant_type
self.leeway = leeway
self.__connection = httpx.AsyncClient(
**merge_with_default_httpx_args(httpx_args)
)
self.__access_token = None
self.__refresh_token = None
self.__set_keycloak_resources()
self.__lock = asyncio.Lock()
@classmethod
def with_client_credentials(
cls,
server_url: str,
client_id: str,
client_secret: str,
realm: str = "master",
leeway: int = 10,
httpx_args={},
) -> KeycloakAdmin:
"""Instantiate ``KeycloakAdmin`` with ``client_id`` and ``client_secret``."""
return cls(
server_url,
grant_type="client_credentials",
client_id=client_id,
client_secret=client_secret,
realm=realm,
leeway=leeway,
httpx_args=httpx_args,
)
@classmethod
def with_password(
cls,
server_url: str,
username: Optional[str] = None,
password: Optional[str] = None,
client_id: Optional[str] = "admin-cli",
realm: str = "master",
leeway: int = 10,
httpx_args={},
) -> KeycloakAdmin:
"""Instantiate ``KeycloakAdmin`` with user credentials (username and password)."""
return cls(
server_url,
grant_type="password",
client_id=client_id,
username=username,
password=password,
realm=realm,
leeway=leeway,
httpx_args=httpx_args,
)
@property
def realm(self):
"""Realm property."""
return self._realm
@property
def username(self):
"""Username property."""
return self._username
@property
def client_id(self):
"""Client id property."""
return self._client_id
@property
def client_secret(self):
"""Client secret property."""
        return self._client_secret
@property
def password(self):
"""Password property."""
return self._password
@property
def server_url(self):
"""Server url property."""
return self._server_url
@property
def grant_type(self):
"""Token endpoint grant type."""
return self._grant_type
def get_url(self):
"""Get the admin api base url."""
return f"{self._server_url}/admin/realms/{self._realm}"
async def close(self):
"""Closes open httpx connection."""
await self.__connection.aclose()
def close_sync(self):
"""Synchronously close open httpx connection."""
loop = asyncio.get_event_loop()
loop.run_until_complete(self.close())
def __set_keycloak_resources(self):
for resources_name, resource in self.__keycloak_resources:
setattr(
self,
resources_name,
resource(self.__get_connection, self.get_url),
)
async def __aenter__(self) -> KeycloakAdmin:
"""For entering asynchronous context manger."""
return self
async def __aexit__(self, *_, **__):
"""Cleanup for asynchronous context manger."""
await self.close()
async def get_access_token(self):
"""Get ``access_token``. Guaranteed not to be expired."""
async with self.__lock:
if not self.__access_token:
await self.__token()
else:
now = datetime.now()
if now > self.access_token_expire:
if now < self.refresh_token_expire:
await self.__token_refresh()
else:
await self.__token()
return self.__access_token
def get_token_url(self) -> str:
"""Openid connect token endpoint url."""
return f"{self._server_url}/realms/{self._realm}/protocol/openid-connect/token"
def __parse_token_response(self, token_response: dict[str, Any]):
self.__access_token = token_response["access_token"]
self.__refresh_token = token_response.get("refresh_token")
self.access_token_expire = datetime.now() + timedelta(
seconds=token_response["expires_in"] - self.leeway
)
self.refresh_token_expire = datetime.now() + timedelta(
seconds=token_response.get("refresh_expires_in", 0) - self.leeway
)
async def __token_refresh(self):
headers = httpx.Headers({"Content-Type": "application/x-www-form-urlencoded"})
payload = remove_none(
{
"client_id": self._client_id,
"client_secret": self._client_secret,
"grant_type": "refresh_token",
"refresh_token": self.__refresh_token,
}
)
try:
response = await self.__connection.post(
self.get_token_url(), data=payload, headers=headers
)
self.__parse_token_response(response.json())
except httpx.HTTPStatusError as ex:
except_errors = [
"Refresh token expired",
"Token is not active",
"Session not active",
]
error_description = ex.response.json().get("error_description", "")
if ex.response.status_code == 400 and any(
error in error_description for error in except_errors
):
await self.__token()
async def __token(self):
headers = httpx.Headers({"Content-Type": "application/x-www-form-urlencoded"})
payload = remove_none(
{
"client_id": self._client_id,
"grant_type": self._grant_type,
"client_secret": self._client_secret,
"username": self._username,
"password": self._password,
}
)
response = await self.__connection.post(
self.get_token_url(), data=payload, headers=headers
)
self.__parse_token_response(response.json())
async def __get_connection(self):
access_token = await self.get_access_token()
def auth_interceptor(request: httpx.Request) -> httpx.Request:
request.headers["Authorization"] = f"Bearer {access_token}"
return request
self.__connection.auth = auth_interceptor
return self.__connection
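# Hedged usage sketch (hypothetical server URL and credentials; not part of the
# original module):
if __name__ == "__main__":
    async def _demo():
        async with KeycloakAdmin.with_password(
            "http://localhost:8080", username="admin", password="admin"
        ) as admin:
            print("access token prefix:", (await admin.get_access_token())[:16])
    # asyncio.run(_demo())  # uncomment to run against a reachable Keycloak instance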
|
426438
|
import acurl_ng
def create_request(
method,
url,
headers=(),
cookies=(),
auth=None,
data=None,
cert=None,
):
# Cookies should be the byte string representation of the cookie
if isinstance(method, str):
method = method.encode()
headers = tuple(h.encode("utf-8") if hasattr(h, "encode") else h for h in headers)
return acurl_ng.Request(method, url, headers, cookies, auth, data, cert)
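# Hedged usage sketch (kept as a comment because the exact encoding acurl_ng
# expects for url and cookies is not shown in this file):
#
#     req = create_request("GET", "https://example.com",
#                          headers=("Accept: application/json",))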
|
426443
|
from typing import Union
from scrapy.http import Response, TextResponse
from scrapypuppeteer import PuppeteerRequest
from scrapypuppeteer.actions import GoTo, PuppeteerServiceAction
class PuppeteerResponse(Response):
def __init__(self,
url: str,
puppeteer_request: PuppeteerRequest,
context_id: str,
page_id: str,
**kwargs):
self.puppeteer_request = puppeteer_request
self.context_id = context_id
self.page_id = page_id
super().__init__(url, **kwargs)
def follow(self,
action: Union[str, PuppeteerServiceAction],
close_page=True,
**kwargs) -> PuppeteerRequest:
"""
Execute action in same browser page.
:param action: URL (may be relative) or browser action.
:param close_page: whether to close page after request completion
:param kwargs:
:return:
"""
page_id = None if self.puppeteer_request.close_page else self.page_id
if isinstance(action, str):
action = self.urljoin(action)
elif isinstance(action, GoTo):
action.url = self.urljoin(action.url)
else:
kwargs['url'] = self.url
kwargs['dont_filter'] = True
return PuppeteerRequest(action, context_id=self.context_id, page_id=page_id,
close_page=close_page, **kwargs)
class PuppeteerHtmlResponse(PuppeteerResponse, TextResponse):
"""
scrapy.TextResponse capturing state of a page in browser.
Additionally exposes received html and cookies via corresponding attributes.
"""
def __init__(self, url, puppeteer_request, context_id, page_id, **kwargs):
self.html = kwargs.pop('html')
self.cookies = kwargs.pop('cookies')
kwargs.setdefault('body', self.html)
kwargs.setdefault('encoding', 'utf-8')
super().__init__(url, puppeteer_request, context_id, page_id, **kwargs)
class PuppeteerJsonResponse(PuppeteerResponse):
"""
Response for CustomJsAction.
Result is available via self.data object.
"""
def __init__(self, url, puppeteer_request, context_id, page_id, **kwargs):
self.data = kwargs
super().__init__(url, puppeteer_request, context_id, page_id)
class PuppeteerScreenshotResponse(PuppeteerResponse):
"""
Response for Screenshot action.
Screenshot is available via self.screenshot as base64 encoded string.
"""
def __init__(self, url, puppeteer_request, context_id, page_id, **kwargs):
self.screenshot = kwargs.get('screenshot')
super().__init__(url, puppeteer_request, context_id, page_id, **kwargs)
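# Hedged usage sketch (hypothetical Scrapy spider callback, not part of the
# original module):
#
#     def parse(self, response: PuppeteerHtmlResponse):
#         for href in response.css("a::attr(href)").getall():
#             yield response.follow(href, callback=self.parse, close_page=False)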
|
426445
|
from . import prefix
from fastapi import APIRouter
from fastapi.responses import JSONResponse
import starlette.status as status_code
from src.domain_logic.engagement_domain import EngagementDomain
from src.usecases.insert.insert_engagement import insert_engagement
from ..usecases.update.update_engagement import update_engagement
engagement_router = APIRouter(
prefix=f"{prefix}/engagement", tags=["engagements"])
@engagement_router.post(path="/insert",
response_model=EngagementDomain,
status_code=status_code.HTTP_201_CREATED)
async def insert(engagement: EngagementDomain):
result = await insert_engagement(engagement_domain=engagement)
if result:
return JSONResponse(content=engagement.__dict__, media_type="application/json")
error = {
"ERROR": f"The blog with id {engagement.blog_id} was not found"
}
return JSONResponse(content=error, media_type="application/json",
status_code=status_code.HTTP_422_UNPROCESSABLE_ENTITY)
@engagement_router.post(path="/update", response_model=EngagementDomain, status_code=status_code.HTTP_201_CREATED)
async def update(engagement: EngagementDomain):
result = await update_engagement(engagement_domain=engagement)
if result:
return JSONResponse(content=engagement.__dict__, media_type="application/json")
error = {
"ERROR": f"No engagement was found"
}
return JSONResponse(content=error, media_type="applicatoin/json",
status_code=status_code.HTTP_422_UNPROCESSABLE_ENTITY)
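# Hedged wiring sketch (hypothetical application module, not part of this file):
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(engagement_router)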
|
426470
|
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from random import randint #, shuffle, sample
import numpy as np
import pytest
from mmgroup import MM0, MMV, MMSpace, Cocode
from mmgroup.clifford12 import leech2matrix_eval_A_odd_mod15_aux
from mmgroup.clifford12 import leech2matrix_eval_A
from mmgroup.mm import mm_aux_index_extern_to_sparse
from mmgroup.mm import mm_aux_index_leech2_to_sparse
from mmgroup.mm import mm_aux_index_sparse_to_leech2
from mmgroup.mm import mm_aux_index_sparse_to_leech
V = MMV(15)
##################################################################
def eval_a_odd(v, signs):
a = np.zeros(2, dtype = np.uint64)
x = leech2matrix_eval_A_odd_mod15_aux(v.data, signs, a)
v1 = [((int(a[i >> 4]) >> (4 * (i & 15)))) & 15 for i in range(24)]
w = np.array(v1, dtype = np.int32) % 15
return x, w
def eval_a_odd_ref(v, signs):
A = np.array(v["A"], dtype = np.int32)
v_list = [(-1)**((signs >> i) & 1) for i in range(24)]
v1 = np.array(v_list, dtype = np.int32)
w = (v1 @ A) % 15
return (w @ v1) % 15, w
def eval_a_odd_testdata():
data = [
(V("A",2,2), 0x3),
]
for d in data:
yield d
for i in range(5):
v = V('R')
for j in range(10):
yield v, randint(0, 0xffffff)
@pytest.mark.involution
def test_eval_a_odd(verbose = 0):
for n, (v, b) in enumerate(eval_a_odd_testdata()):
if verbose:
print("Test %d" % (n+1))
print("b =", hex(b))
print(v["A"])
x, w = eval_a_odd(v, b)
x_ref, w_ref = eval_a_odd_ref(v, b)
assert (w == w_ref).all(), (w, w_ref)
assert x == x_ref, (x, x_ref)
##################################################################
def eval_a_ref(v, v2, e = 0):
if (e % 3):
v = v * MM0('t', e)
A = np.array(v["A"], dtype = np.int32)
v2_sp = mm_aux_index_leech2_to_sparse(v2)
v_2 = np.zeros(24, dtype = np.int32)
res = mm_aux_index_sparse_to_leech(v2_sp, v_2)
return (v_2 @ A @ v_2) % 15
def rand_leech2():
ext = 300 + randint(0, 98279)
sp = mm_aux_index_extern_to_sparse(ext)
return mm_aux_index_sparse_to_leech2(sp)
def eval_a_testdata():
data = [
(V("A",2,2), Cocode([2,3]).ord, 0),
]
for d in data:
yield d
for i0 in range(24):
for i1 in range(i0):
yield V('R'), Cocode([i0,i1]).ord, 0
for k in range(100):
yield V('R'), rand_leech2(), 0
for e in (1,2):
for k in range(100):
yield V('R'), rand_leech2(), e
@pytest.mark.involution
def test_eval_a(verbose = 0):
for n, (v, v2, e) in enumerate(eval_a_testdata()):
if verbose:
print("Test %d" % (n+1))
print("v['A'] =")
print(v['A'])
print("v2 =", hex(v2), ", e =", e)
m = v.eval_A(v2, e)
m_ref = eval_a_ref(v, v2, e)
assert m == m_ref, (hex(v2), e, m, m_ref)
if e == 0:
m_orig = leech2matrix_eval_A(15, v.data, v2)
assert m == m_orig
|
426489
|
import logging
import sys
import requests
import unicodecsv as csv
from utils import validate_basic_params, is_int_string
log = logging.getLogger()
"""
Read in threat intel csv file and post as json to cluster.
@return Nothing; raises an exception on failure.
@param cluster: The name of your cluster, i.e. customer.portal.jask.io
@param username: The username to use when authenticating to Trident.
@param api_key: API key used to authenticate the specified username.
@param filename: The name of the threat intel csv file to be posted.
@param default_confidence: The default confidence level for intel from this file.
"""
def threat_intel_from_csv(cluster, username, api_key, filename, default_confidence):
if not filename:
log.error('Filename not specified')
raise ValueError('Filename not specified')
validate_basic_params(cluster, username, api_key)
results = []
with open(filename, 'rb') as csvfile:
csvreader = csv.DictReader(csvfile)
intel_fields = ['value', 'confidence', 'source', 'tags', 'ttl', 'override_ttl']
for row in csvreader:
(value, confidence, source, tags, ttl, override_ttl) = (row.get(f) for f in intel_fields)
if confidence and is_int_string(confidence):
confidence = int(confidence)
if ttl and is_int_string(ttl):
ttl = int(ttl)
results.append({
'value': value,
'confidence': confidence or default_confidence,
'source': source or 'User Import',
'tags': tags.split(',') if tags else [],
'ttl': ttl or None,
'active': True,
'override_ttl': True if (override_ttl and override_ttl.lower()=='true') else False
})
intel = {'objects': results}
params = {'username': username, 'api_key': api_key}
url = 'https://%s/api/intelligence' % cluster
response = requests.post(url, json=intel, params=params)
response.raise_for_status()
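# Illustrative CSV layout for the loader above; the header names mirror intel_fields
# and the row values are hypothetical:
#
#   value,confidence,source,tags,ttl,override_ttl
#   1.2.3.4,80,OSINT feed,"malware,botnet",86400,true
#
# Each row becomes one JSON object; a missing confidence or source falls back to
# default_confidence and 'User Import', and tags are split on commas.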
|
426555
|
from django.conf.urls import patterns, url
from webalyzer.collector import views
urlpatterns = patterns(
'',
url(
'^check/(?P<source_type>[\w]+)/'
'(?P<domain>[\w\.]+)/'
'(?P<source_hash>[\-\w\.]+)$',
views.collect_check,
name='collect_check'
),
url(r'^$', views.collect, name='collect'),
)
|
426573
|
import torch
from en_transformer.utils import rot
from en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
def test_readme():
model = EnTransformer(
dim = 512,
depth = 1,
dim_head = 64,
heads = 8,
edge_dim = 4,
neighbors = 6
)
feats = torch.randn(1, 32, 512)
coors = torch.randn(1, 32, 3)
edges = torch.randn(1, 32, 1024, 4)
mask = torch.ones(1, 32).bool()
feats, coors = model(feats, coors, edges, mask = mask)
assert True, 'it runs'
def test_equivariance():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
rel_pos_emb = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_cross_product():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
rel_pos_emb = True,
use_cross_product = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_nearest_neighbors():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
neighbors = 5
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_sparse_neighbors():
model = EnTransformer(
dim = 512,
depth = 1,
heads = 4,
dim_head = 32,
neighbors = 0,
only_sparse_neighbors = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
i = torch.arange(feats.shape[1])
adj_mat = (i[:, None] <= (i[None, :] + 1)) & (i[:, None] >= (i[None, :] - 1))
feats1, coors1 = model(feats, coors @ R + T, adj_mat = adj_mat)
feats2, coors2 = model(feats, coors, adj_mat = adj_mat)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_depth():
model = EnTransformer(
dim = 8,
depth = 12,
edge_dim = 4,
neighbors = 16
)
feats = torch.randn(1, 128, 8)
coors = torch.randn(1, 128, 3)
edges = torch.randn(1, 128, 128, 4)
feats, coors = model(feats, coors, edges)
assert not torch.any(torch.isnan(feats)), 'no NaN in features'
assert not torch.any(torch.isnan(coors)), 'no NaN in coordinates'
|
426577
|
import sys
## Helper script to convert from simple keyframe-list formatted results to the temporal refinement results file format ('frames' mode)
## Not necessary to use this script, just a convenience if you want to print your system's results in a simple
## keyframe-list format and then use this script to convert your results to a format used by the evaluation scripts
def convertFileFormat_keyframeToTemporalRefinement(keyframe_results_filename, temporal_refinement_results_filename, ground_truth_filename):
f = open(ground_truth_filename)
ground_truth_lines = f.readlines()
f.close()
fin = open(keyframe_results_filename, 'r')
fout = open(temporal_refinement_results_filename, 'w')
while True:
line = fin.readline()
if line == '':
break
if not line.startswith("Query"):
print "Error: query line is malformed: %s" % line
exit()
query_num = int(line.split()[-1])
print "On Query %d" % query_num
fout.write(line)
results_list = []
results_subdict = {}
while True:
file_pos = fin.tell()
line = fin.readline()
if line.startswith("Query"):
fin.seek(file_pos)
break
if line == '':
break
video_name = line[0:line.rindex('_')] + ".mp4"
if video_name not in ground_truth_lines[query_num]:
continue
if video_name not in results_list:
results_list.append(video_name)
results_subdict[video_name] = []
keyframe_number = int(line[line.rindex('/')+1 : line.rindex('.')])
if keyframe_number not in results_subdict[video_name]:
results_subdict[video_name].append(keyframe_number)
for video in results_list:
fout.write("%s" % video)
for keyframe_number in results_subdict[video]:
fout.write(",%d" % keyframe_number)
fout.write('\n')
fin.close()
fout.close()
def printUsage():
print "Usage: python " + sys.argv[0] + " keyframe_results_filename temporal_refinement_results_filename ground_truth_filename"
print "keyframe_results_filename: file following the keyframe_results_file_format.txt rules"
print "temporal_refinement_results_filename: output filename for results in temporal refinement file format"
print "ground_truth_filename: light_dataset_public.txt or full_dataset_public.txt"
if __name__ == "__main__":
if len(sys.argv) < 4:
printUsage()
exit()
keyframe_results_filename = sys.argv[1]
temporal_refinement_results_filename = sys.argv[2]
ground_truth_filename = sys.argv[3]
convertFileFormat_keyframeToTemporalRefinement(keyframe_results_filename, temporal_refinement_results_filename, ground_truth_filename)
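# Illustrative parsing rule, matching the string handling above (the sample line is
# hypothetical): for a result line such as "clip_0042/000123.jpg", everything before
# the last "_" plus ".mp4" becomes the video name ("clip.mp4"), and the digits between
# the last "/" and the last "." become the keyframe number (123).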
|
426589
|
from subprocess import call
CWD = "../"
DEGREE = [8, 16, 32, 48, 64]
DELTA = [0.1, 0.2, 0.3, 0.4, 0.5]
call(['make'], cwd=CWD)
call(['./bin/decode_client', '-s'], cwd=CWD)
for degree in DEGREE:
for delta in DELTA:
print(degree, delta)
call(['./bin/decode_server', '--degree', str(degree), '--delta', str(delta)], cwd=CWD)
call(['./bin/decode_client', '-r', '-o', 'decode_out/out_{}_{}.png'.format(degree, delta)], cwd=CWD)
|
426602
|
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.utils import translation
class LanguageURLSpecifyMiddleware(MiddlewareMixin):
"""
    Checks whether the URL contains a language code, e.g. "domain.com/ru/something/",
    and if so activates that language for the user and handles the request
    using the path without the language code, e.g. "domain.com/something/".
    Made for SEO purposes; normal use of the website will not trigger this.
"""
def process_request(self, request):
path_split = request.path.split('/')
if path_split and len(path_split) >= 2:
language = path_split[1] # ex. "ru"
if language in settings.LANGUAGES_SHORT_CODES:
translation.activate(language)
request.path_info = '/'+'/'.join(path_split[2:])
if request.path[-1] != '/':
request.path += '/'
def process_response(self, request, response):
path_split = request.path.split('/')
if path_split and len(path_split) >= 2:
language = path_split[1] # ex. "ru"
if language in settings.LANGUAGES_SHORT_CODES:
translation.activate(language)
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, language)
if request.path[-1] != '/':
request.path += '/'
return response
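# Minimal settings sketch this middleware relies on (the values shown are illustrative):
#   LANGUAGES_SHORT_CODES = ['en', 'ru']       # project-specific list of URL language prefixes
#   LANGUAGE_COOKIE_NAME = 'django_language'   # Django's default cookie name
# With those settings a request to "domain.com/ru/something/" activates Russian,
# is processed as "/something/", and the response sets the language cookie to "ru".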
|
426633
|
import httplib2
from lxml.html import parse
from .pastesource import PasteSource
class PastebinSource(PasteSource):
baseurl = 'http://pastebin.com'
def __init__(self, *args, **kwargs):
pass
def new_urls(self, backend):
doc = parse('http://pastebin.com/archive').getroot()
for link in doc.cssselect('.maintable tr td a'):
app = link.get('href')
if app.startswith('/archive/'):
continue
if not backend.already_visited_url(self.full_url(app)):
yield self, app
def get_paste(self, path):
url = 'http://pastebin.com/raw.php?i=' + path[1:]
http = httplib2.Http()
try:
res = http.request(url)
except AttributeError as e:
res = ({'status': '503'}, '')
return res
def full_url(self, path):
return self.baseurl + path
|
426664
|
import abc
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torchvision import transforms
class SiameseNetwork(nn.Module):
def __init__(self, model_path):
super(SiameseNetwork, self).__init__()
self.conv = models.__dict__['resnet50'](pretrained=True)
self.conv = torch.nn.Sequential(*(list(self.conv.children())[:-1]))
self.liner = nn.Sequential(nn.Linear(2048, 512), nn.Sigmoid())
self.out = nn.Linear(512, 1)
self.load_weights(model_path)
self.freeze_representation()
def load_weights(self, path):
weight_path = path
if os.path.isfile(weight_path):
checkpoint = torch.load(weight_path, map_location="cpu")
# rename pre-trained keys
state_dict = checkpoint
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('module') and not k.startswith('module.encoder_q.fc'):
# remove prefix
state_dict[k[len("module."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
self.load_state_dict(state_dict, strict=False)
else:
print("=> no checkpoint found at '{}'".format(weight_path))
def freeze_representation(self):
        # freeze roughly the first 90% of the backbone parameters
count = 0
for name, param in self.conv.named_parameters():
count += 1
count1 = 0
for name, param in self.conv.named_parameters():
count1 += 1
if (count1 < 0.9 * count):
param.requires_grad = False
@abc.abstractmethod
def forward(self, output1, output2):
pass
@abc.abstractmethod
def predict(self, images):
pass
@staticmethod
def load_data_to_gpu(images):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
image_transformation = transforms.Compose([
normalize
])
refs = images[0]
probes = images[1]
refs = torch.from_numpy(refs).transpose(1, 3).transpose(2, 3).float()
probes = torch.from_numpy(probes).transpose(1, 3).transpose(2, 3).float()
for i in range(len(refs)):
refs[i] = image_transformation(refs[i])
for i in range(len(probes)):
probes[i] = image_transformation(probes[i])
refs = refs.cuda()
probes = probes.cuda()
return refs, probes
def forward_sister_network(self, x):
x = self.conv(x)
x = x.reshape(x.size(0), -1)
return x
class SiameseNetworkWithSigmoid(SiameseNetwork):
def __init__(self, model_path):
super(SiameseNetworkWithSigmoid, self).__init__(model_path)
def forward(self, x1, x2):
out1 = self.forward_sister_network(x1)
out1 = self.liner(out1)
out2 = self.forward_sister_network(x2)
out2 = self.liner(out2)
dis = torch.abs(out1 - out2)
out = self.out(dis)
return torch.sigmoid(out)
def predict(self, images):
refs, probes = self.load_data_to_gpu(images)
output = (self.forward(refs, probes)).detach().cpu().numpy()
return output
class SiameseNetworkWith2048Distance(SiameseNetwork):
def __init__(self, model_path):
super(SiameseNetworkWith2048Distance, self).__init__(model_path)
def forward(self, output1, output2):
output1 = self.forward_sister_network(output1)
output2 = self.forward_sister_network(output2)
return output1, output2
def predict(self, images):
refs, probes = self.load_data_to_gpu(images)
output1, output2 = self.forward(refs, probes)
output = F.pairwise_distance(output1, output2).detach().cpu().numpy()
output = 1 / (1 + output)
return output
class SiameseNetworkWith512Distance(SiameseNetwork):
def __init__(self, model_path):
super(SiameseNetworkWith512Distance, self).__init__(model_path)
def forward(self, output1, output2):
out1 = self.forward_sister_network(output1)
output1 = self.liner(out1)
out2 = self.forward_sister_network(output2)
output2 = self.liner(out2)
return output1, output2
def predict(self, images):
refs, probes = self.load_data_to_gpu(images)
output1, output2 = self.forward(refs, probes)
output = F.pairwise_distance(output1, output2).detach().cpu().numpy()
output = 1 / (1 + output)
return output
|
426668
|
import os
import sqlite3
try:
    from source.utils import *
except:
    from utils import *
class Metadata_SQLITE_Connector():
def __init__(self,metadata_file):
self.metadata_file_tsv=metadata_file
self.db_file=metadata_file.replace('.tsv','')+'.db'
if not file_exists(self.metadata_file_tsv):
print(f'Metadata file missing: {metadata_file}')
return
self.insert_step=5000
self.info_splitter='##'
if not file_exists(self.db_file):
print('Creating SQL database',self.db_file)
self.create_sql_table()
self.start_sqlite_cursor()
def start_sqlite_cursor(self):
self.sqlite_connection = sqlite3.connect(self.db_file)
self.cursor = self.sqlite_connection.cursor()
self.get_db_headers()
def commit_and_close_sqlite_cursor(self):
self.sqlite_connection.commit()
self.sqlite_connection.close()
def close_sql_connection(self):
try:
self.sqlite_connection.close()
except:
return
def check_all_tables(self):
self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
all_tables = self.cursor.fetchall()
print(all_tables)
def convert_row_to_sql(self,ref,row_info):
res=[ref]
for db in self.db_headers:
if db in row_info:
res.append(self.info_splitter.join(row_info[db]))
else:
res.append(None)
return res
def generate_insert_command(self):
headers_str=', '.join(self.db_headers)
headers_str=f'(REF, {headers_str})'.upper()
n_values=['?' for i in range(len(self.db_headers)+1)]
n_values_str=', '.join(n_values)
n_values_str=f'({n_values_str})'
insert_command = f'INSERT INTO METADATA {headers_str} values {n_values_str}'
return insert_command
def generate_fetch_command(self,ref_id):
headers_str=', '.join(self.db_headers)
headers_str=f'REF, {headers_str}'.upper()
fetch_command = f'SELECT {headers_str} FROM METADATA WHERE REF="{ref_id}"'
return fetch_command
def yield_metadata(self):
res=[]
with open(self.metadata_file_tsv, 'r') as file:
for line in file:
row_info={}
line = line.strip('\n')
line = line.split('\t')
current_ref = line[0]
if '|' in line: line.remove('|')
annotations = line[1:]
for link in annotations:
if link:
temp_link = link.split(':')
link_type = temp_link[0]
link_text = ':'.join(temp_link[1:])
link_text=link_text.strip()
if link_type not in row_info: row_info[link_type]=set()
row_info[link_type].add(link_text)
if link_type == 'description' and link_text == 'NA':
link_text = None
if link_text and link_type == 'description':
get_common_links_metadata(link_text, res=row_info)
res.append(self.convert_row_to_sql(current_ref,row_info))
return res
def get_db_headers(self):
res = set()
try:
schema_command = f'PRAGMA table_info(METADATA);'
res_fetch = self.cursor.execute(schema_command).fetchall()
res_fetch.pop(0)
for line in res_fetch:
link_type=line[1]
res.add(link_type)
except:
with open(self.metadata_file_tsv, 'r') as file:
for line in file:
line = line.strip('\n')
line = line.split('\t')
annotations = line[2:]
for link in annotations:
if link:
temp_link = link.split(':')
link_type = temp_link[0]
res.add(link_type)
self.db_headers=sorted(list(res))
def create_sql_table(self):
self.get_db_headers()
if os.path.exists(self.db_file):
os.remove(self.db_file)
self.start_sqlite_cursor()
create_table_command = f'CREATE TABLE METADATA (REF TEXT, '
for header in self.db_headers:
create_table_command+=f'{header.upper()} TEXT, '
create_table_command=create_table_command.rstrip(', ')
create_table_command+=')'
self.cursor.execute(create_table_command)
self.sqlite_connection.commit()
create_index_command=f'CREATE INDEX REF_IDX ON METADATA (REF)'
self.cursor.execute(create_index_command)
self.store_metadata()
self.commit_and_close_sqlite_cursor()
def generate_inserts(self, metadata):
step=self.insert_step
for i in range(0, len(metadata), step):
yield metadata[i:i + step]
def store_metadata(self):
insert_command=self.generate_insert_command()
metadata_yielder=self.yield_metadata()
generator_insert = self.generate_inserts(metadata_yielder)
for table_chunk in generator_insert:
self.cursor.executemany(insert_command, table_chunk)
self.sqlite_connection.commit()
def convert_sql_to_dict(self,sql_result):
sql_result=sql_result[1:]
res={}
for i in range(len(self.db_headers)):
db=self.db_headers[i].lower()
db_res=sql_result[i]
if db_res:
db_res=db_res.split(self.info_splitter)
if db not in res: res[db]=set()
res[db].update(db_res)
return res
def fetch_metadata(self,ref_id):
if not file_exists(self.db_file):
return {}
fetch_command=self.generate_fetch_command(ref_id)
res_fetch=self.cursor.execute(fetch_command).fetchone()
try:
res=self.convert_sql_to_dict(res_fetch)
return res
except:
print(f'Failed retrieving {ref_id} in {self.db_file}')
return {}
def test_database(self):
res=set()
if not file_exists(self.metadata_file_tsv): return res
with open(self.metadata_file_tsv) as file:
for line in file:
ref=line.split('\t')[0]
try:
ref_info=self.fetch_metadata(ref)
except:
print(f'Failed retrieving {ref} in {self.db_file}')
res.add(ref)
return res
if __name__ == '__main__':
import time
metadata_connector=Metadata_SQLITE_Connector('/media/HDD/data/mantis_references/NOG_dmnd/10/metadata.tsv')
metadata_connector.test_database()
start=time.time()
for i in range(10000):
res=metadata_connector.fetch_metadata('1134474.O59_000005')
print(time.time()-start)
|
426671
|
import logging
import os
import numpy as np
_log = logging.getLogger('test_webbpsf')
_log.addHandler(logging.NullHandler())
from .. import webbpsf_core
# ------------------ MIRI Tests ----------------------------
from .test_webbpsf import generic_output_test, do_test_source_offset, do_test_set_position_from_siaf
test_miri = lambda: generic_output_test('MIRI')
test_miri_source_offset_00 = lambda: do_test_source_offset('MIRI', theta=0.0, monochromatic=8e-6)
test_miri_source_offset_45 = lambda: do_test_source_offset('MIRI', theta=45.0, monochromatic=8e-6)
test_miri_set_siaf = lambda: do_test_set_position_from_siaf('MIRI',
['MIRIM_SUB128', 'MIRIM_FP1MIMF', 'MIRIM_BRIGHTSKY',
'MIRIM_TASLITLESSPRISM', ])
def do_test_miri_fqpm(nlambda=1, clobber=True, angle=0.0, offset=0.0, oversample=2, outputdir=None, display=False,
save=False):
miri = webbpsf_core.MIRI()
miri.pupilopd = None
miri.filter = 'F1065C'
miri.image_mask = 'FQPM1065'
miri.pupil_mask = 'MASKFQPM'
# for offset in np.linspace(0.0, 1.0, nsteps):
# miri.options['source_offset_theta'] = 0.0
miri.options['source_offset_r'] = offset
# for angle in [0,45]:
miri.options['source_offset_theta'] = angle
psf = miri.calc_psf(oversample=oversample, nlambda=nlambda, save_intermediates=False, display=display)
if save:
if outputdir is None:
import tempfile
outputdir = tempfile.gettempdir()
fn = os.path.join(outputdir, 'test_miri_fqpm_t{0}_r{1:.2f}.fits'.format(angle, offset))
psf.writeto(fn, clobber=clobber)
# FIXME - add some assertion tests here.
def test_miri_fqpm_centered(*args, **kwargs):
do_test_miri_fqpm(angle=0.0, offset=0.0)
def test_miri_fqpm_offset_00(*args, **kwargs):
do_test_miri_fqpm(angle=0.0, offset=1.0)
def test_miri_fqpm_offset_45(*args, **kwargs):
do_test_miri_fqpm(angle=45.0, offset=1.0)
def test_miri_aperturename():
""" Test aperture name functionality """
miri = webbpsf_core.MIRI()
assert miri.aperturename == miri._detectors[miri.detector], "Default SIAF aperture is not as expected"
ref_tel_coords = miri._tel_coords()
miri.aperturename = 'MIRIM_SUB256'
assert miri.detector_position == (128, 128), "Changing to a subarray aperture didn't change the " \
"reference pixel coords as expected"
assert np.any( miri._tel_coords() != ref_tel_coords), "Changing to a subarray aperture didn't change the V2V3 coords as expected."
|
426697
|
import datetime
from unittest import TestCase
from ocd_backend.models.postgres_database import PostgresDatabase
|
426706
|
import jupytext
def test_remove_encoding_907(tmp_path, python_notebook):
# Pair all notebooks to py:percent files
(tmp_path / "jupytext.toml").write_text('formats="ipynb,py:percent"')
# Create a contents manager
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmp_path)
# Save the notebook in Jupyter
cm.save(dict(type="notebook", content=python_notebook), path="nb.ipynb")
# No encoding is present in the py file
py = (tmp_path / "nb.py").read_text()
assert "coding" not in py
# Add the encoding line
py = "# -*- coding: utf-8 -*-\n" + py
(tmp_path / "nb.py").write_text(py)
# Reload the notebook
nb = cm.get("nb.ipynb")["content"]
assert "encoding" in nb.metadata["jupytext"]
# Save the notebook
cm.save(dict(type="notebook", content=nb), path="nb.ipynb")
# The encoding is still present in the py file
py = (tmp_path / "nb.py").read_text()
assert py.startswith("# -*- coding: utf-8 -*-")
# Remove the encoding (mock ipyupgrade)
py = "\n".join(py.splitlines()[1:])
(tmp_path / "nb.py").write_text(py)
# Reload the notebook - the encoding is not there anymore
nb = cm.get("nb.ipynb")["content"]
assert "encoding" not in nb.metadata["jupytext"]
# Save the notebook - the encoding is not there anymore
py = (tmp_path / "nb.py").read_text()
assert "coding" not in py
|
426727
|
import argparse
from tensorflow.keras.datasets import mnist
from keras.utils.np_utils import to_categorical
from onnx2keras import onnx_to_keras
import onnx
parser = argparse.ArgumentParser(description='Keras MNIST ONNX import example')
parser.add_argument('--model-path', type=str, default="onnx_models/conv2D_mnist.onnx",
help='Path of the onnx file to load')
parser.add_argument('--input-1D', action='store_true', default=False,
help='To change the input size to a 784 length vector')
parser.add_argument('--no-channel', action='store_true', default=False,
help='If --input-1D is enabled, removes the channel dimension. (bs, 1, 784) -> (bs, 784)')
args = parser.parse_args()
# Load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
if args.input_1D:
if args.no_channel:
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
else:
x_train = x_train.reshape((x_train.shape[0], 1, 784))
x_test = x_test.reshape((x_test.shape[0], 1, 784))
else:
x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
# Get one hot encoding from labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print("Train data shape:", x_train.shape)
print("Train labels shape:", y_train.shape)
print("Test data shape:", x_test.shape)
print("Test labels shape:", y_test.shape)
# Load ONNX model
onnx_model = onnx.load(args.model_path)
# Call the converter (input - is the main model input name, can be different for your model)
model = onnx_to_keras(onnx_model, ['input1'])
model.compile(loss='categorical_crossentropy',
optimizer="adam",
metrics=['accuracy'])
model.summary()
# Evaluation
acc = model.evaluate(x_test, y_test)
print("Evaluation result: Loss:", acc[0], " Accuracy:", acc[1])
|
426732
|
import mmcv
import numpy as np
import torch
from torch.utils.data import Dataset
from openselfsup.utils import build_from_cfg
from torchvision.transforms import Compose
import torchvision.transforms.functional as TF
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
from .utils import to_numpy
def get_max_iou(pred_boxes, gt_box):
"""
    pred_boxes : coordinates of the predicted bounding boxes, each given as (x, y, w, h)
    gt_box : coordinates of the ground-truth bounding box (x, y, w, h)
    return : the max IoU score between pred_boxes and gt_box
"""
# 1.get the coordinate of inters
ixmin = np.maximum(pred_boxes[:, 0], gt_box[0])
ixmax = np.minimum(pred_boxes[:, 0] + pred_boxes[:, 2], gt_box[0] + gt_box[2])
iymin = np.maximum(pred_boxes[:, 1], gt_box[1])
iymax = np.minimum(pred_boxes[:, 1] + pred_boxes[:, 3], gt_box[1] + gt_box[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
# 2. calculate the area of inters
inters = iw * ih
# 3. calculate the area of union
uni = (pred_boxes[:, 2] * pred_boxes[:, 3] + gt_box[2] * gt_box[3] - inters)
# 4. calculate the overlaps and find the max overlap between pred_boxes and gt_box
iou = inters / uni
iou_max = np.max(iou)
return iou_max
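# Illustrative check for get_max_iou (hypothetical boxes, all in (x, y, w, h) format):
#   pred_boxes = np.array([[0., 0., 2., 2.], [1., 1., 2., 2.]])
#   gt_box = np.array([1., 1., 2., 2.])
# The first box overlaps the ground truth on a 1x1 square (IoU = 1 / (4 + 4 - 1) ~ 0.14),
# the second matches it exactly (IoU = 1.0), so get_max_iou returns 1.0.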
def box_filter(boxes, min_size=20, max_ratio=None, topN=None, max_iou_thr=None):
proposal = []
for box in boxes:
# Calculate width and height of the box
w, h = box[2], box[3]
# Filter for size
if min_size:
if w < min_size or h < min_size:
continue
# Filter for box ratio
if max_ratio:
if w / h > max_ratio or h / w > max_ratio:
continue
# Filter for overlap
if max_iou_thr:
if len(proposal):
iou_max = get_max_iou(np.array(proposal), np.array(box))
if iou_max > max_iou_thr:
continue
proposal.append(box)
if not len(proposal): # ensure at least one box for each image
proposal.append(boxes[0])
if topN:
if topN <= len(proposal):
return proposal[:topN]
else:
return proposal
else:
return proposal
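# Illustrative use of box_filter (hypothetical boxes in (x, y, w, h) format):
#   boxes = [[0, 0, 120, 80], [5, 5, 15, 15], [0, 0, 300, 40]]
#   box_filter(boxes, min_size=20, max_ratio=3, topN=10, max_iou_thr=0.5)
# The 15x15 box is dropped by the size filter and the 300x40 box by the aspect-ratio
# filter (300 / 40 > 3), so only the first box is returned.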
@DATASETS.register_module
class CorrespondenceDataset(Dataset):
"""Dataset for generating corresponding intra- and inter-RoIs.
"""
def __init__(self,
data_source,
format_pipeline,
patch_size=224,
min_size=96,
max_ratio=3,
topN=100,
max_iou_thr=0.5,
knn_image_num=10,
topk_bbox_ratio=0.1,
prefetch=False):
self.data_source = build_datasource(data_source)
format_pipeline = [build_from_cfg(p, PIPELINES) for p in format_pipeline]
self.format_pipeline = Compose(format_pipeline)
self.patch_size = patch_size
self.min_size = min_size
self.max_ratio = max_ratio
self.topN = topN
self.max_iou_thr = max_iou_thr
self.knn_image_num = knn_image_num
self.topk_bbox_ratio = topk_bbox_ratio
self.prefetch = prefetch
def __len__(self):
return self.data_source.get_length()
def __getitem__(self, idx):
img, knn_imgs, box, knn_boxes = self.data_source.get_sample(idx)
filtered_box = box_filter(box, self.min_size, self.max_ratio, self.topN, self.max_iou_thr)
filtered_knn_boxes = [
box_filter(knn_box, self.min_size, self.max_ratio, self.topN, self.max_iou_thr)
for knn_box in knn_boxes
]
patch_list = []
for x, y, w, h in filtered_box:
patch = TF.resized_crop(img, y, x, h, w, (self.patch_size, self.patch_size))
if self.prefetch:
patch = torch.from_numpy(to_numpy(patch))
else:
patch = self.format_pipeline(patch)
patch_list.append(patch)
knn_patch_lists = []
for k in range(len(knn_imgs)):
knn_patch_list = []
for x, y, w, h in filtered_knn_boxes[k]:
patch = TF.resized_crop(knn_imgs[k], y, x, h, w, (self.patch_size, self.patch_size))
if self.prefetch:
patch = torch.from_numpy(to_numpy(patch))
else:
patch = self.format_pipeline(patch)
knn_patch_list.append(patch)
knn_patch_lists.append(torch.stack(knn_patch_list))
filtered_box = torch.from_numpy(np.array(filtered_box))
filtered_knn_boxes = [torch.from_numpy(np.array(knn_box)) for knn_box in filtered_knn_boxes]
knn_img_keys = ['{}nn_img'.format(k) for k in range(len(knn_imgs))]
knn_bbox_keys = ['{}nn_bbox'.format(k) for k in range(len(knn_imgs))]
# img: BCHW, knn_img: K BCHW, bbox: Bx4, knn_bbox= K Bx4
# K is the number of knn images, B is the number of filtered bboxes
dict1 = dict(img=torch.stack(patch_list))
dict2 = dict(bbox=filtered_box)
dict3 = dict(zip(knn_img_keys, knn_patch_lists))
dict4 = dict(zip(knn_bbox_keys, filtered_knn_boxes))
return {**dict1, **dict2, **dict3, **dict4}
def evaluate(self, json_file, intra_bbox, inter_bbox, **kwargs):
assert (len(intra_bbox) == len(inter_bbox)), \
"Mismatch the number of images in part training set, got: intra: {} inter: {}".format(
len(intra_bbox), len(inter_bbox))
data = mmcv.load(json_file)
# dict
data_new = {}
# sub-dict
info = {}
image_info = {}
pseudo_anno = {}
info['bbox_min_size'] = self.min_size
info['bbox_max_aspect_ratio'] = self.max_ratio
info['bbox_max_iou'] = self.max_iou_thr
info['intra_bbox_num'] = self.topN
info['knn_image_num'] = self.knn_image_num
info['knn_bbox_pair_ratio'] = self.topk_bbox_ratio
image_info['file_name'] = data['images']['file_name']
image_info['id'] = data['images']['id']
pseudo_anno['image_id'] = data['pseudo_annotations']['image_id']
pseudo_anno['bbox'] = intra_bbox
pseudo_anno['knn_image_id'] = data['pseudo_annotations']['knn_image_id']
pseudo_anno['knn_bbox_pair'] = inter_bbox
data_new['info'] = info
data_new['images'] = image_info
data_new['pseudo_annotations'] = pseudo_anno
return data_new
|
426763
|
from setuptools import setup
from codecs import open
from os import path
# from peewee_validates import __version__ # the build requires preinstalled peewee and datautils
__version__ = '1.0.8'
root_dir = path.abspath(path.dirname(__file__))
with open(path.join(root_dir, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(root_dir, 'requirements.txt'), encoding='utf-8') as f:
install_requires = list(map(str.strip, f.readlines()))
setup(
name='peewee-validates',
version=__version__,
description='Simple and flexible model validator for Peewee ORM.',
long_description=long_description,
url='https://github.com/timster/peewee-validates',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database :: Front-Ends',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='peewee orm database form validation development',
py_modules=['peewee_validates'],
install_requires=install_requires,
)
|
426766
|
from nnunet.training.network_training.nnUNetMultiTrainierV2 import nnUNetMultiTrainerV2
# from nnunet.training.dataloading.dataset_loading import DataLoader3DwithTag as DataLoader3D
from nnunet.training.dataloading.dataset_loading import DataLoader3DmergeTag as DataLoader3D
from nnunet.training.dataloading.dataset_loading import DataLoader2DwithTag as DataLoader2D
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
class nnUNetMultiBinaryTrainer(nnUNetMultiTrainerV2):
def __init__(self, plans_file, fold, tasks,tags, output_folder_dict=None, dataset_directory_dict=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, tasks,tags, output_folder_dict=output_folder_dict, dataset_directory_dict=dataset_directory_dict, batch_dice=batch_dice, stage=stage,
unpack_data=unpack_data, deterministic=deterministic, fp16=fp16)
self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
def get_basic_generators(self, task):
self.load_dataset(task)
self.do_split(task)
if self.threeD:
dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,self.tags[task],self.tags[self.tasks[0]],
False, oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,self.tags[task],self.tags[self.tasks[0]], False,
oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
else:
dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,self.tags[task],
# self.plans.get('transpose_forward'),
transpose=None,
oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,self.tags[task],
# self.plans.get('transpose_forward'),
transpose=None,
oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
return dl_tr, dl_val
|
426781
|
import mock
from django.test import SimpleTestCase
from ..base import mock_class_instance
from contentcuration.models import ContentNode
from contentcuration.utils.cache import ResourceSizeCache
class ResourceSizeCacheTestCase(SimpleTestCase):
def setUp(self):
super(ResourceSizeCacheTestCase, self).setUp()
self.node = mock.Mock(spec_set=ContentNode())
self.node.pk = "abcdefghijklmnopqrstuvwxyz"
self.redis_client = mock_class_instance("redis.client.StrictRedis")
self.cache_client = mock_class_instance("django_redis.client.DefaultClient")
self.cache_client.get_client.return_value = self.redis_client
self.cache = mock.Mock(client=self.cache_client)
self.helper = ResourceSizeCache(self.node, self.cache)
def test_redis_client(self):
self.assertEqual(self.helper.redis_client, self.redis_client)
self.cache_client.get_client.assert_called_once_with(write=True)
def test_redis_client__not_redis(self):
self.cache.client = mock.Mock()
self.assertIsNone(self.helper.redis_client)
def test_hash_key(self):
self.assertEqual("resource_size:abcd", self.helper.hash_key)
def test_size_key(self):
self.assertEqual("abcdefghijklmnopqrstuvwxyz:value", self.helper.size_key)
def test_modified_key(self):
self.assertEqual("abcdefghijklmnopqrstuvwxyz:modified", self.helper.modified_key)
def test_cache_get(self):
self.redis_client.hget.return_value = 123
self.assertEqual(123, self.helper.cache_get("test_key"))
self.redis_client.hget.assert_called_once_with(self.helper.hash_key, "test_key")
def test_cache_get__not_redis(self):
self.cache.client = mock.Mock()
self.cache.get.return_value = 123
self.assertEqual(123, self.helper.cache_get("test_key"))
self.cache.get.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key"))
def test_cache_set(self):
self.helper.cache_set("test_key", 123)
self.redis_client.hset.assert_called_once_with(self.helper.hash_key, "test_key", 123)
def test_cache_set__delete(self):
self.helper.cache_set("test_key", None)
self.redis_client.hdel.assert_called_once_with(self.helper.hash_key, "test_key")
def test_cache_set__not_redis(self):
self.cache.client = mock.Mock()
self.helper.cache_set("test_key", 123)
self.cache.set.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key"), 123)
def test_get_size(self):
with mock.patch.object(self.helper, 'cache_get') as cache_get:
cache_get.return_value = 123
self.assertEqual(123, self.helper.get_size())
cache_get.assert_called_once_with(self.helper.size_key)
def test_set_size(self):
with mock.patch.object(self.helper, 'cache_set') as cache_set:
self.helper.set_size(123)
cache_set.assert_called_once_with(self.helper.size_key, 123)
def test_get_modified(self):
with mock.patch.object(self.helper, 'cache_get') as cache_get:
cache_get.return_value = '2021-01-01 00:00:00'
modified = self.helper.get_modified()
self.assertIsNotNone(modified)
self.assertEqual('2021-01-01T00:00:00', modified.isoformat())
cache_get.assert_called_once_with(self.helper.modified_key)
def test_set_modified(self):
with mock.patch.object(self.helper, 'cache_set') as cache_set:
self.helper.set_modified('2021-01-01 00:00:00')
cache_set.assert_called_once_with(self.helper.modified_key, '2021-01-01 00:00:00')
|
426785
|
import wx
from math import radians
from util.primitives.funcs import do
from gui.windowfx import ApplySmokeAndMirrors
from common import pref
from logging import getLogger; log = getLogger('OverlayImage')
class SimpleOverlayImage(wx.PopupWindow):
"""
Used for tab previews when dragging them around
"""
def __init__(self,parent,host):
"""
        Uses the host's OnPaint function to draw this window and set its region:
host.OnPaint(otherdc,otherwindow)
"""
wx.PopupWindow.__init__(self,parent)
events=[
(wx.EVT_PAINT, self.OnPaint)
]
do(self.Bind(event, method) for (event,method) in events)
self.host=host
self.Size=host.Size
def OnPaint(self,event):
if not wx.IsDestroyed(self):
self.host.OnPaint(otherdc = wx.PaintDC(self), otherwindow = self)
def Transition(self,dest):
"""
Animated move to destination (x,y)
NOT YET IMPLEMENTED!!!
"""
pass
def Teleport(self,dest):
"""
Move to location, but uses center point as opposed to upper left
"""
self.Move((dest[0]-(self.Size.width/2),dest[1]-(self.Size.height/2)))
self.Refresh()
@property
def alpha(self):
return pref('tabs.preview_alpha',200)
class OverlayImage(wx.PopupWindow):
"""
Image that overlaps the window
"""
def __init__(self,parent,image,size= wx.Size(-1,-1),rot=0):
"""
image - wx.Image of the item
"""
wx.PopupWindow.__init__(self,parent)
events=[
(wx.EVT_PAINT,self.onPaint),
(wx.EVT_MOVE,self.OnMove)
]
do(self.Bind(event, method) for (event,method) in events)
self.parent=parent
self.rot = rot
if size != wx.Size(-1, -1):
self.SetSize(size)
if isinstance(image, wx.Bitmap):
self.bitmap = image
else:
self.SetImage(image, size)
def SetImage(self, image, size = wx.Size(-1,-1)):
log.info('Overlay Image has been updated')
self.image = image
prebitmap = wx.ImageFromBitmap(image.GetBitmap(size))
prebitmap.ConvertAlphaToMask()
self.bitmap = wx.BitmapFromImage(prebitmap)
self.width, self.height = self.bitmap.Width, self.bitmap.Height
self.GenBitmap()
def OnMove(self,event):
self.Refresh()
def onPaint(self,event):
dc = wx.PaintDC(self)
dc.DrawBitmap(self.bitmap,0,0,False)
def GenBitmap(self):
"""
Generates a local cached bitmap from the bitmap
then sets the region
"""
if self.rot:
self.bitmap=wx.BitmapFromImage(self.bitmap.ConvertToImage().Rotate(radians(90*self.rot),(0,0)))
if self.Size != (self.bitmap.Width+1,self.bitmap.Height+1):
wx.PopupWindow.SetSize(self,(self.bitmap.Width+1,self.bitmap.Height+1))
ApplySmokeAndMirrors(self, self.bitmap)
def SetBitmapSize(self,size):
        'Change the size of the image; dimensions of 0 or lower keep the current value.'
if size == self.Size: return
if size[0] > 0: self.width = size[0]
if size[1] > 0: self.height = size[1]
prebitmap=self.image.GetBitmap((self.width,self.height)).ConvertToImage()
prebitmap.ConvertAlphaToMask()
self.bitmap=wx.BitmapFromImage(prebitmap)
self.GenBitmap()
def SetRotation(self,rot=0):
self.rot=rot
def Transition(self,dest):
"""
Animated move to destination (x,y)
NOT YET IMPLEMENTED!!!
"""
pass
def Teleport(self,dest):
"""
Move to location, but uses center point as opposed to upper left
"""
self.Move((dest[0]-(self.Size.width//2),dest[1]))#-(self.Size.height//2)
|
426824
|
import torch
import os
import shutil
import datetime
from utils.util import *
class Log(object):
def save_train_info(self, epoch, batch, maxbatch, losses, top1, top5):
"""
        losses may contain several parts (here: loss, loss1, loss2, loss3)
"""
loss = losses[0]
loss1 = losses[1]
loss2 = losses[2]
loss3 = losses[3]
root_dir = os.path.abspath('./')
log_dir = os.path.join(root_dir, 'log')
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_file = os.path.join(log_dir, 'log_train.txt')
if not os.path.exists(log_file):
os.mknod(log_file)
with open(log_file, 'a') as f:
f.write('DFL-CNN <==> Train <==> Epoch: [{0}][{1}/{2}]\n'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Loss1 {loss1.val:.4f} ({loss1.avg:.4f})\t'
'Loss2 {loss2.val:.4f} ({loss2.avg:.4f})\t'
'Loss3 {loss3.val:.4f} ({loss3.avg:.4f})\n'
'Prec@1 ({top1.avg:.3f})\t'
'Prec@5 ({top5.avg:.3f})\n'.format(epoch, batch, maxbatch,loss = loss,loss1 = loss1,loss2 = loss2, loss3=loss3, top1=top1, top5=top5))
def save_test_info(self, epoch, top1, top5):
root_dir = os.path.abspath('./')
log_dir = os.path.join(root_dir, 'log')
# check log_dir
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_file = os.path.join(log_dir, 'log_test.txt')
if not os.path.exists(log_file):
os.mknod(log_file)
with open(log_file, 'a') as f:
f.write('DFL-CNN <==> Test <==> Epoch: [{:4d}] Top1:{top1.avg:.3f}% Top5:{top5.avg:.3f}%\n'.format(epoch, top1=top1, top5=top5))
# this is for weight
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""[summary]
[description]
Arguments:
state {[type]} -- [description] a dict describe some params
is_best {bool} -- [description] a bool value
Keyword Arguments:
filename {str} -- [description] (default: {'checkpoint.pth.tar'})
"""
root_dir = get_root_dir()
weight_dir = os.path.join(root_dir, 'weight')
if not os.path.exists(weight_dir):
os.mkdir(weight_dir)
epoch = state['epoch']
prec1 = state['prec1']
file_path = os.path.join(weight_dir, 'epoch_{:04d}_top1_{:02d}_{}'.format(int(epoch), int(prec1), filename))
torch.save(state, file_path)
best_path = os.path.join(weight_dir, 'model_best.pth.tar')
if is_best:
shutil.copyfile(file_path, best_path)
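# Illustrative call of save_checkpoint; the state keys follow what the function reads
# and the values are hypothetical:
#   state = {'epoch': 12, 'prec1': 73.5, 'state_dict': model.state_dict()}
#   save_checkpoint(state, is_best=True)
# This writes weight/epoch_0012_top1_73_checkpoint.pth.tar and, because is_best is True,
# also copies it to weight/model_best.pth.tar.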
|
426886
|
import requests
from django.conf import settings
from care.users.models import phone_number_regex
def _opt_in(phone_number):
url_data = {
"method": "OPT_IN",
"auth_scheme": "plain",
"v": "1.1",
"phone_number": phone_number,
"password": settings.WHATSAPP_API_PASSWORD,
"userid": settings.WHATSAPP_API_USERNAME,
"channel": "whatsapp",
}
resp = requests.post(settings.WHATSAPP_API_ENDPOINT, params=url_data)
return resp
def _send(phone_number, message, notification_id):
_opt_in(phone_number)
url_data = {
"method": "SendMessage",
"auth_scheme": "plain",
"v": "1.1",
"send_to": phone_number,
"msg": message["message"],
"isHSM": "True",
"buttonUrlParam": str(notification_id),
"msg_type": "HSM",
"password": <PASSWORD>.WHATSAPP_API_PASSWORD,
"userid": settings.WHATSAPP_API_USERNAME,
"isTemplate": "true",
"header": message["header"],
"footer": message["footer"],
}
resp = requests.post(settings.WHATSAPP_API_ENDPOINT, params=url_data)
return resp
def sendWhatsappMessage(phone_numbers, message, notification_id, many=False):
if not many:
phone_numbers = [phone_numbers]
phone_numbers = list(set(phone_numbers))
for phone in phone_numbers:
try:
phone_number_regex(phone)
except Exception:
continue
_send(phone, message, notification_id)
return True
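# Illustrative call; the phone number and template fields below are hypothetical:
#   message = {"message": "Your report is ready", "header": "CARE", "footer": "Do not reply"}
#   sendWhatsappMessage("+911234567890", message, notification_id=42)
# Pass many=True together with a list of numbers to send the same template to several
# recipients; numbers that fail the phone_number_regex check are silently skipped.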
|
426904
|
import numpy as np
from n2v.utils import n2v_utils
from n2v.utils.n2v_utils import tta_forward, tta_backward
def test_get_subpatch():
patch = np.arange(100)
patch.shape = (10, 10)
subpatch_target = np.array([[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45],
[51, 52, 53, 54, 55]])
subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 2)
assert np.sum(subpatch_target - subpatch_test) == 0
subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 1)
assert np.sum(subpatch_target[1:-1, 1:-1] - subpatch_test) == 0
patch = np.arange(1000)
patch.shape = (10, 10, 10)
subpatch_target = np.array([[[31, 32, 33],
[41, 42, 43],
[51, 52, 53]],
[[131, 132, 133],
[141, 142, 143],
[151, 152, 153]],
[[231, 232, 233],
[241, 242, 243],
[251, 252, 253]]])
subpatch_test = n2v_utils.get_subpatch(patch, (1, 4, 2), 1)
assert np.sum(subpatch_target - subpatch_test) == 0
def test_random_neighbor():
coord = np.array([51, 52, 32])
shape = [128, 128, 128]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
shape = [55, 53, 32]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
def test_pm_normal_neighbor_withoutCP():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_withoutCP(1)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_uniform_withCP():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_uniform_withCP(3)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
for i in range(10):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_normal_additive():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_additive(0)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z, y, x]
def test_pm_normal_fitted():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_fitted(3)
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
def test_pm_identity():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_identity(1)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z, y, x]
def test_tta():
img, _ = np.meshgrid(range(200), range(100))
img[:50, :50] = 50
aug = tta_forward(img[..., np.newaxis])
avg = tta_backward(aug)
assert np.sum(avg[..., 0] - img) == 0
|
426908
|
import arcpy
#Makes sure Spatial Analyst is turned on.
if arcpy.CheckExtension("Spatial")== "Available":
arcpy.CheckOutExtension("Spatial")
from arcpy.sa import *
else:
arcpy.AddError("You do not have the Spatial Analyst Extension, and therefore cannot use this tool.")
#Input folder.
folder_path= raw_input("Please enter the name and location of the folder containing the data to be masked: ")
arcpy.env.workspace= r"%s" %folder_path
#Masking file.
Mask= raw_input("Please enter the name of the masking file: ")
Mask_file= r"%s" %Mask
#For all the rasters in the file, perform an extract by mask.
for rasters in arcpy.ListRasters():
#Out name is the Output File name. EBM stands for "Extract By Mask".
Out_Name= "EBM" + rasters[]
print Out_Name
outExtractByMask = ExtractByMask(rasters, Mask_file)
outExtractByMask.save(Out_Name)
|
426925
|
import os
from distutils.dir_util import copy_tree
import time
import pytest
from nixui.graphics import main_window
from nixui import state_model
from nixui.options.option_tree import OptionTree
from nixui.options.attribute import Attribute
SAMPLES_PATH = 'tests/sample'
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
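# A test opts into this skip behaviour via the "slow" marker, e.g. (hypothetical test):
#   @pytest.mark.slow
#   def test_full_rebuild():
#       ...
# Without --runslow such tests are collected but marked as skipped.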
class Helpers:
class timeout(object):
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
@pytest.fixture
def helpers():
return Helpers
@pytest.fixture
def samples_path(tmpdir):
copy_tree(SAMPLES_PATH, str(tmpdir))
return tmpdir
@pytest.fixture
def statemodel(samples_path):
os.environ['CONFIGURATION_PATH'] = os.path.abspath(os.path.join(samples_path, 'configuration.nix'))
return state_model.StateModel()
@pytest.fixture
def nix_gui_main_window(statemodel, qtbot):
nix_gui_mw = main_window.NixGuiMainWindow(statemodel)
yield nix_gui_mw
nix_gui_mw.close()
@pytest.fixture
def option_tree():
os.environ['CONFIGURATION_PATH'] = os.path.abspath(os.path.join(SAMPLES_PATH, 'configuration.nix'))
statemodel = state_model.StateModel()
return statemodel.option_tree
@pytest.fixture
def minimal_option_tree():
return OptionTree(
{
Attribute('myList'): {'type_string': 'list of strings'},
Attribute('myAttrs'): {'type_string': 'attribute set of submodules'},
Attribute('myAttrs."<name>"'): {},
},
{}
)
@pytest.fixture
def minimal_state_model(mocker, minimal_option_tree):
mocker.patch('nixui.state_model.api.get_option_tree', return_value=minimal_option_tree)
return state_model.StateModel()
|
426961
|
from core.advbase import *
class Beautician_Zardin(Adv):
def x_proc(self, e):
if self.buff("s2"):
self.afflics.stun.on(f"{e.name}_stunning_beauty", 1.0, 5.5)
variants = {None: Beautician_Zardin}
|
426977
|
import numpy as np
import datetime
from collections import defaultdict
from argparse import Namespace
import json
import os
import copy
from shutil import copyfile
from pointcloud import translate_transform_to_new_center_of_rotation
def ns_to_dict(ns):
return {k: ns_to_dict(v) if type(v) == Namespace else v for k, v in ns.__dict__.items()}
def eval_translation(t, gt_t):
levels = np.array([0, 0, 0])
level_thresholds = np.array([0.02, 0.1, 0.2])
dist = np.linalg.norm(t[:2] - gt_t[:2])
for idx, thresh in enumerate(level_thresholds):
if dist < thresh:
levels[idx] = 1
return dist, levels
def angle_diff(a, b):
d = b - a
return float((d + np.pi) % (np.pi * 2.0) - np.pi)
def eval_angle(a, gt_a, accept_inverted_angle):
levels = np.array([0, 0, 0])
level_thresholds = np.array([1., 5.0, 10.0])
dist = np.abs(angle_diff(a, gt_a)) / np.pi * 180.
if accept_inverted_angle:
dist = np.minimum(dist, np.abs(angle_diff(a + np.pi, gt_a)) / np.pi * 180.)
for idx, thresh in enumerate(level_thresholds):
if dist < thresh:
levels[idx] = 1
return dist, levels
def eval_transform(t, gt_t, a, gt_a, accept_inverted_angle):
_, levels_translation = eval_translation(t, gt_t)
_, levels_angle = eval_angle(a, gt_a, accept_inverted_angle=accept_inverted_angle)
return np.minimum(levels_translation, levels_angle)
def evaluate_held(cfg, val_idxs, all_pred_translations, all_pred_angles, all_gt_translations, all_gt_angles, eval_dir=None, avg_window=5, mean_time=0):
tracks = defaultdict(dict)
for idx, file_idx in enumerate(val_idxs):
meta = json.load(open(f'{cfg.data.basepath}/meta/{str(file_idx).zfill(8)}.json', 'r'))
trackid = meta['trackid']
frame2 = meta['frames'][1]
timestamp1, timestamp2 = meta['timestamps']
pred_translation = all_pred_translations[idx]
time_passed = np.maximum(0.05, timestamp2 - timestamp1)
tracks[trackid][frame2] = (pred_translation, time_passed)
velocities = defaultdict(list)
for trackid, track in tracks.items():
track_translations = list(zip(*track.items()))[1]
track_translations = np.array(track_translations)
# print(track_translations.shape)
if eval_dir is not None:
with open(f'{eval_dir}/track{trackid}.txt', 'w') as file_handler:
for idx, (track_translation, time_passed) in enumerate(track_translations):
prev_translations = track_translations[max(0, idx - avg_window + 1):idx + avg_window + 1]
# print(trackid, idx, prev_translations.shape)
prev_velocities = prev_translations[:, 0] / prev_translations[:, 1]
mean_velocity = np.mean(prev_velocities, axis=0).copy()
# mean_translation[0][2] = 0.
# print(mean_translation)
mean_velocity_length = np.linalg.norm(mean_velocity[:2])
velocities[trackid].append(mean_velocity_length)
file_handler.write(f'{mean_velocity_length}\n')
return velocities, dict(mean_time=mean_time)
def process_velocities(tracks, eval_dir, avg_window):
if eval_dir is not None:
eval_dir = eval_dir + '/velocities'
os.makedirs(eval_dir, exist_ok=True)
else:
return
velocities = defaultdict(list)
for intermediate_trackid, traj in tracks.items():
max_frame = max(traj.keys())
start_frames = [idx for idx in range(max_frame + 1) if idx in traj.keys() and idx - 1 not in traj.keys()]
for start_frame in start_frames:
new_track_id = intermediate_trackid + start_frame - 1 # -1 because start frame is not actually the start frame, but the second after the initial pose (pc1)
track_translations = [(np.array([0., 0, 0]), 0.1)]
for curr_frame in range(start_frame, max_frame + 1):
track_translations.append(traj[curr_frame])
if curr_frame + 1 not in traj.keys():
break
# track_translations = list(zip(*track.items()))[1]
track_translations = np.array(track_translations)
# print(track_translations.shape)
if eval_dir is not None:
with open(f'{eval_dir}/track{new_track_id:09}.txt', 'w') as file_handler:
# velocities[new_track_id].append(0.)
# file_handler.write(f'{0.}\n')
for idx, (track_translation, time_passed) in enumerate(track_translations):
prev_translations = track_translations[max(0, idx - avg_window):idx + avg_window + 1]
prev_velocities = prev_translations[:, 0] / prev_translations[:, 1]
mean_velocity = np.mean(prev_velocities, axis=0).copy()
mean_velocity_length = np.linalg.norm(mean_velocity[:2])
velocities[new_track_id].append(mean_velocity_length)
file_handler.write(f'{mean_velocity_length}\n')
return velocities
def get_at_dist_measures(eval_measures, dist):
return Namespace(
corr_levels=eval_measures[dist]['corr_levels'].tolist(),
corr_levels_translation=eval_measures[dist]['corr_levels_translation'].tolist(),
mean_dist_translation=eval_measures[dist]['mean_dist_translation'],
mean_sq_dist_translation=eval_measures[dist]['mean_sq_dist_translation'],
corr_levels_angles=eval_measures[dist]['corr_levels_angles'].tolist(),
mean_dist_angle=eval_measures[dist]['mean_dist_angle'],
mean_sq_dist_angle=eval_measures[dist]['mean_sq_dist_angle'],
num=eval_measures[dist]['num'],
)
def evaluate(cfg, val_idxs, all_pred_translations, all_pred_angles, all_gt_translations, all_gt_angles, all_pred_centers, all_gt_pc1centers, eval_dir=None, accept_inverted_angle=False, detailed_eval=False, avg_window=5, mean_time=0):
new_all_pred_translations = translate_transform_to_new_center_of_rotation(all_pred_translations, all_pred_angles, all_pred_centers, all_gt_pc1centers)
np.set_printoptions(precision=3, suppress=True)
# print(np.concatenate([all_pred_translations, new_all_pred_translations, all_gt_translations, all_pred_angles, all_gt_angles], axis=1))
tracks = defaultdict(dict)
empty_dict = {'corr_levels_translation': np.array([0, 0, 0], dtype=float), 'corr_levels_angles': np.array([0, 0, 0], dtype=float), 'corr_levels': np.array([0, 0, 0], dtype=float), 'mean_dist_translation': 0.0, 'mean_sq_dist_translation': 0.0, 'mean_dist_angle': 0.0, 'mean_sq_dist_angle': 0.0, 'num': 0}
eval_measures = {
'all': copy.deepcopy(empty_dict),
'5m': copy.deepcopy(empty_dict),
'10m': copy.deepcopy(empty_dict),
'15m': copy.deepcopy(empty_dict),
'20m': copy.deepcopy(empty_dict),
'val': {
'all': copy.deepcopy(empty_dict),
'5m': copy.deepcopy(empty_dict),
'10m': copy.deepcopy(empty_dict),
'15m': copy.deepcopy(empty_dict),
'20m': copy.deepcopy(empty_dict),
},
'test': {
'all': copy.deepcopy(empty_dict),
'5m': copy.deepcopy(empty_dict),
'10m': copy.deepcopy(empty_dict),
'15m': copy.deepcopy(empty_dict),
'20m': copy.deepcopy(empty_dict),
},
}
per_transform_info = []
for idx, val_idx, translation, gt_translation, pred_angle, gt_angle, gt_pc1center in zip([x for x in range(len(val_idxs))], val_idxs, new_all_pred_translations, all_gt_translations, all_pred_angles, all_gt_angles, all_gt_pc1centers):
meta = json.load(open(f'{cfg.data.basepath}/meta/{str(val_idx).zfill(8)}.json', 'r'))
if 'KITTI_tracklets' in cfg.data.basepath:
is_test = 'trackids' in meta and meta['trackids'][0] in [2, 6, 7, 8, 10]
elif 'Synth' in cfg.data.basepath:
is_test = idx >= 1000
dist_transl, levels_transl = eval_translation(translation, gt_translation)
dist_angle, levels_angle = eval_angle(pred_angle, gt_angle, accept_inverted_angle=accept_inverted_angle)
levels = eval_transform(translation, gt_translation, pred_angle, gt_angle, accept_inverted_angle=accept_inverted_angle)
for _set in ['both', 'val', 'test']:
if dist_transl > 10000:
continue
node = eval_measures
if _set in ['val', 'test']:
node = eval_measures[_set]
if (_set == 'test') != is_test:
continue
for key in ['all', '5m', '10m', '15m', '20m']:
centroid_distance = np.linalg.norm(gt_pc1center)
if key == '5m' and centroid_distance > 5.:
continue
if key == '10m' and centroid_distance > 10.:
continue
if key == '15m' and centroid_distance > 15.:
continue
if key == '20m' and centroid_distance > 20.:
continue
node[key]['num'] += 1
node[key]['corr_levels_translation'] += levels_transl
node[key]['mean_dist_translation'] += dist_transl
node[key]['mean_sq_dist_translation'] += dist_transl * dist_transl
node[key]['corr_levels_angles'] += levels_angle
node[key]['mean_dist_angle'] += dist_angle
node[key]['mean_sq_dist_angle'] += dist_angle * dist_angle
node[key]['corr_levels'] += levels
if detailed_eval:
per_transform_info.append([levels, dist_transl, dist_angle])
for _set in ['both', 'val', 'test']:
node = eval_measures
if _set in ['val', 'test']:
node = eval_measures[_set]
for key in ['all', '5m', '10m', '15m', '20m']:
num_predictions = float(node[key]['num'])
if node[key]['num'] == 0:
num_predictions = 1e-20 # make numbers really large, indicates eval is not valid
node[key]['corr_levels_translation'] /= num_predictions
node[key]['mean_dist_translation'] /= num_predictions
node[key]['mean_sq_dist_translation'] = np.sqrt(node[key]['mean_sq_dist_translation'] / num_predictions)
node[key]['corr_levels_angles'] /= num_predictions
node[key]['mean_dist_angle'] /= num_predictions
node[key]['mean_sq_dist_angle'] = np.sqrt(node[key]['mean_sq_dist_angle'] / num_predictions)
node[key]['corr_levels'] /= num_predictions
reg_eval_measures = np.array([0, 0], dtype=float)
for idx, file_idx in enumerate(val_idxs):
meta = json.load(open(f'{cfg.data.basepath}/meta/{str(file_idx).zfill(8)}.json', 'r'))
if 'seq' in meta:
seq = meta['seq']
trackid = meta['trackids'][0]
frame1, frame2 = meta['frames']
intermediate_trackid = seq * 10000000 + trackid * 10000
pred_translation = all_pred_translations[idx]
time_passed = 0.1
tracks[intermediate_trackid][frame2] = (pred_translation, time_passed)
if len(tracks) > 0:
velocities = process_velocities(tracks, eval_dir, avg_window)
# print(velocities)
eval_dict = Namespace(
corr_levels=eval_measures['all']['corr_levels'].tolist(),
corr_levels_translation=eval_measures['all']['corr_levels_translation'].tolist(),
mean_dist_translation=eval_measures['all']['mean_dist_translation'],
mean_sq_dist_translation=eval_measures['all']['mean_sq_dist_translation'],
corr_levels_angles=eval_measures['all']['corr_levels_angles'].tolist(),
mean_dist_angle=eval_measures['all']['mean_dist_angle'],
mean_sq_dist_angle=eval_measures['all']['mean_sq_dist_angle'],
num=eval_measures['all']['num'],
eval_5m=get_at_dist_measures(eval_measures, '5m'),
eval_10m=get_at_dist_measures(eval_measures, '10m'),
eval_15m=get_at_dist_measures(eval_measures, '15m'),
eval_20m=get_at_dist_measures(eval_measures, '20m'),
val=Namespace(
corr_levels=eval_measures['val']['all']['corr_levels'].tolist(),
corr_levels_translation=eval_measures['val']['all']['corr_levels_translation'].tolist(),
mean_dist_translation=eval_measures['val']['all']['mean_dist_translation'],
mean_sq_dist_translation=eval_measures['val']['all']['mean_sq_dist_translation'],
corr_levels_angles=eval_measures['val']['all']['corr_levels_angles'].tolist(),
mean_dist_angle=eval_measures['val']['all']['mean_dist_angle'],
mean_sq_dist_angle=eval_measures['val']['all']['mean_sq_dist_angle'],
num=eval_measures['val']['all']['num'],
eval_5m=get_at_dist_measures(eval_measures['val'], '5m'),
eval_10m=get_at_dist_measures(eval_measures['val'], '10m'),
eval_15m=get_at_dist_measures(eval_measures['val'], '15m'),
eval_20m=get_at_dist_measures(eval_measures['val'], '20m'),
),
test=Namespace(
corr_levels=eval_measures['test']['all']['corr_levels'].tolist(),
corr_levels_translation=eval_measures['test']['all']['corr_levels_translation'].tolist(),
mean_dist_translation=eval_measures['test']['all']['mean_dist_translation'],
mean_sq_dist_translation=eval_measures['test']['all']['mean_sq_dist_translation'],
corr_levels_angles=eval_measures['test']['all']['corr_levels_angles'].tolist(),
mean_dist_angle=eval_measures['test']['all']['mean_dist_angle'],
mean_sq_dist_angle=eval_measures['test']['all']['mean_sq_dist_angle'],
num=eval_measures['test']['all']['num'],
eval_5m=get_at_dist_measures(eval_measures['test'], '5m'),
eval_10m=get_at_dist_measures(eval_measures['test'], '10m'),
eval_15m=get_at_dist_measures(eval_measures['test'], '15m'),
eval_20m=get_at_dist_measures(eval_measures['test'], '20m'),
),
reg_eval=Namespace(fitness=reg_eval_measures[0], inlier_rmse=reg_eval_measures[1]),
# num=len(val_idxs),
mean_time=mean_time)
if eval_dir is not None:
os.makedirs(eval_dir, exist_ok=True)
filename = f'{eval_dir}/eval{"_180" if accept_inverted_angle else ""}.json'
if os.path.isfile(filename):
datestr_now = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
copyfile(filename, f'{filename[:-5]}_{datestr_now}.json')
if mean_time == 0:
prev_eval_dict = json.load(open(filename, 'r'))
if 'mean_time' in prev_eval_dict:
eval_dict.__dict__['mean_time'] = prev_eval_dict['mean_time']
with open(filename, 'w') as fhandle:
json.dump(ns_to_dict(eval_dict), fhandle)
if detailed_eval:
return eval_dict, per_transform_info
return eval_dict
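# Hedged sketch only: ns_to_dict is called above but defined elsewhere in the
# project. A plausible implementation recursively turns nested argparse-style
# Namespace objects into plain dicts so json.dump can serialise eval_dict.
def ns_to_dict_sketch(ns):
    if isinstance(ns, Namespace):
        return {key: ns_to_dict_sketch(value) for key, value in vars(ns).items()}
    return ns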
|
426979
|
from dino.config import ConfigKeys
from dino import environ
import eventlet
import traceback
import logging
import sys
import os
from dino.endpoint.base import PublishException
logger = logging.getLogger(__name__)
DINO_DEBUG = os.environ.get('DINO_DEBUG')
if DINO_DEBUG is not None and DINO_DEBUG.lower() in {'1', 'true', 'yes'}:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
class PubSub(object):
def __init__(self, env):
self.env = env
if len(self.env.config) == 0 or self.env.config.get(ConfigKeys.TESTING, False):
self.env.publish = PubSub.mock_publish
return
conf = self.env.config
self.env.publish = self.do_publish
self._setup_internal_queue(conf, env)
self._setup_external_queue(conf, env)
def _setup_internal_queue(self, conf, env):
queue_type = conf.get(ConfigKeys.TYPE, domain=ConfigKeys.QUEUE, default=None)
if queue_type is None:
raise RuntimeError('no message queue specified')
if queue_type == 'redis':
from dino.endpoint.redis import RedisPublisher
self.env.internal_publisher = RedisPublisher(env, is_external_queue=False)
elif queue_type == 'amqp':
from dino.endpoint.amqp import AmqpPublisher
self.env.internal_publisher = AmqpPublisher(env, is_external_queue=False)
elif queue_type == 'mock':
from dino.endpoint.mock import MockPublisher
self.env.internal_publisher = MockPublisher(env, is_external_queue=False)
else:
raise RuntimeError('unknown message queue type "{}"'.format(queue_type))
def _setup_external_queue(self, conf, env):
ext_queue_type = conf.get(ConfigKeys.TYPE, domain=ConfigKeys.EXTERNAL_QUEUE)
if ext_queue_type is None:
# external queue not required
self.env.external_publisher = PubSub.mock_publish
return
if ext_queue_type in {'rabbitmq', 'amqp'}:
from dino.endpoint.amqp import AmqpPublisher
self.env.external_publisher = AmqpPublisher(env, is_external_queue=True)
elif ext_queue_type == 'redis':
from dino.endpoint.redis import RedisPublisher
self.env.external_publisher = RedisPublisher(env, is_external_queue=True)
elif ext_queue_type == 'kafka':
from dino.endpoint.kafka import KafkaPublisher
self.env.external_publisher = KafkaPublisher(env, is_external_queue=True)
elif ext_queue_type == 'mock':
from dino.endpoint.mock import MockPublisher
self.env.external_publisher = MockPublisher(env, is_external_queue=True)
else:
raise RuntimeError(
'unknown external queue type "{}"; available types are [mock,redis,amqp,rabbitmq,kafka]'.format(
ext_queue_type)
)
def do_publish(self, message: dict, external: bool=None):
logger.debug('publish: verb %s id %s external? %s' % (message['verb'], message['id'], str(external or False)))
if external is None or not external:
external = False
# avoid hanging clients
eventlet.spawn(self._do_publish_async, message, external)
def _do_publish_async(self, message: dict, external: bool):
if external:
return self._do_publish_external(message)
else:
return self._do_publish_internal(message)
def _do_publish_external(self, message: dict):
try:
return self.env.external_publisher.publish(message)
except PublishException:
logger.error('failed to publish external event multiple times! Republishing to internal queue')
return self.env.internal_publisher.publish(message)
except Exception as e:
logger.error('could not publish message "%s", because: %s' % (str(message), str(e)))
logger.exception(traceback.format_exc())
self.env.stats.incr('publish.error')
environ.env.capture_exception(sys.exc_info())
return None
def _do_publish_internal(self, message: dict):
try:
return self.env.internal_publisher.publish(message)
except Exception as e:
logger.error('could not publish message "%s", because: %s' % (str(message), str(e)))
logger.exception(traceback.format_exc())
self.env.stats.incr('publish.error')
environ.env.capture_exception(sys.exc_info())
return None
@staticmethod
def mock_publish(message, external=False):
pass
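# Illustrative only: with an empty config PubSub falls back to mock_publish,
# which is convenient in unit tests. _FakeEnv is a stand-in, not part of dino.
class _FakeEnv:
    def __init__(self):
        self.config = {}  # len() == 0 -> PubSub installs mock_publish and returns early

def _mock_pubsub_example():
    pubsub = PubSub(_FakeEnv())
    pubsub.env.publish({'verb': 'send', 'id': 'abc-123'})  # no-op via mock_publish
    return pubsub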
|
426988
|
x = [1, [2, None]]
y = [1, 2]
z = [1, 2]
x[1][0] = y # should nudge y over to the right
z[1] = x # should nudge BOTH x and y over to the right
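# A quick check of the aliasing described above: the assignments store
# references, not copies, so the nested structures are shared.
print(x[1][0] is y)  # True - x's inner list now refers to the same object as y
print(z[1] is x)     # True - z's second slot is x itself
print(z)             # [1, [1, [[1, 2], None]]]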
|
426992
|
import responses
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
@mock_http_response(
responses.GET, "/sapi/v1/lending/daily/product/list", mock_item, 200
)
def test_savings_flexible_products():
"""Tests the API endpoint to get flexible product list"""
client = Client(key, secret)
response = client.savings_flexible_products()
response.should.equal(mock_item)
|
426998
|
import itertools
import re
import time
from typing import Optional
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from tabulate import tabulate
from stopots_bot.constants import Constants, EQUIVALENTS
from stopots_bot.utils import cls, is_a_valid_id, is_a_valid_username, log_error, random_from_list
class BOT:
"""Classe do BOT"""
def __init__(self, username: str = None, validator_type: str = 'check', auto_stop: bool = False,
auto_ready: bool = True, use_equivalence: bool = True,
dictionary: dict = None, driver: webdriver = None):
self.username = username
self.validator_type = validator_type
self.auto_stop = auto_stop
self.auto_ready = auto_ready
self.use_equivalence = use_equivalence
self.dictionary = dictionary
self.driver = driver
def join_game(self, room_id: Optional[int] = None, avatar_id: Optional[int] = 0) -> None:
"""
Executa os passos para entrar no jogo.
:param room_id: número da sala.
:param avatar_id: número do avatar.
"""
if not self.driver:
print('Webdriver not defined ')
quit()
print('Entrando no jogo...')
self.driver.get(f'{Constants.url}{room_id if room_id else ""}')
wait = WebDriverWait(self.driver, 30)
        # "enter anonymously" button
wait.until(ec.presence_of_element_located((By.XPATH, Constants.enter_button)))
wait.until(ec.element_to_be_clickable((By.XPATH, Constants.enter_button)))
self.driver.find_element_by_xpath(Constants.enter_button).click()
        # loading screen
wait.until(ec.invisibility_of_element_located((By.XPATH, Constants.loading_animation)))
# wait.until(ec.NoSuchElementException((By.XPATH, Constants.loading_animation)))
print(f'Entrando na sala {room_id if room_id else ""}...')
        # username input
user_input = Constants.username_input if not room_id else Constants.username_input2
wait.until(ec.presence_of_element_located((By.XPATH, user_input)))
if self.username is not None and is_a_valid_username(self.username):
self.driver.find_element_by_xpath(user_input).clear()
self.driver.find_element_by_xpath(user_input).send_keys(self.username)
else:
self.username = self.driver.find_element_by_xpath(user_input).get_attribute('value')
# Avatar
if 1 <= avatar_id <= 36:
time.sleep(2)
            # Edit button => opens the avatar menu
wait.until(ec.element_to_be_clickable((By.XPATH, Constants.avatar_edit_button)))
self.driver.find_element_by_xpath(Constants.avatar_edit_button).click()
            # Avatar icon
if avatar_id > 14:
self.driver.execute_script('arguments[0].scrollIntoView(true);',
self.driver.find_element_by_xpath(Constants.avatar(avatar_id)))
wait.until(ec.element_to_be_clickable((By.XPATH, Constants.avatar(avatar_id))))
self.driver.find_element_by_xpath(Constants.avatar(avatar_id)).click()
            # Confirm selection button
wait.until(ec.element_to_be_clickable((By.XPATH, Constants.avatar_confirm_button)))
self.driver.find_element_by_xpath(Constants.avatar_confirm_button).click()
            # Wait for the fade animation
wait.until(ec.invisibility_of_element_located((By.XPATH, Constants.fade_animation)))
time.sleep(2)
        # Play button
play_button = Constants.play_button if not room_id else Constants.play_button2
wait.until(ec.element_to_be_clickable((By.XPATH, play_button)))
self.driver.find_element_by_xpath(play_button).click()
print(f'Logado como: {self.username}')
def show_game_info(self) -> None:
"""Mostrar o round atual e o total"""
try:
rounds = self.driver.find_element_by_xpath(Constants.rounds).text
total = self.driver.find_element_by_xpath(Constants.rounds_total).text
print(f'Rodadas: {rounds}{total}')
except NoSuchElementException:
pass
except Exception as e:
log_error('Game Info', e)
players = []
for x in range(1, 15):
try:
nick = self.driver.find_element_by_xpath(Constants.PlayerList.nick(x)).text
points = self.driver.find_element_by_xpath(Constants.PlayerList.points(x)).text
if nick:
players.append([nick, points])
except NoSuchElementException:
break
except Exception as e:
log_error('Player List', e)
break
print('- Jogadores -\n', tabulate(players, ('Nome', 'Pontos')))
def show_round_end_rank(self) -> None:
"""Mostra a colocação dos jogadores no final da partida."""
h3_status = self.driver.find_element_by_xpath(Constants.ScorePanel.h3).text.upper()
ranks = []
if h3_status == 'RANKING DA RODADA':
for x in range(1, 15):
try:
position = self.driver.find_element_by_xpath(Constants.RankPanel.position(x)).text
nick = self.driver.find_element_by_xpath(Constants.RankPanel.nick(x)).text
points = self.driver.find_element_by_xpath(Constants.RankPanel.points(x)).text
if nick:
ranks.append([f'{position}º', nick, points])
except NoSuchElementException:
break
except Exception as e:
log_error('Round End', e)
print('\n- Ranking da Rodada -\n', tabulate(ranks, ('Pos', 'Jogador', 'Pontos')))
elif h3_status == 'FIM DE JOGO!' or \
self.driver.find_element_by_xpath(Constants.ScorePanel.h4).text.upper() == 'RANKING FINAL':
for x in range(1, 4):
try:
ranks.append([
self.driver.find_element_by_xpath(Constants.ScorePanel.nick(x)).text,
self.driver.find_element_by_xpath(Constants.ScorePanel.points(x)).text
])
except NoSuchElementException:
break
except Exception as e:
log_error('Game End', e)
break
print('- Fim de Jogo -\n', tabulate(ranks, ('Nome', 'Pontos')))
print('')
def find_letter(self) -> Optional[str]:
"""
Procura a letra atual da partida.
:return: letra se encontrada | None
"""
try:
letter = self.driver.find_element_by_xpath(Constants.letter).text.lower()
print(f'Letra Atual: {letter if letter else "?"}')
return letter
except NoSuchElementException:
return None
except Exception as e:
log_error('Find letter', e)
return None
def get_answer(self, letter: str, category: str) -> Optional[str]:
"""
Seleciona uma resposta aleatoria do dicionário.
:param letter: letra inicial.
:param category: categoria.
:return: resposta aleatoria | None
"""
return random_from_list(self.dictionary[letter][category]).lower()
def get_equivalent_answers(self, letter: str, category: str) -> Optional[list[str]]:
"""
Retorna todas as respostas possiveis com as categorias equivalentes.
:param letter: letra incial.
:param category: categoria.
:return: lista com as respostas | None
"""
try:
normal_answers = self.dictionary[letter][category] if category != 'nome' else []
return list({*normal_answers,
*list(itertools.chain(*[self.dictionary[letter][equiva] for equiva in EQUIVALENTS[category]]))})
except Exception as e:
log_error('Get equivalent answers', e)
return None
def auto_complete(self, letter: str) -> None:
"""
Completa os campos com suas respectivas categorias.
:param letter: letra atual.
"""
print('Auto Completando...')
for x in range(1, 13):
try:
field_input = self.driver.find_element_by_xpath(Constants.FormPanel.field_input(x)).get_attribute('value')
if not field_input:
field_category = self.driver.find_element_by_xpath(Constants.FormPanel.field_category(x)).text.lower()
if field_category:
if field_category in EQUIVALENTS and self.use_equivalence:
answer = random_from_list(self.get_equivalent_answers(letter, field_category))
else:
answer = self.get_answer(letter, field_category)
if answer:
self.driver.find_element_by_xpath(Constants.FormPanel.field_input(x)).send_keys(answer)
except (NoSuchElementException, AttributeError):
continue
except Exception as e:
log_error('Auto Complete', e)
def validate(self, letter: str) -> None:
"""
Avalia as respostas conforme o tipo do avaliador.
:param letter: letra atual.
"""
if self.driver.find_element_by_xpath(Constants.yellow_button_clickable):
def check():
"""Avaliará as respostas com base no dicionario e negará as outras."""
print('Avaliando Respostas...')
category = self.driver.find_element_by_xpath(Constants.AnswerPanel.category).text
category = re.sub('TEMA: ', '', category).lower()
for x in range(1, 15):
try:
if self.driver.find_element_by_xpath(Constants.AnswerPanel.label_status(x)).text.upper() == 'VALIDADO!':
category_answer = self.driver.find_element_by_xpath(Constants.AnswerPanel.label_answer(x)).text.lower()
if category in EQUIVALENTS and self.use_equivalence:
answers = self.get_equivalent_answers(letter, category)
else:
answers = self.dictionary[letter][category]
if category_answer not in answers:
self.driver.find_element_by_xpath(Constants.AnswerPanel.label_clickable(x)).click()
except NoSuchElementException:
continue
except Exception as e:
log_error('Validate', e)
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
def quick():
"""Apenas confirma as respostas sem verificar."""
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
def deny():
"""Invalidará todas as respostas inclusive as suas. """
print('Negando todas as respostas...')
for x in range(1, 15):
if self.driver.find_element_by_xpath(Constants.AnswerPanel.label_status(x)).text.upper() == 'VALIDADO!':
self.driver.find_element_by_xpath(Constants.AnswerPanel.label_clickable(x)).click()
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
def accept():
"""Confirma todas as respostas inclusive as erradas"""
print('Confirmando todas as respostas...')
for x in range(1, 15):
if self.driver.find_element_by_xpath(Constants.AnswerPanel.label_report(x)).text.upper() == 'DENUNCIAR':
                        self.driver.find_element_by_xpath(Constants.AnswerPanel.label_clickable(x)).click()  # click the label so the answer is confirmed
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
{'check': check, 'quick': quick, 'deny': deny, 'accept': accept}[self.validator_type]()
'''def greedy():
pass'''
def do_stop(self, letter: str) -> None:
"""
Verifica se respostas começam com a letra certa e tem o tamanho mínimo para então pressionar o botão de STOP!
:param letter: letra inicial.
"""
if self.driver.find_element_by_xpath(Constants.yellow_button_clickable):
for x in range(1, 13):
input_field = self.driver.find_element_by_xpath(Constants.FormPanel.field_input(x))\
.get_attribute('value').lower()
                if not input_field or input_field[0] != letter or len(input_field) < 2:
break
else:
print('STOP! Pressionado.')
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
def try_auto_ready(self) -> None:
"""Pressiona o botão de pronto automaticamente"""
try:
if self.auto_ready and self.driver.find_element_by_xpath(
Constants.ready_button).text.upper() == 'ESTOU PRONTO':
self.driver.find_element_by_xpath(Constants.yellow_button_clickable).click()
except NoSuchElementException:
pass
except Exception as e:
log_error('Auto Ready', e)
def afk_detector(self) -> None:
"""Detecta o balão de inatividade para confirmar a presença"""
try:
if self.driver.find_element_by_xpath(Constants.afk_button_xpath):
WebDriverWait(self.driver, 2).until(ec.element_to_be_clickable((By.XPATH, Constants.afk_button_xpath)))
self.driver.find_element_by_xpath(Constants.afk_button_xpath).click()
elif self.driver.find_elements_by_xpath(Constants.afk_box):
pass
except NoSuchElementException:
pass
except Exception as e:
log_error('AFK Detector', e)
def detect_round_end(self) -> None:
"""Detecta se é o final da rodada/partida para mostrar a colocação dos jogadores"""
try:
if self.driver.find_element_by_xpath(Constants.trophy):
self.show_round_end_rank()
except NoSuchElementException:
pass
except Exception as e:
log_error('Round End Rank', e)
def detect_button_state(self) -> None:
"""Detecta o estado do botão para executar as ações de acordo"""
try:
letter = self.find_letter()
if letter:
button = self.driver.find_element_by_xpath(Constants.yellow_button).text.upper()
if button == 'STOP!':
self.auto_complete(letter)
if self.auto_stop:
self.do_stop(letter)
elif button == 'AVALIAR' and self.validator_type != 'null':
self.validate(letter)
except NoSuchElementException:
pass
except Exception as e:
log_error('Main', e)
def close(self) -> None:
"""Fecha o navegador e o bot"""
if self.driver:
self.driver.quit()
exit()
def loop(self) -> None:
"""LOOP do BOT"""
try:
while True:
cls()
self.detect_button_state()
self.try_auto_ready()
self.show_game_info()
self.detect_round_end()
self.afk_detector()
time.sleep(3)
except KeyboardInterrupt:
cls()
print('Options:\n'
'1 - Sair da Sala.\n'
'2 - Fechar o bot.')
while True:
end_option = input('> ').strip()
if is_a_valid_id(end_option):
end_option = int(end_option)
else:
print('Opção invalida.')
break
if end_option == 1:
if self.driver.find_element_by_xpath(Constants.exit):
self.driver.find_element_by_xpath(f'{Constants.exit}/.').click()
print('Deseja entrar em outra sala? (s/n)')
while True:
rejoin_input = input('> ')
if rejoin_input.strip().lower() in 'sn':
break
else:
print('Opção invalida.')
if rejoin_input == 's':
wait = WebDriverWait(self.driver, 10)
wait.until(ec.presence_of_element_located((By.XPATH, Constants.play_button)))
time.sleep(1)
self.driver.find_element_by_xpath(Constants.play_button_clickable).click()
self.loop()
elif rejoin_input == 'n':
self.close()
elif end_option == 2:
self.close()
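# Hypothetical entry point (not part of the original module): shows how the BOT
# class above is typically wired up. The Firefox driver and the answers
# dictionary (letter -> category -> list of words) are assumptions.
if __name__ == '__main__':
    driver = webdriver.Firefox()
    answers = {'a': {'nome': ['ana'], 'fruta': ['abacaxi']}}  # illustrative content
    bot = BOT(username='MyBot', validator_type='check', auto_stop=False,
              auto_ready=True, dictionary=answers, driver=driver)
    bot.join_game(room_id=None, avatar_id=5)
    bot.loop()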
|
427018
|
from unittest.mock import create_autospec, ANY
import pytest
from operator import itemgetter
from stack.argument_processors.repo import RepoArgProcessor
from stack.commands import DatabaseConnection
from stack.exception import ArgError
FAKE_REPO_DATA = {
'name': 'fakename',
'alias': 'fakealias',
'url': 'uri:///',
'autorefresh': 0,
'assumeyes': 0,
'type': 'rpm-md',
'is_mirrorlist': 0,
'gpgcheck': 0,
'gpgkey': '',
'os': 'sles',
'pallet_id': None,
'is_enabled': 1
}
REPO_NON_REQUIRED_ARGS = [
{'is_mirrorlist': 1},
{'EXTRA_IGNORED': 'KWARG'},
{'is_mirrorlist': 1, 'EXTRA_IGNORED': 'KWARG'},
{'autorefresh': '1'},
{'type': 'yast'},
]
DEFAULT_GPGKEY_VALUE = None
class TestRepoArgumentProcessor:
@pytest.fixture
def argument_processor(self):
test_arg_processor = RepoArgProcessor()
test_arg_processor.db = create_autospec(DatabaseConnection, instance=True)
return test_arg_processor
def test_repo_not_found(self, argument_processor):
argument_processor.db.select.return_value = ()
assert argument_processor.get_repo_id('no_such_repo') == None
def test_get_repos_by_box(self, argument_processor):
fakebox_name = 'fakebox'
fakebox_id = 0
argument_processor.db.select.side_effect = [
[[fakebox_id]],
[list(FAKE_REPO_DATA.values())[0:-1]]
]
assert {fakebox_name: {FAKE_REPO_DATA['name']: FAKE_REPO_DATA}} == argument_processor.get_repos_by_box(fakebox_name)
argument_processor.db.select.assert_called_with(ANY, (fakebox_id,))
def test_insert_repo(self, argument_processor):
# setup the id check in the db to be empty
argument_processor.db.select.return_value = ()
basic_repo_data = itemgetter('name', 'alias', 'url')(FAKE_REPO_DATA)
argument_processor.insert_repo(*basic_repo_data)
# verify db call sequence
argument_processor.db.select.assert_called_once_with(ANY, (basic_repo_data[0], basic_repo_data[1]))
argument_processor.db.execute.assert_called_once_with(ANY, basic_repo_data + (DEFAULT_GPGKEY_VALUE, ))
@pytest.mark.parametrize("kwargs", REPO_NON_REQUIRED_ARGS)
def test_insert_repo_optional_args(self, argument_processor, kwargs):
''' many columns in the repos table are optional - the arg proc should ignore invalid columns '''
# setup the id check in the db to be empty
argument_processor.db.select.return_value = ()
basic_repo_data = itemgetter('name', 'alias', 'url')(FAKE_REPO_DATA)
argument_processor.insert_repo(*basic_repo_data, **kwargs)
expected_vals = []
for key in argument_processor.OPTIONAL_REPO_COLUMNS:
if key in kwargs:
expected_vals.append(kwargs[key])
# verify db call sequence only has expected key/vals
argument_processor.db.select.assert_called_once_with(ANY, (basic_repo_data[0], basic_repo_data[1]))
argument_processor.db.execute.assert_called_with(ANY, basic_repo_data + tuple(expected_vals) + (DEFAULT_GPGKEY_VALUE, ))
def test_insert_repo_already_exists(self, argument_processor):
# setup the id check in the db to be empty
argument_processor.db.select.return_value = ()
basic_repo_data = itemgetter('name', 'alias', 'url')(FAKE_REPO_DATA)
argument_processor.insert_repo(*basic_repo_data)
argument_processor.db.select.assert_called_once_with(ANY, (basic_repo_data[0], basic_repo_data[1]))
argument_processor.db.execute.assert_called_once_with(ANY, basic_repo_data + (DEFAULT_GPGKEY_VALUE, ))
# now re-attempt insert which should fail
# reset test fixture
argument_processor.db.reset_mock()
argument_processor.db.select.return_value = ((1,),)
with pytest.raises(ArgError):
argument_processor.insert_repo(*basic_repo_data)
# verify db call sequence
argument_processor.db.select.assert_called_once_with(ANY, (basic_repo_data[0], basic_repo_data[1]))
argument_processor.db.execute.assert_not_called()
|
427023
|
import json
import requests
def get_octopus_resource(uri, headers, skip_count = 0):
items = []
skip_querystring = ""
if '?' in uri:
skip_querystring = '&skip='
else:
skip_querystring = '?skip='
response = requests.get((uri + skip_querystring + str(skip_count)), headers=headers)
response.raise_for_status()
# Get results of API call
results = json.loads(response.content.decode('utf-8'))
# Store results
if hasattr(results, 'keys') and 'Items' in results.keys():
items += results['Items']
# Check to see if there are more results
if (len(results['Items']) > 0) and (len(results['Items']) == results['ItemsPerPage']):
skip_count += results['ItemsPerPage']
items += get_octopus_resource(uri, headers, skip_count)
else:
return results
# return results
return items
octopus_server_uri = 'https://YourURL'
octopus_api_key = 'API-YourAPIKey'
headers = {'X-Octopus-ApiKey': octopus_api_key}
space_name = 'Default'
project_name = "MyProject"
team_name = "MyTeam"
# Get space
uri = '{0}/api/spaces'.format(octopus_server_uri)
spaces = get_octopus_resource(uri, headers)
space = next((x for x in spaces if x['Name'] == space_name), None)
# Get project
uri = '{0}/api/{1}/projects'.format(octopus_server_uri, space['Id'])
projects = get_octopus_resource(uri, headers)
project = next((p for p in projects if p['Name'] == project_name), None)
# Get team
uri = '{0}/api/{1}/teams'.format(octopus_server_uri, space['Id'])
teams = get_octopus_resource(uri, headers)
team = next((t for t in teams if t['Name'] == team_name), None)
# Get scoped user roles
uri = '{0}/api/{1}/teams/{2}/scopeduserroles'.format(octopus_server_uri, space['Id'], team['Id'])
scoped_user_roles = get_octopus_resource(uri, headers)
for scoped_user_role in scoped_user_roles:
if project['Id'] in scoped_user_role['ProjectIds']:
scoped_user_role['ProjectIds'].remove(project['Id'])
# Update the scoped user role
print('Removing team {0} from project {1}'.format(team['Name'], project['Name']))
uri = '{0}/api/{1}/scopeduserroles/{2}'.format(octopus_server_uri, space['Id'], scoped_user_role['Id'])
response = requests.put(uri, headers=headers, json=scoped_user_role)
response.raise_for_status()
|
427026
|
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 7):
sys.exit('Sorry, Python 3.7+ is required for Lunas.')
with open('requirements.txt') as r:
requires = [l.strip() for l in r]
setup(
name='Lunas',
version='0.5.1a',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
url='https://github.com/pluiez/lunas',
license='LICENSE',
description='Building customisable data processing pipeline and data iterators for machine learning.',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
install_requires=requires,
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
|
427036
|
import rest_framework_filters as filters
from django.db.models import DateTimeField
from ..models import Contact, Comment, Category
NAME_FILTERS = ['exact', 'in', 'startswith', 'endswith', 'contains']
class CharArrayFilter(filters.BaseCSVFilter, filters.CharFilter):
pass
class CategoryFilter(filters.FilterSet):
class Meta:
model = Category
fields = {
'name': NAME_FILTERS,
'updated': ['exact', 'gt', 'gte', 'lt', 'lte'],
'created': ['exact', 'gt', 'gte', 'lt', 'lte'],
}
filter_overrides = {
DateTimeField: {'filter_class': filters.IsoDateTimeFilter}
}
class ContactFilter(filters.FilterSet):
phones__contains = CharArrayFilter(
field_name='phones', lookup_expr='contains')
emails__contains = CharArrayFilter(
field_name='emails', lookup_expr='contains')
class Meta:
model = Contact
fields = {
'name': NAME_FILTERS,
'updated': ['exact', 'gt', 'gte', 'lt', 'lte'],
'created': ['exact', 'gt', 'gte', 'lt', 'lte'],
}
filter_overrides = {
DateTimeField: {'filter_class': filters.IsoDateTimeFilter}
}
class CommentFilter(filters.FilterSet):
class Meta:
model = Comment
fields = {
'message': NAME_FILTERS,
'user': ['exact', 'in'],
'updated': ['exact', 'gt', 'gte', 'lt', 'lte'],
'created': ['exact', 'gt', 'gte', 'lt', 'lte'],
}
filter_overrides = {
DateTimeField: {'filter_class': filters.IsoDateTimeFilter}
}
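# Usage sketch (illustrative lookup values; relies on the standard django-filter
# FilterSet API that rest_framework_filters builds on):
def _example_contact_lookup():
    data = {'name__startswith': 'Jo', 'created__gte': '2020-01-01T00:00:00Z'}
    return ContactFilter(data, queryset=Contact.objects.all()).qs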
|
427054
|
def Union2SortedArrays(arr1, arr2):
    # Assumes both arrays are sorted, non-empty and contain non-negative
    # integers: the largest value sizes a counting table used to skip duplicates.
    m = arr1[-1]
    n = arr2[-1]
    ans = max(m, n)
    returner = []
    newtable = [0] * (ans + 1)
    # Copy the unique elements of arr1 (duplicates are adjacent since arr1 is sorted).
    returner.append(arr1[0])
    newtable[arr1[0]] += 1
    for i in range(1, len(arr1)):
        if arr1[i] != arr1[i - 1]:
            returner.append(arr1[i])
            newtable[arr1[i]] += 1
    # Append elements of arr2 that have not been seen yet. Note these land after
    # all of arr1's values, so the result is not sorted when the inputs interleave.
    for j in range(0, len(arr2)):
        if newtable[arr2[j]] == 0:
            returner.append(arr2[j])
            newtable[arr2[j]] += 1
    return returner
print(Union2SortedArrays([1, 2, 3, 4, 5], [1, 2, 3]))
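# Alternative sketch for comparison: a classic two-pointer merge keeps the
# union sorted and needs no value-indexed table, so it also handles negative
# or sparse values. Written here only to illustrate the trade-off.
def union_two_pointer(arr1, arr2):
    i = j = 0
    out = []
    while i < len(arr1) and j < len(arr2):
        if arr1[i] < arr2[j]:
            candidate = arr1[i]
            i += 1
        elif arr2[j] < arr1[i]:
            candidate = arr2[j]
            j += 1
        else:
            candidate = arr1[i]
            i += 1
            j += 1
        if not out or out[-1] != candidate:
            out.append(candidate)
    for rest, k in ((arr1, i), (arr2, j)):
        while k < len(rest):
            if not out or out[-1] != rest[k]:
                out.append(rest[k])
            k += 1
    return out

print(union_two_pointer([1, 2, 3, 4, 5], [1, 2, 3]))  # [1, 2, 3, 4, 5]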
|
427061
|
import os
from collections import defaultdict
from copy import deepcopy
graph = defaultdict(set)
class Param:
layer = None
kernel_height = None
kernel_width = None
pad = 0
pad_x = None
pad_y = None
nhidden = None
nchannel = None
threshold = None
stride = 1
kernel_size = 1
def __init__(self, *param, **kwargs):
for dic in param:
for key in dic:
setattr(self, key, dic[key])
for key in kwargs:
setattr(self, key, kwargs[key])
self.NotJoin = []
if self.pad_x == None:
self.pad_x = self.pad
self.NotJoin.append("pad_x")
if self.pad_y == None:
self.pad_y = self.pad
self.NotJoin.append("pad_y")
if self.kernel_width == None:
self.kernel_width = self.kernel_size
self.NotJoin.append("kernel_width")
if self.kernel_height == None:
self.kernel_height = self.kernel_size
self.NotJoin.append("kernel_height")
def join(self):
cmd = ""
items = self.__dict__.items()
for item in items:
if item[0] in self.NotJoin:
continue
if item[0] == "NotJoin":
continue
if item[0] == "layer":
continue
if item[0] == "pad_x" and item[1] == 0:
continue
if item[0] == "pad_y" and item[1] == 0:
continue
if item[0] == "kernel_width" and item[1] == 1:
continue
if item[0] == "kernel_height" and item[1] == 1:
continue
cmd += " %s=%s\n" % (item[0], str(item[1]))
return cmd
class Node:
cnt = [0]
def __init__(self):
self.idx = self.cnt[0]
self.cnt[0] += 1
self.ch = -1
self.x = -1
self.y = -1
self.has_shape = False
def SetShape(self, ch, y, x):
self.ch = ch
self.x = x
self.y = y
self.has_shape = True
class Layer:
cnt = defaultdict(int)
def __init__(self, param):
self.attached = None
self.in_nodes = []
self.out_nodes = []
self.param = param
self.cnt[param.layer] += 1
self.idx = self.cnt[param.layer]
self.name = "%s_%s" % (self.param.layer, str(self.idx))
def Print(self):
cmd_in = ",".join([str(n.idx) for n in self.in_nodes])
cmd_out = ",".join([str(n.idx) for n in self.out_nodes])
if len(cmd_out) == 0:
cmd_out = cmd_in
cmd = "layer[%s->%s] = %s:%s\n%s" % \
(cmd_in, cmd_out, self.param.layer, self.name, self.param.join())
if self.attached != None:
cmd += self.attached.Print()
return cmd
def VZ(self):
info = ["shape=box", "style=filled", "fixedsize=true", "width=1.1", "height=0.6798"]
if self.param.layer == "conv":
info.append("color=royalblue1")
info.append('label="convolution\n%dx%d/%d, %d"' % (self.param.kernel_height, \
self.param.kernel_width, self.param.stride, self.param.nchannel))
elif self.param.layer == "fullc":
info.append("color=royalblue1")
info.append('label="fullc\n%d"' % self.param.nhidden)
elif self.param.layer == "batch_norm":
info.append("color=orchid1")
info.append('label="batch_norm"')
elif "concat" in self.param.layer:
info.append("color=seagreen1")
info.append('label=%s' % self.param.layer)
elif self.param.layer == "split":
info.append("color=seagreen1")
info.append('label=%s' % self.param.layer)
elif self.param.layer == "flatten":
info.append("color=seagreen1")
info.append('label=%s' % self.param.layer)
elif "pooling" in self.param.layer:
info.append("color=firebrick2")
info.append('label="%s\n%dx%d/%d"' % (self.param.layer, self.param.kernel_height, \
self.param.kernel_width, self.param.stride))
elif "elu" in self.param.layer:
info.append("color=salmon")
info.append('label=%s' % self.param.layer)
else:
info.append("color=olivedrab1")
info.append('label=%s' % self.param.layer)
return "%s [%s];\n" % (self.name, ",".join(info))
def AddConnection(conn1, conn2):
if conn2.param.layer == "dropout":
conn2.in_nodes = conn1.out_nodes
conn2.out_nodes = conn1.out_nodes
conn1.attached = conn2
return
nd = Node()
conn1.out_nodes.append(nd)
conn2.in_nodes.append(nd)
global graph
graph[conn1].add(conn2)
def ConnectToData(conn, data):
conn.in_nodes.append(data)
global graph
graph[data].add(conn)
def CheckConnection(conn):
if conn.param.layer == "fullc":
assert(conn.param.nhidden != None)
assert(len(conn.out_nodes) == 1)
assert(len(conn.in_nodes) == 1)
if conn.in_nodes[0].has_shape:
assert(conn.in_nodes[0].ch == 1 and conn.in_nodes[0].y == 1)
conn.out_nodes[0].SetShape(1, 1, conn.param.nhidden)
elif conn.param.layer == "flatten":
assert(len(conn.out_nodes) == 1)
assert(len(conn.in_nodes) == 1)
if conn.in_nodes[0].has_shape:
conn.out_nodes[0].SetShape(1, 1, conn.in_nodes[0].ch * conn.in_nodes[0].x * conn.in_nodes[0].y)
elif conn.param.layer == "max_pooling" or conn.param.layer == "avg_pooling":
assert((conn.param.kernel_height != None and conn.param.kernel_width != None) or \
conn.param.kernel_size != None)
assert(len(conn.out_nodes) == 1)
assert(len(conn.in_nodes) == 1)
if conn.in_nodes[0].has_shape:
ch = conn.in_nodes[0].ch
x = conn.in_nodes[0].x
y = conn.in_nodes[0].y
            x = min(x + 2 * conn.param.pad_x - conn.param.kernel_width + conn.param.stride - 1, \
                    x + 2 * conn.param.pad_x - 1) // conn.param.stride + 1
            y = min(y + 2 * conn.param.pad_y - conn.param.kernel_height + conn.param.stride - 1, \
                    y + 2 * conn.param.pad_y - 1) // conn.param.stride + 1
assert(x > 0)
assert(y > 0)
conn.out_nodes[0].SetShape(ch, y, x)
elif conn.param.layer == "conv":
assert((conn.param.kernel_height != None and conn.param.kernel_width != None) or \
conn.param.kernel_size != None)
assert(len(conn.out_nodes) == 1)
assert(len(conn.in_nodes) == 1)
if conn.in_nodes[0].has_shape:
assert(conn.param.nchannel != None)
ch = conn.param.nchannel
x = conn.in_nodes[0].x
y = conn.in_nodes[0].y
            x = (x + 2 * conn.param.pad_x - conn.param.kernel_width) // conn.param.stride + 1
            y = (y + 2 * conn.param.pad_y - conn.param.kernel_height) // conn.param.stride + 1
assert(x > 0)
assert(y > 0)
conn.out_nodes[0].SetShape(ch, y, x)
elif conn.param.layer == "split":
assert(len(conn.in_nodes) == 1)
assert(len(conn.out_nodes) >= 1)
if conn.in_nodes[0].has_shape:
for nd in conn.out_nodes:
nd.SetShape(conn.in_nodes[0].ch, conn.in_nodes[0].y, conn.in_nodes[0].x)
elif conn.param.layer == "concat":
assert(len(conn.in_nodes) > 0)
if conn.in_nodes[0].has_shape:
x = 0
y = conn.in_nodes[0].y
ch = conn.in_nodes[0].ch
            for i in range(len(conn.in_nodes)):
assert(conn.in_nodes[i].ch == conn.in_nodes[0].ch)
assert(conn.in_nodes[i].y == conn.in_nodes[0].y)
x += conn.in_nodes[i].x
conn.out_nodes[0].SetShape(ch, y, x)
elif conn.param.layer == "ch_concat":
assert(len(conn.in_nodes) > 0)
if conn.in_nodes[0].has_shape:
x = conn.in_nodes[0].x
y = conn.in_nodes[0].y
ch = 0
            for i in range(len(conn.in_nodes)):
assert(conn.in_nodes[i].x == conn.in_nodes[0].x)
assert(conn.in_nodes[i].y == conn.in_nodes[0].y)
ch += conn.in_nodes[i].ch
conn.out_nodes[0].SetShape(ch, y, x)
elif conn.param.layer == "flatten":
if conn.in_nodes[0].has_shape:
x = conn.in_nodes[0].x
y = conn.in_nodes[0].y
ch = conn.in_nodes[0].ch
conn.out_nodes[0].SetShape(1, 1, x * y * ch)
else:
if conn.in_nodes[0].has_shape:
x = conn.in_nodes[0].x
y = conn.in_nodes[0].y
ch = conn.in_nodes[0].ch
if len(conn.out_nodes) > 0:
conn.out_nodes[0].SetShape(ch, y, x)
def ConvFactory(nchannel, kernel_size=1, pad=0, stride = 1, bn = True, act = "relu"):
tmp = []
tmp.append(Layer(Param({"layer":"conv", "pad":pad, "nchannel":nchannel, "stride":stride, "kernel_size":kernel_size})))
if bn:
tmp.append(Layer(Param({"layer":"batch_norm"})))
tmp.append(Layer(Param({"layer":act})))
    for i in range(1, len(tmp)):
AddConnection(tmp[i-1], tmp[i])
return tmp
def DFS(layer, table, seq):
out_layers = graph[layer]
for l in out_layers - table:
DFS(l, table, seq)
if layer not in table:
table.add(layer)
seq.append(layer)
def Generate(layer):
table = set([])
seq = []
DFS(layer, table, seq)
seq = seq[::-1]
idx = 1
table = set([])
for c in seq:
for nd in c.out_nodes:
if nd not in table:
table.add(nd)
nd.idx = idx
idx += 1
conf = ""
for c in seq:
print "Init %s" % c.name
CheckConnection(c)
try:
print "output size: %d-%d-%d" % (c.out_nodes[0].ch, c.out_nodes[0].y, c.out_nodes[0].x)
except:
pass
conf += c.Print()
return conf
def Graphviz(layer, show_size=True):
table = set([])
seq = []
DFS(layer, table, seq)
seq = seq[::-1]
info = "data [shape=box, fixedsize=true, width=1.1, height=0.6798];\n"
dot = "digraph G {\n"
if show_size == True:
dot += '%s -> data [dir="back", label="%sx%sx%s"];\n' % (seq[0].name,
str(layer.in_nodes[0].ch), str(layer.in_nodes[0].y), str(layer.in_nodes[0].x))
else:
dot += '%s -> data [dir="back"];\n' % seq[0].name
for c in seq:
info += c.VZ()
conns = graph[c]
for nxt in conns:
sz = ""
if show_size == True:
try:
sz = "%dx%dx%d" % (c.out_nodes[0].ch, c.out_nodes[0].y, c.out_nodes[0].x)
except:
pass
cmd = '%s -> %s [dir="back", label="%s"];\n' % (nxt.name, c.name, sz)
dot += cmd
dot += info
dot += "}\n"
return dot
#############################################################################################################
#
# Modify your network from here
# Advise for write factory: return a list, first element is input layer, last element is output layer
#
#############################################################################################################
def FactoryInception(ch_1x1, ch_3x3r, ch_3x3, ch_3x3dr, ch_3x3d, ch_proj, act="rrelu", stride = 1):
param = {}
split = Layer(Param({"layer":"split"}))
concat = Layer(Param({"layer":"ch_concat"}))
#1x1
if stride != 2:
# Manual assemble layers
param["layer"] = "conv"
param["stride"] = 1
param["pad"] = 0
param["kernel_size"] = 1
param["nchannel"] = ch_1x1
conv1x1 = Layer(Param(param))
bn1x1 = Layer(Param({"layer":"batch_norm"}))
act1x1 = Layer(Param({"layer":act}))
AddConnection(split, conv1x1)
AddConnection(conv1x1, bn1x1)
AddConnection(bn1x1, act1x1)
AddConnection(act1x1, concat)
#3x3reduce + 3x3
# Use exist factory
conv3x3r = ConvFactory(stride= 1, pad = 0, kernel_size = 1, nchannel = ch_3x3r, bn = True, act = act)
conv3x3 = ConvFactory(stride= stride, pad = 1, kernel_size = 3, nchannel = ch_3x3, bn = True, act = act)
AddConnection(split, conv3x3r[0])
AddConnection(conv3x3r[-1], conv3x3[0])
AddConnection(conv3x3[-1], concat)
#double 3x3reduce + double 3x3
conv3x3dr = ConvFactory(stride= 1, pad = 0, kernel_size = 1, nchannel = ch_3x3dr, bn = True, act = act)
conv3x3d = ConvFactory(stride = stride, pad = 1, kernel_size = 3, nchannel = ch_3x3d, bn = True, act = act)
AddConnection(split, conv3x3dr[0])
AddConnection(conv3x3dr[-1], conv3x3d[0])
AddConnection(conv3x3d[-1], concat)
# pool + project
if stride == 1:
param["layer"] = "avg_pooling"
param["stride"] = 1
param["pad"] = 1
param["kernel_size"] = 3
del(param["nchannel"])
pool = Layer(Param(param))
param["layer"] = "conv"
param["stride"] = 1
param["pad"] = 0
param["kernel_size"] = 1
param["nchannel"] = ch_proj
proj2 = Layer(Param(param))
bn2 = Layer(Param({"layer":"batch_norm"}))
act2 = Layer(Param({"layer":"relu"}))
AddConnection(split, pool)
AddConnection(pool, proj2)
AddConnection(proj2, bn2)
AddConnection(bn2, act2)
AddConnection(act2, concat)
else:
param["layer"] = "max_pooling"
param["stride"] = stride
param["pad"] = 0
param["kernel_size"] = 3
try:
del(param["nchannel"])
except:
pass
pool = Layer(Param(param))
AddConnection(split, pool)
AddConnection(pool, concat)
return [split, concat]
Factory = FactoryInception
data = Node()
data.SetShape(3,224,224)
conv1 = ConvFactory(kernel_size = 7, stride = 2, pad = 3, nchannel = 64, bn = True, act = "rrelu")
pool1 = Layer(Param({"layer":"max_pooling", "kernel_size":3, "stride":2}))
conv2a = ConvFactory(kernel_size = 1, stride = 1, pad = 0, nchannel = 64, bn = True, act = "rrelu")
conv2b = ConvFactory(kernel_size = 3, stride = 1, pad = 1, nchannel = 192, bn = True, act = "rrelu")
pool2 = Layer(Param({"layer":"max_pooling", "kernel_size":3, "stride":2}))
in3a = Factory(64, 64, 64, 64, 96, 32)
in3b = Factory(64, 64, 96, 64, 96, 64)
in3c = Factory(0, 128, 160, 64, 96, 0, stride=2)
in4a = Factory(224, 64, 96, 96, 128, 128)
in4b = Factory(192, 96, 128, 96, 128, 128)
in4c = Factory(160, 128, 160, 128, 160, 128)
in4d = Factory(96, 128, 192, 160, 192, 128)
in4e = Factory(0, 128, 192, 192, 256, 0, stride = 2)
in5a = Factory(352, 192, 320, 160, 224, 128)
in5b = Factory(352, 192, 320, 192, 224, 128)
avg = Layer(Param({"layer":"avg_pooling", "kernel_size":7, "stride":1}))
flatten = Layer(Param({"layer":"flatten"}))
fc = Layer(Param({"layer":"fullc", "nhidden":1000}))
loss = Layer(Param({"layer":"softmax"}))
ConnectToData(conv1[0], data)
AddConnection(conv1[-1], pool1)
AddConnection(pool1, conv2a[0])
AddConnection(conv2a[-1], conv2b[0])
AddConnection(conv2b[-1], pool2)
AddConnection(pool2, in3a[0])
AddConnection(in3a[-1], in3b[0])
AddConnection(in3b[-1], in3c[0])
AddConnection(in3c[-1], in4a[0])
AddConnection(in4a[-1], in4b[0])
AddConnection(in4b[-1], in4c[0])
AddConnection(in4c[-1], in4d[0])
AddConnection(in4d[-1], in4e[0])
AddConnection(in4e[-1], in5a[0])
AddConnection(in5a[-1], in5b[0])
AddConnection(in5b[-1], avg)
AddConnection(avg, flatten)
AddConnection(flatten, fc)
AddConnection(fc, loss)
conf = ""
conf += Generate(conv1[0])
fo = open("inception.conf", "w")
fo.write(conf)
fo.close()
dot = Graphviz(conv1[0])
fw = open("inception.gv", "w")
fw.write(dot)
fw.close()
os.system("dot -Tpng inception.gv -o inception.png")
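# Small illustrative follow-up (file name is hypothetical): the same helpers can
# describe a much simpler LeNet-style chain. Generate only walks the layers
# wired below, so this does not disturb the Inception graph written above.
small_data = Node()
small_data.SetShape(1, 28, 28)
small_conv = ConvFactory(kernel_size=5, stride=1, pad=2, nchannel=8, bn=True, act="relu")
small_pool = Layer(Param({"layer": "max_pooling", "kernel_size": 2, "stride": 2}))
small_flat = Layer(Param({"layer": "flatten"}))
small_fc = Layer(Param({"layer": "fullc", "nhidden": 10}))
small_loss = Layer(Param({"layer": "softmax"}))
ConnectToData(small_conv[0], small_data)
AddConnection(small_conv[-1], small_pool)
AddConnection(small_pool, small_flat)
AddConnection(small_flat, small_fc)
AddConnection(small_fc, small_loss)
with open("lenet_example.conf", "w") as f_example:
    f_example.write(Generate(small_conv[0]))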
|
427071
|
import abc
class MediaLoader(metaclass=abc.ABCMeta):
@abc.abstractmethod
def play(self):
pass
    @property
    @abc.abstractmethod
    def ext(self):
        pass
@classmethod
def __subclasshook__(cls, C):
if cls is MediaLoader:
attrs = set(dir(C))
if set(cls.__abstractmethods__) <= attrs:
return True
return NotImplemented
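# Quick demonstration of the __subclasshook__ above (class name is illustrative):
# any class that provides both abstract names counts as a virtual subclass,
# even without inheriting from MediaLoader.
class Ogg:
    ext = '.ogg'
    def play(self):
        print('playing an ogg file')

print(issubclass(Ogg, MediaLoader))    # True - matched via __subclasshook__
print(isinstance(Ogg(), MediaLoader))  # True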
|
427082
|
import board
import busio
i2c = busio.I2C(board.SCL, board.SDA)
count = 0
# Wait for I2C lock
while not i2c.try_lock():
pass
# Scan for devices on the I2C bus
print("Scanning I2C bus")
for x in i2c.scan():
print(hex(x))
count += 1
print("%d device(s) found on I2C bus" % count)
# Release the I2C bus
i2c.unlock()
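# Hedged variant of the same scan: wrapping the bus access in try/finally makes
# sure the lock is released even if scanning raises.
while not i2c.try_lock():
    pass
try:
    devices = i2c.scan()
    print("%d device(s) found on I2C bus" % len(devices))
finally:
    i2c.unlock()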
|
427098
|
import redis
class PlayerStatus:
def __init__(self):
self.redis_connection = redis.StrictRedis()
self.key = "score_1"
def accumulate_points(self, new_points):
current_score = int(self.redis_connection.get(self.key) or 0)
score = current_score + new_points
self.redis_connection.set(self.key, score)
@property
def points(self):
return int(self.redis_connection.get(self.key) or 0)
@points.setter
def points(self, new_points):
self.redis_connection.set(self.key, new_points)
"""
player_status = PlayerStatus()
player_status.accumulate_points(20)
player_status.points += 20
player_status.points = 20
print(player_status.points)
"""
|
427116
|
import pytest
from qcodes.utils.helpers import is_function
def test_non_function():
assert not is_function(0, 0)
assert not is_function('hello!', 0)
assert not is_function(None, 0)
def test_function():
def f0():
raise RuntimeError('function should not get called')
def f1(a):
raise RuntimeError('function should not get called')
def f2(a, b):
raise RuntimeError('function should not get called')
assert is_function(f0, 0)
assert is_function(f1, 1)
assert is_function(f2, 2)
assert not (is_function(f0, 1) or is_function(f0, 2))
assert not (is_function(f1, 0) or is_function(f1, 2))
assert not (is_function(f2, 0) or is_function(f2, 1))
# make sure we only accept valid arg_count
with pytest.raises(TypeError):
is_function(f0, 'lots')
with pytest.raises(TypeError):
is_function(f0, -1)
class AClass:
def method_a(self):
raise RuntimeError('function should not get called')
def method_b(self, v):
raise RuntimeError('function should not get called')
async def method_c(self, v):
raise RuntimeError('function should not get called')
def test_methods():
a = AClass()
assert is_function(a.method_a, 0)
assert not is_function(a.method_a, 1)
assert is_function(a.method_b, 1)
assert is_function(a.method_c, 1, coroutine=True)
def test_type_cast():
assert is_function(int, 1)
assert is_function(float, 1)
assert is_function(str, 1)
assert not (is_function(int, 0) or is_function(int, 2))
assert not (is_function(float, 0) or is_function(float, 2))
assert not (is_function(str, 0) or is_function(str, 2))
def test_coroutine_check():
def f_sync():
raise RuntimeError('function should not get called')
assert is_function(f_sync, 0)
assert is_function(f_sync, 0, coroutine=False)
async def f_async():
raise RuntimeError('function should not get called')
assert not is_function(f_async, 0, coroutine=False)
assert is_function(f_async, 0, coroutine=True)
assert not is_function(f_async, 0)
|
427124
|
from polyglotdb import CorpusContext
from polyglotdb.syllabification.probabilistic import split_ons_coda_prob, split_nonsyllabic_prob, norm_count_dict
from polyglotdb.syllabification.maxonset import split_ons_coda_maxonset, split_nonsyllabic_maxonset
from polyglotdb.syllabification.main import syllabify
def test_find_onsets(timed_config):
syllabics = ['ae', 'aa', 'uw', 'ay', 'eh']
expected_onsets = {('k',), tuple(), ('d',), ('t',), ('g',)}
expected_freqs = {('k',): 2, tuple(): 3, ('d',): 1, ('t',): 1, ('g',): 1}
with CorpusContext(timed_config) as c:
c.encode_syllabic_segments(syllabics)
assert c.has_syllabics
onsets = c.find_onsets()
assert (set(onsets.keys()) == expected_onsets)
assert (onsets == expected_freqs)
def test_find_codas(timed_config):
expected_codas = {('t', 's'), ('r',), ('t',), ('g', 'z'), tuple(), ('s',)}
expected_freqs = {('t', 's'): 1, tuple(): 2, ('r',): 2, ('t',): 1, ('g', 'z'): 1, ('s',): 1}
with CorpusContext(timed_config) as c:
codas = c.find_codas()
assert (set(codas.keys()) == expected_codas)
assert (codas == expected_freqs)
def test_probabilistic_syllabification(acoustic_config, timed_config, acoustic_syllabics):
with CorpusContext(timed_config) as c:
onsets = norm_count_dict(c.find_onsets())
codas = norm_count_dict(c.find_codas())
print(onsets)
print(codas)
expected = [(['z', 'g'], 1), (['g', 'z'], 2), (['t', 's', 'k'], 2), (['t', 'd'], 1)]
for s, e in expected:
result = split_ons_coda_prob(s, onsets, codas)
print(s, e, result)
assert (e == result)
with CorpusContext(acoustic_config) as c:
c.reset_class('syllabic')
c.encode_syllabic_segments(acoustic_syllabics)
assert c.has_syllabics
onsets = norm_count_dict(c.find_onsets())
codas = norm_count_dict(c.find_codas())
# nonsyllabic
expected = [(['d', 'g', 'z'], 2), (['sh'], 0)]
for s, e in expected:
result = split_nonsyllabic_prob(s, onsets, codas)
print(s, e, result)
assert (e == result)
def test_maxonset_syllabification(timed_config):
with CorpusContext(timed_config) as c:
onsets = set(c.find_onsets().keys())
print(onsets)
expected = [(['z', 'g'], 1), (['g', 'z'], 2), (['t', 's', 'k'], 2), (['t', 'd'], 1)]
for s, e in expected:
result = split_ons_coda_maxonset(s, onsets)
print(s, e, result)
assert (e == result)
# nonsyllabic
expected = [(['d', 'g', 'z'], 1), (['sh'], 0)]
for s, e in expected:
result = split_nonsyllabic_maxonset(s, onsets)
print(s, e, result)
assert (e == result)
def test_syllabify():
expected = {('n', 'ay', 'iy', 'v'): [{'label': 'n.ay'}, {'label': 'iy.v'}],
('l', 'ow', 'w', 'er'): [{'label': 'l.ow'}, {'label': 'w.er'}]}
s = ['ay', 'iy', 'ow', 'er']
o = [('n',), ('v',), ('w',), ('l',)]
c = [('v',)]
for k, v in expected.items():
test = syllabify(k, s, o, c, 'maxonset')
assert (len(test) == len(v))
for i, x in enumerate(v):
for k2, v2 in x.items():
assert (v2 == test[i][k2])
def test_encode_syllables_acoustic(acoustic_config):
syllabics = ['ae', 'aa', 'uw', 'ay', 'eh', 'ih', 'aw', 'ey', 'iy',
'uh', 'ah', 'ao', 'er', 'ow']
with CorpusContext(acoustic_config) as c:
c.encode_syllabic_segments(syllabics)
assert c.has_syllabics
c.encode_syllables()
assert c.has_syllables
q = c.query_graph(c.phone).filter(c.phone.label == 'dh')
q = q.filter(c.phone.begin == c.phone.syllable.begin)
q = q.order_by(c.phone.begin)
q = q.columns(c.phone.label, c.phone.begin)
results = q.all()
assert (len(results) == 5)
q = c.query_graph(c.syllable)
q = q.columns(c.syllable.phone.filter_by_subset('onset').label.column_name('onset'),
c.syllable.phone.filter_by_subset('nucleus').label.column_name('nucleus'),
c.syllable.phone.filter_by_subset('coda').label.column_name('coda'))
for r in q.all():
assert (all(x not in syllabics for x in r['onset']))
assert (all(x in syllabics for x in r['nucleus']))
assert (all(x not in syllabics for x in r['coda']))
def test_encode_stress_from_word_property(acoustic_utt_config, stress_pattern_file):
with CorpusContext(acoustic_utt_config) as c:
c.enrich_lexicon_from_csv(stress_pattern_file)
c.encode_stress_from_word_property('stress_pattern')
q = c.query_graph(c.syllable)
q = q.filter(c.syllable.stress == '1')
q = q.columns(c.syllable.label.column_name('syllable'),
c.syllable.word.label.column_name('word'))
results = q.all()
print(q.all())
assert len(results) == 8
for r in q.all():
assert r['word'] in ['words', 'acoustic', 'intensity', 'corpus']
if r['word'] == 'words':
assert r['syllable'] == 'w.er.d.z'
elif r['word'] == 'acoustic':
assert r['syllable'] == 'k.uw'
elif r['word'] == 'intensity':
assert r['syllable'] == 't.eh.n'
elif r['word'] == 'corpus':
assert r['syllable'] == 'k.er.p'
|
427189
|
import atexit
import click
import logging
import orrb
import orrb.utils as utils
import os
import sys
import time
import numpy as np
from mpi4py import MPI
from queue import Queue
def _load_states():
return np.loadtxt(utils.package_relative_path('assets/states/qpos.csv'), delimiter=',')
def _build_renderer(num_gpus, num_workers, base_port, mpi_rank, mpi_size,
render_depth, render_segmentation, render_normals):
config = orrb.RemoteRendererConfig()
config.camera_names = ['vision_cam_left', 'vision_cam_top', 'vision_cam_right']
config.image_width = 200
config.image_height = 200
config.renderer_version = orrb.get_renderer_version()
config.model_xml_path = 'dactyl.xml'
config.model_mapping_path = 'dactyl.mapping'
config.renderer_config_path = 'dactyl.renderer_config.json'
config.asset_basedir = utils.package_relative_path('assets')
config.render_depth = render_depth
config.render_normals = render_normals
config.render_segmentation = render_segmentation
server_configs = utils.build_server_configs(num_gpus, num_workers, base_port, mpi_rank,
mpi_size)
return config, orrb.RemoteRenderer('OrrbRenderer0', server_configs, config)
@click.command()
@click.option('--num-gpus', type=int, default=1)
@click.option('--workers-per-gpu', type=int, default=1)
@click.option('--iterations', type=int, default=20)
@click.option('--base-port', type=int, default=7000)
@click.option('--render-depth', type=bool, default=False)
@click.option('--render-normals', type=bool, default=False)
@click.option('--render-segmentation', type=bool, default=False)
def main(num_gpus, workers_per_gpu, iterations, base_port,
render_depth, render_normals, render_segmentation):
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
config, renderer = _build_renderer(num_gpus, workers_per_gpu, base_port, mpi_rank, mpi_size,
render_depth, render_normals, render_segmentation)
states = _load_states()
queue = Queue()
cameras_count = len(config.camera_names)
batch_size = len(states)
seed = mpi_rank * 11713
all_workers = num_gpus * workers_per_gpu
assert all_workers % mpi_size == 0
local_workers = all_workers // mpi_size
renderer.start()
atexit.register(utils.renderer_closer, renderer)
sleep_time = 5.0 + all_workers * 0.5
logging.info(f'Sleeping for: {sleep_time}s.')
time.sleep(sleep_time)
logging.info(f'Queueing {iterations} iterations + 1 warmup, on {local_workers} local workers.')
for _ in range((iterations + 1) * local_workers):
renderer.render_batch_async(utils.build_batch(states, seed), queue)
seed += batch_size
logging.info('Warmup pass.')
for _ in range(local_workers):
result = queue.get()
queue.task_done()
logging.info('Warmup done. Waiting on a barrier.')
mpi_comm.Barrier()
logging.info('Starting benchmark.')
start_time = time.time()
for _ in range(iterations * local_workers):
result = queue.get()
queue.task_done()
logging.info('Done. Waiting on a barrier.')
mpi_comm.Barrier()
delta_time = time.time() - start_time
total_frames = all_workers * iterations * batch_size * cameras_count
fps = float(total_frames) / delta_time
    if mpi_rank == 0:
logging.info(f'{total_frames} frames in {delta_time}s : {fps} fps.')
if __name__ == '__main__':
utils.setup_logging()
main()
|
427213
|
import angr
import time
# pylint: disable=arguments-differ,unused-argument
class gettimeofday(angr.SimProcedure):
def run(self, tv, tz):
if self.state.solver.is_true(tv == 0):
return -1
if angr.options.USE_SYSTEM_TIMES in self.state.options:
flt = time.time()
            result = {'tv_sec': int(flt), 'tv_usec': int(flt % 1 * 1000000)}  # tv_usec holds only the sub-second remainder
else:
result = {
'tv_sec': self.state.solver.BVS('tv_sec', self.arch.bits, key=('api', 'gettimeofday', 'tv_sec')),
'tv_usec': self.state.solver.BVS('tv_usec', self.arch.bits, key=('api', 'gettimeofday', 'tv_usec')),
}
self.state.mem[tv].struct.timeval = result
return 0
class clock_gettime(angr.SimProcedure):
def run(self, which_clock, timespec_ptr):
if not self.state.solver.is_true(which_clock == 0):
raise angr.errors.SimProcedureError("clock_gettime doesn't know how to deal with a clock other than CLOCK_REALTIME")
if self.state.solver.is_true(timespec_ptr == 0):
return -1
if angr.options.USE_SYSTEM_TIMES in self.state.options:
flt = time.time()
            result = {'tv_sec': int(flt), 'tv_nsec': int(flt % 1 * 1000000000)}  # tv_nsec holds only the sub-second remainder
else:
result = {
'tv_sec': self.state.solver.BVS('tv_sec', self.arch.bits, key=('api', 'clock_gettime', 'tv_sec')),
'tv_nsec': self.state.solver.BVS('tv_nsec', self.arch.bits, key=('api', 'clock_gettime', 'tv_nsec')),
}
self.state.mem[timespec_ptr].struct.timespec = result
return 0
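# Usage sketch (the binary path is illustrative): SimProcedures like the two
# above are normally installed by hooking the matching symbols on a project.
if __name__ == '__main__':
    proj = angr.Project('/bin/true', auto_load_libs=False)
    proj.hook_symbol('gettimeofday', gettimeofday())
    proj.hook_symbol('clock_gettime', clock_gettime())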
|
427215
|
from typing import Dict, List, Optional
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import QGridLayout, QGroupBox, QApplication
from PyQt5.uic import loadUi
from .option_items import (
CapsuleOptionItem,
EnumOptionItem,
FloatOptionItem,
IntOptionItem,
BoolOptionItem
)
from brainframe.api.bf_codecs import CapsuleOption
from brainframe_qt.api_utils import api
from brainframe_qt.ui.dialogs.capsule_configuration import capsule_utils
from brainframe_qt.ui.resources.paths import qt_ui_paths
class BaseCapsuleOptionsWidget(QGroupBox):
capsule_options_changed = pyqtSignal()
"""Alerts the dialog holding the options widget that the current options
have been modified by the user, such options may or may not be valid
Connected to:
- CapsuleConfigDialog -- Dynamic
[parent].is_inputs_valid
"""
def __init__(self, parent=None):
super().__init__(parent=parent)
loadUi(qt_ui_paths.capsule_options_ui, self)
self.option_items: List[CapsuleOptionItem] = []
"""Only capsule-specific option items. This does not include items that
exist for all capsules, such as 'capsule_enabled'."""
self.all_items: List[CapsuleOptionItem] = []
"""All option items, including special cases such as
self.enabled_option"""
        self.enabled_option: Optional[BoolOptionItem] = None
"""This holds the option for enabling and disabling a capsule."""
self.grid_layout: QGridLayout = self.grid.layout()
self.current_capsule = None
def change_capsule(self, capsule_name):
"""When an item on the QListWidget is selected
:param capsule_name: The name of the capsule to edit options for
"""
self._reset()
self.current_capsule = capsule_name
capsule = api.get_capsule(capsule_name)
# Change name of capsule
title = f"[{capsule_utils.pretty_snakecase(capsule_name)}] "
title += self.tr("Options")
self.setTitle(title)
# Set capsule description
capsule_description = capsule.description or ""
self.capsule_description_area.setVisible(bool(capsule_description))
self.capsule_description_label.setText(capsule_description)
# Add configuration that every capsule _always_ has
self.enabled_option = self._add_option(
name=self.tr("Capsule Enabled"),
type_=CapsuleOption.Type.BOOL,
value=api.is_capsule_active(capsule_name, stream_id=None),
constraints={})
self.all_items.append(self.enabled_option)
# Add options specific to this capsule
option_values = api.get_capsule_option_vals(capsule_name)
for option_name, option in capsule.options.items():
item = self._add_option(
name=option_name,
type_=option.type,
value=option_values[option_name],
constraints=option.constraints,
description=option.description)
# Keep track of the option
self.option_items.append(item)
self.all_items.append(item)
def options_valid(self):
"""Returns True if none of the options are invalid.
Essentially, it checks the validator for each option and verifies that
they are all within the correct types and ranges.
"""
return all(option.is_valid() for option in self.all_items)
def _add_option(self, name: str, type_: CapsuleOption.Type, value,
constraints: Dict, description: Optional[str] = None):
parent = self
args = name, value, constraints, description, parent
if type_ is CapsuleOption.Type.BOOL:
item = BoolOptionItem(*args)
elif type_ is CapsuleOption.Type.ENUM:
item = EnumOptionItem(*args)
elif type_ is CapsuleOption.Type.FLOAT:
item = FloatOptionItem(*args)
elif type_ is CapsuleOption.Type.INT:
item = IntOptionItem(*args)
else:
message = QApplication.translate(
"BaseCapsuleOptionsWidget",
"The capsule option of name {} has an invalid type of type {}")
message = message.format(name, type_)
raise TypeError(message)
_TOOLTIP_COL = 0
_NAME_COL = 1
_VALUE_COL = 2
_SPACER_COL = 3
_ENABLE_COL = 4
row = len(self.all_items) + 2
self.grid_layout.addWidget(item.label_widget, row, _NAME_COL)
if item.tooltip_button:
self.grid_layout.addWidget(item.tooltip_button, row, _TOOLTIP_COL)
self.grid_layout.addWidget(item.option_widget, row, _VALUE_COL)
self.grid_layout.addWidget(item.override_checkbox, row, _ENABLE_COL,
Qt.AlignRight)
# Whenever this option is changed, make sure that our signal emits
item.change_signal.connect(self._on_inputs_changed)
return item
def apply_changes(self, stream_id=None):
"""This will send changes to the server for this capsule
Connected to:
- QButtonBox -- Dynamic
[child].button(QDialogButtonBox.Apply).clicked
"""
# Make sure that the options are valid
if not self.options_valid():
message = QApplication.translate(
"BaseCapsuleOptionsWidget",
"Not all options are valid!")
raise ValueError(message)
if not len(self.all_items):
message = QApplication.translate(
"BaseCapsuleOptionsWidget",
"You can't apply changes if the capsule never got set!")
raise RuntimeError(message)
unlocked_option_vals = {option_item.option_name: option_item.val
for option_item in self.option_items
if not option_item.locked}
api.set_capsule_option_vals(
capsule_name=self.current_capsule,
stream_id=stream_id,
option_vals=unlocked_option_vals)
if not self.enabled_option.locked:
api.set_capsule_active(
capsule_name=self.current_capsule,
stream_id=stream_id,
active=self.enabled_option.val)
else:
api.set_capsule_active(
capsule_name=self.current_capsule,
stream_id=stream_id,
active=None)
def _on_inputs_changed(self):
"""
This gets called when any capsule option gets edited/changed
The 'on_change' from the child could be a variety of signals,
depending on the specific subclass of the CapsuleOptionItem.
Connected to:
- CapsuleOptionItem -- Dynamic
[child].change_signal
"""
self.capsule_options_changed.emit()
def _reset(self):
"""Clear any state specific to any one capsule"""
# Tell QT to delete widgets
for option_item in self.all_items:
option_item.delete()
# Clear references
self.enabled_option = None
self.current_capsule = None
self.option_items = []
self.all_items = []
|
427220
|
import os
import docker
def start_nginx():
os.system("docker rm -f migrate")
os.system("mkdir ~/migrate")
os.system("mkdir -p ~/migrate/html/checkpoints")
os.system("mkdir -p ~/migrate/html/images")
    # ~/migrate/html is where the HTML is stored; resources also live in this directory so they are easy to read
os.system("docker run --name migrate -itd -p 8080:80 -v ~/migrate/html:/usr/share/nginx/html nginx")
# Create and fetch the compressed checkpoint file
def migrate(container_id, checkpoint_name="simple"):
os.system(f"./docker-popcorn-notify {container_id} aarch64")
os.system(f"docker checkpoint create {container_id} {checkpoint_name}")
os.system(f"./recode.sh {container_id} {checkpoint_name} aarch64")
os.system("python mnt.py")
out = os.popen("cd /tmp/simple && crit show mountpoints* | grep acpi")
text = out.read()
out.close()
while "acpi" in text:
deleteMountpoint()
out = os.popen("cd /tmp/simple && crit show mountpoints* | grep acpi")
text = out.read()
out.close()
os.system(f"cd /tmp && tar -czf {checkpoint_name}.tar.xz {checkpoint_name}")
os.system(f"mv /tmp/{checkpoint_name}.tar.xz ~/migrate/html/checkpoints/")
path = "checkpoints/" + checkpoint_name + ".tar.xz"
os.system(f"rm -rf /tmp/{checkpoint_name}")
os.system(f"docker logs {container_id}")
return path
if __name__ == '__main__':
start_nginx()
os.system("docker rm -f migrate_test")
os.system("docker run --cap-add all --name migrate_test -d 123toorc/hcontainer-helloworld:hcontainer")
print(migrate("migrate_test"))
|
427228
|
from ggplot import *
import pandas as pd
df = pd.DataFrame({
"x": [0, 1, 1, 0] + [5, 10, 10, 5],
"y": [0, 0, 1, 1] + [10, 10, 20, 20],
"g": ["a", "a", "a", "a"] + ["b", "b", "b", "b"]
})
print(ggplot(df, aes(x='x', y='y', fill='g')) + geom_polygon())
print(ggplot(df, aes(x='x', y='y', color='g')) + geom_polygon())
print(ggplot(df[df.g=="b"], aes(x='x', y='y')) + geom_polygon(alpha=0.25, linetype='dashed'))
|
427230
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pymc3 as pm
import theano
import theano.tensor as tt
#load data
train_df = pd.read_csv('redwoods.csv')
#scale it so it is a bit easier to work with
xloc = train_df['redwoodfull.x']*100
yloc = train_df['redwoodfull.y']*100
#discretize spatial data
D = 2 #dimension
num_bins = 64
hist, xedges, yedges = np.histogram2d(yloc, xloc,
bins=np.linspace(0,100,num_bins+1))
xcenters = xedges[:-1] + np.diff(xedges)/2
ycenters = yedges[:-1] + np.diff(yedges)/2
f, ax = plt.subplots(1, 2, figsize=(8, 4),)
lengthscale_ = 7
sns.kdeplot(xloc,yloc,
bw=lengthscale_,
cmap="viridis", shade=True, ax=ax[0])
ax[0].scatter(xloc, yloc, color='r', alpha=.25)
ax[0].set_xlim(0,100)
ax[0].set_ylim(0,100)
ax[1].imshow(hist,
cmap='viridis', origin='lower')
ax[1].axis('off')
#%%
#input/output
xv, yv = np.meshgrid(xcenters, ycenters)
x_data = np.vstack((yv.flatten(),xv.flatten())).T
y_data = hist.flatten()
#%% pymc3 minibatch setup
# Not suitable for 2D mapping problem, overestimated lengthscale
batchsize = 10
Xbatch = pm.Minibatch(x_data, batchsize**2)
Ybatch = pm.Minibatch(y_data, batchsize**2)
#%% set up minibatch
data = hist
batchsize = 10
z1, z2 = batchsize, batchsize
s1, s2 = np.shape(data)
yshared = theano.shared(data)
x1shared = theano.shared(ycenters[:,np.newaxis].repeat(64,axis=1))
x2shared = theano.shared(xcenters[:,np.newaxis].T.repeat(64,axis=0))
ixs1 = pm.tt_rng().uniform(size=(1,), low=0, high=s1-z1-1e-10).astype('int64')
ixs2 = pm.tt_rng().uniform(size=(1,), low=0, high=s2-z2-1e-10).astype('int64')
range1 = tt.arange(ixs1.squeeze(),(ixs1+z1).squeeze())
range2 = tt.arange(ixs2.squeeze(),(ixs2+z2).squeeze())
Ybatch = yshared[range1][:,range2].flatten()
Xbatch1 = x1shared[range1][:,range2].flatten()
Xbatch2 = x2shared[range1][:,range2].flatten()
Xbatch = tt.stack((Xbatch1,Xbatch2)).T
import theano
theano.config.compute_test_value = 'off'
#%%
with pm.Model() as model:
#hyper-parameter priors
# weakly informative prior
# l = pm.HalfCauchy('l', beta=3.)
# informative prior
l = pm.Gamma('l', alpha=5, beta=1,
transform=pm.distributions.transforms.LogExpM1())
eta = pm.HalfCauchy('eta', beta=3.)
cov_func = eta**2 * pm.gp.cov.Matern32(D, ls=l*np.ones(D))
#Gaussian Process
gp = pm.gp.Latent(cov_func=cov_func)
f = gp.prior('f', X=Xbatch, shape=batchsize**2)
obs = pm.Poisson('obs', mu=tt.exp(f), observed=Ybatch, total_size=y_data.shape)
approx = pm.fit(20000,
method='fullrank_advi',
callbacks=[pm.callbacks.CheckParametersConvergence(tolerance=1e-4)])
trace = approx.sample(1000)
pm.traceplot(trace, varnames=['l','eta']);
#%%
with model:
group_1 = pm.Group([l,eta], vfam='fr') # latent1 has full rank approximation
group_other = pm.Group(None, vfam='mf') # other variables have mean field Q
approx = pm.Approximation([group_1, group_other])
pm.KLqp(approx).fit(100000,
callbacks=[pm.callbacks.CheckParametersConvergence(tolerance=1e-4)])
trace = approx.sample(1000)
#%% prediction
#nx=50
#x = np.linspace(0, 100, nx)
#y = np.linspace(0, 100, nx)
#xv, yv = np.meshgrid(x, y)
#x_pred = np.vstack((yv.flatten(),xv.flatten())).T
# add the GP conditional to the model, given the new X values
with pm.Model() as predi_model:
#hyper-parameter priors
l = pm.HalfNormal('l', sd=.1)
eta = pm.HalfCauchy('eta', beta=3.)
cov_func = eta**2 * pm.gp.cov.Matern32(D, ls=l*np.ones(D))
#Gaussian Process
gp = pm.gp.Latent(cov_func=cov_func)
f = gp.prior('f1', X=x_data)
obs = pm.Poisson('obs1', mu=tt.exp(f), observed=y_data)
f_pred = gp.conditional('f_pred', x_data)
# Sample from the GP conditional distribution
pred_samples = pm.sample_ppc(trace, vars=[f_pred], samples=100)
ftrace = np.mean(pred_samples['f_pred'], axis=0)
ftrace = np.reshape(ftrace, (num_bins, num_bins))
latent_rate = np.exp(ftrace)
#%%
f, ax = plt.subplots(1, 3, figsize=(12, 4), )
sns.kdeplot(xloc,yloc,
bw=.3,
cmap="viridis", shade=True, ax=ax[0])
ax[0].scatter(xloc, yloc, color='r', alpha=.25)
ax[0].set_xlim(0,1)
ax[0].set_ylim(0,1)
ax[1].imshow(hist,
cmap='viridis', origin='lower')
ax[1].axis('off')
#%%
ax[2].imshow(latent_rate,
cmap='viridis', origin='lower', interpolation='gaussian')
ax[2].scatter(xloc/2, yloc/2, color='r', alpha=.25)
ax[2].axis('off')
plt.tight_layout();
|
427236
|
import os
import sys
import time
import torch
import torchaudio
import numpy as np
from ronn.model import ronnModel
from ronn.utils import calculate_receptive_field
sample_rate = 44100
params = {
"n_inputs": 2,
"n_outputs": 2,
"n_layers": 7,
"n_channels": 4,
"kernel_size": 3,
"activation": "ReLU",
"dilation_growth": 3,
"init": "xavier_normal",
"film_dim": 2,
"seed": 4,
}
# construct the model with our desired parameters
processor = ronnModel(**params)
rf = calculate_receptive_field(
params["kernel_size"],
params["n_layers"],
1,
params["dilation_growth"] ** np.arange(params["n_layers"]),
)
print(f"{rf} samples or {(rf/sample_rate)*1000:0.2f} ms")
# load some audio
x, sr = torchaudio.load("samples/clean/clean_guitar.wav")
c, s = x.size()
x = x[:, : 44100 * 4]
x_pad = torch.nn.functional.pad(x, (rf, 0))
x_pad = x_pad.view(1, c, -1)
y = None
# process that audio
out = processor(x_pad, y=y)
# normalize
out /= out.abs().max()
# remove DC
out -= torch.mean(out, dim=-1).view(c, 1)
# save the processed audio to disk
torchaudio.save("samples/processed/p_clean_guitar.wav", out.view(c, -1), sr)
torchaudio.save("samples/processed/c_clean_guitar.wav", x.view(c, -1), sr)
|
427259
|
c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.open_browser = False
# Python
c.NotebookApp.password = '<PASSWORD>'
|
427282
|
from django import template
from main.models import Warden, HostelSuperintendent, Security
register = template.Library()
@register.simple_tag
def active_page(request, view_name):
    from django.urls import resolve, Resolver404
    if not request:
        return ""
    try:
        path = resolve(request.path_info)
        return "active" if path.url_name == view_name else ""
    except Resolver404:
        return ""
def is_warden(user):
    return Warden.objects.filter(user=user).exists()
def is_hostelsuperintendent(user):
    return HostelSuperintendent.objects.filter(user=user).exists()
def is_security(user):
    return Security.objects.filter(user=user).exists()
@register.simple_tag
def get_user_status(request):
if request.user.is_authenticated:
if is_warden(request.user):
return 'warden'
elif is_security(request.user):
return 'security'
elif is_hostelsuperintendent(request.user):
return 'hostelsuperintendent'
elif request.user.is_staff:
return 'staff'
else:
return 'authenticated'
else:
return 'unauthenticated'
def get_base_template(request):
userstatus = get_user_status(request)
if userstatus == 'warden':
return 'wardenbase.html'
elif userstatus == 'security':
return 'security_dash_base.html'
elif userstatus == 'hostelsuperintendent':
return 'superintendentbase.html'
else:
return 'indexbase.html'
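# Hedged usage sketch (assumed tag-library and view names, shown as comments
# because these tags are used from Django templates rather than Python code):
#   {% load main_tags %}
#   <li class="{% active_page request 'dashboard' %}">Dashboard</li>
#   {% get_user_status request as status %}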
|
427321
|
import os
from magma import *
__all__ = ['PS7Wrap']
def PS7Wrap():
PS7Path = os.path.dirname(__file__) + "/vsrc/"
vlibs = [PS7Path+"ps7_wrap.v",PS7Path+"axi_master_stub.v",PS7Path+"axi_master32_stub.v",PS7Path+"axi_slave_stub.v"]
verilogFile = PS7Path + "ps7_stub.v"
ps7 = DefineCircuit("ps7_stub",
"inout MIO", Array(54,Bit),
"output DDR_WEB", Bit,
"inout DDR_VRP", Bit,
"inout DDR_VRN", Bit,
"inout DDR_RAS_n", Bit,
"inout DDR_ODT", Bit,
"inout DDR_DRSTB", Bit,
"inout DDR_DQS", Array(4,Bit),
"inout DDR_DQS_n", Array(4,Bit),
"inout DDR_DQ", Array(32,Bit),
"inout DDR_DM", Array(4,Bit),
"inout DDR_CS_n", Bit,
"inout DDR_CKE", Bit,
"inout DDR_Clk", Bit,
"inout DDR_Clk_n", Bit,
"inout DDR_CAS_n", Bit,
"inout DDR_BankAddr", Array(3,Bit),
"inout DDR_Addr", Array(15,Bit),
"inout PS_PORB", Bit,
"inout PS_SRSTB", Bit,
"inout PS_CLK", Bit,
"output CLK", Bit,
"output RST_n", Bit
)
with open(verilogFile,'r') as verilog:
ps7.verilog=verilog.read()
ps7.verilogLib = vlibs
EndCircuit()
return ps7
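# Hedged usage sketch: instantiating the stub definition (assumes the magma
# toolchain is available and the Verilog sources under vsrc/ exist on disk).
if __name__ == '__main__':
    ps7_stub = PS7Wrap()
    print(ps7_stub)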
|
427330
|
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp as nlp
from paddlenlp.data import Stack, Tuple, Pad
def predict(model, data, tokenizer, label_map, batch_size=1):
"""
Predicts the data labels.
Args:
model (obj:`paddle.nn.Layer`): A model to classify texts.
        data (obj:`List(Example)`): The processed data, in which each element is an Example (namedtuple) object.
            An Example object contains `text`(word_ids) and `seq_len`(sequence length).
tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
which contains most of the methods. Users should refer to the superclass for more information regarding methods.
label_map(obj:`dict`): The label id (key) to label str (value) map.
        batch_size(obj:`int`, defaults to 1): The batch size.
Returns:
results(obj:`dict`): All the predictions labels.
"""
examples = []
for text in data:
input_ids, segment_ids = convert_example(
text,
tokenizer,
max_seq_length=128,
is_test=True)
examples.append((input_ids, segment_ids))
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input id
Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment id
): fn(samples)
    # Separates data into batches.
batches = []
one_batch = []
for example in examples:
one_batch.append(example)
if len(one_batch) == batch_size:
batches.append(one_batch)
one_batch = []
if one_batch:
# The last batch whose size is less than the config batch_size setting.
batches.append(one_batch)
results = []
model.eval()
for batch in batches:
input_ids, segment_ids = batchify_fn(batch)
input_ids = paddle.to_tensor(input_ids)
segment_ids = paddle.to_tensor(segment_ids)
logits = model(input_ids, segment_ids)
probs = F.softmax(logits, axis=1)
idx = paddle.argmax(probs, axis=1).numpy()
idx = idx.tolist()
labels = [label_map[i] for i in idx]
results.extend(labels)
return results
@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
"""
Given a dataset, it evals model and computes the metric.
Args:
model(obj:`paddle.nn.Layer`): A model to classify texts.
data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.
criterion(obj:`paddle.nn.Layer`): It can compute the loss.
metric(obj:`paddle.metric.Metric`): The evaluation metric.
"""
model.eval()
metric.reset()
losses = []
for batch in data_loader:
input_ids, token_type_ids, labels = batch
logits = model(input_ids, token_type_ids)
loss = criterion(logits, labels)
losses.append(loss.numpy())
correct = metric.compute(logits, labels)
metric.update(correct)
accu = metric.accumulate()
print("eval loss: %.5f, accu: %.5f" % (np.mean(losses), accu))
model.train()
metric.reset()
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
"""
Builds model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens. And creates a mask from the two sequences passed
to be used in a sequence-pair classification task.
A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If only one sequence, only returns the first portion of the mask (0's).
Args:
        example(obj:`list[str]`): List of input data, containing text and label if it has a label.
tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
which contains most of the methods. Users should refer to the superclass for more information regarding methods.
max_seq_len(obj:`int`): The maximum total input sequence length after tokenization.
Sequences longer than this will be truncated, sequences shorter will be padded.
is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.
Returns:
input_ids(obj:`list[int]`): The list of token ids.
token_type_ids(obj: `list[int]`): List of sequence pair mask.
label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.
"""
encoded_inputs = tokenizer(text=example["text"], max_seq_len=max_seq_length)
input_ids = encoded_inputs["input_ids"]
token_type_ids = encoded_inputs["token_type_ids"]
if not is_test:
label = np.array([example["label"]], dtype="int64")
return input_ids, token_type_ids, label
else:
return input_ids, token_type_ids
def create_dataloader(dataset,
mode='train',
batch_size=1,
batchify_fn=None,
trans_fn=None):
if trans_fn:
dataset = dataset.map(trans_fn)
    shuffle = mode == 'train'
if mode == 'train':
batch_sampler = paddle.io.DistributedBatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
return paddle.io.DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=batchify_fn,
return_list=True)
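# Hedged usage sketch (not part of the original module). The model name,
# checkpoint, label map and sample text below are assumptions for illustration.
if __name__ == '__main__':
    from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer

    model = ErnieForSequenceClassification.from_pretrained('ernie-1.0', num_classes=2)
    tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
    label_map = {0: 'negative', 1: 'positive'}
    data = [{'text': 'this product works well'}]
    print(predict(model, data, tokenizer, label_map, batch_size=1))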
|
427394
|
import os
from generate_matches import generate_matches_carla, ImageIndex
from display_matches import display_correspondences
if __name__ == '__main__':
benchmark_folder = '/media/lukas/storage04/benchmarkpublic/GNNET_BENCHMARK_PUBLIC' # set this to the folder you have downloaded the benchmark to.
carla_folder = os.path.join(benchmark_folder, 'carla_training_validation')
matches, img1, img2 = generate_matches_carla(benchmark_folder=carla_folder, all_weathers=True,
image_index_1=ImageIndex(0, 0, 10), image_index_2=ImageIndex(3, 4, 14),
use_dso_depths=False)
display_correspondences(img1, img2, matches, 20)
|
427442
|
import asyncio
import logging
from unittest import TestCase
import src.async_kinesis_client.kinesis_producer
from tests.mocks import KinesisProducerMock
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
class TestProducer(TestCase):
def setUp(self):
try:
self.event_loop = asyncio.get_event_loop()
except RuntimeError:
self.event_loop = asyncio.new_event_loop()
self.producer_mock = KinesisProducerMock()
self.producer = self.producer_mock.get_producer()
def test_producer(self):
async def test():
await self.producer.put_record({'Data': b'zzzz'})
await self.producer.put_record({'Data': b'wwww'})
self.assertEqual(2, len(self.producer_mock.records))
self.assertEqual(b'zzzz', self.producer_mock.records[0].get('Data').get('Data'))
self.assertEqual(b'wwww', self.producer_mock.records[1].get('Data').get('Data'))
self.assertEqual('1', self.producer_mock.records[1].get('SequenceNumberForOrdering'))
self.event_loop.run_until_complete(test())
def test_multiple_records(self):
async def test():
records = [
{'Data': b'zzzz'},
{'Data': b'wwww'}
]
await self.producer.put_records(records=records)
await self.producer.flush()
self.assertEqual(2, len(self.producer_mock.records))
self.assertEqual(b'zzzz', self.producer_mock.records[0].get('Data'))
self.assertEqual(b'wwww', self.producer_mock.records[1].get('Data'))
self.event_loop.run_until_complete(test())
def test_limits(self):
src.async_kinesis_client.kinesis_producer.MAX_RECORDS_IN_BATCH = 3
src.async_kinesis_client.kinesis_producer.MAX_RECORD_SIZE = 10
async def test():
# Check that 4th record triggers flush
records = [
{'Data': b'zzzz'},
{'Data': b'wwww'},
{'Data': b'qqqq'},
{'Data': b'dddd'},
]
await self.producer.put_records(records=records)
self.assertEqual(3, len(self.producer_mock.records))
self.assertEqual(1, len(self.producer.record_buf))
await self.producer.flush()
# Check that too big record raises ValueError
records = [
{'Data': ('looongcatislooong' * 10).encode()}
]
try:
await self.producer.put_records(records=records)
except ValueError:
pass
else:
self.fail('ValueError not raised')
src.async_kinesis_client.kinesis_producer.MAX_BATCH_SIZE = 14
self.producer_mock.records = []
# Check that exceeding MAX_BATCH_SIZE triggers flush
records = [
{'Data': b'zzzz'},
{'Data': b'wwww'},
{'Data': b'qqqq'},
{'Data': b'dddd'}
]
self.records = []
await self.producer.put_records(records=records)
self.assertEqual(3, len(self.producer_mock.records))
self.assertEqual(1, len(self.producer.record_buf))
self.event_loop.run_until_complete(test())
|
427462
|
from thenewboston.blocks.signatures import generate_signature
from thenewboston.utils.tools import sort_and_encode
from thenewboston.verify_keys.verify_key import encode_verify_key, get_verify_key
def generate_signed_request(*, data, nid_signing_key):
"""Generate and return signed request"""
node_identifier = get_verify_key(signing_key=nid_signing_key)
signature = generate_signature(
message=sort_and_encode(data),
signing_key=nid_signing_key
)
return {
'message': data,
'node_identifier': encode_verify_key(verify_key=node_identifier),
'signature': signature
}
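# Hedged usage sketch (not part of the original module): assumes the signing key
# is a PyNaCl SigningKey, the key type used by thenewboston's NaCl-based helpers;
# the payload is illustrative only.
if __name__ == '__main__':
    from nacl.signing import SigningKey

    nid_signing_key = SigningKey.generate()
    signed_request = generate_signed_request(
        data={'primary_validator': {'ip_address': '127.0.0.1'}},
        nid_signing_key=nid_signing_key,
    )
    print(signed_request)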
|
427492
|
from desktop_local_tests.local_ip_responder_test_case_with_disrupter import LocalIPResponderTestCaseWithDisrupter
from desktop_local_tests.windows.windows_adapter_disrupter import WindowsAdapterDisrupter
class TestWindowsIPResponderDisruptAdapter(LocalIPResponderTestCaseWithDisrupter):
'''Summary:
Tests whether traffic leaving the user's device has the public IP hidden when the network
adapter is disabled.
Details:
This test will connect to VPN then disable the primary network adapter.
This test uses a simple UDP client which spams UDP packets to a public server. The server logs
the source IP of every packet. The test checks with the server to make sure that the public IP
is always the VPN server's IP and not the device's.
Discussion:
It is unlikely that the adapter would be disabled in this way in the real world, but of course
not impossible. However, this represents a class of tests which disrupt the primary network
adapter. Other types of disruption would be:
* Ethernet cable pulled (see TestDNSDisruptCable)
* Wi-Fi power disabled (see TestWindowsOSDNSDisruptWifiPower)
* Wi-Fi network unavailable, e.g. walk out of range of Wi-Fi network.
Weaknesses:
None
Scenarios:
No restrictions.
TODO:
This class only disables the primary adapter. Would be good to try permutations like:
* Disable all adapters
* This opens up a class of behaviour tests of "what does my VPN do when I lose all
connectivity but then get it back later"
* Disable a secondary adapter
* Disable all but the primary adapter
This behaviour should be configurable from the test config.
'''
def __init__(self, devices, parameters):
super().__init__(WindowsAdapterDisrupter, devices, parameters)
|
427519
|
from fastapi_utils.api_model import APIModel
from tifa.apps.admin.local import g
from tifa.apps.admin.router import bp
from tifa.models.gift_card import GiftCard
class TGiftCard(APIModel):
id: str
name: str
@bp.list("/gift_cards", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_cards_items():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.item("/gift_card", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_item():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.op("/gift_card/create", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_create():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.op("/gift_card/update", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_update():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.op("/gift_card/delete", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_delete():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.op("/gift_card/activate", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_activate():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
@bp.op("/gift_card/deactivate", out=TGiftCard, summary="GiftCard", tags=["GiftCard"])
async def gift_card_deactivate():
ins = await g.adal.first_or_404(GiftCard)
return {"items": ins}
|
427537
|
import struct
from terrabot.util.streamer import Streamer
class Packet14Parser(object):
def parse(self, world, player, data, ev_man):
pass
|
427540
|
import unittest
from torchimage.misc import outer
from torchimage.pooling.base import SeparablePoolNd
from torchimage.pooling.gaussian import GaussianPoolNd
from torchimage.pooling.uniform import AvgPoolNd
from torchimage.padding.utils import same_padding_width
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from functools import reduce
from scipy import ndimage
NDIMAGE_PAD_MODES = [("symmetric", "reflect"),
("replicate", "nearest"),
("constant", "constant"),
("reflect", "mirror"),
("circular", "wrap")]
class MyTestCase(unittest.TestCase):
def test_uniform_1d(self):
# x = torch.arange(10, dtype=torch.float64)
x = torch.rand(30, dtype=torch.float64)
for n in range(1, 10):
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
filter_layer = SeparablePoolNd(np.ones(n) / n).to_filter(ti_mode)
y_ti = filter_layer.forward(x, axes=None).numpy()
y_ndimage = ndimage.uniform_filter(x.numpy(), size=n, mode=ndimage_mode)
with self.subTest(n=n, ti_mode=ti_mode):
self.assertLess(np.abs(y_ti - y_ndimage).max(), 1e-14)
def test_uniform(self):
for n in range(1, 10):
x = torch.rand(100, 41, dtype=torch.float64) * 100 - 50
x = torch.round(x)
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
filter_layer = SeparablePoolNd(np.ones(n) / n).to_filter(ti_mode)
y_ti = filter_layer.forward(x, axes=None).numpy()
y_ndimage = ndimage.uniform_filter(x.numpy(), size=n, mode=ndimage_mode)
result = np.allclose(y_ti, y_ndimage, rtol=1e-5, atol=1e-5, equal_nan=False)
with self.subTest(ti_mode=ti_mode, ndimage_mode=ndimage_mode, n=n):
self.assertTrue(result)
def test_conv(self):
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
for ndim in range(1, 5):
kernel_size = np.random.randint(1, 10, size=ndim)
kernels = [np.random.rand(ks) for ks in kernel_size]
shape = tuple(np.random.randint(20, 50, size=ndim))
x = torch.rand(*shape, dtype=torch.float64)
full_conv_tensor = reduce(outer, kernels)
# note that convolve in neural network is correlate in signal processing
y_ndimage = ndimage.correlate(x.numpy(), weights=full_conv_tensor, mode=ndimage_mode)
filter_layer = SeparablePoolNd(kernels).to_filter(padder=ti_mode)
y_ti = filter_layer.forward(x, axes=None).numpy()
result = np.allclose(y_ti, y_ndimage, rtol=1e-7, atol=1e-5, equal_nan=False)
with self.subTest(ti_mode=ti_mode, ndimage_mode=ndimage_mode, ndim=ndim,
kernel_size=kernel_size, shape=shape):
self.assertTrue(result)
def test_wrapper_1(self):
# wrapped image filter should behave the same way as its base pooling class
x = torch.rand(17, 100, 5)
# gaussian filter type
gf_1 = GaussianPoolNd(9, sigma=1.5, order=0).to_filter("reflect")
gf_2 = GaussianPoolNd(9, 1.5, 0).to_filter("reflect")
gp = GaussianPoolNd(9, sigma=1.5, order=0, stride=1, same_padder="reflect")
y1 = gf_1.forward(x, axes=None)
y2 = gf_2.forward(x, axes=None)
y = gp.forward(x, axes=None)
self.assertEqual(torch.abs(y1 - y).max().item(), 0)
self.assertEqual(torch.abs(y2 - y).max().item(), 0)
def test_gaussian_1(self):
sigma = 1.5
for truncate in range(2, 10, 2):
for order in range(6):
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
x = torch.rand(10, 37, 21, dtype=torch.float64)
y_sp = ndimage.gaussian_filter(x.numpy(), sigma=sigma, order=order, mode=ndimage_mode,
truncate=truncate)
gf1 = GaussianPoolNd(kernel_size=int(2 * truncate * sigma + 1), sigma=sigma, order=order,
).to_filter(padder=ti_mode)
y_ti = gf1.forward(x, axes=None)
y_ti = y_ti.numpy()
self.assertLess(np.abs(y_sp - y_ti).max(), 1e-10)
def test_precision_1(self):
# 1d convolution precision testing
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
x = torch.rand(10, dtype=torch.float64)
w = torch.rand(5, dtype=torch.float64)
y1 = ndimage.correlate1d(x.numpy(), w.numpy(), axis=-1, mode=ndimage_mode, origin=0)
pool_layer = SeparablePoolNd(w).to_filter(padder=ti_mode)
y2 = pool_layer.forward(x, axes=None).numpy()
result = np.allclose(y1, y2, rtol=1e-9, atol=1e-9)
with self.subTest(ti_mode=ti_mode, ndimage_mode=ndimage_mode):
self.assertTrue(result)
def test_average_1(self):
for kernel_size in range(3, 15, 2):
x = torch.rand(13, 25, 18, dtype=torch.float64)
for ti_mode, ndimage_mode in NDIMAGE_PAD_MODES:
filter_layer = AvgPoolNd(kernel_size=kernel_size).to_filter(padder=ti_mode)
y_ti = filter_layer.forward(x, axes=None).numpy()
y_ndi = ndimage.uniform_filter(x.numpy(), size=kernel_size, mode=ndimage_mode)
with self.subTest(kernel_size=kernel_size, ti_mode=ti_mode, ndimage_mode=ndimage_mode):
self.assertLess(np.abs(y_ti - y_ndi).max(), 1e-10)
def test_average_2(self):
for kernel_size in range(3, 15, 2):
x = torch.rand(1, 1, 13, 18, dtype=torch.float64)
ti_mode = "constant"
filter_layer = AvgPoolNd(kernel_size=kernel_size, count_include_pad=True).to_filter(padder=ti_mode)
y_ti = filter_layer.forward(x).squeeze().numpy()
y_torch = F.avg_pool2d(x, kernel_size=kernel_size, stride=1,
padding=kernel_size//2, count_include_pad=True).squeeze().numpy()
with self.subTest(kernel_size=kernel_size, ti_mode=ti_mode, count_include_pad=True):
self.assertLess(np.abs(y_ti - y_torch).max(), 1e-10)
filter_layer = AvgPoolNd(kernel_size=kernel_size, count_include_pad=False).to_filter(padder=ti_mode)
y_ti = filter_layer.forward(x).squeeze().numpy()
y_torch = F.avg_pool2d(x, kernel_size=kernel_size, stride=1,
padding=kernel_size // 2, count_include_pad=False).squeeze().numpy()
with self.subTest(kernel_size=kernel_size, ti_mode=ti_mode, count_include_pad=False):
self.assertLess(np.abs(y_ti - y_torch).max(), 1e-10)
if __name__ == '__main__':
unittest.main()
|
427575
|
import csv
import collections
import re
# this opens and reads a csv file as a list of rows
def read(filename):
data = []
with open(filename, 'rU') as f:
f = csv.reader(f)
for row in f:
data.append(row)
return data
# this opens and reads csv data as a list of dicts
def read_as_dict(filename):
    rows = read(filename)
    headers = rows.pop(0)
    data = list()
    for row in rows:
d = dict()
for index, header in enumerate(headers):
d[header] = row[index]
data.append(d)
return data
# this tries to convert a str into a float; prints a warning and returns 0 if not possible.
def safe_float(s):
n = 0
try:
n = float(s)
except:
if s != '':
print "Cannot turn %s into float" % s
return n
# this writes output as a csv
def write(data, filename):
with open(filename, 'wb') as f:
writer = csv.writer(f)
writer.writerows(data)
# this flattens our data from a list of dicts and writes output as csv
def flatten_dict(data, headers, filename):
result = list()
for row in data:
        result_row = list()
for key in headers:
try:
result_row.append(row[key])
except KeyError:
continue
result.append(result_row)
headers = [headers]
result = headers + result
write(result, filename)
# join lead exposure data to classified tracts
lead_data = read_as_dict('../exports/lead-risk-score.csv')
tracts_data = read_as_dict('exports/tracts-classification.csv')
d = collections.defaultdict(dict)
for l in (lead_data, tracts_data):
for elem in l:
d[elem['id']].update(elem)
joined_data = d.values()
# Analysis Qs
# 1
all_tracts = list()
for row in joined_data:
try:
if row['decile']:
all_tracts.append(row)
except KeyError:
continue
all_tracts_len = len(all_tracts)
print '1. We have data for {} tracts.'.format(all_tracts_len)
# 2
print '2. How many tracts have a decile of 10? And of those, how many are urban? How many are rural?'
counter_tracts = list()
counter_urban_tracts = list()
counter_rural_tracts = list()
for row in joined_data:
try:
if safe_float(row['decile']) == 10.0:
counter_tracts.append(row)
if safe_float(row['decile']) == 10.0 and row['c'] == 'u':
counter_urban_tracts.append(row)
if safe_float(row['decile']) == 10.0 and row['c'] == 'r':
counter_rural_tracts.append(row)
except KeyError:
continue
tracts_10 = len(counter_tracts)
tracts_10_urban = len(counter_urban_tracts)
tracts_10_rural = len(counter_rural_tracts)
pct_urban = round(float(tracts_10_urban) / float(tracts_10) * 100, 0)
pct_rural = round(float(tracts_10_rural) / float(tracts_10) * 100, 0)
print '2A: {} have a decile of 10. {} pct or {} are urban tracts. Only {} are rural tracts, or {} pct.'.format(tracts_10, pct_urban, tracts_10_urban, tracts_10_rural, pct_rural)
# 3
print '''3: Write to exports "tracts-high-risk-metro.csv", which contains the percentage
of census tracts in a metro area with a respective decile score of 10 or 1.'''
ua_list = read_as_dict('ua.csv')
# slim down atts from joined data
slimmed_joined = list()
for row in joined_data:
att = dict()
try:
att['id'] = row['id']
att['n'] = row['name']
att['d'] = row['decile']
att['c'] = row['c']
        number = re.compile(r'\d+(?:\.\d+)?')
uace = number.findall(row['l'])
if len(uace) == 0:
att['uace'] = 'r'
elif len(uace) == 1:
att['uace'] = uace[0]
elif len(uace) == 2:
att['uace'] = uace[0]
att['u2'] = uace[1]
else:
att['uace'] = uace[0]
att['u2'] = uace[1]
att['u3'] = uace[2]
slimmed_joined.append(att)
except KeyError:
continue
# append metro name to tracts
metro_names = list()
for row in slimmed_joined:
att = dict()
for metro in ua_list:
try:
if row['uace'] == metro['UACE'] or row['u2'] == metro['UACE'] or row['u3'] == metro['UACE']:
att['metro'] = metro['NAME']
att['d'] = row['d']
att['n'] = row['n']
att['p'] = metro['POP']
metro_names.append(att)
except KeyError:
continue
# group data by metro
metro_grp = collections.defaultdict(list)
for metro in metro_names:
metro_grp[metro['metro']].append(metro)
# calculate percentage of tracts for an urban area with a decile score of 10
metro_deciles = list()
for k,v in metro_grp.items():
att = dict()
deciles = list()
for item in v:
att['p'] = item['p']
deciles.append(safe_float(item['d']))
att['k'] = k
att['d'] = deciles
metro_deciles.append(att)
high_risk_tracts_metro = list()
for row in metro_deciles:
att = dict()
list_length = len(row['d'])
# count frequency of deciles in list
counter = collections.Counter(row['d'])
counter_dict = dict(counter.items())
try:
        # only include metro areas with at least 10 tracts and populations >= 100,000
if list_length >= 10 and int(row['p']) >= 100000:
att['d10'] = counter_dict[10.0]
att['dnum'] = list_length
att['pct'] = float(att['d10']) / float(list_length) * 100
att['k'] = row['k']
att['p'] = row['p']
high_risk_tracts_metro.append(att)
except KeyError:
continue
# 4
all_tracts_st = list()
for row in slimmed_joined:
att = dict()
name = row['n']
split = name.split(',')
if len(split) == 3:
att['n'] = name
att['st'] = split[2]
att['d'] = row['d']
att['c'] = row['c']
all_tracts_st.append(att)
else:
continue
# determine number of rural tracts with a risk of 6 or more
rural_tracts = list()
rural_tracts_risk = list()
for row in all_tracts_st:
if row['c'] == 'r':
rural_tracts.append(row)
if row['c'] == 'r' and safe_float(row['d']) >= 6.0:
rural_tracts_risk.append(row)
all_rural_tracts = len(rural_tracts)
rural_high_risk = len(rural_tracts_risk)
pct_rural_tracts = round(float(rural_high_risk) / float(all_rural_tracts) * 100, 0)
print '4. There are {} rural tracts. {} have a risk score of 6 or greater, or {} pct.'.format(all_rural_tracts, rural_high_risk, pct_rural_tracts)
# 4.5 group rural by state and determine percentage of tracts that are rural
print '''4.5 Write to exports "state-rural-pct.csv" and group rural census tracts by state and determine percentage of tracts that are rural
and percentage of rural tracts that have a lead exposure risk of 6 or greater.'''
# group all_tracts_st by state
st_grp = collections.defaultdict(list)
for st in all_tracts_st:
st_grp[st['st']].append(st)
state_rural_pct = list()
for k,v in st_grp.items():
att = dict()
tracts = len(v)
rural_count = list()
rural_risk = list()
for item in v:
if item['c'] == 'r':
rural_count.append('r')
if item['c'] == 'r' and safe_float(item['d']) >= 6.0:
rural_risk.append(item['d'])
rural = len(rural_count)
rural_risk_count = len(rural_risk)
att['pct_rural'] = float(rural) / float(tracts) * 100
try:
att['pct_rural_risk'] = float(rural_risk_count)/ float(rural) * 100
except ZeroDivisionError:
continue
att['st'] = item['st']
att['r'] = rural
att['t'] = tracts
state_rural_pct.append(att)
# write to csv
headers = ['k', 'd10', 'dnum', 'pct', 'p']
flatten_dict(high_risk_tracts_metro, headers, 'exports/tracts-high-risk-metro.csv')
headers = ['st', 'pct_rural', 'r', 't', 'pct_rural_risk']
flatten_dict(state_rural_pct, headers, 'exports/state-rural-pct.csv')
|
427579
|
import slideseg
import os
def main():
"""
Runs SlideSeg with the parameters specified in Parameters.txt
:return: image chips and masks
"""
def str2bool(value):
return value.lower() in ("true", "yes", "1")
params = slideseg.load_parameters('Parameters.txt')
print('running __main__ with parameters: {0}'.format(params))
if not os.path.isdir(params["slide_path"]):
path, filename = os.path.split(params["slide_path"])
xpath, xml_filename = os.path.split(params["xml_path"])
params["slide_path"] = path
params["xml_path"] = xpath
print('loading {0}'.format(filename))
slideseg.run(params, filename)
else:
for filename in os.listdir(params["slide_path"]):
slideseg.run(params, filename)
if __name__ == "__main__":
main()
|
427600
|
import itertools
import cmath
import h5py
from pauxy.systems.hubbard import Hubbard
from pauxy.trial_wavefunction.free_electron import FreeElectron
from pauxy.trial_wavefunction.uhf import UHF
from pauxy.trial_wavefunction.harmonic_oscillator import HarmonicOscillator
from pauxy.estimators.ci import simple_fci_bose_fermi, simple_fci
from pauxy.estimators.hubbard import local_energy_hubbard_holstein, local_energy_hubbard
from pauxy.systems.hubbard_holstein import HubbardHolstein
from pauxy.utils.linalg import reortho
from pauxy.estimators.greens_function import gab_spin
import time
from pauxy.utils.linalg import diagonalise_sorted
from pauxy.estimators.greens_function import gab_spin
import scipy
from scipy.linalg import expm
import scipy.sparse.linalg
from scipy.optimize import minimize
try:
from jax.config import config
config.update("jax_enable_x64", True)
import jax
from jax import grad, jit
import jax.numpy as np
import jax.scipy.linalg as LA
import numpy
except ModuleNotFoundError:
import numpy
np = numpy
    def jit(function):
        def wrapper(*args, **kwargs):
            return function(*args, **kwargs)
        return wrapper
import math
@jit
def gab(A, B):
r"""One-particle Green's function.
This actually returns 1-G since it's more useful, i.e.,
.. math::
\langle \phi_A|c_i^{\dagger}c_j|\phi_B\rangle =
[B(A^{\dagger}B)^{-1}A^{\dagger}]_{ji}
where :math:`A,B` are the matrices representing the Slater determinants
:math:`|\psi_{A,B}\rangle`.
For example, usually A would represent (an element of) the trial wavefunction.
.. warning::
Assumes A and B are not orthogonal.
Parameters
----------
A : :class:`numpy.ndarray`
Matrix representation of the bra used to construct G.
B : :class:`numpy.ndarray`
Matrix representation of the ket used to construct G.
Returns
-------
GAB : :class:`numpy.ndarray`
(One minus) the green's function.
"""
# Todo: check energy evaluation at later point, i.e., if this needs to be
# transposed. Shouldn't matter for Hubbard model.
inv_O = np.linalg.inv((A.conj().T).dot(B))
GAB = B.dot(inv_O.dot(A.conj().T))
return GAB
@jit
def local_energy_hubbard_holstein_jax(T,U,g,m,w0, G, X, Lap, Ghalf=None):
r"""Calculate local energy of walker for the Hubbard-Hostein model.
Parameters
----------
system : :class:`HubbardHolstein`
System information for the HubbardHolstein model.
G : :class:`numpy.ndarray`
Walker's "Green's function"
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
nbasis = T[0].shape[1]
ke = np.sum(T[0] * G[0] + T[1] * G[1])
pe = U * np.dot(G[0].diagonal(), G[1].diagonal())
pe_ph = 0.5 * w0 ** 2 * m * np.sum(X * X)
ke_ph = -0.5 * np.sum(Lap) / m - 0.5 * w0 * nbasis
rho = G[0].diagonal() + G[1].diagonal()
e_eph = - g * np.sqrt(m * w0 * 2.0) * np.dot(rho, X)
etot = ke + pe + pe_ph + ke_ph + e_eph
Eph = ke_ph + pe_ph
Eel = ke + pe
Eeb = e_eph
return (etot, ke+pe, ke_ph+pe_ph+e_eph)
def gradient(x, nbasis, nup, ndown, T, U, g, m, w0, c0,restricted,restricted_shift):
grad = numpy.array(jax.grad(objective_function)(x, nbasis, nup, ndown, T, U, g, m, w0, c0,restricted,restricted_shift))
return grad
def hessian(x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
    H = numpy.array(jax.hessian(objective_function)(x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift))
    return H
def hessian_product(x, p, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
    h = 1e-5
    xph = x + p * h
    xmh = x - p * h
    gph = gradient(xph, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift)
    gmh = gradient(xmh, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift)
    Hx = (gph - gmh) / (2.0 * h)
    return Hx
@jit
def compute_exp(Ua, tmp, theta_a):
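    # Truncated Taylor series for the matrix exponential: accumulates exp(theta_a)
    # into Ua term by term (Ua and tmp are expected to be identity matrices on entry).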
for i in range(1,50):
tmp = np.einsum("ij,jk->ik", theta_a, tmp)
Ua += tmp / math.factorial(i)
return Ua
def compute_greens_function_from_x (x, nbasis, nup, ndown, c0, restricted):
shift = x[0:nbasis]
nbsf = nbasis
nocca = nup
noccb = ndown
nvira = nbasis - nocca
nvirb = nbasis - noccb
nova = nocca*nvira
novb = noccb*nvirb
daia = np.array(x[nbsf:nbsf+nova],dtype=np.float64)
daib = np.array(x[nbsf+nova:nbsf+nova+novb],dtype=np.float64)
daia = daia.reshape((nvira, nocca))
daib = daib.reshape((nvirb, noccb))
if (restricted):
daib = jax.ops.index_update(daib, jax.ops.index[:,:], daia)
theta_a = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_b = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[nocca:nbsf,:nocca], daia)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[:nocca, nocca:nbsf], -np.transpose(daia))
theta_b = jax.ops.index_update(theta_b, jax.ops.index[noccb:nbsf,:noccb], daib)
theta_b = jax.ops.index_update(theta_b, jax.ops.index[:noccb, noccb:nbsf], -np.transpose(daib))
Ua = np.eye(nbsf,dtype=np.float64)
tmp = np.eye(nbsf,dtype=np.float64)
Ua = compute_exp(Ua, tmp, theta_a)
C0a = np.array(c0[:nbsf*nbsf].reshape((nbsf,nbsf)),dtype=np.float64)
Ca = C0a.dot(Ua)
Ga = gab(Ca[:,:nocca], Ca[:,:nocca])
if (noccb > 0):
C0b = np.array(c0[nbsf*nbsf:].reshape((nbsf,nbsf)),dtype=np.float64)
Ub = np.eye(nbsf)
tmp = np.eye(nbsf)
Ub = compute_exp(Ub, tmp, theta_b)
Cb = C0b.dot(Ub)
Gb = gab(Cb[:,:noccb], Cb[:,:noccb])
else:
Gb = numpy.zeros_like(Ga)
G = np.array([Ga, Gb],dtype=np.float64)
return G
def objective_function (x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
nbasis = int(round(nbasis))
nup = int(round(nup))
ndown = int(round(ndown))
shift = x[0:nbasis]
nbsf = nbasis
nocca = nup
noccb = ndown
nvira = nbasis - nocca
nvirb = nbasis - noccb
nova = nocca*nvira
novb = noccb*nvirb
daia = np.array(x[nbsf:nbsf+nova],dtype=np.float64)
daib = np.array(x[nbsf+nova:nbsf+nova+novb],dtype=np.float64)
daia = daia.reshape((nvira, nocca))
daib = daib.reshape((nvirb, noccb))
if (restricted):
daib = jax.ops.index_update(daib, jax.ops.index[:,:], daia)
theta_a = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_b = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[nocca:nbsf,:nocca], daia)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[:nocca, nocca:nbsf], -np.transpose(daia))
theta_b = jax.ops.index_update(theta_b, jax.ops.index[noccb:nbsf,:noccb], daib)
theta_b = jax.ops.index_update(theta_b, jax.ops.index[:noccb, noccb:nbsf], -np.transpose(daib))
Ua = np.eye(nbsf,dtype=np.float64)
tmp = np.eye(nbsf,dtype=np.float64)
Ua = compute_exp(Ua, tmp, theta_a)
C0a = np.array(c0[:nbsf*nbsf].reshape((nbsf,nbsf)),dtype=np.float64)
Ca = C0a.dot(Ua)
Ga = gab(Ca[:,:nocca], Ca[:,:nocca])
if (noccb > 0):
C0b = np.array(c0[nbsf*nbsf:].reshape((nbsf,nbsf)),dtype=np.float64)
Ub = np.eye(nbsf)
tmp = np.eye(nbsf)
Ub = compute_exp(Ub, tmp, theta_b)
Cb = C0b.dot(Ub)
Gb = gab(Cb[:,:noccb], Cb[:,:noccb])
else:
Gb = np.zeros_like(Ga)
G = np.array([Ga, Gb],dtype=np.float64)
if (restricted_shift):
shift = jax.ops.index_update(shift, jax.ops.index[:nbasis], x[0])
phi = HarmonicOscillator(m, w0, order=0, shift = shift)
Lap = phi.laplacian(shift)
etot, eel, eph = local_energy_hubbard_holstein_jax(T,U, g,m,w0, G, shift, Lap)
return etot.real
class CoherentState(object):
def __init__(self, system, options, verbose=False):
self.verbose = verbose
if verbose:
print ("# Parsing free electron input options.")
init_time = time.time()
self.name = "coherent_state"
self.type = "coherent_state"
self.trial_type = complex
self.initial_wavefunction = options.get('initial_wavefunction',
'coherent_state')
if verbose:
print ("# Diagonalising one-body Hamiltonian.")
(self.eigs_up, self.eigv_up) = diagonalise_sorted(system.T[0])
(self.eigs_dn, self.eigv_dn) = diagonalise_sorted(system.T[1])
self.reference = options.get('reference', None)
self.exporder = options.get('exporder', 6)
self.maxiter = options.get('maxiter', 3)
self.maxscf = options.get('maxscf', 500)
self.ueff = options.get('ueff', system.U)
if verbose:
print("# exporder in CoherentState is 15 no matter what you entered like {}".format(self.exporder))
self.psi = numpy.zeros(shape=(system.nbasis, system.nup+system.ndown),
dtype=self.trial_type)
assert (system.name == "HubbardHolstein")
self.m = system.m
self.w0 = system.w0
self.nbasis = system.nbasis
self.nocca = system.nup
self.noccb = system.ndown
self.algorithm = options.get('algorithm',"bfgs")
self.random_guess = options.get('random_guess',False)
self.symmetrize = options.get('symmetrize',False)
if verbose:
print("# random guess = {}".format(self.random_guess))
if verbose:
print("# Symmetrize Coherent State = {}".format(self.symmetrize))
self.wfn_file = options.get('wfn_file', None)
self.coeffs = None
self.perms = None
if self.wfn_file is not None:
if verbose:
print ("# Reading trial wavefunction from %s"%(self.wfn_file))
f = h5py.File(self.wfn_file, "r")
self.shift = f["shift"][()].real
self.psi = f["psi"][()]
f.close()
if (len(self.psi.shape) == 3):
if verbose:
print("# MultiCoherent trial detected")
self.symmetrize = True
self.perms = None
f = h5py.File(self.wfn_file, "r")
self.coeffs = f["coeffs"][()]
f.close()
self.nperms = self.coeffs.shape[0]
assert(self.nperms == self.psi.shape[0])
assert(self.nperms == self.shift.shape[0])
self.boson_trial = HarmonicOscillator(m = system.m, w = system.w0, order = 0, shift=self.shift[0,:])
self.G = None
if verbose:
print("# A total of {} coherent states are used".format(self.nperms))
else:
gup = gab(self.psi[:, :system.nup],
self.psi[:, :system.nup]).T
if (system.ndown > 0):
gdown = gab(self.psi[:, system.nup:],
self.psi[:, system.nup:]).T
else:
gdown = numpy.zeros_like(gup)
self.G = numpy.array([gup, gdown], dtype=self.psi.dtype)
self.boson_trial = HarmonicOscillator(m = system.m, w = system.w0, order = 0, shift=self.shift)
else:
free_electron = options.get('free_electron', False)
if (free_electron):
trial_elec = FreeElectron(system, trial=options, verbose=self.verbose)
else:
trial_elec = UHF(system, trial=options, verbose=self.verbose)
self.psi[:, :system.nup] = trial_elec.psi[:, :system.nup]
if (system.ndown > 0):
self.psi[:, system.nup:] = trial_elec.psi[:, system.nup:]
Pa = self.psi[:, :system.nup].dot(self.psi[:, :system.nup].T)
Va = (numpy.eye(system.nbasis) - Pa).dot(numpy.eye(system.nbasis))
e, va = numpy.linalg.eigh(Va)
if (system.ndown > 0):
Pb = self.psi[:, system.nup:].dot(self.psi[:, system.nup:].T)
else:
Pb = numpy.zeros_like(Pa)
Vb = (numpy.eye(system.nbasis) - Pb).dot(numpy.eye(system.nbasis))
e, vb = numpy.linalg.eigh(Vb)
nocca = system.nup
noccb = system.ndown
nvira = system.nbasis-system.nup
nvirb = system.nbasis-system.ndown
self.virt = numpy.zeros((system.nbasis, nvira+nvirb))
self.virt[:,:nvira] = numpy.real(va[:,system.nup:])
self.virt[:,nvira:] = numpy.real(vb[:,system.ndown:])
self.G = trial_elec.G.copy()
gup = gab(self.psi[:, :system.nup],
self.psi[:, :system.nup]).T
if (system.ndown > 0):
gdown = gab(self.psi[:, system.nup:],
self.psi[:, system.nup:]).T
else:
gdown = numpy.zeros_like(gup)
self.G = numpy.array([gup, gdown])
self.variational = options.get('variational',True)
self.restricted = options.get('restricted',False)
if (verbose):
print("# restricted = {}".format(self.restricted))
self.restricted_shift = options.get('restricted_shift',False)
if (verbose):
print("# restricted_shift = {}".format(self.restricted_shift))
rho = [numpy.diag(self.G[0]), numpy.diag(self.G[1])]
self.shift = numpy.sqrt(system.w0*2.0 * system.m) * system.g * (rho[0]+ rho[1]) / (system.m * system.w0**2)
self.shift = self.shift.real
print("# Initial shift = {}".format(self.shift[0:5]))
self.init_guess_file = options.get('init_guess_file', None)
if (self.init_guess_file is not None):
if verbose:
print ("# Reading initial guess from %s"%(self.init_guess_file))
f = h5py.File(self.init_guess_file, "r")
self.shift = f["shift"][()].real
self.psi = f["psi"][()]
self.G = f["G"][()]
f.close()
self.init_guess_file_stripe = options.get('init_guess_file_stripe', None)
if (self.init_guess_file_stripe is not None):
if verbose:
print ("# Reading initial guess from %s and generating an intial guess"%(self.init_guess_file_stripe))
f = h5py.File(self.init_guess_file_stripe, "r")
shift = f["shift"][()].real
psi = f["psi"][()]
G = f["G"][()]
f.close()
ny = system.nbasis // shift.shape[0]
assert(ny == system.ny)
self.shift = numpy.zeros(system.nbasis)
for i in range(ny):
self.shift[system.nx * i: system.nx * i+system.nx] = shift.copy()
for s in [0,1]:
self.G[s] = numpy.zeros_like(self.G[s])
for i in range(ny):
offset = system.nx*i
for j in range(system.nx):
for k in range(system.nx):
self.G[s][offset+j,offset+k] = G[s][j,k]
beta = self.shift * numpy.sqrt(system.m * system.w0 /2.0)
Focka = system.T[0] - 2.0 * system.g * numpy.diag(beta) + self.ueff * numpy.diag(self.G[1].diagonal())
Fockb = system.T[1] - 2.0 * system.g * numpy.diag(beta) + self.ueff * numpy.diag(self.G[0].diagonal())
Focka = Focka.real
Fockb = Fockb.real
ea, va = numpy.linalg.eigh(Focka)
eb, vb = numpy.linalg.eigh(Fockb)
self.psi[:,:system.nup] = va[:,:system.nup]
self.psi[:,system.nup:] = vb[:,:system.ndown]
if (self.variational):
if (verbose):
print("# we will repeat SCF {} times".format(self.maxiter))
self.run_variational(system, verbose)
print("# Variational Coherent State Energy = {}".format(self.energy))
print("# Optimized shift = {}".format(self.shift[0:5]))
self.boson_trial = HarmonicOscillator(m = system.m, w = system.w0, order = 0, shift=self.shift)
if (not len(self.psi.shape) == 3):
if (self.symmetrize):
self.perms = numpy.array(list(itertools.permutations([i for i in range(system.nbasis)])))
self.nperms = self.perms.shape[0]
norm = 1.0 / numpy.sqrt(self.nperms)
self.coeffs = norm * numpy.ones(self.nperms)
print("# Number of permutations = {}".format(self.nperms))
            elif (self.coeffs is None):
self.coeffs = 1.0
self.calculate_energy(system)
if (self.symmetrize):
print("# Coherent State energy (symmetrized) = {}".format(self.energy))
else:
print("# Coherent State energy = {}".format(self.energy))
self.initialisation_time = time.time() - init_time
self.spin_projection = options.get('spin_projection',False)
if (self.spin_projection and not self.symmetrize): # natural orbital
print("# Spin projection is used")
Pcharge = self.G[0] + self.G[1]
e, v = numpy.linalg.eigh(Pcharge)
self.init = numpy.zeros_like(self.psi)
idx = e.argsort()[::-1]
e = e[idx]
v = v[:,idx]
self.init[:, :system.nup] = v[:, :system.nup].copy()
if (system.ndown > 0):
self.init[:, system.nup:] = v[:, :system.ndown].copy()
else:
if (len(self.psi.shape) == 3):
self.init = self.psi[0,:,:].copy()
else:
self.init = self.psi.copy()
MS = numpy.abs(nocca-noccb) / 2.0
S2exact = MS * (MS+1.)
Sij = self.psi[:,:nocca].T.dot(self.psi[:,nocca:])
self.S2 = S2exact + min(nocca, noccb) - numpy.sum(numpy.abs(Sij*Sij).ravel())
if (verbose):
print("# <S^2> = {: 3f}".format(self.S2))
# For interface compatability
self.ndets = 1
self.bp_wfn = options.get('bp_wfn', None)
self.error = False
self.eigs = numpy.append(self.eigs_up, self.eigs_dn)
self.eigs.sort()
self._mem_required = 0.0
self._rchol = None
self._eri = None
self._UVT = None
if verbose:
print ("# Updated coherent.")
if verbose:
print ("# Finished initialising Coherent State trial wavefunction.")
def value(self, walker): # value
if (self.symmetrize):
phi = 0.0
if (len(self.psi.shape) == 3): # multicoherent given
for i in range(self.nperms):
shift = self.shift[i,:].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
phi += boson_trial.value(walker.X) * walker.ots[i] * self.coeffs[i].conj()
else:
shift0 = self.shift.copy()
for i, perm in enumerate(self.perms):
shift = shift0[perm].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
phi += boson_trial.value(walker.X) * walker.ots[i] * self.coeffs[i].conj()
else:
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=self.shift)
phi = boson_trial.value(walker.X)
return phi
def gradient(self, walker): # gradient / value
if (self.symmetrize):
grad = numpy.zeros(self.nbasis, dtype=walker.phi.dtype)
denom = self.value(walker)
if (len(self.psi.shape) == 3): # multicoherent given
for i in range(self.nperms):
shift = self.shift[i,:].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
grad += boson_trial.value(walker.X) * boson_trial.gradient(walker.X) * walker.ots[i] * self.coeffs[i]
else:
shift0 = self.shift.copy()
for i, perm in enumerate(self.perms):
shift = shift0[perm].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
grad += boson_trial.value(walker.X) * boson_trial.gradient(walker.X) * walker.ots[i] * self.coeffs[i]
grad /= denom
else:
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=self.shift)
grad = boson_trial.gradient(walker.X)
return grad
def laplacian(self, walker): # gradient / value
if (self.symmetrize):
lap = numpy.zeros(self.nbasis, dtype=walker.phi.dtype)
denom = self.value(walker)
if (len(self.psi.shape) == 3): # multicoherent given
for i in range(self.nperms):
shift = self.shift[i,:].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
walker.Lapi[i] = boson_trial.laplacian(walker.X)
lap += boson_trial.value(walker.X) * walker.Lapi[i] * walker.ots[i] * self.coeffs[i].conj()
else:
shift0 = self.shift.copy()
for i, perm in enumerate(self.perms):
shift = shift0[perm].copy()
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=shift)
walker.Lapi[i] = boson_trial.laplacian(walker.X)
lap += boson_trial.value(walker.X) * walker.Lapi[i] * walker.ots[i] * self.coeffs[i].conj()
lap /= denom
else:
boson_trial = HarmonicOscillator(m = self.m, w = self.w0, order = 0, shift=self.shift)
lap = boson_trial.laplacian(walker.X)
return lap
def bosonic_local_energy(self, walker):
ke = - 0.5 * numpy.sum(self.laplacian(walker)) / self.m
pot = 0.5 * self.m * self.w0 * self.w0 * numpy.sum(walker.X * walker.X)
eloc = ke+pot - 0.5 * self.w0 * self.nbasis # No zero-point energy
return eloc
def run_variational(self, system, verbose):
nbsf = system.nbasis
nocca = system.nup
noccb = system.ndown
nvira = system.nbasis - nocca
nvirb = system.nbasis - noccb
#
nova = nocca*nvira
novb = noccb*nvirb
#
x = numpy.zeros(system.nbasis + nova + novb, dtype=numpy.float64)
if (x.shape[0] == 0):
gup = numpy.zeros((nbsf, nbsf))
for i in range(nocca):
gup[i,i] = 1.0
gdown = numpy.zeros((nbsf, nbsf))
for i in range(noccb):
gdown[i,i] = 1.0
self.G = numpy.array([gup, gdown])
self.shift = numpy.zeros(nbsf)
self.calculate_energy(system)
return
Ca = numpy.zeros((nbsf,nbsf))
Ca[:,:nocca] = numpy.real(self.psi[:,:nocca])
Ca[:,nocca:] = numpy.real(self.virt[:,:nvira])
Cb = numpy.zeros((nbsf,nbsf))
Cb[:,:noccb] = numpy.real(self.psi[:,nocca:])
Cb[:,noccb:] = numpy.real(self.virt[:,nvira:])
if (self.restricted):
Cb = Ca.copy()
if (system.ndown > 0):
c0 = numpy.zeros(nbsf*nbsf*2, dtype=numpy.float64)
c0[:nbsf*nbsf] = Ca.ravel()
c0[nbsf*nbsf:] = Cb.ravel()
else:
c0 = numpy.zeros(nbsf*nbsf, dtype=numpy.float64)
c0[:nbsf*nbsf] = Ca.ravel()
#
x[:system.nbasis] = self.shift.real.copy() # initial guess
if (self.init_guess_file is None and self.init_guess_file_stripe is None):
if (self.random_guess):
for i in range(system.nbasis):
x[i] = numpy.random.randn(1)
else:
for i in range(system.nbasis):
if (i%2==0):
x[i] /= 2.0
else:
x[i] *= 2.0
self.energy = 1e6
if (self.algorithm == "adagrad"):
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.adagrad(step_size=0.5)
            for i in range(self.maxiter): # restart up to self.maxiter times
ehistory = []
x_jax = np.array(x)
opt_state = opt_init(x_jax)
def update(i, opt_state):
params = get_params(opt_state)
gradient = jax.grad(objective_function)(params, float(system.nbasis), float(system.nup), float(system.ndown),\
system.T, self.ueff, system.g, system.m, system.w0, c0, self.restricted)
return opt_update(i, gradient, opt_state)
eprev = 10000
params = get_params(opt_state)
Gprev = compute_greens_function_from_x(params, system.nbasis, system.nup, system.ndown, c0, self.restricted)
shift_prev = x[:system.nbasis]
for t in range(1000):
params = get_params(opt_state)
shift_curr = params[:system.nbasis]
Gcurr = compute_greens_function_from_x(params, system.nbasis, system.nup, system.ndown, c0, self.restricted)
ecurr = objective_function(params, float(system.nbasis), float(system.nup), float(system.ndown),\
system.T, self.ueff, system.g, system.m, system.w0, c0, self.restricted)
opt_state = update(t, opt_state)
Gdiff = (Gprev-Gcurr).ravel()
shift_diff = shift_prev - shift_curr
# rms = numpy.sum(Gdiff**2)/system.nbasis**2 + numpy.sum(shift_diff**2) / system.nbasis
rms = numpy.max(numpy.abs(Gdiff)) + numpy.max(numpy.abs(shift_diff))
echange = numpy.abs(ecurr - eprev)
if (echange < 1e-10 and rms < 1e-10):
if verbose:
print("# {} {} {} {} (converged)".format(t, ecurr, echange, rms))
self.energy = ecurr
ehistory += [ecurr]
break
else:
eprev = ecurr
Gprev = Gcurr
shift_prev = shift_curr
if (verbose and t % 20 == 0):
if (t == 0):
print("# {} {}".format(t, ecurr))
else:
print("# {} {} {} {}".format(t, ecurr, echange, rms))
x = numpy.array(params)
self.shift = x[:nbsf]
daia = x[nbsf:nbsf+nova]
daib = x[nbsf+nova:nbsf+nova+novb]
elif self.algorithm == "basin_hopping":
from scipy.optimize import basinhopping
minimizer_kwargs = {"method":"L-BFGS-B", "jac":True, "args":(float(system.nbasis), float(system.nup), float(system.ndown),system.T, self.ueff, system.g, system.m, system.w0, c0, self.restricted),
"options":{ 'maxls': 20, 'iprint': 2, 'gtol': 1e-10, 'eps': 1e-10, 'maxiter': self.maxscf,\
'ftol': 1.0e-10, 'maxcor': 1000, 'maxfun': 15000,'disp':False}}
def func(x, nbasis, nup, ndown,T, U, g, m, w0, c0, restricted):
f = objective_function(x, nbasis, nup, ndown,T, U, g, m, w0, c0, restricted)
df = gradient(x, nbasis, nup, ndown,T, U, g, m, w0, c0, restricted)
return f, df
def print_fun(x, f, accepted):
print("at minimum %.4f accepted %d" % (f, int(accepted)))
res = basinhopping(func, x, minimizer_kwargs=minimizer_kwargs, callback=print_fun,
niter=self.maxiter, niter_success=3)
self.energy = res.fun
self.shift = res.x[:nbsf]
daia = res.x[nbsf:nbsf+nova]
daib = res.x[nbsf+nova:nbsf+nova+novb]
elif self.algorithm == "bfgs":
            for i in range(self.maxiter): # restart up to self.maxiter times
res = minimize(objective_function, x, args=(float(system.nbasis), float(system.nup), float(system.ndown),\
system.T, self.ueff, system.g, system.m, system.w0, c0, self.restricted), jac=gradient, tol=1e-10,\
method='L-BFGS-B',\
options={ 'maxls': 20, 'iprint': 2, 'gtol': 1e-10, 'eps': 1e-10, 'maxiter': self.maxscf,\
'ftol': 1.0e-10, 'maxcor': 1000, 'maxfun': 15000,'disp':True})
e = res.fun
if (verbose):
print("# macro iter {} energy is {}".format(i, e))
if (e < self.energy and numpy.abs(self.energy - e) > 1e-6):
self.energy = res.fun
self.shift = self.shift
xconv = res.x.copy()
else:
break
x[:system.nbasis] = numpy.random.randn(self.shift.shape[0]) * 1e-1 + xconv[:nbsf]
x[nbsf:nbsf+nova+novb] = numpy.random.randn(nova+novb) * 1e-1 + xconv[nbsf:]
self.shift = res.x[:nbsf]
daia = res.x[nbsf:nbsf+nova]
daib = res.x[nbsf+nova:nbsf+nova+novb]
daia = daia.reshape((nvira, nocca))
daib = daib.reshape((nvirb, noccb))
if (self.restricted):
daib = daia.copy()
theta_a = numpy.zeros((nbsf, nbsf))
theta_a[nocca:nbsf,:nocca] = daia.copy()
theta_a[:nocca, nocca:nbsf] = -daia.T.copy()
theta_b = numpy.zeros((nbsf, nbsf))
theta_b[noccb:nbsf,:noccb] = daib.copy()
theta_b[:noccb, noccb:nbsf] = -daib.T.copy()
Ua = expm(theta_a)
C0a = c0[:nbsf*nbsf].reshape((nbsf,nbsf))
Ca = C0a.dot(Ua)
if (noccb > 0):
C0b = c0[nbsf*nbsf:].reshape((nbsf,nbsf))
Ub = expm(theta_b)
Cb = C0b.dot(Ub)
Cocca, detpsi = reortho(Ca[:,:nocca])
Coccb, detpsi = reortho(Cb[:,:noccb])
self.psi[:,:nocca] = Cocca
self.psi[:,nocca:] = Coccb
self.update_electronic_greens_function(system)
MS = numpy.abs(nocca-noccb) / 2.0
S2exact = MS * (MS+1.)
Sij = self.psi[:,:nocca].T.dot(self.psi[:,nocca:])
S2 = S2exact + min(nocca, noccb) - numpy.sum(numpy.abs(Sij*Sij).ravel())
# nocca = system.nup
# noccb = system.ndown
# MS = numpy.abs(nocca-noccb) / 2.0
# S2exact = MS * (MS+1.)
# Sij = psi_accept[:,:nocca].T.dot(psi_accept[:,nocca:])
# S2 = S2exact + min(nocca, noccb) - numpy.sum(numpy.abs(Sij*Sij).ravel())
print("# <S^2> = {: 3f}".format(S2))
def update_electronic_greens_function(self, system, verbose=0):
gup = gab(self.psi[:, :system.nup],
self.psi[:, :system.nup]).T
if (system.ndown == 0):
gdown = numpy.zeros_like(gup)
else:
gdown = gab(self.psi[:, system.nup:],
self.psi[:, system.nup:]).T
self.G = numpy.array([gup, gdown])
def update_wfn(self, system, V, verbose=0):
(self.eigs_up, self.eigv_up) = diagonalise_sorted(system.T[0]+V[0])
(self.eigs_dn, self.eigv_dn) = diagonalise_sorted(system.T[1]+V[1])
# I think this is slightly cleaner than using two separate
# matrices.
if self.reference is not None:
self.psi[:, :system.nup] = self.eigv_up[:, self.reference]
self.psi[:, system.nup:] = self.eigv_dn[:, self.reference]
else:
self.psi[:, :system.nup] = self.eigv_up[:, :system.nup]
self.psi[:, system.nup:] = self.eigv_dn[:, :system.ndown]
nocca = system.nup
noccb = system.ndown
nvira = system.nbasis-system.nup
nvirb = system.nbasis-system.ndown
self.virt[:, :nvira] = self.eigv_up[:,nocca:nocca+nvira]
self.virt[:, nvira:nvira+nvirb] = self.eigv_dn[:,noccb:noccb+nvirb]
gup = gab(self.psi[:, :system.nup],
self.psi[:, :system.nup]).T
h1 = system.T[0] + V[0]
if (system.ndown == 0):
gdown = numpy.zeros_like(gup)
else:
gdown = gab(self.psi[:, system.nup:],
self.psi[:, system.nup:]).T
self.eigs = numpy.append(self.eigs_up, self.eigs_dn)
self.eigs.sort()
self.G = numpy.array([gup, gdown])
def calculate_energy(self, system):
if self.verbose:
print ("# Computing trial energy.")
if (self.symmetrize):
num_energy = 0.
num_e1b = 0.
num_e2b = 0.
denom = 0.0
if (len(self.psi.shape) == 3): # multicoherent given
betas = self.shift * numpy.sqrt(system.m * system.w0 /2.0)
for iperm in range(self.nperms):
psia = self.psi[iperm, :, :system.nup]
psib = self.psi[iperm, :, system.nup:]
G = [gab(psia, psia),gab(psib, psib)]
shift = self.shift[iperm,:]
beta = betas[iperm,:]
phi = HarmonicOscillator(system.m, system.w0, order=0, shift = shift)
Lap = phi.laplacian(shift)
(energy_i, e1b_i, e2b_i) = local_energy_hubbard_holstein_jax(system.T,system.U, system.g,system.m,system.w0, G, shift, Lap)
overlap = numpy.linalg.det(psia.T.dot(psia)) * numpy.linalg.det(psib.T.dot(psib)) * numpy.prod(numpy.exp (- 0.5 * (beta**2 + beta**2) + beta*beta))
num_energy += energy_i * numpy.abs(self.coeffs[iperm])**2 * overlap
num_e1b += e1b_i * numpy.abs(self.coeffs[iperm])**2 * overlap
num_e2b += e2b_i * numpy.abs(self.coeffs[iperm])**2 * overlap
denom += overlap * numpy.abs(self.coeffs[iperm])**2
for jperm in range(iperm+1, self.nperms):
psia_j = self.psi[jperm, :, :system.nup]
psib_j = self.psi[jperm, :, system.nup:]
G_j = [gab(psia, psia_j),gab(psib, psib_j)]
beta_j = betas[jperm,:]
rho = G_j[0].diagonal() + G_j[1].diagonal()
ke = numpy.sum(system.T[0] * G_j[0] + system.T[1] * G_j[1])
pe = system.U * numpy.dot(G_j[0].diagonal(), G_j[1].diagonal())
e_ph = system.w0 * numpy.sum(beta * beta_j)
e_eph = - system.g * numpy.dot(rho, beta + beta_j)
overlap = numpy.linalg.det(psia.T.dot(psia_j)) * numpy.linalg.det(psib.T.dot(psib_j)) * numpy.prod(numpy.exp (- 0.5 * (beta**2 + beta_j**2) + beta*beta_j))
num_energy += (ke + pe + e_ph + e_eph)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
num_e1b += (ke + pe)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
num_e2b += (e_ph + e_eph)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
denom += overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0
else:
# single coherent state energy
phi = HarmonicOscillator(system.m, system.w0, order=0, shift = self.shift)
Lap = phi.laplacian(self.shift)
(energy_single, e1b_single, e2b_single) = local_energy_hubbard_holstein_jax(system.T,system.U, system.g,system.m,system.w0, self.G, self.shift, Lap)
psia = self.psi[:, :system.nup]
psib = self.psi[:, system.nup:]
beta = self.shift * numpy.sqrt(system.m * system.w0 /2.0)
for iperm in range(self.nperms):
ipermutation = self.perms[iperm]
psia_iperm = psia[ipermutation, :].copy()
psib_iperm = psib[ipermutation, :].copy()
beta_iperm = beta[ipermutation].copy()
num_energy += energy_single * self.coeffs[iperm]**2
num_e1b += e1b_single * self.coeffs[iperm]**2
num_e2b += e2b_single * self.coeffs[iperm]**2
denom += self.coeffs[iperm]**2
for jperm in range(iperm+1, self.nperms):
jpermutation = self.perms[jperm]
psia_jperm = psia[jpermutation, :].copy()
psib_jperm = psib[jpermutation, :].copy()
beta_jperm = beta[jpermutation].copy()
Ga = gab(psia_iperm, psia_jperm)
Gb = gab(psib_iperm, psib_jperm)
rho = Ga.diagonal() + Gb.diagonal()
ke = numpy.sum(system.T[0] * Ga + system.T[1] * Gb)
pe = system.U * numpy.dot(Ga.diagonal(), Gb.diagonal())
e_ph = system.w0 * numpy.sum(beta_iperm * beta_jperm)
e_eph = - system.g * numpy.dot(rho, beta_iperm + beta_jperm)
overlap = numpy.linalg.det(psia_iperm.T.dot(psia_jperm)) * numpy.linalg.det(psib_iperm.T.dot(psib_jperm)) * numpy.prod(numpy.exp (- 0.5 * (beta_iperm**2 + beta_jperm**2) + beta_iperm*beta_jperm))
num_energy += (ke + pe + e_ph + e_eph)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
num_e1b += (ke + pe)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
num_e2b += (e_ph + e_eph)*overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0 # 2.0 comes from hermiticity
denom += overlap * self.coeffs[iperm] * self.coeffs[jperm] * 2.0
self.energy = num_energy / denom
self.e1b = num_e1b / denom
self.e2b = num_e2b / denom
else:
phi = HarmonicOscillator(system.m, system.w0, order=0, shift = self.shift)
Lap = phi.laplacian(self.shift)
(self.energy, self.e1b, self.e2b) = local_energy_hubbard_holstein_jax(system.T,system.U,system.g,system.m,system.w0, self.G, self.shift, Lap)
self.energy = complex(self.energy)
self.e1b = complex(self.e1b)
self.e2b = complex(self.e2b)
|
427664
|
from __future__ import print_function
import os
import sys
import datetime
import configparser
from LSP import proc_lsp
from LSPET import proc_lspet
from MPII import proc_mpii
from COCO import proc_coco
from H36M import proc_h36m
sys.path.append("../src/")
from utility import take_notes
# parse configures
conf = configparser.ConfigParser()
conf.read(u'../conf.ini', encoding='utf8')
tgt_path = conf.get('DATA', 'tgt_path')
lsp_path = conf.get('DATA', 'lsp_path')
lspet_path = conf.get('DATA', 'lspet_path')
upi_path = conf.get('DATA', 'upi_path')
coco_api_path = conf.get('DATA', 'coco_api_path')
coco_list_path = conf.get('DATA', 'coco_list_path')
h36m_path = conf.get('DATA', 'h36m_path')
c_time = datetime.datetime.now()
time_string = "%s-%02d:%02d:%02d" % (c_time.date(), c_time.hour, c_time.minute, c_time.second)
take_notes("start at %s\n" % time_string, "./data_log.txt", create_file = True)
p_train = 0
p_test = 0
# build all dirs if not exist
for i in [tgt_path + "train/", tgt_path + "train/img/",
tgt_path + "train/sil/", tgt_path + "train/para/",
tgt_path + "test/", tgt_path + "test/img/",
tgt_path + "test/sil/", tgt_path + "test/para/"]:
if not os.path.exists(i):
os.makedirs(i)
p_train, p_test = proc_lsp(tgt_path + "train/", tgt_path + "test/",
p_train, p_test,
lsp_path, upi_path)
p_train = proc_lspet(tgt_path + "train/", p_train,
lspet_path, upi_path)
p_train, p_test = proc_mpii(tgt_path + "train/", tgt_path + "test/",
p_train, p_test, upi_path)
p_train, p_test = proc_coco(tgt_path + "train/", tgt_path + "test/",
p_train, p_test, coco_list_path)
p_train, p_test = proc_h36m(tgt_path + "train/", tgt_path + "test/",
p_train, p_test, h36m_path)
print("All done")
|
427665
|
from click.testing import CliRunner
from inenv.cli import print_version, autojump
from inenv.inenv import autojump_enabled
from inenv.version import __version__
class TestCli(object):
def setup_method(self):
self.runner = CliRunner()
def invoke(self, command, args=None):
if args is None:
args = []
result = self.runner.invoke(command, args)
assert result.exit_code == 0
return result
def test_print_version(self):
result = self.invoke(print_version)
assert __version__ in result.output
def test_autojump(self):
for _ in range(2): # cycle through both states!
autojump_was_enabled = autojump_enabled()
result = self.invoke(autojump)
if autojump_was_enabled:
assert 'disabled' in result.output
else:
assert 'enabled' in result.output
assert autojump_was_enabled != autojump_enabled()
|
427686
|
import pytest
from encoded.tests.features.conftest import app, app_settings, index_workbook
pytestmark = [
pytest.mark.indexing,
pytest.mark.usefixtures('index_workbook'),
]
def test_reports_search_batched_search_generator_init(dummy_request):
from encoded.reports.search import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment'
)
bsg = BatchedSearchGenerator(dummy_request)
assert isinstance(bsg, BatchedSearchGenerator)
assert bsg.batch_field == '@id'
assert bsg.batch_size == 5000
assert bsg.param_list == {'type': ['Experiment']}
assert bsg.batch_param_values == []
def test_reports_search_batched_search_generator_make_batched_values_from_batch_param_values(dummy_request):
from encoded.reports.search import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment'
)
bsg = BatchedSearchGenerator(dummy_request)
assert list(bsg._make_batched_values_from_batch_param_values()) == []
from encoded.reports.metadata import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/files/ENCFFABC123/'
'&@id=/files/ENCFFABC345/&@id=/files/ENCFFABC567/'
'&@id=/files/ENCFFABC789/&@id=/files/ENCFFDEF123/'
'&@id=/files/ENCFFDEF345/&@id=/files/ENCFFDEF567/'
)
bsg = BatchedSearchGenerator(dummy_request, batch_size=2)
assert list(bsg._make_batched_values_from_batch_param_values()) == [
['/files/ENCFFABC123/', '/files/ENCFFABC345/'],
['/files/ENCFFABC567/', '/files/ENCFFABC789/'],
['/files/ENCFFDEF123/', '/files/ENCFFDEF345/'],
['/files/ENCFFDEF567/']
]
bsg = BatchedSearchGenerator(dummy_request, batch_field='accession', batch_size=2)
assert list(bsg._make_batched_values_from_batch_param_values()) == []
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/files/ENCFFABC123/'
'&@id=/files/ENCFFABC345/&@id=/files/ENCFFABC567/'
'&@id=/files/ENCFFABC789/&@id=/files/ENCFFDEF123/'
'&@id=/files/ENCFFDEF345/&@id=/files/ENCFFDEF567/'
'&accession=ENCFFAAA111'
)
bsg = BatchedSearchGenerator(dummy_request, batch_field='accession')
assert next(bsg._make_batched_values_from_batch_param_values()) == ['ENCFFAAA111']
def test_reports_search_batched_search_generator_make_batched_params_from_batched_values(dummy_request):
from encoded.reports.search import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/files/ENCFFABC123/'
'&@id=/files/ENCFFABC345/&@id=/files/ENCFFABC567/'
'&@id=/files/ENCFFABC789/&@id=/files/ENCFFDEF123/'
'&@id=/files/ENCFFDEF345/&@id=/files/ENCFFDEF567/'
)
bsg = BatchedSearchGenerator(dummy_request, batch_size=2)
actual_batched_params = []
for batched_values in bsg._make_batched_values_from_batch_param_values():
actual_batched_params.append(
bsg._make_batched_params_from_batched_values(batched_values)
)
expected_batched_params = [
[('@id', '/files/ENCFFABC123/'), ('@id', '/files/ENCFFABC345/')],
[('@id', '/files/ENCFFABC567/'), ('@id', '/files/ENCFFABC789/')],
[('@id', '/files/ENCFFDEF123/'), ('@id', '/files/ENCFFDEF345/')],
[('@id', '/files/ENCFFDEF567/')]
]
assert expected_batched_params == actual_batched_params
def test_reports_search_batched_search_generator_build_new_request(dummy_request):
from encoded.reports.search import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/files/ENCFFABC123/'
'&@id=/files/ENCFFABC345/&@id=/files/ENCFFABC567/'
'&@id=/files/ENCFFABC789/&@id=/files/ENCFFDEF123/'
'&@id=/files/ENCFFDEF345/&@id=/files/ENCFFDEF567/'
)
bsg = BatchedSearchGenerator(dummy_request, batch_size=2)
batched_params = [('@id', '/files/ENCFFABC123/'), ('@id', '/files/ENCFFABC345/')]
request = bsg._build_new_request(batched_params)
assert str(request.query_string) == (
'type=Experiment'
'&%40id=%2Ffiles%2FENCFFABC123%2F'
'&%40id=%2Ffiles%2FENCFFABC345%2F'
'&limit=all'
)
assert request.path_info == '/search/'
assert request.registry
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/files/ENCFFABC123/'
'&@id=/files/ENCFFABC345/&@id=/files/ENCFFABC567/'
'&@id=/files/ENCFFABC789/&@id=/files/ENCFFDEF123/'
'&@id=/files/ENCFFDEF345/&@id=/files/ENCFFDEF567/'
'&field=accession&files.status=released'
)
bsg = BatchedSearchGenerator(dummy_request, batch_size=2)
batched_params = [('@id', '/files/ENCFFABC123/'), ('@id', '/files/ENCFFABC345/')]
request = bsg._build_new_request(batched_params)
assert request.query_string == (
'type=Experiment&field=accession&files.status=released'
'&%40id=%2Ffiles%2FENCFFABC123%2F'
'&%40id=%2Ffiles%2FENCFFABC345%2F'
'&limit=all'
)
assert request.path_info == '/search/'
assert request.registry
def test_reports_search_batched_search_generator_results(index_workbook, dummy_request):
from encoded.reports.search import BatchedSearchGenerator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&field=@id&field=status'
)
bsg = BatchedSearchGenerator(dummy_request)
results = list(bsg.results())
assert len(results) >= 63, f'{len(results)} not expected'
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&@id=/experiments/ENCSR001ADI/'
'&field=@id&field=status'
)
bsg = BatchedSearchGenerator(dummy_request)
results = list(bsg.results())
assert len(results) == 1
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment'
'&@id=/experiments/ENCSR001ADI/'
'&@id=/experiments/ENCSR003CON/'
'&@id=/experiments/ENCSR000ACY/'
'&@id=/experiments/ENCSR001CON/'
'&@id=/experiments/ENCSR751STT/'
'&@id=/experiments/ENCSR604DNT/'
'&@id=/experiments/ENCSR001SER/'
'&@id=/experiments/ENCSR000AEM/'
'&@id=/experiments/ENCSR334EJI/'
'&@id=/experiments/ENCSR123AAD/'
'&field=@id&field=status'
)
bsg = BatchedSearchGenerator(dummy_request)
results = list(bsg.results())
assert len(results) == 10
for result in results:
# (@type, @id, status)
assert len(result.keys()) == 3
bsg = BatchedSearchGenerator(dummy_request, batch_size=2)
results = list(bsg.results())
assert len(results) == 10
for result in results:
assert len(result.keys()) == 3
bsg = BatchedSearchGenerator(dummy_request, batch_size=3)
results = list(bsg.results())
assert len(results) == 10
for result in results:
assert len(result.keys()) == 3
bsg = BatchedSearchGenerator(dummy_request, batch_size=5)
results = list(bsg.results())
assert len(results) == 10
for result in results:
assert len(result.keys()) == 3
bsg = BatchedSearchGenerator(dummy_request, batch_field='accession')
results = list(bsg.results())
assert len(results) == 10
for result in results:
assert len(result.keys()) == 3
|
427702
|
import torch
import random
import librosa
import numpy as np
import nlpaug.flow as naf
import nlpaug.augmenter.audio as naa
import nlpaug.augmenter.spectrogram as nas
from torchvision.transforms import Normalize
from torch.utils.data import Dataset
from torchaudio.datasets import LIBRISPEECH
BAD_LIBRISPEECH_INDICES = [60150]
LIBRISPEECH_MEAN = [-22.924]
LIBRISPEECH_STDEV = [12.587]
LIBRISPEECH_HOP_LENGTH_DICT = {
224: 672,
112: 1344,
64: 2360,
32: 4800,
}
class LibriSpeech(Dataset):
def __init__(
self,
root,
train=True,
spectral_transforms=False,
wavform_transforms=True,
train_urls=[
'train-clean-100',
'train-clean-360',
'train-other-500',
],
test_url='dev-clean',
max_length=150526,
input_size=112,
normalize_mean=LIBRISPEECH_MEAN,
normalize_stdev=LIBRISPEECH_STDEV,
):
super().__init__()
# choose to either apply augmentation at wavform or at augmentation level
assert not (spectral_transforms and wavform_transforms)
if train:
datasets = []
for train_url in train_urls:
dataset = LIBRISPEECH(root, url=train_url, download=True,
folder_in_archive='LibriSpeech')
datasets.append(dataset)
self.dataset = ConcatDatasets(datasets)
else:
self.dataset = LIBRISPEECH(root, url=test_url, download=True,
folder_in_archive='LibriSpeech')
self.wavform_transforms = wavform_transforms
self.spectral_transforms = spectral_transforms
self.max_length = max_length
self.train = train
self.input_size = input_size
self.normalize_mean = normalize_mean
self.normalize_stdev = normalize_stdev
all_speaker_ids = self.get_speaker_ids()
unique_speaker_ids = sorted(list(set(all_speaker_ids)))
num_unique_speakers = len(unique_speaker_ids)
self.speaker_id_map = dict(zip(unique_speaker_ids, range(num_unique_speakers)))
self.all_speaker_ids = np.array([self.speaker_id_map[sid] for sid in all_speaker_ids])
self.num_unique_speakers = num_unique_speakers
def get_speaker_ids(self):
if self.train:
speaker_ids = []
for dataset in self.dataset.datasets:
speaker_ids_i = self._get_speaker_ids(dataset)
speaker_ids.append(speaker_ids_i)
return np.concatenate(speaker_ids)
else:
return self._get_speaker_ids(self.dataset)
def _get_speaker_ids(self, dataset):
speaker_ids = []
for i in range(len(dataset)):
fileid = dataset._walker[i]
speaker_id = self.load_librispeech_speaker_id(
fileid,
dataset._path,
dataset._ext_audio,
dataset._ext_txt,
)
speaker_ids.append(speaker_id)
return np.array(speaker_ids)
def load_librispeech_speaker_id(self, fileid, path, ext_audio, ext_txt):
speaker_id, _, _ = fileid.split("-")
return int(speaker_id)
def __getitem__(self, index):
if index in BAD_LIBRISPEECH_INDICES:
index = index + 1
wavform, sample_rate, _, speaker_id, _, _ = self.dataset.__getitem__(index)
speaker_id = self.speaker_id_map[speaker_id]
wavform = np.asarray(wavform[0])
if self.wavform_transforms:
transforms = WavformAugmentation(sample_rate)
wavform = transforms(wavform)
# pad to 150k frames
if len(wavform) > self.max_length:
# randomly pick which side to chop off (fix if validation)
flip = (bool(random.getrandbits(1)) if self.train else True)
padded = (wavform[:self.max_length] if flip else
wavform[-self.max_length:])
else:
padded = np.zeros(self.max_length)
padded[:len(wavform)] = wavform # pad w/ silence
spectrum = librosa.feature.melspectrogram(
padded,
sample_rate,
hop_length=LIBRISPEECH_HOP_LENGTH_DICT[self.input_size],
n_mels=self.input_size,
)
if self.spectral_transforms: # apply time and frequency masks
transforms = SpectrumAugmentation()
spectrum = transforms(spectrum)
# log mel-spectrogram
spectrum = librosa.power_to_db(spectrum**2)
spectrum = torch.from_numpy(spectrum).float()
spectrum = spectrum.unsqueeze(0)
if self.spectral_transforms: # apply noise on spectral
noise_stdev = 0.25 * self.normalize_stdev[0]
noise = torch.randn_like(spectrum) * noise_stdev
spectrum = spectrum + noise
normalize = Normalize(self.normalize_mean, self.normalize_stdev)
spectrum = normalize(spectrum)
return index, spectrum, speaker_id
def __len__(self):
return len(self.dataset)
class LibriSpeechTwoViews(LibriSpeech):
def __getitem__(self, index):
index, spectrum1, speaker_id = super().__getitem__(index)
_, spectrum2, _ = super().__getitem__(index)
return index, spectrum1, spectrum2, speaker_id
class ConcatDatasets(Dataset):
"""
We might have a few datasets we wish to concatenate together
and treat as a single dataset.
"""
def __init__(self, datasets):
super().__init__()
self.datasets = datasets
self.size = len(datasets)
def get_dataset_index(self, index):
# find which dataset this index belongs in
total_length = 0
for i in range(self.size):
if index < (len(self.datasets[i]) + total_length):
return self.datasets[i], total_length
# if not, then accumulate total_length
total_length += len(self.datasets[i])
def __getitem__(self, index):
dataset, shift_size = self.get_dataset_index(index)
return dataset.__getitem__(index - shift_size)
def __len__(self):
total_length = 0
for i in range(self.size):
total_length += len(self.datasets[i])
return total_length
class LibriSpeechTransfer(Dataset):
"""
Divide the dev-clean split of LibriSpeech into train and
test splits by speaker so we can train a logreg fairly.
"""
def __init__(
self,
root,
train=True,
spectral_transforms=False,
wavform_transforms=False,
max_length=150526,
input_size=112,
normalize_mean=LIBRISPEECH_MEAN,
normalize_stdev=LIBRISPEECH_STDEV,
):
super().__init__()
assert not (spectral_transforms and wavform_transforms)
self.dataset = LIBRISPEECH(root, url='dev-clean', download=True,
folder_in_archive='LibriSpeech')
all_speaker_ids = self.get_speaker_ids(self.dataset)
unique_speaker_ids = sorted(list(set(all_speaker_ids)))
num_unique_speakers = len(unique_speaker_ids)
self.speaker_id_map = dict(zip(unique_speaker_ids, range(num_unique_speakers)))
self.all_speaker_ids = np.array([self.speaker_id_map[sid] for sid in all_speaker_ids])
self.num_unique_speakers = num_unique_speakers
self.num_labels = num_unique_speakers
self.indices = self.train_test_split(self.dataset, all_speaker_ids, train=train)
self.spectral_transforms = spectral_transforms
self.wavform_transforms = wavform_transforms
self.max_length = max_length
self.train = train
self.input_size = input_size
self.normalize_mean = normalize_mean
self.normalize_stdev = normalize_stdev
def get_speaker_ids(self, dataset):
speaker_ids = []
for i in range(len(dataset)):
fileid = dataset._walker[i]
speaker_id = self.load_librispeech_speaker_id(
fileid,
dataset._path,
dataset._ext_audio,
dataset._ext_txt,
)
speaker_ids.append(speaker_id)
return np.array(speaker_ids)
def train_test_split(self, dataset, speaker_ids, train=True):
rs = np.random.RandomState(42) # fix seed so reproducible splitting
unique_speaker_ids = sorted(set(speaker_ids))
unique_speaker_ids = np.array(unique_speaker_ids)
# train test split to ensure the 80/20 splits
train_indices, test_indices = [], []
for speaker_id in unique_speaker_ids:
speaker_indices = np.where(speaker_ids == speaker_id)[0]
size = len(speaker_indices)
rs.shuffle(speaker_indices)
train_size = int(0.8 * size)
train_indices.extend(speaker_indices[:train_size].tolist())
test_indices.extend(speaker_indices[train_size:].tolist())
return train_indices if train else test_indices
def load_librispeech_speaker_id(self, fileid, path, ext_audio, ext_txt):
speaker_id, _, _ = fileid.split("-")
return int(speaker_id)
def __getitem__(self, index):
        # NOTE: overwrite index with our custom indices mapping examples
# to the training and test splits
index = self.indices[index]
try:
wavform, sample_rate, _, speaker_id, _, _ = self.dataset.__getitem__(index)
        except Exception:
index2 = (index + 1) % len(self.dataset)
wavform, sample_rate, _, speaker_id, _, _ = self.dataset.__getitem__(index2)
speaker_id = self.speaker_id_map[speaker_id]
wavform = np.asarray(wavform[0])
if self.wavform_transforms:
transforms = WavformAugmentation(sample_rate)
wavform = transforms(wavform)
# pad to 150k frames
if len(wavform) > self.max_length:
# randomly pick which side to chop off (fix if validation)
flip = (bool(random.getrandbits(1)) if self.train else True)
padded = (wavform[:self.max_length] if flip else
wavform[-self.max_length:])
else:
padded = np.zeros(self.max_length)
padded[:len(wavform)] = wavform # pad w/ silence
        spectrum = librosa.feature.melspectrogram(
            padded,
            sample_rate,
            hop_length=LIBRISPEECH_HOP_LENGTH_DICT[self.input_size],
            n_mels=self.input_size,
        )
if self.spectral_transforms: # apply time and frequency masks
transforms = SpectrumAugmentation()
spectrum = transforms(spectrum)
# log mel-spectrogram
spectrum = librosa.power_to_db(spectrum**2)
spectrum = torch.from_numpy(spectrum).float()
spectrum = spectrum.unsqueeze(0)
if self.spectral_transforms: # apply noise on spectral
noise_stdev = 0.25 * self.normalize_stdev[0]
noise = torch.randn_like(spectrum) * noise_stdev
spectrum = spectrum + noise
normalize = Normalize(self.normalize_mean, self.normalize_stdev)
spectrum = normalize(spectrum)
return index, spectrum, speaker_id
def __len__(self):
return len(self.indices)
class SpectrumAugmentation(object):
def get_random_freq_mask(self):
return nas.FrequencyMaskingAug(mask_factor=40)
def get_random_time_mask(self):
return nas.TimeMaskingAug(mask_factor=40)
def __call__(self, data):
transforms = naf.Sequential([self.get_random_freq_mask(),
self.get_random_time_mask()])
return transforms.augment(data)
class WavformAugmentation(object):
def __init__(self, sample_rate=None, crop_and_noise_only=True):
super().__init__()
self.crop_and_noise_only = crop_and_noise_only
self.sample_rate = sample_rate
def get_random_loudness(self):
return naa.LoudnessAug(crop=(0,1), coverage=1)
def get_random_crop(self):
return AudioCropAug(scale=(0.08, 1.0))
def get_random_noise(self):
return AudioNoiseAug(scale=1)
def get_random_pitch(self):
return naa.PitchAug(self.sample_rate, crop=(0,1), coverage=1)
def __call__(self, data):
if self.crop_and_noise_only:
transforms = [self.get_random_crop(), self.get_random_noise()]
else:
transforms = [self.get_random_crop(), self.get_random_loudness(),
self.get_random_noise(), self.get_random_pitch()]
random.shuffle(transforms)
for transform in transforms:
data = transform.augment(data)
return data
class AudioCropAug(object):
def __init__(self, scale=(0.08, 1.0), rescale=False):
super().__init__()
self.scale = scale
self.rescale = rescale
def augment(self, data):
scale = np.random.uniform(
low=self.scale[0],
high=self.scale[1],
)
data_size = len(data)
crop_size = int(scale * data_size)
start_ix = int(np.random.choice(np.arange(data_size - crop_size)))
crop = data[start_ix:start_ix+crop_size]
if self.rescale:
result = librosa.effects.time_stretch(crop, crop_size / data_size)
else:
result = np.zeros(data_size)
result[start_ix:start_ix+crop_size] = crop
return result
class AudioNoiseAug(object):
def __init__(self, scale=1):
super().__init__()
self.scale = scale
def get_noise(self, segment_size, scale):
# https://en.wikipedia.org/wiki/Colors_of_noise
uneven = segment_size % 2
fft_size = segment_size // 2 + 1 + uneven
noise_fft = np.random.randn(fft_size)
noise_fft = noise_fft * scale # magnify?
noise = np.fft.irfft(noise_fft)
if uneven:
noise = noise[:-1]
return noise
def augment(self, data):
noise = self.get_noise(len(data), self.scale)
return data + noise
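# Hedged usage sketch (added for illustration; the root path below is a placeholder and
# constructing the dataset triggers a LibriSpeech download, so it is left commented out):
# from torch.utils.data import DataLoader
# train_set = LibriSpeech('/data/librispeech', train=True, wavform_transforms=True)
# loader = DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4)
# index, spectrum, speaker_id = train_set[0]  # spectrum: (1, input_size, time) normalized log-mel tensor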
|
427745
|
class Solution:
def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
if t < 0 or k <= 0:
return False
table = {}
w = 1 + t
for i, num in enumerate(nums):
curr = num // w
if curr in table:
return True
if curr - 1 in table and num - table[curr - 1] <= t:
return True
if curr + 1 in table and table[curr + 1] - num <= t:
return True
table[curr] = num
if i >= k:
del table[nums[i - k] // w]
return False
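# Hedged usage sketch (added for illustration, not part of the original snippet): the bucket
# trick maps each number to bucket floor(num / (t + 1)), so two numbers sharing a bucket differ
# by at most t, and only the two neighbouring buckets need an explicit check.
if __name__ == '__main__':
    solver = Solution()
    assert solver.containsNearbyAlmostDuplicate([1, 2, 3, 1], 3, 0)            # |1 - 1| <= 0 within index distance 3
    assert not solver.containsNearbyAlmostDuplicate([1, 5, 9, 1, 5, 9], 2, 3)  # every close pair differs by at least 4 > 3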
|
427761
|
from setuptools import setup
import versioneer
from os import path
here = path.abspath(path.dirname(__file__))
# Read the requirements from requirements.txt
requires = open('requirements.txt').read().strip().split('\n')
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
short_description = (
"""Assemble thumbnail-sized images from a large collection into a tiling
which, viewed at a distance, gives the impression of one large photo.""")
extras_require = {
'parallel': ['dask[bag]'],
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
setup(
name='photomosaic',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description=short_description,
long_description=long_description,
url='https://github.com/danielballan/photomosaic',
    author='Photomosaic Contributors',
author_email='<EMAIL>',
license='BSD 3-Clause',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='art image color mosaic',
packages=['photomosaic'],
python_requires=">=3.6",
install_requires=requires,
extras_require=extras_require
)
|
427833
|
from math import pi, cos, sin, log, acos, sqrt, pow
"""
Equations source:
a) https://vismor.com/documents/power_systems/transmission_lines/S2.SS1.php
b) <NAME> - Distribution system modelling (3rd Ed.)
Typical values of earth resistivity
10 Ω·m - Resistivity of swampy ground
100 Ω·m - Resistivity of average damp earth
1000 Ω·m - Resistivity of dry earth
"""
def p_approx(k, theta):
"""
    :param k: Carson correction argument (depends on conductor geometry, frequency and earth resistivity)
    :param theta: angle between the vertical and the line joining the conductor to the image conductor (radians)
    :return: Carson P correction term (truncated series approximation)
"""
a1 = pi / 8
a2 = k * cos(theta) / (3 * sqrt(2))
a3 = k * k * cos(2 * theta) * (0.6728 + log(2 / k)) / 16
a4 = k * k * theta * sin(2 * theta) / 16
a5 = k * k * k * cos(3 * theta) / (45 * sqrt(2))
a6 = k * k * k * k * pi * cos(4 * theta) / 1536
return a1 - a2 + a3 + a4 + a5 - a6
def q_approx(k, theta):
"""
    :param k: Carson correction argument (depends on conductor geometry, frequency and earth resistivity)
    :param theta: angle between the vertical and the line joining the conductor to the image conductor (radians)
    :return: Carson Q correction term (truncated series approximation)
"""
a1 = 0.5 * log(2 / k)
a2 = k * cos(theta) / (3 * sqrt(2))
a3 = pi * k * k * cos(2 * theta) / 64
a4 = k * k * k * cos(3 * theta) / (45 * sqrt(2))
a5 = k * k * k * k * sin(4 * theta) / 384
a6 = k * k * k * k * cos(4 * theta) * (1.0895 + log(2 / k)) / 384
return -0.0386 + a1 + a2 - a3 + a4 - a5 - a6
def get_d_ij(xi, yi, xj, yj):
"""
Distance module between wires
:param xi: x position of the wire i
:param yi: y position of the wire i
:param xj: x position of the wire j
:param yj: y position of the wire j
:return: distance module
"""
return sqrt((xi - xj)**2 + (yi - yj)**2)
def get_D_ij(xi, yi, xj, yj):
"""
Distance module between the wire i and the image of the wire j
:param xi: x position of the wire i
:param yi: y position of the wire i
:param xj: x position of the wire j
:param yj: y position of the wire j
:return: Distance module between the wire i and the image of the wire j
"""
return sqrt((xi - xj) ** 2 + (yi + yj) ** 2)
def z_ii(r_i, h_i, gmr_i, f, rho):
"""
Self impedance
:param r_i: wire resistance
:param h_i: wire vertical position (m)
:param gmr_i: wire geometric mean radius (m)
:param f: system frequency (Hz)
    :param rho: earth resistivity (Ω·m)
:return: self impedance in Ohm / m
"""
w = 2 * pi * f
k = 4 * pi * h_i * sqrt(2 * rho * f)
theta = 0
p = p_approx(k, theta)
q = q_approx(k, theta)
z = r_i + 1j * 2 * w * log(2 * h_i / gmr_i) + 4 * w * (p + 1j * q)
return z
def z_ij(h_i, h_j, d_ij, D_ij, f, rho):
"""
Mutual impedance
:param h_i: wire i vertical position (m)
:param h_j: wire j vertical position (m)
:param d_ij: Distance module between the wires i and j
:param D_ij: Distance module between the wire i and the image of the wire j
:param f: system frequency (Hz)
    :param rho: earth resistivity (Ω·m)
:return: mutual impedance in Ohm / m
"""
w = 2 * pi * f
k = 2 * pi * D_ij * sqrt(2 * rho * f)
    theta = acos((h_i + h_j) / D_ij)
p = p_approx(k, theta)
q = q_approx(k, theta)
z = 2j * w * log(D_ij / d_ij) + 4 * w * (p + 1j * q)
return z
def calc_z_matrix():
pass
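# Hedged usage sketch (illustrative numbers only, not taken from the sources cited above): self and
# mutual impedance for two assumed conductors 1 m apart, 10 m above average damp earth, at 60 Hz.
if __name__ == '__main__':
    f = 60.0      # assumed system frequency (Hz)
    rho = 100.0   # assumed earth resistivity (average damp earth)
    xi, yi = 0.0, 10.0  # assumed position of wire i (m)
    xj, yj = 1.0, 10.0  # assumed position of wire j (m)
    d = get_d_ij(xi, yi, xj, yj)
    D = get_D_ij(xi, yi, xj, yj)
    print('z_ii =', z_ii(r_i=0.1, h_i=yi, gmr_i=0.01, f=f, rho=rho))
    print('z_ij =', z_ij(h_i=yi, h_j=yj, d_ij=d, D_ij=D, f=f, rho=rho))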
|
427840
|
expected_output = {
"configuration": {
"vpg_name": "VirtualPortGroup2",
"vpg_ip_addr": "192.168.2.1",
"vpg_ip_mask": "255.255.255.0",
"sng_name": "SNG-APPQOE",
"sng_ip_addr": "192.168.2.2",
},
"status": {"operational_state": "RUNNING"},
}
|
427851
|
import inspect
import os
import types
import chainer_chemistry.functions
import chainer_chemistry.links
import chainer_chemistry.models
def _is_rst_exists(entity):
return os.path.exists('source/generated/{}.rst'.format(entity))
def check(app, exception):
missing_entities = []
missing_entities += [
name for name in _list_chainer_functions()
if not _is_rst_exists(name)]
missing_entities += [
name for name in _list_chainer_links()
if not _is_rst_exists(name)]
missing_entities += [
name for name in _list_chainer_models()
if not _is_rst_exists(name)]
if len(missing_entities) != 0:
app.warn('\n'.join([
'Undocumented entities found.',
'',
] + missing_entities))
def _list_chainer_functions():
# List exported functions under chainer.functions.
return ['chainer_chemistry.functions.{}'.format(name)
for (name, func) in chainer_chemistry.functions.__dict__.items()
if isinstance(func, types.FunctionType)]
def _list_chainer_links():
# List exported classes under chainer.links.
return ['chainer_chemistry.links.{}'.format(name)
for (name, link) in chainer_chemistry.links.__dict__.items()
if inspect.isclass(link)]
def _list_chainer_models():
    # List exported classes under chainer_chemistry.models.
return ['chainer_chemistry.models.{}'.format(name)
for (name, model) in chainer_chemistry.models.__dict__.items()
if inspect.isclass(model)]
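# Hedged usage sketch (assumed wiring, not part of the original module): in the project's Sphinx
# conf.py this check would typically be registered against the 'build-finished' event, whose
# handlers receive (app, exception) just like check() above.
#
# def setup(app):
#     app.connect('build-finished', check)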
|
427875
|
from arche.tools import bitbucket
import pytest
urls = [
(
"https://bitbucket.org/scrapinghub/customer/src/master/customer/schemas/ecommerce.json",
"https://api.bitbucket.org/2.0/repositories/scrapinghub/customer/src/master"
"/customer/schemas/ecommerce.json",
),
(
"https://bitbucket.org/scrapinghub/customer/raw/"
"9c4b0bf46f2012ab38bc066e1ebe774d72856013/customer/schemas/ecommerce.json",
"https://api.bitbucket.org/2.0/repositories/scrapinghub/customer/src/"
"9c4b0bf46f2012ab38bc066e1ebe774d72856013/customer/schemas/ecommerce.json",
),
]
@pytest.mark.parametrize(
["url", "expected"],
[
(
"https://bitbucket.org/scrapinghub/customer/src/master/customer/schemas/"
"ecommerce.json",
"https://api.bitbucket.org/2.0/repositories/scrapinghub/customer/src/"
"master/customer/schemas/ecommerce.json",
),
(
"https://bitbucket.org/scrapinghub/customer/raw/"
"9c4b0bf46f2012ab38bc066e1ebe774d72856013/customer/schemas/"
"ecommerce.json",
"https://api.bitbucket.org/2.0/repositories/scrapinghub/customer/src/"
"9c4b0bf46f2012ab38bc066e1ebe774d72856013/customer/schemas/"
"ecommerce.json",
),
],
)
def test_convert_to_api_url(url, expected):
api_url = bitbucket.convert_to_api_url(url, bitbucket.NETLOC, bitbucket.API_NETLOC)
assert api_url == expected
@pytest.mark.parametrize(
"url",
[
"https://bitbucket.org/ecommerce.json",
"https://bitbucket.org/user/ecommerce.json",
"https://bitbucket.org/user/repo/ecommerce.json",
"https://bitbucket.org/user/repo/foobar/ecommerce.json",
],
)
def test_convert_to_api_url_using_an_invalid_url(url):
with pytest.raises(ValueError):
bitbucket.convert_to_api_url(url, bitbucket.NETLOC, bitbucket.API_NETLOC)
@pytest.mark.parametrize(
"credentials,expected",
[(("foo", "bar"), "Zm9vOmJhcg=="), (("alice", "secret"), "YWxpY2U6c2VjcmV0")],
)
def test_get_auth_header(credentials, expected):
assert bitbucket.get_auth_header(*credentials) == {
"Authorization": f"Basic {expected}"
}
def test_prepare_request():
bitbucket.USER = "foo"
bitbucket.PASS = "<PASSWORD>"
url = (
"https://bitbucket.org/scrapinghub/customer/src/master/customer/schemas/"
"ecommerce.json"
)
req = bitbucket.prepare_request(url)
assert "api.bitbucket.org" == req.host
assert "Authorization" in req.headers
def test_prepare_request_raises_an_error_when_no_credentials_found():
bitbucket.USER = bitbucket.PASS = None
with pytest.raises(ValueError):
bitbucket.prepare_request("foo")
|
427882
|
import os
import random
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
import numpy as np
import torch
from PIL import Image
from detection.src.yolov3.model import Darknet
from detection.src.yolov3.utils.datasets import ListDataset
from detection.src.yolov3.utils.parse_config import parse_data_config
from detection.src.yolov3.utils.utils import non_max_suppression, rescale_boxes, load_classes
class YOLODetect():
"""
This step performs detection on a given episode using a trained YOLO model.
"""
def __init__(
self,
episode_config,
model_config,
trained_weights,
objectness_threshold=0.8,
nms_threshold=0.4,
iou_threshold=0.2,
image_size=416,
output_dir='output/detections',
):
"""
Args:
episode_config (str): path to the .data configuration file of the episode
model_config (str): path to the .cfg file defining the structure of the YOLO model
trained_weights (str): path to the file containing the trained weights of the model
objectness_threshold (float): the algorithm only keep boxes with a higher objectness confidence
nms_threshold (float): non maximum suppression threshold
iou_threshold (float): intersection over union threshold
image_size (int): size of input images
output_dir (str): directory where the predicted boxes are saved
"""
self.data_config = parse_data_config(episode_config)
self.model_config = model_config
self.trained_weights = trained_weights
self.objectness_threshold = objectness_threshold
self.nms_threshold = nms_threshold
self.iou_threshold = iou_threshold
self.image_size = image_size
self.output_dir = output_dir
self.labels = self.parse_labels(self.data_config['labels'])
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def apply(self):
"""
Executes YOLODetect step and saves the result images
"""
model = self.get_model()
paths, images = self.get_episode()
_, outputs = model.forward(images)
outputs = outputs.cpu()
outputs = non_max_suppression(
outputs,
conf_thres=self.objectness_threshold,
nms_thres=self.nms_threshold
)
self.save_detections(list(paths), outputs)
def dump_output(self, *_, **__):
pass
def parse_labels(self, labels_str):
"""
Gets labels from a string
Args:
labels_str (str): string from the data config file describing the labels of the episode
Returns:
list: labels of the episode
"""
labels_str_split = labels_str.split(', ')
labels = [int(label) for label in labels_str_split]
return labels
def get_episode(self):
"""
TODO: change from episodic collate_fn to standard collate_fn
Returns:
Tuple[Tuple, torch.Tensor, torch.Tensor]: the paths, images and target boxes of data instances composing
the episode described in the data configuration file
"""
dataset = ListDataset(
list_path=self.data_config['eval'],
img_size=self.image_size,
augment=False,
multiscale=False,
normalized_labels=True,
)
data_instances = [dataset[i] for i in range(len(dataset))]
paths, images, _ = dataset.collate_fn(data_instances)
return paths, images.to(self.device)
def get_model(self):
"""
Returns:
Darknet: model
"""
return Darknet(self.model_config, self.image_size, self.trained_weights).to(self.device)
def save_detections(self, paths, output):
"""
Draws predicted boxes on input images and saves them in self.output_dir
Args:
paths (list): paths to input images
output (list): output of the model. Each element is a torch.Tensor of shape (number_of_kept_detections, 7).
Each detection contains (x1, y1, x2, y2, objectness_confidence, class_score, class_predicted)
"""
# Bounding-box colors
cmap = plt.get_cmap("tab20b")
colors = [cmap(i) for i in np.linspace(0, 1, 20)]
print('Saving images:')
# Iterate through images and save plot of detections
for img_i, (path, detections) in enumerate(zip(paths, output)):
print('Image {index}: {path}'.format(index=img_i, path=path))
# Create plot
img = np.array(Image.open(path))
plt.figure()
fig, ax = plt.subplots(1)
ax.imshow(img)
# Draw bounding boxes and labels of detections
if detections is not None:
# Rescale boxes to original image
detections = rescale_boxes(detections, self.image_size, img.shape[:2])
unique_labels = detections[:, -1].cpu().unique()
n_cls_preds = len(unique_labels)
bbox_colors = random.sample(colors, n_cls_preds)
classes = load_classes(self.data_config['names'])
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
print('\t+ Label: {label_name}, Classif conf: {class_conf}'.format(
label_name=classes[int(cls_pred)],
class_conf=cls_conf.item())
)
box_w = x2 - x1
box_h = y2 - y1
color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
# Create a Rectangle patch
bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor='none')
# Add the bbox to the plot
ax.add_patch(bbox)
# Add label
plt.text(
x1,
y1,
s=classes[int(cls_pred)],
color='white',
verticalalignment='top',
bbox={'color': color, 'pad': 0},
)
# Save generated image with detections
plt.axis('off')
plt.gca().xaxis.set_major_locator(NullLocator())
plt.gca().yaxis.set_major_locator(NullLocator())
filename = path.split('/')[-1].split('.')[0] + '.png'
plt.savefig(os.path.join(self.output_dir, filename), bbox_inches='tight', pad_inches=0.0)
plt.close()
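# Hedged usage sketch (the paths below are placeholders, not files shipped with the project):
# step = YOLODetect(
#     episode_config='config/episode.data',          # hypothetical .data episode description
#     model_config='config/yolov3.cfg',              # hypothetical YOLO structure file
#     trained_weights='weights/yolov3_trained.pth',  # hypothetical trained weights
#     output_dir='output/detections',
# )
# step.apply()  # runs detection on the episode and saves annotated images to output_dir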
|
427900
|
import sys, asyncio, os
from catalog import searchDomains, findOpenPorts, kafkaProducer
from aux import consumer, producer
async def main():
tasks = []
foundDomains = asyncio.Queue()
portsQueue = asyncio.Queue()
tasks.append(asyncio.create_task(producer(searchDomains, sys.argv[1], foundDomains)))
tasks.append(asyncio.create_task(consumer(foundDomains, findOpenPorts, 1, "80,443", portsQueue)))
tasks.append(asyncio.create_task(consumer(portsQueue, kafkaProducer, 2, os.environ["KAFKA_SERVER"], "domainsTopic")))
await asyncio.gather(*tasks)
asyncio.run(main())
|
427944
|
from collections import defaultdict
import re
import pymysql as mysql
MYSQL_FIELD_TYPES = {
0: 'DECIMAL',
1: 'TINY',
2: 'SHORT',
3: 'LONG',
4: 'FLOAT',
5: 'DOUBLE',
6: 'NULL',
7: 'TIMESTAMP',
8: 'LONGLONG',
9: 'INT24',
10: 'DATE',
11: 'TIME',
12: 'DATETIME',
13: 'YEAR',
14: 'NEWDATE',
15: 'VARCHAR',
16: 'BIT',
246: 'NEWDECIMAL',
247: 'INTERVAL',
248: 'SET',
249: 'TINY_BLOB',
250: 'MEDIUM_BLOB',
251: 'LONG_BLOB',
252: 'BLOB',
253: 'VAR_STRING',
254: 'STRING',
255: 'GEOMETRY'
}
class DataSource:
'''
A data abstraction layer for accessing datasets.
This layer is typically hidden from end-users, as they only access
the database through DBConnection and DBContext objects.
'''
def connect(self):
'''
:return: a connection object.
:rtype: DBConnection
'''
raise NotImplementedError()
def tables(self):
'''
:return: a list of table names.
:rtype: list
'''
raise NotImplementedError()
def table_columns(self, table_name):
'''
:param table_name: table name for which to retrieve column names
:return: a list of columns for the given table.
:rtype: list
'''
raise NotImplementedError()
def foreign_keys(self):
'''
:return: a list of foreign key relations in the form (table_name, column_name, referenced_table_name, referenced_column_name).
:rtype: list
'''
raise NotImplementedError()
def table_column_names(self):
'''
:return: a list of table / column names in the form (table, col_name).
:rtype: list
'''
raise NotImplementedError()
def connected(self, tables, cols, find_connections=False):
'''
Returns a list of tuples of connected table pairs.
:param tables: a list of table names
:param cols: a list of column names
:param find_connections: set this to True to detect relationships from column names.
:return: a tuple (connected, pkeys, fkeys, reverse_fkeys)
'''
connected = defaultdict(list)
fkeys = defaultdict(set)
reverse_fkeys = {}
pkeys = {}
with self.connect() as con:
fk_result = self.foreign_keys()
if find_connections:
for table in tables:
for col in cols[table]:
if col.endswith('_id'):
ref_table = (col[:-4] + 'ies') if col[-4] == 'y' and col[-5] != 'e' else (col[:-3] + 's')
if ref_table in tables:
connected[(table, ref_table)].append((col, 'id'))
connected[(ref_table, table)].append(('id', col))
fkeys[table].add(col)
reverse_fkeys[(table, col)] = ref_table
if col == 'id':
pkeys[table] = col
for (table, col, ref_table, ref_col) in fk_result:
connected[(table, ref_table)].append((col, ref_col))
connected[(ref_table, table)].append((ref_col, col))
fkeys[table].add(col)
reverse_fkeys[(table, col)] = ref_table
tbl_col_names = self.table_column_names()
for (table, pk) in tbl_col_names:
pkeys[table] = pk
return connected, pkeys, fkeys, reverse_fkeys
def table_primary_key(self, table_name):
'''
Returns the primary key attribute name for the given table.
:param table_name: table name string
'''
raise NotImplementedError()
def fetch(self, table, cols):
'''
Fetches rows for the given table and columns.
:param table: target table
:param cols: list of columns to select
:return: rows from the given table and columns
:rtype: list
'''
raise NotImplementedError()
def select_where(self, table, cols, pk_att, pk):
'''
Select with where clause.
:param table: target table
:param cols: list of columns to select
:param pk_att: attribute for the where clause
:param pk: the id that the pk_att should match
:return: rows from the given table and cols, with the condition pk_att==pk
:rtype: list
'''
raise NotImplementedError()
def fetch_types(self, table, cols):
'''
Returns a dictionary of field types for the given table and columns.
:param table: target table
:param cols: list of columns to select
:return: a dictionary of types for each attribute
:rtype: dict
'''
raise NotImplementedError()
def column_values(self, table, col):
'''
Returns a list of distinct values for the given table and column.
:param table: target table
        :param col: target column
        :return: a list of distinct values in the column
        :rtype: list
        '''
raise NotImplementedError()
def get_driver_name(self):
raise NotImplementedError()
def get_jdbc_prefix(self):
raise NotImplementedError()
class MySQLDataSource(DataSource):
'''
A DataSource implementation for accessing datasets from a MySQL DBMS.
'''
def __init__(self, connection):
'''
:param connection: a DBConnection instance.
'''
self.connection = connection
def connect(self):
return self.connection.connect()
def foreign_keys(self):
with self.connect() as con:
cursor = con.cursor()
cursor.execute(
"SELECT table_name, column_name, referenced_table_name, referenced_column_name \
FROM information_schema.KEY_COLUMN_USAGE \
WHERE referenced_table_name IS NOT NULL AND table_schema='%s'" % self.connection.database)
fk_result = [row for row in cursor]
return fk_result
def table_column_names(self):
with self.connect() as con:
cursor = con.cursor()
cursor.execute(
"SELECT table_name, column_name \
FROM information_schema.KEY_COLUMN_USAGE \
WHERE constraint_name='PRIMARY' AND table_schema='%s'" % self.connection.database)
tbl_col_names = [row for row in cursor]
return tbl_col_names
def tables(self):
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SHOW tables')
tables = [table for (table,) in cursor]
return tables
def table_columns(self, table_name):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT column_name FROM information_schema.columns WHERE table_name = '%s' AND table_schema='%s'" % (table_name, self.connection.database))
columns = [col for (col,) in cursor]
return columns
def fmt_cols(self, cols):
return ','.join(["`%s`" % col for col in cols])
def fetch_types(self, table, cols):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT %s FROM `%s` LIMIT 1" % (self.fmt_cols(cols), table))
cursor.fetchall()
types = {}
for desc in cursor.description:
# types[desc[0]] = mysql.FieldType.get_info(desc[1])
types[desc[0]] = MYSQL_FIELD_TYPES[desc[1]]
return types
def fetch(self, table, cols):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT %s FROM %s" % (self.fmt_cols(cols), table))
result = [cols for cols in cursor]
return result
def select_where(self, table, cols, pk_att, pk):
with self.connect() as con:
cursor = con.cursor()
attributes = self.fmt_cols(cols)
cursor.execute("SELECT %s FROM %s WHERE `%s`='%s'" % (attributes, table, pk_att, pk))
result = [cols for cols in cursor]
return result
def column_values(self, table, col):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT DISTINCT BINARY `%s`, `%s` FROM `%s`" % (col, col, table))
values = [val for (_,val) in cursor]
return values
def get_driver_name(self):
return 'com.mysql.jdbc.Driver'
def get_jdbc_prefix(self):
return 'jdbc:mysql://'
class PgSQLDataSource(DataSource):
'''
A DataSource implementation for accessing datasets from a PosgreSQL DBMS.
'''
def __init__(self, connection):
'''
:param connection: a DBConnection instance.
'''
self.connection = connection
def connect(self):
return self.connection.connect()
def foreign_keys(self):
with self.connect() as con:
cursor = con.cursor()
database = self.connection.database
cursor.execute("SELECT \
tc.table_name, kcu.column_name, \
ccu.table_name AS referenced_table_name,\
ccu.column_name AS referenced_column_name \
FROM \
information_schema.table_constraints AS tc \
JOIN information_schema.key_column_usage AS kcu \
ON tc.constraint_name = kcu.constraint_name \
JOIN information_schema.constraint_column_usage AS ccu \
ON ccu.constraint_name = tc.constraint_name \
WHERE constraint_type = 'FOREIGN KEY' AND tc.table_catalog='%s'" % database)
fk_result = [row for row in cursor]
return fk_result
def table_column_names(self):
with self.connect() as con:
cursor = con.cursor()
database = self.connection.database
cursor.execute(
"SELECT \
tc.table_name, kcu.column_name \
FROM \
information_schema.table_constraints AS tc\
JOIN information_schema.key_column_usage AS kcu \
ON tc.constraint_name = kcu.constraint_name \
WHERE constraint_type = 'PRIMARY KEY' AND tc.table_catalog='%s'" % database)
tbl_col_names = [row for row in cursor]
return tbl_col_names
def tables(self):
with self.connect() as con:
cursor = con.cursor()
database = self.connection.database
cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema=\'public\' \
AND table_type=\'BASE TABLE\' AND table_catalog='%s' AND table_name NOT LIKE \'\\_%%\'" % (database)) # to escape this sql command: ... NOT LIKE '\_%'
tables = [table for (table,) in cursor]
return tables
def table_columns(self, table):
with self.connect() as con:
cursor = con.cursor()
database = self.connection.database
cursor.execute("SELECT column_name FROM information_schema.columns \
WHERE table_name = '%s' AND table_catalog='%s'" % (table,database))
columns = [col for (col,) in cursor]
return columns
def fmt_cols(self, cols):
return ','.join(["%s" % col for col in cols])
def fetch_types(self, table, cols):
with self.connect() as con:
cursor = con.cursor()
types = {}
cursor.execute("SELECT attname as col_name, atttypid::regtype AS base_type \
FROM pg_catalog.pg_attribute WHERE attrelid = 'public.%s'::regclass \
AND attnum > 0 AND NOT attisdropped ORDER BY attnum;" % table)
for rows in cursor:
types[rows[0]] = rows[1]
return types
def fetch(self, table, cols):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT %s FROM %s" % (self.fmt_cols(cols), table))
result = [cols for cols in cursor]
return result
def select_where(self, table, cols, pk_att, pk):
with self.connect() as con:
cursor = con.cursor()
attributes = self.fmt_cols(cols)
            cursor.execute("SELECT %s FROM %s WHERE %s='%s'" % (attributes, table, pk_att, pk))
result = [cols for cols in cursor]
return result
def column_values(self, table, col):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT DISTINCT %s, %s FROM %s" % (col, col, table))
values = [val for (_,val) in cursor]
return values
def get_driver_name(self):
return 'org.postgresql.Driver'
def get_jdbc_prefix(self):
return 'jdbc:postgresql://'
class SQLiteDataSource(DataSource):
'''
A DataSource implementation for accessing datasets from SQLite DBMS.
Because SQLite types are dynamic and its CREATE TABLE statement allows for any kind of custom type names, we try to match the most common types with three type categories used in ``rdm``.
'''
continuous_types = ('real', 'double', 'double precision', 'float', 'decimal')
integer_types = ('int', 'integer', 'tinyint', 'smallint', 'mediumint', 'bigint', 'unsigned big int', 'int2', 'int8')
ordinal_types = ('character', 'varchar', 'varying character', 'nchar', 'native character', 'nvarchar', 'text', 'clob', 'char')
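    # Note (added): fetch_types() below strips any parenthesized size/precision from the
    # declared type (e.g. 'VARCHAR(255)' -> 'varchar', 'DECIMAL(10,2)' -> 'decimal'); the
    # cleaned name is presumably matched elsewhere against the three tuples above to decide
    # whether a column is treated as continuous, integer-valued or ordinal.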
def __init__(self, connection):
'''
:param connection: a DBConnection instance.
'''
self.connection = connection
def connect(self):
return self.connection.connect()
def foreign_keys(self):
tables = self.tables()
result = []
for table in tables:
with self.connect() as con:
cursor = con.cursor()
cursor.execute('''SELECT "from","table","to" FROM pragma_foreign_key_list("{}");'''.format(table))
for row in cursor:
result.append((table,) + row)
return result
def table_column_names(self):
tables = self.tables()
result = []
for table in tables:
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SELECT name FROM pragma_table_info("{}") WHERE pk=1;'.format(table))
result.append((table, [row for row in cursor][0][0]))
return result
def tables(self):
with self.connect() as con:
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master \
WHERE type='table' AND name NOT LIKE 'sqlite_%';")
tables = [table for (table,) in cursor]
return tables
def table_columns(self, table_name):
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SELECT name FROM pragma_table_info("{}");'.format(table_name))
columns = [col for (col,) in cursor]
return columns
def fmt_cols(self, cols):
return ','.join(['"{}"'.format(col) for col in cols])
def fetch_types(self, table, cols):
types = {}
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SELECT "name", "type" from pragma_table_info("{}") WHERE "name" IN ({})'.format(table, self.fmt_cols(cols)))
            types = {row[0]: re.sub(r'\([0-9a-zA-Z ,]*\)', '', row[1].strip().lower()).strip() for row in cursor}
return types
def fetch(self, table, cols):
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SELECT %s FROM "%s"' % (self.fmt_cols(cols), table))
result = [cols for cols in cursor]
return result
def select_where(self, table, cols, pk_att, pk):
with self.connect() as con:
cursor = con.cursor()
attributes = self.fmt_cols(cols)
cursor.execute('SELECT %s FROM "%s" WHERE "%s"="%s"' % (attributes, table, pk_att, pk))
result = [cols for cols in cursor]
return result
def column_values(self, table, col):
with self.connect() as con:
cursor = con.cursor()
cursor.execute('SELECT DISTINCT "%s" FROM "%s"' % (col, table))
values = [val[0] for val in cursor]
return values
def get_driver_name(self):
return 'java.sql.DriverManager'
def get_jdbc_prefix(self):
return 'jdbc:sqlite:{}'.format(self.connection.sqlite_database)
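# --- Hedged usage sketch (added; not part of the original module) ---
# The data-source classes above need a DBConnection object that is not defined in this
# file, so the self-contained demo below talks to an in-memory SQLite database directly
# and runs the same metadata queries SQLiteDataSource issues (the pragma_table_info /
# pragma_foreign_key_list table-valued functions, available in SQLite >= 3.16).
if __name__ == '__main__':
    import sqlite3
    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE parent (id INTEGER PRIMARY KEY, name TEXT)')
    con.execute('CREATE TABLE child (id INTEGER PRIMARY KEY, parent_id INTEGER REFERENCES parent(id))')
    # Same table listing used by SQLiteDataSource.tables()
    tables = [t for (t,) in con.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")]
    for table in tables:
        # Same metadata queries used by table_columns() and foreign_keys()
        cols = [c for (c,) in con.execute("SELECT name FROM pragma_table_info('{}')".format(table))]
        fks = list(con.execute("SELECT \"from\",\"table\",\"to\" FROM pragma_foreign_key_list('{}')".format(table)))
        print(table, cols, fks)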
|
427969
|
from setuptools import setup, find_packages
long_description = open('README.rst').read()
version = '0.0.3'
setup(name='keras-adversarial',
version=version,
description='Adversarial models and optimizers for Keras',
url='https://github.com/bstriner/keras-adversarial',
download_url='https://github.com/bstriner/keras-adversarial/tarball/v{}'.format(version),
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['Keras'],
keywords=['keras', 'gan', 'adversarial', 'multiplayer'],
license='MIT',
long_description=long_description,
classifiers=[
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
])
|
427982
|
import argparse
import numpy as np
import math
import os
import time
from six.moves import cPickle  # six.moves smooths over the cPickle/pickle rename between Python 2 and Python 3
import yaml
import itertools
from multiprocessing.dummy import Pool as ThreadPool
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from model import yolo_layer
from utils.utils import get_random_data, preprocess_true_boxes, add_logger, update_values, set_tb_logger
class Train(object):
def __init__(self, args, threadpool):
        # initialize parameters and the model
self.args = args
self.annotation_path = args.annotation_path
self.input_shape = (args.height, args.width)
self.anchors = args.anchors
self.class_names = args.classes_names
self.num_classes = len(self.class_names)
self.model, self.infos = self._create_model()
self.optimizer = self._get_optimizer(self.args, self.model)
self.threadpool = threadpool
        # build the log name and set up the TensorBoard logger
        log_name = time.strftime("%Y%m%d%H%M%S", time.localtime()) + '__bs_' + str(self.args.batch_size)
print("logging to %s ..." % (log_name))
self.logger = set_tb_logger('logs', log_name)
self._train_pipeline(self.annotation_path)
self.init_lr = args.lr
def _train_pipeline(self, sample_path):
'''
        The training pipeline: generates the training and validation samples, runs the
        forward/backward pass on the loss, and saves checkpoints.
'''
        # randomly split the data into training and validation samples
data_gen, data_gen_val, max_batch_ind, max_val_batch_ind = self._train_data_generation(sample_path)
self.args.n_iter = self.infos.get('n_iter', self.args.n_iter)
start_epoch = self.infos.get('epoch', self.args.start_epoch)
best_val_loss = self.infos.get('best_val_loss', 100000)
        # The learning rate is adjusted automatically based on the validation loss
scheduler = ReduceLROnPlateau(self.optimizer, factor=0.1, patience=2, verbose=True, eps=1e-12)
# scheduler = StepLR(self.optimizer, step_size=3, gamma=0.6)
# training approach
for epoch in range(start_epoch, self.args.max_epochs):
self._train(data_gen, max_batch_ind, epoch)
val_loss = self._validation(epoch, data_gen_val, max_val_batch_ind)
scheduler.step(val_loss)
best_flag = False
if best_val_loss is None or val_loss < best_val_loss:
best_val_loss = val_loss
best_flag = True
# checkpoints saving
self.checkpoint_save(self.infos, epoch, best_val_loss, best_flag)
def cos_lr(self, epoch_start, epoch_max, base_lr):
curr_lr = base_lr * (1 + math.cos(math.pi*epoch_start / epoch_max)) / 2
return curr_lr
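    # cos_lr implements half-cosine annealing: it returns base_lr at epoch_start == 0 and
    # decays to 0 at epoch_start == epoch_max; e.g. base_lr = 0.01, epoch_max = 100 gives
    # 0.01 * (1 + cos(pi * 50 / 100)) / 2 = 0.005 at epoch 50. Note that the training loop
    # shown here schedules the learning rate with ReduceLROnPlateau instead.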
def _train(self, data_gen, max_batch_ind, epoch):
'''
:param data_gen: generator for training data
:param max_batch_ind: maximum slice for training data
:param epoch:
'''
torch.set_grad_enabled(True)
self.model.train()
tmp_losses = 0
batch_sum = 0
start = time.time()
for batch_ind in range(max_batch_ind):
data = next(data_gen)
img, label0, label1, label2 = data[0], data[1], data[2], data[3]
            img = torch.from_numpy(img).float()
img = img.view(img.shape[0], img.shape[1], img.shape[2], img.shape[3]).permute(0, 3, 1, 2).contiguous()
label0 = torch.from_numpy(label0)
label1 = torch.from_numpy(label1)
label2 = torch.from_numpy(label2)
if self.args.use_cuda:
img = img.cuda()
label0 = label0.cuda()
label1 = label1.cuda()
label2 = label2.cuda()
losses = self.model(img, label0, label1, label2)
loss = add_logger(losses, self.logger, batch_ind + max_batch_ind * epoch, 'train')
loss = loss.sum() / loss.numel()
tmp_losses = tmp_losses + loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if batch_ind % self.args.display_interval == 0 and batch_ind != 0:
end = time.time()
batch_sum = batch_sum + self.args.display_interval
tmp_losses_show = tmp_losses / batch_sum
print("step {}/{} (epoch {}), loss: {:f} , lr:{:f}, time/batch = {:.3f}"
.format(batch_ind, max_batch_ind, epoch, tmp_losses_show, self.optimizer.param_groups[-1]['lr'],
(end - start)/self.args.display_interval))
start = end
def _validation(self, epoch, data_gen, max_val_batch_ind):
torch.set_grad_enabled(False)
self.model.eval()
tmp_losses = 0
for batch_idx in range(max_val_batch_ind):
data = next(data_gen)
img, label0, label1, label2 = data[0], data[1], data[2], data[3]
img = torch.from_numpy(img).float()
img = img.view(img.shape[0], img.shape[1], img.shape[2], img.shape[3]).permute(0, 3, 1, 2).contiguous()
label0 = torch.from_numpy(label0)
label1 = torch.from_numpy(label1)
label2 = torch.from_numpy(label2)
if self.args.use_cuda:
img = img.cuda()
label0 = label0.cuda()
label1 = label1.cuda()
label2 = label2.cuda()
losses = self.model(img, label0, label1, label2)
loss = add_logger(losses, self.logger, self.args.n_iter, 'val')
tmp_losses = tmp_losses + loss.item()
tmp_losses = tmp_losses / max_val_batch_ind
print("============================================")
print("Evaluation Loss (epoch {}), TOTAL_LOSS: {:.3f}".format(epoch, tmp_losses))
print("============================================")
return tmp_losses
@staticmethod
def _get_optimizer(args, net):
params = []
for key, value in dict(net.named_parameters()).items():
if value.requires_grad:
if 'backbone' in key:
params += [{'params':[value], 'lr':args.backbone_lr}]
else:
params += [{'params':[value], 'lr':args.lr}]
# Initialize optimizer class
if args.optimizer == "adam":
optimizer = optim.Adam(params, weight_decay=args.weight_decay)
elif args.optimizer == "rmsprop":
optimizer = optim.RMSprop(params, weight_decay=args.weight_decay)
else:
# Default to sgd
optimizer = optim.SGD(params, momentum=0.9, weight_decay=args.weight_decay,
nesterov=(args.optimizer == "nesterov"))
return optimizer
def _create_model(self):
model = yolo_layer.yolov3layer(self.args)
infos = {}
if self.args.start_from != '':
if self.args.load_best_score == 1:
model_path = os.path.join(self.args.start_from, 'model-best.pth')
info_path = os.path.join(self.args.start_from, 'infos-best.pkl')
else:
model_path = os.path.join(self.args.start_from, 'model.pth')
info_path = os.path.join(self.args.start_from, 'infos.pkl')
if os.path.exists(info_path):
with open(info_path, 'rb') as f:
infos = cPickle.load(f)
print('Loading the model from %s ...' %(model_path))
model.load_state_dict(torch.load(model_path))
if self.args.use_cuda:
if self.args.mGPUs:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
return model, infos
def _data_generator(self, annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
if i + batch_size > n:
np.random.shuffle(annotation_lines)
i = 0
            # Reading images in separate worker threads speeds up batch assembly
output = self.threadpool.starmap(get_random_data, zip(annotation_lines[i:i + batch_size],
itertools.repeat(input_shape, batch_size)))
image_data = list(zip(*output))[0]
box_data = list(zip(*output))[1]
i = i + batch_size
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true]
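    # The generator yields [image_data, y_true_scale1, y_true_scale2, y_true_scale3];
    # _train()/_validation() unpack this as data[0]..data[3], one label array per YOLO
    # detection scale produced by preprocess_true_boxes().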
def _data_generator_wrapper(self, annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n == 0 or batch_size <= 0: return None
return self._data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
def _train_data_generation(self, sample_path):
with open(sample_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
val_split = self.args.val_split
num_val = int(len(lines) * val_split)
num_train = len(lines) - num_val
batch_size = self.args.batch_size
data_gen = self._data_generator_wrapper(lines[:num_train], batch_size, self.input_shape, self.anchors, self.num_classes)
data_gen_val = self._data_generator_wrapper(lines[num_train:], batch_size, self.input_shape, self.anchors, self.num_classes)
max_batch_ind = int(num_train / batch_size)
max_val_batch_ind = int(num_val / batch_size)
return data_gen, data_gen_val, max_batch_ind, max_val_batch_ind
def checkpoint_save(self, infos, epoch, best_val_loss, best_flag=False):
checkpoint_path = os.path.join(self.args.save_path, self.args.backbones_network)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
        if self.args.mGPUs:
torch.save(self.model.module.state_dict(), os.path.join(checkpoint_path, 'model.pth'))
else:
torch.save(self.model.state_dict(), os.path.join(checkpoint_path, 'model.pth'))
print("model saved to {}".format(checkpoint_path))
infos['n_iter'] = self.args.n_iter
infos['epoch'] = epoch
infos['best_val_loss'] = best_val_loss
infos['opt'] = self.args
with open(os.path.join(checkpoint_path, 'infos.pkl'), 'wb') as f:
cPickle.dump(infos, f)
if best_flag:
            if self.args.mGPUs:
torch.save(self.model.module.state_dict(), os.path.join(checkpoint_path, 'model-best.pth'))
else:
torch.save(self.model.state_dict(), os.path.join(checkpoint_path, 'model-best.pth'))
print("model saved to {} with best total loss {:.3f}".format(os.path.join(checkpoint_path, \
'model-best.pth'), best_val_loss))
with open(os.path.join(checkpoint_path, 'infos-best.pkl'), 'wb') as f:
cPickle.dump(infos, f)
def main():
args = make_args()
with open(args.cfg_path, 'r') as handle:
        options_yaml = yaml.safe_load(handle)
update_values(options_yaml, vars(args))
# set random seed to cpu and gpu
if args.seed:
torch.manual_seed(args.seed)
if args.use_cuda:
torch.cuda.manual_seed(args.seed)
try:
threadpool = ThreadPool(args.batch_size)
except Exception as e:
print(e)
exit(1)
Train(args, threadpool)
def make_args():
# load the optional parameters and update new arguments
parser = argparse.ArgumentParser()
# # Data input settings
parser.add_argument('--cfg_path', type=str, default='cfgs/Yolo_train.yml', help='load config')
parser.add_argument('--use_cuda', type=bool, default=True, help='whether use gpu.')
parser.add_argument('--mGPUs', type=bool, default=False, help='whether use mgpu.')
return parser.parse_args()
if __name__ == '__main__':
main()
|
427994
|
from __future__ import absolute_import
import base64
import contextlib
import typing as tp
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
# noinspection PyProtectedMember
from applitools.core import logger
from applitools.core.eyes_base import FailureReports, EyesBase
from applitools.core.match_window_task import MatchWindowTask
from applitools.core.triggers import MouseTrigger, TextTrigger
from applitools.core.errors import EyesError, TestFailedError
from applitools.core.geometry import Region
from applitools.core.scaling import ContextBasedScaleProvider, FixedScaleProvider
from applitools.utils import image_utils
from . import eyes_selenium_utils
from .webdriver import EyesWebDriver
from .capture import EyesWebDriverScreenshot, dom_capture
from .target import Target
from .positioning import StitchMode, ElementPositionProvider
from .webelement import EyesWebElement
if tp.TYPE_CHECKING:
from applitools.core.scaling import ScaleProvider
from applitools.utils.custom_types import (ViewPort, AnyWebDriver, FrameReference, AnyWebElement)
class ScreenshotType(object):
ENTIRE_ELEMENT_SCREENSHOT = 'EntireElementScreenshot'
REGION_OR_ELEMENT_SCREENSHOT = 'RegionOrElementScreenshot'
FULLPAGE_SCREENSHOT = "FullPageScreenshot"
VIEWPORT_SCREENSHOT = "ViewportScreenshot"
class Eyes(EyesBase):
"""
Applitools Selenium Eyes API for python.
"""
_UNKNOWN_DEVICE_PIXEL_RATIO = 0
_DEFAULT_DEVICE_PIXEL_RATIO = 1
@staticmethod
def set_viewport_size(driver, size=None, viewportsize=None):
# type: (AnyWebDriver, tp.Optional[ViewPort], tp.Optional[ViewPort]) -> None
assert driver is not None
        if size is None and viewportsize is None:
            raise ValueError("set_viewport_size requires a `size` parameter")
        if viewportsize:
            logger.deprecation("Use `size` parameter instead")
            if size is None:
                size = viewportsize
        eyes_selenium_utils.set_viewport_size(driver, size)
def get_viewport_size(self):
# type: () -> ViewPort
"""
        Returns the size of the viewport of the application under test (e.g., the browser).
"""
if self._viewport_size:
return self._viewport_size
self._viewport_size = self._driver.get_viewport_size()
return self._viewport_size
def __init__(self, server_url=EyesBase.DEFAULT_EYES_SERVER):
super(Eyes, self).__init__(server_url)
self._driver = None # type: tp.Optional[AnyWebDriver]
self._match_window_task = None # type: tp.Optional[MatchWindowTask]
self._viewport_size = None # type: tp.Optional[ViewPort]
self._screenshot_type = None # type: tp.Optional[str] # ScreenshotType
self._device_pixel_ratio = self._UNKNOWN_DEVICE_PIXEL_RATIO
self._stitch_mode = StitchMode.Scroll # type: tp.Text
self._element_position_provider = None # type: tp.Optional[ElementPositionProvider]
        # If true, Eyes will create a full page screenshot (by using stitching) for browsers which only
        # return the viewport screenshot.
self.force_full_page_screenshot = False # type: bool
# If true, Eyes will remove the scrollbars from the pages before taking the screenshot.
self.hide_scrollbars = False # type: bool
@property
def stitch_mode(self):
# type: () -> tp.Text
"""
Gets the stitch mode.
:return: The stitch mode.
"""
return self._stitch_mode
@stitch_mode.setter
def stitch_mode(self, stitch_mode):
# type: (tp.Text) -> None
"""
Sets the stitch property - default is by scrolling.
:param stitch_mode: The stitch mode to set - either scrolling or css.
"""
self._stitch_mode = stitch_mode
if stitch_mode == StitchMode.CSS:
self.hide_scrollbars = True
self.send_dom = True
@property
def driver(self):
# type: () -> EyesWebDriver
"""
Returns the current web driver.
"""
return self._driver
def _obtain_screenshot_type(self, is_element, inside_a_frame, stitch_content, force_fullpage, is_region=False):
# type:(bool, bool, bool, bool, bool) -> str
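        # Decision summary (added): when stitching or a full-page capture is requested, an
        # element checked without stitching still maps to REGION_OR_ELEMENT; a top-level full
        # page (or stitched non-element window) maps to FULLPAGE; anything inside a frame, or
        # stitched, maps to ENTIRE_ELEMENT. Otherwise regions and non-stitched elements map to
        # REGION_OR_ELEMENT and everything else to VIEWPORT.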
if stitch_content or force_fullpage:
if is_element and not stitch_content:
return ScreenshotType.REGION_OR_ELEMENT_SCREENSHOT
if not inside_a_frame:
if ((force_fullpage and not stitch_content) or # noqa
(stitch_content and not is_element)):
return ScreenshotType.FULLPAGE_SCREENSHOT
if inside_a_frame or stitch_content:
return ScreenshotType.ENTIRE_ELEMENT_SCREENSHOT
else:
if is_region or (is_element and not stitch_content):
return ScreenshotType.REGION_OR_ELEMENT_SCREENSHOT
if not stitch_content and not force_fullpage:
return ScreenshotType.VIEWPORT_SCREENSHOT
return ScreenshotType.VIEWPORT_SCREENSHOT
@property
def _environment(self):
os = self.host_os
# If no host OS was set, check for mobile OS.
if os is None:
logger.info('No OS set, checking for mobile OS...')
# Since in Python Appium driver is the same for Android and iOS, we need to use the desired
# capabilities to figure this out.
if eyes_selenium_utils.is_mobile_device(self._driver):
platform_name = self._driver.platform_name
logger.info(platform_name + ' detected')
platform_version = self._driver.platform_version
if platform_version is not None:
# Notice that Python's "split" function's +limit+ is the the maximum splits performed
# whereas in Ruby it is the maximum number of elements in the result (which is why they are set
# differently).
major_version = platform_version.split('.', 1)[0]
os = platform_name + ' ' + major_version
else:
os = platform_name
logger.info("Setting OS: " + os)
else:
logger.info('No mobile OS detected.')
app_env = {'os': os, 'hostingApp': self.host_app,
'displaySize': self._viewport_size,
'inferred': self._inferred_environment}
return app_env
@property
def _title(self):
if self._should_get_title:
# noinspection PyBroadException
try:
return self._driver.title
except Exception:
self._should_get_title = False
# Couldn't get title, return empty string.
return ''
@property
def _inferred_environment(self):
# type: () -> tp.Optional[tp.Text]
try:
user_agent = self._driver.execute_script('return navigator.userAgent')
except WebDriverException:
user_agent = None
if user_agent:
return "useragent:%s" % user_agent
return None
def _update_scaling_params(self):
# type: () -> tp.Optional[ScaleProvider]
if self._device_pixel_ratio != self._UNKNOWN_DEVICE_PIXEL_RATIO:
logger.debug("Device pixel ratio was already changed")
return None
logger.info('Trying to extract device pixel ratio...')
try:
device_pixel_ratio = image_utils.get_device_pixel_ratio(self._driver)
except Exception as e:
logger.info('Failed to extract device pixel ratio! Using default. Error %s ' % e)
device_pixel_ratio = self._DEFAULT_DEVICE_PIXEL_RATIO
logger.info('Device pixel ratio: {}'.format(device_pixel_ratio))
logger.info("Setting scale provider...")
try:
scale_provider = ContextBasedScaleProvider(
top_level_context_entire_size=self._driver.get_entire_page_size(),
viewport_size=self.get_viewport_size(),
device_pixel_ratio=device_pixel_ratio,
# always False as in Java version
is_mobile_device=False) # type: ScaleProvider
except Exception:
# This can happen in Appium for example.
logger.info("Failed to set ContextBasedScaleProvider.")
logger.info("Using FixedScaleProvider instead...")
scale_provider = FixedScaleProvider(1 / device_pixel_ratio)
logger.info("Done!")
return scale_provider
@contextlib.contextmanager
def _hide_scrollbars_if_needed(self):
if self.hide_scrollbars:
original_overflow = self._driver.hide_scrollbars()
eyes_selenium_utils.add_data_overflow_to_element(self.driver, None, original_overflow)
yield
if self.hide_scrollbars:
self._driver.set_overflow(original_overflow)
def _try_capture_dom(self):
try:
dom_json = dom_capture.get_full_window_dom(self._driver)
return dom_json
except Exception as e:
logger.warning(
                'Exception raised while capturing DOM JSON; skipping. Error: {}'.format(str(e)))
return None
def _get_screenshot(self):
scale_provider = self._update_scaling_params()
if self._screenshot_type == ScreenshotType.ENTIRE_ELEMENT_SCREENSHOT:
self._last_screenshot = self._entire_element_screenshot(scale_provider)
elif self._screenshot_type == ScreenshotType.FULLPAGE_SCREENSHOT:
self._last_screenshot = self._full_page_screenshot(scale_provider)
elif self._screenshot_type == ScreenshotType.VIEWPORT_SCREENSHOT:
self._last_screenshot = self._viewport_screenshot(scale_provider)
elif self._screenshot_type == ScreenshotType.REGION_OR_ELEMENT_SCREENSHOT:
self._last_screenshot = self._region_or_screenshot(scale_provider)
else:
raise EyesError("No proper ScreenshotType obtained")
return self._last_screenshot
def get_screenshot(self, hide_scrollbars_called=False):
if hide_scrollbars_called:
return self._get_screenshot()
else:
with self._hide_scrollbars_if_needed():
return self._get_screenshot()
def _entire_element_screenshot(self, scale_provider):
# type: (ScaleProvider) -> EyesWebDriverScreenshot
logger.info('Entire element screenshot requested')
screenshot = self._driver.get_stitched_screenshot(self._region_to_check,
self._seconds_to_wait_screenshot,
scale_provider)
return EyesWebDriverScreenshot.create_from_image(screenshot, self._driver)
def _region_or_screenshot(self, scale_provider):
# type: (ScaleProvider) -> EyesWebDriverScreenshot
logger.info('Not entire element screenshot requested')
screenshot = self._viewport_screenshot(scale_provider)
region = screenshot.get_element_region_in_frame_viewport(self._region_to_check)
screenshot = screenshot.get_sub_screenshot_by_region(region)
return screenshot
def _full_page_screenshot(self, scale_provider):
# type: (ScaleProvider) -> EyesWebDriverScreenshot
logger.info('Full page screenshot requested')
screenshot = self._driver.get_full_page_screenshot(self._seconds_to_wait_screenshot,
scale_provider)
return EyesWebDriverScreenshot.create_from_image(screenshot, self._driver)
def _viewport_screenshot(self, scale_provider):
# type: (ScaleProvider) -> EyesWebDriverScreenshot
logger.info('Viewport screenshot requested')
self._driver._wait_before_screenshot(self._seconds_to_wait_screenshot)
if not self._driver.is_mobile_device():
image64 = self._driver.get_screesnhot_as_base64_from_main_frame()
else:
image64 = self._driver.get_screenshot_as_base64()
image = image_utils.image_from_bytes(base64.b64decode(image64))
scale_provider.update_scale_ratio(image.width)
pixel_ratio = 1 / scale_provider.scale_ratio
if pixel_ratio != 1.0:
image = image_utils.scale_image(image, 1.0 / pixel_ratio)
return EyesWebDriverScreenshot.create_from_image(image, self._driver).get_viewport_screenshot()
def _ensure_viewport_size(self):
if self._viewport_size is None:
self._viewport_size = self._driver.get_default_content_viewport_size()
if not eyes_selenium_utils.is_mobile_device(self._driver):
eyes_selenium_utils.set_viewport_size(self._driver, self._viewport_size)
def open(self, driver, app_name, test_name, viewport_size=None):
# type: (AnyWebDriver, tp.Text, tp.Text, tp.Optional[ViewPort]) -> EyesWebDriver
if self.is_disabled:
logger.debug('open(): ignored (disabled)')
return driver
if isinstance(driver, EyesWebDriver):
            # If the driver is an EyesWebDriver (as might be the case when tests are run
            # consecutively using the same driver object)
self._driver = driver
else:
if not isinstance(driver, RemoteWebDriver):
logger.info("WARNING: driver is not a RemoteWebDriver (class: {0})".format(driver.__class__))
self._driver = EyesWebDriver(driver, self, self._stitch_mode)
if viewport_size is not None:
self._viewport_size = viewport_size
eyes_selenium_utils.set_viewport_size(self._driver, viewport_size)
self._ensure_viewport_size()
self._open_base(app_name, test_name, viewport_size)
return self._driver
def check_window(self, tag=None, match_timeout=-1, target=None):
# type: (tp.Optional[tp.Text], int, tp.Optional[Target]) -> None
"""
Takes a snapshot from the browser using the web driver and matches it with the expected
output.
:param tag: (str) Description of the visual validation checkpoint.
:param match_timeout: (int) Timeout for the visual validation checkpoint (milliseconds).
:param target: (Target) The target for the check_window call
:return: None
"""
logger.info("check_window('%s')" % tag)
self._screenshot_type = self._obtain_screenshot_type(is_element=False,
inside_a_frame=bool(self._driver.frame_chain),
stitch_content=False,
force_fullpage=self.force_full_page_screenshot)
self._check_window_base(tag, match_timeout, target)
def check_region(self, region, tag=None, match_timeout=-1, target=None, stitch_content=False):
# type: (Region, tp.Optional[tp.Text], int, tp.Optional[Target], bool) -> None
"""
Takes a snapshot of the given region from the browser using the web driver and matches it
        with the expected output. If the current context is a frame, the region is offset
relative to the frame.
:param region: (Region) The region which will be visually validated. The coordinates are
relative to the viewport of the current frame.
:param tag: (str) Description of the visual validation checkpoint.
:param match_timeout: (int) Timeout for the visual validation checkpoint (milliseconds).
:param target: (Target) The target for the check_window call
:return: None
"""
logger.info("check_region([%s], '%s')" % (region, tag))
if region.is_empty():
raise EyesError("region cannot be empty!")
self._screenshot_type = self._obtain_screenshot_type(is_element=False,
inside_a_frame=bool(self._driver.frame_chain),
stitch_content=stitch_content,
force_fullpage=self.force_full_page_screenshot,
is_region=True)
self._region_to_check = region
self._check_window_base(tag, match_timeout, target)
def check_region_by_element(self, element, tag=None, match_timeout=-1, target=None, stitch_content=False):
# type: (AnyWebElement, tp.Optional[tp.Text], int, tp.Optional[Target], bool) -> None
"""
Takes a snapshot of the region of the given element from the browser using the web driver
and matches it with the expected output.
:param element: (WebElement) The element which region will be visually validated.
:param tag: (str) Description of the visual validation checkpoint.
:param match_timeout: (int) Timeout for the visual validation checkpoint (milliseconds).
:param target: (Target) The target for the check_window call
:return: None
"""
logger.info("check_region_by_element('%s')" % tag)
self._screenshot_type = self._obtain_screenshot_type(is_element=True,
inside_a_frame=bool(self._driver.frame_chain),
stitch_content=stitch_content,
force_fullpage=self.force_full_page_screenshot)
if not isinstance(element, EyesWebElement):
element = EyesWebElement(element, self.driver)
self._element_position_provider = ElementPositionProvider(self._driver, element)
origin_overflow = element.get_overflow()
eyes_selenium_utils.add_data_overflow_to_element(self.driver, element, origin_overflow)
element.set_overflow('hidden')
element_region = self._get_element_region(element)
self._region_to_check = element_region
self._check_window_base(tag, match_timeout, target)
self._element_position_provider = None
if origin_overflow:
element.set_overflow(origin_overflow)
def _get_element_region(self, element):
# We use a smaller size than the actual screenshot size in order to eliminate duplication
# of bottom scroll bars, as well as footer-like elements with fixed position.
pl = element.location
# TODO: add correct values for Safari
        # in Safari the returned size is absolute rather than relative, unlike other browsers
element_width = element.get_client_width()
element_height = element.get_client_height()
border_left_width = element.get_computed_style_int('border-left-width')
border_top_width = element.get_computed_style_int('border-top-width')
element_region = Region(pl['x'] + border_left_width,
pl['y'] + border_top_width,
element_width, element_height)
return element_region
def check_region_by_selector(self, by, value, tag=None, match_timeout=-1, target=None, stitch_content=False):
# type: (tp.Text, tp.Text, tp.Optional[tp.Text], int, tp.Optional[Target], bool) -> None
"""
Takes a snapshot of the region of the element found by calling find_element(by, value)
and matches it with the expected output.
:param by: (By) The way by which an element to be validated should be found (e.g., By.ID).
:param value: (str) The value identifying the element using the "by" type.
:param tag: (str) Description of the visual validation checkpoint.
:param match_timeout: (int) Timeout for the visual validation checkpoint (milliseconds).
:param target: (Target) The target for the check_window call
:return: None
"""
logger.debug("calling 'check_region_by_selector'...")
        # hack: prevent a stale element exception by caching the viewport size before locating the element
self._driver.get_default_content_viewport_size()
self.check_region_by_element(self._driver.find_element(by, value), tag,
match_timeout, target, stitch_content)
def check_region_in_frame_by_selector(self, frame_reference, # type: FrameReference
by, # type: tp.Text
value, # type: tp.Text
tag=None, # type: tp.Optional[tp.Text]
match_timeout=-1, # type: int
target=None, # type: tp.Optional[Target]
stitch_content=False # type: bool
):
# type: (...) -> None
"""
Checks a region within a frame, and returns to the current frame.
:param frame_reference: (int/str/WebElement) A reference to the frame in which the region should be checked.
:param by: (By) The way by which an element to be validated should be found (e.g., By.ID).
:param value: (str) The value identifying the element using the "by" type.
:param tag: (str) Description of the visual validation checkpoint.
:param match_timeout: (int) Timeout for the visual validation checkpoint (milliseconds).
:param target: (Target) The target for the check_window call
:return: None
"""
# TODO: remove this disable
if self.is_disabled:
logger.info('check_region_in_frame_by_selector(): ignored (disabled)')
return
logger.info("check_region_in_frame_by_selector('%s')" % tag)
# Switching to the relevant frame
with self._driver.switch_to.frame_and_back(frame_reference):
logger.debug("calling 'check_region_by_selector'...")
self.check_region_by_selector(by, value, tag, match_timeout, target, stitch_content)
def add_mouse_trigger_by_element(self, action, element):
# type: (tp.Text, AnyWebElement) -> None
"""
Adds a mouse trigger.
:param action: Mouse action (click, double click etc.)
:param element: The element on which the action was performed.
"""
if self.is_disabled:
logger.debug("add_mouse_trigger: Ignoring %s (disabled)" % action)
return
# Triggers are activated on the last checked window.
if self._last_screenshot is None:
logger.debug("add_mouse_trigger: Ignoring %s (no screenshot)" % action)
return
if not self._driver.frame_chain == self._last_screenshot.frame_chain:
logger.debug("add_mouse_trigger: Ignoring %s (different frame)" % action)
return
control = self._last_screenshot.get_intersected_region_by_element(element)
# Making sure the trigger is within the last screenshot bounds
if control.is_empty():
logger.debug("add_mouse_trigger: Ignoring %s (out of bounds)" % action)
return
cursor = control.middle_offset
trigger = MouseTrigger(action, control, cursor)
self._user_inputs.append(trigger)
logger.info("add_mouse_trigger: Added %s" % trigger)
def add_text_trigger_by_element(self, element, text):
# type: (AnyWebElement, tp.Text) -> None
"""
Adds a text trigger.
:param element: The element to which the text was sent.
:param text: The trigger's text.
"""
if self.is_disabled:
logger.debug("add_text_trigger: Ignoring '%s' (disabled)" % text)
return
# Triggers are activated on the last checked window.
if self._last_screenshot is None:
logger.debug("add_text_trigger: Ignoring '%s' (no screenshot)" % text)
return
if not self._driver.frame_chain == self._last_screenshot.frame_chain:
logger.debug("add_text_trigger: Ignoring %s (different frame)" % text)
return
control = self._last_screenshot.get_intersected_region_by_element(element)
# Making sure the trigger is within the last screenshot bounds
if control.is_empty():
logger.debug("add_text_trigger: Ignoring %s (out of bounds)" % text)
return
trigger = TextTrigger(control, text)
self._user_inputs.append(trigger)
logger.info("add_text_trigger: Added %s" % trigger)
|
428009
|
import argparse
from ..github import GithubClient
from .utils import ensure_github_token, get_current_pr
def merge_command(args: argparse.Namespace) -> None:
github_client = GithubClient(ensure_github_token(args.token))
github_client.merge_pr(get_current_pr())
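# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal argparse wiring for the command above. The '--token' flag name is an assumption
# chosen only because merge_command reads args.token; because of the relative imports this
# module is normally invoked through the package's own CLI entry point (python -m ...).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Merge the pull request for the current branch')
    parser.add_argument('--token', default=None, help='GitHub token; omitted values fall back to ensure_github_token')
    merge_command(parser.parse_args())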
|
428023
|
H = [[0, -1, [68.16,1]], [0, -1, [10.2465,1]], [0, -1, [2.34648,1]],
[0, -1, [0.67332,1]], [0, -1, [0.22466,1]], [0, -1, [0.082217,1]],
[1, 0, [1.3,1]], [1, 0, [0.33,1]],
[2, 0, [1.0,1]], ]
C = [[0, -1, [16371.074,1]], [0, -1, [2426.9925,1]], [0, -1, [544.54418,1]],
[0, -1, [150.80487,1]], [0, -1, [47.708143,1]], [0, -1, [16.457241,1]],
[0, -1, [6.0845578,1]], [0, -1, [2.3824631,1]], [0, -1, [0.6619866,1]],
[0, -1, [0.24698997,1]], [0, -1, [0.0949873,1]],
[1, 0, [40.790423,1]], [1, 0, [9.5034633,1]], [1, 0, [2.9408357,1]],
[1, 0, [1.0751115,1]], [1, 0, [0.4267024,1]], [1, 0, [0.17481926,1]],
[1, 0, [0.07113054,1]],
[2, 0, [0.35,1]], [2, 0, [1.4,1]], ]
F = [[0, -1, [37736.0,1]], [0, -1, [5867.0791,1]], [0, -1, [1332.4679,1]],
[0, -1, [369.4406,1]], [0, -1, [116.843,1]], [0, -1, [40.34877,1]],
[0, -1, [14.96627,1]], [0, -1, [5.8759295,1]], [0, -1, [1.6533352,1]],
[0, -1, [0.61083583,1]], [0, -1, [0.23328922,1]],
[1, 0, [102.26192,1]], [1, 0, [23.938381,1]], [1, 0, [7.5205914,1]],
[1, 0, [2.7724566,1]], [1, 0, [1.1000514,1]], [1, 0, [0.44677512,1]],
[1, 0, [0.17187009,1]],
[2, 0, [1.4,1]], [2, 0, [0.35,1]],]
Cl = [[0, -1, [105818.82,1]], [0, -1, [15872.006,1]], [0, -1, [3619.6548,1]],
[0, -1, [1030.8038,1]], [0, -1, [339.90788,1]], [0, -1, [124.5381,1]],
[0, -1, [49.513502,1]], [0, -1, [20.805604,1]], [0, -1, [6.4648238,1]],
[0, -1, [2.5254537,1]], [0, -1, [1.16544849,1]], [0, -1, [0.53783215,1]],
[0, -1, [0.19349716,1]],
[1, 0, [622.02736,1]], [1, 0, [145.49719,1]], [1, 0, [45.008659,1]],
[1, 0, [15.900889,1]], [1, 0, [5.9259437,1]], [1, 0, [2.2943822,1]],
[1, 0, [0.6280655,1]], [1, 0, [0.18123318,1]],
[2, 0, [2.5,1]], [2, 0, [0.8,1]], [2, 0, [0.25,1]], ]
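# --- Added note ---
# Each entry above appears to follow a [angular_momentum, kappa, [exponent, coefficient]]
# layout for uncontracted Gaussian shells (l = 0/1/2 -> s/p/d); reading the second field
# as kappa is an assumption. The snippet below only summarises the shell counts.
if __name__ == '__main__':
    from collections import Counter
    for name, basis in (('H', H), ('C', C), ('F', F), ('Cl', Cl)):
        counts = Counter(shell[0] for shell in basis)
        print(name, {'spd'[l]: n for l, n in sorted(counts.items())})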
|