| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
11469498
|
from __future__ import (
annotations,
)
from functools import (
lru_cache,
)
from typing import (
TYPE_CHECKING,
Any,
Iterable,
NamedTuple,
Optional,
Type,
Union,
)
from ...exceptions import (
MinosImportException,
)
from ...importlib import (
import_module,
)
from .generics import (
GenericTypeProjector,
)
if TYPE_CHECKING:
from ..abc import (
Model,
)
class ModelType(type):
"""Model Type class."""
name: str
namespace: str
type_hints: dict[str, Type]
@classmethod
def build(
mcs, name_: str, type_hints_: Optional[dict[str, type]] = None, *, namespace_: Optional[str] = None, **kwargs
) -> ModelType:
"""Build a new ``ModelType`` instance.
:param name_: Name of the new type.
:param type_hints_: Type hints of the new type.
:param namespace_: Namespace of the new type.
:param kwargs: Type hints of the new type as named parameters.
:return: A ``ModelType`` instance.
"""
if type_hints_ is None:
type_hints_ = tuple(kwargs.items())
else:
if len(kwargs):
raise ValueError("Type hints can be passed in a dictionary or as named parameters, but not both.")
type_hints_ = tuple(type_hints_.items())
if namespace_ is None:
try:
namespace_, name_ = name_.rsplit(".", 1)
except ValueError:
namespace_ = str()
# noinspection PyTypeChecker
return mcs._build(name_, type_hints_, namespace_)
@classmethod
@lru_cache()
def _build(mcs, name_: str, type_hints_: tuple[tuple[str, type], ...], namespace_: Optional[str]):
return mcs(name_, tuple(), {"type_hints": dict(type_hints_), "namespace": namespace_})
@classmethod
def from_typed_dict(mcs, typed_dict) -> ModelType:
"""Build a new ``ModelType`` instance from a ``typing.TypedDict``.
:param typed_dict: Typed dict to be used as base.
:return: A ``ModelType`` instance.
"""
return mcs.build(typed_dict.__name__, typed_dict.__annotations__)
@staticmethod
def from_model(model: Union[Model, type[Model]]) -> ModelType:
"""Build a new instance from model class.
:param model: The model class.
:return: A new ``ModelType`` instance.
"""
from .builders import (
TypeHintParser,
)
type_hints = GenericTypeProjector.from_model(model).build()
type_hints = {k: TypeHintParser(v).build() for k, v in type_hints.items()}
# noinspection PyTypeChecker
return ModelType.build(name_=model.classname, type_hints_=type_hints)
def __call__(cls, *args, **kwargs) -> Model:
return cls.model_cls.from_model_type(cls, *args, **kwargs)
@property
def model_cls(cls) -> Type[Model]:
"""Get the model class if defined or ``DataTransferObject`` otherwise.
:return: A model class.
"""
try:
# noinspection PyTypeChecker
return import_module(cls.classname)
except MinosImportException:
from ..dynamic import (
DataTransferObject,
)
return DataTransferObject
@property
def name(cls) -> str:
"""Get the type name.
:return: A string object.
"""
return cls.__name__
@property
def classname(cls) -> str:
"""Get the full class name.
        :return: A string object.
"""
if not len(cls.namespace):
return cls.name
return f"{cls.namespace}.{cls.name}"
def __le__(cls, other: Any) -> bool:
from .comparators import (
TypeHintComparator,
)
return type(cls).__eq__(cls, other) or (
type(cls) == type(other)
and cls.name == other.name
and cls.namespace == other.namespace
and set(cls.type_hints.keys()) <= set(other.type_hints.keys())
and all(TypeHintComparator(v, other.type_hints[k]).match() for k, v in cls.type_hints.items())
)
def __lt__(cls, other: Any) -> bool:
from .comparators import (
TypeHintComparator,
)
return (
type(cls) == type(other)
and cls.name == other.name
and cls.namespace == other.namespace
and set(cls.type_hints.keys()) < set(other.type_hints.keys())
and all(TypeHintComparator(v, other.type_hints[k]).match() for k, v in cls.type_hints.items())
)
def __ge__(cls, other: Any) -> bool:
from .comparators import (
TypeHintComparator,
)
return type(cls).__eq__(cls, other) or (
type(cls) == type(other)
and cls.name == other.name
and cls.namespace == other.namespace
and set(cls.type_hints.keys()) >= set(other.type_hints.keys())
and all(TypeHintComparator(v, cls.type_hints[k]).match() for k, v in other.type_hints.items())
)
def __gt__(cls, other: Any) -> bool:
from .comparators import (
TypeHintComparator,
)
return (
type(cls) == type(other)
and cls.name == other.name
and cls.namespace == other.namespace
and set(cls.type_hints.keys()) > set(other.type_hints.keys())
and all(TypeHintComparator(v, cls.type_hints[k]).match() for k, v in other.type_hints.items())
)
def __eq__(cls, other: Any) -> bool:
conditions = (
cls._equal_with_model_type,
cls._equal_with_model,
cls._equal_with_inherited_model,
cls._equal_with_bucket_model,
)
# noinspection PyArgumentList
return any(condition(other) for condition in conditions)
def _equal_with_model_type(cls, other: ModelType) -> bool:
from .comparators import (
TypeHintComparator,
)
return (
type(cls) == type(other)
and cls.name == other.name
and cls.namespace == other.namespace
and set(cls.type_hints.keys()) == set(other.type_hints.keys())
and all(TypeHintComparator(v, other.type_hints[k]).match() for k, v in cls.type_hints.items())
)
def _equal_with_model(cls, other: Any) -> bool:
return hasattr(other, "model_type") and cls == ModelType.from_model(other)
def _equal_with_inherited_model(cls, other: ModelType) -> bool:
return (
type(cls) == type(other) and cls.model_cls != other.model_cls and issubclass(cls.model_cls, other.model_cls)
)
    def _equal_with_bucket_model(cls, other: Any) -> bool:
        from ..dynamic import (
            BucketModel,
        )
        return (
            hasattr(other, "model_cls")
            and issubclass(cls.model_cls, other.model_cls)
            and issubclass(other.model_cls, BucketModel)
        )
def __hash__(cls) -> int:
return hash(tuple(cls))
def __iter__(cls) -> Iterable:
# noinspection PyRedundantParentheses
yield from (cls.name, cls.namespace, tuple(cls.type_hints.items()))
def __repr__(cls):
return f"{type(cls).__name__}(name={cls.name!r}, namespace={cls.namespace!r}, type_hints={cls.type_hints!r})"
class FieldType(NamedTuple):
"""Field Type class."""
name: str
type: type
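# --- Usage sketch (not part of the original module) ---
# A minimal illustration of the ``ModelType.build`` API defined above; the
# field names are hypothetical. Building splits "foo.Bar" into namespace
# "foo" and name "Bar", and identical definitions are cached by ``_build``:
#
#     mt = ModelType.build("foo.Bar", {"text": str, "count": int})
#     mt.name       # -> "Bar"
#     mt.namespace  # -> "foo"
#     mt.classname  # -> "foo.Bar"
#     assert ModelType.build("foo.Bar", {"text": str, "count": int}) is mt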
|
11469590
|
import rasa
rasa.train(
domain="domain.yml",
config="config.yml",
training_files="data",
)
|
11469610
|
import getpass
name = input('Write your name: ')
print('Welcome to the Hangman Game', name)
# Enter a word; the input will be hidden
hangman = getpass.getpass('Type a word: ')
# You will have a maximum of 5 attempts
print('Your goal is to get the word typed right. You can make a maximum of 5 mistakes')
print('\n')
letters = []
mistakes = 5
while True:
if mistakes == 0:
print('\033[31m\nYOU LOST!!!\033[0m')
break
# At the end of 5 wrong attempts, the game is over
# Enter a letter
letter = input('Type a letter: ')
if len(letter) > 1:
# Enter just one letter
print("\nPlease enter only one letter\n")
continue
if letter in hangman:
        # If the word contains the typed letter, report that it exists
        print(f'The letter "{letter}" exists in the word')
letters.append(letter)
    else:
        # If the word does not contain the typed letter, report that and show the attempts left
        print(f'The letter "{letter}" does not exist in the word')
        mistakes -= 1
        print(f'You have {mistakes} mistakes left')
hangman_temp = ''
for letter_secret in hangman:
if letter_secret in letters:
hangman_temp += letter_secret
else:
hangman_temp += '*'
if hangman_temp == hangman:
        # If you get the word right, you win
        print(
            f'\033[32m\nYOU GOT the word! The word was: {hangman}\033[0m')
break
else:
if mistakes != 0:
            # After each guess, show the word with all correctly guessed letters revealed
print(
f'\033[34m\nThe word looks like this: {hangman_temp}\n\033[0m')
|
11469620
|
graph = {"A": set(["B", "C"]),
"B": set(["A", "D", "E"]),
"C": set(["A", "F", "G"]),
"D": set(["B"]),
"E": set(["B"]),
"F": set(["C"]),
"G": set(["C"])}
def bfs(graph, start_node):
    explored, frontier = set(), [start_node]
    while frontier:
        node = frontier.pop(0)
        if node not in explored:
            explored.add(node)
            print(node)
            frontier.extend(graph[node] - explored)
    return
bfs(graph, "A")
|
11469642
|
from pathlib import Path
from canvas_workflow_kit.utils import parse_class_from_python_source
from .base import WorkflowHelpersBaseTest
from canvas_workflow_kit import events
from canvas_workflow_kit.protocol import (ProtocolResult, STATUS_DUE)
from canvas_workflow_kit.recommendation import (HyperlinkRecommendation)
class HyperlinkHelpersTest(WorkflowHelpersBaseTest):
def setUp(self):
super().setUp()
currentDir = Path(__file__).parent.resolve()
self.mocks_path = f'{currentDir}/mock_data/'
full_patient = self.load_patient('full_detailed_patient')
partial_patient = self.load_patient('partial_detailed_patient')
self.full_patient_class = self.createProtocolClass()(
patient=full_patient)
self.partial_patient_class = self.createProtocolClass()(
patient=partial_patient)
def createProtocolClass(self):
template_path = Path(
__file__).parent.parent / 'protocols/hyperlink_helpers.py'
        template = template_path.read_text()
return parse_class_from_python_source(template)
def test_fields(self):
Protocol = self.full_patient_class
self.assertEqual(
'Creates external dynamic hyperlinks at the top of the protocol list',
Protocol._meta.description)
self.assertEqual('External Links', Protocol._meta.title)
self.assertEqual('v1.0.0', Protocol._meta.version)
self.assertEqual('https://canvasmedical.com/',
Protocol._meta.information)
self.assertEqual(['ExternalLinks'], Protocol._meta.identifiers)
self.assertEqual(['Links'], Protocol._meta.types)
self.assertEqual([events.HEALTH_MAINTENANCE],
Protocol._meta.responds_to_event_types)
self.assertEqual(['Canvas Medical'], Protocol._meta.authors)
self.assertEqual(['Links to external resources about the patient'],
Protocol._meta.references)
self.assertEqual('', Protocol._meta.funding_source)
self.assertEqual(False, Protocol._meta.can_be_snoozed)
def test_appointment_class_result(self):
tested = self.full_patient_class
result = tested.compute_results()
self.assertIsInstance(result, ProtocolResult)
self.assertEqual(STATUS_DUE, result.status)
self.assertIsInstance(result.recommendations[0],
HyperlinkRecommendation)
self.assertEqual('', result.narrative)
self.assertIsNone(result.due_in)
self.assertEqual(30, result.days_of_notice)
self.assertIsNone(result.next_review)
def test_patient_external_id(self):
full_patient = self.full_patient_class
expecting_id = full_patient.patient_external_id()
partial_patient = self.partial_patient_class
expecting_empty_string = partial_patient.patient_external_id()
self.assertEqual(expecting_id, '72342334')
self.assertEqual(expecting_empty_string, '')
|
11469646
|
features = [
{"name": "rte", "ordered": True, "section": ["net route "]},
{"name": "domain", "ordered": True, "section": ["net route-domain "]},
]
|
11469714
|
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
matrix = [[0 for _ in range(n+1)] for _ in range(m+1)]
matrix[0][1] = 1
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
matrix[i][j] = matrix[i-1][j] + matrix[i][j-1]
return matrix[-1][-1]
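# The DP above counts lattice paths that move only right or down:
# matrix[i][j] holds the number of paths into cell (i, j), seeded by
# matrix[0][1] = 1 so the padded border of zeros needs no special-casing.
# Runs in O(m*n) time and space.
# Quick sanity check: a 3x7 grid has C(8, 2) = 28 such paths.
assert Solution().uniquePaths(3, 7) == 28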
|
11469778
|
from request import LoyalRequest
from typing import Literal, Dict
from interface import OTAFirmware, RestoreFirmware
MDSVBV = "MobileDeviceSoftwareVersionsByVersion"
class AppleInternalHandler:
def __init__(self) -> None:
self.restore_cache = {}
self.ota_cache = {}
self.HTTP = LoyalRequest()
def __filter_keys(self, key) -> str:
key = key.lower()
key = key.replace("-", "_")
return key
def __lower_keys(self, data) -> Dict:
return {self.__filter_keys(key): value for key, value in data.items()}
    async def parse_ota(self, plist: Dict):
        # TODO: not implemented yet; should populate self.ota_cache with
        # OTAFirmware entries, analogously to parse_restore below.
        ota_cache = {}
    async def parse_restore(self, plist: Dict):
        restore_cache = {}
        for v in plist[MDSVBV].values():
            for idevice in v.values():
                for identifier, builds in idevice.items():
                    # make sure the per-identifier dict exists before indexing into it
                    builds_cache = restore_cache.setdefault(identifier, {})
                    for build, firmware in builds.items():
                        keys = firmware.keys()
                        if "SameAs" in keys:
                            builds_cache[build] = builds_cache[firmware["SameAs"]]
                        if "Restore" in keys:
                            builds_cache[build] = RestoreFirmware(
                                **self.__lower_keys(firmware["Restore"])
                            )
        self.restore_cache = restore_cache
|
11469803
|
import torch
import numpy as np
import pandas as pd
from agents.bbb import BBBAgent
from common.mushroom_env import MushroomEnv
NB_STEPS = 20000
N_SEEDS = 20
for i in range(N_SEEDS):
env = MushroomEnv()
agent = BBBAgent(env,
None,
mean_prior=0,
std_prior=0.1,
logging=True,
train_freq=1,
updates_per_train=1,
batch_size=128,
start_train_step=32,
log_folder_details='BBB',
learning_rate=1e-2,
noise_scale=0.01,
                     bayesian_sample_size=2,
verbose=True
)
agent.learn(NB_STEPS)
|
11469812
|
Given a digit string, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Input: Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
class Solution:
# @return a list of strings, [s1, s2]
dict = {'0':'',
'1':'',
'2':'abc',
'3':'def',
'4':'ghi',
'5':'jkl',
'6':'mno',
'7':'pqrs',
'8':'tuv',
'9':'wxyz'}
# Iteration
def letterCombinations(self, digits):
if not digits: return []
result = [''] # Note: here we should use [''] not empty []
for digit in digits:
ret = []
for comb in result:
for char in Solution.dict[digit]:
ret.append(comb+char)
            if ret:  # Note: keep this guard so digits that map to no letters (e.g. '1' in "12") are skipped
result = ret
return result
# Recursion 1
def letterCombinations(self, digits):
if not digits: return []
result = []
length = len(digits)
self.letter_com_helper(digits, result, '', length)
return result
def letter_com_helper(self, digits, result, substr,length):
if len(substr) == length:
result.append(substr); return
for i, digit in enumerate(digits):
if digit == "1" or digit == '0': # take care the special '1' and '0' case
length -= 1
for char in Solution.dict[digit]:
self.letter_com_helper(digits[i+1:], result, substr + char,length)
# test case:
# Input: "22"
# Output: ["aa","ab","ac","ba","bb","bc","ca","cb","cc","a","b","c"]
# Expected: ["aa","ab","ac","ba","bb","bc","ca","cb","cc"]
# Recursion 2
def letterCombinations_1(self, digits):
if len(digits)==0: return []
result = []
self.combination_rec(digits,0,result,'')
return result
def combination_rec(self,digits,i,result,ret):
        if i == len(digits):  # NOTE: the i == len(digits) check is essential here; compare with the following recursion solution!
result.append(ret)
return
for char in Solution.dict[digits[i]]: # Here we do not need to for loop the digits !!!
self.combination_rec(digits,i+1,result,ret+char)
# Recursion 3
def letterCombinations_2(self, digits):
if digits == "":
return [""]
result = []
result = self.get_result_2([], 0, digits)
return result
def get_result_2(self, result, i, digit):
if i == len(digit): return result
str1 = Solution.dict[digit[i]]
result2 = []
for char in str1:
if result == []:
result2.append(char)
else:
                for m in range(len(result)):
temp = result[m]+char
result2.append(temp)
        return self.get_result_2(result2, i+1, digit)
# Assuming the average number of letters on every number is m,
# and the length of digits string is n, then the time complexity: O(m^n)
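# Quick self-check for the solutions above (order of results may vary):
#
#     s = Solution()
#     sorted(s.letterCombinations("23"))
#     # -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']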
|
11469823
|
import numpy as np
import py.test
import random
from weldnumpy import weldarray, erf as welderf
import scipy.special as ss
'''
TODO0: Decompose heavily repeated stuff, like the assert blocks and so on.
TODO: New tests:
- reduce ufuncs: at least the supported ones.
- use np.add.reduce syntax for the reduce ufuncs.
- getitem: lists and ndarrays + ints.
- error based tests: nan; underflow/overflow; unsupported types [true] * [...] etc;
- long computational graphs - that segfault or take too long; will require implicit evaluation
when the nested ops get too many.
- edge/failing cases: out = ndarray for op involving weldarrays.
- update elements of an array in a loop etc. --> setitem test.
- setitem + views tests.
'''
UNARY_OPS = [np.exp, np.log, np.sqrt]
# TODO: Add wa.erf - doesn't use the ufunc functionality of numpy so not doing it for
# now.
BINARY_OPS = [np.add, np.subtract, np.multiply, np.divide]
REDUCE_UFUNCS = [np.add.reduce, np.multiply.reduce]
# FIXME: weld mergers don't support non-commutative ops --> need to find a workaround for this.
# REDUCE_UFUNCS = [np.add.reduce, np.subtract.reduce, np.multiply.reduce, np.divide.reduce]
TYPES = ['float32', 'float64', 'int32', 'int64']
NUM_ELS = 10
# TODO: Create test with all other ufuncs.
def random_arrays(num, dtype):
'''
Generates random Weld array, and numpy array of the given num elements.
'''
# np.random does not support specifying dtype, so this is a weird
# way to support both float/int random numbers
test = np.zeros((num), dtype=dtype)
test[:] = np.random.randn(*test.shape)
test = np.abs(test)
# at least add 1 so no 0's (o.w. divide errors)
random_add = np.random.randint(1, high=10, size=test.shape)
test = test + random_add
test = test.astype(dtype)
np_test = np.copy(test)
w = weldarray(test, verbose=False)
return np_test, w
def given_arrays(l, dtype):
'''
@l: list.
returns a np array and a weldarray.
'''
test = np.array(l, dtype=dtype)
np_test = np.copy(test)
w = weldarray(test)
return np_test, w
def test_unary_elemwise():
'''
Tests all the unary ops in UNARY_OPS.
FIXME: For now, unary ops seem to only be supported on floats.
'''
for op in UNARY_OPS:
for dtype in TYPES:
# int still not supported for the unary ops in Weld.
if "int" in dtype:
continue
np_test, w = random_arrays(NUM_ELS, dtype)
w2 = op(w)
np_result = op(np_test)
w2_eval = w2.evaluate()
assert np.allclose(w2, np_result)
assert np.array_equal(w2_eval, np_result)
def test_binary_elemwise():
    '''
    Tests all the binary ops in BINARY_OPS over all TYPES.
    '''
for op in BINARY_OPS:
for dtype in TYPES:
np_test, w = random_arrays(NUM_ELS, dtype)
np_test2, w2 = random_arrays(NUM_ELS, dtype)
w3 = op(w, w2)
weld_result = w3.evaluate()
np_result = op(np_test, np_test2)
# Need array equal to keep matching types for weldarray, otherwise
# allclose tries to subtract floats from ints.
assert np.array_equal(weld_result, np_result)
def test_multiple_array_creation():
'''
Minor edge case but it fails right now.
---would probably be fixed after we get rid of the loop fusion at the numpy
level.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
w = weldarray(w) # creating array again.
w2 = np.exp(w)
weld_result = w2.evaluate()
np_result = np.exp(np_test)
assert np.allclose(weld_result, np_result)
def test_array_indexing():
'''
    Need to decide: If a weldarray item is accessed - should we evaluate the
whole array (for expected behaviour to match numpy) or not?
'''
pass
def test_numpy_operations():
'''
Test operations that aren't implemented yet - it should pass it on to
numpy's implementation, and return weldarrays.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
np_result = np.sin(np_test)
w2 = np.sin(w)
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_type_conversion():
'''
After evaluating, the dtype of the returned array must be the same as
before.
'''
for t in TYPES:
_, w = random_arrays(NUM_ELS, t)
_, w2 = random_arrays(NUM_ELS, t)
w2 = np.add(w, w2)
weld_result = w2.evaluate()
assert weld_result.dtype == t
def test_concat():
'''
Test concatenation of arrays - either Weld - Weld, or Weld - Numpy etc.
'''
pass
def test_views_basic():
'''
Taking views into a 1d weldarray should return a weldarray view of the
correct data without any copying.
'''
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
assert isinstance(w2, weldarray)
def test_views_update_child():
'''
Updates both parents and child to put more strain.
'''
def asserts(w, n, w2, n2):
assert np.allclose(w[2:5], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
# unary part
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
asserts(w, n, w2, n2)
# binary part
n3, w3 = random_arrays(3, 'float32')
n2 = np.add(n2, n3, out=n2)
w2 = np.add(w2, w3, out=w2)
w2.evaluate()
asserts(w, n, w2, n2)
w2 += 5.0
n2 += 5.0
w2.evaluate()
asserts(w, n, w2, n2)
def test_views_update_parent():
'''
Create a view, then update the parent in place. The change should be
effected in the view-child as well.
'''
def asserts(w, n, w2, n2):
assert np.allclose(w[2:4], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:4]
n2 = n[2:4]
w = np.exp(w, out=w)
n = np.exp(n, out=n)
w2.evaluate()
print(w2)
print(w[2:4])
# w2 should have been updated too.
asserts(w, n, w2, n2)
n3, w3 = random_arrays(NUM_ELS, 'float32')
w = np.add(w, w3, out=w)
n = np.add(n, n3, out=n)
asserts(w, n, w2, n2)
assert np.allclose(w3, n3)
# check scalars
w += 5.0
n += 5.0
w.evaluate()
asserts(w, n, w2, n2)
def test_views_update_mix():
    '''
    Update both parent and child views with a mix of unary and binary ops.
    '''
n, w = random_arrays(10, 'float32')
# Let's add more complexity. Before messing with child views etc, first
# register an op with the parent as well.
n = np.sqrt(n)
w = np.sqrt(w)
# get the child views
w2 = w[2:5]
n2 = n[2:5]
    # updating the values in place is still reflected correctly.
w = np.log(w, out=w)
n = np.log(n, out=n)
# evaluating this causes the internal representation to change. So can't
# rely on w.weldobj.context[w.name] anymore.
w.evaluate()
# print("w2 before exp: ", w2)
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
def test_views_mix2():
'''
update parent/child, binary/unary ops.
'''
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
n3, w3 = random_arrays(NUM_ELS, 'float32')
w = np.add(w, w3, out=w)
n = np.add(n, n3, out=n)
assert np.allclose(w[2:5], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
# now update the child
def test_views_grandparents_update_mix():
'''
Similar to above. Ensure consistency of views of views etc.
'''
n, w = random_arrays(10, 'float32')
# Let's add more complexity. Before messing with child views etc, first
# register an op with the parent as well.
# TODO: uncomment.
n = np.sqrt(n)
w = np.sqrt(w)
# get the child views
w2 = w[2:9]
n2 = n[2:9]
w3 = w2[2:4]
n3 = n2[2:4]
assert np.allclose(w3.evaluate(), n3)
    # updating the values in place is still reflected correctly.
w = np.log(w, out=w)
n = np.log(n, out=n)
# evaluating this causes the internal representation to change. So can't
# rely on w.weldobj.context[w.name] anymore.
w.evaluate()
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
# w2.evaluate()
w3 = np.sqrt(w3, out=w3)
n3 = np.sqrt(n3, out=n3)
assert np.allclose(w[2:9], w2)
assert np.allclose(w2, n2)
assert np.allclose(w3, n3)
assert np.allclose(w, n)
assert np.allclose(w2[2:4], w3)
def test_views_check_old():
'''
Old views should still be valid etc.
'''
pass
def test_views_mess():
'''
More complicated versions of the views test.
'''
# parent arrays
NUM_ELS = 100
num_views = 10
n, w = random_arrays(NUM_ELS, 'float32')
# in order to avoid sqrt running into bad values
w += 1000.00
n += 1000.00
weld_views = []
np_views = []
weld_views2 = []
np_views2 = []
for i in range(num_views):
nums = random.sample(range(0,NUM_ELS), 2)
start = min(nums)
end = max(nums)
# FIXME: Need to add correct behaviour in this case.
if start == end:
continue
weld_views.append(w[start:end])
np_views.append(n[start:end])
np.sqrt(weld_views[i], out=weld_views[i])
np.sqrt(np_views[i], out=np_views[i])
np.log(weld_views[i], out=weld_views[i])
np.log(np_views[i], out=np_views[i])
np.exp(weld_views[i], out=weld_views[i])
np.exp(np_views[i], out=np_views[i])
# add some binary ops.
n2, w2 = random_arrays(len(np_views[i]), 'float32')
weld_views[i] = np.add(weld_views[i], w2, out=weld_views[i])
np_views[i] = np.add(np_views[i], n2, out=np_views[i])
# weld_views[i].evaluate()
a = np.log(weld_views[i])
b = np.log(np_views[i])
assert np.allclose(a, b)
w = np.sqrt(w, out=w)
n = np.sqrt(n, out=n)
assert np.allclose(n, w)
assert np.array_equal(w.evaluate(), n)
# TODO: Add stuff with grandchildren, and so on.
for i in range(num_views):
assert np.array_equal(np_views[i], weld_views[i].evaluate())
assert np.allclose(np_views[i], weld_views[i])
def test_views_overlap():
'''
Two overlapping views of the same array. Updating one must result in the
other being updated too.
'''
NUM_ELS = 10
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:5]
n2 = n[2:5]
# TODO: uncomment
w3 = w[4:7]
n3 = n[4:7]
# w4, n4 are non overlapping views. Values should never change
w4 = w[7:9]
n4 = n[7:9]
# w5, n5 are contained within w2, n2.
w5 = w[3:4]
n5 = n[3:4]
# unary part
w2 = np.exp(w2, out=w2)
n2 = np.exp(n2, out=n2)
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
print("starting binary part!")
# binary part:
# now update the child with binary op
n3, w3 = random_arrays(3, 'float32')
# n3, w3 = given_arrays([1.0, 1.0, 1.0], 'float32')
n2 = np.add(n2, n3, out=n2)
print('going to do np.add on w2,w3, out=w2')
w2 = np.add(w2, w3, out=w2)
# assert np.allclose(w[2:5], w2)
assert np.allclose(w, n)
assert np.allclose(w2.evaluate(), n2)
print('w5: ', w5)
print(n5)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
w2 += 5.0
n2 += 5.0
w2.evaluate()
assert np.allclose(w[2:5], w2)
assert np.allclose(w, n)
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w5, n5)
assert np.allclose(w4, n4)
assert np.allclose(w3, n3)
def test_mix_np_weld_ops():
'''
Weld Ops + Numpy Ops - before executing any of the numpy ops, the
    registered weld ops must be evaluated.
'''
np_test, w = random_arrays(NUM_ELS, 'float32')
np_test = np.exp(np_test)
np_result = np.sin(np_test)
w2 = np.exp(w)
w2 = np.sin(w2)
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_scalars():
'''
Special case of broadcasting rules - the scalar is applied to all the
    weldarray members.
'''
t = "int32"
print("t = ", t)
n, w = random_arrays(NUM_ELS, t)
n2 = n + 2
w2 = w + 2
w2 = w2.evaluate()
assert np.allclose(w2, n2)
# test by combining it with binary op.
n, w = random_arrays(NUM_ELS, t)
w += 10
n += 10
n2, w2 = random_arrays(NUM_ELS, t)
w = np.add(w, w2)
n = np.add(n, n2)
assert np.allclose(w, n)
t = "float32"
print("t = ", t)
np_test, w = random_arrays(NUM_ELS, t)
np_result = np_test + 2.00
w2 = w + 2.00
weld_result = w2.evaluate()
assert np.allclose(weld_result, np_result)
def test_stale_add():
'''
Registers op for weldarray w2, and then add it to w1. Works trivially
because updating a weldobject with another weldobject just needs to get the
naming right.
'''
n1, w1 = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
w2 = np.exp(w2)
n2 = np.exp(n2)
w1 = np.add(w1, w2)
n1 = np.add(n1, n2)
w1 = w1.evaluate()
assert np.allclose(w1, n1)
def test_cycle():
'''
This was a problem when I was using let statements to hold intermediate
weld code. (because of my naming scheme)
'''
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([3.0, 3.0], 'float32')
# w3 depends on w1.
w3 = np.add(w1, w2)
n3 = np.add(n1, n2)
# changing this to some other variable lets us pass the test.
w1 = np.exp(w1)
n1 = np.exp(n1)
w1 = np.add(w1,w3)
n1 = np.add(n1, n3)
assert np.allclose(w1.evaluate(), n1)
assert np.allclose(w3.evaluate(), n3)
def test_self_assignment():
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([2.0, 1.0], 'float32')
w1 = np.exp(w1)
n1 = np.exp(n1)
assert np.allclose(w1.evaluate(), n1)
w1 = w1 + w2
n1 = n1 + n2
assert np.allclose(w1.evaluate(), n1)
def test_reuse_array():
'''
    a = np.add(b, c)
Ensure that despite sharing underlying memory of ndarrays, future ops on a
and b should not affect each other as calculations are performed based on
the weldobject which isn't shared between the two.
'''
n1, w1 = given_arrays([1.0, 2.0], 'float32')
n2, w2 = given_arrays([2.0, 1.0], 'float32')
w3 = np.add(w1, w2)
n3 = np.add(n1, n2)
w1 = np.log(w1)
n1 = np.log(n1)
w3 = np.exp(w3)
n3 = np.exp(n3)
w1 = w1 + w3
n1 = n1 + n3
w1_result = w1.evaluate()
assert np.allclose(w1_result, n1)
w3_result = w3.evaluate()
assert np.allclose(w3_result, n3)
def test_fancy_indexing():
'''
TODO: Needs more complicated tests that mix different indexing strategies,
but since fancy indexing creates a new array - it shouldn't have any
problems dealing with further stuff.
'''
_, w = random_arrays(NUM_ELS, 'float64')
b = w > 0.50
w2 = w[b]
assert isinstance(w2, weldarray)
assert id(w) != id(w2)
def test_mixing_types():
'''
mixing f32 with f64, or i32 with f64.
Weld doesn't seem to support this right now, so pass it on to np.
'''
n1, w1 = random_arrays(2, 'float64')
n2, w2 = random_arrays(2, 'float32')
w3 = w1 + w2
n3 = n1 + n2
assert np.array_equal(n3, w3.evaluate())
def test_inplace_assignment():
'''
With the output optimization, this should be quite efficient for weld.
'''
n, w = random_arrays(100, 'float32')
n2, w2 = random_arrays(100, 'float32')
orig_addr = id(w)
for i in range(100):
n += n2
w += w2
# Ensures that the stuff above happened in place.
assert id(w) == orig_addr
w3 = w.evaluate()
assert np.allclose(n, w)
def test_nested_weld_expr():
'''
map(zip(map(...))) kind of really long nested expressions.
Add a timeout - it shouldn't take literally forever as it does now.
'''
pass
def test_getitem_evaluate():
'''
    Should evaluate stuff before returning from getitem.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
n += n2
w += w2
assert n[0] == w[0]
def test_implicit_evaluate():
n, w = random_arrays(2, 'float32')
n2, w2 = random_arrays(2, 'float32')
w3 = w+w2
n3 = n+n2
print(w3)
w3 = w3.evaluate()
w3 = w3.evaluate()
assert np.allclose(w3, n3)
def test_setitem_basic():
'''
set an arbitrary item in the array after registering ops on it.
'''
# TODO: run this on all types.
n, w = random_arrays(NUM_ELS, 'float32')
n[0] = 5.0
w[0] = 5.0
assert np.allclose(n, w)
n[0] += 10.0
w[0] += 10.0
assert np.allclose(n, w)
n[2] -= 5.0
w[2] -= 5.0
assert np.allclose(n, w)
def test_setitem_slice():
    '''
    Set a slice of items in the array.
    '''
n, w = random_arrays(NUM_ELS, 'float32')
n[0:2] = [5.0, 2.0]
w[0:2] = [5.0, 2.0]
assert np.allclose(n, w)
n[4:6] += 10.0
w[4:6] += 10.0
assert np.allclose(n, w)
def test_setitem_strides():
'''
TODO: make more complicated versions which do multiple types of changes on strides at once.
TODO2: need to support different strides.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n[0:2:1] = [5.0, 2.0]
w[0:2:1] = [5.0, 2.0]
print('w: ', w)
print('n: ', n)
assert np.allclose(n, w)
n[5:8:1] += 10.0
w[5:8:1] += 10.0
assert np.allclose(n, w)
def test_setitem_list():
    '''
    Set items at a list of indices.
    '''
n, w = random_arrays(NUM_ELS, 'float32')
a = [0, 3]
n[a] = [5.0, 13.0]
w[a] = [5.0, 13.0]
print('n: ', n)
print('w: ', w)
assert np.allclose(n, w)
def test_setitem_weird_indexing():
'''
try to confuse the weldarray with different indexing patterns.
'''
pass
def test_setitem_mix():
'''
Mix all setitem stuff / and other ops.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n = np.sqrt(n)
w = np.sqrt(w)
# assert np.allclose(n, w)
n, w = random_arrays(NUM_ELS, 'float32')
n[0:2] = [5.0, 2.0]
w[0:2] = [5.0, 2.0]
assert np.allclose(n, w)
n[4:6] += 10.0
w[4:6] += 10.0
assert np.allclose(n, w)
def test_setitem_views():
'''
What if you use setitem on a view? Will the changes be correctly propagated to the base array
etc?
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2 = n[0:4]
w2 = w[0:4]
n2[0:2:1] = [5.0, 2.0]
w2[0:2:1] = [5.0, 2.0]
assert np.allclose(n2, w2)
n2[0:3:1] += 10.0
w2[0:3:1] += 10.0
assert np.allclose(n2, w2)
def test_iterator():
n, w = random_arrays(NUM_ELS, 'float32')
w = np.exp(w, out=w)
n = np.exp(n, out=n)
for i, e in enumerate(w):
print(e)
assert e == n[i]
assert w[i] == n[i]
def test_views_double_update():
'''
Cool edge case involving views / and ordering of np.add args etc. When using wv = np.add(a,
b, out=b), other is b, and result is b too. So b gets added to b instead of a.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
w += 100.00
n += 100.00
wv = w[3:5]
nv = n[3:5]
nv2, wv2 = random_arrays(len(wv), 'float32')
wv = np.add(wv2, wv, out=wv)
nv = np.add(nv2, nv, out=nv)
# Instead, this would work:
# wv = np.add(wv, wv2, out=wv)
# nv = np.add(nv, nv2, out=nv)
assert np.allclose(w, n)
assert np.allclose(wv, nv)
def test_views_strides():
'''
Generating views with different strides besides 1.
FIXME: not supported yet.
'''
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:8:2]
n2 = n[2:8:2]
w += 100.00
n += 100.00
assert np.allclose(w, n)
assert np.allclose(w2, n2)
w2 = np.sqrt(w2, out=w2)
n2 = np.sqrt(n2, out=n2)
assert np.allclose(w, n)
assert np.allclose(w2, n2)
def test_views_other_indexing():
'''
Testing more unusual indexing patterns here.
This should be much more relevant in multidimensional arrays, so not testing it in depth here.
'''
def test_stuff(w, n, w2, n2):
w += 100.00
n += 100.00
assert np.allclose(w, n)
assert np.allclose(w2, n2)
w2 = np.sqrt(w2, out=w2)
n2 = np.sqrt(n2, out=n2)
assert np.allclose(w, n)
assert np.allclose(w2, n2)
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[:]
n2 = n[:]
test_stuff(w, n, w2, n2)
w3 = w[2:]
n3 = n[2:]
test_stuff(w, n, w2, n2)
# Bunch of failing / error handling tests.
def test_unsupported_views_empty_index():
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:2]
n2 = n[2:2]
print(w2)
print(n2)
# Fails on this one - but instead this case should be dealt with correctly when setting up
# inputs.
assert np.allclose(w2, n2)
def test_unsupported_nan_vals():
'''
need to send this off to np to handle as weld fails if elements are nans etc.
'''
n, w = random_arrays(100, 'float32')
for i in range(2):
n = np.exp(n)
w = np.exp(w)
print('n = ', n)
print('w = ', w)
assert np.allclose(n, w)
def test_unsupported_types():
n, w = given_arrays([2.0, 3.0], 'float32')
t = np.array([True, False])
n = n*t
w = w*t
print('w = ', w)
assert np.allclose(n, w)
n, w = given_arrays([2.0, 3.0], 'float32')
# Not sure what input this is in ufunc terms
n = n*True
w = w*True
assert np.allclose(n, w)
def test_unsupported_ndarray_output():
'''
kind of a stupid test - just make sure weldarray doesn't die with ugly errors.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
n = np.exp(n, out=n)
n2 = np.exp(w, out=n2)
assert np.allclose(n,n2)
def test_new_array_creation():
'''
Creating new array with an op should leave the value in the old array unchanged.
If the weldobject.evaluate() method would perform the update in place, then this test would
fail.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2 = np.sqrt(n)
w2 = np.sqrt(w)
assert np.allclose(n, w)
assert np.allclose(n2, w2)
def test_reduce():
'''
reductions is another type of ufunc. Only applies to binary ops. Not many other interesting
cases to test this because it just evaluates stuff and returns an int/float.
'''
for t in TYPES:
for r in REDUCE_UFUNCS:
n, w = random_arrays(NUM_ELS, t)
n2 = r(n)
w2 = r(w)
assert np.allclose(n2, w2)
def test_vectorization_bug():
'''
simplest case of a bug that seems to occur in more complicated programs with different order of
arrays etc. Seems to happen because of the vectorization pass.
'''
# minimum case to reproduce bug:
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
n = n*2.0
w = w*2.0
n2 = n + n2
w2 = w + w2
# Note: Here y + x produces the correct result! (weld IR given below) or if we evaluate y before.
n3 = n + n2
w3 = w + w2
# this produces correct result:
# w3 = w2 + w
w3 = w3.evaluate()
assert np.allclose(n, w)
assert np.allclose(n2, w2)
assert np.allclose(n3, w3)
def test_blackscholes_bug():
'''
Seems to happen because of the infer-size pass.
'''
n, w = random_arrays(NUM_ELS, 'float32')
n2, w2 = random_arrays(NUM_ELS, 'float32')
n3, w3 = random_arrays(NUM_ELS, 'float32')
n4 = n - (np.exp(n2) * n3)
w4 = w - (np.exp(w2) * w3)
assert np.allclose(n4, w4)
def test_erf():
'''
Separate test because numpy and weld have different functions for this right now.
'''
for dtype in TYPES:
# int still not supported for the unary ops in Weld.
if "int" in dtype:
continue
n, w = random_arrays(NUM_ELS, dtype)
n2 = ss.erf(n)
w2 = welderf(w)
w2_eval = w2.evaluate()
assert np.allclose(w2, n2)
# TODO: this works with all other unary ops but doesn't work with erf...need to debug it
# further. Might have something to do with the fact that erf is not routed through
# __array_ufunc__.
# assert np.array_equal(w2_eval, n2)
|
11469844
|
import os
import time
from uninas.methods.abstract import AbstractMethod
from uninas.training.trainer.abstract import AbstractTrainerFunctions
from uninas.optimization.pbt.response import PbtServerResponse
from uninas.training.callbacks.abstract import AbstractCallback
from uninas.utils.torch.misc import itemize
from uninas.utils.args import Argument
from uninas.utils.loggers.python import LoggerManager
from uninas.register import Register
try:
import Pyro5.api
@Register.training_callback(requires_log_dict=True)
class PbtCallback(AbstractCallback):
"""
Communicate with a Population-based-training (PBT) server for saving/loading/param instructions
"""
def __init__(self, save_dir: str, index: int, communication_file: str):
"""
:param save_dir: main dir where to save
:param index: index of this callback
:param communication_file: where the file to set up the first client-server communication is located
"""
super().__init__(save_dir, index)
self._communication_file = communication_file
self._is_connected = False
self._server_uri = None
self._server = None
self._client_id = -1
@classmethod
def log(cls, msg: str):
LoggerManager().get_logger().info('%s: %s' % (cls.__name__, msg))
def setup(self, trainer: AbstractTrainerFunctions, pl_module: AbstractMethod, stage: str):
""" called when the trainer changes the method it trains (also called for the first one) """
            assert not self._is_connected, "Cannot change the method"
while not self._is_connected:
time.sleep(1)
if os.path.isfile(self._communication_file):
with open(self._communication_file, 'r') as f:
self._server_uri = f.read()
self._server = Pyro5.api.Proxy(self._server_uri)
self.log("connecting to URI: %s" % self._server_uri)
response = PbtServerResponse.from_dict(self._server.client_register())
self._client_id = response.client_id
self.log("local client id: %d" % self._client_id)
self._is_connected = True
self._on_server_response(response, trainer, pl_module)
def teardown(self, trainer: AbstractTrainerFunctions, pl_module: AbstractMethod, stage: str):
"""Called when fit or test ends"""
self._server.client_finish(self._client_id)
del self._server
self._is_connected = False
@classmethod
def args_to_add(cls, index=None) -> [Argument]:
""" list arguments to add to argparse when this class (or a child class) is chosen """
return super().args_to_add(index) + [
Argument('communication_file', default="{path_tmp}/communication_uri", type=str, is_path=True,
help="where the file to set up the first client-server communication is located"),
]
def _on_server_response(self, response: PbtServerResponse, trainer: AbstractTrainerFunctions,
pl_module: AbstractMethod):
assert self._client_id == response.client_id,\
"client_id mismatch! Got %d, expected %d" % (response.client_id, self._client_id)
response.act(self.log, trainer)
def _client_result(self, log_dict: dict, trainer: AbstractTrainerFunctions, pl_module: AbstractMethod):
assert isinstance(log_dict, dict)
r = self._server.client_result(self._client_id, pl_module.current_epoch, itemize(log_dict))
r = PbtServerResponse.from_dict(r)
self._on_server_response(r, trainer, pl_module)
def on_train_epoch_start(self, trainer: AbstractTrainerFunctions,
pl_module: AbstractMethod,
log_dict: dict = None):
""" Called when the train epoch begins. """
self._client_result(log_dict, trainer, pl_module)
def on_train_epoch_end(self, trainer: AbstractTrainerFunctions,
pl_module: AbstractMethod,
log_dict: dict = None):
""" Called when the train epoch ends. """
self._client_result(log_dict, trainer, pl_module)
def on_validation_epoch_end(self, trainer: AbstractTrainerFunctions,
pl_module: AbstractMethod,
log_dict: dict = None):
""" Called when the val epoch ends. """
self._client_result(log_dict, trainer, pl_module)
def on_test_epoch_end(self, trainer: AbstractTrainerFunctions,
pl_module: AbstractMethod,
log_dict: dict = None):
""" Called when the test epoch ends. """
self._client_result(log_dict, trainer, pl_module)
except ImportError as e:
Register.missing_import(e)
|
11469866
|
import numpy as np
from psychopy import visual, monitors
from .. import exp
import unittest
# collections.OrderedDict is only available in Python 2.7+
try:
    from collections import OrderedDict
except ImportError:
    from exp import OrderedDict
exp.default_computer.recognized = True # so that tests can proceed
PATHS = exp.set_paths('', exp.default_computer)
class TestExp(unittest.TestCase):
def test_setpaths(self):
exp.set_paths('', exp.default_computer)
exp.set_paths()
def test_exp(self):
thisexp = MyExp(rp={'no_output': True, 'debug': True, 'unittest': True})
with self.assertRaises(SystemExit):
thisexp.run()
def test_ThickShapeStim(self):
monitor = monitors.Monitor('test', distance=57, width=37)
monitor.setSizePix((1028, 768))
win = visual.Window([128,128], monitor=monitor)
line = exp.ThickShapeStim(win)
line.draw()
line.setOri(10)
line.setPos((10,10))
line.setVertices(value=[(-.5,0),(0,.5),(.5,0),(.7,.9)])
line.draw()
win.close()
def test_GroupStim(self):
win = visual.Window([128,128])
line1 = visual.ShapeStim(win)
line2 = visual.ShapeStim(win)
group1 = exp.GroupStim(stimuli=line1, name='group1')
group2 = exp.GroupStim(stimuli=[line1, line2])
group1.draw()
group1.setPos((10, 8))
group2.setOri(10)
group1.draw()
group2.draw()
win.close()
def test_invert_dict(self):
d = {1: 2, -3: 4, 0: 'a'}
invd = exp.invert_dict(d)
self.assertEqual(invd, OrderedDict([(2,1), (4,-3), ('a',0)]))
def test_other(self):
self.assertEqual(exp.signal_det('5', '5'), 'correct')
exp.get_mon_sizes()
class MyExp(exp.Experiment):
"""
Test experiment.
"""
def __init__(self, name='exp', **kwargs):
# initialize the default Experiment class with our parameters
super(MyExp, self).__init__(name=name, paths=PATHS,
computer=exp.default_computer, **kwargs)
self.computer.valid_responses = {'1': 0, 'd': 1}
def create_stimuli(self):
"""Define stimuli
"""
self.create_fixation()
stim1 = visual.ShapeStim(self.win)
stim2 = visual.ShapeStim(self.win)
self.s = {
'fix': self.fixation,
'stim1': stim1,
'both': exp.GroupStim(stimuli=[stim1, stim2], name='both')
}
def create_trial(self):
"""Create trial structure
"""
self.trial = [exp.Event(self,
dur=0.300, # in seconds
display=self.s['fix'],
func=self.idle_event),
exp.Event(self,
dur=0, # this means present until response
display=[self.s['stim1'], self.s['both']],
func=self.wait_until_response),
exp.Event(self,
dur=.300,
display=self.s['fix'],
func=self.feedback)
]
def create_exp_plan(self):
"""Define each trial's parameters
"""
exp_plan = []
for cond in range(8):
exp_plan.append(OrderedDict([
('cond', cond),
('onset', ''),
('dur', ''),
('corr_resp', 1),
('subj_resp', ''),
('accuracy', ''),
('rt', ''),
]))
self.exp_plan = exp_plan
if __name__ == '__main__':
unittest.main()
|
11469912
|
import numpy as np  # import the numpy module under the name np
import csv  # import the csv module
from scipy import optimize  # import the optimize module from scipy
filename = 'out2'  # output file name
writer = csv.writer(open(filename + '.csv', 'w', newline=''))  # create the output csv file
writer.writerow(['step', 'f(x)', 'x1', 'x2'])  # write the column labels to the csv file
def f(x):  # objective function: sum of the distances from x to (2, 4) and (3, 2)
    return ((2 - x[0])**2 + (4 - x[1])**2)**0.5 + ((3 - x[0])**2 + (2 - x[1])**2)**0.5
def g(x):  # inequality constraints (each expression must be >= 0)
    return np.array([-2 * x[0] - 3 * x[1] + 7, x[0], -x[0] + 2, x[1], -x[1] + 2])
def callbackF(x):  # record the intermediate result at each optimization step
    global step
    step += 1
    writer.writerow([step, f(x), x[0], x[1]])
x = np.array([0.0, 0.0])
step = 0
writer.writerow([step, f(x), x[0], x[1]])
optimize.fmin_slsqp(f, x, f_ieqcons=g, iprint=2, callback=callbackF)  # sequential least squares programming (SLSQP)
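# fmin_slsqp returns the final parameter vector, and iprint=2 additionally
# prints per-iteration progress. The constraints in g() confine the search to
# the box 0 <= x1 <= 2, 0 <= x2 <= 2 below the line 2*x1 + 3*x2 <= 7, so the
# steps recorded in out2.csv trace the path toward the constrained minimum of
# the summed distances to (2, 4) and (3, 2).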
|
11469953
|
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from ..analysis import GroupLinearRegression
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
class MyTestCase(unittest.TestCase):
def test_linregress_four_groups(self):
np.random.seed(987654321)
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
100 -0.0056 0.0478 0.0000 0.1030 0.9567 1
100 0.0570 -0.1671 0.0037 0.0950 0.5497 2
100 -0.2521 0.1637 0.0506 0.1103 0.0244 3
100 0.9635 0.1043 0.8181 0.0459 0.0000 4 """
exp = GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
self.assertTupleEqual(exp.counts, ('100', '100', '100', '100'))
self.assertAlmostEqual(exp.slope[0], -0.005613130406764816)
self.assertAlmostEqual(exp.slope[1], 0.0570354136308546)
self.assertAlmostEqual(exp.slope[2], -0.2521496921022714)
self.assertAlmostEqual(exp.slope[3], 0.9634599098599703)
self.assertAlmostEqual(exp.intercept[0], 0.04775111565537506)
self.assertAlmostEqual(exp.intercept[1], -0.1670688836199169)
self.assertAlmostEqual(exp.intercept[2], 0.1637132078993005)
self.assertAlmostEqual(exp.intercept[3], 0.10434448563066669)
self.assertAlmostEqual(exp.r_squared[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.r_squared[1], 0.00366271257512563)
self.assertAlmostEqual(exp.r_squared[2], 0.05062765121282169)
self.assertAlmostEqual(exp.r_squared[3], 0.8180520671815105)
self.assertAlmostEqual(exp.statistic[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.statistic[1], 0.00366271257512563)
self.assertAlmostEqual(exp.statistic[2], 0.05062765121282169)
self.assertAlmostEqual(exp.statistic[3], 0.8180520671815105)
self.assertAlmostEqual(exp.r_value[0], -0.005504761441239674)
self.assertAlmostEqual(exp.r_value[1], 0.06052034843856759)
self.assertAlmostEqual(exp.r_value[2], -0.2250058915069152)
self.assertAlmostEqual(exp.r_value[3], 0.9044623083255103)
self.assertAlmostEqual(exp.std_err[0], 0.1030023210648352)
self.assertAlmostEqual(exp.std_err[1], 0.09502400478678666)
self.assertAlmostEqual(exp.std_err[2], 0.11029855015697929)
self.assertAlmostEqual(exp.std_err[3], 0.04589905033402483)
self.assertAlmostEqual(exp.p_value[0], 0.956651586890106)
self.assertAlmostEqual(exp.p_value[1], 0.5497443545114141)
self.assertAlmostEqual(exp.p_value[2], 0.024403659194742487)
self.assertAlmostEqual(exp.p_value[3], 4.844813765580163e-38)
self.assertEqual(str(exp), output)
def test_linregress_four_groups_string(self):
np.random.seed(987654321)
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = ['a'] * 100 + ['b'] * 100 + ['c'] * 100 + ['d'] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
100 -0.0056 0.0478 0.0000 0.1030 0.9567 a
100 0.0570 -0.1671 0.0037 0.0950 0.5497 b
100 -0.2521 0.1637 0.0506 0.1103 0.0244 c
100 0.9635 0.1043 0.8181 0.0459 0.0000 d """
exp = GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
self.assertTupleEqual(exp.counts, ('100', '100', '100', '100'))
self.assertAlmostEqual(exp.slope[0], -0.005613130406764816)
self.assertAlmostEqual(exp.slope[1], 0.0570354136308546)
self.assertAlmostEqual(exp.slope[2], -0.2521496921022714)
self.assertAlmostEqual(exp.slope[3], 0.9634599098599703)
self.assertAlmostEqual(exp.intercept[0], 0.04775111565537506)
self.assertAlmostEqual(exp.intercept[1], -0.1670688836199169)
self.assertAlmostEqual(exp.intercept[2], 0.1637132078993005)
self.assertAlmostEqual(exp.intercept[3], 0.10434448563066669)
self.assertAlmostEqual(exp.r_squared[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.r_squared[1], 0.00366271257512563)
self.assertAlmostEqual(exp.r_squared[2], 0.05062765121282169)
self.assertAlmostEqual(exp.r_squared[3], 0.8180520671815105)
self.assertAlmostEqual(exp.statistic[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.statistic[1], 0.00366271257512563)
self.assertAlmostEqual(exp.statistic[2], 0.05062765121282169)
self.assertAlmostEqual(exp.statistic[3], 0.8180520671815105)
self.assertAlmostEqual(exp.r_value[0], -0.005504761441239674)
self.assertAlmostEqual(exp.r_value[1], 0.06052034843856759)
self.assertAlmostEqual(exp.r_value[2], -0.2250058915069152)
self.assertAlmostEqual(exp.r_value[3], 0.9044623083255103)
self.assertAlmostEqual(exp.std_err[0], 0.1030023210648352)
self.assertAlmostEqual(exp.std_err[1], 0.09502400478678666)
self.assertAlmostEqual(exp.std_err[2], 0.11029855015697929)
self.assertAlmostEqual(exp.std_err[3], 0.04589905033402483)
self.assertAlmostEqual(exp.p_value[0], 0.956651586890106)
self.assertAlmostEqual(exp.p_value[1], 0.5497443545114141)
self.assertAlmostEqual(exp.p_value[2], 0.024403659194742487)
self.assertAlmostEqual(exp.p_value[3], 4.844813765580163e-38)
self.assertEqual(str(exp), output)
def test_no_data(self):
"""Test the case where there's no data."""
self.assertRaises(NoDataError, lambda: GroupLinearRegression([], []))
def test_at_minimum_size(self):
"""Test to make sure the case where the length of data is just above the minimum size."""
np.random.seed(987654321)
input_1 = st.norm.rvs(size=2), st.norm.rvs(size=2)
input_2 = st.norm.rvs(size=2), st.norm.rvs(size=2)
input_3 = st.norm.rvs(size=2), st.norm.rvs(size=2)
input_4_x = st.norm.rvs(size=2)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 2 + [2] * 2 + [3] * 2 + [4] * 2
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
2 -1.0763 1.2343 1.0000 0.0000 0.0000 1
2 2.0268 0.6799 1.0000 0.0000 0.0000 2
2 1.8891 -2.4800 1.0000 0.0000 0.0000 3
2 0.1931 -0.2963 1.0000 0.0000 0.0000 4 """
exp = GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
self.assertEqual(str(exp), output)
def test_all_below_minimum_size(self):
"""Test the case where all the supplied data is less than the minimum size."""
np.random.seed(987654321)
input_1 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_2 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_3 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_4 = st.norm.rvs(size=1), st.norm.rvs(size=1)
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1, 2, 3, 4]
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
self.assertRaises(
NoDataError,
lambda: GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'])
)
def test_below_minimum_size(self):
"""Test the case where a group is less than the minimum size."""
np.random.seed(987654321)
input_1 = st.norm.rvs(size=10), st.norm.rvs(size=10)
input_2 = st.norm.rvs(size=10), st.norm.rvs(size=10)
input_3 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_4 = st.norm.rvs(size=10), st.norm.rvs(size=10)
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 10 + [2] * 10 + [3] + [4] * 10
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
10 0.4268 -0.2032 0.2877 0.2374 0.1100 1
10 0.1214 -0.6475 0.0393 0.2123 0.5832 2
10 0.2367 0.2525 0.1131 0.2343 0.3419 4 """
exp = GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'])
self.assertEqual(output, str(exp))
def test_vector_no_data(self):
"""Test the case where there's no data with a vector as input."""
self.assertRaises(NoDataError, lambda: GroupLinearRegression(Vector([], other=[])))
def test_no_ydata(self):
"""Test the case where the ydata argument is None."""
self.assertRaises(AttributeError, lambda: GroupLinearRegression([1, 2, 3, 4]))
def test_unequal_pair_lengths(self):
"""Test the case where the supplied pairs are unequal."""
np.random.seed(987654321)
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=96)
self.assertRaises(UnequalVectorLengthError, lambda: GroupLinearRegression(input_1[0], input_1[1]))
def test_linregress_one_group(self):
np.random.seed(987654321)
input_array = st.norm.rvs(size=100), st.norm.rvs(size=100)
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
100 -0.0056 0.0478 0.0000 0.1030 0.9567 1 """
exp = GroupLinearRegression(input_array[0], input_array[1], display=False)
self.assertEqual(str(exp), output)
def test_linregress_vector(self):
np.random.seed(987654321)
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = Vector(cs_x, other=cs_y, groups=grp)
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
100 -0.0056 0.0478 0.0000 0.1030 0.9567 1
100 0.0570 -0.1671 0.0037 0.0950 0.5497 2
100 -0.2521 0.1637 0.0506 0.1103 0.0244 3
100 0.9635 0.1043 0.8181 0.0459 0.0000 4 """
exp = GroupLinearRegression(input_array, display=False)
self.assertTupleEqual(exp.counts, ('100', '100', '100', '100'))
self.assertAlmostEqual(exp.slope[0], -0.005613130406764816)
self.assertAlmostEqual(exp.slope[1], 0.0570354136308546)
self.assertAlmostEqual(exp.slope[2], -0.2521496921022714)
self.assertAlmostEqual(exp.slope[3], 0.9634599098599703)
self.assertAlmostEqual(exp.intercept[0], 0.04775111565537506)
self.assertAlmostEqual(exp.intercept[1], -0.1670688836199169)
self.assertAlmostEqual(exp.intercept[2], 0.1637132078993005)
self.assertAlmostEqual(exp.intercept[3], 0.10434448563066669)
self.assertAlmostEqual(exp.r_squared[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.r_squared[1], 0.00366271257512563)
self.assertAlmostEqual(exp.r_squared[2], 0.05062765121282169)
self.assertAlmostEqual(exp.r_squared[3], 0.8180520671815105)
self.assertAlmostEqual(exp.statistic[0], 3.030239852495909e-05)
self.assertAlmostEqual(exp.statistic[1], 0.00366271257512563)
self.assertAlmostEqual(exp.statistic[2], 0.05062765121282169)
self.assertAlmostEqual(exp.statistic[3], 0.8180520671815105)
self.assertAlmostEqual(exp.r_value[0], -0.005504761441239674)
self.assertAlmostEqual(exp.r_value[1], 0.06052034843856759)
self.assertAlmostEqual(exp.r_value[2], -0.2250058915069152)
self.assertAlmostEqual(exp.r_value[3], 0.9044623083255103)
self.assertAlmostEqual(exp.std_err[0], 0.1030023210648352)
self.assertAlmostEqual(exp.std_err[1], 0.09502400478678666)
self.assertAlmostEqual(exp.std_err[2], 0.11029855015697929)
self.assertAlmostEqual(exp.std_err[3], 0.04589905033402483)
self.assertAlmostEqual(exp.p_value[0], 0.956651586890106)
self.assertAlmostEqual(exp.p_value[1], 0.5497443545114141)
self.assertAlmostEqual(exp.p_value[2], 0.024403659194742487)
self.assertAlmostEqual(exp.p_value[3], 4.844813765580163e-38)
self.assertEqual(str(exp), output)
def test_linregress_missing_data(self):
np.random.seed(987654321)
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
        input_array.loc[24, 'a'] = np.nan
        input_array.loc[256, 'a'] = np.nan
        input_array.loc[373, 'b'] = np.nan
        input_array.loc[24, 'b'] = np.nan
        input_array.loc[128, 'b'] = np.nan
output = """
Linear Regression
-----------------
n Slope Intercept r^2 Std Err p value Group
--------------------------------------------------------------------------------------------------
99 -0.0115 0.0340 0.0001 0.1028 0.9114 1
99 0.0281 -0.1462 0.0009 0.0950 0.7681 2
99 -0.2546 0.1653 0.0495 0.1133 0.0269 3
99 0.9635 0.1043 0.8178 0.0462 0.0000 4 """
exp = GroupLinearRegression(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
self.assertEqual(str(exp), output)
if __name__ == '__main__':
unittest.main()
|
11470011
|
import pytest
import dpnp
import numpy
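# Resolve a dotted attribute path on an object: e.g. (illustrative call)
# _getattr(numpy, "linalg.norm") returns numpy.linalg.norm.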
def _getattr(ex, str_):
attrs = str_.split(".")
res = ex
for attr in attrs:
res = getattr(res, attr)
return res
@pytest.mark.parametrize("func_name",
['abs', ])
@pytest.mark.parametrize("type",
[numpy.float64, numpy.float32, numpy.int64, numpy.int32],
ids=['float64', 'float32', 'int64', 'int32'])
def test_strides(func_name, type):
shape = (4, 4)
a = numpy.arange(shape[0] * shape[1], dtype=type).reshape(shape)
a_strides = a[0::2, 0::2]
dpa = dpnp.array(a)
dpa_strides = dpa[0::2, 0::2]
dpnp_func = _getattr(dpnp, func_name)
result = dpnp_func(dpa_strides)
numpy_func = _getattr(numpy, func_name)
expected = numpy_func(a_strides)
numpy.testing.assert_allclose(expected, result)
|
11470013
|
import pytorch_lightning as pl
class Optimization(pl.LightningModule):
def __init__(self):
super(Optimization, self).__init__()
self.counters = {
'iteration': 0,
}
|
11470038
|
import gdsfactory as gf
def test_get_ports() -> None:
c = gf.components.mzi_phase_shifter_top_heater_metal(length_x=123)
p = c.get_ports_dict()
assert len(p) == 4, len(p)
p_dc = c.get_ports_dict(width=11.0)
p_dc_layer = c.get_ports_dict(layer=(49, 0))
assert len(p_dc) == 2, f"{len(p_dc)}"
assert len(p_dc_layer) == 2, f"{len(p_dc_layer)}"
p_optical = c.get_ports_dict(width=0.5)
assert len(p_optical) == 2, f"{len(p_optical)}"
p_optical_west = c.get_ports_dict(orientation=180, width=0.5)
p_optical_east = c.get_ports_dict(orientation=0, width=0.5)
assert len(p_optical_east) == 1, f"{len(p_optical_east)}"
assert len(p_optical_west) == 1, f"{len(p_optical_west)}"
if __name__ == "__main__":
test_get_ports()
# c = gf.components.mzi_phase_shifter()
# c.show()
# p = c.get_ports_dict()
# assert len(p) == 4, len(p)
# p_dc = c.get_ports_dict(width=11.)
# p_dc_layer = c.get_ports_dict(layer=(49, 0))
# assert len(p_dc) == 2, f"{len(p_dc)}"
# assert len(p_dc_layer) == 2, f"{len(p_dc_layer)}"
# p_optical = c.get_ports_dict(width=0.5)
# assert len(p_optical) == 2, f"{len(p_optical)}"
# p_optical_west = c.get_ports_dict(orientation=180, width=0.5)
# p_optical_east = c.get_ports_dict(orientation=0, width=0.5)
# assert len(p_optical_east) == 1, f"{len(p_optical_east)}"
# assert len(p_optical_west) == 1, f"{len(p_optical_west)}"
|
11470060
|
del_items(0x80147A14)
SetType(0x80147A14, "void EA_cd_seek(int secnum)")
del_items(0x80147A1C)
SetType(0x80147A1C, "void MY_CdGetSector(unsigned long *src, unsigned long *dst, int size)")
del_items(0x80147A50)
SetType(0x80147A50, "void init_cdstream(int chunksize, unsigned char *buf, int bufsize)")
del_items(0x80147A60)
SetType(0x80147A60, "void flush_cdstream()")
del_items(0x80147A84)
SetType(0x80147A84, "int check_complete_frame(struct strheader *h)")
del_items(0x80147B04)
SetType(0x80147B04, "void reset_cdstream()")
del_items(0x80147B2C)
SetType(0x80147B2C, "void kill_stream_handlers()")
del_items(0x80147B90)
SetType(0x80147B90, "void stream_cdready_handler(unsigned long *addr, int idx, int i, int sec)")
del_items(0x80147D90)
SetType(0x80147D90, "void CD_stream_handler(struct TASK *T)")
del_items(0x80147E84)
SetType(0x80147E84, "void install_stream_handlers()")
del_items(0x80147EF4)
SetType(0x80147EF4, "void cdstream_service()")
del_items(0x80147F8C)
SetType(0x80147F8C, "int cdstream_get_chunk(unsigned char **data, struct strheader **h)")
del_items(0x801480B0)
SetType(0x801480B0, "int cdstream_is_last_chunk()")
del_items(0x801480C8)
SetType(0x801480C8, "void cdstream_discard_chunk()")
del_items(0x801481C8)
SetType(0x801481C8, "void close_cdstream()")
del_items(0x8014823C)
SetType(0x8014823C, "int open_cdstream(char *fname, int secoffs, int seclen)")
del_items(0x801483D8)
SetType(0x801483D8, "int set_mdec_img_buffer(unsigned char *p)")
del_items(0x8014840C)
SetType(0x8014840C, "void start_mdec_decode(unsigned char *data, int x, int y, int w, int h)")
del_items(0x80148590)
SetType(0x80148590, "void DCT_out_handler()")
del_items(0x8014862C)
SetType(0x8014862C, "void init_mdec(unsigned char *vlc_buffer, unsigned char *vlc_table)")
del_items(0x8014869C)
SetType(0x8014869C, "void init_mdec_buffer(char *buf, int size)")
del_items(0x801486B8)
SetType(0x801486B8, "int split_poly_area(struct POLY_FT4 *p, struct POLY_FT4 *bp, int offs, struct RECT *r, int sx, int sy, int correct)")
del_items(0x80148AA8)
SetType(0x80148AA8, "void rebuild_mdec_polys(int x, int y)")
del_items(0x80148C7C)
SetType(0x80148C7C, "void clear_mdec_frame()")
del_items(0x80148C88)
SetType(0x80148C88, "void draw_mdec_polys()")
del_items(0x80148FD4)
SetType(0x80148FD4, "void invalidate_mdec_frame()")
del_items(0x80148FE8)
SetType(0x80148FE8, "int is_frame_decoded()")
del_items(0x80148FF4)
SetType(0x80148FF4, "void init_mdec_polys(int x, int y, int w, int h, int bx1, int by1, int bx2, int by2, int correct)")
del_items(0x80149384)
SetType(0x80149384, "void set_mdec_poly_bright(int br)")
del_items(0x801493EC)
SetType(0x801493EC, "int init_mdec_stream(unsigned char *buftop, int sectors_per_frame, int mdec_frames_per_buffer)")
del_items(0x8014943C)
SetType(0x8014943C, "void init_mdec_audio(int rate)")
del_items(0x80149540)
SetType(0x80149540, "void kill_mdec_audio()")
del_items(0x80149570)
SetType(0x80149570, "void stop_mdec_audio()")
del_items(0x80149594)
SetType(0x80149594, "void play_mdec_audio(unsigned char *data, struct asec *h)")
del_items(0x80149838)
SetType(0x80149838, "void set_mdec_audio_volume(short vol, struct SpuVoiceAttr voice_attr)")
del_items(0x80149904)
SetType(0x80149904, "void resync_audio()")
del_items(0x80149934)
SetType(0x80149934, "void stop_mdec_stream()")
del_items(0x80149980)
SetType(0x80149980, "void dequeue_stream()")
del_items(0x80149A6C)
SetType(0x80149A6C, "void dequeue_animation()")
del_items(0x80149C1C)
SetType(0x80149C1C, "void decode_mdec_stream(int frames_elapsed)")
del_items(0x80149E08)
SetType(0x80149E08, "void play_mdec_stream(char *filename, int speed, int start, int end)")
del_items(0x80149EBC)
SetType(0x80149EBC, "void clear_mdec_queue()")
del_items(0x80149EE8)
SetType(0x80149EE8, "void StrClearVRAM()")
del_items(0x80149FA8)
SetType(0x80149FA8, "short PlayFMVOverLay(char *filename, int w, int h)")
del_items(0x8014A4B0)
SetType(0x8014A4B0, "unsigned short GetDown__C4CPad(struct CPad *this)")
|
11470070
|
from tests.support.asserts import assert_error, assert_success
def get_element_css_value(session, element_id, prop):
return session.transport.send(
"GET",
"session/{session_id}/element/{element_id}/css/{prop}".format(
session_id=session.session_id,
element_id=element_id,
prop=prop
)
)
def test_no_top_browsing_context(session, closed_window):
original_handle, element = closed_window
response = get_element_css_value(session, element.id, "display")
assert_error(response, "no such window")
response = get_element_css_value(session, "foo", "bar")
assert_error(response, "no such window")
session.window_handle = original_handle
response = get_element_css_value(session, element.id, "display")
assert_error(response, "no such element")
def test_no_browsing_context(session, closed_frame):
response = get_element_css_value(session, "foo", "bar")
assert_error(response, "no such window")
def test_element_not_found(session):
result = get_element_css_value(session, "foo", "display")
assert_error(result, "no such element")
def test_element_stale(session, inline):
session.url = inline("<input>")
element = session.find.css("input", all=False)
session.refresh()
result = get_element_css_value(session, element.id, "display")
assert_error(result, "stale element reference")
def test_property_name_value(session, inline):
session.url = inline("""<input style="display: block">""")
element = session.find.css("input", all=False)
result = get_element_css_value(session, element.id, "display")
assert_success(result, "block")
def test_property_name_not_existent(session, inline):
session.url = inline("<input>")
element = session.find.css("input", all=False)
result = get_element_css_value(session, element.id, "foo")
assert_success(result, "")
|
11470074
|
import os
from flask import Flask, flash, redirect, request, Response, url_for, session
from flask_admin import Admin
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.contrib.twitter import make_twitter_blueprint, twitter
from flask_login import current_user, login_required, logout_user
from flask_security import SQLAlchemyUserDatastore, Security, login_user
from flask_admin.menu import MenuLink
from flask_dance.contrib.github import make_github_blueprint
from sqlalchemy.orm.exc import NoResultFound
from bitcoin_acks.database.session import session_scope
from bitcoin_acks.logging import log
from bitcoin_acks.models import Invoices, PullRequests, Logs
from bitcoin_acks.models.bounties import Bounties
from bitcoin_acks.models.users import OAuth, Roles, Users
from bitcoin_acks.payments.payment_processor import PaymentProcessor
from bitcoin_acks.webapp.database import db
from bitcoin_acks.webapp.templates.template_globals import \
apply_template_globals
from bitcoin_acks.webapp.views.bounties_payable_model_view import BountiesPayableModelView
from bitcoin_acks.webapp.views.invoices_model_view import InvoicesModelView
from bitcoin_acks.webapp.views.pull_requests_model_view import \
PullRequestsModelView
from bitcoin_acks.webapp.views.user_model_view import UsersModelView
def create_app(config_object: str):
app = Flask(__name__)
app.config.from_object(config_object)
db.init_app(app)
user_datastore = SQLAlchemyUserDatastore(db, Users, Roles)
security = Security(datastore=user_datastore)
security.init_app(app, user_datastore)
apply_template_globals(app)
@app.after_request
def after_request(response):
""" Logging after every request. """
record = Logs()
if request.headers.getlist("X-Forwarded-For"):
record.ip = request.headers.getlist("X-Forwarded-For")[0]
else:
record.ip = request.remote_addr
record.method = request.method
record.full_path = request.full_path
record.path = request.path
record.user_agent = request.user_agent.string
record.status = response.status_code
with session_scope() as log_session:
log_session.add(record)
return response
admin = Admin(app,
name='Bitcoin ACKs',
template_mode='bootstrap3',
url='/',
index_view=PullRequestsModelView(PullRequests, db.session))
admin.add_view(BountiesPayableModelView(Bounties, db.session))
admin.add_view(InvoicesModelView(Invoices, db.session))
admin.add_view(UsersModelView(Users, db.session))
@app.route('/robots.txt')
def robots_txt():
return Response('User-agent: *\nDisallow: /\n')
@app.route("/logout")
@login_required
def logout():
logout_user()
flash("You have logged out")
return redirect(url_for("index"))
app.payment_processor = PaymentProcessor()
@app.route('/payment-notification/', methods=['POST'])
def payment_notification():
r = request.get_json()
log.debug('invoice_notification', request=r, session=session)
if 'data' in r:
r = r['data']
app.payment_processor.process_invoice_data(r)
return {}
github_blueprint = make_github_blueprint(
client_id=os.environ['GITHUB_OAUTH_CLIENT_ID'],
client_secret=os.environ['GITHUB_OAUTH_CLIENT_SECRET'],
scope='user:email'
)
app.register_blueprint(github_blueprint, url_prefix='/login-github')
# twitter_blueprint = make_twitter_blueprint(
# api_key=os.environ['TWITTER_OAUTH_CLIENT_KEY'],
# api_secret=os.environ['TWITTER_OAUTH_CLIENT_SECRET'],
# )
# app.register_blueprint(twitter_blueprint, url_prefix='/login-twitter')
#
# @app.route("/login-twitter")
# def twitter_logged_in():
# if not twitter.authorized:
# return redirect(url_for("twitter.login"))
# user_resp = twitter.get("account/settings.json")
# log.debug('user response', resp=user_resp.json())
# assert user_resp.ok
# return "You are @{screen_name} on Twitter".format(screen_name=user_resp.json()["screen_name"])
@oauth_authorized.connect_via(github_blueprint)
def github_logged_in(github_blueprint, token):
if not token:
flash("Failed to log in.", category="error")
return redirect(url_for("github.login"))
user_resp = github_blueprint.session.get("/user")
log.debug('user response', resp=user_resp.json())
emails_resp = github_blueprint.session.get("/user/emails")
log.debug('user emails response', resp=emails_resp.json())
if not emails_resp.ok:
log.error('github_logged_in error', resp=emails_resp.json(),
token=token)
msg = "Failed to fetch user info."
flash(msg, category="error")
return False
info = user_resp.json()
user_id = info["node_id"]
email = [e for e in emails_resp.json() if e['primary']][0]['email']
with session_scope() as db_session:
try:
user = db_session.query(Users).filter(Users.id == user_id).one()
except NoResultFound:
user = Users(id=user_id)
db_session.add(user)
user.is_active = True
user.email = email
try:
db_session.query(OAuth).filter_by(provider=github_blueprint.name, provider_user_id=user_id).one()
except NoResultFound:
oauth = OAuth(provider=github_blueprint.name, provider_user_id=user_id, user_id=user_id, token=token)
db_session.add(oauth)
login_user(user)
flash("Successfully signed in.")
return False
# notify on OAuth provider error
@oauth_error.connect_via(github_blueprint)
def github_error(github_blueprint, message, response, error):
msg = "OAuth error from {name}! message={message} response={response}".format(
name=github_blueprint.name, message=message, response=response
)
flash(msg, category="error")
class LoginMenuLink(MenuLink):
def is_accessible(self):
return not current_user.is_authenticated
class LogoutMenuLink(MenuLink):
def is_accessible(self):
return current_user.is_authenticated
admin.add_link(LoginMenuLink(name='Login', endpoint='github.login'))
admin.add_link(LogoutMenuLink(name='Logout', endpoint='logout'))
return app
if __name__ == '__main__':
app = create_app('bitcoin_acks.webapp.settings.Config')
app.debug = True
app.run(host='0.0.0.0', port=7371)
|
11470089
|
from collections import defaultdict
from types import SimpleNamespace
class NestedNamespace(SimpleNamespace):
def __init__(self, dictionary, **kwargs):
super().__init__(**kwargs)
for key, value in dictionary.items():
if isinstance(value, dict):
self.__setattr__(key, NestedNamespace(value))
else:
self.__setattr__(key, value)
def rec_dd():
return defaultdict(rec_dd)
def recursive_get(dic, path):
if len(path) == 0:
return dic
head = path[0]
tail = path[1:]
return recursive_get(dic[head], tail)
def recursive_set(dic, path, value):
if len(path) == 1:
dic[path[0]] = value
else:
head = path[0]
tail = path[1:]
recursive_set(dic[head], tail, value)
def expand_keys(dic, prefix=tuple(), result=None):
if result is None:
result = rec_dd()
if not isinstance(dic, dict):
recursive_set(result, prefix, dic)
else:
for k, v in dic.items():
path = tuple(k.split('.'))
full_path = prefix + path
expand_keys(v, full_path, result)
return result
def fix_dict(defdict):
if isinstance(defdict, defaultdict):
for k in list(defdict.keys()):
defdict[k] = fix_dict(defdict[k])
return dict(defdict)
return defdict
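# Minimal usage sketch (illustrative values only): dotted keys expand into
# nested dicts, which are normalized to plain dicts and exposed as attributes.
if __name__ == '__main__':
    cfg = fix_dict(expand_keys({'model.depth': 4, 'model.width': 2, 'lr': 0.1}))
    # cfg == {'model': {'depth': 4, 'width': 2}, 'lr': 0.1}
    ns = NestedNamespace(cfg)
    assert ns.model.depth == 4 and ns.lr == 0.1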
|
11470100
|
import copy
import sys
from importlib import import_module
import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
from hcrystalball.exceptions import InsufficientDataLengthError
from hcrystalball.exceptions import PredictWithoutFitError
from hcrystalball.utils import check_fit_before_predict
from hcrystalball.utils import check_X_y
from hcrystalball.utils import deep_dict_update
from hcrystalball.utils import get_estimator_repr
from hcrystalball.utils import optional_import
@pytest.fixture(scope="module")
def X(request):
if "series" in request.param:
return tm.makeTimeSeries(freq="D")
elif "dataframe" in request.param:
result = tm.makeTimeDataFrame(freq="D").drop(columns="A")
if "date_col_str" in request.param:
return result.assign(index=lambda x: x.index.astype(str)).set_index("index")
elif "len_<_3" in request.param:
return result.iloc[:2, :]
elif "wo_date_col" in request.param:
result.index.name = "some_other_index_name"
return result
else:
raise ValueError("Invalid X fixture parameter")
else:
raise ValueError("Invalid X fixture parameter")
@pytest.fixture(scope="module")
def y(request):
if request.param is None:
return None
elif "dataframe" in request.param:
return tm.makeTimeDataFrame(freq="D")
elif "series" in request.param:
result = tm.makeTimeSeries(freq="D")
if "wrong_len" in request.param:
return result[:2]
elif "ok" in request.param:
return result
else:
raise ValueError("Invalid X fixture parameter")
elif "ndarray" in request.param:
result = tm.makeTimeSeries(freq="D").values
if "wrong_len" in request.param:
return result[:2]
elif "wrong_ndim" in request.param:
return np.array([result, result])
elif "ok" in request.param:
return result
else:
raise ValueError("Invalid X fixture parameter")
else:
raise ValueError("Invalid X fixture parameter")
@pytest.mark.parametrize(
"X, y, expected_error",
[
("series", None, TypeError),
("dataframe_len_<_3", None, InsufficientDataLengthError),
("dataframe_date_col_str", None, ValueError),
("dataframe_wo_date_col", None, None),
("dataframe_wo_date_col", "dataframe", TypeError),
("dataframe_wo_date_col", "series_wrong_len", ValueError),
("dataframe_wo_date_col", "series_ok", None),
("dataframe_wo_date_col", "ndarray_wrong_len", ValueError),
("dataframe_wo_date_col", "ndarray_wrong_ndim", ValueError),
("dataframe_wo_date_col", "ndarray_ok", None),
],
indirect=["X", "y"],
)
def test_check_X_y(X, y, expected_error):
@check_X_y
def pass_func(self, X, y):
return X, y
    # make sure the checks raise the appropriate errors
if expected_error is not None:
with pytest.raises(expected_error):
pass_func(None, X, y)
else:
# make sure X and y stay unchanged after the check
res_X, res_y = pass_func(None, X, y)
assert_frame_equal(res_X, X)
if isinstance(y, pd.Series):
assert_series_equal(res_y, y)
else:
assert_array_equal(res_y, y)
@pytest.mark.parametrize("model_is_fitted, expected_error", [(True, None), (False, PredictWithoutFitError)])
def test_check_fit_before_predict(model_is_fitted, expected_error):
class DummyModel:
def __init__(self, name="dummy_model", fitted=False):
self.name = name
self.fitted = fitted
@check_fit_before_predict
def predict(self, X):
return X
x = 3
dummy_model = DummyModel(fitted=model_is_fitted)
if expected_error is None:
assert x == dummy_model.predict(x)
else:
with pytest.raises(PredictWithoutFitError):
_ = dummy_model.predict(x)
@pytest.mark.parametrize(
"wrapper_instance",
["sklearn", "stacking_ensemble", "simple_ensemble", "smoothing", "sarimax", "prophet", "tbats"],
indirect=["wrapper_instance"],
)
def test_get_model_repr_single_model(wrapper_instance):
model_repr = get_estimator_repr(wrapper_instance)
assert model_repr.find("...") == -1
assert model_repr == wrapper_instance.__repr__(N_CHAR_MAX=10000).replace("\n", "").replace(" ", "")
@pytest.mark.parametrize(
"pipeline_instance_model_in_pipeline",
["sklearn", "stacking_ensemble", "simple_ensemble", "smoothing", "sarimax", "prophet", "tbats"],
indirect=["pipeline_instance_model_in_pipeline"],
)
def test_get_model_repr_pipeline_instance_model_in_pipeline(
pipeline_instance_model_in_pipeline,
):
model_repr = get_estimator_repr(pipeline_instance_model_in_pipeline)
assert model_repr.find("...") == -1
assert model_repr == pipeline_instance_model_in_pipeline.__repr__(N_CHAR_MAX=10000).replace(
"\n", ""
).replace(" ", "")
@pytest.mark.parametrize(
"module_name, class_name, dependency",
[
("_prophet", "ProphetWrapper", "prophet"),
("_statsmodels", "ExponentialSmoothingWrapper", "statsmodels.tsa.api"),
("_statsmodels", "HoltSmoothingWrapper", "statsmodels.tsa.api"),
("_statsmodels", "SimpleSmoothingWrapper", "statsmodels.tsa.api"),
("_sarimax", "SarimaxWrapper", "pmdarima.arima"),
("_tbats", "TBATSWrapper", "tbats"),
("_tbats", "BATSWrapper", "tbats"),
],
)
def test_optional_import_missing_dependency(module_name, class_name, dependency):
# store modules for later reference
hcb_wrappers = sys.modules["hcrystalball.wrappers"]
hcb_wrappers_module = sys.modules[f"hcrystalball.wrappers.{module_name}"]
dep = sys.modules[dependency]
# remove modules
del sys.modules["hcrystalball.wrappers"]
del sys.modules[f"hcrystalball.wrappers.{module_name}"]
sys.modules[dependency] = None
# import never fails
res = optional_import(f"hcrystalball.wrappers.{module_name}", class_name, globals())
assert not res
# class has an informative docstring
assert (
globals()[class_name].__doc__
== "This is just helper class to inform user about missing dependencies at init time"
)
# init of the helper class fails
with pytest.raises(ModuleNotFoundError):
globals()[class_name]()
# restore modules for other tests
sys.modules["hcrystalball.wrappers"] = hcb_wrappers
sys.modules[f"hcrystalball.wrappers.{module_name}"] = hcb_wrappers_module
sys.modules[dependency] = dep
@pytest.mark.parametrize(
"module_name, class_name",
[
("_prophet", "ProphetWrapper"),
("_statsmodels", "ExponentialSmoothingWrapper"),
("_statsmodels", "HoltSmoothingWrapper"),
("_statsmodels", "SimpleSmoothingWrapper"),
("_sarimax", "SarimaxWrapper"),
("_tbats", "TBATSWrapper"),
("_tbats", "BATSWrapper"),
],
)
def test_optional_import_with_dependency(module_name, class_name):
    # importing from the private module (from hcrystalball.wrappers._prophet import ProphetWrapper)
    # and from the wrapper package (from hcrystalball.wrappers import ProphetWrapper)
    # returns the same result when the dependencies are in place
    WrapperOrig = getattr(import_module(f"hcrystalball.wrappers.{module_name}"), class_name)
    res = optional_import(f"hcrystalball.wrappers.{module_name}", class_name, globals())
    assert res[0] == class_name
    assert str(globals()[class_name]()) == str(WrapperOrig())
@pytest.mark.parametrize(
"source, update, exp_result",
[
(
{"a": {"b": 1, "c": 2}, "x": {"z": 1}},
{"x": {"c": 0}},
{"a": {"b": 1, "c": 2}, "x": {"z": 1, "c": 0}},
),
(
{"x": {"c": 1}},
{"x": {"c": 0}},
{"x": {"c": 0}},
),
],
)
def test_deep_dict_update(source, update, exp_result):
    source_before = copy.deepcopy(source)
    update_before = copy.deepcopy(update)
    result = deep_dict_update(source, update)
    assert result == exp_result
    # the update must not mutate its inputs
    assert source == source_before
    assert update == update_before
|
11470127
|
import random
import torch
from torch import nn
import numpy as np
from .amr_graph import read_file
from .vocabs import PAD, UNK, DUM, NIL, END, CLS
# Returns cp_seq as a list of each lemma + '_' and mp_seq as a list of the lemmas,
# plus dictionaries that map tokens missing from the vocab to fresh local indices.
# These represent the potential concepts / attributes.
def get_concepts(lem, vocab):
cp_seq, mp_seq = [], []
new_tokens = set()
for le in lem:
cp_seq.append(le + '_')
mp_seq.append(le)
for cp, mp in zip(cp_seq, mp_seq):
if vocab.token2idx(cp) == vocab.unk_idx:
new_tokens.add(cp)
if vocab.token2idx(mp) == vocab.unk_idx:
new_tokens.add(mp)
nxt = vocab.size
token2idx, idx2token = dict(), dict()
for x in new_tokens:
token2idx[x] = nxt
idx2token[nxt] = x
nxt += 1
return cp_seq, mp_seq, token2idx, idx2token
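# Illustrative sketch (hypothetical vocab): get_concepts(['run', 'dog'], vocab)
# yields cp_seq == ['run_', 'dog_'], mp_seq == ['run', 'dog'], and assigns any
# token unknown to the vocab a new local index starting at vocab.size.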
def ListsToTensor(xs, vocab=None, local_vocabs=None, unk_rate=0.):
pad = vocab.padding_idx if vocab else 0
def toIdx(w, i):
if vocab is None:
return w
if isinstance(w, list):
return [toIdx(_, i) for _ in w]
if random.random() < unk_rate:
return vocab.unk_idx
if local_vocabs is not None:
local_vocab = local_vocabs[i]
if (local_vocab is not None) and (w in local_vocab):
return local_vocab[w]
return vocab.token2idx(w)
max_len = max(len(x) for x in xs)
ys = []
for i, x in enumerate(xs):
y = toIdx(x, i) + [pad]*(max_len-len(x))
ys.append(y)
data = np.transpose(np.array(ys, dtype=np.int64))
return data
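# Note: sequences are padded to the longest length and the batch is transposed,
# so the returned array is time-major with shape (max_len, batch_size).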
def ListsofStringToTensor(xs, vocab, max_string_len=20):
max_len = max(len(x) for x in xs)
ys = []
for x in xs:
y = x + [PAD]*(max_len -len(x))
zs = []
for z in y:
z = list(z[:max_string_len])
zs.append(vocab.token2idx([CLS]+z+[END]) + [vocab.padding_idx]*(max_string_len - len(z)))
ys.append(zs)
data = np.transpose(np.array(ys, dtype=np.int64), (1, 0, 2))
return data
def ArraysToTensor(xs):
"list of numpy array, each has the same demonsionality"
x = np.array([ list(x.shape) for x in xs], dtype=np.int64)
shape = [len(xs)] + list(x.max(axis = 0))
data = np.zeros(shape, dtype=np.int64)
for i, x in enumerate(xs):
slicing_shape = list(x.shape)
slices = tuple([slice(i, i+1)]+[slice(0, x) for x in slicing_shape])
data[slices] = x
return data
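# Illustrative shapes: stacking arrays of shape (2, 3) and (1, 5) yields a
# zero-padded batch of shape (2, 2, 5).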
def batchify(data, vocabs, unk_rate=0.):
_tok = ListsToTensor([ [CLS]+x['tok'] for x in data], vocabs['tok'], unk_rate=unk_rate)
_lem = ListsToTensor([ [CLS]+x['lem'] for x in data], vocabs['lem'], unk_rate=unk_rate)
_pos = ListsToTensor([ [CLS]+x['pos'] for x in data], vocabs['pos'], unk_rate=unk_rate)
_ner = ListsToTensor([ [CLS]+x['ner'] for x in data], vocabs['ner'], unk_rate=unk_rate)
_word_char = ListsofStringToTensor([ [CLS]+x['tok'] for x in data], vocabs['word_char'])
local_token2idx = [x['token2idx'] for x in data]
local_idx2token = [x['idx2token'] for x in data]
_cp_seq = ListsToTensor([ x['cp_seq'] for x in data], vocabs['predictable_concept'], local_token2idx)
_mp_seq = ListsToTensor([ x['mp_seq'] for x in data], vocabs['predictable_concept'], local_token2idx)
concept, edge = [], []
for x in data:
amr = x['amr']
concept_i, edge_i, _ = amr.root_centered_sort(vocabs['rel'].priority)
concept.append(concept_i)
edge.append(edge_i)
augmented_concept = [[DUM]+x+[END] for x in concept]
_concept_char_in = ListsofStringToTensor(augmented_concept, vocabs['concept_char'])[:-1]
_concept_in = ListsToTensor(augmented_concept, vocabs['concept'], unk_rate=unk_rate)[:-1]
_concept_out = ListsToTensor(augmented_concept, vocabs['predictable_concept'], local_token2idx)[1:]
out_conc_len, bsz = _concept_out.shape
_rel = np.full((1+out_conc_len, bsz, out_conc_len), vocabs['rel'].token2idx(PAD), dtype=np.int64)
# v: [<dummy>, concept_0, ..., concept_l, ..., concept_{n-1}, <end>] u: [<dummy>, concept_0, ..., concept_l, ..., concept_{n-1}]
for bidx, (x, y) in enumerate(zip(edge, concept)):
for l, _ in enumerate(y):
if l > 0:
# l=1 => pos=l+1=2
_rel[l+1, bidx, 1:l+1] = vocabs['rel'].token2idx(NIL)
for v, u, r in x:
r = vocabs['rel'].token2idx(r)
_rel[v+1, bidx, u+1] = r
ret = {'lem':_lem, 'tok':_tok, 'pos':_pos, 'ner':_ner, 'word_char':_word_char, \
'copy_seq': np.stack([_cp_seq, _mp_seq], -1), \
'local_token2idx':local_token2idx, 'local_idx2token': local_idx2token, \
'concept_in':_concept_in, 'concept_char_in':_concept_char_in, \
'concept_out':_concept_out, 'rel':_rel}
bert_tokenizer = vocabs.get('bert_tokenizer', None)
if bert_tokenizer is not None:
ret['bert_token'] = ArraysToTensor([ x['bert_token'] for x in data])
ret['token_subword_index'] = ArraysToTensor([ x['token_subword_index'] for x in data])
return ret
# Note that source can be a filename or a file-like object (i.e. an open file or io.StringIO)
# GPU_SIZE = 12000 # okay for 8G memory
class DataLoader(object):
def __init__(self, vocabs, source, batch_size, for_train, gpu_size=12000):
self.data = []
bert_tokenizer = vocabs.get('bert_tokenizer', None)
for amr, token, lemma, pos, ner in zip(*read_file(source)):
if for_train:
_, _, not_ok = amr.root_centered_sort()
if not_ok or len(token)==0:
continue
cp_seq, mp_seq, token2idx, idx2token = get_concepts(lemma, vocabs['predictable_concept'])
datum = {'amr':amr, 'tok':token, 'lem':lemma, 'pos':pos, 'ner':ner, \
'cp_seq':cp_seq, 'mp_seq':mp_seq,\
'token2idx':token2idx, 'idx2token':idx2token}
if bert_tokenizer is not None:
bert_token, token_subword_index = bert_tokenizer.tokenize(token)
datum['bert_token'] = bert_token
datum['token_subword_index'] = token_subword_index
self.data.append(datum)
self.vocabs = vocabs
self.batch_size = batch_size
self.train = for_train
self.unk_rate = 0.
self.gpu_size = gpu_size
def set_unk_rate(self, x):
self.unk_rate = x
def __iter__(self):
idx = list(range(len(self.data)))
if self.train:
random.shuffle(idx)
idx.sort(key = lambda x: len(self.data[x]['tok']) + len(self.data[x]['amr']))
batches = []
num_tokens, data = 0, []
for i in idx:
num_tokens += len(self.data[i]['tok']) + len(self.data[i]['amr'])
data.append(self.data[i])
if num_tokens >= self.batch_size:
sz = len(data)* (2 + max(len(x['tok']) for x in data) + max(len(x['amr']) for x in data))
if sz > self.gpu_size:
# because we only have limited GPU memory
batches.append(data[:len(data)//2])
data = data[len(data)//2:]
batches.append(data)
num_tokens, data = 0, []
if data:
sz = len(data)* (2 + max(len(x['tok']) for x in data) + max(len(x['amr']) for x in data))
if sz > self.gpu_size:
# because we only have limited GPU memory
batches.append(data[:len(data)//2])
data = data[len(data)//2:]
batches.append(data)
if self.train:
random.shuffle(batches)
for batch in batches:
yield batchify(batch, self.vocabs, self.unk_rate)
|
11470242
|
import basix
import numpy
import pytest
def xtest_create_simple():
# Creates Lagrange P1 element on triangle
# Point evaluation of polynomial set
degree = 1
points = numpy.array([[0, 0], [1, 0], [0, 1]], dtype=numpy.float64)
matrix = numpy.identity(points.shape[0])
# Create element from space and dual
coeff_space = numpy.identity(points.shape[0])
fe = basix.create_new_element("Custom element", "triangle", degree, [1], points, matrix, coeff_space,
[[1, 1, 1], [0, 0, 0], [0]], [numpy.identity(3) for i in range(3)],
basix.MappingType.identity)
numpy.set_printoptions(suppress=True, precision=2)
points = numpy.array([[.5, 0], [0, .5], [.5, .5]], dtype=numpy.float64)
print(fe.tabulate(0, points))
def xtest_create_custom():
# Creates second order element on triangle
# Point evaluation of polynomial set
degree = 2
points = numpy.array([[0, .5], [0.5, 0], [0.5, 0.5], [0.25, 0.25], [0.25, 0.5], [0.5, 0.25]], dtype=numpy.float64)
matrix = numpy.identity(points.shape[0])
# Create element from space and dual
coeff_space = numpy.identity(points.shape[0])
fe = basix.create_new_element("Custom element", "triangle", degree, [1], points, matrix, coeff_space,
[[0, 0, 0], [1, 1, 1], [3]],
[numpy.identity(5) for i in range(3)],
basix.MappingType.identity)
numpy.set_printoptions(suppress=True, precision=2)
points = numpy.array([[.25, 0], [0, .25], [.25, .25]], dtype=numpy.float64)
print(fe.tabulate(0, points))
def xtest_create_invalid():
degree = 2
# Try to create an invalid element of order 2
points = numpy.array([[0, 0.25], [0, 0.75], [0.25, 0.75], [0.75, 0.25],
[0.25, 0.0], [0.75, 0.0]], dtype=numpy.float64)
matrix = numpy.identity(points.shape[0])
# Create element from space and dual
coeff_space = numpy.identity(points.shape[0])
with pytest.raises(RuntimeError):
basix.create_new_element("Custom element", "triangle", degree, [1], points, matrix, coeff_space,
[[0, 0, 0], [2, 2, 2], [0]],
[numpy.identity(6) for i in range(3)], basix.MappingType.identity)
|
11470252
|
import os.path as osp
# data locations
prefix = './data'
train_name = 'deepfashion_train'
test_name = 'deepfashion_test'
knn = 5
knn_method = 'faiss'
train_data = dict(
feat_path=osp.join(prefix, 'features', '{}.bin'.format(train_name)),
label_path=osp.join(prefix, 'labels', '{}.meta'.format(train_name)),
knn_graph_path=osp.join(prefix, 'knns', train_name,
'{}_k_{}.npz'.format(knn_method, knn)),
k_at_hop=[5, 5],
active_connection=5,
is_norm_feat=True,
is_sort_knns=True,
)
test_data = dict(
feat_path=osp.join(prefix, 'features', '{}.bin'.format(test_name)),
label_path=osp.join(prefix, 'labels', '{}.meta'.format(test_name)),
knn_graph_path=osp.join(prefix, 'knns', test_name,
'{}_k_{}.npz'.format(knn_method, knn)),
k_at_hop=[5, 5],
active_connection=5,
is_norm_feat=True,
is_sort_knns=True,
is_test=True,
)
# model
model = dict(type='lgcn', kwargs=dict(feature_dim=256))
# training args
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4)
optimizer_config = {}
lr_config = dict(
policy='step',
step=[1, 2, 3],
)
batch_size_per_gpu = 16
total_epochs = 4
workflow = [('train', 1)]
# testing args
max_sz = 50
step = 0.5
pool = 'avg'
metrics = ['pairwise', 'bcubed', 'nmi']
# misc
workers_per_gpu = 1
checkpoint_config = dict(interval=1)
log_level = 'INFO'
log_config = dict(interval=200, hooks=[
dict(type='TextLoggerHook'),
])
|
11470269
|
from django.conf.urls import include, url
from localshop.apps.dashboard import views
app_name = 'dashboard'
repository_urls = [
# Package urls
url(r'^packages/add/$',
views.PackageAddView.as_view(),
name='package_add'),
url(r'^packages/(?P<name>[-._\w]+)/', include([
url(r'^$',
views.PackageDetailView.as_view(),
name='package_detail'),
url(r'^refresh-from-upstream/$',
views.PackageRefreshView.as_view(),
name='package_refresh'),
url(r'^release-mirror-file/$',
views.PackageMirrorFileView.as_view(),
name='package_mirror_file'),
])),
# CIDR
url(r'^settings/cidr/$',
views.CidrListView.as_view(), name='cidr_index'),
url(r'^settings/cidr/create$',
views.CidrCreateView.as_view(), name='cidr_create'),
url(r'^settings/cidr/(?P<pk>\d+)/edit',
views.CidrUpdateView.as_view(), name='cidr_edit'),
url(r'^settings/cidr/(?P<pk>\d+)/delete',
views.CidrDeleteView.as_view(), name='cidr_delete'),
# Credentials
url(r'^settings/credentials/$',
views.CredentialListView.as_view(),
name='credential_index'),
url(r'^settings/credentials/create$',
views.CredentialCreateView.as_view(),
name='credential_create'),
url(r'^settings/credentials/(?P<access_key>[-a-f0-9]+)/secret',
views.CredentialSecretKeyView.as_view(),
name='credential_secret'),
url(r'^settings/credentials/(?P<access_key>[-a-f0-9]+)/edit',
views.CredentialUpdateView.as_view(),
name='credential_edit'),
url(r'^settings/credentials/(?P<access_key>[-a-f0-9]+)/delete',
views.CredentialDeleteView.as_view(),
name='credential_delete'),
url(r'^settings/teams/$', views.TeamAccessView.as_view(), name='team_access'),
]
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^repositories/create$', views.RepositoryCreateView.as_view(), name='repository_create'),
url(r'^repositories/(?P<slug>[^/]+)/', include([
url(r'^$', views.RepositoryDetailView.as_view(), name='repository_detail'),
url(r'^edit$', views.RepositoryUpdateView.as_view(), name='repository_edit'),
url(r'^delete$', views.RepositoryDeleteView.as_view(), name='repository_delete'),
url(r'^refresh$', views.RepositoryRefreshView.as_view(), name='repository_refresh'),
])),
url(r'^repositories/(?P<repo>[^/]+)/', include(repository_urls))
]
|
11470293
|
from plenum.test.testable import spyable
from sovrin_client.agent.walleted_agent import WalletedAgent
from sovrin_client.agent.runnable_agent import RunnableAgent
# @spyable(
# methods=[WalletedAgent._handlePing, WalletedAgent._handlePong])
class TestWalletedAgent(WalletedAgent, RunnableAgent):
pass
|
11470305
|
from logging import getLogger
from shapely.geometry import Point, Polygon, shape, box, LineString
from shapely import speedups
from geopy import Nominatim
from pogeo import get_distance
if speedups.available:
speedups.enable()
class FailedQuery(Exception):
"""Raised when no location is found."""
class Landmark:
''' Contains information about user-defined landmarks.'''
log = getLogger('landmarks')
def __init__(self, name, shortname=None, points=None, query=None,
hashtags=None, phrase=None, is_area=False, query_suffix=None):
self.name = name
self.shortname = shortname
self.is_area = is_area
        if not points and not query:
            # str.lstrip removes a character set, not a prefix, so strip a
            # leading "the " explicitly
            query = name[4:] if name.lower().startswith('the ') else name
if ((query_suffix and query) and
query_suffix.lower() not in query.lower()):
query = '{} {}'.format(query, query_suffix)
self.location = None
if query:
self.query_location(query)
elif points:
try:
length = len(points)
if length > 2:
self.location = Polygon(points)
elif length == 2:
self.location = box(points[0][0], points[0][1],
points[1][0], points[1][1])
elif length == 1:
self.location = Point(*points[0])
except TypeError:
raise ValueError('points must be a list/tuple of lists/tuples'
' containing 2 coordinates each')
if not self.location:
raise ValueError('No location provided for {}. Must provide'
' either points, or query.'.format(self.name))
elif not isinstance(self.location, (Point, Polygon, LineString)):
raise NotImplementedError('{} is a {} which is not supported'
.format(self.name, self.location.type))
self.south, self.west, self.north, self.east = self.location.bounds
# very imprecise conversion to square meters
self.size = self.location.area * 12100000000
if phrase:
self.phrase = phrase
elif is_area:
self.phrase = 'in'
else:
self.phrase = 'at'
self.hashtags = hashtags
def __contains__(self, coordinates):
"""determine if a point is within this object range"""
lat, lon = coordinates
if (self.south <= lat <= self.north and
self.west <= lon <= self.east):
return self.location.contains(Point(lat, lon))
return False
def query_location(self, query):
def swap_coords(geojson):
out = []
for x in geojson:
if isinstance(x, list):
out.append(swap_coords(x))
else:
return geojson[1], geojson[0]
return out
nom = Nominatim()
try:
geo = nom.geocode(query=query, geometry='geojson', timeout=3).raw
geojson = geo['geojson']
except (AttributeError, KeyError):
raise FailedQuery('Query for {} did not return results.'.format(query))
self.log.info('Nominatim returned {} for {}'.format(geo['display_name'], query))
geojson['coordinates'] = swap_coords(geojson['coordinates'])
self.location = shape(geojson)
def get_coordinates(self):
if isinstance(self.location, Polygon):
return tuple(self.location.exterior.coordinates)
else:
return self.location.coords[0]
def generate_string(self, coordinates):
if coordinates in self:
return '{} {}'.format(self.phrase, self.name)
distance = self.distance_from_point(coordinates)
if distance < 50 or (self.is_area and distance < 100):
return '{} {}'.format(self.phrase, self.name)
else:
return '{:.0f} meters from {}'.format(distance, self.name)
def distance_from_point(self, coordinates):
point = Point(*coordinates)
if isinstance(self.location, Point):
nearest = self.location
else:
nearest = self.nearest_point(point)
return get_distance(coordinates, nearest.coords[0])
def nearest_point(self, point):
'''Find nearest point in geometry, measured from given point.'''
if isinstance(self.location, Polygon):
segs = self.pairs(self.location.exterior.coords)
elif isinstance(self.location, LineString):
segs = self.pairs(self.location.coords)
else:
            raise NotImplementedError('nearest_point not implemented '
                                      "for geometry type '{}'.".format(
self.location.type))
nearest_point = None
min_dist = float("inf")
for seg_start, seg_end in segs:
line_start = Point(seg_start)
line_end = Point(seg_end)
intersection_point = self.project_point_to_line(
point, line_start, line_end)
cur_dist = point.distance(intersection_point)
if cur_dist < min_dist:
min_dist = cur_dist
nearest_point = intersection_point
return nearest_point
@staticmethod
def pairs(lst):
"""Iterate over a list in overlapping pairs."""
i = iter(lst)
prev = next(i)
for item in i:
yield prev, item
prev = item
@staticmethod
def project_point_to_line(point, line_start, line_end):
'''Find nearest point on a straight line,
measured from given point.'''
line_magnitude = line_start.distance(line_end)
u = (((point.x - line_start.x) * (line_end.x - line_start.x) +
(point.y - line_start.y) * (line_end.y - line_start.y))
/ (line_magnitude ** 2))
# closest point does not fall within the line segment,
# take the shorter distance to an endpoint
if u < 0.00001 or u > 1:
ix = point.distance(line_start)
iy = point.distance(line_end)
if ix > iy:
return line_end
else:
return line_start
else:
ix = line_start.x + u * (line_end.x - line_start.x)
iy = line_start.y + u * (line_end.y - line_start.y)
return Point([ix, iy])
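        # Worked example (illustrative numbers): projecting Point(1, 1) onto the
        # segment (0, 0)-(2, 0) gives u == 0.5, i.e. Point(1, 0); for Point(3, 1),
        # u > 1, so the nearer endpoint Point(2, 0) is returned.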
class Landmarks:
def __init__(self, query_suffix=None):
self.points_of_interest = set()
self.areas = set()
self.query_suffix = query_suffix
def add(self, *args, **kwargs):
if ('query_suffix' not in kwargs) and self.query_suffix and (
'query' not in kwargs):
kwargs['query_suffix'] = self.query_suffix
landmark = Landmark(*args, **kwargs)
if landmark.is_area:
self.areas.add(landmark)
else:
self.points_of_interest.add(landmark)
if landmark.size < 1:
print(landmark.name, type(landmark.location), '\n')
else:
print(landmark.name, landmark.size, type(landmark.location), '\n')
def find_landmark(self, coords, max_distance=750):
landmark = find_within(self.points_of_interest, coords)
if landmark:
return landmark
landmark, distance = find_closest(self.points_of_interest, coords)
try:
if distance < max_distance:
return landmark
except TypeError:
pass
area = find_within(self.areas, coords)
if area:
return area
area, area_distance = find_closest(self.areas, coords)
try:
if area and area_distance < distance:
return area
else:
return landmark
except TypeError:
return area
def find_within(landmarks, coordinates):
within = [landmark for landmark in landmarks if coordinates in landmark]
found = len(within)
if found == 1:
return within[0]
if found:
landmarks = iter(within)
smallest = next(landmarks)
        smallest_size = smallest.size
for landmark in landmarks:
if landmark.size < smallest_size:
smallest = landmark
smallest_size = landmark.size
return smallest
return None
def find_closest(landmarks, coordinates):
landmarks = iter(landmarks)
try:
closest_landmark = next(landmarks)
except StopIteration:
return None, None
shortest_distance = closest_landmark.distance_from_point(coordinates)
for landmark in landmarks:
distance = landmark.distance_from_point(coordinates)
if distance <= shortest_distance:
if (distance == shortest_distance
and landmark.size > closest_landmark.size):
continue
shortest_distance = distance
closest_landmark = landmark
return closest_landmark, shortest_distance
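# Minimal usage sketch (toy coordinates, illustrative only):
#     marks = Landmarks()
#     marks.add('origin', points=[(0, 0)])
#     marks.add('unit box', points=[(0, 0), (1, 1)], is_area=True)
#     marks.find_landmark((0.5, 0.5))  # matches the 'unit box' area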
|
11470307
|
from mxnet import gluon
from mxnet.gluon import HybridBlock
from ceecnet.nn.layers.conv2Dnormed import *
from ceecnet.nn.layers.attention import *
from ceecnet.nn.pooling.psp_pooling import *
from ceecnet.nn.layers.scale import *
from ceecnet.nn.layers.combine import *
# CEEC units
from ceecnet.nn.units.ceecnet import *
# FracTALResUnit
from ceecnet.nn.units.fractal_resnet import *
"""
if upFuse == True, then instead of concatenation of the encoder features with the decoder features, the algorithm performs Fusion with
relative attention.
"""
class mantis_dn_features(HybridBlock):
def __init__(self, nfilters_init, depth, widths=[1], psp_depth=4, verbose=True, norm_type='BatchNorm', norm_groups=None, nheads_start=8, model='CEECNetV1', upFuse=False, ftdepth=5, **kwards):
super().__init__(**kwards)
self.depth = depth
if len(widths) == 1 and depth != 1:
widths = widths * depth
        else:
            if depth != len(widths):
                raise ValueError("depth and length of widths must match, aborting ...")
with self.name_scope():
self.conv_first = Conv2DNormed(nfilters_init,kernel_size=(1,1), _norm_type = norm_type, norm_groups=norm_groups)
self.fuse_first = Fusion(nfilters_init, norm=norm_type, norm_groups=norm_groups)
# List of convolutions and pooling operators
self.convs_dn = gluon.nn.HybridSequential()
self.pools = gluon.nn.HybridSequential()
self.fuse = gluon.nn.HybridSequential()
for idx in range(depth):
                nheads = nheads_start * 2**idx
                nfilters = nfilters_init * 2**idx
if verbose:
print ("depth:= {0}, nfilters: {1}, nheads::{2}, widths::{3}".format(idx,nfilters,nheads,widths[idx]))
tnet = gluon.nn.HybridSequential()
for _ in range(widths[idx]):
if model == 'CEECNetV1':
tnet.add(CEEC_unit_v1(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
elif model == 'CEECNetV2':
tnet.add(CEEC_unit_v2(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
elif model == 'FracTALResNet':
tnet.add(FracTALResNet_unit(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
else:
raise ValueError("I don't know requested model, aborting ... - Given model::{}".format(model))
self.convs_dn.add(tnet)
if idx < depth-1:
self.fuse.add( Fusion( nfilters=nfilters, nheads = nheads , norm = norm_type, norm_groups=norm_groups) )
self.pools.add(DownSample(nfilters, _norm_type=norm_type, norm_groups=norm_groups))
# Middle pooling operator
self.middle = PSP_Pooling(nfilters,depth=psp_depth, _norm_type=norm_type,norm_groups=norm_groups)
self.convs_up = gluon.nn.HybridSequential() # 1 argument
self.UpCombs = gluon.nn.HybridSequential() # 2 arguments
for idx in range(depth-1,0,-1):
nheads = nheads_start * 2**idx
                nfilters = nfilters_init * 2**(idx-1)
if verbose:
print ("depth:= {0}, nfilters: {1}, nheads::{2}, widths::{3}".format(2*depth-idx-1,nfilters,nheads,widths[idx]))
tnet = gluon.nn.HybridSequential()
for _ in range(widths[idx]):
if model == 'CEECNetV1':
tnet.add(CEEC_unit_v1(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
elif model == 'CEECNetV2':
tnet.add(CEEC_unit_v2(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
elif model == 'FracTALResNet':
tnet.add(FracTALResNet_unit(nfilters=nfilters, nheads = nheads, ngroups = nheads , norm_type = norm_type, norm_groups=norm_groups,ftdepth=ftdepth))
else:
raise ValueError("I don't know requested model, aborting ... - Given model::{}".format(model))
self.convs_up.add(tnet)
                if upFuse:
self.UpCombs.add(combine_layers_wthFusion(nfilters=nfilters, nheads=nheads, _norm_type=norm_type,norm_groups=norm_groups,ftdepth=ftdepth))
else:
self.UpCombs.add(combine_layers(nfilters, _norm_type=norm_type,norm_groups=norm_groups))
def hybrid_forward(self, F, input_t1, input_t2):
conv1_t1 = self.conv_first(input_t1)
conv1_t2 = self.conv_first(input_t2)
fuse1 = self.fuse_first(conv1_t1,conv1_t2)
# ******** Going down ***************
fusions = []
# Workaround of a mxnet bug
# https://github.com/apache/incubator-mxnet/issues/16736
pools1 = F.identity(conv1_t1)
pools2 = F.identity(conv1_t2)
for idx in range(self.depth):
conv1 = self.convs_dn[idx](pools1)
conv2 = self.convs_dn[idx](pools2)
if idx < self.depth-1:
# Evaluate fusions
conv1 = F.identity(conv1)
conv2 = F.identity(conv2)
fusions = fusions + [self.fuse[idx](conv1,conv2)]
# Evaluate pools
pools1 = self.pools[idx](conv1)
pools2 = self.pools[idx](conv2)
# Middle psppooling
middle = self.middle(F.concat(conv1,conv2, dim=1))
# Activation of middle layer
middle = F.relu(middle)
fusions = fusions + [middle]
# ******* Coming up ****************
convs_up = middle
for idx in range(self.depth-1):
convs_up = self.UpCombs[idx](convs_up, fusions[-idx-2])
convs_up = self.convs_up[idx](convs_up)
return convs_up, fuse1
|
11470346
|
from contextlib import contextmanager
from datetime import timedelta
from uuid import uuid4
from django.conf import settings
from django.db.models.deletion import ProtectedError
from django.test import SimpleTestCase, TestCase
from django.utils import timezone
from nose.tools import assert_in
from corehq.motech.const import ALGO_AES, BASIC_AUTH
from corehq.motech.models import ConnectionSettings
from corehq.motech.utils import b64_aes_encrypt
from ..const import (
MAX_ATTEMPTS,
MAX_BACKOFF_ATTEMPTS,
MIN_RETRY_WAIT,
RECORD_CANCELLED_STATE,
RECORD_FAILURE_STATE,
RECORD_PENDING_STATE,
RECORD_SUCCESS_STATE,
)
from ..models import (
FormRepeater,
SQLRepeater,
are_repeat_records_migrated,
format_response,
get_all_repeater_types,
is_response,
)
DOMAIN = 'test-domain-ap'
def test_get_all_repeater_types():
types = get_all_repeater_types()
for cls in settings.REPEATER_CLASSES:
name = cls.split('.')[-1]
assert_in(name, types)
class RepeaterTestCase(TestCase):
def setUp(self):
super().setUp()
url = 'https://www.example.com/api/'
conn = ConnectionSettings.objects.create(domain=DOMAIN, name=url, url=url)
self.repeater = FormRepeater(
domain=DOMAIN,
url=url,
connections_settings_id=conn.id
)
self.repeater.save()
self.sql_repeater = SQLRepeater.objects.create(
domain=DOMAIN,
repeater_id=self.repeater.get_id,
connection_settings=conn,
)
def tearDown(self):
if self.repeater.connection_settings_id:
ConnectionSettings.objects.filter(
pk=self.repeater.connection_settings_id
).delete()
self.sql_repeater.delete()
self.repeater.delete()
super().tearDown()
class RepeaterConnectionSettingsTests(RepeaterTestCase):
def test_create_connection_settings(self):
self.assertIsNone(self.repeater.connection_settings_id)
conn = self.repeater.connection_settings
self.assertIsNotNone(self.repeater.connection_settings_id)
self.assertEqual(conn.name, self.repeater.url)
def test_notify_addresses(self):
self.repeater.notify_addresses_str = "<EMAIL>"
conn = self.repeater.connection_settings
self.assertEqual(conn.notify_addresses, ["<EMAIL>"])
def test_notify_addresses_none(self):
self.repeater.notify_addresses_str = None
conn = self.repeater.connection_settings
self.assertEqual(conn.notify_addresses, [])
def test_password_encrypted(self):
self.repeater.auth_type = BASIC_AUTH
self.repeater.username = "terry"
self.repeater.password = "<PASSWORD>!"
conn = self.repeater.connection_settings
self.assertEqual(self.repeater.plaintext_password, conn.plaintext_password)
# repeater.password was saved decrypted; conn.password is not:
self.assertNotEqual(self.repeater.password, conn.password)
def test_password_bug(self):
self.repeater.auth_type = BASIC_AUTH
self.repeater.username = "terry"
plaintext = "Don't save me decrypted!"
ciphertext = b64_aes_encrypt(plaintext)
bytestring_repr = f"b'{ciphertext}'" # bug fixed by commit 3a900068
self.repeater.password = f'${ALGO_AES}${bytestring_repr}'
conn = self.repeater.connection_settings
self.assertEqual(conn.plaintext_password, self.repeater.plaintext_password)
class TestRepeaterName(RepeaterTestCase):
def test_migrated_name(self):
"""
When ConnectionSettings are migrated from an old Repeater,
ConnectionSettings.name is set to Repeater.url
"""
connection_settings = self.repeater.connection_settings
self.assertEqual(connection_settings.name, self.repeater.url)
self.assertEqual(self.repeater.name, connection_settings.name)
def test_repeater_name(self):
connection_settings = ConnectionSettings.objects.create(
domain=DOMAIN,
name='Example Server',
url='https://example.com/api/',
)
self.repeater.connection_settings_id = connection_settings.id
self.repeater.save()
self.assertEqual(self.repeater.name, connection_settings.name)
class TestSQLRepeatRecordOrdering(RepeaterTestCase):
def setUp(self):
super().setUp()
self.sql_repeater.repeat_records.create(
domain=DOMAIN,
payload_id='eve',
registered_at='1970-02-01',
)
def test_earlier_record_created_later(self):
self.sql_repeater.repeat_records.create(
domain=self.sql_repeater.domain,
payload_id='lilith',
# If Unix time starts on 1970-01-01, then I guess 1970-01-06
# is Unix R<NAME>, the sixth day of Creation, the day
# [Lilith][1] and Adam were created from clay.
# [1] https://en.wikipedia.org/wiki/Lilith
registered_at='1970-01-06',
)
repeat_records = self.sql_repeater.repeat_records.all()
self.assertEqual(repeat_records[0].payload_id, 'lilith')
self.assertEqual(repeat_records[1].payload_id, 'eve')
def test_later_record_created_later(self):
self.sql_repeater.repeat_records.create(
domain=self.sql_repeater.domain,
payload_id='cain',
registered_at='1995-01-06',
)
repeat_records = self.sql_repeater.repeat_records.all()
self.assertEqual(repeat_records[0].payload_id, 'eve')
self.assertEqual(repeat_records[1].payload_id, 'cain')
class RepeaterManagerTests(RepeaterTestCase):
def test_all_ready_no_repeat_records(self):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 0)
def test_all_ready_pending_repeat_record(self):
with make_repeat_record(self.sql_repeater, RECORD_PENDING_STATE):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 1)
self.assertEqual(sql_repeaters[0].id, self.sql_repeater.id)
def test_all_ready_failed_repeat_record(self):
with make_repeat_record(self.sql_repeater, RECORD_FAILURE_STATE):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 1)
self.assertEqual(sql_repeaters[0].id, self.sql_repeater.id)
def test_all_ready_succeeded_repeat_record(self):
with make_repeat_record(self.sql_repeater, RECORD_SUCCESS_STATE):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 0)
def test_all_ready_cancelled_repeat_record(self):
with make_repeat_record(self.sql_repeater, RECORD_CANCELLED_STATE):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 0)
def test_all_ready_paused(self):
with make_repeat_record(self.sql_repeater, RECORD_PENDING_STATE), \
pause(self.sql_repeater):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 0)
def test_all_ready_next_future(self):
in_five_mins = timezone.now() + timedelta(minutes=5)
with make_repeat_record(self.sql_repeater, RECORD_PENDING_STATE), \
set_next_attempt_at(self.sql_repeater, in_five_mins):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 0)
def test_all_ready_next_past(self):
five_mins_ago = timezone.now() - timedelta(minutes=5)
with make_repeat_record(self.sql_repeater, RECORD_PENDING_STATE), \
set_next_attempt_at(self.sql_repeater, five_mins_ago):
sql_repeaters = SQLRepeater.objects.all_ready()
self.assertEqual(len(sql_repeaters), 1)
self.assertEqual(sql_repeaters[0].id, self.sql_repeater.id)
@contextmanager
def make_repeat_record(sql_repeater, state):
repeat_record = sql_repeater.repeat_records.create(
domain=sql_repeater.domain,
payload_id=str(uuid4()),
state=state,
registered_at=timezone.now()
)
try:
yield repeat_record
finally:
repeat_record.delete()
@contextmanager
def pause(sql_repeater):
sql_repeater.is_paused = True
sql_repeater.save()
try:
yield
finally:
sql_repeater.is_paused = False
sql_repeater.save()
@contextmanager
def set_next_attempt_at(sql_repeater, when):
sql_repeater.next_attempt_at = when
sql_repeater.save()
try:
yield
finally:
sql_repeater.next_attempt_at = None
sql_repeater.save()
class ResponseMock:
pass
class IsResponseTests(SimpleTestCase):
def test_has_text(self):
resp = ResponseMock()
resp.text = '<h1>Hello World</h1>'
self.assertFalse(is_response(resp))
def test_has_status_code(self):
resp = ResponseMock()
resp.status_code = 504
self.assertFalse(is_response(resp))
def test_has_reason(self):
resp = ResponseMock()
resp.reason = 'Gateway Timeout'
self.assertFalse(is_response(resp))
def test_has_status_code_and_reason(self):
resp = ResponseMock()
resp.status_code = 504
resp.reason = 'Gateway Timeout'
self.assertTrue(is_response(resp))
class FormatResponseTests(SimpleTestCase):
def test_non_response(self):
resp = ResponseMock()
self.assertIsNone(format_response(resp))
def test_no_text(self):
resp = ResponseMock()
resp.status_code = 504
resp.reason = 'Gateway Timeout'
self.assertEqual(format_response(resp), '504: Gateway Timeout')
def test_with_text(self):
resp = ResponseMock()
resp.status_code = 200
resp.reason = 'OK'
resp.text = '<h1>Hello World</h1>'
self.assertEqual(format_response(resp), '200: OK\n'
'<h1>Hello World</h1>')
class AddAttemptsTests(RepeaterTestCase):
def setUp(self):
super().setUp()
self.just_now = timezone.now()
self.sql_repeater.next_attempt_at = self.just_now
self.sql_repeater.save()
self.repeat_record = self.sql_repeater.repeat_records.create(
domain=DOMAIN,
payload_id='eggs',
registered_at=timezone.now(),
)
def test_add_success_attempt_true(self):
self.repeat_record.add_success_attempt(response=True)
self.assertEqual(self.repeat_record.state, RECORD_SUCCESS_STATE)
self.assertIsNone(self.sql_repeater.next_attempt_at)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state,
RECORD_SUCCESS_STATE)
self.assertEqual(self.repeat_record.attempts[0].message, '')
def test_add_success_attempt_200(self):
resp = ResponseMock()
resp.status_code = 200
resp.reason = 'OK'
resp.text = '<h1>Hello World</h1>'
self.repeat_record.add_success_attempt(response=resp)
self.assertEqual(self.repeat_record.state, RECORD_SUCCESS_STATE)
self.assertIsNone(self.sql_repeater.next_attempt_at)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state,
RECORD_SUCCESS_STATE)
self.assertEqual(self.repeat_record.attempts[0].message,
format_response(resp))
def test_add_server_failure_attempt_fail(self):
message = '504: Gateway Timeout'
self.repeat_record.add_server_failure_attempt(message=message)
self.assertEqual(self.repeat_record.state, RECORD_FAILURE_STATE)
self.assertGreater(self.sql_repeater.last_attempt_at, self.just_now)
self.assertEqual(self.sql_repeater.next_attempt_at,
self.sql_repeater.last_attempt_at + MIN_RETRY_WAIT)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state,
RECORD_FAILURE_STATE)
self.assertEqual(self.repeat_record.attempts[0].message, message)
self.assertEqual(self.repeat_record.attempts[0].traceback, '')
def test_add_server_failure_attempt_cancel(self):
message = '504: Gateway Timeout'
while self.repeat_record.state != RECORD_CANCELLED_STATE:
self.repeat_record.add_server_failure_attempt(message=message)
self.assertGreater(self.sql_repeater.last_attempt_at, self.just_now)
# Interval is MIN_RETRY_WAIT because attempts were very close together
self.assertEqual(self.sql_repeater.next_attempt_at,
self.sql_repeater.last_attempt_at + MIN_RETRY_WAIT)
self.assertEqual(self.repeat_record.num_attempts,
MAX_BACKOFF_ATTEMPTS + 1)
attempts = list(self.repeat_record.attempts)
expected_states = ([RECORD_FAILURE_STATE] * MAX_BACKOFF_ATTEMPTS
+ [RECORD_CANCELLED_STATE])
self.assertEqual([a.state for a in attempts], expected_states)
self.assertEqual(attempts[-1].message, message)
self.assertEqual(attempts[-1].traceback, '')
def test_add_client_failure_attempt_fail(self):
message = '409: Conflict'
self.repeat_record.add_client_failure_attempt(message=message)
self.assertEqual(self.repeat_record.state, RECORD_FAILURE_STATE)
self.assertIsNone(self.sql_repeater.last_attempt_at)
self.assertIsNone(self.sql_repeater.next_attempt_at)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state,
RECORD_FAILURE_STATE)
self.assertEqual(self.repeat_record.attempts[0].message, message)
self.assertEqual(self.repeat_record.attempts[0].traceback, '')
def test_add_client_failure_attempt_cancel(self):
message = '409: Conflict'
while self.repeat_record.state != RECORD_CANCELLED_STATE:
self.repeat_record.add_client_failure_attempt(message=message)
self.assertIsNone(self.sql_repeater.last_attempt_at)
self.assertIsNone(self.sql_repeater.next_attempt_at)
self.assertEqual(self.repeat_record.num_attempts,
MAX_ATTEMPTS + 1)
attempts = list(self.repeat_record.attempts)
expected_states = ([RECORD_FAILURE_STATE] * MAX_ATTEMPTS
+ [RECORD_CANCELLED_STATE])
self.assertEqual([a.state for a in attempts], expected_states)
self.assertEqual(attempts[-1].message, message)
self.assertEqual(attempts[-1].traceback, '')
def test_add_client_failure_attempt_no_retry(self):
message = '422: Unprocessable Entity'
while self.repeat_record.state != RECORD_CANCELLED_STATE:
self.repeat_record.add_client_failure_attempt(message=message, retry=False)
self.assertIsNone(self.sql_repeater.last_attempt_at)
self.assertIsNone(self.sql_repeater.next_attempt_at)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state, RECORD_CANCELLED_STATE)
self.assertEqual(self.repeat_record.attempts[0].message, message)
self.assertEqual(self.repeat_record.attempts[0].traceback, '')
def test_add_payload_exception_attempt(self):
message = 'ValueError: Schema validation failed'
tb_str = 'Traceback ...'
self.repeat_record.add_payload_exception_attempt(message=message,
tb_str=tb_str)
self.assertEqual(self.repeat_record.state, RECORD_CANCELLED_STATE)
        # Note: payload issues on our side do not affect how we handle the
        # remote server's issues:
self.assertEqual(self.sql_repeater.next_attempt_at, self.just_now)
self.assertEqual(self.repeat_record.num_attempts, 1)
self.assertEqual(self.repeat_record.attempts[0].state,
RECORD_CANCELLED_STATE)
self.assertEqual(self.repeat_record.attempts[0].message, message)
self.assertEqual(self.repeat_record.attempts[0].traceback, tb_str)
class TestAreRepeatRecordsMigrated(RepeaterTestCase):
def setUp(self):
super().setUp()
are_repeat_records_migrated.clear(DOMAIN)
def test_no(self):
is_migrated = are_repeat_records_migrated(DOMAIN)
self.assertFalse(is_migrated)
def test_yes(self):
with make_repeat_record(self.sql_repeater, RECORD_PENDING_STATE):
is_migrated = are_repeat_records_migrated(DOMAIN)
self.assertTrue(is_migrated)
class TestSQLRepeaterConnectionSettings(RepeaterTestCase):
def test_connection_settings_are_accessible(self):
self.assertEqual(self.sql_repeater.connection_settings.url, 'https://www.example.com/api/')
def test_used_connection_setting_cannot_be_deleted(self):
with self.assertRaises(ProtectedError):
self.sql_repeater.connection_settings.delete()
|
11470375
|
import os
from pathlib import Path
import shutil
from unittest.mock import MagicMock
import pytest
from volttron.platform.vip.agent import Agent
from volttron.platform.web import PlatformWebService
from volttrontesting.utils.utils import AgentMock
from volttrontesting.utils.web_utils import get_test_web_env
@pytest.fixture()
def mock_platformweb_service() -> PlatformWebService:
PlatformWebService.__bases__ = (AgentMock.imitate(Agent, Agent()),)
platformweb = PlatformWebService(serverkey=MagicMock(),
identity=MagicMock(),
address=MagicMock(),
bind_web_address=MagicMock())
# rpc_caller = platformweb.vip.rpc
# platformweb._admin_endpoints = AdminEndpoints(rpc_caller=rpc_caller)
# Internally the register uses this value to determine the caller's identity
# to allow the platform web service to map calls back to the proper agent
platformweb.vip.rpc.context.vip_message.peer.return_value = "foo"
yield platformweb
def test_register_routes(mock_platformweb_service):
html_root = "/tmp/junk/html"
attempt_to_get_file = "/tmp/junk/index.html"
should_get_index_file = os.path.join(html_root, "index.html")
file_contents_bad = "HOLY COW!"
file_contents_good = "Woot there it is!"
try:
os.makedirs(html_root, exist_ok=True)
with open(attempt_to_get_file, "w") as should_not_get:
should_not_get.write(file_contents_bad)
with open(should_get_index_file, "w") as should_get:
should_get.write(file_contents_good)
pws = mock_platformweb_service
pws.register_path_route(f"/.*", html_root)
pws.register_path_route(f"/flubber", ".")
# Test to make sure the route is resolved to a full directory so easier
# to detect chroot for html paths.
assert len(pws.registeredroutes) == 2
for x in pws.registeredroutes:
# x is a tuple regex, 'path', directory
assert Path(x[2]).is_absolute()
start_response = MagicMock()
data = pws.app_routing(get_test_web_env("/index.html"), start_response)
data = "".join([x.decode("utf-8") for x in data])
assert "200 OK" in start_response.call_args[0]
assert data == file_contents_good
# Test relative route to the index.html file above the html_root, but using a
# rooted path to do so.
start_response.reset_mock()
data = pws.app_routing(get_test_web_env("/../index.html"), start_response)
data = "".join([x.decode("utf-8") for x in data])
assert "403 Forbidden" in start_response.call_args[0]
assert "403 Forbidden" in data
# Test relative route to the index.html file above the html_root.
start_response.reset_mock()
data = pws.app_routing(get_test_web_env("../index.html"), start_response)
data = "".join([x.decode("utf-8") for x in data])
assert "200 OK" not in start_response.call_args[0]
assert data != file_contents_bad
finally:
shutil.rmtree(str(Path(html_root).parent), ignore_errors=True)
|
11470461
|
import sys
import inspect
from types import FunctionType
def magic():
s = ''
f_locals = sys._getframe(1).f_locals
for var, value in inspect.getmembers(f_locals['self']):
if not (var.startswith('__') and var.endswith('__')) \
and var not in f_locals:
s += var + ' = self.' + var + '\n'
return s
def outdent_lines(lines):
outer_ws_count = 0
for ch in lines[0]:
if not ch.isspace():
break
outer_ws_count += 1
return [line[outer_ws_count:] for line in lines]
def insert_self_in_header(header):
return header[0:header.find('(') + 1] + 'self, ' + \
header[header.find('(') + 1:]
def get_indent_string(srcline):
indent = ''
for ch in srcline:
if not ch.isspace():
break
indent += ch
return indent
def rework(func):
srclines, line_num = inspect.getsourcelines(func)
srclines = outdent_lines(srclines)
dst = insert_self_in_header(srclines[0])
if len(srclines) > 1:
dst += get_indent_string(srclines[1]) + 'exec(magic())\n'
for line in srclines[1:]:
dst += line
dst += 'new_func = eval(func.__name__)\n'
exec(dst)
return new_func
class WithoutSelf(type):
def __init__(self, name, bases, attrs):
super(WithoutSelf, self).__init__(name, bases, attrs)
try:
for attr, value in attrs.items():
if isinstance(value, FunctionType):
setattr(self, attr, rework(value))
except IOError:
print "Couldn't read source code - it wont work."
sys.exit()
class Person(object):
__metaclass__ = WithoutSelf
def __init__(name):
self.name = name
def sayHi(name=None):
print 'Hi {}!'.format(name or self.name)
p = Person('World')
p.sayHi()
p.sayHi('Python')
|
11470486
|
import os
import shutil
from pipscc import pipscc
pipscc(["pipscc","-c" , "basics0.c", "-o" , "/tmp/bb.o" ]).run()
pipscc(["pipscc","/tmp/bb.o", "-o" , "a.out"]).run()
os.remove("a.out")
os.remove("/tmp/bb.o")
|
11470494
|
import os
import logging
import taco.logger.consts as logger_consts
LOGS_BASE_DIR_PATH = r'./Output/Logs/'
class KwargsLogger(object):
def __init__(self, logger=None, log_level=logging.DEBUG):
self._logger = logger
self.set_level(log_level)
    def _format_message(self, message, **kwargs):
        formatted_kwargs = []
        for key, value in kwargs.items():
            formatted_kwargs.append('{0}={1}'.format(str(key).replace('\'', ''), str(value).replace('\'', '')))
        return '{0}: {1}'.format(message, ', '.join(formatted_kwargs))
def get_child(self, logger_name):
child_logger = self._logger.getChild(logger_name)
return KwargsLogger(logger=child_logger)
def log_and_raise(self, exception_type, *exception_args, **exception_kwargs):
exception_obj = exception_type(*exception_args, **exception_kwargs)
self._logger.error(str(exception_obj))
raise exception_obj
def add_handler(self, handler):
self._logger.addHandler(handler)
def set_level(self, level):
self._logger.setLevel(level)
def error(self, msg, *args, **kwargs):
format_message = self._format_message(msg, **kwargs)
self._logger.error(format_message)
def info(self, msg, *args, **kwargs):
format_message = self._format_message(msg, **kwargs)
self._logger.info(format_message)
def debug(self, msg, *args, **kwargs):
format_message = self._format_message(msg, **kwargs)
self._logger.debug(format_message)
def warn(self, msg, *args, **kwargs):
format_message = self._format_message(msg, **kwargs)
self._logger.warning(format_message)
def get_logger(name, add_file_handler=False, log_level=logger_consts.DEFAULT_LOG_LEVEL):
logger = KwargsLogger(logging.Logger(name), log_level=log_level)
formatter = logging.Formatter(
'%(asctime)s-%(name)s-%(levelname)s %(filename)s-%(lineno)d %(message)s')
ch = logging.StreamHandler()
ch.setLevel(log_level)
ch.setFormatter(formatter)
logger.add_handler(ch)
if add_file_handler:
if not os.path.exists(LOGS_BASE_DIR_PATH):
os.makedirs(LOGS_BASE_DIR_PATH)
fh = logging.FileHandler(
LOGS_BASE_DIR_PATH + '{name}.log'.format(name=name))
fh.setLevel(log_level)
fh.setFormatter(formatter)
logger.add_handler(fh)
return logger
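
# --- Usage sketch (illustrative addition; assumes this module and taco.logger.consts are importable) ---
# KwargsLogger folds keyword arguments into the message text, e.g.
# "user logged in: user_id=42, source=web".
if __name__ == '__main__':
    demo_logger = get_logger('kwargs_logger_demo')
    demo_logger.info('user logged in', user_id=42, source='web')
    demo_logger.warn('disk almost full', free_mb=120)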
|
11470535
|
import numpy as np
from sciencebeam_gym.utils.bounding_box import BoundingBox
from sciencebeam_gym.utils.visualize_bounding_box import draw_bounding_box
class TestDrawBoundingBox:
def test_should_not_fail_with_float_bounding_box_values(self):
image_array = np.zeros((200, 200, 3), dtype='uint8')
draw_bounding_box(
image_array,
bounding_box=BoundingBox(10.0, 10.0, 50.0, 50.0),
color=(255, 0, 0),
text='Box 1'
)
|
11470542
|
import time
from django.core.exceptions import ValidationError
from django.conf import settings
class AntiSpam(object):
def __init__(self):
self.spammed = 0
self.info = {}
def check_spam(self, json_message):
message_length = len(json_message)
info_key = int(round(time.time() * 100))
self.info[info_key] = message_length
if message_length > settings.MAX_MESSAGE_SIZE:
self.spammed += 1
raise ValidationError("Message can't exceed %d symbols" % settings.MAX_MESSAGE_SIZE)
self.check_timed_spam()
def check_timed_spam(self):
# TODO implement me
pass
# raise ValidationError("You're chatting too much, calm down a bit!")
|
11470566
|
import torch
import numpy as np
class ProcessForce(object):
"""Truncate a time series of force readings with a window size.
Args:
window_size (int): Length of the history window that is
used to truncate the force readings
"""
def __init__(self, window_size, key='force', tanh=False):
assert isinstance(window_size, int)
self.window_size = window_size
self.key = key
self.tanh = tanh
def __call__(self, sample):
force = sample[self.key]
force = force[-self.window_size:]
if self.tanh:
            force = np.tanh(force)  # squash very large force readings
sample[self.key] = force.transpose()
return sample
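
# --- Usage sketch (illustrative addition; not part of the original module) ---
# Keeps only the last `window_size` readings and optionally squashes them with
# tanh before transposing to (channels, time).
if __name__ == '__main__':
    sample = {'force': np.random.randn(100, 6)}  # 100 timesteps, 6 force/torque axes
    transform = ProcessForce(window_size=32, tanh=True)
    out = transform(sample)
    print(out['force'].shape)  # -> (6, 32)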
|
11470711
|
import numpy as np
def tetra4_cell(elemList, nodeList):
nodeiRow = []
for i in range(len(elemList[:, 0])):
nodeiRow.append((4, int(np.argwhere(nodeList[:, 1] == elemList[i, 3])),
int(np.argwhere(nodeList[:, 1] == elemList[i, 4])),
int(np.argwhere(nodeList[:, 1] == elemList[i, 5])),
int(np.argwhere(nodeList[:, 1] == elemList[i, 6]))))
return np.array(nodeiRow)
def cell_type_tetra4(elemList):
x = np.repeat(10, (len(elemList[:, 0])))
return x
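
# --- Usage sketch (illustrative addition; not part of the original module) ---
# Each row returned by tetra4_cell is (point_count, i0, i1, i2, i3), matching
# VTK's cell-array layout for a 4-node tetrahedron; cell_type_tetra4 emits the
# VTK_TETRA type id (10) once per element.
if __name__ == '__main__':
    nodeList = np.array([[0, 1], [1, 2], [2, 3], [3, 4]])  # column 1 holds node ids
    elemList = np.array([[0, 0, 0, 1, 2, 3, 4]])           # columns 3..6 hold the tetra's node ids
    print(tetra4_cell(elemList, nodeList))  # -> [[4 0 1 2 3]]
    print(cell_type_tetra4(elemList))       # -> [10]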
|
11470766
|
import os
import struct
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks
from twistar.registry import Registry
from floranet.models.device import Device
from floranet.models.application import Application
from floranet.appserver.azure_iot_mqtt import AzureIotMqtt
from floranet.database import Database
from floranet.log import log
"""
Azure IoT MQTT test application interface to use. Configure
this interface with the IoT Hub hostname, key name and
key value:
floranet> interface add azure protocol=mqtt name=AzureTest
iothost=test-floranet.azure-devices.net keyname=iothubowner
keyvalue=<KEY>
"""
AzureIoTHubName = 'AzureMqttTest'
"""
Azure IoT Hub Device Explorer should be used to verify outbound
(Device to Cloud) messages are received, and to send inbound
(Cloud to Device) test messages.
"""
class AzureIotMQTTTest(unittest.TestCase):
"""Test send and receive messages to Azure IoT Hub
"""
@inlineCallbacks
def setUp(self):
# Bootstrap the database
fpath = os.path.realpath(__file__)
config = os.path.dirname(fpath) + '/database.cfg'
log.start(True, '', True)
db = Database()
db.parseConfig(config)
db.start()
db.register()
self.device = yield Device.find(where=['appname = ?',
'azuredevice02'], limit=1)
self.app = yield Application.find(where=['appeui = ?',
self.device.appeui], limit=1)
@inlineCallbacks
def test_AzureIotMqtt(self):
"""Test sending & receiving sample data to/from an
Azure IoT Hub instance"""
interface = yield AzureIotMqtt.find(where=['name = ?',
AzureIoTHubName], limit=1)
port = 11
appdata = "{ Temperature: 42.3456 }"
yield interface.start(None)
yield interface.netServerReceived(self.device, self.app, port, appdata)
|
11470788
|
import logging
logger = logging.getLogger('peachy')
from peachyprinter.domain.layer_generator import LayerGenerator
from peachyprinter.domain.commands import LateralDraw, Layer, LateralMove
from math import pi, sin, cos, sqrt
class HalfVaseTestGenerator(LayerGenerator):
name = "Half Vase With A Twist"
def __init__(self, height, width, layer_height, speed=100):
self._height = float(height)
self._max_radius = float(width) / 2.0
self._layer_height = float(layer_height)
self._speed = speed
self._current_height = 0.0
self._steps_in_half = 100
self._rad_per_step = pi / float(self._steps_in_half)
self._layers = self._height / self._layer_height
logger.info("Half vase height: %s" % self._height)
logger.info("Half vase radius: %s" % self._max_radius)
logger.info("Half vase layer height: %s" % self._layer_height)
logger.info("Half vase speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _points(self, radius, start_angle):
points = [[0, 0]]
for step in range(0, self._steps_in_half):
angle = start_angle + (step * self._rad_per_step)
x = sin(angle) * radius
y = cos(angle) * radius
points.append([x, y])
points.append([0, 0])
return points
def _start_angle(self):
percent_complete = self._current_height / self._height
angle = pi * percent_complete
return angle
def _radius(self):
percent_complete = self._current_height / self._height
factor = (sin(percent_complete * 2.0 * pi * 2.0) + 1) / 2.0
out = (self._max_radius * 0.75) + (factor * (self._max_radius * 0.25))
return out
def next(self):
logger.info("Half vase height: %s" % self._height)
logger.info("Half vase current height: %s" % self._current_height)
if self._current_height >= self._height:
raise StopIteration
points = self._points(self._radius(), self._start_angle())
commands = [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
layer = Layer(self._current_height, commands=commands)
self._current_height = self._current_height + self._layer_height
return layer
class SolidObjectTestGenerator(LayerGenerator):
name = "Solidified Object of Opressive Beauty"
def __init__(self, height, width, layer_height, speed=100):
self._height = float(height)
self._max_radius = float(width) / 2.0
self._layer_height = float(layer_height)
self._speed = speed
self._current_height = 0.0
self._steps_in_circle = 180
self._steps_circle_section = 155
self._rad_per_step = (2.0*pi) / float(self._steps_in_circle)
self._layers = self._height / self._layer_height
logger.info("Solidified Object height: %s" % self._height)
logger.info("Solidified Object radius: %s" % self._max_radius)
logger.info("Solidified Object layer height: %s" % self._layer_height)
logger.info("Solidified Object speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _points(self, radius, start_angle):
points = [[0, 0]]
for step in range(0, self._steps_circle_section):
angle = start_angle + (step * self._rad_per_step)
x = sin(angle) * radius
y = cos(angle) * radius
points.append([x, y])
points.append([0, 0])
return points
def _start_angle(self):
percent_complete = self._current_height / self._height
angle = pi * percent_complete
return angle
def _radius(self):
percent_complete = self._current_height / self._height
factor = ((cos(sqrt(percent_complete) * pi * 3.0) / 4.0) + 0.75 - (percent_complete * 0.5)) * self._max_radius
return factor
def next(self):
if self._current_height >= self._height:
raise StopIteration
points = self._points(self._radius(), self._start_angle())
commands = [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
layer = Layer(self._current_height, commands=commands)
self._current_height = self._current_height + self._layer_height
return layer
class TwistVaseTestGenerator(LayerGenerator):
name = "Half Vase With A Bunch of Twists"
def __init__(self, height, width, layer_height, speed=100):
self._height = float(height)
self._max_radius = float(width) / 2.0
self._layer_height = float(layer_height)
self._speed = speed
self._current_height = 0.0
self._steps_in_half = 100
self._twists = 3 * pi
self._rad_per_step = pi / float(self._steps_in_half)
self._layers = self._height / self._layer_height
logger.info("Half vase height: %s" % self._height)
logger.info("Half vase radius: %s" % self._max_radius)
logger.info("Half vase layer height: %s" % self._layer_height)
logger.info("Half vase speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _points(self, radius, start_angle):
points = [[0, 0]]
for step in range(0, self._steps_in_half):
angle = start_angle + (step * self._rad_per_step)
x = sin(angle) * radius
y = cos(angle) * radius
points.append([x, y])
points.append([0, 0])
return points
def _start_angle(self):
percent_complete = self._current_height / self._height
angle = self._twists * percent_complete
return angle
def _radius(self):
percent_complete = self._current_height / self._height
factor = (sin(percent_complete * 2.0 * pi * 2.0) + 1) / 2.0
out = (self._max_radius * 0.75) + (factor * (self._max_radius * 0.25))
return out
def next(self):
logger.info("Half vase height: %s" % self._height)
logger.info("Half vase current height: %s" % self._current_height)
if self._current_height >= self._height:
raise StopIteration
points = self._points(self._radius(), self._start_angle())
commands = [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
layer = Layer(self._current_height, commands=commands)
self._current_height = self._current_height + self._layer_height
return layer
class SimpleVaseTestGenerator(LayerGenerator):
name = "Simple 5 Sided 180 Twist Vase"
def __init__(self, height, width, layer_height, speed=100):
self._height = float(height)
self._max_radius = float(width) / 2.0
self._layer_height = float(layer_height)
self._speed = speed
self._current_height = 0.0
self._steps = 5
self._rad_per_step = 2 * pi / float(self._steps)
self._layers = self._height / self._layer_height
        self._angle_variance = pi / self._layers
self._last_point = [0, self._max_radius]
self._last_angle = 0
logger.info("Vase height: %s" % self._height)
logger.info("Vase radius: %s" % self._max_radius)
logger.info("Vase layer height: %s" % self._layer_height)
logger.info("Vase speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _points(self, start_angle):
points = []
for step in range(0, self._steps + 1):
angle = start_angle + (step * self._rad_per_step)
x = sin(angle) * self._max_radius
y = cos(angle) * self._max_radius
points.append([x, y])
return points
def next(self):
if self._current_height >= self._height:
raise StopIteration
points = self._points(self._last_angle)
commands = [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
layer = Layer(self._current_height, commands=commands)
self._current_height = self._current_height + self._layer_height
        self._last_angle = self._last_angle + self._angle_variance
return layer
class ConcentricCircleTestGenerator(LayerGenerator):
name = "Concentric Circles"
def __init__(self, height, width, layer_height, speed=100):
self._height = float(height)
self._max_radius = float(width) / 2.0
self._layer_height = float(layer_height)
self._speed = speed
self._current_height = 0.0
self._steps = 90
self._rad_per_step = 2 * pi / float(self._steps)
self._layers = self._height / self._layer_height
        self._angle_variance = pi / self._layers
self._last_point = [0, self._max_radius]
self._last_angle = 0
self._rings = 3
logger.info("Circles height: %s" % self._height)
logger.info("Circles radius: %s" % self._max_radius)
logger.info("Circles layer height: %s" % self._layer_height)
logger.info("Circles speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _points(self, start_angle, radius):
points = []
for step in range(0, self._steps + 10):
angle = start_angle + (step * self._rad_per_step)
x = sin(angle) * radius
y = cos(angle) * radius
points.append([x, y])
return points
def next(self):
if self._current_height >= self._height:
raise StopIteration
commands = []
for i in range(1, self._rings + 1):
radius = self._max_radius / self._rings * i
points = self._points(0, radius)
commands += [LateralMove(points[0], points[0], self._speed)]
commands += [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
layer = Layer(self._current_height, commands=commands)
self._current_height = self._current_height + self._layer_height
        self._last_angle = self._last_angle + self._angle_variance
return layer
class LollipopTestGenerator(LayerGenerator):
name = "Lollipop"
def __init__(self, height, width, layer_height, speed=100):
self._height = height
self._current_height = 0
self._layer_height = layer_height
self._speed = speed
self._base_height = float(height) / 3.0
self._stick_radius = float(width) / 10.0
remaining_height = height - self._base_height
self._pop_radius = min(remaining_height / 2.0, float(width) / 2.0)
self._pop_center_height = height - self._pop_radius
self._stick_complexity = 100
self._pop_complexity = 100
logger.info("Pop height: %s" % self._height)
logger.info("Stick radius: %s" % str(width / 10.0))
logger.info("Pop radius: %s" % str(width / 2.0))
logger.info("Pop layer height: %s" % self._layer_height)
logger.info("Pop speed: %s" % self._speed)
def __iter__(self):
return self
def __next__(self):
return self.next()
def _get_stick(self):
points = []
rad_per_step = (2 * pi) / self._stick_complexity
for step in range(0, self._stick_complexity + 1):
angle = step * rad_per_step
x = sin(angle) * self._stick_radius
y = cos(angle) * self._stick_radius
points.append([x, y])
return points
def _layer_from_points(self, points):
commands = [LateralDraw(points[index - 1], points[index], self._speed) for index in range(1, len(points))]
return Layer(self._current_height, commands=commands)
def _get_pop(self, current_height):
rad_per_step = (2 * pi) / self._pop_complexity
distance_to_centre = abs(self._pop_center_height - current_height)
radius = sqrt((self._pop_radius * self._pop_radius) - (distance_to_centre * distance_to_centre))
points = []
for step in range(0, self._pop_complexity + 1):
angle = step * rad_per_step
x = sin(angle) * radius
y = cos(angle) * radius
points.append([x, y])
return points
def next(self):
if self._current_height >= self._height:
raise StopIteration
if self._current_height <= self._base_height:
layer = self._layer_from_points(self._get_stick())
else:
layer = self._layer_from_points(self._get_pop(self._current_height))
self._current_height += self._layer_height
return layer
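
# --- Usage sketch (illustrative addition; assumes the peachyprinter package is installed) ---
# Every generator here is an iterator yielding one Layer per layer height until
# the target height is reached.
if __name__ == '__main__':
    generator = ConcentricCircleTestGenerator(height=10.0, width=40.0, layer_height=0.5)
    layers = list(generator)
    print(len(layers))  # -> 20 layers of LateralMove/LateralDraw commands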
|
11470822
|
from builtins import object
from django import forms
from pykeg.core import models
class NotificationSettingsForm(forms.ModelForm):
class Meta(object):
model = models.NotificationSettings
exclude = ["user", "backend"]
|
11470833
|
from copy import deepcopy
from anasymod.templates.templ import JinjaTempl
from anasymod.generators.gen_api import SVAPI, ModuleInst
from anasymod.sim_ctrl.datatypes import DigitalSignal
class ModuleRegMapSimCtrl(JinjaTempl):
def __init__(self, scfg):
super().__init__(trim_blocks=True, lstrip_blocks=True)
        ctrl_inputs = scfg.analog_ctrl_inputs + scfg.digital_ctrl_inputs
ctrl_outputs = scfg.analog_ctrl_outputs + scfg.digital_ctrl_outputs
#####################################################
        # Define module IOs
#####################################################
self.module_ifc = SVAPI()
module = ModuleInst(api=self.module_ifc, name="reg_map")
# Add clock
clk = DigitalSignal(name='clk', width=1, abspath=None)
module.add_input(io_obj=clk)
o_ctrl = DigitalSignal(name='o_ctrl', width=32, abspath=None)
module.add_input(io_obj=o_ctrl)
o_data = DigitalSignal(name='o_data', width=32, abspath=None)
module.add_output(io_obj=o_data)
i_ctrl = DigitalSignal(name='i_ctrl', width=32, abspath=None)
module.add_input(io_obj=i_ctrl)
i_data = DigitalSignal(name='i_data', width=32, abspath=None)
module.add_input(io_obj=i_data)
        # Add I/Os to design (probes and control parameters)
        module.add_outputs(io_objs=ctrl_inputs, connections=ctrl_inputs)
module.add_inputs(io_objs=ctrl_outputs, connections=ctrl_outputs)
module.generate_header()
#####################################################
# Initialize Default Values for ControlInfrastructure Parameters
#####################################################
self.init_ctrlios = SVAPI()
        for parameter in ctrl_inputs:
default_signal = deepcopy(parameter)
default_signal.name = str(default_signal.name) + '_def'
self.init_ctrlios.gen_signal(io_obj=default_signal)
self.init_ctrlios.assign_to(io_obj=default_signal, exp=default_signal.init_value)
#####################################################
# Combo mux for Probes section
#####################################################
        # combo mux cases for reading probe values
self.probes_combomux_cases = SVAPI()
self.probes_combomux_cases.indent(quantity=3)
for probe in ctrl_outputs:
if probe.o_addr is not None:
self.probes_combomux_cases.writeln(f"'d{probe.o_addr}: o_data_reg = {probe.name};")
        #####################################################
        # Register map for Control Parameters section
        #####################################################
        # register update logic for each control parameter
        self.params_regmap = SVAPI()
        for param in ctrl_inputs:
# create a reg signal
reg_signal = deepcopy(param)
reg_signal.name = f'{param.name}_reg'
self.params_regmap.gen_signal(reg_signal)
# assign to the reg signal
self.params_regmap.writeln(f'assign {param.name} = {param.name}_reg;')
# update the reg signal
self.params_regmap.writeln(f'''\
always @(posedge clk) begin
if (i_rst == 'b1) begin
{param.name}_reg <= {param.name}_def; // use VIO defaults
end else if ((i_valid == 1'b1) && (i_addr == 'd{param.i_addr})) begin
{param.name}_reg <= i_data;
end else begin
{param.name}_reg <= {param.name}_reg;
end
end''')
TEMPLATE_TEXT = '''
`timescale 1ns/1ps
`default_nettype none
{{subst.module_ifc.text}}
// break out signals in o_ctrl
logic [7:0] o_addr;
assign o_addr = o_ctrl[7:0];
// break out signals in i_ctrl
logic i_rst;
assign i_rst = i_ctrl[31];
logic i_valid;
assign i_valid = i_ctrl[30];
logic [7:0] i_addr;
assign i_addr = i_ctrl[7:0];
// Initial values for parameters
{{subst.init_ctrlios.text}}
// combo mux for reading outputs from design
logic [31:0] o_data_reg;
assign o_data = o_data_reg;
always @* begin
case (o_addr)
{{subst.probes_combomux_cases.text}}
default: o_data_reg = 0;
endcase
end
// register map for writing to the inputs of the design
{{subst.params_regmap.text}}
endmodule
`default_nettype wire
'''
|
11470853
|
from tkinter import Toplevel
from tkinter import Label
from tkinter import Frame
from tkinter import Button
from tkinter import GROOVE
from tkinter import NSEW
from tkinter import PhotoImage
from tkinter import Tk
import util.funcoes as funcoes
class SetLanguage():
def __init__(self, master, design, idioma, interface_idioma, icon, dic_imgs):
self.__base = "imagens/"
self.__tp_interface_idioma = None
self.__bt_idioma = None
self.__lb1 = None
self.master = master
self.design = design
self.idioma = idioma
self.interface_idioma = interface_idioma
self.icon = icon
self.dic_imgs = dic_imgs
def selecionar_idioma(self):
self.__tp_interface_idioma = Toplevel(self.master, self.design.dic["idioma_tp"])
self.__tp_interface_idioma.withdraw()
self.__tp_interface_idioma.tk.call('wm', 'iconphoto', self.__tp_interface_idioma._w, self.icon)
self.__tp_interface_idioma.grid_columnconfigure(1, weight=1)
self.__tp_interface_idioma.title('Escolha de Idioma')
self.fr_top_idioma = Frame(self.__tp_interface_idioma, self.design.dic["idioma_fr"])
self.fr_top_idioma.grid_columnconfigure(1, weight=1)
self.fr_top_idioma.grid(row=1, column=1, sticky=NSEW)
self.__lb1 = Label(self.fr_top_idioma, self.design.dic['idioma_lb'], text=self.interface_idioma["texto_atualizacao"][self.idioma])
self.__lb1.grid(row=1, column=1, sticky=NSEW)
self.__fr_idiomas = Frame(self.__tp_interface_idioma, self.design.dic['idioma_fr2'])
self.__fr_idiomas.grid(row=2, column=1)
        # Load the images
self.__imgs = []
for k, v in self.dic_imgs.items():
self.__imgs.append(PhotoImage(file=self.__base+v))
        # Load the buttons
x = 0
self.__lista_botoes = []
for k, v in self.dic_imgs.items():
if self.idioma == k:
self.__fr_bt = Frame(self.__fr_idiomas, self.design.dic['idioma_fr3'])
self.__bt_bt = Button(self.__fr_bt, self.design.dic['idioma_bt'], relief=GROOVE, image=self.__imgs[x], )
self.__lb_bt = Label(self.__fr_bt, self.design.dic['idioma_lb2'], relief=GROOVE, text=k, )
else:
self.__fr_bt = Frame(self.__fr_idiomas, self.design.dic['idioma_fr4'])
self.__bt_bt = Button(self.__fr_bt, self.design.dic['idioma_bt2'], relief=GROOVE, image=self.__imgs[x],)
self.__lb_bt = Label(self.__fr_bt, self.design.dic['idioma_lb3'], relief=GROOVE, text=k)
self.__bt_bt["command"] = lambda bt_bt=self.__bt_bt: self.__marcar_opcao_idioma(bt_bt)
self.__lista_botoes.append([self.__fr_bt, self.__bt_bt, self.__lb_bt])
self.__fr_bt.grid(row=1, column=x)
self.__bt_bt.grid(row=1, column=x)
self.__lb_bt.grid(row=2, column=x)
x += 1
self.__tp_interface_idioma.update()
        t_width = self.master.winfo_screenwidth()
        t_height = self.master.winfo_screenheight()
        j_height = self.__tp_interface_idioma.winfo_screenmmheight()
        j_width = self.__tp_interface_idioma.winfo_screenmmwidth()
        # Center the window on the screen
        self.__tp_interface_idioma.geometry("+{}+{}".format(int(t_width / 2 - (j_width / 2)), int(t_height / 2 - (j_height / 2))))
self.__tp_interface_idioma.deiconify()
def __marcar_opcao_idioma(self, botao):
self.__tp_interface_idioma.withdraw()
for bandeira in self.__lista_botoes:
if bandeira[1] == botao:
self.idioma = bandeira[2]["text"]
self.__ic_idioma = PhotoImage( file="imagens/{}".format(self.dic_imgs[self.idioma]) )
self.__ic_idioma = self.__ic_idioma.subsample(4, 4)
funcoes.arquivo_de_configuracoes_interface("idioma", self.idioma)
#self.__lb1.configure(text=self.interface_idioma["texto_atualizacao"][self.idioma])
self.__tp_interface_idioma.destroy()
del bandeira
self.selecionar_idioma()
else:
pass
return 10, 20
def atualizar_sistema(idioma):
    idioma.selecionar_idioma()
if __name__ == '__main__':
master = Tk()
design = Design()
design.update_design_dic()
    # IDE settings
arquivo_configuracoes = funcoes.carregar_json("configuracoes/configuracoes.json")
    # Language that Safira is configured to use
idioma = arquivo_configuracoes['idioma']
interface_idioma = funcoes.carregar_json("configuracoes/interface.json")
icon = PhotoImage(file='imagens/icone.png')
    dic_imgs = {"pt-br": "ic_pt_br.png", "en-us": "ic_en_us.png", "es": "ic_es.png"}
    idioma = SetLanguage(master, design, idioma, interface_idioma, icon, dic_imgs)
Button(master, text="acao", command=lambda id=idioma: atualizar_sistema(id)).grid()
master.mainloop()
|
11470865
|
from sys import argv
#Define constants
POSITION_MARGIN_OF_ERROR = 50
DISTANCE_MARGIN_OF_ERROR = 50
#Open the files
bwaFile = open(argv[1], 'r')
igenomicsFile = open(argv[2], 'r')
def printDivider():
print("-----------------------------------------------------")
def alignmentsDictFromFile(file):
dct = {}
for line in file.readlines():
line = line.strip('\n')
components = line.split('\t')
dictToAdd = {}
dictToAdd['read name'] = components[0]
dictToAdd['position'] = int(components[1])
dictToAdd['segment name'] = components[2]
dictToAdd['direction'] = components[3]
dictToAdd['distance'] = int(components[4])
if components[0] in dct:
print("ERROR: " + components[0] + " already occurred in DICT. Continuing...")
dct[components[0]] = dictToAdd
return dct
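# --- Input format note (illustrative addition; inferred from the parser above) ---
# Each input file is expected to be tab-separated with one alignment per line:
#     read_name<TAB>position<TAB>segment_name<TAB>direction<TAB>distance
# e.g. "read_0001\t12345\tchr1\t+\t3"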
#Read the alignments into their respective lists
print("BEGIN LOADING: bwaAlignmentDict")
bwaAlignmentDict = alignmentsDictFromFile(bwaFile)
print("FINISHED LOADING: bwaAlignmentDict")
printDivider()
print("BEGIN LOADING: igenomicsAlignmentDict")
igenomicsAlignmentDict = alignmentsDictFromFile(igenomicsFile)
print("FINISHED LOADING: igenomicsAlignmentDict")
printDivider()
#Compare those lists, with respect to the constants
#NOTE: THIS PERFORMED WITH RESPECT TO THE ALIGNMENTS FOUND IN IGENOMICS
printDivider()
print("BEGIN COMPARING")
passedReads = []
failedReads = []
notSharedReads = []
for readIGenomics in igenomicsAlignmentDict:
igenomicsDict = igenomicsAlignmentDict[readIGenomics]
if readIGenomics in bwaAlignmentDict:
bwaDict = bwaAlignmentDict[readIGenomics]
passedAllTests = bwaDict['direction'] == igenomicsDict['direction']
reasonsForFailure = {}
reasonsForFailure['position'] = False
reasonsForFailure['distance'] = False
positionDifference = abs(igenomicsDict['position'] - bwaDict['position'])
if positionDifference > POSITION_MARGIN_OF_ERROR:
passedAllTests = False
reasonsForFailure['position'] = True
print("ERROR: POSITION: " + readIGenomics + " margin of error was " + str(positionDifference))
distanceDifference = abs(igenomicsDict['distance'] - bwaDict['distance'])
if distanceDifference > DISTANCE_MARGIN_OF_ERROR:
passedAllTests = False
reasonsForFailure['distance'] = True
print("ERROR: DISTANCE: " + readIGenomics + " margin of error was " + str(distanceDifference) + ". iGenomics had ED of " + str(igenomicsDict['distance']) + ". Official ED was " + str(bwaDict['distance']))
if passedAllTests:
passedReads.append(readIGenomics)
else:
failedReads.append({'read': readIGenomics, 'reasonsForFailure': reasonsForFailure})
else:
print("ERROR: " + readIGenomics + " not found in bwaDict. Continuing...")
notSharedReads.append(readIGenomics)
continue
print("FINISHED COMPARING")
printDivider()
print("RESULTS:")
print("PASSING READS: %d" % len(passedReads))
for read in passedReads:
print("PASSED: " + read)
print("FAILING READS: %d" % len(failedReads))
for readDict in failedReads:
print("FAILED: " + readDict['read'] + " --> " + str(readDict['reasonsForFailure']))
print("NOT SHARED READS:")
for read in notSharedReads:
print("NOT SHARED: " + read)
print("RESULTS FINISHED")
|
11470882
|
from djangobench.utils import run_benchmark
def setup():
global Book
from model_save_new.models import Book
def benchmark():
global Book
for i in range(0, 30):
b = Book(id=i, title='Foo')
b.save()
run_benchmark(
benchmark,
setup=setup,
meta={
'description': 'A simple Model.save() call, instance not in DB.',
},
)
|
11470898
|
import filestack.models
from filestack import utils
class ImageTransformationMixin:
"""
All transformations and related/dependent tasks live here. They can
be directly called by Transformation or Filelink objects.
"""
def resize(self, width=None, height=None, fit=None, align=None):
return self._add_transform_task('resize', locals())
def crop(self, dim=None):
return self._add_transform_task('crop', locals())
def rotate(self, deg=None, exif=None, background=None):
return self._add_transform_task('rotate', locals())
def flip(self):
return self._add_transform_task('flip', locals())
def flop(self):
return self._add_transform_task('flop', locals())
def watermark(self, file=None, size=None, position=None):
return self._add_transform_task('watermark', locals())
def detect_faces(self, minsize=None, maxsize=None, color=None, export=None):
return self._add_transform_task('detect_faces', locals())
def crop_faces(self, mode=None, width=None, height=None, faces=None, buffer=None):
return self._add_transform_task('crop_faces', locals())
def pixelate_faces(self, faces=None, minsize=None, maxsize=None, buffer=None, amount=None, blur=None, type=None):
return self._add_transform_task('pixelate_faces', locals())
def round_corners(self, radius=None, blur=None, background=None):
return self._add_transform_task('round_corners', locals())
def vignette(self, amount=None, blurmode=None, background=None):
return self._add_transform_task('vignette', locals())
def polaroid(self, color=None, rotate=None, background=None):
return self._add_transform_task('polaroid', locals())
def torn_edges(self, spread=None, background=None):
return self._add_transform_task('torn_edges', locals())
def shadow(self, blur=None, opacity=None, vector=None, color=None, background=None):
return self._add_transform_task('shadow', locals())
def circle(self, background=None):
return self._add_transform_task('circle', locals())
def border(self, width=None, color=None, background=None):
return self._add_transform_task('border', locals())
def sharpen(self, amount=None):
return self._add_transform_task('sharpen', locals())
def blur(self, amount=None):
return self._add_transform_task('blur', locals())
def monochrome(self):
return self._add_transform_task('monochrome', locals())
def blackwhite(self, threshold=None):
return self._add_transform_task('blackwhite', locals())
def sepia(self, tone=None):
return self._add_transform_task('sepia', locals())
def pixelate(self, amount=None):
return self._add_transform_task('pixelate', locals())
def oil_paint(self, amount=None):
return self._add_transform_task('oil_paint', locals())
def negative(self):
return self._add_transform_task('negative', locals())
def modulate(self, brightness=None, hue=None, saturation=None):
return self._add_transform_task('modulate', locals())
def partial_pixelate(self, amount=None, blur=None, type=None, objects=None):
return self._add_transform_task('partial_pixelate', locals())
def partial_blur(self, amount=None, blur=None, type=None, objects=None):
return self._add_transform_task('partial_blur', locals())
def collage(self, files=None, margin=None, width=None, height=None, color=None, fit=None, autorotate=None):
return self._add_transform_task('collage', locals())
def upscale(self, upscale=None, noise=None, style=None):
return self._add_transform_task('upscale', locals())
def enhance(self, preset=None):
return self._add_transform_task('enhance', locals())
def redeye(self):
return self._add_transform_task('redeye', locals())
def ascii(self, background=None, foreground=None, colored=None, size=None, reverse=None):
return self._add_transform_task('ascii', locals())
def filetype_conversion(self, format=None, background=None, page=None, density=None, compress=None,
quality=None, strip=None, colorspace=None, secure=None,
docinfo=None, pageformat=None, pageorientation=None):
return self._add_transform_task('output', locals())
def no_metadata(self):
return self._add_transform_task('no_metadata', locals())
def quality(self, value=None):
return self._add_transform_task('quality', locals())
def zip(self):
return self._add_transform_task('zip', locals())
def fallback(self, handle=None, cache=None):
return self._add_transform_task('fallback', locals())
def pdf_info(self, colorinfo=None):
return self._add_transform_task('pdfinfo', locals())
def pdf_convert(self, pageorientation=None, pageformat=None, pages=None):
return self._add_transform_task('pdfconvert', locals())
def minify_js(self, gzip=None, use_babel_polyfill=None, keep_fn_name=None, keep_class_name=None,
mangle=None, merge_vars=None, remove_console=None, remove_undefined=None, targets=None):
return self._add_transform_task('minify_js', locals())
def minify_css(self, level=None, gzip=None):
return self._add_transform_task('minify_css', locals())
def av_convert(self, *, preset=None, force=None, title=None, extname=None, filename=None,
width=None, height=None, upscale=None, aspect_mode=None, two_pass=None,
video_bitrate=None, fps=None, keyframe_interval=None, location=None,
watermark_url=None, watermark_top=None, watermark_bottom=None,
watermark_right=None, watermark_left=None, watermark_width=None, watermark_height=None,
path=None, access=None, container=None, audio_bitrate=None, audio_sample_rate=None,
audio_channels=None, clip_length=None, clip_offset=None):
new_transform = self._add_transform_task('video_convert', locals())
response = utils.requests.get(new_transform.url).json()
uuid = response['uuid']
timestamp = response['timestamp']
return filestack.models.AudioVisual(
new_transform.url, uuid, timestamp, apikey=new_transform.apikey, security=new_transform.security
)
def auto_image(self):
return self._add_transform_task('auto_image', locals())
def _add_transform_task(self, transformation, params):
if isinstance(self, filestack.models.Transformation):
instance = self
else:
instance = filestack.models.Transformation(apikey=None, security=self.security, handle=self.handle)
params.pop('self')
params = {k: v for k, v in params.items() if v is not None}
transformation_url = utils.return_transform_task(transformation, params)
instance._transformation_tasks.append(transformation_url)
return instance
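
# --- Usage sketch (illustrative addition; assumes the filestack-python Client API) ---
# Each method returns a Transformation, so calls chain and are serialized into a
# single transformation URL:
#
#     from filestack import Client
#     client = Client('<APIKEY>')
#     filelink = client.upload(filepath='portrait.jpg')
#     transform = filelink.resize(width=800).round_corners(radius=20).sepia(tone=80)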
|
11470922
|
from pybullet_planning import INF
from copy import deepcopy
class LadderGraphEdge(object):
def __init__(self, idx=None, cost=-INF):
self.idx = idx # the id of the destination vert
self.cost = cost
# TODO: we ignore the timing constraint here
def __repr__(self):
return 'E idx{0}, cost{1}'.format(self.idx, self.cost)
class LadderGraphRung(object):
    def __init__(self, id=None, data=None, edges=None):
        self.id = id
        # joint_data: joint values are stored in one contiguous list
        self.data = data if data is not None else []
        self.edges = edges if edges is not None else []
def __repr__(self):
return 'id {0}, data {1}, edge num {2}'.format(self.id, len(self.data), len(self.edges))
class LadderGraph(object):
def __init__(self, dof):
if dof <= 0 or not isinstance(dof, int):
raise ValueError('dof of the robot must be an integer >= 1!')
self.dof = dof
self.rungs = []
def get_dof(self):
return self.dof
def get_rung(self, rung_id):
assert(rung_id < len(self.rungs))
return self.rungs[rung_id]
def get_edges(self, rung_id):
return self.get_rung(rung_id).edges
def get_edge_sizes(self):
return [len(r.edges) for r in self.rungs]
def get_data(self, rung_id):
return self.get_rung(rung_id).data
def get_rungs_size(self):
return len(self.rungs)
@property
def size(self):
return self.get_rungs_size()
def get_rung_vert_size(self, rung_id):
"""count the number of vertices in a rung"""
return int(len(self.get_rung(rung_id).data) / self.dof)
def get_vert_size(self):
"""count the number of vertices in the whole graph"""
return sum([self.get_rung_vert_size(r_id) for r_id in range(self.get_rungs_size())])
def get_vert_sizes(self):
return [self.get_rung_vert_size(r_id) for r_id in range(self.get_rungs_size())]
def get_vert_data(self, rung_id, vert_id):
return self.get_rung(rung_id).data[self.dof * vert_id : self.dof * (vert_id+1)]
def resize(self, rung_number):
if self.size == 0:
self.rungs = [LadderGraphRung(id=None, data=[], edges=[]) for i in range(rung_number)]
return
if self.size > 0 and self.size < rung_number:
# fill in the missing ones with empty rungs
self.rungs.extend([LadderGraphRung(id=None, data=[], edges=[]) for i in range(rung_number - self.size)])
return
elif self.size > rung_number:
self.rungs = [r for i, r in enumerate(self.rungs) if i < rung_number]
return
def clear(self):
self.rungs = []
# assign fns
def assign_rung(self, r_id, sol_lists):
rung = self.get_rung(r_id)
rung.id = r_id
rung.data = [jt for jt_l in sol_lists for jt in jt_l]
assert(len(rung.data) % self.dof == 0)
def assign_edges(self, r_id, edges):
# edges_ref = self.get_edges(r_id)
self.get_rung(r_id).edges = edges
# TODO: from_data / to_data
# ! but we might need to think about the data format, the data can be large...
def __repr__(self):
return 'g tot_r_size:{0}, v_sizes:{1}, e_sizes:{2}'.format(self.size, self.get_vert_sizes(), self.get_edge_sizes())
# TODO: insert_rung, clear_rung_edges (maybe not needed at all)
class EdgeBuilder(object):
"""edge builder for ladder graph, construct edges for fully connected biparte graph"""
def __init__(self, n_start, n_end, dof, upper_tm=None, joint_vel_limits=None, preference_cost=1.0):
self.result_edges_ = [[] for i in range(n_start)]
self.edge_scratch_ = [LadderGraphEdge(idx=None, cost=None) for i in range(n_end)] # preallocated space to work on
self.dof_ = dof
self.count_ = 0
self.has_edges_ = False
self.preference_cost = preference_cost
def consider(self, st_jt, end_jt, index):
"""index: to_id"""
# TODO check delta joint val exceeds the joint_vel_limits
# TODO: use preference_cost here
cost = 0
for i in range(self.dof_):
cost += abs(st_jt[i] - end_jt[i])
cost *= self.preference_cost
assert(self.count_ < len(self.edge_scratch_))
self.edge_scratch_[self.count_].cost = cost
self.edge_scratch_[self.count_].idx = index
self.count_ += 1
def next(self, i):
#TODO: want to do std::move here to transfer memory...
self.result_edges_[i] = deepcopy(self.edge_scratch_)
self.has_edges_ = self.has_edges_ or self.count_ > 0
self.count_ = 0
@property
def result(self):
return self.result_edges_
@property
def has_edges(self):
return self.has_edges_
######################################
# ladder graph operations
def append_ladder_graph(current_graph, next_graph):
"""Horizontally connect two given ladder graphs, edges are added between
all the nodes in current_graph's last rung and next_graph's first rung.
Note: this is typically used in connecting ladder graphs generated from
two different Cartesian processes.
Parameters
----------
current_graph : LadderGraph
The first ladder graph
next_graph : LadderGraph
The second ladder graph to be appended at the back of the first one.
Returns
-------
LadderGraph
Horizontally joined ladder graph
"""
assert(isinstance(current_graph, LadderGraph) and isinstance(next_graph, LadderGraph))
assert(current_graph.dof == next_graph.dof)
cur_size = current_graph.size
new_tot_size = cur_size + next_graph.size
dof = current_graph.dof
# just add two sets of rungs together to have a longer ladder graph
current_graph.resize(new_tot_size)
for i in range(next_graph.size):
current_graph.rungs[cur_size + i] = next_graph.rungs[i]
# connect graphs at the boundary
a_rung = current_graph.get_rung(cur_size - 1)
b_rung = current_graph.get_rung(cur_size)
n_st_vert = int(len(a_rung.data) / dof)
n_end_vert = int(len(b_rung.data) / dof)
edge_builder = EdgeBuilder(n_st_vert, n_end_vert, dof)
for k in range(n_st_vert):
st_id = k * dof
for j in range(n_end_vert):
end_id = j * dof
edge_builder.consider(a_rung.data[st_id : st_id+dof], b_rung.data[end_id : end_id+dof], j)
edge_builder.next(k)
edges_list = edge_builder.result
# assert(edge_builder.has_edges)
current_graph.assign_edges(cur_size - 1, edges_list)
return current_graph
def concatenate_graph_vertically(graph_above, graph_below):
"""Vertically connect two given ladder graphs by concatenating the rung data.
No edges will be added, requiring that the two given graphs have the same
amount of rungs. The old edges will be preserved but the edge indices of the
second graph will be shifted accordingly.
Note: this is typically used in concatenating sampled ladder graphs from the
SAME Cartesian process.
Parameters
----------
graph_above : LadderGraph
The first ladder graph
graph_below : LadderGraph
The second ladder graph to be appended below the first one.
Returns
-------
LadderGraph
Vertically joined ladder graph
"""
assert isinstance(graph_above, LadderGraph)
assert isinstance(graph_below, LadderGraph)
    assert graph_above.size == graph_below.size, 'must have the same number of rungs!'
num_rungs = graph_above.size
for i in range(num_rungs):
rung_above = graph_above.get_rung(i)
above_jts = graph_above.get_rung(i).data
below_jts = graph_below.get_rung(i).data
above_jts.extend(below_jts)
if i != num_rungs - 1:
# shifting target vert id in below_edges
next_above_rung_size = graph_above.get_rung_vert_size(i + 1)
below_edges = graph_below.get_edges(i)
for v_out_edges in below_edges:
e_copy = deepcopy(v_out_edges)
for v_out_e in e_copy:
v_out_e.idx += next_above_rung_size
rung_above.edges.append(e_copy)
return graph_above
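
# --- Usage sketch (illustrative addition; assumes pybullet_planning is installed) ---
# Build two single-rung, one-dof graphs and join them horizontally;
# append_ladder_graph adds fully connected edges between the boundary rungs.
if __name__ == '__main__':
    g1 = LadderGraph(dof=1)
    g1.resize(1)
    g1.assign_rung(0, [[0.0], [1.0]])  # rung 0: two vertices
    g2 = LadderGraph(dof=1)
    g2.resize(1)
    g2.assign_rung(0, [[0.5]])         # rung 0: one vertex
    joined = append_ladder_graph(g1, g2)
    print(joined)  # 2 rungs; rung 0 now has an edge list into rung 1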
|
11470941
|
import csv
import datetime
from io import BytesIO, StringIO
import json
from urllib.parse import quote_plus
from django.conf import settings
from django.contrib.sites.models import Site
from django.http import Http404, HttpResponse
from django.shortcuts import reverse
from django.template.loader import get_template
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy as _l
import xlsxwriter
from qatrack.qatrack_core.dates import format_as_date, format_datetime
from qatrack.qatrack_core.utils import chrometopdf, relative_dates
CSV = "csv"
XLS = "xlsx"
PDF = "pdf"
REPORT_REGISTRY = {}
CONTENT_TYPES = {
CSV: "text/csv",
PDF: "application/pdf",
XLS: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
}
ORDERED_CONTENT_TYPES = [CSV, PDF, XLS]
def register_class(target_class):
if target_class.__name__ in REPORT_REGISTRY:
msg = "Trying to register %s but a class with the name %s already exists in the report registry" % (
target_class, target_class.__name__
)
raise ValueError(msg)
REPORT_REGISTRY[target_class.__name__] = target_class
def format_user(user):
if not user:
return ""
return user.username if not user.email else mark_safe(
'%s (<a href="mailto:%s">%s</a>)' % (user.username, user.email, user.email)
)
class ReportMeta(type):
required = ["to_table"]
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
register_class(cls)
if name != "BaseReport":
missing = []
for req in meta.required:
if req not in cls.__dict__ and not any(req in b.__dict__ for b in bases):
missing.append(req)
if missing:
raise TypeError("%s is missing the following required methods: %s" % (name, ', '.join(missing)))
return cls
class BaseReport(object, metaclass=ReportMeta):
filter_class = None
category = _l("General")
description = _l("Generic QATrack+ Report")
name = ""
report_type = ""
extra_form = None
formats = [PDF, XLS, CSV]
def __init__(self, base_opts=None, report_opts=None, notes=None, user=None):
"""base_opts is dict of form:
{'report_id': <rid|None>, 'include_signature': <bool>, 'title': str} """
self.user = user
self.base_opts = base_opts or {}
self.report_opts = report_opts or {}
self.notes = notes or []
if self.filter_class:
self.filter_set = self.filter_class(self.report_opts, queryset=self.get_queryset())
else:
self.filter_set = None
def get_queryset(self):
"""Some report types will want to define a get_queryset method to use with their filter_set"""
return None
def get_filter_form(self):
if self.filter_set:
return self.filter_set.form
def filter_form_valid(self, filter_form):
"""Add any extra checks for the filter_form here. For example,
you may want to limit the number of objects included in a report and if
the number of objects is too large, you would do:
filter_form.add_error("__all__", "reduce the number of objects!")
return False
"""
return True
def get_template(self, using=None):
t = getattr(self, "template", "reports/html_report.html")
return get_template(t, using=using)
def get_filename(self, report_format):
return "%s.%s" % (slugify(self.name or "QATrack Report"), report_format)
def render(self, report_format):
self.report_format = report_format
try:
content = getattr(self, "to_%s" % report_format)()
except AttributeError: # pragma: nocover
raise Http404("Unknown report format %s" % report_format)
return self.get_filename(report_format), content
def render_to_response(self, report_format):
fname, content = self.render(report_format)
response = HttpResponse(content, content_type=CONTENT_TYPES[report_format])
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
@property
def html(self):
"""return whether or not this is a plain text report (csv/txt)"""
return self.report_format in ['pdf', 'html']
@property
def plain(self):
"""return whether or not this is a plain text report (csv/txt)"""
return not self.html
def get_context(self):
name = self.get_report_type_name()
return {
'STATIC_ROOT': settings.STATIC_ROOT,
'site': Site.objects.get_current(),
'content': "",
'protocol': settings.HTTP_OR_HTTPS,
'report_name': name,
'report_description': self.description,
'report_type': self.get_report_type(),
'report_format': getattr(self, "report_format", "html"),
'report_title': self.base_opts.get("title", name),
'report_url': self.get_report_url(),
'report_details': self.get_report_details(),
'notes': self.notes,
'queryset': self.filter_set.qs if self.filter_set else None,
'include_signature': self.base_opts.get("include_signature", False),
}
def make_url(self, url, text='', title='', plain=False):
slash = "/" if not (self.domain.endswith("/") or url.startswith("/")) else ""
full_url = '%s://%s%s%s' % (settings.HTTP_OR_HTTPS, self.domain, slash, url)
if plain or self.plain:
return full_url
return mark_safe('<a href="%s" title="%s">%s</a>' % (full_url, title, text))
def get_report_url(self):
from qatrack.reports.forms import serialize_report
domain = Site.objects.get_current().domain
base_url = '%s://%s%s' % (settings.HTTP_OR_HTTPS, domain, reverse("reports"))
if self.base_opts.get('report_id'):
return "%s?report_id=%s" % (base_url, self.base_opts['report_id'])
opts = serialize_report(self)
return "%s?opts=%s" % (base_url, quote_plus(json.dumps(opts)))
@property
def domain(self):
if not hasattr(self, "_domain"):
self._domain = Site.objects.get_current().domain
return self._domain
def get_report_type_name(self):
return self.name
def get_report_type(self):
return self.report_type
def get_report_details(self):
if self.filter_set is None:
return []
form = self.filter_set.form
form.is_valid()
details = []
for name, field in form.fields.items():
val = form.cleaned_data.get(name)
getter = getattr(self, "get_%s_details" % name, None)
if getter:
try:
label, field_details = getter(val)
except ValueError: # pragma: no cover
raise ValueError("get_%s_details should return a 2-tuple of form (label:str, details:str)" % name)
details.append((label, field_details))
else:
details.append((field.label, self.default_detail_value_format(val)))
return details
def default_detail_value_format(self, val):
"""Take a value and return as a formatted string based on its type"""
if val is None:
msg = "No Filter"
return "<em>%s</em>" % msg if self.report_format not in [XLS, CSV] else msg
if isinstance(val, str):
if val.lower() in relative_dates.ALL_DATE_RANGES:
start, end = relative_dates(val).range()
return "%s (%s - %s)" % (val, format_as_date(start), format_as_date(end))
return val
if isinstance(val, timezone.datetime):
return format_as_date(val)
try:
if len(val) > 0 and isinstance(val[0], timezone.datetime):
joiner = " - " if len(val) == 2 else ", "
return joiner.join(format_as_date(dt) for dt in val)
except: # noqa: E722 # pragma: no cover
pass
try:
return ', '.join(str(x) for x in val)
except: # noqa: E722 # pragma: no cover
pass
return str(val)
def to_html(self):
self.report_format = "html"
context = self.get_context()
context['base_template'] = "reports/html_report.html"
template = self.get_template(using=None)
return template.render(context)
def to_pdf(self):
fname = self.get_filename("pdf")
context = self.get_context()
context['base_template'] = "reports/pdf_report.html"
template = self.get_template(using=None)
content = template.render(context)
return chrometopdf(content, name=fname)
def to_csv(self):
context = self.get_context()
f = StringIO()
writer = csv.writer(f)
for row in self.to_table(context):
writer.writerow(row)
f.seek(0)
return f
def to_xlsx(self):
context = self.get_context()
f = BytesIO()
wb = xlsxwriter.Workbook(f, {'in_memory': True})
ws = wb.add_worksheet(name="Report")
row = 0
col = 0
for data_row in self.to_table(context):
for data in data_row:
# excel doesn't like urls longer than 255 chars, so write as string instead
if isinstance(data, str) and "http" in data and len(data) > 255:
ws.write_string(row, col, data)
elif isinstance(data, timezone.datetime):
ws.write_string(row, col, format_datetime(data))
elif isinstance(data, datetime.date):
ws.write_string(row, col, format_as_date(data))
else:
try:
ws.write(row, col, data)
except TypeError:
ws.write(row, col, str(data))
col += 1
row += 1
col = 0
wb.close()
f.seek(0)
return f
def to_table(self, context):
"""This function should be overridden in subclasses and then used like
class FooReport(BaseReport):
...
def to_table(self, context):
# get default rows including description/filters etc
                rows = super().to_table(context)
# report specific data
rows += [
[...],
]
"""
rows = [
[_("Report Title:"), context['report_title']],
[_("View On Site:"), self.get_report_url()],
[_("Report Type:"), context['report_name']],
[_("Report Description:"), context['report_description']],
[_("Generated:"), format_datetime(timezone.now())],
[],
["Filters:"],
]
for label, criteria in context['report_details']:
rows.append([label + ":", criteria])
if context.get("notes"):
rows.append(["Notes:"])
for note in context['notes']:
rows.append([note['heading'], note['content']])
return rows
def report_types():
"""Return all report classes in the report registry"""
return [ReportClass for name, ReportClass in REPORT_REGISTRY.items() if name != "BaseReport"]
def report_descriptions():
"""
Return dictionary of form {report_type: description} for all report classes
in the report registry
"""
return {r.report_type: mark_safe(r.description) for r in report_types()}
def report_class(report_type):
"""Return report class corresponding to input report_type"""
for r in report_types():
if r.report_type == report_type:
return r
raise ValueError("Report class '%s' not found" % report_type)
def report_categories():
"""return list of all available report categories"""
return list(sorted(set([rt.category for rt in report_types()])))
def report_type_choices():
"""Return list of report type choices grouped by category. Suitable for choices for form field"""
rts = report_types()
rcs = report_categories()
return [(c, [(rt.report_type, rt.name) for rt in rts if rt.category == c]) for c in rcs]
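# ---------------------------------------------------------------------------
# A minimal sketch of a concrete report (illustrative only, not one of the
# registered QATrack reports): it demonstrates the to_table() override pattern
# described in BaseReport.to_table's docstring. All attribute values below are
# assumptions.
class ExampleListReport(BaseReport):

    report_type = "example_list"
    category = "Example"
    name = "Example List"
    description = "Demonstrates appending report specific rows to the defaults."
    filter_class = None

    def to_table(self, context):
        # default rows include title, filters, notes etc.
        rows = super().to_table(context)
        rows += [
            [],
            ["Items:"],
            ["example item 1"],
            ["example item 2"],
        ]
        return rows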
|
11470945
|
import logging
import os
import parsl
import pytest
import time
logger = logging.getLogger(__name__)
@parsl.python_app
def this_app():
return 5
@pytest.mark.local
def test_row_counts():
from parsl.tests.configs.htex_local_alternate import fresh_config
import sqlalchemy
if os.path.exists("monitoring.db"):
logger.info("Monitoring database already exists - deleting")
os.remove("monitoring.db")
engine = sqlalchemy.create_engine("sqlite:///monitoring.db")
logger.info("loading parsl")
parsl.load(fresh_config())
# parsl.load() returns before all initialisation of monitoring
# is complete, which means it isn't safe to take a read lock on
# the database yet. This delay tries to work around that - some
# better async behaviour might be nice, but what?
#
# Taking a read lock before monitoring is initialized will cause
# a failure in the part of monitoring which creates tables, and
# which is not protected against read locks at the time this test
# was written.
time.sleep(10)
# to get an sqlite3 read lock that is held over a controllable
# long time, create a transaction and perform a SELECT in it.
# The lock will be held until the end of the transaction.
# (see bottom of https://sqlite.org/lockingv3.html)
logger.info("Getting a read lock on the monitoring database")
with engine.begin() as readlock_connection:
readlock_connection.execute("BEGIN TRANSACTION")
result = readlock_connection.execute("SELECT COUNT(*) FROM workflow")
(c, ) = result.first()
assert c == 1
# now readlock_connection should have a read lock that will
# stay locked until the transaction is ended, or the with
# block ends.
logger.info("invoking and waiting for result")
assert this_app().result() == 5
# there is going to be some raciness here making sure that
# the database manager actually tries to write while the
# read lock is held. I'm not sure if there is a better way
# to detect this other than a hopefully long-enough sleep.
time.sleep(10)
logger.info("cleaning up parsl")
parsl.dfk().cleanup()
parsl.clear()
# at this point, we should find data consistent with executing one
# task in the database.
logger.info("checking database content")
with engine.begin() as connection:
result = connection.execute("SELECT COUNT(*) FROM workflow")
(c, ) = result.first()
assert c == 1
result = connection.execute("SELECT COUNT(*) FROM task")
(c, ) = result.first()
assert c == 1
result = connection.execute("SELECT COUNT(*) FROM try")
(c, ) = result.first()
assert c == 1
logger.info("all done")
|
11471034
|
num = 1234
reversed_num = 0
while num != 0:
digit = num % 10
reversed_num = reversed_num * 10 + digit
num //= 10
print("Reversed Number: " + str(reversed_num))
# Output:
# Reversed Number: 4321
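# An equivalent string-based sketch (non-negative integers only):
num = 1234
print("Reversed Number: " + str(int(str(num)[::-1])))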
|
11471135
|
import gzip
import numpy
import os
import pandas
import random
from grocsvs import step
from grocsvs import utilities
from grocsvs import structuralvariants
from grocsvs.stages import cluster_svs
MAX_BARCODES = 200
class BarcodesFromGraphsStep(step.StepChunk):
"""
Collect barcodes supporting each candidate structural event so that
we can get all the reads that may support an event and perform assembly
"""
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
            yield BarcodesFromGraphsStep(options, sample, dataset)
def __init__(self, options, sample, dataset):
self.options = options
self.sample = sample
self.dataset = dataset
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
str(self.dataset.id)])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
file_name = "sv_barcodes.{}.{}.pickle".format(
self.sample.name,
self.dataset.id
)
paths = {
"sv_barcodes": os.path.join(directory, file_name)
}
return paths
def run(self):
dist1 = -500
dist2 = 5000
outpath = self.outpaths(final=False)["sv_barcodes"]
self.logger.log("loading...")
events = self.load_events()
barcodes_map = {}
for i, cluster in events.groupby("cluster"):
barcodes = set()
for j, event in cluster.iterrows():
_, _, merged_frags = \
structuralvariants.get_supporting_fragments_new(
self.options, self.sample, self.dataset,
event["chromx"], int(event["x"]),
event["chromy"], int(event["y"]),
event["orientation"], dist1, dist2,
min_reads_per_frag=0)
cur_bcs = set(merged_frags["bc"])
if len(cur_bcs) > MAX_BARCODES:
print "TOO MANY BARCODES: sampling {} of {} for cluster {}".format(MAX_BARCODES, len(cur_bcs), i)
cur_bcs = random.sample(cur_bcs, MAX_BARCODES)
if len(cur_bcs) == 0:
self.logger.log("WARNING: no barcodes found for event {} {}:{}::{}:{}{} dist1={} dist2={}".format(
self.sample.name, event.chromx, event.x, event.chromy, event.y, event.orientation, dist1, dist2))
barcodes.update(cur_bcs)
barcodes_map[i] = barcodes
        with open(outpath, "wb") as out_file:
            utilities.pickle.dump(barcodes_map, out_file, protocol=-1)
def load_events(self):
# path = os.path.join(self.results_dir, "edges.tsv")
edges_path = cluster_svs.ClusterSVsStep(self.options).outpaths(final=True)["edges"]
graphs_table = pandas.read_table(edges_path)
graphs_table["chromx"] = graphs_table["chromx"].astype("string")
graphs_table["chromy"] = graphs_table["chromy"].astype("string")
return graphs_table
|
11471140
|
from django.test import TestCase
from rest_framework.exceptions import ValidationError
from rest_framework.fields import SkipField
from django_enumfield.contrib.drf import EnumField, NamedEnumField
from django_enumfield.tests.models import BeerState, LampState
class DRFTestCase(TestCase):
def test_enum_field(self):
field = EnumField(BeerState)
self.assertEqual(field.to_internal_value("0"), BeerState.FIZZY)
self.assertEqual(
field.to_internal_value(BeerState.EMPTY.value), BeerState.EMPTY
)
self.assertEqual(
field.to_representation(BeerState.FIZZY), BeerState.FIZZY.value
)
def test_enum_field__validation_fail(self):
field = EnumField(BeerState)
with self.assertRaises(ValidationError):
field.to_internal_value("3")
nonrequired_field = EnumField(LampState, required=False)
with self.assertRaises(SkipField):
            nonrequired_field.to_internal_value("3")
def test_named_enum_field(self):
field = NamedEnumField(LampState)
self.assertEqual(field.to_internal_value("1"), LampState.ON)
self.assertEqual(field.to_representation(LampState.OFF), "OFF")
|
11471143
|
from datetime import datetime, timedelta
from openspaces import models
def get_ignored_users():
"""
Check app config table to get list of ignored twitter ids
"""
config_obj = models.OutgoingConfig.objects.latest("id")
    return list(config_obj.ignore_users)
def get_or_create_user_and_tweet(status):
"""
Take a status from twitter and either create or update info for tweet & user
"""
user, created = models.User.objects.get_or_create(id_str=str(status.user.id))
user.screen_name = status.user.screen_name
user.save()
# save tweet record to StreamedTweet model
tweet_record, created = models.StreamedTweet.objects.get_or_create(id_str=status.id_str)
tweet_record.user = user
tweet_record.text = status.text
tweet_record.source = status.source
tweet_record.save()
def check_for_auto_send():
"""
Check config table and return auto send value
"""
config_obj = models.OutgoingConfig.objects.latest("id")
approved = 1 if config_obj.auto_send else 0
return approved
def save_outgoing_tweet(**kwargs):
"""
Save a tweet object to the outgoing tweet table triggering celery stuff
"""
return models.OutgoingTweet.objects.create(**kwargs)
def check_time_room_conflict(a_time, a_room, mins_before=15, mins_after=30):
"""
Check to see if there is already a tweet scheduled to go out about
an event in the same time and room. Helps avoid duplicate retweets
about the same event sent by multiple users. Currently the retweets
    from the bot are first come, first served for a unique room and time stamp.
"""
start_time = a_time - timedelta(minutes=mins_before)
end_time = a_time + timedelta(minutes=mins_after)
event_conflict = models.OpenspacesEvent.objects.filter(location=a_room) \
.filter(start__range=(start_time, end_time))
    return event_conflict.exists()
def create_event(**kwargs):
"""
Create event record with a description, creator, time, and room
"""
return models.OpenspacesEvent.objects.create(**kwargs)
def setup_outgoing_config():
models.OutgoingConfig.objects.create(auto_send=True,
default_send_interval=15,
ignore_users=[])
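# A minimal usage sketch (illustrative only): schedule an event unless another
# one is already recorded for the same room around the same time. The room,
# time, description, and creator values below are assumptions, not app fixtures.
def schedule_event_example():
    event_time = datetime(2020, 1, 1, 14, 0)
    if not check_time_room_conflict(event_time, a_room="B101"):
        create_event(description="Example talk", creator="example_user",
                     start=event_time, location="B101")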
|
11471173
|
import argparse
import os
import torch
import tqdm
from torch.utils.data import DataLoader
from datasets.qm9_property import TARGET_NAMES
from utils import misc as utils_misc
from utils.transforms import get_edge_transform
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='./data/qm9_property')
parser.add_argument('--split_file', type=str, default='./data/qm9_property/split.npz')
parser.add_argument('--data_processed_tag', type=str, default='dgl_processed')
parser.add_argument('--val_batch_size', type=int, default=64)
parser.add_argument('--num_workers', type=int, default=4)
# Eval
parser.add_argument('--seed', type=int, default=2020)
parser.add_argument('--ckpt_path', type=str, default='./logs_prop_pred')
parser.add_argument('--ckpt_iter', type=int, default=None)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--save_eval_log', type=eval, default=False, choices=[True, False])
parser.add_argument('--pre_pos_path', type=str, default=None)
parser.add_argument('--pre_pos_filename', type=str, default=None)
args = parser.parse_args()
return args
def main():
args = get_args()
utils_misc.seed_all(args.seed)
if args.save_eval_log:
logger = utils_misc.get_logger('eval', args.ckpt_path, 'log_eval.txt')
else:
logger = utils_misc.get_logger('eval')
logger.info(args)
logger.info(f'Loading model from {args.ckpt_path}')
if args.ckpt_iter is None:
ckpt_restore = utils_misc.CheckpointManager(args.ckpt_path, logger=logger).load_best()
else:
ckpt_restore = utils_misc.CheckpointManager(args.ckpt_path, logger=logger).load_with_iteration(args.ckpt_iter)
logger.info(f'Loaded model at iteration: {ckpt_restore["iteration"]} val loss: {ckpt_restore["score"]}')
ckpt_config = utils_misc.load_config(os.path.join(args.ckpt_path, 'config.yml'))
logger.info(f'ckpt_config: {ckpt_config}')
edge_transform = get_edge_transform(
ckpt_config.data.edge_transform_mode, ckpt_config.data.aux_edge_order,
ckpt_config.data.cutoff, ckpt_config.data.cutoff_pos)
target_name = ckpt_config.data.target_name
target_index = TARGET_NAMES.index(target_name)
# override data path
ckpt_config.data.dataset_path = args.dataset_path
ckpt_config.data.split_file = args.split_file
test_dset = utils_misc.get_prop_dataset(
ckpt_config.data, edge_transform, 'test', args.pre_pos_path, args.pre_pos_filename)
logger.info('TestSet %d' % (len(test_dset)))
test_loader = DataLoader(test_dset, batch_size=args.val_batch_size, collate_fn=utils_misc.collate_prop,
num_workers=args.num_workers, shuffle=False, drop_last=False)
ckpt_state = ckpt_restore['state_dict']
model = utils_misc.build_prop_pred_model(
ckpt_config, target_index=target_index,
target_mean=ckpt_state['target_mean'] if 'target_mean' in ckpt_state else None,
target_std=ckpt_state['target_std'] if 'target_std' in ckpt_state else None
).to(args.device)
model.load_state_dict(ckpt_restore['state_dict'])
logger.info(repr(model))
logger.info(f'# trainable parameters: {utils_misc.count_parameters(model) / 1e6:.4f} M')
with torch.no_grad():
model.eval()
maes = []
for batch, labels, meta_info in tqdm.tqdm(test_loader, dynamic_ncols=True, desc='Testing', leave=None):
batch = batch.to(torch.device(args.device))
labels = labels.to(args.device)[:, target_index]
pred, gen_pos = model(batch, ckpt_config.train.pos_type)
mae = (pred.view(-1) - labels).abs()
maes.append(mae)
mae = torch.cat(maes, dim=0).cpu() # [num_examples]
avg_loss = mae.mean()
mae = 1000 * mae if target_name in ['homo', 'lumo', 'gap', 'zpve', 'u0', 'u298', 'h298', 'g298'] else mae
logger.info(f'[Test] Epoch {ckpt_restore["iteration"]:03d} | Target: {target_name} Avg loss {avg_loss:.6f} '
f'rescale MAE: {mae.mean():.5f} ± {mae.std():.5f}')
if __name__ == '__main__':
main()
|
11471197
|
import numpy as np
#################################################################
# Implements the simulator class for MDPs
#################################################################
class MDPSimulator():
def __init__(self, model):
"""
Implements the multi-agent simulator:
This serves as a wrapper for MDP problem types
"""
self.model = model # problem instance
        # initialize
self.current_state = model.initial_state()
self.last_action = 0
self.last_reward = 0.0
self.model_dims = model.state_shape
self.n_actions = model.n_actions
    def act(self, action):
        """
        Transitions the model forward by taking the given action
        """
        mdp = self.model
        self.last_action = action
        self.last_reward = mdp.reward(self.current_state, action)
        self.current_state = mdp.transition(self.current_state, action)
if self.episode_over():
self.last_reward += mdp.reward(self.current_state, action)
def reward(self):
return self.last_reward
def get_screenshot(self):
return self.current_state
def episode_over(self):
return self.model.isterminal(self.current_state)
    def reset_episode(self):
        self.current_state = self.model.initial_state()
        self.last_action = 0
        self.last_reward = 0.0
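# A minimal rollout sketch (illustrative only): `model` must provide the
# interface used above (initial_state/reward/transition/isterminal plus
# state_shape and n_actions); no concrete MDP is bundled with this module.
def random_rollout_example(model):
    sim = MDPSimulator(model)
    total = 0.0
    while not sim.episode_over():
        sim.act(np.random.randint(sim.n_actions))
        total += sim.reward()
    return total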
|
11471258
|
import numpy as np
import pandas as pd
import xgboost as xgb
import datetime
import operator
from sklearn.model_selection import train_test_split
from collections import Counter
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from pylab import plot, show, subplot, specgram, imshow, savefig
RS = 12357
ROUNDS = 10000
print("Started")
np.random.seed(RS)
input_folder = './data/'
# data
df_train = pd.read_csv(input_folder + 'train.csv')
df_test = pd.read_csv(input_folder + 'test.csv')
print("Original data: X_train: {}, X_test: {}".format(df_train.shape, df_test.shape))
x_train_1 = pd.read_csv('xtrain.csv')
del x_train_1['Unnamed: 0']
x_test_1 = pd.read_csv('xtest.csv')
del x_test_1['Unnamed: 0']
print("Feature set 1: X_train: {}, X_test: {}".format(x_train_1.shape,x_test_1.shape))
x_train_2 = pd.read_csv('xtrain_2.csv')
#del x_train_2['Unnamed: 0']
x_test_2 = pd.read_csv('xtest_2.csv')
#del x_test_2['Unnamed: 0']
print("Feature set 2: X_train: {}, X_test: {}".format(x_train_2.shape, x_test_2.shape))
x_train_3 = pd.read_csv('xtrain_3.csv')
x_test_3 = pd.read_csv('xtest_3.csv')
print("Feature set 3: X_train: {}, X_test: {}".format(x_train_3.shape, x_test_3.shape))
y_train = df_train['is_duplicate'].values
x_train = pd.concat([x_train_1,x_train_2,x_train_3],axis=1)
x_test = pd.concat([x_test_1,x_test_2,x_test_3],axis=1)
print("Merge: X_train: {}, X_test: {}".format(x_train.shape, x_test.shape))
assert x_train.shape[0] == df_train.shape[0]
assert x_test.shape[0] == df_test.shape[0]
# XGB
params = {}
params['objective'] = 'binary:logistic'
params['eval_metric'] = 'logloss'
params['eta'] = 0.01
params['max_depth'] = 5
params['silent'] = 1
params['seed'] = RS
print("Will train XGB for {} rounds, RandomSeed: {}".format(ROUNDS, RS))
x, X_val, ytrain, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=RS)
print("Training data: X_train: {}, Y_train: {}, X_test: {}".format(x_train.shape, len(y_train), x_test.shape))
xg_train = xgb.DMatrix(x, label=ytrain)
xg_val = xgb.DMatrix(X_val, label=y_val)
watchlist = [(xg_train,'train'), (xg_val,'eval')]
clf = xgb.train(params=params,dtrain=xg_train,num_boost_round=ROUNDS,early_stopping_rounds=200,evals=watchlist)
preds = clf.predict(xgb.DMatrix(x_test))
print("Writing output...")
sub = pd.DataFrame()
sub['test_id'] = df_test['test_id']
sub['is_duplicate'] = preds
sub.to_csv("xgb_feat_seed_3{}_n{}.csv".format(RS, ROUNDS), index=False)
print("Done.")
|
11471271
|
from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.debug_info_connections import DebugInfoConnections
from ..models.debug_info_messages import DebugInfoMessages
from ..models.debug_info_recip_key_to_connection_id import DebugInfoRecipKeyToConnectionId
T = TypeVar("T", bound="DebugInfo")
@attr.s(auto_attribs=True)
class DebugInfo:
""" """
connections: DebugInfoConnections
recip_key_to_connection_id: DebugInfoRecipKeyToConnectionId
messages: DebugInfoMessages
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
connections = self.connections.to_dict()
recip_key_to_connection_id = self.recip_key_to_connection_id.to_dict()
messages = self.messages.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"connections": connections,
"recip_key_to_connection_id": recip_key_to_connection_id,
"messages": messages,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
connections = DebugInfoConnections.from_dict(d.pop("connections"))
recip_key_to_connection_id = DebugInfoRecipKeyToConnectionId.from_dict(d.pop("recip_key_to_connection_id"))
messages = DebugInfoMessages.from_dict(d.pop("messages"))
debug_info = cls(
connections=connections,
recip_key_to_connection_id=recip_key_to_connection_id,
messages=messages,
)
debug_info.additional_properties = d
return debug_info
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
11471322
|
from aston.trace.trace import Trace, Chromatogram, decompress
__all__ = ["Trace", "Chromatogram", "decompress"]
|
11471349
|
pkgname = "unicode-cldr-common"
pkgver = "40.0"
pkgrel = 0
pkgdesc = "Common data from Unicode CLDR"
maintainer = "q66 <<EMAIL>>"
license = "Unicode-DFS-2016"
url = "https://cldr.unicode.org"
source = f"https://github.com/unicode-org/cldr/releases/download/release-{pkgver[:-2]}/cldr-common-{pkgver}.zip"
sha256 = "8d03c1ba6a3e33280e3959a34fe37d0a7002a4b6ac40c6570a69f7bbc25e6756"
def do_install(self):
self.install_dir("usr/share/unicode/cldr")
self.install_files("common", "usr/share/unicode/cldr")
self.install_license("LICENSE.txt")
|
11471350
|
pkgname = "ruby"
pkgver = "3.1.0"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--enable-shared", "--disable-rpath", "--disable-install-doc",
"ac_cv_func_isnan=yes", "ac_cv_func_isinf=yes"
]
make_cmd = "gmake"
make_build_args = ["all", "capi"]
make_install_env = {"MAKE": "gmake"}
hostmakedepends = ["gmake", "pkgconf", "bison", "flex", "mandoc"]
makedepends = [
"zlib-devel", "libedit-devel", "libffi-devel", "openssl-devel",
"libyaml-devel"
]
pkgdesc = "Ruby scripting language"
maintainer = "q66 <<EMAIL>>"
license = "Ruby OR BSD-2-Clause"
url = "https://www.ruby-lang.org/en"
source = f"https://cache.ruby-lang.org/pub/{pkgname}/{pkgver[:-2]}/{pkgname}-{pkgver}.tar.xz"
sha256 = "1a0e0b69b9b062b6299ff1f6c6d77b66aff3995f63d1d8b8771e7a113ec472e2"
# check disabled until verified; will need the arch prefix removed from the compiler name
# tests mostly pass but there are some portability issues in the test
# suite (stat usage) + chown not working in the sandbox + locale issues
options = ["!cross", "!check"]
match self.profile().arch:
case "ppc64":
# just ELFv2
configure_args += ["--with-coroutine=ppc64le"]
if self.profile().cross:
hostmakedepends += ["ruby"]
def post_install(self):
self.install_license("COPYING")
@subpackage("ruby-devel")
def _devel(self):
return self.default_devel(extra = [
f"usr/lib/ruby/{pkgver}/mkmf.rb"
])
@subpackage("ruby-ri")
def _ri(self):
self.depends += [f"{pkgname}={pkgver}-r{pkgrel}"]
return ["usr/bin/ri"]
|
11471376
|
from boa3.builtin import CreateNewEvent
Event = CreateNewEvent([('a',)])
def Main():
Event()
|
11471384
|
def payout_response():
return {
"id": "a6ee1bf1-ffcd-4bda-a7ab-99c1d5cd0472",
"external_id": "payout-1595405117",
"amount": 50000,
"merchant_name": "Xendit&#x27;s Intern",
"status": "PENDING",
"expiration_timestamp": "2020-07-23T08:05:19.815Z",
"created": "2020-07-22T08:05:18.421Z",
"email": "<EMAIL>",
"payout_url": "https://payout-staging.xendit.co/web/a6ee1bf1-ffcd-4bda-a7ab-99c1d5cd0472",
}
def void_payout_response():
return {
"id": "a6ee1bf1-ffcd-4bda-a7ab-99c1d5cd0472",
"external_id": "payout-1595405117",
"amount": 50000,
"merchant_name": "Xendit&#x27;s Intern",
"status": "VOIDED",
"expiration_timestamp": "2020-07-23T08:05:19.815Z",
"created": "2020-07-22T08:05:18.421Z",
"email": "<EMAIL>",
}
|
11471400
|
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.lib import newIcon, labelValidator
BB = QDialogButtonBox
class AdjustWindowLevelDialog(QDialog):
def __init__(self, text="Adjust window/level", parent=None):
super(AdjustWindowLevelDialog, self).__init__(parent)
self.windowEdit = QLineEdit()
self.windowEdit.setText(text)
self.windowEdit.setValidator(labelValidator())
self.windowEdit.editingFinished.connect(self.postProcess)
self.levelEdit = QLineEdit()
self.levelEdit.setText(text)
self.levelEdit.setValidator(labelValidator())
self.levelEdit.editingFinished.connect(self.postProcess)
layout = QVBoxLayout()
layout.addWidget(self.windowEdit)
layout.addWidget(self.levelEdit)
self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
bb.rejected.connect(self.reject)
layout.addWidget(bb)
self.setLayout(layout)
def validate(self):
try:
if self.windowEdit.text().trimmed() and self.levelEdit.text().trimmed():
try:
_ = int(self.windowEdit.text())
_ = int(self.levelEdit.text())
self.accept()
except ValueError:
self.reject()
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
if self.windowEdit.text().strip() and self.levelEdit.text().strip():
try:
_ = int(self.windowEdit.text())
_ = int(self.levelEdit.text())
self.accept()
except ValueError:
self.reject()
def postProcess(self):
try:
self.windowEdit.setText(self.windowEdit.text().trimmed())
self.levelEdit.setText(self.levelEdit.text().trimmed())
except AttributeError:
# PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
self.windowEdit.setText(self.windowEdit.text().strip())
self.levelEdit.setText(self.levelEdit.text().strip())
def popUp(self, w_width=1000, w_level=200, move=True):
self.windowEdit.setText(str(w_width))
self.windowEdit.setSelection(0, len(str(w_width)))
self.windowEdit.setFocus(Qt.PopupFocusReason)
self.levelEdit.setText(str(w_level))
if move:
self.move(QCursor.pos())
if self.exec_():
return int(self.windowEdit.text()), int(self.levelEdit.text())
else:
return None
|
11471456
|
import os
import sys
import colorsys
sys.path.insert(0, './')
import glob
import string
import numpy as np
import pyvista as pv
import tensorflow as tf
from utils import helpers, tf_utils
def rotate_boxes(boxes, centers, theta):
pts_out = np.zeros((boxes.shape[0], 8, 3), np.float32)
for i, (b, c, r) in enumerate(zip(boxes, centers, theta)):
pts_out[i] = helpers.rotate_box(b, c, r)
return pts_out
def plot(pts, colors, labels):
    labels_mask = labels.astype(bool)[:, 0]
labels = labels[labels_mask]
centers = labels[:, :3]
ext = labels[:, 3:6]
theta = labels[:, 6:8]
boxes_min = centers - (ext / 2)
boxes_max = centers + (ext / 2)
boxes = np.hstack((boxes_min, boxes_max))
obj_pts = rotate_boxes(boxes, centers, theta)
plot = pv.Plotter()
plot.view_xy()
# Remove ceiling
colors = colors[pts[:, 2] < np.max(pts[:, 2])-1.]
pts = pts[pts[:, 2] < np.max(pts[:, 2])-1.]
plot.add_points(pts, scalars=colors, rgb=True, render_points_as_spheres=True, point_size=15)
plot.add_points(labels[:, :3], color=[0, 0, 1], render_points_as_spheres=True, point_size=20)
classes = np.linspace(0, 1, obj_pts.shape[0]+1)
rgb_classes = np.array([colorsys.hsv_to_rgb(c, 0.8, 0.8) for c in classes])
    for i, box_pts in enumerate(obj_pts):
        lines = helpers.make_lines(box_pts)
for l in lines:
plot.add_mesh(l, color=rgb_classes[i], line_width=6)
plot.show()
def create_example(pts, colors, labels):
n_inst = labels.shape[0] if len(labels.shape) > 0 else 0
feature = {
'points' : tf_utils.float_list_feature(pts.reshape(-1, 1)),
'colors' : tf_utils.float_list_feature(colors.reshape(-1, 1)),
'labels' : tf_utils.float_list_feature(labels.reshape(-1, 1)),
'n_inst' : tf_utils.int64_feature(n_inst)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def crop_s3dis():
filelist = glob.glob(os.path.join(config['in_dir'], '*.npy'))
box_size = config['box_size']
overlap = config['overlap']
saved = 0
with tf.io.TFRecordWriter(config['out_train_file']) as train_writer, tf.io.TFRecordWriter(config['out_test_file']) as test_writer:
bar = helpers.progbar(len(filelist))
bar.start()
max_labels = 0
        rotations = np.radians(np.array([0, 90, 180, 270])) if config['rotate'] else np.array([0.])
for i, f in enumerate(filelist):
bar.update(i+1)
scene = np.load(f)
area = '_'.join(f.split('/')[-1].split('_')[:2])
room = '_'.join(f.split('/')[-1].split('.')[0].split('_')[2:])
area_n = int(f.split('/')[-1].split('_')[1])
object_paths = glob.glob(os.path.join(config['root_dir'], area, room, 'Annotations', '*{}*.npy'.format(config['label_object'])))
            objects = np.array([np.load(o_f)[:, :3] for o_f in object_paths], dtype=object)
object_means_orig = np.array([np.mean(o, axis=0) for o in objects])
if object_means_orig.shape[0] == 0: continue
object_thetas_orig, object_extents = helpers.get_oabb(objects)
scene_extent = [
np.min(scene[:, 0]), np.min(scene[:, 1]), np.min(scene[:, 2]),
np.max(scene[:, 0]), np.max(scene[:, 1]), np.max(scene[:, 2])
]
x_stride_len = box_size[0]
y_stride_len = box_size[1]
num_xstrides = int(np.ceil((scene_extent[3] - scene_extent[0])/box_size[0]))
num_ystrides = int(np.ceil((scene_extent[4] - scene_extent[1])/box_size[1]))
for x_stride in range(num_xstrides):
for y_stride in range(num_ystrides):
bbox = [
scene_extent[0] + (x_stride*x_stride_len) - overlap[0]/2,
scene_extent[1] + (y_stride*y_stride_len) - overlap[0]/2,
-1e10,
scene_extent[0] + ((x_stride*x_stride_len) + x_stride_len) + overlap[0]/2,
scene_extent[1] + ((y_stride*y_stride_len) + y_stride_len) + overlap[0]/2,
1e10
]
scene_crop_orig = helpers.crop_bbox(scene, bbox)
if scene_crop_orig.shape[0] < config['n_pts'] / 2: continue
for angle in rotations:
_, scene_crop = helpers.get_fixed_pts(scene_crop_orig, config['n_pts'])
object_means = object_means_orig.copy()
object_thetas = object_thetas_orig.copy()
scene_crop[:, :3] = helpers.rotate_euler(scene_crop[:, :3], angle)
object_means = helpers.rotate_euler(object_means_orig, angle)
radians = np.arctan2(object_thetas[:, 1], object_thetas[:, 0])
radians -= angle
object_thetas[:, 0] = np.cos(radians)
object_thetas[:, 1] = np.sin(radians)
pts = scene_crop[:, :3]
scene_mean = np.mean(pts, axis=0)
pts -= scene_mean
colors = scene_crop[:, 3:6] / 255.
obj_occ = np.array([helpers.check_occupancy(obj_pts, bbox) for obj_pts in objects])
obj_occ[obj_occ < 1000] = 0
                        obj_occ = obj_occ.astype(bool)
                        if obj_occ.any():
                            try:
                                labels = object_means[obj_occ]
                                labels -= scene_mean
                                labels = np.hstack((labels, object_extents[obj_occ]))
                                labels = np.hstack((labels, object_thetas[obj_occ]))
max_labels = labels.shape[0] if labels.shape[0] > max_labels else max_labels
labels = np.pad(labels, [[0, config['max_labels']-labels.shape[0]],[0, 0]])
                            except ValueError:
                                # more objects than config['max_labels'] in this crop
                                print(labels.shape)
continue
else:
continue
# Uncomment to visualise training data
# plot(pts, colors, labels)
tf_example = create_example(pts, colors, labels)
if area_n != 5:
train_writer.write(tf_example.SerializeToString())
else:
test_writer.write(tf_example.SerializeToString())
saved += 1
bar.finish()
print('[info] total scenes generated: {}'.format(saved))
print('[info] max label count: {}'.format(max_labels))
if __name__ == '__main__':
config = {
'root_dir' : './data/Stanford3dDataset',
'in_dir': './data/Stanford3dDataset/processed',
'out_train_file' : './data/s3d_scene_train.tfrecord',
'out_test_file' : './data/s3d_scene_test.tfrecord',
'label_object' : 'chair',
'box_size' : (1.5, 1.5),
'overlap' : (1.5, 1.5),
'max_labels' : 25,
'rotate' : True,
'n_pts' : 32768
}
crop_s3dis()
|
11471464
|
import sys
sys.path.append("..")
import numpy as np
from env.grid_world import GridWorld
from utils.plots import plot_gridworld
###########################################################
# Plot a grid world with no solution #
###########################################################
# specify world parameters
num_cols = 10
num_rows = 10
obstructions = np.array([[0,7],[1,1],[1,2],[1,3],[1,7],[2,1],[2,3],
[2,7],[3,1],[3,3],[3,5],[4,3],[4,5],[4,7],
[5,3],[5,7],[5,9],[6,3],[6,9],[7,1],[7,6],
[7,7],[7,8],[7,9],[8,1],[8,5],[8,6],[9,1]])
bad_states = np.array([[1,9],[4,2],[4,4],[7,5],[9,9]])
restart_states = np.array([[3,7],[8,2]])
start_state = np.array([[0,4]])
goal_states = np.array([[0,9],[2,2],[8,7]])
# create model
gw = GridWorld(num_rows=num_rows,
num_cols=num_cols,
start_state=start_state,
goal_states=goal_states)
gw.add_obstructions(obstructed_states=obstructions,
bad_states=bad_states,
restart_states=restart_states)
gw.add_rewards(step_reward=-1,
goal_reward=10,
bad_state_reward=-6,
restart_state_reward=-10)
gw.add_transition_probability(p_good_transition=0.7,
bias=0.5)
gw.add_discount(discount=0.9)
model = gw.create_gridworld()
# plot world
path = "../doc/imgs/unsolved_gridworld.png"
plot_gridworld(model, title="Test world", path=path)
|
11471490
|
import sys
import pickle
import subprocess
import os
import re
import datetime
import time
#src/python/runGeoShapeBenches.py -compare -reindex -ant
reTotHits = re.compile('totHits=(\d+)$')
nightly = '-nightly' in sys.argv
compareRun = '-compare' in sys.argv
if nightly and compareRun:
raise RuntimeError('cannot run nightly job with compare flag')
if nightly and '-reindex' not in sys.argv:
sys.argv.append('-reindex')
####################
# Add here your paths
####################
GEO_UTIL_DIR ='/Users/ivera/forks/luceneutil_cleanfork'
GEO_LUCENE_DIR = '/Users/ivera/forks/lucene-solr-fork/lucene'
BASELINE_LUCENE_DIR = '/Users/ivera/projects/lucene-solr/lucene'
GEO_LOGS_DIR = '/data/geo/'
#######
# add your file name, it needs to be under your data directory (see IndexAndSearchShapes.java)
#######
fileName = "osmdata.wkt"
approaches = ('LatLonShape',)
ops = ('intersects', 'contains', 'within', 'disjoint')
shapes = ('point', 'box', 'distance', 'poly 10', 'polyMedium', 'polyRussia')
def printResults(results, stats, maxDoc):
print()
    print('Results on %.2fM points:' % (maxDoc/1000000.))
print()
if '-reindex' in sys.argv or '-reindexFast' in sys.argv:
print('||Approach||Index time (sec)||Force merge time (sec)||Index size (GB)||Reader heap (MB)||')
readerHeapMB, indexSizeGB, indexTimeSec, forceMergeTimeSec = stats['LatLonShape']
print('%s|%.1fs|%.1fs|%.2f|%.2f|' % ('LatLonShape', indexTimeSec, forceMergeTimeSec, indexSizeGB, readerHeapMB))
else:
print('||Index size (GB)||Reader heap (MB)||')
readerHeapMB, indexSizeGB = stats['LatLonShape'][:2]
print('|%.2f|%.2f|' % (indexSizeGB, readerHeapMB))
print()
print('||Approach||Shape||Operation||M hits/sec||QPS||Hit count||')
for shape in shapes:
for op in ops:
tup = shape, op
if tup in results:
qps, mhps, totHits = results[tup]
print('|%s|%s|%s|%.2f|%.2f|%d|' % ('LatLonShape',shape, op, mhps, qps, totHits))
def printCompareResults(results, stats, maxDoc, resultsBase, statsBase):
print()
    print('Results on %.2fM points:' % (maxDoc/1000000.))
print()
if '-reindex' in sys.argv or '-reindexFast' in sys.argv:
print('Index time (sec)||Force merge time (sec)||Index size (GB)||Reader heap (MB)||')
print('||Dev||Base||Diff ||Dev ||Base ||diff ||Dev||Base||Diff||Dev||Base||Diff ||')
readerHeapMB, indexSizeGB, indexTimeSec, forceMergeTimeSec = stats['LatLonShape']
readerHeapMBBase, indexSizeGBBase, indexTimeSecBase, forceMergeTimeSecBase = statsBase['LatLonShape']
print('|%.1fs|%.1fs|%2.f%%|%.1fs|%.1fs|%2.f%%|%.2f|%.2f|%2.f%%|%.2f|%.2f|%2.f%%|' % (indexTimeSec, indexTimeSecBase, computeDiff(indexTimeSec, indexTimeSecBase), forceMergeTimeSec, forceMergeTimeSecBase, computeDiff(forceMergeTimeSec, forceMergeTimeSecBase), indexSizeGB, indexSizeGBBase, computeDiff(indexSizeGB, indexSizeGBBase), readerHeapMB, readerHeapMBBase, computeDiff(readerHeapMB, readerHeapMBBase)))
else:
print('||Index size (GB)||Reader heap (MB)||')
print('||Dev||Base||Diff||Dev||Base||Diff ||')
readerHeapMB, indexSizeGB = stats['LatLonShape'][:2]
readerHeapMBBase, indexSizeGBBase = statsBase['LatLonShape'][:2]
print('|%.2f|%.2f|%2.f%%|%.2f|%.2f|%2.f%%|' % (indexSizeGB, indexSizeGBBase, computeDiff(indexSizeGB, indexSizeGBBase), readerHeapMB, readerHeapMBBase, computeDiff(readerHeapMB, readerHeapMBBase)))
print()
print('||Approach||Shape||M hits/sec ||QPS ||Hit count ||')
print(' ||Dev||Base ||Diff||Dev||Base||Diff||Dev||Base||Diff||')
for op in ops:
for shape in shapes:
tup = shape, op
if tup in results:
qps, mhps, totHits = results[tup]
qpsBas, mhpsBas, totHitsBas = resultsBase[tup]
print('|%s|%s|%.2f|%.2f|%2.f%%|%.2f|%.2f|%2.f%%|%d|%d|%2.f%%|' % (shape, op, mhps, mhpsBas, computeDiff(mhps, mhpsBas), qps, qpsBas, computeDiff(qps, qpsBas), totHits, totHitsBas, computeDiff(totHits, totHitsBas)))
def computeDiff(dev, base):
if dev == 0:
return 0
return 100. * (dev - base) / base
def compile(basedir):
sources = '%s/src/main/perf/IndexAndSearchShapes.java' % (GEO_UTIL_DIR)
testFramework = '%s/build/test-framework/classes/java' % (basedir)
codecs = '%s/build/codecs/classes/java' % (basedir)
core = '%s/build/core/classes/java' % (basedir)
sandbox = '%s/build/sandbox/classes/java' % (basedir)
compile = 'javac -cp %s:%s:%s:%s %s' % (testFramework, codecs, core, sandbox, sources)
if os.system(compile):
raise RuntimeError('compile failed : %s' % basedir)
def execute(results, tup, didReindexParam, indexKey, log, basedir, dev):
extra = ' -file ' + fileName
if '-reindex' in sys.argv and indexKey not in didReindexParam:
extra = extra + ' -reindex'
didReindexParam.add(indexKey)
if '-reindexFast' in sys.argv and indexKey not in didReindexParam:
extra = extra + ' -reindexFast'
didReindexParam.add(indexKey)
if dev:
extra = extra + ' -dev'
shapeCmd = shape
utilSrcDir = '%s/src/main' % (GEO_UTIL_DIR)
testFramework = '%s/build/test-framework/classes/java' % (basedir)
codecs = '%s/build/codecs/classes/java' % (basedir)
core = '%s/build/core/classes/java' % (basedir)
sandbox = '%s/build/sandbox/classes/java' % (basedir)
run = 'java -Xmx10g -cp %s:%s:%s:%s:%s perf.IndexAndSearchShapes -%s -%s%s' % (utilSrcDir, testFramework, codecs, core, sandbox, op, shapeCmd, extra)
p = subprocess.Popen(run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
totHits = None
indexSizeGB = None
readerHeapMB = None
maxDoc = None
indexTimeSec = 0.0
forceMergeTimeSec = 0.0
while True:
line = p.stdout.readline().decode('utf-8')
if len(line) == 0:
break
line = line.rstrip()
m = reTotHits.search(line)
if m is not None:
x = m.group(1)
if totHits is None:
totHits = x
elif totHits != x:
raise RuntimeError('total hits changed from %s to %s' % (totHits, x))
log.write('%7.1fs: %s, %s: %s\n' % (time.time()-t0, op, shape, line))
doPrintLine = False
if line.find('...') != -1 or line.find('ITER') != -1 or line.find('***') != -1:
doPrintLine = True
if line.startswith('BEST QPS: '):
doPrintLine = True
results[(shape, op)] = (float(line[10:]), bestMHPS, int(totHits))
pickle.dump((rev, stats, results), open(resultsFileName, 'wb'))
if line.startswith('BEST M hits/sec: '):
doPrintLine = True
bestMHPS = float(line[17:])
if line.startswith('INDEX SIZE: '):
doPrintLine = True
indexSizeGB = float(line[12:-3])
if line.startswith('READER MB: '):
doPrintLine = True
readerHeapMB = float(line[11:])
if line.startswith('numPoints='):
doPrintLine = True
if line.startswith('maxDoc='):
maxDoc = int(line[7:])
doPrintLine = True
i = line.find(' sec to index part ')
if i != -1:
doPrintLine = True
indexTimeSec += float(line[:i])
i = line.find(' sec to force merge part ')
if i != -1:
doPrintLine = True
forceMergeTimeSec += float(line[:i])
if doPrintLine:
print('%7.1fs: %s, %s: %s' % (time.time()-t0, op, shape, line))
tup[0] = readerHeapMB
tup[1] = indexSizeGB
tup[2] = indexTimeSec
tup[3] = forceMergeTimeSec
return maxDoc
def antCompile(basedir):
    if os.chdir(basedir):
        raise RuntimeError('cannot change working directory: %s' % basedir)
    print('ant compile on %s...' % basedir)
    if os.system('ant compile > %s/compile.log' % GEO_LOGS_DIR):
        raise RuntimeError('ant compile failed; see %s/compile.log' % GEO_LOGS_DIR)
    if os.chdir(GEO_UTIL_DIR):
        raise RuntimeError('cannot change working directory: %s' % GEO_UTIL_DIR)
if nightly:
# paths for nightly run
GEO_UTIL_DIR = '/l/util.nightly/'
GEO_LUCENE_DIR = '/l/trunk.nightly/lucene/'
GEO_LOGS_DIR = '/l/logs.nightly/geoshape'
if '-timeStamp' in sys.argv:
timeStamp = sys.argv[sys.argv.index('-timeStamp')+1]
year, month, day, hour, minute, second = (int(x) for x in timeStamp.split('.'))
timeStampDateTime = datetime.datetime(year, month, day, hour, minute, second)
else:
start = datetime.datetime.now()
timeStamp = '%04d.%02d.%02d.%02d.%02d.%02d' % (start.year, start.month, start.day, start.hour, start.minute, start.second)
resultsFileName = '%s/%s.pk' % (GEO_LOGS_DIR, timeStamp)
else:
resultsFileName = 'geo.results.pk'
# nocommit should we "ant jar"?
if nightly:
logFileName = '%s/%s.log.txt' % (GEO_LOGS_DIR, timeStamp)
else:
logFileName = '%s/geoShapeBenchLog.txt' % (GEO_LOGS_DIR)
os.chdir(GEO_LUCENE_DIR)
rev = os.popen('git rev-parse HEAD').read().strip()
print('git head revision %s' % rev)
print('\nNOTE: logging all output to %s; saving results to %s\n' % (logFileName, resultsFileName))
if '-ant' in sys.argv:
antCompile(GEO_LUCENE_DIR)
if compareRun:
antCompile(BASELINE_LUCENE_DIR)
compile(GEO_LUCENE_DIR)
if compareRun:
compile(BASELINE_LUCENE_DIR)
results = {}
stats = {}
theMaxDoc = None
resultsBase = {}
statsBase = {}
theMaxDocBase = None
didReIndex = set()
didReIndexBase = set()
t0 = time.time()
# TODO: filters
with open(logFileName, 'w') as log:
log.write('\ngit head revision %s' % rev)
for op in ops:
for shape in shapes:
indexKey = 'LatLonShape'
tup =[None, None, None, None]
maxDoc = execute(results, tup, didReIndex, indexKey, log, GEO_LUCENE_DIR, False)
if maxDoc is None:
raise RuntimeError('did not see maxDoc')
if indexKey not in stats:
stats[indexKey] = tup
elif stats[indexKey][:2] != tup[:2]:
raise RuntimeError('stats changed for %s: %s vs %s' % (indexKey, stats[indexKey], tup))
            if theMaxDoc is None:
theMaxDoc = maxDoc
elif maxDoc != theMaxDoc:
raise RuntimeError('maxDoc changed from %s to %s' % (theMaxDoc, maxDoc))
if compareRun:
tupBase =[None, None, None, None]
maxDocBase = execute(resultsBase, tupBase, didReIndexBase, indexKey, log, BASELINE_LUCENE_DIR, True)
if maxDocBase is None:
raise RuntimeError('did not see maxDoc')
if maxDocBase != maxDoc:
raise RuntimeError('different count from current and baseline projects')
if indexKey not in statsBase:
statsBase[indexKey] = tupBase
elif statsBase[indexKey][:2] != tupBase[:2]:
raise RuntimeError('stats changed for %s: %s vs %s' % (indexKey, statsBase[indexKey], tupBase))
printCompareResults(results, stats, maxDoc, resultsBase, statsBase)
else:
printResults(results, stats, maxDoc)
if nightly:
os.system('bzip2 --best %s' % logFileName)
print('Took %.1f sec to run all geo benchmarks' % (time.time()-t0))
|
11471571
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import sys
import datetime
import yaml
from vsmlib.benchmarks.sequence_labeling import load_data
import argparse
import vsmlib
from scipy.stats.stats import spearmanr
import os
import random
import math
def read_test_set(path):
test = []
with open(path) as f:
for line in f:
# line = line.lower();
x, y, sim = line.strip().split()
test.append(((x, y), float(sim)))
return test
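# Each line of a test-set file is expected to look like
# "<word1> <word2> <similarity>", e.g. (hypothetical): tiger cat 7.35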
def evaluate(m, data):
results = []
count = 0
for (x, y), sim in data:
x = x.lower()
y = y.lower()
# print(x,y)
if m.has_word(x) and m.has_word(y) and not math.isnan(m.get_row(x).dot(m.get_row(y))):
# print(m.get_row(x).dot(m.get_row(y)))
results.append((m.get_row(x).dot(m.get_row(y)), sim))
count += 1
else:
# results.append((-1, sim))
# results.append((0, sim))
pass
if len(results) <= 2:
return -1, count
actual, expected = zip(*results)
# print(actual)
return spearmanr(actual, expected)[0], count
def run(embeddings, options):
results = []
for file in os.listdir(options["path_dataset"]):
testset = read_test_set(os.path.join(options["path_dataset"], file))
out = dict()
out["result"], count = evaluate(embeddings, testset)
experiment_setup = dict()
experiment_setup["cnt_finded_pairs_total"] = count
experiment_setup["cnt_pairs_total"] = len(testset)
experiment_setup["embeddings"] = embeddings.metadata
experiment_setup["category"] = "default"
experiment_setup["dataset"] = os.path.splitext(file)[0]
experiment_setup["method"] = "cosine_distance"
experiment_setup["measurement"] = "spearman"
experiment_setup["task"] = "word_similarity"
experiment_setup["timestamp"] = datetime.datetime.now().isoformat()
out["experiment_setup"] = experiment_setup
results.append(out)
return results
def main(args=None):
# use ArgumentParser
# args = parse_args()
# use yaml
options = {}
if args is None or args.path_config is None:
if len(sys.argv) > 1:
path_config = sys.argv[1]
else:
print("usage: python3 -m vsmlib.benchmarls.similarity.similarity <config file>")
print("config file example can be found at ")
print("https://github.com/undertherain/vsmlib/blob/master/vsmlib/benchmarks/sequence_labeling/similarity/config.yaml")
return
else:
path_config = args.path_config
with open(path_config, 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)
options["path_vector"] = cfg["path_vector"]
options["path_dataset"] = cfg["path_dataset"]
options["normalize"] = cfg["normalize"]
# overwrite params
if args is not None:
if args.path_vector is not None:
options["path_vector"] = args.path_vector
if args.path_dataset is not None:
options["path_dataset"] = args.path_dataset
# get the embeddings
m = vsmlib.model.load_from_dir(options['path_vector'])
if options["normalize"]:
# m.clip_negatives() #make this configurable
m.normalize()
results = run(m, options)
return results
if __name__ == '__main__':
main()
|
11471608
|
import thorpy
def run():
application = thorpy.Application((800, 600), "ThorPy Overview")
element = thorpy.Element("Element")
thorpy.makeup.add_basic_help(element,"Element:\nMost simple graphical element.")
clickable = thorpy.Clickable("Clickable")
thorpy.makeup.add_basic_help(clickable,"Clickable:\nCan be hovered and pressed.")
draggable = thorpy.Draggable("Draggable")
thorpy.makeup.add_basic_help(draggable,"Draggable:\nYou can drag it.")
checker_check = thorpy.Checker("Checker")
checker_radio = thorpy.Checker("Radio", type_="radio")
browser = thorpy.Browser("../../", text="Browser")
browserlauncher = thorpy.BrowserLauncher.make(browser, const_text="Choose file:",
var_text="")
browserlauncher.max_chars = 20 #limit size of browser launcher
dropdownlist = thorpy.DropDownListLauncher(const_text="Choose number:",
var_text="",
titles=[str(i)*i for i in range(1, 9)])
dropdownlist.scale_to_title()
dropdownlist.max_chars = 20 #limit size of drop down list
slider = thorpy.SliderX(80, (5, 12), "Slider: ", type_=float,
initial_value=8.4)
inserter = thorpy.Inserter(name="Inserter: ", value="Write here.")
quit = thorpy.make_button("Quit",func=thorpy.functions.quit_menu_func)
title_element = thorpy.make_text("Overview example", 22, (255,255,0))
elements = [element, clickable, draggable, checker_check, checker_radio,
dropdownlist, browserlauncher, slider, inserter, quit]
central_box = thorpy.Box(elements=elements)
central_box.fit_children(margins=(30,30)) #we want big margins
central_box.center() #center on screen
central_box.add_lift() #add a lift (useless since box fits children)
central_box.set_main_color((220,220,220,180)) #set box color and opacity
background = thorpy.Background.make(image=thorpy.style.EXAMPLE_IMG,
elements=[title_element, central_box])
thorpy.store(background)
menu = thorpy.Menu(background)
menu.play()
application.quit()
if __name__ == "__main__":
run()
|
11471627
|
import numpy as np
import scipy.linalg as la
import pdb
from nltk import ngrams
import difflib
import pickle
from time import time
import os
import torch
import urllib.request
from tqdm import tqdm
from scipy.spatial.distance import pdist, squareform
import scipy
from numpy import dot
from numpy.linalg import norm
import gzip
import urllib
from torchtext.vocab import GloVe
from pathlib import Path
model = None
def trigger_dips():
global model
def unk_init(x):
return torch.randn_like(x)
model = GloVe('6B', dim=50, unk_init=unk_init)
cos_sim = lambda a, b: dot(a, b) / (norm(a) * norm(b))
rbf = lambda a, b, sigma: np.exp(-(np.sum((a - b) ** 2)) / sigma ** 2)
def sent2wvec(s):
v = model.get_vecs_by_tokens(s, lower_case_backup=True)
v = v.detach().cpu().numpy()
return v
def sentence_compare(s1, s2, kernel="cos", **kwargs):
l1 = s1.split()
l2 = s2.split()
v1 = sent2wvec(l1)
v2 = sent2wvec(l2)
score = 0
len_s1 = v1.shape[0]
for v in v1:
if kernel == "cos":
wscore = np.max(np.array([cos_sim(v, i) for i in v2]))
elif kernel == "rbf":
wscore = np.max(np.array([rbf(v, i, kwargs["sigma"]) for i in v2]))
        else:
            raise ValueError("unknown kernel type: %s" % kernel)
score += wscore / len_s1
return score
def similarity_func(v, S):
if len(S):
score = 0.0
for sent in S:
score += sentence_compare(v, sent, kernel="rbf", sigma=1.0)
return np.sqrt(score)
else:
return 0.0
def similarity_gain(v, s, base_score=0.0):
score = 0.0
score += sentence_compare(v, s, sigma=1.0)
score += base_score ** 2
return np.sqrt(score)
#####################################################################################################################
#####################################################################################################################
########################################### NGRAM FUNCTIONS #########################################################
def ngram_toks(sents, n=1):
ntoks = []
for sent in sents:
ntok = list(ngrams(sent.split(), n))
newtoks = [tok for tok in ntok]
ntoks += newtoks
return ntoks
def distinct_ngrams(S):
if len(S):
S = " ".join(S)
N = [1, 2, 3]
score = 0.0
for n in N:
toks = set(ngram_toks([S], n))
score += (1.0 / n) * len(toks)
return score
else:
return 0.0
def ngram_overlap(v, S):
if len(S):
N = [1, 2, 3]
score = 0.0
for n in N:
src_toks = set(ngram_toks([v], n))
            for sent in S:
                sent_toks = set(ngram_toks([sent], n))
overlap = src_toks.intersection(sent_toks)
score += (1.0 / (4 - n)) * len(overlap)
return np.sqrt(score)
else:
return 0.0
def ngram_overlap_unit(v, S, base_score=0.0):
N = [1, 2, 3]
score = 0.0
    try:
        S[0]
    except (TypeError, IndexError):
        S = [S]
for n in N:
src_toks = set(ngram_toks([v], n))
sent_toks = set(ngram_toks([S], n))
overlap = src_toks.intersection(sent_toks)
score += (1.0 / (4 - n)) * len(overlap)
return np.sqrt((base_score ** 2) + score)
#####################################################################################################################
########################################### EDIT DISTANCE FUNCTION ##################################################
def seq_func(V, S):
if len(S):
score = 0.0
for v in V:
for s in S:
vx = v.split()
sx = s.split()
seq = difflib.SequenceMatcher(None, vx, sx)
score += seq.ratio()
return np.sqrt(score)
else:
return 0.0
def seq_gain(V, s, base_score=0.0):
gain = 0.0
for v in V:
vx = v.split()
sx = s.split()
seq = difflib.SequenceMatcher(None, vx, sx)
gain += seq.ratio()
score = (base_score ** 2) + gain
return np.sqrt(score)
def info_func(S, orig_count, ref_count):
if len(S):
score = 0.0
for s in S:
stoks = set(s.split())
orig_toks = set(orig_count.keys())
int_toks = stoks.intersection(orig_toks)
for tok in int_toks:
                try:
                    score += orig_count[tok] / (1 + ref_count[tok])
                except KeyError:
                    score += orig_count[tok]
return np.sqrt(score)
else:
return 0.0
def info_gain(s, orig_count, ref_count, base_score=0.0):
score = 0.0
stoks = set(s.split())
orig_toks = set(orig_count.keys())
int_toks = stoks.intersection(orig_toks)
for tok in int_toks:
        try:
            score += orig_count[tok] / (1 + ref_count[tok])
        except KeyError:
            score += orig_count[tok]
score += base_score ** 2
return np.sqrt(score)
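# A small self-contained demo of the pure n-gram helpers above (illustrative
# only; the sentences are made up).
if __name__ == "__main__":
    demo_sents = ["the cat sat on the mat", "a dog sat on the rug"]
    print("distinct n-grams:", distinct_ngrams(demo_sents))
    print("overlap with source:", ngram_overlap("the cat sat", demo_sents))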
|
11471674
|
from functools import cached_property
from wagtail.core.models import Page
from django.apps import apps
from strawberry import Schema
from strawberry.django.views import GraphQLView as BaseGraphQLView
from .schema import get_schema_from_models
def get_schema() -> Schema:
all_models = apps.get_models()
page_models = (
model for model in all_models if issubclass(model, Page) and model is not Page
)
return get_schema_from_models(page_models)
class GraphQLView(BaseGraphQLView):
def __init__(
self,
graphiql=True,
subscriptions_enabled=False,
):
self.graphiql = graphiql
self.subscriptions_enabled = subscriptions_enabled
@cached_property
def schema(self) -> Schema:
return get_schema()
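# A minimal wiring sketch (illustrative only): expose the view from a Django
# urls.py. The URL path below is an assumption, not part of this module.
#
# from django.urls import path
#
# urlpatterns = [
#     path("graphql/", GraphQLView.as_view()),
# ]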
|
11471689
|
from .BaseDoc import BaseDoc
from .CPF import CPF
from .CNPJ import CNPJ
from .CNH import CNH
from .CNS import CNS
from .PIS import PIS
from .TituloEleitoral import TituloEleitoral
from .Certidao import Certidao
from .RENAVAM import RENAVAM
from .generic import validate_docs
|
11471713
|
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListFICInterface(command.Lister):
def get_parser(self, prog_name):
parser = super(ListFICInterface, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
columns = (
'id',
'name',
'status',
)
column_headers = (
'ID',
'Name',
'Status',
)
data = [to_obj.FICInterface(ficsv) for ficsv in
network_client.list_fic_interfaces().get('fic_interfaces')]
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowFICInterface(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowFICInterface, self).get_parser(prog_name)
parser.add_argument(
'fic_interface_id',
metavar="FIC_INTERFACE_ID",
help="ID of FIC Interface to show."
)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
fic_interface_id = parsed_args.fic_interface_id
dic = network_client.show_fic_interface(fic_interface_id).get(
'fic_interface')
columns = utils.get_columns(dic)
obj = to_obj.FICInterface(dic)
data = utils.get_item_properties(
obj, columns, )
return columns, data
|
11471717
|
from django.db import connection
from django.conf import settings
from django.core.management import call_command
from django.test import TransactionTestCase
from django_tenants.utils import get_public_schema_name
class BaseTestCase(TransactionTestCase):
"""
Base test case that comes packed with overloaded INSTALLED_APPS,
custom public tenant, and schemas cleanup on tearDown.
"""
@classmethod
def setUpClass(cls):
settings.TENANT_MODEL = 'customers.Client'
settings.TENANT_DOMAIN_MODEL = 'customers.Domain'
settings.SHARED_APPS = ('django_tenants',
'customers')
settings.TENANT_APPS = ('dts_test_app',
'django.contrib.contenttypes',
'django.contrib.auth', )
settings.INSTALLED_APPS = settings.SHARED_APPS + settings.TENANT_APPS
if '.test.com' not in settings.ALLOWED_HOSTS:
settings.ALLOWED_HOSTS += ['.test.com']
cls.available_apps = settings.INSTALLED_APPS
super().setUpClass()
def setUp(self):
connection.set_schema_to_public()
super().setUp()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if '.test.com' in settings.ALLOWED_HOSTS:
settings.ALLOWED_HOSTS.remove('.test.com')
@classmethod
def get_tables_list_in_schema(cls, schema_name):
cursor = connection.cursor()
sql = """SELECT table_name FROM information_schema.tables
WHERE table_schema = %s"""
cursor.execute(sql, (schema_name, ))
return [row[0] for row in cursor.fetchall()]
@classmethod
def sync_shared(cls):
call_command('migrate_schemas',
schema_name=get_public_schema_name(),
interactive=False,
verbosity=0)
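# A minimal usage sketch (illustrative only): a test case built on the helpers
# above. The expected table name is an assumption based on Django's default
# naming for the customers.Client model.
class PublicSchemaTablesTestCase(BaseTestCase):

    def test_public_schema_has_tables(self):
        self.sync_shared()
        tables = self.get_tables_list_in_schema(get_public_schema_name())
        self.assertIn('customers_client', tables)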
|
11471753
|
import cv2
import numpy as np
def visualize_detection(img, bboxes_and_landmarks, save_path=None, to_bgr=False):
"""Visualize detection results.
Args:
img (Numpy array): Input image. CHW, BGR, [0, 255], uint8.
"""
img = np.copy(img)
if to_bgr:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
for b in bboxes_and_landmarks:
# confidence
cv2.putText(img, f'{b[4]:.4f}', (int(b[0]), int(b[1] + 12)), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# bounding boxes
b = list(map(int, b))
cv2.rectangle(img, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
# landmarks (for retinaface)
cv2.circle(img, (b[5], b[6]), 1, (0, 0, 255), 4)
cv2.circle(img, (b[7], b[8]), 1, (0, 255, 255), 4)
cv2.circle(img, (b[9], b[10]), 1, (255, 0, 255), 4)
cv2.circle(img, (b[11], b[12]), 1, (0, 255, 0), 4)
cv2.circle(img, (b[13], b[14]), 1, (255, 0, 0), 4)
# save img
if save_path is not None:
cv2.imwrite(save_path, img)
|
11471775
|
from typing import Union
from discord import Color
from discord.ext.commands import Cog, Context, command
from nagatoro.converters import Role, User, Member
from nagatoro.objects import Embed
from nagatoro.utils import t
class Utility(Cog):
"""Utility commands"""
def __init__(self, bot):
self.bot = bot
@command(name="role")
async def role(self, ctx: Context, *, role: Role):
"""Shows info about a role"""
embed = Embed(ctx, title=t(ctx, "title", role=role.name), color=role.color)
embed.add_field(name=t(ctx, "id"), value=role.id)
if len(role.members) > 1:
embed.add_field(name=t(ctx, "members"), value=str(len(role.members)))
embed.add_field(
name=t(ctx, "mentionable"),
value=t(ctx, "mentionable_yes")
if role.mentionable
else t(ctx, "mentionable_no"),
)
if role.color != Color.default():
embed.add_field(
name=t(ctx, "color"),
value=t(
ctx,
"color_value",
hex=str(role.color),
rgb=str(role.color.to_rgb()),
),
)
embed.add_field(name=t(ctx, "created_at"), value=role.created_at)
await ctx.send(embed=embed)
@command(name="user", aliases=["me", "member"])
async def user(self, ctx: Context, *, user: Union[Member, User] = None):
"""Shows info about an user or a member"""
if not user:
user = ctx.author
title = str(user) if not user.bot else t(ctx, "title_bot", user=user.name)
embed = Embed(ctx, title=title, color=user.color)
embed.set_thumbnail(url=user.avatar_url)
embed.add_fields(
(t(ctx, "id"), user.id),
(t(ctx, "created_at"), user.created_at),
)
await ctx.send(embed=embed)
@command(name="avatar", aliases=["av", "pfp"])
async def avatar(self, ctx: Context, *, user: User = None):
"""Shows an user's avatar"""
if not user:
user = ctx.author
embed = Embed(ctx, title=t(ctx, "title", user=user.name))
embed.set_image(url=user.avatar_url_as(size=2048))
await ctx.send(embed=embed)
@command(name="server", aliases=["guild"])
async def server(self, ctx: Context):
"""Shows info about this server"""
embed = Embed(ctx, title=ctx.guild.name)
embed.set_thumbnail(url=ctx.guild.icon_url_as(size=2048))
embed.add_fields(
(t(ctx, "id"), ctx.guild.id),
(t(ctx, "owner"), ctx.guild.owner.mention),
(t(ctx, "region"), ctx.guild.region),
(t(ctx, "members"), str(ctx.guild.member_count)),
(t(ctx, "text_channels"), str(len(ctx.guild.text_channels))),
(t(ctx, "voice_channels"), str(len(ctx.guild.voice_channels))),
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Utility(bot))
|
11471780
|
import os
import yaml
def get_env():
env = None
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_dir = os.path.join(base_dir, 'environments')
print(env_dir)
for fn in os.listdir(env_dir):
if fn.endswith('.yml'):
with open(os.path.join(env_dir, fn), 'r') as f:
envs = yaml.load(f, Loader=yaml.SafeLoader)
for env_name, _env in envs.items():
if os.path.exists(_env['must_exists']):
print(f'Environment detected: {env_name} (in {fn})')
env = _env
break
if env is not None:
break
if env:
return env
else:
raise ValueError('Could not determine env!')
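# Illustrative layout of an environments/*.yml file as consumed above; the
# only key get_env() relies on is 'must_exists' (other names are assumptions):
#
# laptop:
#   must_exists: /home/me/.laptop-marker
#   data_dir: /home/me/data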
|
11471806
|
from lib.parsers import parsers
class Message:
def __init__(self, header, length, id, rw, is_queued, params, direction='in'):
self.header = header
self.length = length
self.id = id
self.rw = rw
self.is_queued = is_queued
self.raw_params = []
self.params = []
if direction == 'in':
self.raw_params = params
self.params = self.parse_params('in')
elif direction == 'out':
self.params = params
self.raw_params = self.parse_params('out')
@staticmethod
def calculate_checksum(payload):
r = sum(payload) % 256
# Calculate the two's complement
check_byte = (256 - r) % 256
return check_byte
@staticmethod
def verify_checksum(payload, checksum):
a = sum(payload) % 256
is_correct = True if (a + checksum) % 256 == 0 else False
return is_correct
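    # Worked example (illustrative): sum([0x03, 0x00]) == 3, so
    # calculate_checksum([0x03, 0x00]) == (256 - 3) % 256 == 253, and
    # verify_checksum([0x03, 0x00], 253) is True because (3 + 253) % 256 == 0.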
@staticmethod
def parse(message):
bytes = list(message)
header = bytes[0:2]
length = bytes[2]
id = bytes[3]
control = bytes[4]
rw = (control & 1) == 1
is_queued = ((control & 2) >> 1) == 1
params = bytes[5:-1]
checksum = bytes[-1]
verified = Message.verify_checksum([id] + [control] + params, checksum)
if verified:
return Message(header, length, id, rw, is_queued, params)
else:
return None
@staticmethod
def read(serial):
header = serial.read(2)
if header != b'\xaa\xaa':
return None
length = int.from_bytes(serial.read(1), 'little')
payload = serial.read(length)
checksum = serial.read(1)
return Message.parse(header + bytes([length]) + payload + checksum)
def parse_params(self, direction):
message_parsers = parsers[self.id]
if direction == 'in':
if message_parsers is None:
return None
parser = None
if self.rw == 0 and self.is_queued == 0:
parser = message_parsers[0]
elif self.rw == 1 and self.is_queued == 0:
parser = message_parsers[0]
elif self.rw == 1 and self.is_queued == 1:
parser = message_parsers[2]
if parser is None:
return []
return parser(self.raw_params)
elif direction == 'out':
if message_parsers is None:
return []
parser = None
if direction == 'out' and self.rw == 1:
parser = message_parsers[3]
if parser is None:
return []
return parser(self.params)
def package(self):
self.length = 2 + len(self.raw_params)
control = int('000000' + str(int(self.is_queued)) + str(int(self.rw)), 2)
self.checksum = Message.calculate_checksum([self.id] + [control] + self.raw_params)
result = bytes(self.header + [self.length] + [self.id] + [control] + self.raw_params + [self.checksum])
return result
|
11471828
|
import json
def return_json_file_content(file_name: str):
"""
Load data from a json file
:param file_name: name of the file
:return: the data content extracted from the file
"""
with open(file_name) as json_file:
data = json.load(json_file)
return data
|
11471851
|
def test():
    assert (
        "for doc in nlp.pipe(TEXTS)" in __solution__
    ), "Are you using nlp.pipe to process the texts?"
    assert (
        "TRAINING_DATA.append" in __solution__
    ), "Are you using the append method to add the example to TRAINING_DATA?"
    assert (
        len(TRAINING_DATA) == 6
    ), "The training data does not look right. 6 examples are expected."
    for entry in TRAINING_DATA:
        assert (
            len(entry) == 2
            and isinstance(entry[0], str)
            and isinstance(entry[1], dict)
            and "entities" in entry[1]
        ), "The data seems to have the wrong format. It should be tuples of a text and a dictionary with the key 'entities'."
    assert TRAINING_DATA[0][1]["entities"] == [
        (0, 8, "GADGET")
    ], "Take another look at the entities in example 1."
    assert TRAINING_DATA[1][1]["entities"] == [
        (4, 12, "GADGET")
    ], "Take another look at the entities in example 2."
    assert TRAINING_DATA[2][1]["entities"] == [
        (29, 37, "GADGET")
    ], "Take another look at the entities in example 3."
    assert TRAINING_DATA[3][1]["entities"] == [
        (21, 29, "GADGET")
    ], "Take another look at the entities in example 4."
    assert TRAINING_DATA[4][1]["entities"] == [
        (0, 9, "GADGET"),
        (13, 21, "GADGET"),
    ], "Take another look at the entities in example 5."
    assert (
        TRAINING_DATA[5][1]["entities"] == []
    ), "Take another look at the entities in example 6."
    __msg__.good(
        "Well done! Before you train a model with the data, you should always "
        "double-check that your matcher did not find any false-positive spans. "
        "But this process is still much faster than doing *everything* by hand."
    )
|
11471868
|
from collections import namedtuple
from ..common.asap import _process_asap_token
from .backend import WSGIBackend
Request = namedtuple('Request', ['environ', 'start_response'])
class ASAPMiddleware(object):
def __init__(self, handler, settings):
self._next = handler
self._backend = WSGIBackend(settings)
self._verifier = self._backend.get_verifier()
def __call__(self, environ, start_response):
settings = self._backend.settings
request = Request(environ, start_response)
error_response = _process_asap_token(
request, self._backend, settings, verifier=self._verifier
)
if error_response is not None:
return error_response
return self._next(environ, start_response)
|
11471887
|
import uuid
def is_production():
""" Determines if app is running on the production server via uuid comparison.
HOW TO USE:
Open a terminal
> python
> import uuid
> uuid.getnode()
12345678987654321 <-- Copy whatever is returned and replace 111111111 with this.
Ensure .gitignore excludes ``secrets.py``.
Finally, rename this file to ``secrets.py``.
Compare uuid for the machine against the known uuid(s) of your development machine(s).
:return: (bool) True if code is running on the production server, and False otherwise.
"""
developer_machines = [111111111, ]
return uuid.getnode() not in developer_machines
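# Usage sketch (hypothetical settings module):
#
# DEBUG = not is_production()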
|
11471948
|
from kivy.animation import Animation
from functools import partial
from .base import Animator
__all__ = (
"RotateInAnimator",
"RotateInDownLeftAnimator",
"RotateInDownRightAnimator",
"RotateInUpLeftAnimator",
"RotateInUpRightAnimator",
)
# rotate in
class RotateInAnimator(Animator):
def start_(self, tmp=None):
props = ["angle", "opacity"]
vals = [200, 0]
self._initialize(**dict(zip(props, vals)))
vals = [0, 1]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateInDownLeftAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [90, 0]
self._initialize(**dict(zip(props, vals)))
vals = [0, 1]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateInDownRightAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [-90, 0]
self._initialize(**dict(zip(props, vals)))
vals = [0, 1]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateInUpLeftAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [-90, 0]
self._initialize(**dict(zip(props, vals)))
vals = [0, 1]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateInUpRightAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [90, 0]
self._initialize(**dict(zip(props, vals)))
vals = [0, 1]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
|
11471957
|
from typing import List
class Solution:
def reconstructMatrix(
self, upper: int, lower: int, colsum: List[int]
) -> List[List[int]]:
res = [[0] * len(colsum) for _ in range(2)]
for j, sm in enumerate(colsum):
if sm == 2:
if upper == 0 or lower == 0:
return []
upper -= 1
lower -= 1
res[0][j] = res[1][j] = 1
elif sm:
if upper == lower == 0:
return []
if upper >= lower:
upper -= 1
res[0][j] = 1
else:
lower -= 1
res[1][j] = 1
return res if upper == lower == 0 else []
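if __name__ == "__main__":
    # Minimal sanity check added for illustration: with upper=2, lower=1 and
    # colsum=[1, 1, 1] the greedy assignment fills the upper row first.
    assert Solution().reconstructMatrix(2, 1, [1, 1, 1]) == [[1, 1, 0], [0, 0, 1]]
    # An infeasible column sum yields an empty result.
    assert Solution().reconstructMatrix(0, 0, [2]) == []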
|
11471962
|
import sys
import argparse
import os
from os import listdir
from os.path import isfile, isdir, join
import numpy as np
import pandas as pd
import multiprocessing
sys.path.insert(0, "./")
import deepAccNet
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import Dataset, DataLoader
def main():
parser = argparse.ArgumentParser(description="Error predictor network trainer",
epilog="v0.0.1")
parser.add_argument("folder",
action="store",
help="Location of folder to save checkpoints to.")
parser.add_argument("--epoch",
"-e", action="store",
type=int,
default=200,
help="# of epochs (path over all proteins) to train for (Default: 200)")
parser.add_argument("--bert",
"-bert",
action="store_true",
default=False,
help="Run with bert features (Default: False)")
parser.add_argument("--multi_dir",
"-multi_dir",
action="store_true",
default=False,
help="Run with multiple direcotory sources (Default: False)")
parser.add_argument("--num_blocks",
"-numb", action="store",
type=int,
default=5,
help="# of reidual blocks (Default: 8)")
parser.add_argument("--num_filters",
"-numf", action="store",
type=int,
default=128,
help="# of base filter size in residual blocks (Default: 256)")
parser.add_argument("--size_limit",
"-size_limit", action="store",
type=int,
default=280,
help="protein size limit (Default: 300)")
parser.add_argument("--decay",
"-d", action="store",
type=float,
default=0.99,
help="Decay rate for learning rate (Default: 0.99)")
parser.add_argument("--base",
"-b", action="store",
type=float,
default=0.0005,
help="Base learning rate (Default: 0.0005)")
parser.add_argument("--debug",
"-debug",
action="store_true",
default=False,
help="Debug mode (Default: False)")
parser.add_argument("--silent",
"-s",
action="store_true",
default=False,
help="Run in silent mode (Default: False)")
args = parser.parse_args()
script_dir = os.path.dirname(__file__)
base = join(script_dir, "data/")
epochs = args.epoch
base_learning_rate = args.base
decay = args.decay
    loss_weight = [1, 0.25, 10]  # change if you need a different loss weighting
    validation = True  # validation is always on
name = args.folder
lengthmax = args.size_limit
if not args.silent: print("Loading samples")
proteins = np.load(join(base, "train_proteins.npy"))
if args.debug: proteins = proteins[:100]
train_decoys = deepAccNet.DecoyDataset(targets = proteins,
lengthmax = lengthmax,
bert = args.bert,
multi_dir = args.multi_dir)
train_dataloader = DataLoader(train_decoys, batch_size=1, shuffle=True, num_workers=4)
proteins = np.load(join(base, "valid_proteins.npy"))
if args.debug: proteins = proteins[:100]
valid_decoys = deepAccNet.DecoyDataset(targets = proteins,
lengthmax = lengthmax,
bert = args.bert,
multi_dir = args.multi_dir)
valid_dataloader = DataLoader(valid_decoys, batch_size=1, shuffle=True, num_workers=4)
# Load the model if needed
if not args.silent: print("Instantitating a model")
net = deepAccNet.DeepAccNet(num_chunks = args.num_blocks,
num_channel = args.num_filters,
twobody_size = 49 if args.bert else 33)
restoreModel = False
if isdir(args.folder):
if not args.silent: print("Loading a checkpoint")
checkpoint = torch.load(join(name, "model.pkl"))
net.load_state_dict(checkpoint["model_state_dict"])
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint["epoch"]+1
train_loss = checkpoint["train_loss"]
valid_loss = checkpoint["valid_loss"]
best_models = checkpoint["best_models"]
if not args.silent: print("Restarting at epoch", epoch)
assert(len(train_loss["total"]) == epoch)
assert(len(valid_loss["total"]) == epoch)
restoreModel = True
else:
if not args.silent: print("Training a new model")
epoch = 0
train_loss = {"total":[], "esto":[], "mask":[], "lddt":[]}
valid_loss = {"total":[], "esto":[], "mask":[], "lddt":[]}
best_models = []
if not isdir(name):
if not args.silent: print("Creating a new dir at", name)
os.mkdir(name)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
optimizer = optim.Adam(net.parameters(), lr=0.0005)
if restoreModel:
checkpoint = torch.load(join(name, "model.pkl"))
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Loop over the dataset multiple times
start_epoch = epoch
for epoch in range(start_epoch, epochs):
# Update the learning rate
lr = base_learning_rate*np.power(decay, epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Loop over batches
net.train(True)
temp_loss = {"total":[], "esto":[], "mask":[], "lddt":[]}
for i, data in enumerate(train_dataloader):
            # Get the data; hardcoded per-sample unpacking.
idx, val, f1d, f2d, esto, esto_1hot, mask = data["idx"], data["val"], data["1d"], data["2d"],\
data["estogram"], data["estogram_1hot"], data["mask"]
idx = idx[0].long().to(device)
val = val[0].to(device)
f1d = f1d[0].to(device)
f2d = f2d[0].to(device)
esto_true = esto[0].to(device)
esto_1hot_true = esto_1hot[0].to(device)
mask_true = mask[0].to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
esto_pred, mask_pred, lddt_pred, (esto_logits, mask_logits) = net(idx, val, f1d, f2d)
lddt_true = deepAccNet.calculate_LDDT(esto_1hot_true[0], mask_true[0])
Esto_Loss = torch.nn.CrossEntropyLoss()
Mask_Loss = torch.nn.BCEWithLogitsLoss()
Lddt_Loss = torch.nn.MSELoss()
esto_loss = Esto_Loss(esto_logits, esto_true.long())
mask_loss = Mask_Loss(mask_logits, mask_true)
lddt_loss = Lddt_Loss(lddt_pred, lddt_true.float())
loss = loss_weight[0]*esto_loss + loss_weight[1]*mask_loss + loss_weight[2]*lddt_loss
loss.backward()
optimizer.step()
# Get training loss
temp_loss["total"].append(loss.cpu().detach().numpy())
temp_loss["esto"].append(esto_loss.cpu().detach().numpy())
temp_loss["mask"].append(mask_loss.cpu().detach().numpy())
temp_loss["lddt"].append(lddt_loss.cpu().detach().numpy())
# Display training results
sys.stdout.write("\rEpoch: [%2d/%2d], Batch: [%2d/%2d], loss: %.2f, esto-loss: %.2f, lddt-loss: %.2f, mask: %.2f"
%(epoch, epochs, i, len(train_decoys),
temp_loss["total"][-1], temp_loss["esto"][-1], temp_loss["lddt"][-1], temp_loss["mask"][-1]))
train_loss["total"].append(np.array(temp_loss["total"]))
train_loss["esto"].append(np.array(temp_loss["esto"]))
train_loss["mask"].append(np.array(temp_loss["mask"]))
train_loss["lddt"].append(np.array(temp_loss["lddt"]))
if validation:
net.eval() # turn off training mode
temp_loss = {"total":[], "esto":[], "mask":[], "lddt":[]}
            with torch.no_grad():  # without tracking gradients
for i, data in enumerate(valid_dataloader):
                    # Get the data; hardcoded per-sample unpacking.
idx, val, f1d, f2d, esto, esto_1hot, mask = data["idx"], data["val"], data["1d"], data["2d"],\
data["estogram"], data["estogram_1hot"], data["mask"]
idx = idx[0].long().to(device)
val = val[0].to(device)
f1d = f1d[0].to(device)
f2d = f2d[0].to(device)
esto_true = esto[0].to(device)
esto_1hot_true = esto_1hot[0].to(device)
mask_true = mask[0].to(device)
# forward + backward + optimize
esto_pred, mask_pred, lddt_pred, (esto_logits, mask_logits) = net(idx, val, f1d, f2d)
lddt_true = deepAccNet.calculate_LDDT(esto_1hot_true[0], mask_true[0])
Esto_Loss = torch.nn.CrossEntropyLoss()
Mask_Loss = torch.nn.BCEWithLogitsLoss()
Lddt_Loss = torch.nn.MSELoss()
esto_loss = Esto_Loss(esto_logits, esto_true.long())
mask_loss = Mask_Loss(mask_logits, mask_true)
lddt_loss = Lddt_Loss(lddt_pred, lddt_true.float())
loss = loss_weight[0]*esto_loss + loss_weight[1]*mask_loss + loss_weight[2]*lddt_loss
# Get training loss
temp_loss["total"].append(loss.cpu().detach().numpy())
temp_loss["esto"].append(esto_loss.cpu().detach().numpy())
temp_loss["mask"].append(mask_loss.cpu().detach().numpy())
temp_loss["lddt"].append(lddt_loss.cpu().detach().numpy())
valid_loss["total"].append(np.array(temp_loss["total"]))
valid_loss["esto"].append(np.array(temp_loss["esto"]))
valid_loss["mask"].append(np.array(temp_loss["mask"]))
valid_loss["lddt"].append(np.array(temp_loss["lddt"]))
# Saving the model if needed.
if name != "" and validation:
folder = name
            # Names of the ranked model files; not elegant, but the simplest fix.
name_map = ["best.pkl", "second.pkl", "third.pkl", "fourth.pkl", "fifth.pkl"]
new_model = (epoch, np.mean(valid_loss["total"][-1]))
new_best_models = best_models[:]
new_best_models.append(new_model)
new_best_models.sort(key=lambda x:x[1])
temp = new_best_models[:len(name_map)]
new_best_models = [(temp[i][0], temp[i][1], name_map[i]) for i in range(len(temp))]
# Saving and moving
for i in range(len(new_best_models)):
m, performance, filename = new_best_models[i]
if m in [j[0] for j in best_models]:
index = [j[0] for j in best_models].index(m)
command = "mv %s %s"%(join(folder, best_models[index][2]), join(folder, "temp_"+new_best_models[i][2]))
os.system(command)
else:
torch.save({
'epoch': epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss,
'valid_loss': valid_loss,
}, join(folder, "temp_"+new_best_models[i][2]))
# Renaming
for i in range(len(new_best_models)):
command = "mv %s %s"%(join(folder, "temp_"+name_map[i]), join(folder, name_map[i]))
os.system(command)
# Update best list
best_models = new_best_models
# Save all models
torch.save({
'epoch': epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss,
'valid_loss': valid_loss,
'best_models' : best_models
}, join(name, "model.pkl"))
# Saving progress plot
for label in ["total", "esto", "mask", "lddt"]:
width = 50
# Train plot
y_train = np.concatenate(train_loss[label])
y_train_conved = np.convolve(y_train, np.ones((width,))/width, mode='valid')
x_train = (np.arange(len(y_train))/len(train_decoys))[:-1*(width-1)]
# Valid plot
y_valid = np.concatenate(valid_loss[label])
y_valid_conved = np.convolve(y_valid, np.ones((width,))/width, mode='valid')
x_valid = (np.arange(len(y_valid))/len(valid_decoys))[:-1*(width-1)]
plt.figure()
plt.plot(x_train, y_train_conved, label="train")
plt.plot(x_valid, y_valid_conved, label="valid")
plt.legend(loc=1)
plt.savefig(join(name, label+".png"))
plt.close()
if __name__== "__main__":
main()
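# Example invocation (script name and paths are illustrative):
#   python train.py checkpoints/run1 --epoch 100 --num_blocks 5 --num_filters 128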
|
11471985
|
clsidx_2_labels = {
0: "frontal",
1: "profile45",
2: "profile75",
3: "upward",
4: "downward",
}
|
11471999
|
import glob
import re
from html import unescape
from time import sleep
from urllib.parse import unquote_plus
from urllib.request import urlopen
def show_blob_content(description, key):
config_files = glob.glob('/var/lib/waagent/ExtensionsConfig*.xml')
if len(config_files) == 0:
raise Exception('no extension config files found')
config_files.sort()
with open(config_files[-1], 'r') as fh:
config = fh.readlines()
status_line = list(filter(lambda s: key in s, config))[0]
    status_pattern = r'<{0}.*>(.*\?)(.*)<.*'.format(key)
match = re.match(status_pattern, status_line)
if not match:
raise Exception(description + ' not found')
decoded_url = match.groups()[0]
encoded_params = match.groups()[1].split('&')
for param in encoded_params:
kvp = param.split('=')
name = kvp[0]
skip = name == 'sig'
        val = unescape(unquote_plus(kvp[1])) if not skip else kvp[1]
decoded_param = '&{0}={1}'.format(name, val)
decoded_url += decoded_param
print("\n{0} uri: {1}\n".format(description, decoded_url))
status = None
retries = 3
while status is None:
try:
status = urlopen(decoded_url).read()
except Exception as e:
if retries > 0:
retries -= 1
sleep(60)
else:
# we are only collecting information, so do not fail the test
status = 'Error reading {0}: {1}'.format(description, e)
return "\n{0} content: {1}\n".format(description, status)
|
11472013
|
URANIUM_PY = """
from uranium import current_build
@current_build.task
def main(build):
current_build.history["test"] = True
"""
def test_current_build_in_ubuild(tmpdir, build):
""" current_build shoud be valid in the ubuild.py """
script = tmpdir.join("ubuild.py")
script.write(URANIUM_PY)
build._run_script(script.strpath, "main")
assert build.history["test"] is True
|
11472016
|
import abc
from typing import Optional
import torch
import numpy as np
import math
from duorat.utils import registry
def maybe_mask(attn: torch.Tensor, attn_mask: Optional[torch.Tensor]) -> None:
if attn_mask is not None:
assert all(
a == 1 or b == 1 or a == b
for a, b in zip(attn.shape[::-1], attn_mask.shape[::-1])
), "Attention mask shape {} should be broadcastable with attention shape {}".format(
attn_mask.shape, attn.shape
)
attn.data.masked_fill_(attn_mask, -float("inf"))
class Pointer(abc.ABC, torch.nn.Module):
@abc.abstractmethod
def forward(
self,
query: torch.Tensor,
keys: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
pass
@registry.register("pointer", "Bahdanau")
class BahdanauPointer(Pointer):
def __init__(self, query_size: int, key_size: int, proj_size: int) -> None:
super().__init__()
self.compute_scores = torch.nn.Sequential(
torch.nn.Linear(query_size + key_size, proj_size),
torch.nn.Tanh(),
torch.nn.Linear(proj_size, 1),
)
def forward(
self,
query: torch.Tensor,
keys: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# query shape: batch x seq_len x query_size
# keys shape: batch x mem_len x key_size
        # query_expanded shape: batch x seq_len x mem_len x query_size
query_expanded = query.unsqueeze(2).expand(-1, -1, keys.shape[1], -1)
keys_expanded = keys.unsqueeze(1).expand(-1, query.shape[1], -1, -1)
        # attn_logits shape: batch x seq_len x mem_len x 1
attn_logits = self.compute_scores(
# shape: batch x num keys x query_size + key_size
torch.cat((query_expanded, keys_expanded), dim=3)
)
        # attn_logits shape: batch x seq_len x mem_len
attn_logits = attn_logits.squeeze(3)
maybe_mask(attn_logits, attn_mask)
return attn_logits
@registry.register("pointer", "BahdanauMemEfficient")
class BahdanauPointerMemEfficient(Pointer):
def __init__(self, query_size: int, key_size: int, proj_size: int) -> None:
super().__init__()
self.query_linear = torch.nn.Linear(query_size, proj_size, bias=False)
self.key_linear = torch.nn.Linear(key_size, proj_size)
# Correct weight initialization assuming query_size ~= key_size
with torch.no_grad():
self.query_linear.weight /= math.sqrt(2)
self.key_linear.weight /= math.sqrt(2)
self.key_linear.bias /= math.sqrt(2)
self.tanh = torch.nn.Tanh()
self.proj_linear = torch.nn.Linear(proj_size, 1)
def forward(
self,
query: torch.Tensor,
keys: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# query shape: batch x seq_len x query_size
# keys shape: batch x mem_len x key_size
h_query = self.query_linear(query) # batch_size x seq_len x proj_size
h_keys = self.key_linear(keys) # batch_size x mem_len x proj_size
h = h_keys.unsqueeze(1) + h_query.unsqueeze(
2
) # batch_size x seq_len x mem_len x proj_size
h = self.tanh(h)
attn_logits = self.proj_linear(h) # batch_size x seq_len x mem_len x 1
# scores shape: batch x seq_len x mem_len
attn_logits = attn_logits.squeeze(3)
maybe_mask(attn_logits, attn_mask)
return attn_logits
@registry.register("pointer", "ScaledDotProduct")
class ScaledDotProductPointer(Pointer):
def __init__(self, query_size: int, key_size: int) -> None:
super().__init__()
self.query_proj = torch.nn.Linear(query_size, key_size)
self.temp = np.power(key_size, 0.5)
self.key_size = key_size
def forward(
self,
query: torch.Tensor,
keys: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# query shape: batch x seq_len x query_size
# keys shape: batch x mem_len x key_size
_, mem_len, key_size = keys.shape
batch_size, seq_len, query_size = query.shape
# proj_query shape: batch x seq_len x key_size x 1
proj_query = self.query_proj(query).unsqueeze(-1)
# reshaped_query shape: (batch*seq_len) x key_size x 1
reshaped_query = proj_query.reshape(-1, key_size, 1)
# expanded_keys shape: batch x seq_len x mem_len x key_size
expanded_keys = keys.unsqueeze(1).repeat((1, seq_len, 1, 1))
# reshaped_keys shape: (batch*seq_len) x mem_len x key_size
reshaped_keys = expanded_keys.reshape(-1, mem_len, key_size)
        # attn_logits shape: (batch*seq_len) x mem_len
attn_logits = torch.bmm(reshaped_keys, reshaped_query).squeeze(2) / self.temp
reshaped_attn_logits = attn_logits.reshape(batch_size, seq_len, mem_len)
maybe_mask(reshaped_attn_logits, attn_mask)
return reshaped_attn_logits
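if __name__ == "__main__":
    # Illustrative shape check added for clarity (assumes the duorat registry
    # imports cleanly): a pointer yields one logit per (query, key) pair.
    batch, seq_len, mem_len = 2, 3, 5
    query = torch.randn(batch, seq_len, 16)
    keys = torch.randn(batch, mem_len, 32)
    pointer = BahdanauPointer(query_size=16, key_size=32, proj_size=8)
    assert pointer(query, keys).shape == (batch, seq_len, mem_len)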
|
11472049
|
import os, subprocess, time, signal
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import sys
from plark_game import classes
from gym_plark.envs.plark_env import PlarkEnv
import logging
logger = logging.getLogger(__name__)
# logger.setLevel(logging.ERROR)
class PlarkEnvSonobuoyDeployment(PlarkEnv):
    def __init__(self, config_file_path=None, verbose=False, **kwargs):
        kwargs['driving_agent'] = 'pelican'
super(PlarkEnvSonobuoyDeployment, self).__init__(config_file_path,verbose, **kwargs)
if self.driving_agent != 'pelican':
raise ValueError('This environment only supports pelican')
self.pelican_col = self.env.activeGames[len(self.env.activeGames)-1].pelicanPlayer.col
self.pelican_row = self.env.activeGames[len(self.env.activeGames)-1].pelicanPlayer.row
def step(self, action):
action = self.ACTION_LOOKUP[action]
if self.verbose:
logger.info('Action:'+action)
gameState,uioutput = self.env.activeGames[len(self.env.activeGames)-1].game_step(action)
self.status = gameState
self.uioutput = uioutput
reward = 0
self.globalSonobuoys = self.env.activeGames[len(self.env.activeGames)-1].globalSonobuoys
        ## Reward for dropping a sonobuoy
if action == 'drop_buoy':
reward = 1.00
if len(self.globalSonobuoys) > 1 :
sonobuoy = self.globalSonobuoys[-1]
sbs_in_range = self.env.activeGames[len(self.env.activeGames)-1].gameBoard.searchRadius(sonobuoy.col, sonobuoy.row, sonobuoy.range, "SONOBUOY")
sbs_in_range.remove(sonobuoy) # remove itself from search results
if len(sbs_in_range) > 0:
reward = reward - 0.5
if reward > 1:
reward = 1
if reward < -1:
reward = -1
ob = self._observation()
done = False
if self.status in ["PELICANWIN","ESCAPE","BINGO","WINCHESTER"]:
done = True
if self.verbose:
logger.info("GAME STATE IS " + self.status)
return ob, reward, done, {}
|
11472063
|
import sys
import os
from pathlib import Path, WindowsPath
import importlib
from functools import wraps, reduce
import base64
import shutil
import inspect
from collections.abc import Iterable
from contextlib import contextmanager
from ploomber.exceptions import (CallbackSignatureError, CallbackCheckAborted,
TaskRenderError)
from ploomber.util.dotted_path import DottedPath
def requires(pkgs, name=None, extra_msg=None, pip_names=None):
"""
Check if packages were imported, raise ImportError with an appropriate
message for missing ones
Error message:
a, b are required to use function. Install them by running pip install a b
Parameters
----------
pkgs
The names of the packages required
name
The name of the module/function/class to show in the error message,
if None, the decorated function __name__ attribute is used
extra_msg
Append this extra message to the end
pip_names
Pip package names to show in the suggested "pip install {name}"
        command; use them if they differ from the importable package names
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
is_pkg_missing = [
importlib.util.find_spec(pkg) is None for pkg in pkgs
]
if any(is_pkg_missing):
missing_pkgs = [
name for name, is_missing in zip(
pip_names or pkgs, is_pkg_missing) if is_missing
]
names_str = reduce(lambda x, y: x + ' ' + y, missing_pkgs)
fn_name = name or f.__name__
error_msg = ('{} {} required to use {}. Install {} by '
'running "pip install {}"'.format(
names_str,
'is' if len(missing_pkgs) == 1 else 'are',
fn_name,
'it' if len(missing_pkgs) == 1 else 'them',
names_str,
))
if extra_msg:
error_msg += ('. ' + extra_msg)
raise ImportError(error_msg)
return f(*args, **kwargs)
return wrapper
return decorator
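# Usage sketch (hypothetical function and package): fail fast with a pip hint
# when a soft dependency is missing.
#
# @requires(['pandas'], extra_msg='Required for DataFrame outputs')
# def load_frame(path):
#     import pandas as pd
#     return pd.read_csv(path)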
def safe_remove(path):
if path.exists():
if path.is_file():
path.unlink()
else:
shutil.rmtree(path)
def image_bytes2html(data):
fig_base64 = base64.encodebytes(data)
img = fig_base64.decode("utf-8")
html = '<img src="data:image/png;base64,' + img + '"></img>'
return html
def isiterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return True
def isiterable_not_str(obj):
return isinstance(obj, Iterable) and not isinstance(obj, str)
# TODO: add more context to errors, which task and which hook?
def callback_check(fn, available, allow_default=True):
"""
Check if a callback function signature requests available parameters
Parameters
----------
fn : callable
Callable (e.g. a function) to check
available : dict
All available params
allow_default : bool, optional
Whether allow arguments with default values in "fn" or not
Returns
-------
dict
Dictionary with requested parameters
Raises
------
ploomber.exceptions.CallbackCheckAborted
When passing a dotted path whose underlying function hasn't been
imported
ploomber.exceptions.CallbackSignatureError
When fn does not have the required signature
"""
# keep a copy of the original value because we'll modified it if this is
# a DottedPath
available_raw = available
if isinstance(fn, DottedPath):
available = {**fn._spec.get_kwargs(), **available}
if fn.callable is None:
raise CallbackCheckAborted(
'Cannot check callback because function '
'is a dotted path whose function has not been imported yet')
else:
fn = fn.callable
parameters = inspect.signature(fn).parameters
optional = {
name
for name, param in parameters.items()
if param.default != inspect._empty
}
# not all functions have __name__ (e.g. partials)
fn_name = getattr(fn, '__name__', fn)
if optional and not allow_default:
raise CallbackSignatureError('Callback functions cannot have '
'parameters with default values, '
'got: {} in "{}"'.format(
optional, fn_name))
required = {
name
for name, param in parameters.items()
if param.default == inspect._empty
}
available_set = set(available)
extra = required - available_set
if extra:
raise CallbackSignatureError('Callback function "{}" unknown '
'parameter(s): {}, available ones are: '
'{}'.format(fn_name, extra,
available_set))
return {k: v for k, v in available_raw.items() if k in required}
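# Sketch (hypothetical callback): a function whose required parameters are all
# available passes the check and gets back just the values it asked for.
#
# def on_finish(product): ...
# callback_check(on_finish, available={'product': 'report.html', 'task': 't'})
# # -> {'product': 'report.html'}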
def signature_check(fn, params, task_name):
"""
Verify if the function signature used as source in a PythonCallable
task matches available params
"""
params = set(params)
parameters = inspect.signature(fn).parameters
required = {
name
for name, param in parameters.items()
if param.default == inspect._empty
}
extra = params - set(parameters.keys())
missing = set(required) - params
errors = []
if extra:
msg = f'Got unexpected arguments: {sorted(extra)}'
errors.append(msg)
if missing:
msg = f'Missing arguments: {sorted(missing)}'
errors.append(msg)
if 'upstream' in missing:
        errors.append('Verify this task declared upstream dependencies or '
'remove the "upstream" argument from the function')
missing_except_upstream = sorted(missing - {'upstream'})
if missing_except_upstream:
errors.append(f'Pass {missing_except_upstream} in "params"')
if extra or missing:
msg = '. '.join(errors)
# not all functions have __name__ (e.g. partials)
fn_name = getattr(fn, '__name__', fn)
raise TaskRenderError('Error rendering task "{}" initialized with '
'function "{}". {}'.format(
task_name, fn_name, msg))
return True
def call_with_dictionary(fn, kwargs):
"""
Call a function by passing elements from a dictionary that appear in the
function signature
"""
parameters = inspect.signature(fn).parameters
common = set(parameters) & set(kwargs)
sub_kwargs = {k: kwargs[k] for k in common}
return fn(**sub_kwargs)
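# Sketch (hypothetical function): dictionary keys absent from the signature
# are silently dropped.
#
# def build(product, upstream=None): ...
# call_with_dictionary(build, {'product': 'out.csv', 'unused': 1})
# # calls build(product='out.csv')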
def _make_iterable(o):
if isinstance(o, Iterable) and not isinstance(o, str):
return o
elif o is None:
return []
else:
return [o]
@contextmanager
def add_to_sys_path(path, chdir):
cwd_old = os.getcwd()
if path is not None:
path = os.path.abspath(path)
sys.path.insert(0, path)
if chdir:
os.chdir(path)
try:
yield
finally:
if path is not None:
sys.path.remove(path)
os.chdir(cwd_old)
def chdir_code(path):
"""
Returns a string with valid code to chdir to the passed path
"""
path = Path(path).resolve()
if isinstance(path, WindowsPath):
path = str(path).replace('\\', '\\\\')
return f'os.chdir("{path}")'
|
11472072
|
from sklearn.cluster import AgglomerativeClustering as skAgglomerative
import numpy as np
from .base import Clustering
from ..similarity.pairwise import pairwise_similarity
class AgglomerativeClustering(Clustering):
"""Hierarchical Agglomerative Clustering.
Parameters
----------
n_clusters : int
The number of clusters to group trajectories into.
    linkage : str (default='ward')
        The linkage method to use. Must be one of {'ward', 'complete',
        'average'}. Note that scikit-learn rejects 'ward' with a precomputed
        affinity, so prefer 'complete' or 'average' here.
measure : SimilarityMeasure object or str (default='precomputed')
The similarity measure to use for computing similarities (see
:mod:`trajminer.similarity`) or the string 'precomputed'.
n_jobs : int (default=1)
The number of parallel jobs.
"""
def __init__(self, n_clusters, linkage='ward', measure='precomputed',
n_jobs=1):
        self.agglomerative = skAgglomerative(n_clusters=n_clusters,
                                             affinity='precomputed',
                                             linkage=linkage)
self.n_clusters = n_clusters
self.measure = measure
self.n_jobs = n_jobs
def fit_predict(self, X):
if self.measure != 'precomputed':
self.distances = 1 - pairwise_similarity(X=X, measure=self.measure,
n_jobs=self.n_jobs)
else:
self.distances = np.array(X)
self.labels = self.agglomerative.fit_predict(self.distances)
return self.labels
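# Usage sketch with a precomputed distance matrix (illustrative values); use
# 'complete' or 'average' linkage, since 'ward' rejects precomputed affinities:
#
# dist = np.array([[0.0, 0.2, 0.9], [0.2, 0.0, 0.8], [0.9, 0.8, 0.0]])
# labels = AgglomerativeClustering(n_clusters=2, linkage='average').fit_predict(dist)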
|
11472075
|
import pytest
from setup_py_upgrade import main
def test_basic(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo")\n',
)
main((str(tmpdir),))
setup_py = tmpdir.join('setup.py').read()
setup_cfg = tmpdir.join('setup.cfg').read()
assert setup_py == 'from setuptools import setup\nsetup()\n'
assert setup_cfg == '[metadata]\nname = foo\n'
def test_non_from_import_setuptools(tmpdir):
tmpdir.join('setup.py').write(
'import setuptools\n'
'setuptools.setup(name="foo")\n',
)
main((str(tmpdir),))
setup_cfg = tmpdir.join('setup.cfg').read()
assert setup_cfg == '[metadata]\nname = foo\n'
def test_reads_file(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'with open("README.md") as f:\n'
' readme = f.read()\n'
'setup(name="foo", long_description=readme)',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'long_description = file: README.md\n'
)
def test_unrelated_with_statement(tmpdir): # only added for test coverage
tmpdir.join('setup.py').write(
'import contextlib\n'
'from setuptools import setup\n'
'with contextlib.suppress(ImportError):\n'
' import dne\n'
'setup(name="foo")\n',
)
main((str(tmpdir),))
setup_cfg = tmpdir.join('setup.cfg').read()
assert setup_cfg == '[metadata]\nname = foo\n'
def test_option_key(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", install_requires=["astpretty", "six"])\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options]\n'
'install_requires =\n'
' astpretty\n'
' six\n'
)
def test_unsupported_argument(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import Extension, setup\n'
'setup(name="foo", ext_modules=[Extension("_x", ["_x.c"])])\n',
)
with pytest.raises(SystemExit) as excinfo:
main((str(tmpdir),))
msg, = excinfo.value.args
assert msg == 'ext_modules= is not supported in setup.cfg'
def test_intentionally_not_parsable(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'from foo import __version__\n'
'setup(name="foo", version=__version__)\n',
)
with pytest.raises(NotImplementedError) as excinfo:
main((str(tmpdir),))
msg, = excinfo.value.args
assert msg == 'unparsable: version='
def test_find_packages(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import find_packages, setup\n'
'setup(name="foo", packages=find_packages(exclude=("tests*",)))\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options]\n'
'packages = find:\n'
'\n'
'[options.packages.find]\n'
'exclude = tests*\n'
)
def test_package_dir(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", package_dir={"": "src", "pkg1": "pkg1"})\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options]\n'
'package_dir =\n'
' =src\n'
' pkg1=pkg1\n'
)
def test_project_urls(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(\n'
' name="foo",\n'
' project_urls={"homepage": "https://example.com"},\n'
')\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'project_urls =\n'
' homepage=https://example.com\n'
)
def test_project_urls_multiple(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(\n'
' name="foo",\n'
' project_urls={\n'
' "homepage": "https://example.com",\n'
' "issues": "https://example.com/issues",\n'
' },\n'
')\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'project_urls =\n'
' homepage=https://example.com\n'
' issues=https://example.com/issues\n'
)
def test_entry_points(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", entry_points={"console_scripts": ["a=a:main"]})\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options.entry_points]\n'
'console_scripts =\n'
' a=a:main\n'
)
def test_extras_to_requirements_rewrite(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(\n'
' name="foo",\n'
" extras_require={':python_version==\"2.7\"': ['typing']}\n"
')\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options]\n'
'install_requires = typing;python_version=="2.7"\n'
)
def test_normal_extras(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(\n'
' name="foo",\n'
" extras_require={'lint': ['pre-commit']},\n"
')\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options.extras_require]\n'
'lint = pre-commit\n'
)
def test_empty_string_package_data(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", package_data={"": ["*.pyi"]})\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options.package_data]\n'
'* =\n'
' *.pyi\n'
)
def test_empty_string_exclude_package_data(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", exclude_package_data={"": ["*.tar.gz"]})\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options.exclude_package_data]\n'
'* =\n'
' *.tar.gz\n'
)
def test_package_data_multiple_entries(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo", package_data={"resources": ["*.json", "*.pyi"]})\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'\n'
'[options.package_data]\n'
'resources =\n'
' *.json\n'
' *.pyi\n'
)
def test_updates_existing_setup_cfg(tmpdir):
tmpdir.join('setup.py').write(
'from setuptools import setup\n'
'setup(name="foo")\n',
)
tmpdir.join('setup.cfg').write(
'[metadata]\n'
'license_file = LICENSE\n'
'\n'
'[bdist_wheel]\n'
'universal = 1\n',
)
main((str(tmpdir),))
assert tmpdir.join('setup.cfg').read() == (
'[metadata]\n'
'name = foo\n'
'license_file = LICENSE\n'
'\n'
'[bdist_wheel]\n'
'universal = 1\n'
)
|
11472080
|
from __future__ import absolute_import
input_name = '../examples/diffusion/time_advection_diffusion.py'
output_name_trunk = 'test_time_advection_diffusion'
from tests_basic import TestInputEvolutionary
class Test(TestInputEvolutionary):
pass
|
11472101
|
import datetime
import os
import shutil
import tempfile
import unittest
from catkin_pkg.package_version import _replace_version
from catkin_pkg.package_version import bump_version
from catkin_pkg.package_version import update_changelog_sections
from catkin_pkg.package_version import update_versions
import mock
from .util import in_temporary_directory
class PackageVersionTest(unittest.TestCase):
def test_bump_version(self):
self.assertEqual('0.0.1', bump_version('0.0.0'))
self.assertEqual('1.0.1', bump_version('1.0.0'))
self.assertEqual('0.1.1', bump_version('0.1.0'))
self.assertEqual('0.0.1', bump_version('0.0.0', 'patch'))
self.assertEqual('1.0.1', bump_version('1.0.0', 'patch'))
self.assertEqual('0.1.1', bump_version('0.1.0', 'patch'))
self.assertEqual('1.0.0', bump_version('0.0.0', 'major'))
self.assertEqual('1.0.0', bump_version('0.0.1', 'major'))
self.assertEqual('1.0.0', bump_version('0.1.1', 'major'))
self.assertEqual('0.1.0', bump_version('0.0.0', 'minor'))
self.assertEqual('0.1.0', bump_version('0.0.1', 'minor'))
self.assertEqual('1.1.0', bump_version('1.0.1', 'minor'))
self.assertRaises(ValueError, bump_version, '0.0.asd')
self.assertRaises(ValueError, bump_version, '0.0')
self.assertRaises(ValueError, bump_version, '0')
self.assertRaises(ValueError, bump_version, '0.0.-1')
def test_replace_version(self):
self.assertEqual('<package><version>0.1.1</version></package>',
_replace_version('<package><version>0.1.0</version></package>', '0.1.1'))
self.assertEqual("<package><version abi='0.1.0'>0.1.1</version></package>",
_replace_version("<package><version abi='0.1.0'>0.1.0</version></package>", '0.1.1'))
self.assertRaises(RuntimeError, _replace_version, '<package></package>', '0.1.1')
self.assertRaises(RuntimeError, _replace_version, '<package><version>0.1.1</version><version>0.1.1</version></package>', '0.1.1')
def test_update_versions(self):
try:
root_dir = tempfile.mkdtemp()
sub_dir = os.path.join(root_dir, 'sub')
with open(os.path.join(root_dir, 'package.xml'), 'w') as fhand:
fhand.write('<package><version>2.3.4</version></package>')
os.makedirs(os.path.join(sub_dir))
with open(os.path.join(sub_dir, 'package.xml'), 'w') as fhand:
fhand.write('<package><version>1.5.4</version></package>')
update_versions([root_dir, sub_dir], '7.6.5')
with open(os.path.join(root_dir, 'package.xml'), 'r') as fhand:
contents = fhand.read()
self.assertEqual('<package><version>7.6.5</version></package>', contents)
with open(os.path.join(sub_dir, 'package.xml'), 'r') as fhand:
contents = fhand.read()
self.assertEqual('<package><version>7.6.5</version></package>', contents)
finally:
shutil.rmtree(root_dir)
@in_temporary_directory
def test_update_changelog_unicode(self, directory=None):
"""Test that updating the changelog does not throw an exception on unicode characters."""
temp_file = os.path.join(directory, 'changelog')
missing_changelogs_but_forthcoming = {}
# Mock the Changelog object from catkin_pkg
mock_changelog = mock.Mock()
# Create a changelog entry with a unicode char.
mock_changelog.rst = ('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n'
'Changelog for package fake_pkg\n'
'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n'
'\n'
'Forthcoming\n'
'-----------\n'
'* This is my changelog entry\n'
'* This is a line that has unicode' u'\xfc''\n'
'\n'
'0.0.9 (2017-01-30)\n'
'------------------\n'
'* This is old version.\n')
        # Create tuple with expected entries.
missing_changelogs_but_forthcoming['fake_pkg'] = (temp_file, mock_changelog, 'Forthcoming')
# Should not raise an exception
update_changelog_sections(missing_changelogs_but_forthcoming, '1.0.0')
# Generate dynamic lines, using present system date,
# the length of the line of '-'s for the underline
# and the utf-8 encoded data expected to be read back.
ver_line = '1.0.0 (%s)' % datetime.date.today().isoformat()
ver_line = ver_line.encode('utf-8')
dash_line = '-' * len(ver_line)
dash_line = dash_line.encode('utf-8')
unicode_line = u'* This is a line that has unicode\xfc'.encode('utf-8')
expected = [b'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^',
b'Changelog for package fake_pkg',
b'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^',
b'',
ver_line,
dash_line,
b'* This is my changelog entry',
unicode_line,
b'',
b'0.0.9 (2017-01-30)',
b'------------------',
b'* This is old version.']
# Open the file written, and compare each line written to
# the one read back.
with open(temp_file, 'rb') as verify_file:
content = verify_file.read().splitlines()
for line_written, line_expected in zip(content, expected):
self.assertEqual(line_written.strip(), line_expected)
|
11472111
|
import os
import requests
headers = {
"Cookie": "arccount62298=c; arccount62019=c",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"
}
while 1:
dicname = input()
try:
os.mkdir("./py/newimage/"+dicname)
except:
pass
os.chdir("./py/newimage/"+dicname)
while 1:
s=input()
if s=="0":break
s = s.split("(")
url = s[-1][:-1]
name = url.split("/")
name = name[-1]
with open(name, "wb") as img:
img.write(requests.get(url, headers=headers).content)
print("已保存"+name)
os.chdir("..")
os.chdir("..")
os.chdir("..")
|
11472112
|
from subprocess import call
from string import Template
jobs_spec= []
with open("jobs_info_both.txt") as f:
keys=[]
for i,line in enumerate(f.readlines()):
if i==0:
keys= line[:-1].split(",")
else:
if not line.startswith("#"):
values= line[:-1].split(",")
print(i,keys,values,len(keys), len(values))
assert len(keys)==len(values), "something is missing"
tmp_dict={}
for k in range(0,len(keys)):
tmp_dict[keys[k]]=values[k]
jobs_spec.append(tmp_dict)
print(jobs_spec)
#open the file
filein = open( 'job_template.tmpl' )
#read it
src = Template( filein.read() )
# do the substitution: write one job script per spec and submit it
for job in jobs_spec:
    with open(job['name'] + ".sh", "w") as text_file:
        text_file.write(src.substitute(job))
    # submit once the file is closed so sbatch sees the complete script
    print(call("sbatch_dgx " + job['name'] + ".sh", shell=True))
|
11472149
|
from __future__ import absolute_import, division, print_function
import os,sys
from cctbx.examples.merging import test_levenberg_sparse as test
import libtbx.load_env
# test script assumes that you get the data files directly from the author (NKS) and
# install them in the directory "xscale_reserve" at the same dir-level as cctbx_project
if __name__=="__main__":
modules_dist = os.path.abspath(os.path.join(libtbx.env.dist_path("cctbx"),"../.."))
datadir = os.path.join(modules_dist,"xscale_reserve") # Get files directly from author, NKS
plot_flag=False
esd_plot_flag=False
for N in [25, 200, 300, 400, 500, 800, 1000, 2000, 5000]:
for trans in [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001]:
test.execute_case(datadir, n_frame=N, transmittance=trans, apply_noise=True,
plot=plot_flag, esd_plot = esd_plot_flag)
print("OK")
#raw_input("OK")
sys.stdout.flush()
|
11472170
|
from bs4 import BeautifulSoup
import requests
from .content.document import ScribdTextualDocument
from .content.document import ScribdImageDocument
from .content.book import ScribdBook
from .content.audiobook import ScribdAudioBook
from .pdf_converter import ConvertToPDF
class Downloader:
"""
A helper class for downloading books and documents off Scribd.
Parameters
----------
url : `str`
A string containing path to a Scribd URL
"""
def __init__(self, url):
self.url = url
is_audiobook = self.is_audiobook()
if is_audiobook:
is_book = False
else:
is_book = self.is_book()
self._is_audiobook = is_audiobook
self._is_book = is_book
def download(self, is_image_document=None):
"""
Downloads books and documents from Scribd.
Returns an object of `ConvertToPDF` class.
"""
if self._is_audiobook:
content = self._download_audiobook()
return content
if self._is_book:
content = self._download_book()
else:
if is_image_document is None:
raise TypeError(
"The input URL points to a document. You must specify "
"whether it is an image document or a textual document "
"in the `image_document` parameter."
)
content = self._download_document(is_image_document)
return content
def _download_book(self):
"""
Downloads books off Scribd.
Returns an object of `ConvertToPDF` class.
"""
book = ScribdBook(self.url)
md_path = book.download()
pdf_path = "{}.pdf".format(book.sanitized_title)
return ConvertToPDF(md_path, pdf_path)
def _download_document(self, image_document):
"""
Downloads textual and image documents off Scribd.
Returns an object of `ConvertToPDF` class.
"""
if image_document:
document = ScribdImageDocument(self.url)
else:
document = ScribdTextualDocument(self.url)
content_path = document.download()
pdf_path = "{}.pdf".format(document.sanitized_title)
return ConvertToPDF(content_path, pdf_path)
def _download_audiobook(self):
"""
Downloads audiobooks off Scribd.
Returns a list containing local audio filepaths.
"""
audiobook = ScribdAudioBook(self.url)
playlist = audiobook.playlist
if not audiobook.premium_cookies:
print("Premium cookies not detected. Only the preview version of audiobook will be downloaded.")
playlist.download()
return playlist.download_paths
def is_book(self):
"""
Checks whether the passed URL points to a Scribd book
or a Scribd document.
"""
response = requests.get(self.url)
soup = BeautifulSoup(response.text, "html.parser")
content_class = soup.find("body")["class"]
matches_with_book = content_class[0] == "autogen_class_views_layouts_book_web"
return matches_with_book
def is_audiobook(self):
"""
Checks whether the passed URL points to a Scribd audiobook.
"""
return "/audiobook/" in self.url
|
11472210
|
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
sender_email = "" # Your Email
receiver_email = "" # Email of Receiver
password = "" # Your Password
message = MIMEMultipart("alternative")
message["Subject"] = "multipart test"
message["From"] = sender_email
message["To"] = receiver_email
# Create the plain-text and HTML version of your message
text = """\
Hi,
How are you?
Real Python has many great tutorials:
www.realpython.com"""
html = """\
<html>
<body>
<p>Hi,<br>
How are you?<br>
<a href="http://www.realpython.com">Real Python</a>
has many great tutorials.
</p>
</body>
</html>
"""
# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)
# Create secure connection with server and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(
sender_email, receiver_email, message.as_string()
)
|