text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env pypy3
'''
Testing 2D list (list of lists) data structure.
'''
import time
import random
from lib import benchmark, random_tuple
g_list = []
g_size = 0
g_count = 0
g_get_keys = []
g_set_keys = []
def setup(size, density):
    ''' Populate the table.

    :param int size: the table holds size x size entries.
    :param float density: (0,1] fraction of entries to fill.
    '''
    assert size > 0, size
    assert density > 0 and density <= 1, density
    global g_list
    global g_size
    global g_count
    g_list = [[None]*size for _ in range(size)]
    # Number of cells to fill; int() truncates exactly like the old `* 1.0 ... // 1`.
    count = int(size * size * density)
    g_size = size
    g_count = count
    filled = 0
    while filled < count:
        # Keep drawing random cells until `count` distinct ones are filled.
        idx = random.randint(0, size*size-1)
        x, y = divmod(idx, size)
        if g_list[x][y] is None:
            g_list[x][y] = random_tuple()
            filled += 1
    global g_get_keys
    for _ in range(1000000):
        idx = random.randint(0, size*size-1)
        g_get_keys.append(divmod(idx, size))
    global g_set_keys
    # Both benchmarks deliberately walk the exact same key sequence.
    g_set_keys = g_get_keys
def get():
    ''' Time random reads over the pre-generated key list. '''
    global g_get_keys
    global g_size
    start = time.time()
    for row, col in g_get_keys:
        # Check-then-read (two lookups) is intentional: it mirrors the
        # access pattern being benchmarked across data structures.
        if g_list[row][col] is not None:
            x = g_list[row][col]
    return time.time() - start
def set():
    ''' Time random writes over the pre-generated key list. '''
    # NOTE: intentionally named `set` (shadows the builtin) to mirror get()/scan().
    global g_set_keys
    global g_size
    scratch = [1, 2, 3, 4, 5]
    start = time.time()
    for row, col in g_set_keys:
        if g_list[row][col] is not None:
            # Swap the scratch value into the table and carry the old one on.
            previous = g_list[row][col]
            g_list[row][col] = scratch
            scratch = previous
    return time.time() - start
def scan():
    ''' Time a full row-by-row sweep of the table. '''
    global g_list
    start = time.time()
    for row in g_list:
        for cell in row:
            if cell is not None:
                # Touch the first element so the read is not optimised away.
                _ = cell[0]
    return time.time() - start
def main():
    ''' Populate a 700x700 table at 0.7 density, then run every benchmark. '''
    setup(700, 0.7)
    for bench_fn in (get, set, scan):
        benchmark(bench_fn)
if __name__ == "__main__":
main()
| 1,891 | 703 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import collections
import json
from stepfunctions.inputs.utils import flatten, replace_type_with_str
# Outcome of Placeholder.validate(): an overall flag plus the offending keys.
ValidationResult = collections.namedtuple(
    'ValidationResult',
    ['valid', 'keys_missing', 'keys_type_mismatch'],
)
class Placeholder(object):
    """
    A collection of Placeholder variables.
    """

    def __init__(self, schema=None, **kwargs):
        """
        Args:
            schema (dict, optional): Schema for the placeholder collection. (default: None)
                Example below::

                    {
                        'ModelName': str,
                        'JobName': str,
                        'Hyperparameters': {
                            'tol': float
                        }
                    }
        Keyword Args:
            name (str, optional): Name of the placeholder variable. (default: None)
            type (type, optional): Type of the placeholder variable. (default: None)
            parent (Placeholder, optional): Parent variable for a placeholder variable. (default: None)
        """
        self.store = {}
        self.immutable = False
        self.schema = schema
        if self.schema:
            # Applying a schema freezes the collection: no new variables may
            # be created afterwards.
            self._set_schema(schema)
            self._make_immutable()
        self.json_str_template = "{}"
        self.name = kwargs.get("name")
        self.type = kwargs.get("type")
        self.parent = kwargs.get("parent")

    def get(self, name, type):
        """
        Create a placeholder variable with an associated type.

        Args:
            name (str): Name of the placeholder variable.
            type (type): Type of the placeholder variable.

        Raises:
            ValueError: If placeholder variable with the same name but different type already exists.
            ValueError: If placeholder variable does not fit into a previously specified schema for the placeholder collection.

        Returns:
            Placeholder: Placeholder variable.
        """
        if not self._is_valid_name(name):
            raise ValueError('Key name can only be string or integer')
        if name in self.store:
            curr_variable = self.store[name]
            if curr_variable.type != type:
                raise ValueError('Key already exists with a different value type: {current_value_type}'.format(current_value_type=curr_variable.type))
            return curr_variable
        else:
            self.store[name] = self._create_variable(name=name, parent=self, type=type)
            return self.store[name]

    def get_schema_as_dict(self):
        """
        Generate a schema for the placeholder collection as a Python dictionary.

        Returns:
            dict: Placeholder collection schema.
        """
        schema = {}
        for k, v in self.store.items():
            if v._is_empty():
                # Leaf variable: untyped leaves default to str.
                schema[k] = v.type or str
            else:
                schema[k] = v.get_schema_as_dict()
        return schema

    def get_schema_as_json(self, pretty=False):
        """
        Generate a schema for the placeholder collection as a JSON formatted string.

        Args:
            pretty (bool, optional): Boolean flag set to `True` if JSON string should be prettified. `False`, otherwise. (default: False)

        Returns:
            str: JSON formatted string representation of the block.
        """
        dict_schema_str = replace_type_with_str(self.get_schema_as_dict())
        if pretty:
            return json.dumps(dict_schema_str, indent=4)
        return json.dumps(dict_schema_str)

    def contains(self, placeholder):
        """
        Check if the placeholder collection contains the specified placeholder variable.

        Args:
            placeholder (Placeholder): Placeholder variable to search for, in the collection.

        Returns:
            bool: `True` if placeholder variable was found in the collection. `False`, otherwise.
        """
        # Depth-first search through the nested collections.
        for v in self.store.values():
            if placeholder == v:
                return True
            elif v.contains(placeholder):
                return True
        return False

    def __contains__(self, placeholder):
        """
        Containment check operator for placeholder variables.
        """
        return self.contains(placeholder)

    def validate(self, input):
        """
        Validate a specified input against the placeholder collection schema.

        Args:
            input (dict): Input to validate against the placeholder collection schema.

        Returns:
            ValidationResult: Named tuple with the keys:
                `valid` (Boolean): Representing the result of validation,
                `keys_missing` (list(str)): List of keys missing in the input,
                `keys_type_mismatch` (list(str), type, type): List of tuples with key name, expected type, and provided type.
        """
        if input is None:
            # Fixed: previously returned a bare tuple, breaking the documented
            # ValidationResult return type (values are unchanged).
            return ValidationResult(valid=False, keys_missing=None, keys_type_mismatch=None)
        flattened_schema = flatten(self.get_schema_as_dict())
        flattened_input = flatten(input)
        keys_missing = [i for i in flattened_schema if i not in flattened_input]
        keys_type_mismatch = []
        for k, v in flattened_input.items():
            if k in flattened_schema and not isinstance(v, flattened_schema.get(k)):
                keys_type_mismatch.append((k, flattened_schema.get(k), type(v)))
        valid = not keys_missing and not keys_type_mismatch
        return ValidationResult(valid=valid, keys_missing=keys_missing, keys_type_mismatch=keys_type_mismatch)

    def _create_variable(self, name, parent, type=None):
        # Subclasses decide which concrete Placeholder type to create.
        raise NotImplementedError

    def _get_path(self):
        """
        Get path to a placeholder variable node in the collection.
        """
        path = []
        node = self
        # Walk up to the (unnamed) root, collecting names along the way.
        while node.name is not None:
            path.append(node.name)
            node = node.parent
        path.reverse()
        return path

    def _is_empty(self):
        """
        Check if the store for a placeholder collection/variable is empty.
        """
        return len(self.store) == 0

    def _set_schema(self, schema, path=None):
        """
        Set the schema for a placeholder collection.

        Args:
            schema (dict): (Possibly nested) mapping of key name to type.
            path (list, optional): Key path to the current nesting level.
                Defaults to the collection root. (Fixed: was a mutable
                default argument ``path=[]`` shared between calls.)
        """
        if path is None:
            path = []
        for k, v in schema.items():
            if isinstance(v, dict):
                self._set_schema(v, path + [k])
            else:
                current = self
                for node in path:
                    current = current.get(node, dict)
                # Called for its side effect: registers `k` with type `v`.
                current.get(k, v)

    def _make_immutable(self):
        """
        Make a placeholder collection (including all variables contained) immutable.
        """
        for v in self.store.values():
            if isinstance(v, Placeholder):
                v._make_immutable()
        self.immutable = True

    def _is_valid_name(self, name):
        # Only string and integer keys are representable in a JSON path.
        return isinstance(name, (str, int))

    def __getitem__(self, name):
        """
        Subscript operator to build placeholder variables.
        """
        if not self._is_valid_name(name):
            raise ValueError('Key name can only be string or integer')
        if name in self.store:
            return self.store[name]
        else:
            self.store[name] = self._create_variable(name=name, parent=self)
            return self.store[name]

    def _join_path(self, path):
        # Render each path element as a JSONPath subscript: 'a' -> ['a'], 3 -> [3].
        subscript_list = []
        for i in path:
            if isinstance(i, str):
                subscript_list.append("['{}']".format(i))
            elif isinstance(i, int):
                subscript_list.append('[{}]'.format(i))
        return "".join(subscript_list)

    def to_jsonpath(self):
        """
        Returns a JSON path representation of the placeholder variable to be used for step parameters.

        Returns:
            str: JSON path representation of the placeholder variable
        """
        return self.json_str_template.format(self._join_path(self._get_path()))
class ExecutionInput(Placeholder):
    """
    Top-level class for execution input placeholders.
    """

    def __init__(self, schema=None, **kwargs):
        super(ExecutionInput, self).__init__(schema, **kwargs)
        # Variables render as $$.Execution.Input['...'] JSON paths.
        self.json_str_template = '$$.Execution.Input{}'

    def _create_variable(self, name, parent, type=None):
        """
        Create a placeholder variable for Workflow Input.

        Creation fails when the collection was frozen by a pre-specified schema.
        """
        if self.immutable:
            raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.")
        kwargs = {"name": name, "parent": parent}
        if type:
            kwargs["type"] = type
        return ExecutionInput(**kwargs)
class StepInput(Placeholder):
    """
    Top-level class for step input placeholders.
    """

    def __init__(self, schema=None, **kwargs):
        super(StepInput, self).__init__(schema, **kwargs)
        # Variables render as $['...'] JSON paths.
        self.json_str_template = '${}'

    def _create_variable(self, name, parent, type=None):
        """
        Create a placeholder variable for Step Input.

        Creation fails when the collection was frozen by a pre-specified schema.
        """
        if self.immutable:
            raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.")
        kwargs = {"name": name, "parent": parent}
        if type:
            kwargs["type"] = type
        return StepInput(**kwargs)
| 10,622 | 2,699 |
# Copyright 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static transformer module."""
import typing
from copy import deepcopy
from mrack.transformers.transformer import Transformer
CONFIG_KEY = "static"
class StaticTransformer(Transformer):
    """
    Static transformer.

    Does almost no operation as there is nothing to provision.
    """

    _config_key = CONFIG_KEY
    _required_config_attrs: typing.List[str] = []
    _required_host_attrs = ["name", "os", "group", "ip"]

    def create_host_requirement(self, host):
        """Create single input for Static provisioner."""
        # The display name is (re)set on every call; the host definition is
        # passed through untouched apart from being deep-copied.
        self.dsp_name = "Static"
        requirement = deepcopy(host)
        return requirement
| 1,177 | 350 |
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from time import perf_counter
from types import TracebackType
from typing import Any, Callable, Generic, Mapping, NoReturn, Optional, Tuple, Type, TypeVar
import numpy as np
import tensorflow as tf
from typing_extensions import Final, final
from ..types import TensorType
C = TypeVar("C", bound=Callable[..., object])
""" A type variable bound to `typing.Callable`. """
def jit(apply: bool = True, **optimize_kwargs: Any) -> Callable[[C], C]:
    """
    Build a decorator that wraps a function with `tf.function` only when ``apply`` is true.

    :param apply: If `True`, the returned decorator behaves exactly like `tf.function`.
        If `False`, it returns the function unchanged.
    :param optimize_kwargs: Additional arguments forwarded to `tf.function`.
    :return: The decorator.
    """

    def decorator(func: C) -> C:
        if not apply:
            return func
        return tf.function(func, **optimize_kwargs)

    return decorator
def shapes_equal(this: TensorType, that: TensorType) -> TensorType:
    """
    Return a scalar tensor that is `True` iff ``this`` and ``that`` have the same runtime
    rank and the same runtime shape, else `False`.
    """
    # Rank is compared first: tf.shape tensors of different lengths cannot be
    # compared element-wise.
    same_rank = tf.rank(this) == tf.rank(that)
    return same_rank and tf.reduce_all(tf.shape(this) == tf.shape(that))
def to_numpy(t: TensorType) -> np.ndarray:
    """
    :param t: An array-like object.
    :return: ``t`` as a NumPy array (anything that is not a `tf.Tensor` is returned as-is).
    """
    return t.numpy() if isinstance(t, tf.Tensor) else t
ResultType = TypeVar("ResultType", covariant=True)
""" An unbounded covariant type variable. """
class Result(Generic[ResultType], ABC):
    """
    The outcome of an operation that may fail with an exception: either the operation's
    return value wrapped in an :class:`Ok`, or the exception it raised wrapped in an
    :class:`Err`.

    To check whether instances such as

    >>> res = Ok(1)
    >>> other_res = Err(ValueError("whoops"))

    contain a value, use :attr:`is_ok` (or :attr:`is_err`)

    >>> res.is_ok
    True
    >>> other_res.is_ok
    False

    We can access the value if it :attr:`is_ok` using :meth:`unwrap`.

    >>> res.unwrap()
    1

    Trying to access the value of a failed :class:`Result`, or :class:`Err`, will raise the wrapped
    exception

    >>> other_res.unwrap()
    Traceback (most recent call last):
        ...
    ValueError: whoops

    **Note:** This class is not intended to be subclassed other than by :class:`Ok` and
    :class:`Err`.
    """

    @property
    @abstractmethod
    def is_ok(self) -> bool:
        """`True` if this :class:`Result` contains a value, else `False`."""

    @property
    def is_err(self) -> bool:
        """
        `True` if this :class:`Result` contains an error, else `False`. The opposite of
        :attr:`is_ok`.
        """
        return not self.is_ok

    @abstractmethod
    def unwrap(self) -> ResultType:
        """
        :return: The contained value, if it exists.
        :raise Exception: If there is no contained value.
        """
@final
class Ok(Result[ResultType]):
    """Holds the value produced by a successful evaluation."""

    def __init__(self, value: ResultType):
        """
        :param value: The result of a successful evaluation.
        """
        self._value = value

    def __repr__(self) -> str:
        """"""
        return "Ok({!r})".format(self._value)

    @property
    def is_ok(self) -> bool:
        """`True` always."""
        return True

    def unwrap(self) -> ResultType:
        """
        :return: The wrapped value.
        """
        return self._value
@final
class Err(Result[NoReturn]):
    """Holds the exception raised by a failed evaluation."""

    def __init__(self, exc: Exception):
        """
        :param exc: The exception that occurred.
        """
        self._exc = exc

    def __repr__(self) -> str:
        """"""
        return "Err({!r})".format(self._exc)

    @property
    def is_ok(self) -> bool:
        """`False` always."""
        return False

    def unwrap(self) -> NoReturn:
        """
        :raise Exception: Always. Raises the wrapped exception.
        """
        raise self._exc
class DEFAULTS:
    """Default constants used in Trieste."""

    # A small positive constant; see the attribute docstring below for its role.
    JITTER: Final[float] = 1e-6
    """
    The default jitter, typically used to stabilise computations near singular points, such as in
    Cholesky decomposition.
    """
K = TypeVar("K")
""" An unbound type variable. """
U = TypeVar("U")
""" An unbound type variable. """
V = TypeVar("V")
""" An unbound type variable. """
def map_values(f: Callable[[U], V], mapping: Mapping[K, U]) -> Mapping[K, V]:
    """
    Build a new mapping with the same keys as ``mapping`` and every value replaced by
    ``f`` applied to it. ``mapping`` itself is left untouched (assuming ``f`` does not
    mutate its argument). For example:

    >>> import math
    >>> squares = {'a': 1, 'b': 4, 'c': 9}
    >>> map_values(math.sqrt, squares)['b']
    2.0
    >>> squares
    {'a': 1, 'b': 4, 'c': 9}

    :param f: The function to apply to the values in ``mapping``.
    :param mapping: A mapping.
    :return: A new mapping whose keys are those of ``mapping`` and whose values are the
        result of applying ``f`` to each value in ``mapping``.
    """
    return {key: f(value) for (key, value) in mapping.items()}
class Timer:
    """
    Context manager that records the wall-clock duration of its body. For example:

    >>> from time import sleep
    >>> with Timer() as timer: sleep(2.0)
    >>> timer.time  # doctest: +SKIP
    2.0
    """

    def __enter__(self) -> Timer:
        # perf_counter is a monotonic, high-resolution clock.
        self.start = perf_counter()
        return self

    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        stop = perf_counter()
        self.end = stop
        self.time = stop - self.start
def flatten_leading_dims(x: TensorType) -> Tuple[TensorType, Callable[[TensorType], TensorType]]:
    """
    Flattens the leading dimensions of `x` (all but the last dimension), and returns a
    function that can be used to restore them (typically after first manipulating the
    flattened tensor).
    """
    x_batched_shape = tf.shape(x)
    batch_shape = x_batched_shape[:-1]  # every dimension except the trailing one
    input_shape = x_batched_shape[-1:]
    # [-1, D]: collapse all leading dimensions into a single one.
    x_flat_shape = tf.concat([[-1], input_shape], axis=0)

    def unflatten(y: TensorType) -> TensorType:
        # Restore the captured batch shape, keeping y's own trailing dimension.
        tf.debugging.assert_rank(y, 2, message="unflatten is expecting a rank two tensor.")
        y_flat_shape = tf.shape(y)
        output_shape = y_flat_shape[1:]
        y_batched_shape = tf.concat([batch_shape, output_shape], axis=0)
        y_batched = tf.reshape(y, y_batched_shape)
        tf.debugging.assert_shapes([(y, ["N", "D"]), (y_batched, [..., "M", "D"])])
        return y_batched

    return tf.reshape(x, x_flat_shape), unflatten
| 7,421 | 2,303 |
import pygame as pg
import state
from .. import util
class SplashState(state._State):
    """Intro screen: shows the game title for a few seconds, then advances."""

    def __init__(self):
        super(SplashState, self).__init__()
        self.bg_color = (0, 0, 0)
        self.text_color = (155, 255, 155)
        self.duration = 3  # seconds left before auto-advancing
        self.image = pg.Surface(util.SCREEN_SIZE)
        self.next = "MainMenu"
        self.title = "HackerMan"
        self.titleSurface = self.make_title_surface()

    def start(self, data, current_time):
        """Reset the countdown each time this state becomes active."""
        super(SplashState, self).start(data, current_time)
        self.duration = 3

    def make_title_surface(self):
        """Render the title text once, up front."""
        title_font = pg.font.Font(util.FONTS['west-england.regular'], 40)
        return title_font.render(self.title, False, self.text_color)

    def handle_events(self, event):
        """Allow Return to skip the splash screen."""
        if event.type == pg.KEYDOWN and event.key == pg.K_RETURN:
            self.done = True

    def update(self, dt, current_time, keys):
        """Count down; flag completion once the timer runs out."""
        self.duration -= dt
        if self.duration <= 0:
            self.done = True

    def render(self, surface):
        """Draw the background and title onto the target surface."""
        self.image.fill(self.bg_color)
        # NOTE(review): blitting at SCREEN_RECT.center places the text's
        # top-left corner at the screen centre (text is not centred) —
        # presumably intended; confirm.
        self.image.blit(self.titleSurface, util.SCREEN_RECT.center)
        surface.blit(self.image, (0, 0))
| 1,185 | 408 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Some basic functions for image construction."""
import sys as _sys
import numpy as _np
from PyQt4 import QtGui as _qt
from qimageview import qimageview as _qimageview
if _sys.byteorder == 'little':
_bgra = (0, 1, 2, 3)
else:
_bgra = (3, 2, 1, 0)
bgra_dtype = _np.dtype({'b': (_np.uint8, _bgra[0], 'blue'),
'g': (_np.uint8, _bgra[1], 'green'),
'r': (_np.uint8, _bgra[2], 'red'),
'a': (_np.uint8, _bgra[3], 'alpha')})
def gray(array, alpha):
"""Return a rgba array which color ranges from black to white."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
array[array<=0] = 0
array[array>255] = 255
new_array[..., 0] = array
new_array[..., 1] = array
new_array[..., 2] = array
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def red2yellow(array, alpha):
"""Return a rgba array which color ranges from red to yellow."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
array[array<=0] = 0
array[array>255] = 255
new_array[..., 0] = 255 * array.clip(0, 1)
new_array[..., 1] = array
new_array[..., 2] = 0
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def blue2cyanblue(array, alpha):
"""Return a rgba array which color ranges from blue to cyanblue."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
array[array<=0] = 0
array[array>255] = 255
new_array[..., 0] = 0
new_array[..., 1] = array
new_array[..., 2] = 255 * array.clip(0, 1)
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def red(array, alpha):
"""Return a whole red rgba array."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
new_array[..., 0] = 255 * array.clip(0, 1)
new_array[..., 1] = 0
new_array[..., 2] = 0
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def green(array, alpha):
"""Return a whole green rgba array."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
new_array[..., 0] = 0
new_array[..., 1] = 255 * array.clip(0, 1)
new_array[..., 2] = 0
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def blue(array, alpha):
"""Return a whole blue rgba array."""
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
new_array[..., 0] = 0
new_array[..., 1] = 0
new_array[..., 2] = 255 * array.clip(0, 1)
new_array[..., 3] = alpha * array.clip(0, 1)
return new_array
def single_roi(array, alpha, roi):
"""Return a single roi view array."""
color = (70, 70, 70)
h, w = array.shape
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
if roi is None or roi == 0:
return new_array
mask = array == roi
new_array[mask, 0] = color[0]
new_array[mask, 1] = color[1]
new_array[mask, 2] = color[2]
new_array[mask, 3] = alpha
return new_array
def _normalize255(array, normalize, scale_length=255.0):
    """Normalize the array.

    ``normalize`` may be:
      * falsy          -- return ``array`` unchanged;
      * True           -- scale from (array.min(), array.max());
      * a scalar       -- scale from (0, scalar);
      * (nmin, nmax)   -- scale from that range (an empty range, nmin == nmax,
                          falls back to the array's own min/max).

    Returns a rounded array scaled so nmin..nmax maps onto 0..scale_length.
    """
    if not normalize:
        return array
    if normalize is True:
        normalize = array.min(), array.max()
    elif _np.isscalar(normalize):
        normalize = (0, normalize)
    elif isinstance(normalize, tuple) and (normalize[0] == normalize[1]):
        normalize = array.min(), array.max()
    nmin, nmax = normalize
    if nmin:
        # Shift so nmin becomes zero (skipped when nmin is already 0/falsy).
        array = array - nmin
    if nmax == nmin:
        # Degenerate range: nothing to scale by.
        return _np.round(array)
    else:
        scale = scale_length / (nmax - nmin)
        if scale != 1.0:
            array = array * scale
        # Bump barely-visible values up to 1 so rounding cannot erase them.
        array[_np.logical_and(array > 0, array < 1)] = 1
        return _np.round(array)
def gray2qimage(array, normalize=False):
    """Convert a 2D numpy array 'array' into a 8-bit, indexed QImage with a specific colormap.
    The first dimension represents the vertical image
    axis.

    The parameter 'normalize' can be used to normalize an image's value range
    to 0 ~ 255:

    normalize = (nmin, nmax):
        scale & clip image values from nmin..nmax to 0..255
    normalize = nmax:
        lets nmin default to zero, i.e. scale & clip the range 0..nmax to
        0..255
    normalize = True:
        scale image values to 0..255 (same as passing (array.min(),
        array.max()))

    If the source array 'array' contains masked values, the result will have
    only 255 shades of gray, and one color map entry will be used to make the
    corresponding pixels transparent.
    """
    if _np.ndim(array) != 2:
        raise ValueError("gray2qimage can only convert 2D arrays")
    h, w = array.shape
    result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8)
    array = _normalize255(array, normalize)
    # Identity grayscale palette: index i maps to colour (i, i, i).
    for i in range(256):
        result.setColor(i, _qt.qRgb(i, i, i))
    # Write the normalized pixel data straight into the QImage's buffer.
    _qimageview(result)[:] = array.clip(0, 255)
    return result
def byte_view(qimage, byteorder = 'little'):
    """Return the bytes in the view with the given byteorder."""
    raw = _qimageview(qimage)
    # Expose each pixel as individual uint8 channel bytes.
    channel_bytes = raw.view(_np.uint8).reshape(raw.shape + (-1, ))
    if byteorder and byteorder != _sys.byteorder:
        # Requested order differs from native: reverse the channel axis.
        channel_bytes = channel_bytes[..., ::-1]
    return channel_bytes
def rgb_view(qimage, byteorder='big'):
    """Return the rgb value array in view.

    :param qimage: source image; must use a 32-bit pixel format.
    :param byteorder: 'big', 'little', or None for the platform's native order.
    :raises ValueError: if the image does not have 32-bit pixels.
    """
    if byteorder is None:
        byteorder = _sys.byteorder
    # Renamed from `bytes` so the builtin is not shadowed.
    pixel_bytes = byte_view(qimage, byteorder)
    if pixel_bytes.shape[2] != 4:
        # Parenthesised raise: the old `raise ValueError, "..."` form is
        # Python-2-only syntax; this form works in both Python 2 and 3.
        raise ValueError("For rgb_view, the image must have 32 bit pixel" +
                         " size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
    if byteorder == 'little':
        return pixel_bytes[..., :3]
    else:
        return pixel_bytes[..., 1:]
def alpha_view(qimage):
    """Return the alpha value array in view.

    :param qimage: source image; must use a 32-bit pixel format.
    :raises ValueError: if the image does not have 32-bit pixels.
    """
    # Renamed from `bytes` so the builtin is not shadowed.
    pixel_bytes = byte_view(qimage, byteorder = None)
    if pixel_bytes.shape[2] != 4:
        # Parenthesised raise: the old `raise ValueError, "..."` form is
        # Python-2-only syntax; this form works in both Python 2 and 3.
        raise ValueError("For alpha_view, the image must have 32 bit pixel" +
                         " size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
    return pixel_bytes[..., _bgra[3]]
def array2qrgba(array, alpha, colormap, normalize=False, roi=None):
    """Convert a 2D-array into a 3D-array containing rgba value.

    :param array: 2D scalar array.
    :param alpha: opacity (0-255) applied to visible (non-zero) pixels.
    :param colormap: either the name of a built-in colormap ('gray',
        'red2yellow', 'blue2cyanblue', 'red', 'green', 'blue', 'rainbow',
        'single ROI') or a dict mapping scalar values to (r, g, b) triples.
    :param normalize: False, True, a scalar, or an (nmin, nmax) tuple;
        see _normalize255.
    :param roi: ROI label, used only with the 'single ROI' colormap.
    """
    if _np.ndim(array) != 2:
        raise ValueError("array2qrgb can only convert 2D array")
    if isinstance(colormap, str):
        if colormap != 'rainbow':
            # Plain colormaps work on 0..255-normalized data;
            # 'single ROI' keeps the raw label values instead.
            if colormap != 'single ROI':
                array = _normalize255(array, normalize)
            if colormap == 'gray':
                new_array = gray(array, alpha)
            elif colormap == 'red2yellow':
                new_array = red2yellow(array, alpha)
            elif colormap == 'blue2cyanblue':
                new_array = blue2cyanblue(array, alpha)
            elif colormap == 'red':
                new_array = red(array, alpha)
            elif colormap == 'green':
                new_array = green(array, alpha)
            elif colormap == 'blue':
                new_array = blue(array, alpha)
            else:
                new_array = single_roi(array, alpha, roi)
        else:
            # 'rainbow': zero out values outside the requested window ...
            if _np.isscalar(normalize):
                new_array = array.clip(0, array.max())
                new_array[array < 0] = 0
                new_array[array > normalize] = 0
            elif isinstance(normalize, tuple):
                new_array = array.clip(0, array.max())
                new_array[array < normalize[0]] = 0
                new_array[array > normalize[1]] = 0
            else:
                new_array = array.clip(0, array.max())
                new_array[array < 0] = 0
            h, w = new_array.shape
            # ... then derive deterministic R/G/B channels by taking the
            # (finely rescaled) value modulo three distinct constants.
            R, G, B = 41, 61, 83
            fst_norm = 100000.0
            new_array_raw = _normalize255(new_array,
                                          normalize,
                                          scale_length=fst_norm)
            new_array_R = _normalize255(new_array_raw % R,
                                        (0, R),
                                        scale_length=254.0)
            new_array_G = _normalize255(new_array_raw % G,
                                        (0, G),
                                        scale_length=254.0)
            new_array_B = _normalize255(new_array_raw % B,
                                        (0, B),
                                        scale_length=254.0)
            new_array2 = _np.zeros((h, w, 4), dtype=_np.uint8)
            # add_ nudges every visible pixel to a non-zero channel value.
            add_ = new_array.clip(0, 1)
            new_array2[..., 0] = new_array_R + add_
            new_array2[..., 1] = new_array_G + add_
            new_array2[..., 2] = new_array_B + add_
            # Alpha only where at least one colour channel is non-zero.
            new_array2[..., 3] = alpha * _np.sum(new_array2, 2).clip(0, 1)
            #_np.set_printoptions(threshold=1000000)
            new_array = new_array2
    else:
        # dict colormap: explicit value -> (r, g, b) lookup table.
        if _np.isscalar(normalize):
            new_array = array.clip(0, array.max())
            new_array[array < 0] = 0
            new_array[array > normalize] = 0
        elif isinstance(normalize, tuple):
            new_array = array.clip(0, array.max())
            new_array[array < normalize[0]] = 0
            new_array[array > normalize[1]] = 0
        else:
            new_array = array.clip(0, array.max())
            new_array[array < 0] = 0
        values = colormap.keys()
        values = [int(item) for item in values]
        h, w = new_array.shape
        new_array2 = _np.zeros((h, w, 4), dtype=_np.uint8)
        for item in values:
            new_array2[new_array==item] = [colormap[item][0],
                                           colormap[item][1],
                                           colormap[item][2],
                                           0]
        new_array2[..., 3] = alpha * _np.sum(new_array2, 2).clip(0, 1)
        new_array = new_array2
    return new_array
def qcomposition(array_list):
    """Composite several qrgba arrays into one.

    Layers are blended back-to-front: array_list[0] is the bottom layer and
    each later entry is alpha-blended on top. Returns an (h, w, 3) uint8 array.
    """
    if not len(array_list):
        raise ValueError('Input array list cannot be empty.')
    if _np.ndim(array_list[0]) != 3:
        raise ValueError('RGBA array must be 3D.')
    h, w, channel = array_list[0].shape
    # Work in int64 so the 0..255 * 0..255 products below cannot overflow.
    result = _np.array(array_list[0][..., :3], dtype=_np.int64)
    for index in range(1, len(array_list)):
        item = _np.array(array_list[index], dtype=_np.int64)
        # Broadcast this layer's alpha channel across its three colour channels.
        alpha_array = _np.tile(item[..., -1].reshape((-1, 1)), (1, 1, 3))
        alpha_array = alpha_array.reshape((h, w, 3))
        # "Over" blend with alpha kept in the 0..255 integer range; the /255
        # rescale (floor division on Py2 int arrays, true division on Py3)
        # gives the same values once the final uint8 cast truncates.
        result = item[..., :3] * alpha_array + result * \
                 (255 - alpha_array)
        result = result / 255
    result = _np.array(result, dtype=_np.uint8)
    return result
def composition(dest, source):
"""Save result in place
Note
----
The dest is a rgb image, while the source is a rgba image
"""
alpha = source[...,3].reshape(source.shape[0], source.shape[1], 1).astype(_np.float)
alpha /= 255
source_rgb = source[...,:3].astype(_np.float)
dest[:] = _np.uint8(source_rgb * alpha + dest.astype(_np.float) * (1 - alpha))
return dest
def qrgba2qimage(array):
    """Convert the input array into a image."""
    if _np.ndim(array) != 3:
        raise ValueError("RGBA array must be 3D.")
    h, w, channel = array.shape
    image = _qt.QImage(w, h, _qt.QImage.Format_ARGB32)
    # Copy the colour planes, then force the image fully opaque.
    rgb_view(image)[:] = array[..., :3]
    alpha_view(image)[:] = 255
    return image
def null_image(h, w):
"""Return a whole black rgba array."""
new_array = _np.zeros((h, w, 4), dtype=_np.uint8)
new_array[..., 3] = 255
return new_array
| 11,735 | 4,170 |
import numpy as np
def atom_to_numbers(atom_name, number_property):
    """
    Look up a per-element property by element symbol.

    :param atom_name: element symbol, e.g. "H", "Fe".
    :param number_property: one of "charge" (atomic number), "mass"
        (standard atomic weight), or "vdw_radii" (van der Waals radius;
        presumably in Bohr — TODO confirm units). Case-insensitive.
    :return: the requested number; ``False`` where the tables have no value
        for that element; None (implicitly) for any other property name.
    """
    # NOTE(review): these tables are rebuilt on every call; hoisting them to
    # module-level constants would avoid the repeated construction.
    name2number = {"H": 1, "He": 2, "Li": 3, "Be": 4, "B": 5, "C": 6, "N": 7, "O": 8, "F": 9, "Ne": 10, "Na": 11, "Mg": 12, "Al": 13, "Si": 14, "P": 15, "S": 16,
    "Cl": 17, "Ar": 18, "K": 19, "Ca": 20, "Sc": 21, "Ti": 22, "V": 23, "Cr": 24, "Mn": 25, "Fe": 26, "Co": 27, "Ni": 28, "Cu": 29, "Zn": 30, "Ga": 31,
    "Ge": 32, "As": 33, "Se": 34, "Br": 35, "Kr": 36, "Rb": 37, "Sr": 38, "Y": 39, "Zr": 40, "Nb": 41, "Mo": 42, "Tc": 43, "Ru": 44, "Rh": 45, "Pd": 46,
    "Ag": 47, "Cd": 48, "In": 49, "Sn": 50, "Sb": 51, "Te": 52, "I": 53, "Xe": 54, "Cs": 55, "Ba": 56, "La": 57, "Ce": 58, "Pr": 59, "Nd": 60, "Pm": 61,
    "Sm": 62, "Eu": 63, "Gd": 64, "Tb": 65, "Dy": 66, "Ho": 67, "Er": 68, "Tm": 69, "Yb": 70, "Lu": 71, "Hf": 72, "Ta": 73, "W": 74, "Re": 75, "Os": 76,
    "Ir": 77, "Pt": 78, "Au": 79, "Hg": 80, "Tl": 81, "Pb": 82, "Bi": 83, "Po": 84, "At": 85, "Rn": 86, "Fr": 87, "Ra": 88, "Ac": 89, "Th": 90, "Pa": 91,
    "U": 92, "Np": 93, "Pu": 94, "Am": 95, "Cm": 96, "Bk": 97, "Cf": 98, "Es": 99, "Fm": 100, "Md": 101, "No": 102, "Lr": 103, "Rf": 104, "Db": 105, "Sg":
    106, "Bh": 107, "Hs": 108, "Mt": 109, "Ds": 110, "Rg": 111, "Cn": 112, "Nh": 113, "Fl": 114, "Mc": 115, "Lv": 116, "Ts": 117, "Og": 118}
    # Inverse of name2number (unused by the current branches but kept for parity).
    number2name = {1: "H", 2: "He", 3: "Li", 4: "Be", 5: "B", 6: "C", 7: "N", 8: "O", 9: "F", 10: "Ne", 11: "Na", 12: "Mg", 13: "Al", 14: "Si", 15: "P", 16: "S",
    17: "Cl", 18: "Ar", 19: "K", 20: "Ca", 21: "Sc", 22: "Ti", 23: "V", 24: "Cr", 25: "Mn", 26: "Fe", 27: "Co", 28: "Ni", 29: "Cu", 30: "Zn", 31: "Ga", 32:
    "Ge", 33: "As", 34: "Se", 35: "Br", 36: "Kr", 37: "Rb", 38: "Sr", 39: "Y", 40: "Zr", 41: "Nb", 42: "Mo", 43: "Tc", 44: "Ru", 45: "Rh", 46: "Pd", 47:
    "Ag", 48: "Cd", 49: "In", 50: "Sn", 51: "Sb", 52: "Te", 53: "I", 54: "Xe", 55: "Cs", 56: "Ba", 57: "La", 58: "Ce", 59: "Pr", 60: "Nd", 61: "Pm", 62:
    "Sm", 63: "Eu", 64: "Gd", 65: "Tb", 66: "Dy", 67: "Ho", 68: "Er", 69: "Tm", 70: "Yb", 71: "Lu", 72: "Hf", 73: "Ta", 74: "W", 75: "Re", 76: "Os", 77:
    "Ir", 78: "Pt", 79: "Au", 80: "Hg", 81: "Tl", 82: "Pb", 83: "Bi", 84: "Po", 85: "At", 86: "Rn", 87: "Fr", 88: "Ra", 89: "Ac", 90: "Th", 91: "Pa", 92:
    "U", 93: "Np", 94: "Pu", 95: "Am", 96: "Cm", 97: "Bk", 98: "Cf", 99: "Es", 100: "Fm", 101: "Md", 102: "No", 103: "Lr", 104: "Rf", 105: "Db", 106: "Sg",
    107: "Bh", 108: "Hs", 109: "Mt", 110: "Ds", 111: "Rg", 112: "Cn", 113: "Nh", 114: "Fl", 115: "Mc", 116: "Lv", 117: "Ts", 118: "Og"}
    # Keyed by atomic number; False marks elements without a tabulated radius.
    vdw_radii = {1: 2.26767118629, 2: 2.64561638401, 3: 3.43930129921, 4: 2.89128076253, 5: 3.62827389807, 6: 3.21253418058, 7: 2.9290752823, 8: 2.87238350264, 9:
    2.77789720321, 10: 2.91017802241, 11: 4.28967799407, 12: 3.26922596024, 13: 3.47709581899, 14: 3.96842457602, 15: 3.40150677944, 16: 3.40150677944, 17:
    3.30702048001, 18: 3.55268485853, 19: 5.19674646859, 20: 4.36526703362, 21: 3.9873218359, 22: False, 23: False, 24: False, 25: False, 26: False, 27: False, 28:
    3.08025336138, 29: 2.64561638401, 30: 2.62671912412, 31: 3.53378759864, 32: 3.9873218359, 33: 3.49599307887, 34: 3.5904793783, 35: 3.49599307887, 36:
    3.81724649693, 37: 5.72586974539, 38: 4.70541771156, 39: False, 40: False, 41: False, 42: False, 43: False, 44: False, 45: False, 46: 3.08025336138, 47:
    3.25032870036, 48: 2.98576706195, 49: 3.64717115796, 50: 4.10070539522, 51: 3.89283553647, 52: 3.89283553647, 53: 3.74165745739, 54: 4.08180813533, 55:
    6.48176014083, 56: 5.06446564939, 57: False, 58: False, 59: False, 60: False, 61: False, 62: False, 63: False, 64: False, 65: False, 66: False, 67: False, 68: False, 69: False,
    70: False, 71: False, 72: False, 73: False, 74: False, 75: False, 76: False, 77: False, 78: 3.30702048001, 79: 3.13694514104, 80: 2.9290752823, 81: 3.70386293761, 82:
    3.81724649693, 83: 3.91173279636, 84: 3.7227601975, 85: 3.81724649693, 86: 4.15739717487, 87: 6.57624644025, 88: 5.34792454768, 89: False, 90: False, 91:
    False, 92: 3.51489033876, 93: False, 94: False, 95: False, 96: False, 97: False, 98: False, 99: False, 100: False, 101: False, 102: False, 103: False, 104: False, 105: False, 106:
    False, 107: False, 108: False, 109: False, 110: False, 111: False, 112: False, 113: False, 114: False, 115: False, 116: False, 117: False, 118: False}
    # Keyed by symbol; False marks elements with no standard atomic weight.
    mass = {"H": 1.008, "Na": 22.989, "Sc": 44.955, "Ga": 69.723, "Nb": 92.906, "Sb": 121.76, "Pm": False, "Lu": 174.9668, "Tl": 204.38, "Pa": 231.035, "Md": False, "Rg": False,
    "He": 4.002, "Mg": 24.305, "Ti": 47.867, "Ge": 72.63, "Mo": 95.95, "Te": 127.6, "Sm": 150.36, "Hf": 178.49, "Pb": 207.2, "U": 238.028, "No": False, "Cn": False,
    "Li": 6.94, "Al": 26.981, "V": 50.9415, "As": 74.921, "Tc": False, "I": 126.904, "Eu": 151.964, "Ta": 180.947, "Bi": 208.98, "Np": False, "Lr": False, "Nh": False,
    "Be": 9.012, "Si": 28.085, "Cr": 51.9961, "Se": 78.971, "Ru": 101.07, "Xe": 131.293, "Gd": 157.25, "W": 183.84, "Po": False, "Pu": False, "Rf": False, "Fl": False,
    "B": 10.81, "P": 30.973, "Mn": 54.938, "Br": 79.904, "Rh": 102.905, "Cs": 132.905, "Tb": 158.925, "Re": 186.207, "At": False, "Am": False, "Db": False, "Mc": False,
    "C": 12.011, "S": 32.06, "Fe": 55.845, "Kr": 83.798, "Pd": 106.42, "Ba": 137.327, "Dy": 162.5, "Os": 190.23, "Rn": False, "Cm": False, "Sg": False, "Lv": False,
    "N": 14.007, "Cl": 35.45, "Co": 58.933, "Rb": 85.4678, "Ag": 107.8682, "La": 138.905, "Ho": 164.93, "Ir": 192.217, "Fr": False, "Bk": False, "Bh": False, "Ts": False,
    "O": 15.999, "Ar": 39.948, "Ni": 58.6934, "Sr": 87.62, "Cd": 112.414, "Ce": 140.116, "Er": 167.259, "Pt": 195.084, "Ra": False, "Cf": False, "Hs": False, "Og": False,
    "F": 18.998, "K": 39.0983, "Cu": 63.546, "Y": 88.905, "In": 114.818, "Pr": 140.907, "Tm": 168.934, "Au": 196.966, "Ac": False, "Es": False, "Mt": False,
    "Ne": 20.1797, "Ca": 40.078, "Zn": 65.38, "Zr": 91.224, "Sn": 118.71, "Nd": 144.242, "Yb": 173.045, "Hg": 200.592, "Th": 232.0377, "Fm": False, "Ds": False}
    if number_property.lower() == "charge":
        return name2number[atom_name]
    elif number_property.lower() == "mass":
        return mass[atom_name]
    elif number_property.lower() == "vdw_radii":
        return vdw_radii[name2number[atom_name]]
def calc_center_of_mass(elements, xyz):
    """
    Calculate the mass-weighted center of the given elements.

    Input : elements, vector(n) of element symbols, e.g. ["C", "H"].
          : xyz, x,y,z coordinate matrix(n, 3) as a numpy array.
    Returns: np.array([Xcm, Ycm, Zcm]), the center of mass.
    """
    # Look up atomic masses once per element via the module's lookup table.
    # NOTE(review): atom_to_numbers returns False for elements without a
    # tabulated mass, which would poison the sums — confirm inputs are
    # limited to elements with real masses.
    mass = np.array([atom_to_numbers(el, "mass") for el in elements],
                    dtype=float)
    total_mass = np.sum(mass)
    # Weighted average of each coordinate column in one vectorized
    # expression; avoids recomputing sum(mass) three times as before.
    return (mass[:, np.newaxis] * xyz).sum(axis=0) / total_mass
import random
import string
import uuid
from channels import Group
from django.db import models
from app.avalon import assign_roles, gen_role_list
from .util import lobby_json
class GameManager(models.Manager):
    """Manager that creates games with a unique 4-letter joinable ID."""

    def create_game(self, num_players, has_mordred, has_oberon):
        """Create and persist a Game, or return False if the requested
        player/role combination is invalid.

        :param num_players: number of seats in the game.
        :param has_mordred: whether the Mordred role is included.
        :param has_oberon: whether the Oberon role is included.
        :returns: the saved Game instance, or False on invalid config.
        """
        # Validate the role configuration up front so we don't burn work
        # generating joinable IDs for a game that can never be created.
        if not gen_role_list(num_players, has_mordred, has_oberon):
            return False
        joinable_id = ''.join(random.choices(string.ascii_uppercase, k=4))
        # For the set of all unstarted games joinable ID must be unique.
        # .exists() issues a cheap EXISTS query instead of fetching rows
        # just to evaluate queryset truthiness.
        while self.filter(is_started=False, joinable_id=joinable_id).exists():
            joinable_id = ''.join(random.choices(string.ascii_uppercase, k=4))
        game = self.model(joinable_id=joinable_id,
                          num_players=num_players,
                          has_mordred=has_mordred,
                          has_oberon=has_oberon)
        game.save(using=self._db)
        return game
class Game(models.Model):
    """An Avalon game lobby: join code, role flags, and lifecycle state."""

    # Primary key is a random UUID so game URLs are not guessable.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Short human-typable code players use to join; unique among
    # unstarted games (enforced by GameManager.create_game).
    joinable_id = models.CharField(max_length=4)
    is_started = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # Optional special-role flags for the Avalon variant being played.
    has_mordred = models.BooleanField(default=False)
    has_oberon = models.BooleanField(default=False)
    num_players = models.PositiveIntegerField(default=5)

    # Custom manager: accessed as Game.games rather than Game.objects.
    games = GameManager()

    def start(self):
        """Mark the game started, deal roles, persist, and notify players."""
        self.is_started = True
        assign_roles(self)
        self.save()
        self.message_players()

    def players(self):
        """Return the non-kicked players ordered by join time."""
        return self.player_set.filter(is_kicked=False).order_by('created_at').all()

    def message_players(self):
        """Push the current lobby state to this game's channel group."""
        Group(str(self.id)).send({
            'text': lobby_json(self),
        })

    def to_dict(self):
        """Serialize the lobby-visible game settings."""
        return {
            'id': str(self.id),
            'joinable_id': self.joinable_id,
            'is_started': self.is_started,
            'num_players': self.num_players,
            'has_mordred': self.has_mordred,
            'has_oberon': self.has_oberon,
        }
| 2,090 | 678 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# time_get_lock_info.py
# utils
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by Chyi Yaqing on 03/16/19 12:01.
# Copyright © 2019. Chyi Yaqing.
# All rights reserved.
#
# Distributed under terms of the MIT
"""
时钟的实现与C库函数绑定在一起,所以一些细节使基于特定平台的
"""
import os
import textwrap # Text wrapping and filling
import time # Time access and conversions
import hashlib
# Clock sources to inspect; each entry pairs a name accepted by
# time.get_clock_info() with the function that reads that clock.
available_clocks = [
    ('monotonic', time.monotonic),
    ('perf_counter', time.perf_counter),
    ('process_time', time.process_time),
    ('thread_time', time.thread_time),
    # seconds since the epoch [Unix time 1970-01-01 00:00] as a float
    ('time', time.time),
]
# time.clock was deprecated in 3.3 and removed in Python 3.8; only
# include it where it still exists so this script runs on modern Pythons.
if hasattr(time, 'clock'):
    available_clocks.insert(0, ('clock', time.clock))

# Print the platform-specific metadata and current reading of each clock.
for (clock_name, func) in available_clocks:
    print(textwrap.dedent('''\
    {name}:
    adjustable : {info.adjustable}
    implementation : {info.implementation}
    monotonic : {info.monotonic}
    resolution : {info.resolution}
    current : {current}
    ''').format(
        name=clock_name,
        info=time.get_clock_info(clock_name),
        current=func()))
# time.time() 从[epoch] 开始以后以浮点数格式返回秒
print("The time is: ", time.time())
# time.ctime() Convert a time expressed in seconds since the epoch to a string
# representing local time
print('The time is :', time.ctime())
later = time.time()+15
print('15 secs from now :', time.ctime(later))
# time.time() 函数返回的是系统时钟可以被用户或者系统服务更改,所以重复调用time()函数产生的
# 时间值可能会前后波动。monotonic()函数总是返回前向的时间值
# The monotonic is not affected by system clock updates.
start = time.monotonic()
time.sleep(0.1)
end = time.monotonic()
print('start : {:>9.2f}'.format(start))
print('end : {:>9.2f}'.format(end))
print('span : {:>9.2f}'.format(end - start))
# time.perf_counter() : fractional seconds of a performance counter
# 用于计算 sha1校验和的数据
data = open(__file__, 'rb').read()
loop_start = time.perf_counter()
for i in range(5):
iter_start = time.perf_counter()
h = hashlib.sha1()
for i in range(300000):
h.update(data)
cksum = h.digest()
now = time.perf_counter()
loop_elapsed = now - loop_start
iter_elapsed = now - iter_start
print(time.ctime(), ': {:0.3f} {:0.3f}'.format(iter_elapsed, loop_elapsed))
# struct_time: the sequence type returned by gmtime(), localtime(), and strptime().
def show_struct(s):
    """Print each field of a time.struct_time, one per line."""
    for label, attr in ((' tm_year :', 'tm_year'),
                        (' tm_mon :', 'tm_mon'),
                        (' tm_mday :', 'tm_mday'),
                        (' tm_hour :', 'tm_hour'),
                        (' tm_min :', 'tm_min'),
                        (' tm_sec :', 'tm_sec'),
                        (' tm_wday :', 'tm_wday'),
                        (' tm_yday :', 'tm_yday'),
                        (' tm_isdst:', 'tm_isdst')):
        print(label, getattr(s, attr))
# Show the struct_time fields for UTC vs. local time.
print('gmtime: UTC')
show_struct(time.gmtime())
print('\nlocaltime:')
show_struct(time.localtime())
# mktime() converts a local struct_time back to seconds since the epoch.
print('\nmktime:', time.mktime(time.localtime()))
# The current local time depends on the timezone setting, which may be
# set by the program or inherited from the system default.
# Changing the timezone does not change the actual time, only how it is
# represented.
def show_zone_info():
    """Print the process timezone configuration and the current local time."""
    tz_env = os.environ.get('TZ', '(not set)')
    print(' TZ :', tz_env)
    print(' tzname :', time.tzname)
    hours_west = time.timezone / 3600
    print(' Zone : {} ({})'.format(time.timezone, (hours_west)))
    print(' DST :', time.daylight)
    print(' Time :', time.ctime())
    print()
print('Default :')
show_zone_info()

ZONES = [
    'GMT',
    'Asia/Hong_Kong',
]
for zone in ZONES:
    # To change the timezone, set the TZ environment variable first,
    # then call tzset() (Unix only).
    os.environ['TZ'] = zone
    time.tzset()
    print(zone, ':')
    show_zone_info()

# Parsing and formatting times:
# strptime() parses a string into a struct_time; strftime() formats one.
now = time.ctime(1552717743.187825)
print('Now:', now)
# strptime() with no format argument expects ctime()-style strings.
parsed = time.strptime(now)
print('\nParsed:')
show_struct(parsed)
print('\nFormatted:', time.strftime("%a %b %d %H:%M:%S %Y", parsed))
| 4,230 | 1,753 |
from django.core.management.base import BaseCommand
from ...test.factories import AddressFactory
class Command(BaseCommand):
    """Management command that seeds the database with fake addresses."""

    # Shown in `manage.py help`.
    help = "generates fake users to list in the application"

    def add_arguments(self, parser):
        """Register the required positional count of entries to create."""
        parser.add_argument("n_of_entries", type=int)

    def handle(self, *args, **options):
        """Create the requested number of fake Address records."""
        AddressFactory.create_batch(options["n_of_entries"])
| 383 | 109 |
import tensorflow as tf
# Seed before building any graph ops so weight initialization is
# reproducible. NOTE(review): tf.set_random_seed is TF1.x-only API
# (tf.random.set_seed in TF2) — this whole script targets TF1.
tf.set_random_seed(42)
import numpy as np
from scipy import integrate
import neural_networks
import poisson_problem
import matplotlib.pyplot as plt
import sys, getopt
class sampling_from_dataset:
    """Serves interior/boundary sample batches from a CSV dataset.

    The CSV is expected to have four columns: interior x, interior y,
    boundary x, boundary y. Batches are served sequentially and wrap
    around to the beginning once all samples have been consumed.
    """

    def __init__(self, filepath, total_samples):
        self.filepath = filepath            # CSV file to load
        self.total_samples = total_samples  # rows available per column pair
        self.last_grab_int = 0              # next interior row to serve
        self.last_grab_bou = 0              # next boundary row to serve

    def load_dataset(self):
        """Load the CSV into memory; must be called before sampling."""
        self.dataset = np.genfromtxt(self.filepath, delimiter=',')

    def increase_grab_number(self, num, batchsize):
        """Advance a cursor by batchsize, wrapping to 0 at the end.

        Uses >= rather than the original == so a batch size that does
        not evenly divide total_samples still wraps instead of stepping
        past the end of the dataset forever.
        """
        num += batchsize
        if num >= self.total_samples:
            return 0
        return num

    def interior_samples(self, batchsize):
        """Return (x, y) arrays for the next batch of interior points."""
        start = self.last_grab_int
        sampling_int_draw_x = self.dataset[start:(start + batchsize), 0]
        sampling_int_draw_y = self.dataset[start:(start + batchsize), 1]
        self.last_grab_int = self.increase_grab_number(start, batchsize)
        return sampling_int_draw_x, sampling_int_draw_y

    def boundary_samples(self, batchsize):
        """Return (x, y) arrays for the next batch of boundary points."""
        start = self.last_grab_bou
        sampling_bou_draw_x = self.dataset[start:(start + batchsize), 2]
        sampling_bou_draw_y = self.dataset[start:(start + batchsize), 3]
        self.last_grab_bou = self.increase_grab_number(start, batchsize)
        return sampling_bou_draw_x, sampling_bou_draw_y
def main(argv):
    """Train a neural network (PINN) for the 2D Poisson problem.

    Command-line flags select batch size (-b), number of hidden layers
    (-n), BFGS iteration budget (-m), whether to add sensor-point loss
    terms (-d 1), the random seed (-r), and whether to save the trained
    model (-s 1).
    """
    # DEFAULT
    SENSOR_DATA = False
    N_LAYERS = 1
    BATCHSIZE = 1000
    MAX_ITER = 50000
    DO_SAVE = False
    SEED = 42
    try:
        opts, args = getopt.getopt(argv,"hb:n:m:d:r:s:",["batchsize=","n_layers=", "max_iterations=", "sensor_data=", "random_seed=", "save_network="])
    except getopt.GetoptError:
        print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>')
            sys.exit()
        elif opt in ("-b", "--batchsize"):
            BATCHSIZE = int(arg)
        elif opt in ("-n", "--n_layers"):
            N_LAYERS = int(arg)
        elif opt in ("-m", "--max_iterations"):
            MAX_ITER = int(arg)
        elif opt in ("-d", "--sensor_data"):
            if(int(arg)==1):
                SENSOR_DATA = True
        elif opt in ("-r", "--random_seed"):
            SEED = int(arg)
            # Re-seed so the graph built below is reproducible per run.
            tf.set_random_seed(SEED)
        elif opt in ("-s", "--save_network"):
            DO_SAVE = bool(int(arg))
            if DO_SAVE:
                print("Saving network after training.")
    # Every hidden layer has 16 units.
    HIDDEN_UNITS = []
    for i in range(N_LAYERS):
        HIDDEN_UNITS.append(16)
    # Checkpoint name encodes the training configuration ('_wsd' marks
    # runs that used sensor data).
    if(SENSOR_DATA):
        save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED) + '_wsd'
    else:
        save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED)
    problem = poisson_problem.poisson_2d()
    # The dataset file is expected to be named after the batch size.
    sampler = sampling_from_dataset('datasets/' + str(BATCHSIZE), BATCHSIZE)
    sampler.load_dataset()
    NUM_INPUTS = 2
    neural_network = neural_networks.neural_network(NUM_INPUTS, 1, HIDDEN_UNITS)
    # Placeholders for interior, boundary, and sensor sample coordinates.
    int_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
    bou_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
    sensor_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
    value_int = neural_network.value(int_var)
    value_bou = neural_network.value(bou_var)
    value_sensor = neural_network.value(sensor_var)
    grad = neural_network.first_derivatives(int_var)
    grad_grad= neural_network.second_derivatives(int_var)
    grad_grad_sensor = neural_network.second_derivatives(sensor_var)
    # Right-hand side at interior points and prescribed boundary values.
    sol_int = tf.placeholder(tf.float64, [None, 1])
    sol_bou = tf.placeholder(tf.float64, [None, 1])
    # Sum the per-dimension second derivatives (the Laplacian terms).
    sum_of_second_derivatives = 0.0
    sum_of_second_derivatives_sensor = 0.0
    for i in range(NUM_INPUTS):
        sum_of_second_derivatives += grad_grad[i]
        sum_of_second_derivatives_sensor += grad_grad_sensor[i]
    # Squared PDE residual and squared boundary mismatch; the sensor
    # terms additionally drive u and its Laplacian toward 0 at sensors.
    loss_int = tf.square(sum_of_second_derivatives+sol_int)
    loss_bou = tf.square(value_bou-sol_bou)
    loss_sensor_int = tf.square(sum_of_second_derivatives_sensor)
    loss_sensor_bou = tf.square(value_sensor)
    loss = tf.sqrt(tf.reduce_mean(loss_int + loss_bou))
    sensor_loss = tf.sqrt(tf.reduce_mean(loss_int) + tf.reduce_mean(loss_bou) + tf.reduce_mean(loss_sensor_int) + tf.reduce_mean(loss_sensor_bou))
    # BFGS via the (TF1-only) tf.contrib SciPy optimizer bridge.
    train_scipy = tf.contrib.opt.ScipyOptimizerInterface(loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER})
    train_scipy_sensor = tf.contrib.opt.ScipyOptimizerInterface(sensor_loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER})
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # Draw one fixed batch of interior and boundary points and
        # reshape to (BATCHSIZE, 1) columns for concatenation.
        int_draw_x, int_draw_y = sampler.interior_samples(BATCHSIZE)
        int_draw_x = np.reshape(int_draw_x, (BATCHSIZE, 1))
        int_draw_y = np.reshape(int_draw_y, (BATCHSIZE, 1))
        boundary_draw_x, boundary_draw_y = sampler.boundary_samples(BATCHSIZE)
        boundary_draw_x = np.reshape(boundary_draw_x, (BATCHSIZE, 1))
        boundary_draw_y = np.reshape(boundary_draw_y, (BATCHSIZE, 1))
        int_draw = np.concatenate([int_draw_x, int_draw_y], axis=1)
        bou_draw = np.concatenate([boundary_draw_x, boundary_draw_y], axis=1)
        f = problem.rhs(int_draw)
        f = np.reshape(np.array(f), (BATCHSIZE, 1))
        bou = problem.velocity(bou_draw)
        bou = np.reshape(np.array(bou), (BATCHSIZE, 1))
        if(SENSOR_DATA):
            # Sensor points at the four corners of the unit square.
            sensor_points_x = np.reshape(np.array([0.0, 1.0, 0.0, 1.0]), (4,1))
            sensor_points_y = np.reshape(np.array([0.0, 0.0, 1.0, 1.0]), (4,1))
            sensor_points = np.concatenate([sensor_points_x, sensor_points_y], axis=1)
            print(sensor_points)
            train_scipy_sensor.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw, sensor_var: sensor_points})
        else:
            train_scipy.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw})
        if DO_SAVE:
            save_path = saver.save(sess, save_name)
            print("Model saved in path: %s" % save_path)
# Script entry point: forward CLI args (minus the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| 5,995 | 2,711 |
# Sieve of Eratosthenes: isprime[k] is True iff k is prime, for k < maxn.
# (Indices maxn and maxn+1 exist but are never marked.)
maxn = 1000000
isprime = [False, False] + [True] * maxn
for p in range(2, maxn):
    if isprime[p]:
        # Start at p*p: smaller multiples were marked by smaller primes.
        for multiple in range(p * p, maxn, p):
            isprime[multiple] = False
def truncatable(n):
    """Return True if n and every left- and right-truncation of it is prime.

    Relies on the module-level `isprime` sieve for primality tests
    (valid for values below maxn).
    """
    # Right truncations: e.g. 3797 -> 379 -> 37 -> 3.
    x = n
    while x > 0:
        if not isprime[x]: return False
        # Floor division keeps x an int; the original `x /= 10` yields a
        # float under Python 3 and breaks list indexing. In Python 2 the
        # semantics for ints are identical.
        x //= 10
    # Left truncations: e.g. 3797 -> 797 -> 97 -> 7, via n modulo
    # growing powers of ten.
    b = 10
    x = n % b
    while x < n:
        if not isprime[x]: return False
        b *= 10
        x = n % b
    return True
# Sum all truncatable primes. Single-digit primes are excluded by the
# problem statement, so scanning starts at 10.
ans = 0
for i in range(10, maxn):
    if truncatable(i):
        ans += i
        # print(x) with a single argument emits identical output under
        # Python 2 and 3; the original `print i` is Py2-only syntax.
        print(i)
print(ans)
| 429 | 225 |
import click
from os import environ
from flask.cli import with_appcontext
# Click command group for the generated app's CLI; the {{ }} placeholders
# are filled in by cookiecutter when the template is rendered.
@click.group('{{cookiecutter.app_slug}}')
def {{cookiecutter.app_slug}}():
    """Perform {{cookiecutter.app_name}} specific operations."""
@{{cookiecutter.app_slug}}.command()
@with_appcontext
@click.argument('queue_name')
def subscribe(queue_name):  # pragma: no cover
    """Subscribe a queue for the observed events and messages.

    QUEUE_NAME specifies the name of the queue.
    """
    # Imported lazily so the broker/app context exists before actors are
    # registered ('actors' import is for its registration side effects).
    from .extensions import broker, MAIN_EXCHANGE_NAME
    from . import actors  # noqa
    channel = broker.channel
    channel.exchange_declare(MAIN_EXCHANGE_NAME)
    click.echo(f'Declared "{MAIN_EXCHANGE_NAME}" direct exchange.')
    # With load balancing enabled, bindings are made exchange-to-exchange;
    # otherwise the queue is bound to the main exchange directly.
    if environ.get('APP_USE_LOAD_BALANCING_EXCHANGE', '') not in ['', 'False']:
        bind = channel.exchange_bind
        unbind = channel.exchange_unbind
    else:
        bind = channel.queue_bind
        unbind = channel.queue_unbind
    # Direct messages addressed to the queue itself.
    bind(queue_name, MAIN_EXCHANGE_NAME, queue_name)
    click.echo(f'Subscribed "{queue_name}" to "{MAIN_EXCHANGE_NAME}.{queue_name}".')
    # (Un)bind the queue for each declared actor's event routing key
    # according to its 'event_subscription' option.
    for actor in [broker.get_actor(actor_name) for actor_name in broker.get_declared_actors()]:
        if 'event_subscription' in actor.options:
            routing_key = f'events.{actor.actor_name}'
            if actor.options['event_subscription']:
                bind(queue_name, MAIN_EXCHANGE_NAME, routing_key)
                click.echo(f'Subscribed "{queue_name}" to "{MAIN_EXCHANGE_NAME}.{routing_key}".')
            else:
                unbind(queue_name, MAIN_EXCHANGE_NAME, routing_key)
                click.echo(f'Unsubscribed "{queue_name}" from "{MAIN_EXCHANGE_NAME}.{routing_key}".')
| 1,691 | 553 |
from substrateinterface import SubstrateInterface, Keypair
from substrateinterface.exceptions import SubstrateRequestException
from scalecodec.type_registry import load_type_registry_file
import time
# Connect to the SORA network node with its SS58 prefix and custom types.
substrate = SubstrateInterface(
    url='wss://ws.mof.sora.org',
    ss58_format=69,
    type_registry_preset='default',
    type_registry=load_type_registry_file('custom_types.json'),
)

# NOTE(review): never commit a real passphrase; load it from the
# environment or a secrets store instead.
keypair = Keypair.create_from_mnemonic('<your 12 word passphrase here>')

# Swap via the liquidity proxy: sell a fixed input amount, accepting any
# output (min_amount_out is '0'), restricted to the two selected pool
# types. NOTE(review): asset IDs are opaque hex — verify which tokens
# they denote before reuse.
call = substrate.compose_call(
    call_module='LiquidityProxy',
    call_function='swap',
    call_params={
        'dex_id': '0',
        'input_asset_id': '0x0200050000000000000000000000000000000000000000000000000000000000',
        'output_asset_id': '0x0200000000000000000000000000000000000000000000000000000000000000',
        'swap_amount': {'WithDesiredInput': {'desired_amount_in': '13370000000000000000000', 'min_amount_out': '0'}},
        'selected_source_types': ["XYKPool","MulticollateralBondingCurvePool"],
        'filter_mode': 'AllowSelected'
    }
)

# Resubmit the same signed swap forever, pausing 100 seconds between
# attempts; failures are printed and retried on the next iteration.
while True:
    try:
        extrinsic = substrate.create_signed_extrinsic(call=call, keypair=keypair)
        receipt = substrate.submit_extrinsic(extrinsic, wait_for_inclusion=False)
        print("Extrinsic '{}' sent".format(receipt.extrinsic_hash))
        # print("Extrinsic '{}' sent and included in block '{}'".format(receipt.extrinsic_hash, receipt.block_hash))
    except Exception as e:
        print("Failed to send: {}".format(e))
    time.sleep(100)
| 1,509 | 617 |
from re import S
from pydantic import BaseModel
from typing import Optional
class User(BaseModel):
    """Basic user record validated by pydantic."""

    first_name: str
    last_name: str
    age: int
    # Optional field; defaults to None when omitted from the payload.
    sex: Optional[str] = None
| 185 | 58 |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import unittest
from pxr import Usd, Sdf, Tf
class TestUsdStagePopulationMask(unittest.TestCase):
    """Exercises Usd.StagePopulationMask set algebra and masked stages."""

    def test_Basic(self):
        """Mask emptiness, inclusion, union/intersection, child-name queries."""
        pm = Usd.StagePopulationMask.All()
        assert not pm.IsEmpty()
        assert pm.Includes('/any/path')
        assert pm.GetIncludedChildNames('/') == (True, [])
        pm = Usd.StagePopulationMask()
        assert pm.IsEmpty()
        assert not pm.Includes('/any/path')
        assert pm.GetIncludedChildNames('/') == (False, [])
        pm2 = Usd.StagePopulationMask().Add('/foo').Add('/bar')
        assert not pm.Includes(pm2)
        assert pm2.Includes(pm)
        assert pm.GetUnion(pm2) == pm2
        assert Usd.StagePopulationMask.Union(pm, pm2) == pm2
        assert pm2.GetIncludedChildNames('/') == (True, ['bar', 'foo'])
        assert pm2.GetIncludedChildNames('/foo') == (True, [])
        assert pm2.GetIncludedChildNames('/bar') == (True, [])
        assert pm2.GetIncludedChildNames('/baz') == (False, [])
        pm.Add('/World/anim/chars/CharGroup')
        assert pm.GetPaths() == ['/World/anim/chars/CharGroup']
        assert not pm.IsEmpty()
        # Adding a descendant of an already-included path is a no-op.
        pm.Add('/World/anim/chars/CharGroup/child')
        assert pm.GetPaths() == ['/World/anim/chars/CharGroup']
        pm.Add('/World/anim/chars/OtherCharGroup')
        assert pm.GetPaths() == ['/World/anim/chars/CharGroup',
                                 '/World/anim/chars/OtherCharGroup']
        pm.Add('/World/sets/arch/Building')
        assert pm.GetPaths() == ['/World/anim/chars/CharGroup',
                                 '/World/anim/chars/OtherCharGroup',
                                 '/World/sets/arch/Building']
        pm2 = Usd.StagePopulationMask()
        assert pm2 != pm
        pm2.Add('/World/anim/chars/CharGroup')
        assert pm2 != pm
        pm2.Add('/World/sets/arch/Building')
        pm2.Add('/World/anim/chars/OtherCharGroup')
        pm2.Add('/World/anim/chars/CharGroup/child')
        assert pm2 == pm
        assert pm2.GetUnion(pm) == pm
        assert pm2.GetUnion(pm) == pm2
        pm2 = Usd.StagePopulationMask()
        assert Usd.StagePopulationMask.Union(pm, pm2) == pm
        assert Usd.StagePopulationMask.Union(pm, pm2) != pm2
        # Ancestors of included paths are included, but not their subtrees.
        assert pm.Includes('/World')
        assert not pm.IncludesSubtree('/World')
        assert pm.Includes('/World/anim')
        assert not pm.IncludesSubtree('/World/anim')
        assert pm.Includes('/World/anim/chars/CharGroup')
        assert pm.IncludesSubtree('/World/anim/chars/CharGroup')
        assert pm.Includes('/World/anim/chars/CharGroup/child')
        assert pm.IncludesSubtree('/World/anim/chars/CharGroup/child')
        pm = Usd.StagePopulationMask().Add('/world/anim')
        pm2 = pm.GetUnion('/world')
        assert pm2.GetPaths() == ['/world']
        pm = Usd.StagePopulationMask(['/A', '/AA', '/B/C', '/U'])
        pm2 = Usd.StagePopulationMask(['/A/X', '/B', '/Q'])
        assert (Usd.StagePopulationMask.Union(pm, pm2) ==
                Usd.StagePopulationMask(['/A', '/AA', '/B', '/Q', '/U']))
        assert (Usd.StagePopulationMask.Intersection(pm, pm2) ==
                Usd.StagePopulationMask(['/A/X', '/B/C']))
        pm = Usd.StagePopulationMask(['/A/B', '/A/C', '/A/D/E', '/A/D/F', '/B'])
        assert pm.GetIncludedChildNames('/') == (True, ['A', 'B'])
        assert pm.GetIncludedChildNames('/A') == (True, ['B', 'C', 'D'])
        assert pm.GetIncludedChildNames('/A/B') == (True, [])
        assert pm.GetIncludedChildNames('/A/C') == (True, [])
        assert pm.GetIncludedChildNames('/A/D') == (True, ['E', 'F'])
        assert pm.GetIncludedChildNames('/A/D/E') == (True, [])
        assert pm.GetIncludedChildNames('/A/D/F') == (True, [])
        assert pm.GetIncludedChildNames('/B') == (True, [])
        assert pm.GetIncludedChildNames('/C') == (False, [])
        # Errors.
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask(['relativePath/is/no/good'])
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask().Add('relativePath/is/no/good')
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask(['/property/path/is/no.good'])
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask().Add('/property/path/is/no.good')
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask(['/variant/selection/path/is{no=good}'])
        with self.assertRaises(Tf.ErrorException):
            Usd.StagePopulationMask().Add('/variant/selection/path/is{no=good}')

    def test_Stages(self):
        """OpenMasked composes only masked subtrees; SetPopulationMask updates."""
        unmasked = Usd.Stage.CreateInMemory()
        unmasked.DefinePrim('/World/anim/chars/DoryGroup/Dory')
        unmasked.DefinePrim('/World/anim/chars/NemoGroup/Nemo')
        unmasked.DefinePrim('/World/sets/Reef/Coral/CoralGroup1')
        unmasked.DefinePrim('/World/sets/Reef/Rocks/RockGroup1')
        doryMask = Usd.StagePopulationMask().Add('/World/anim/chars/DoryGroup')
        doryStage = Usd.Stage.OpenMasked(unmasked.GetRootLayer(), doryMask)
        assert doryStage.GetPopulationMask() == doryMask
        assert doryStage.GetPrimAtPath('/World')
        assert doryStage.GetPrimAtPath('/World/anim')
        assert doryStage.GetPrimAtPath('/World/anim/chars')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory')
        assert not doryStage.GetPrimAtPath('/World/sets')
        assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup')
        # Excluded prims should not even have prim indexes composed.
        assert not doryStage._GetPcpCache().FindPrimIndex('/World/sets')
        assert not doryStage._GetPcpCache().FindPrimIndex(
            '/World/anim/chars/NemoGroup')
        doryAndNemoMask = (Usd.StagePopulationMask()
                           .Add('/World/anim/chars/DoryGroup')
                           .Add('/World/anim/chars/NemoGroup'))
        # Test modifying an existing mask.
        doryStage.SetPopulationMask(doryAndNemoMask)
        assert doryStage.GetPrimAtPath('/World')
        assert doryStage.GetPrimAtPath('/World/anim')
        assert doryStage.GetPrimAtPath('/World/anim/chars')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory')
        assert doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup')
        assert doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo')
        assert doryStage._GetPcpCache().FindPrimIndex(
            '/World/anim/chars/NemoGroup')
        # Shrinking the mask again must drop the extra subtree.
        doryStage.SetPopulationMask(doryMask)
        assert doryStage.GetPrimAtPath('/World')
        assert doryStage.GetPrimAtPath('/World/anim')
        assert doryStage.GetPrimAtPath('/World/anim/chars')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup')
        assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory')
        assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup')
        assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo')
        assert not doryStage._GetPcpCache().FindPrimIndex(
            '/World/anim/chars/NemoGroup')
        doryAndNemoStage = Usd.Stage.OpenMasked(
            unmasked.GetRootLayer(), doryAndNemoMask)
        assert doryAndNemoStage.GetPopulationMask() == doryAndNemoMask
        assert doryAndNemoStage.GetPrimAtPath('/World')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/DoryGroup')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/NemoGroup')
        assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo')
        assert not doryAndNemoStage.GetPrimAtPath('/World/sets')

    def test_ExpansionRelationships(self):
        """ExpandPopulationMask follows relationship targets, optionally filtered."""
        stage = Usd.Stage.CreateInMemory()
        a = stage.DefinePrim('/World/A')
        b = stage.DefinePrim('/World/B')
        c = stage.DefinePrim('/World/C')
        d = stage.DefinePrim('/World/D')
        e = stage.DefinePrim('/World/E')
        cAttr = c.CreateAttribute('attr', Sdf.ValueTypeNames.Float)
        # Chain of 'r' relationships A -> B -> C.attr -> D, plus a
        # separate 'pred' relationship A -> E.
        a.CreateRelationship('r').AddTarget(b.GetPath())
        b.CreateRelationship('r').AddTarget(cAttr.GetPath())
        c.CreateRelationship('r').AddTarget(d.GetPath())
        a.CreateRelationship('pred').AddTarget(e.GetPath())
        mask = Usd.StagePopulationMask().Add(a.GetPath())
        masked = Usd.Stage.OpenMasked(stage.GetRootLayer(), mask)
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert not masked.GetPrimAtPath(e.GetPath())
        # Now expand the mask for all relationships.
        masked.ExpandPopulationMask()
        assert masked.GetPrimAtPath(a.GetPath())
        assert masked.GetPrimAtPath(b.GetPath())
        assert masked.GetPrimAtPath(c.GetPath())
        assert masked.GetPrimAtPath(d.GetPath())
        assert masked.GetPrimAtPath(e.GetPath())
        masked.SetPopulationMask(Usd.StagePopulationMask().Add(a.GetPath()))
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert not masked.GetPrimAtPath(e.GetPath())
        # Expand with a predicate that only consults relationships named 'pred'
        masked.ExpandPopulationMask(
            relationshipPredicate=lambda r: r.GetName() == 'pred')
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert masked.GetPrimAtPath(e.GetPath())

    def test_ExpansionConnections(self):
        """ExpandPopulationMask follows attribute connections, optionally filtered."""
        stage = Usd.Stage.CreateInMemory()
        a = stage.DefinePrim('/World/A')
        b = stage.DefinePrim('/World/B')
        c = stage.DefinePrim('/World/C')
        d = stage.DefinePrim('/World/D')
        e = stage.DefinePrim('/World/E')
        bAttr = b.CreateAttribute('attr', Sdf.ValueTypeNames.Float)
        cAttr = c.CreateAttribute('attr', Sdf.ValueTypeNames.Float)
        dAttr = d.CreateAttribute('attr', Sdf.ValueTypeNames.Float)
        eAttr = e.CreateAttribute('attr', Sdf.ValueTypeNames.Float)
        floatType = Sdf.ValueTypeNames.Float
        # Chain of 'a' connections A -> B -> C -> D, plus a separate
        # 'pred' connection A -> E.
        a.CreateAttribute('a', floatType).AddConnection(bAttr.GetPath())
        b.CreateAttribute('a', floatType).AddConnection(cAttr.GetPath())
        c.CreateAttribute('a', floatType).AddConnection(dAttr.GetPath())
        a.CreateAttribute('pred', floatType).AddConnection(eAttr.GetPath())
        mask = Usd.StagePopulationMask().Add(a.GetPath())
        masked = Usd.Stage.OpenMasked(stage.GetRootLayer(), mask)
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert not masked.GetPrimAtPath(e.GetPath())
        # Now expand the mask for all connections.
        masked.ExpandPopulationMask()
        assert masked.GetPrimAtPath(a.GetPath())
        assert masked.GetPrimAtPath(b.GetPath())
        assert masked.GetPrimAtPath(c.GetPath())
        assert masked.GetPrimAtPath(d.GetPath())
        assert masked.GetPrimAtPath(e.GetPath())
        masked.SetPopulationMask(Usd.StagePopulationMask().Add(a.GetPath()))
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert not masked.GetPrimAtPath(e.GetPath())
        # Expand with a predicate that only consults attributes named 'pred'
        masked.ExpandPopulationMask(
            attributePredicate=lambda r: r.GetName() == 'pred')
        assert masked.GetPrimAtPath(a.GetPath())
        assert not masked.GetPrimAtPath(b.GetPath())
        assert not masked.GetPrimAtPath(c.GetPath())
        assert not masked.GetPrimAtPath(d.GetPath())
        assert masked.GetPrimAtPath(e.GetPath())

    def test_Bug143308(self):
        """Masked parallel prim indexing must not break instancing."""
        # We didn't correctly mask calls to parallel prim indexing, leading to
        # errors with instancing.
        stage = Usd.Stage.CreateInMemory()
        foo, bar, i1, i2 = [
            stage.DefinePrim(p) for p in ('/foo', '/bar', '/i1', '/i2')]
        foo.SetInstanceable(True)
        [p.GetReferences().AddInternalReference(foo.GetPath()) for p in (i1, i2)]
        assert len(stage.GetPrototypes())
        stage2 = Usd.Stage.OpenMasked(
            stage.GetRootLayer(), Usd.StagePopulationMask(['/i1']))
        assert len(stage2.GetPrototypes())

    def test_Bug145873(self):
        """Payload inclusion must consider ancestors of requested index paths."""
        # The payload inclusion predicate wasn't being invoked on ancestors of
        # requested index paths in pcp.
        payload = Usd.Stage.CreateInMemory()
        for n in ('One', 'Two', 'Three'):
            payload.DefinePrim('/CubesModel/Geom/Cube' + n)
        root = Usd.Stage.CreateInMemory()
        cubes = root.DefinePrim('/Cubes')
        cubes.GetPayloads().AddPayload(payload.GetRootLayer().identifier,
                                       '/CubesModel')
        testStage = Usd.Stage.OpenMasked(
            root.GetRootLayer(),
            Usd.StagePopulationMask(['/Cubes/Geom/CubeTwo']))
        # Only /Cubes/Geom/CubeTwo (and ancestors) should be present.
        assert testStage.GetPrimAtPath('/Cubes')
        assert testStage.GetPrimAtPath('/Cubes/Geom')
        assert not testStage.GetPrimAtPath('/Cubes/Geom/CubeOne')
        assert testStage.GetPrimAtPath('/Cubes/Geom/CubeTwo')
        assert not testStage.GetPrimAtPath('/Cubes/Geom/CubeThree')

    def test_Bug152904(self):
        """Prototype sharing depends on how the mask cuts into instances."""
        # Prototype prims weren't being generated on stages where the population
        # mask included paths of prims beneath instances.
        stage = Usd.Stage.CreateInMemory()
        stage.DefinePrim('/Ref/geom')
        stage.DefinePrim('/Ref/shading')
        for path in ['/Instance_1', '/Instance_2']:
            prim = stage.DefinePrim(path)
            prim.GetReferences().AddInternalReference('/Ref')
            prim.SetInstanceable(True)
        # Open the stage with a mask that includes the 'geom' prim beneath
        # the instances.
        maskedStage = Usd.Stage.OpenMasked(
            stage.GetRootLayer(),
            Usd.StagePopulationMask(['/Instance_1/geom', '/Instance_2/geom']))
        # Both instances should share the same prototype prim.
        instance_1 = maskedStage.GetPrimAtPath('/Instance_1')
        assert instance_1.IsInstance()
        assert instance_1.GetPrototype()
        instance_2 = maskedStage.GetPrimAtPath('/Instance_2')
        assert instance_2.IsInstance()
        assert instance_2.GetPrototype()
        # Only the 'geom' prim in the prototype will be composed, since
        # it's the only one in the population mask.
        assert instance_1.GetPrototype() == instance_2.GetPrototype()
        prototype = instance_1.GetPrototype()
        assert prototype.GetChild('geom')
        assert not prototype.GetChild('shading')
        # Open the stage with a mask that includes the 'geom' prim beneath
        # /Instance_1 and all children beneath /Instance_2.
        maskedStage = Usd.Stage.OpenMasked(
            stage.GetRootLayer(),
            Usd.StagePopulationMask(['/Instance_1/geom', '/Instance_2']))
        # Both instances should *not* share the same prototype, since they
        # are affected by different population masks.
        instance_1 = maskedStage.GetPrimAtPath('/Instance_1')
        assert instance_1.IsInstance()
        assert instance_1.GetPrototype()
        instance_2 = maskedStage.GetPrimAtPath('/Instance_2')
        assert instance_2.IsInstance()
        assert instance_2.GetPrototype()
        # Only the 'geom' prim will be composed in the prototype for the
        # /Instance_1, but both 'geom' and 'shading' will be composed for
        # /Instance_2.
        assert instance_1.GetPrototype() != instance_2.GetPrototype()
        prototype = instance_1.GetPrototype()
        assert prototype.GetChild('geom')
        assert not prototype.GetChild('shading')
        prototype = instance_2.GetPrototype()
        assert prototype.GetChild('geom')
        assert prototype.GetChild('shading')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 17,892 | 5,584 |
"""
Module to mutate sequences based on a variants list.
Assumptions for which no check is performed:
- Only ``deletion insertion`` operations.
- Only exact locations, i.e., no uncertainties such as `10+?`.
- Locations are zero-based right-open with ``end >= start``.
- There is no overlapping between variants locations.
Notes:
- If any of the above is not met, the result will be bogus.
- There can be empty inserted lists.
"""
from .util import reverse_complement
class UnknownInsertedSource(Exception):
    """Raised when an inserted part references an unsupported source."""
def _get_inverted(sequence):
    """
    Reverse complement inversion using code extracted from BioPython.

    :param sequence: Nucleotide sequence string to invert.
    :returns: The reverse complement of `sequence`.
    """
    return reverse_complement(sequence)
def _get_start_end(location):
"""
Get the start and the end of a location object. For point locations both
start and end equal the position value.
"""
if location["type"] == "range":
return location["start"]["position"], location["end"]["position"]
elif location["type"] == "point":
return location["position"], location["position"]
def _get_inserted_sequence(inserted, sequences):
    """
    Resolve the actual nucleotide sequence referenced by an insertion entry.

    Applies the optional ``repeat_number`` multiplier and ``inverted`` flag
    after the raw sequence has been looked up.
    """
    source = inserted["source"]
    if source == "description":
        seq = inserted["sequence"]
    elif source == "reference":
        seq = sequences[source][slice(*_get_start_end(inserted["location"]))]
    elif isinstance(source, dict) and source.get("id"):
        seq = sequences[source["id"]][slice(*_get_start_end(inserted["location"]))]
    else:
        raise UnknownInsertedSource("Inserted source not supported.")

    repeat = inserted.get("repeat_number")
    if repeat and repeat.get("value") is not None:
        seq = seq * repeat["value"]
    if inserted.get("inverted"):
        seq = _get_inverted(seq)
    return seq
def mutate(sequences, variants):
    """
    Mutate the reference sequence under ``sequences["reference"]`` according
    to the provided variants operations.

    :arg dict sequences: Sequences dictionary.
    :arg list variants: Operations list.
    :returns: Mutated sequence.
    :rtype: str
    """
    reference = sequences["reference"]
    ordered = sorted(variants, key=lambda v: _get_start_end(v["location"]))

    pieces = []
    cursor = 0
    for variant in ordered:
        start, end = _get_start_end(variant["location"])
        # Copy the untouched stretch before this variant, then its insertions.
        pieces.append(reference[cursor:start])
        pieces.extend(
            _get_inserted_sequence(ins, sequences) for ins in variant["inserted"]
        )
        cursor = end
    pieces.append(reference[cursor:])
    return "".join(pieces)
| 2,822 | 781 |
import copy
from collections import defaultdict
# Parse the seat grid and precompute, for every seat, the first seat visible
# in each of the eight directions (AoC 2020 day 11 part 2 "line of sight").
inputs = [list(line) for line in open("day11/input").read().splitlines()]
# nodes maps a seat's flattened index (x + width * y) to the list of
# (row, col) coordinates of the first visible seat in each direction.
nodes = defaultdict(lambda: [])
for y in range(len(inputs)):
    for x in range(len(inputs[y])):
        if inputs[y][x] != ".":
            for i in range(-1, 2):
                for j in range(-1, 2):
                    if 0 == j and 0 == i:
                        continue
                    index_x = x + j
                    index_y = y + i
                    # Walk outward along direction (i, j) until we leave the
                    # grid or hit the first non-floor cell ('L' or '#').
                    while 0 <= index_y < len(inputs) and 0 <= index_x < len(inputs[y]):
                        if inputs[index_y][index_x] != ".":
                            nodes[x + len(inputs[y]) * y].append((index_y, index_x))
                            break
                        index_x += j
                        index_y += i
def round(seats):
    """One simulation step: return a new grid with the occupancy rules applied.

    A free seat ('L') with no visible occupied seat becomes occupied; an
    occupied seat ('#') with five or more visible occupied seats is vacated.
    NOTE: the name shadows the builtin ``round``; kept for existing callers.
    """
    nxt = copy.deepcopy(seats)
    for y, row in enumerate(seats):
        for x, cell in enumerate(row):
            if cell == ".":
                continue
            visible = sum(
                1 for (ny, nx) in nodes[x + len(row) * y] if seats[ny][nx] == "#"
            )
            if cell == "L" and visible == 0:
                nxt[y][x] = "#"
            elif cell == "#" and visible >= 5:
                nxt[y][x] = "L"
    return nxt
# Iterate rounds until a full round produces no change (fixed point).
seats = inputs
while True:
    prev_seats = copy.deepcopy(seats)
    seats = round(seats)
    if prev_seats == seats:
        break
# Count occupied seats in the stable configuration.
total_occupied = 0
for y in range(len(seats)):
    for x in range(len(seats[y])):
        if seats[y][x] == "#":
            total_occupied += 1
print("Total seats occupied: " + str(total_occupied))
# Generated by Django 2.2.3 on 2020-07-31 14:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the DoctorProfile model.

    DoctorProfile uses multi-table inheritance from UserProfile.profile
    (see ``bases``), so ``profile_ptr`` is both the parent link and the
    primary key.
    """

    dependencies = [
        ('UserProfile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='DoctorProfile',
            fields=[
                # Parent link to the base profile (MTI); doubles as the PK.
                ('profile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=7)),
                ('date_of_birth', models.DateField()),
                ('Year_of_Graduation', models.DateField()),
                ('Sch_of_Graduation', models.CharField(max_length=255)),
                ('Hospital_of_housemanship', models.CharField(max_length=255)),
                ('Folio_Number', models.CharField(max_length=50)),
                # Uploaded credential documents, partitioned by upload date.
                ('Full_License', models.FileField(upload_to='../media/License_document/%Y/%m/%d/')),
                ('Evidence_of_License_Reg', models.FileField(upload_to='../media/Evidence_of_Annual_License_Reg/%Y/%m/%d/')),
                ('CV', models.FileField(upload_to='../media/CV/%Y/%m/%d/')),
                ('Specialization', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
            bases=('UserProfile.profile', models.Model),
        ),
    ]
| 1,553 | 493 |
from __future__ import unicode_literals
def find_ddf():
    """Locate the DDF home directory.

    The DDF_HOME environment variable wins when set; otherwise the directory
    two levels above this file is probed for the expected ``core``/``spark``
    layout. Raises ImportError when neither strategy succeeds.
    """
    import os
    env_home = os.environ.get('DDF_HOME')
    if env_home is not None:
        return os.path.abspath(env_home)
    here = os.path.split(os.path.abspath(__file__))[0]
    candidate = os.path.abspath(here + '/../../')
    if all(os.path.exists(os.path.join(candidate, sub)) for sub in ('core', 'spark')):
        return candidate
    raise ImportError('Unable to find DDF_HOME. Please define this variable in your environment')
# Resolve DDF_HOME once at import time; raises ImportError if it cannot be found.
DDF_HOME = find_ddf()
# TODO: find a better way to set this
SCALA_VERSION = '2.10'
| 529 | 193 |
# Average of the cubes of the odd multiples of 3 in [1, 33].
numbers = [n ** 3 for n in range(3, 34, 3) if n % 2 == 1]
result = sum(numbers) / len(numbers)
| 196 | 77 |
# written by falsetru
import cookielib
import os
from contextlib import closing
import re
import getpass
import webbrowser
import sys
import requests
class AlgoSpot(object):
    """Minimal client for algospot.com: login, cookie persistence, submission.

    NOTE: this module targets Python 2 (``cookielib``, ``print`` statements,
    ``raw_input``).
    """

    login_url = 'https://algospot.com/accounts/login/?next=/'

    def __init__(self):
        # Persist cookies in ~/.kaka so logins survive between runs.
        cookiefile_path = os.path.join(os.path.expanduser('~'), '.kaka')
        self.cj = cj = cookielib.LWPCookieJar(cookiefile_path)
        try:
            cj.load()
        except IOError:
            # No cookie file yet; start with an empty jar.
            pass
        self.opener = requests.Session()
        self.opener.cookies = cj

    def login(self, username, password):
        """Log in with the given credentials; save cookies on success."""
        html = self._request(self.login_url)
        csrf_token = self._get_csrf_token(html)
        data = {
            'username': username,
            'password': password,
            'csrfmiddlewaretoken': csrf_token,
        }
        html = self._request(self.login_url, data)
        ok = self._is_loggedin(html)
        if ok:
            self.cj.save()
        return ok

    def is_loggedin(self):
        """Return True if the saved cookies still hold a valid session."""
        html = self._request('https://algospot.com')
        return self._is_loggedin(html)

    def ensure_login(self):
        """Prompt for credentials until logged in (no-op if already logged in)."""
        if self.is_loggedin():
            return
        while True:
            username = raw_input('Username: ')
            password = getpass.getpass()
            if self.login(username, password):
                break
            print 'Login failure.'

    def submit(self, problem, lang, content):
        """Submit ``content`` as a solution for ``problem`` in language ``lang``."""
        url = 'https://algospot.com/judge/problem/submit/{}'.format(problem)
        html = self._request(url)
        csrf_token = self._get_csrf_token(html)
        data = {
            'csrfmiddlewaretoken': csrf_token,
            'language': lang,
            'source': content,
        }
        self._request(url, data)

    def open_recent_submission(self, problem):
        """Open the user's recent-submissions page in the default browser."""
        webbrowser.open('https://algospot.com/judge/submission/recent/?user={}&problem={}'.format(self.username, problem))

    def _is_loggedin(self, html):
        # A logout link only appears for authenticated sessions; also caches
        # the username scraped from the profile link for later use.
        if 'href="/accounts/logout/"' in html:
            self.username = re.search('<a href="/user/profile/\d+" class="username">([^<]+)</a>', html).group(1).strip()
            return True
        else:
            return False

    def _get_csrf_token(self, html):
        # Django embeds the CSRF token in a hidden form input on the page.
        return re.search("name='csrfmiddlewaretoken' value='(\w+)'", html).group(1)

    def _request(self, url, data=None):
        """GET ``url`` (or POST ``data`` to it) and return the response body."""
        if data is None:
            r = self.opener.get(url)
        else:
            r = self.opener.post(url, data, headers={'Referer': self.login_url})
        return r.content
# Map source-file extensions to AlgoSpot judge language codes.
# All C-family extensions submit as 'cpp'.
ext_to_lang = {
    '.java' : 'java',
    '.scala': 'scala',
    '.hs'   : 'hs',
    '.py'   : 'py',
    '.js'   : 'js',
    '.rb'   : 'rb',
    '.c'    : 'cpp',
    '.cpp'  : 'cpp',
    '.cxx'  : 'cpp',
    '.cc'   : 'cpp',
}
def guess_language(filename):
    """Map a filename's extension to a judge language code (None if unknown)."""
    extension = os.path.splitext(filename)[1].lower()
    return ext_to_lang.get(extension)
def guess_problem(filename):
    """Derive the AlgoSpot problem code from a filename.

    Takes the first run of digits/uppercase letters in the upper-cased
    basename, e.g. 'snail-recursion.py' -> 'SNAIL'.
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    return re.search('[0-9A-Z]+', stem.upper()).group(0)
# Inline smoke tests, executed at import time.
assert guess_language('/path/to/boggle.py') == 'py'
assert guess_problem('/path/to/boggle.py') == 'BOGGLE'
assert guess_problem('/path/to/snail-recursion.py') == 'SNAIL'
assert guess_problem('/path/to/tripathcnt_dp.py') == 'TRIPATHCNT'
def main(filepath):
    """Guess problem and language from the file path, then log in and submit."""
    lang = guess_language(filepath)
    if not lang:
        print 'Language guess fail.'
        return
    problem = guess_problem(filepath)
    try:
        with open(filepath) as f:
            content = f.read()
    except IOError:
        print "Can't open/read file."
        return
    site = AlgoSpot()
    site.ensure_login()
    site.submit(problem, lang, content)
    # Show the judge result page for the submission we just made.
    site.open_recent_submission(problem)
if __name__ == '__main__':
    # Expect exactly one argument: the solution file to submit.
    if len(sys.argv) != 2:
        print 'Usage: {} <file>'.format(sys.argv[0])
        sys.exit(1)
    main(sys.argv[1])
| 3,896 | 1,297 |
import numpy as np
import cv2
import open3d as o3d
from .original_repo_utils import *
# Seed so the class palette is identical across runs.
np.random.seed(3)
MAX_CLASS_NUM = 100 # In the original model there are only 7 classes
# Fixed palette: one random RGB color per class id; extra entries keep the
# lookup safe for models with more classes.
segmenation_colors = np.random.randint(0, 255, (MAX_CLASS_NUM, 3)).astype("uint8")
def util_draw_seg(seg_map, image, alpha = 0.5):
    """Overlay (or stack, when alpha == 0) a colored segmentation map on image."""
    # Look up a color per class id, then scale to the image resolution.
    color_segmap = segmenation_colors[seg_map]
    color_segmap = cv2.resize(color_segmap, (image.shape[1], image.shape[0]))
    if alpha == 0:
        # alpha == 0 means "show side by side" rather than blend.
        return np.hstack((image, color_segmap))
    return cv2.addWeighted(image, alpha, color_segmap, (1 - alpha), 0)
def util_draw_depth(depth_map, image, max_depth = 2, alpha = 0.5):
    """Color-code a depth map and overlay (or stack, when alpha == 0) it on image.

    When max_depth is truthy the map is treated as millimetres and converted
    to metres before normalization; otherwise it is min/max scaled.
    """
    if max_depth:
        min_depth = 0
        depth_map = depth_map / 1000  # mm -> m
    else:
        min_depth = depth_map.min()
        max_depth = depth_map.max()
    # Scale to [0, 255] and clamp both ends.
    norm_map = 255 * (depth_map - min_depth) / (max_depth - min_depth)
    norm_map[norm_map < 0] = 0
    norm_map[norm_map >= 255] = 255
    color_depth = cv2.applyColorMap(cv2.convertScaleAbs(norm_map, 1), cv2.COLORMAP_PLASMA)
    color_depth = cv2.resize(color_depth, (image.shape[1], image.shape[0]))
    if alpha == 0:
        return np.hstack((image, color_depth))
    return cv2.addWeighted(image, alpha, color_depth, (1 - alpha), 0)
def util_draw_heatmap(heatmap, image, alpha = 0.5):
    """Overlay (or stack, when alpha == 0) a JET-colored heatmap on image."""
    colored = cv2.applyColorMap(cv2.convertScaleAbs(heatmap * 255, 1), cv2.COLORMAP_JET)
    colored = cv2.resize(colored, (image.shape[1], image.shape[0]))
    if alpha == 0:
        return np.hstack((image, colored))
    return cv2.addWeighted(image, alpha, colored, (1 - alpha), 0)
def util_draw_points2d(points_2d_list, image, label_ids):
    """Draw each 2D point set onto image, colored by its label id (mutates image)."""
    for points_2d, label in zip(points_2d_list, label_ids):
        bgr = tuple(int(c) for c in segmenation_colors[label])
        for point in points_2d.astype(int):
            cv2.circle(image, (int(point[0]), int(point[1])), 1, bgr, -1)
    return image
def util_draw_pose2d(boxes_2d_list, axes_2d_list, image, label_ids):
    """Draw each 2D box with its axes, using a darkened per-label color."""
    for label, box, axis in zip(label_ids, boxes_2d_list, axes_2d_list):
        shade = tuple(int(c * 0.5) for c in segmenation_colors[label])
        image = draw_bboxes(image, box, axis, shade)
    return image
def util_draw_2d(points_2d_list, boxes_2d_list, axes_2d_list, image, label_ids):
    """Draw 2D keypoints first, then boxes with axes, on the same image."""
    annotated = util_draw_points2d(points_2d_list, image, label_ids)
    annotated = util_draw_pose2d(boxes_2d_list, axes_2d_list, annotated, label_ids)
    return annotated
class Open3dVisualizer():
    """Incremental Open3D window rendering a point cloud plus 3D box wireframes."""

    def __init__(self):
        self.point_cloud = o3d.geometry.PointCloud()
        self.boxes = o3d.geometry.LineSet()
        # Geometries may only be *added* once; afterwards they must be updated.
        self.o3d_started = False
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window()

    def __call__(self, points_3d_list, boxes_3d_list, is_image = False):
        self.update(points_3d_list, boxes_3d_list, is_image)

    def update(self, points_3d_list, boxes_3d_list, is_image = False):
        """Replace the displayed geometry with the given points and boxes."""
        # Process points
        all_points, all_boxes, all_lines = Open3dVisualizer.process_data(points_3d_list, boxes_3d_list)
        # Add values to vectors
        self.point_cloud.points = o3d.utility.Vector3dVector(all_points)
        self.boxes.points = o3d.utility.Vector3dVector(all_boxes)
        self.boxes.lines = o3d.utility.Vector2iVector(all_lines)
        # Add geometries if it is the first time
        if not self.o3d_started:
            self.vis.add_geometry(self.point_cloud)
            self.vis.add_geometry(self.boxes)
            self.o3d_started = True
        else:
            self.vis.update_geometry(self.point_cloud)
            self.vis.update_geometry(self.boxes)
        self.vis.poll_events()
        self.vis.update_renderer()

    @staticmethod
    def process_data(points_3d_list, boxes_3d_list):
        """Concatenate per-object points/boxes and offset box line indices.

        Each box contributes 8 corner vertices, so the wireframe index
        template is shifted by 8 per additional box.
        """
        all_points = points_3d_list[0]
        all_boxes = boxes_3d_list[0]
        all_lines = np.array(open_3d_lines)
        box_count = 0
        for points_3d, box_3d in zip(points_3d_list[1:], boxes_3d_list[1:]):
            box_count += 1
            all_points = np.vstack((all_points, points_3d))
            all_boxes = np.vstack((all_boxes, box_3d))
            all_lines = np.vstack((all_lines, np.array(open_3d_lines) + 8 * box_count))
        # Fix axis to match open3d: negate all axes, then flip x back — the
        # net effect negates only y and z. NOTE(review): presumably converts
        # the camera frame to Open3D's viewing frame; confirm against callers.
        all_points = -all_points[:, [0, 1, 2]]
        all_boxes = -all_boxes[:, [0, 1, 2]]
        all_points[:, 0] = -all_points[:, 0]
        all_boxes[:, 0] = -all_boxes[:, 0]
        return all_points, all_boxes, all_lines
| 4,806 | 2,068 |
def calculate_strokes_gained(reference_value, user_putts):
'''Return the strokes gained based on reference and user input'''
return round((reference_value - user_putts), 2)
def calculate_strokes_gained_putting(reference_data, user_input):
    '''Return the strokes gained value from a dictionary of user input
    {distance, putts} and a list of reference strokes gained data.

    ``reference_data`` must be sorted by ascending distance. Distances outside
    the table are clamped to the nearest endpoint; distances between two rows
    are linearly interpolated.
    '''
    # The table endpoints are loop-invariant; compute them once up front
    # (previously recomputed on every iteration).
    min_reference_distance = reference_data[0]['distance']
    max_reference_distance = reference_data[-1]['distance']
    position = 0
    not_matched = True
    # loop through the reference data to find the right value of average putts
    while not_matched:
        baseline_data = reference_data[position]
        reference_distance = baseline_data['distance']
        reference_putts = baseline_data['putts']
        if user_input['distance'] < min_reference_distance:
            # below the table: clamp to the shortest-distance benchmark
            reference_putts = reference_data[0]['putts']
            not_matched = False
        elif user_input['distance'] > max_reference_distance:
            # above the table: clamp to the longest-distance benchmark
            reference_putts = reference_data[-1]['putts']
            not_matched = False
        elif user_input['distance'] == reference_distance:
            # exact match on a table row
            reference_putts = reference_data[position]['putts']
            not_matched = False
        elif user_input['distance'] < reference_distance and user_input['distance'] > last_distance:
            # between two rows: linear interpolation between the previous row
            # (last_distance/last_putts) and the current one
            distance_range = reference_distance - last_distance
            putt_range = reference_putts - last_putts
            proportion = (user_input['distance'] - last_distance)/distance_range
            reference_putts = round(last_putts + (putt_range * proportion), 2)
            not_matched = False
        # remember the current row for the interpolation branch above
        last_distance = reference_distance
        last_putts = reference_putts
        position += 1
    # typo fix: "averge" -> "average"
    print(f"Your input of distance of {user_input['distance']} feet equates to a tour average of {reference_putts} putts")
    strokes_gained = calculate_strokes_gained(reference_putts, user_input['putts'])
    return strokes_gained
| 2,519 | 667 |
import os
import torch.nn
from torch import nn
from crf_torch import CRF
import re
import random
import time
from torch.optim import Adam
import torch.nn.functional as F
from datetime import timedelta
# TODO 准确率计算函数的bug修复
def get_time_dif(start_time):
    """Elapsed wall-clock time since ``start_time``, rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
class CnnWordSeg(nn.Module):
    """Character-level CNN + CRF word segmenter (4 BMES tags)."""

    def __init__(self, config):
        super(CnnWordSeg, self).__init__()

        def conv_block(channels):
            # 'replicate' padding of one on each side keeps the length unchanged.
            return torch.nn.Sequential(
                torch.nn.Conv1d(in_channels=channels, out_channels=channels,
                                kernel_size=3, stride=1, padding=1,
                                padding_mode='replicate'),
                torch.nn.ReLU(),
            )

        hidden = config.hidden_size
        # Attribute names (conv1..conv3, dense, crf) are kept so saved
        # state dicts remain loadable.
        self.embedding = nn.Embedding(config.vocab_size, hidden, padding_idx=0)
        self.conv1 = conv_block(hidden)
        self.conv2 = conv_block(hidden)
        self.conv3 = conv_block(hidden)
        self.dense = nn.Linear(hidden, 4)
        self.crf = CRF(num_tags=config.num_labels, batch_first=True)

    def forward(self, x, y, mask, test=False):
        """Return the CRF log-likelihood (training) or decoded tags (test)."""
        out = self.embedding(x)            # (batch, seq_len, hidden)
        out = out.permute(0, 2, 1)         # Conv1d convolves over the length dim
        out = self.conv3(self.conv2(self.conv1(out)))
        out = self.dense(out.permute(0, 2, 1))
        if test:
            return self.crf.decode(out, mask)
        return self.crf(out, y, mask)
class DatasetIterater(object):
    """Batch iterator that pads each batch to its longest sequence.

    Fix: the leftover-batch flag is now computed from ``batch_size``. The
    original used ``len(data_list) % self.n_batches``, which raised
    ZeroDivisionError for datasets smaller than one batch and silently
    dropped the tail batch in cases such as len=12, batch_size=5.
    """

    def __init__(self, data_list, batch_size, device):
        self.batch_size = batch_size
        self.data_list = data_list
        self.n_batches = len(data_list) // batch_size
        # True when a final, smaller batch remains after the full batches.
        self.residue = len(data_list) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        """Pad x/y/mask to the longest sequence in the batch and move to device."""
        max_len = max(len(data[0]) for data in datas)
        x = torch.LongTensor([data[0] + [0] * (max_len - len(data[0])) for data in datas]).to(self.device)
        y = torch.LongTensor([data[1] + [0] * (max_len - len(data[0])) for data in datas]).to(self.device)
        mask = torch.ByteTensor([data[2] + [0] * (max_len - len(data[0])) for data in datas]).to(self.device)
        return x, y, mask

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final partial batch.
            batches = self.data_list[self.index * self.batch_size: len(self.data_list)]
            self.index += 1
            return self._to_tensor(batches)
        elif self.index >= self.n_batches:
            self.index = 0
            raise StopIteration
        else:
            batches = self.data_list[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batches)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches
def build_dataset(path, max_len=32):
    """Read a whitespace-segmented corpus and convert it to (x, y, mask) samples.

    Labels follow the BMES scheme: 0=single, 1=begin, 2=middle, 3=end.
    Returns (train_samples, valid_samples, id2char, char2id); the last 5000
    shuffled sentences become the validation split.

    NOTE: ``max_len`` is currently unused; kept for interface compatibility.
    """
    # Fix: read through a context manager so the file handle is closed.
    with open(path, 'r', encoding='utf8') as corpus:
        sents = corpus.read().strip().split('\n')
    sents = [re.split(' +', s) for s in sents]  # words separated by runs of spaces
    sents = [[w for w in s if w] for s in sents]  # drop empty strings
    random.shuffle(sents)  # shuffle so the validation split is random

    def build_vocab(sents, min_count=2):
        # Count characters and keep those seen at least min_count times;
        # ids start at 1 (0 is reserved for padding).
        chars = {}
        for s in sents:
            for c in ''.join(s):
                if c in chars:
                    chars[c] += 1
                else:
                    chars[c] = 1
        chars = {i: j for i, j in chars.items() if j >= min_count}
        id2char = {i + 1: j for i, j in enumerate(chars.keys())}
        char2id = {j: i for i, j in id2char.items()}
        return id2char, char2id

    id2char, char2id = build_vocab(sents)

    def to_id():
        # Encode each sentence; skip words containing out-of-vocabulary chars.
        datasets = []
        for s in sents:
            x, y = [], []
            for w in s:
                if not all(c in char2id for c in w):
                    continue
                x.extend([char2id[c] for c in w])
                if len(w) == 1:
                    y.append(0)
                elif len(w) == 2:
                    y.extend([1, 3])
                else:
                    y.extend([1] + [2] * (len(w) - 2) + [3])
            if x:
                datasets.append((x, y, [1] * len(x)))  # x, y, mask
        return datasets

    data = to_id()
    trains, valids = data[:-5000], data[-5000:]
    return trains, valids, id2char, char2id
class Train:
    """Training/evaluation driver for the segmentation model."""

    def __init__(self, model, train_iter, dev_iter, config):
        self.model = model
        self.train_iter = train_iter
        self.dev_iter = dev_iter
        self.config = config

    def train(self):
        """Run the training loop, periodically evaluating and checkpointing.

        Fix: checkpointing and mode switching now use ``self.model`` /
        ``self.config`` instead of relying on same-named globals that only
        exist when running the bundled ``__main__`` script.
        """
        start_time = time.time()
        self.model.train()
        optimizer = Adam(self.model.parameters(), lr=self.config.lr)
        total_batch = 0  # number of batches processed so far
        dev_best_loss = float('inf')  # best dev loss seen so far
        for epoch in range(self.config.num_epochs):
            print('Epoch [{}/{}]'.format(epoch + 1, self.config.num_epochs))
            for i, (x, y, mask) in enumerate(self.train_iter):
                self.model.zero_grad()
                loss = self.model(x, y, mask)
                loss.backward()
                optimizer.step()
                if total_batch % 100 == 0:
                    # Evaluate on the current batch and the dev set.
                    y_pre = self.model(x, y, mask, test=True)
                    y_true = y.cpu().numpy().tolist()
                    mask = mask.cpu().numpy().sum(axis=1).tolist()
                    train_acc, rec = self.cal_acc(y_pre, y_true, mask)
                    dev_loss, dev_acc, dev_rec = self.evaluate()
                    if dev_loss < dev_best_loss:
                        dev_best_loss = dev_loss
                        torch.save(self.model.state_dict(), self.config.save_path)
                        improve = '*'
                    else:
                        improve = ''
                    time_dif = get_time_dif(start_time)
                    msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Rec: {3:>6.2%}, Val Loss: {4:>5.2}, Val Acc: {5:>6.2%}, Time: {6} {7}'
                    print(msg.format(total_batch, loss.item(), train_acc, rec, dev_loss, dev_acc, time_dif, improve))
                    self.model.train()  # evaluate() left the model in eval mode
                total_batch += 1

    def evaluate(self):
        """Return (mean loss, mean accuracy, mean recall) over the dev set."""
        self.model.eval()
        loss_total = 0.0
        acc_total = 0.0
        rec_total = 0.0
        n = 0
        with torch.no_grad():
            for x, y, mask in self.dev_iter:
                loss = self.model(x, y, mask)
                loss_total += loss.item()
                y_pre = self.model(x, y, mask, test=True)
                y_true = y.cpu().numpy().tolist()
                mask = mask.cpu().numpy().sum(axis=1).tolist()
                acc, rec = self.cal_acc(y_pre, y_true, mask)
                acc_total += acc
                rec_total += rec
                n += 1
        return loss_total / n, acc_total / n, rec_total / n

    def cal_acc(self, y_pre, y_true, mask):
        """Approximate per-sentence precision/recall on word-start positions.

        Encodes positions with label 0 (single) or 1 (begin) as ``i*2 + x``
        and compares the predicted/true sets. TODO: known-buggy metric
        (see module TODO) — the ``+1`` denominators bias the scores; kept
        as-is pending a proper fix.
        """
        n = len(y_pre)
        acc, rec = 0.0, 0.0
        for i in range(n):
            length = mask[i]
            tp = y_pre[i][:length]
            tt = y_true[i][:length]
            tt = set([i * 2 + x for i, x in enumerate(tt) if x == 0 or x == 1])
            tp = set([i * 2 + x for i, x in enumerate(tp) if x == 0 or x == 1])
            acc += len(tt & tp) / (len(tp) + 1)
            rec += len(tt & tp) / (len(tt) + 1)
        return acc / n, rec / n
class Config:
    """Hyper-parameters and paths for training."""

    def __init__(self):
        self.lr = 1e-3  # Adam learning rate
        self.num_epochs = 10
        self.batch_size = 128
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # BMES tag set size. Fix: was assigned twice; duplicate removed.
        self.num_labels = 4
        self.hidden_size = 128
        self.path = '../data/icwb2/msr_training.utf8'
        self.vocab_size = 0  # filled in after the vocabulary is built
        self.save_path = 'model.ckpt'
if __name__ == '__main__':
    config = Config()
    train_data, valid_data, id2char, char2id = build_dataset(config.path)
    # +1 reserves id 0 for padding.
    config.vocab_size = len(id2char) + 1
    train_iter = DatasetIterater(train_data, config.batch_size, config.device)
    valid_iter = DatasetIterater(valid_data, config.batch_size, config.device)
    # NOTE(review): .cuda(0) hard-codes GPU 0 even though config.device may be CPU.
    model = CnnWordSeg(config).cuda(0)
    train = Train(model, train_iter, valid_iter, config)
    train.train()
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.backtests.data_handler import DataHandler
from gs_quant.backtests.event import *
import datetime as dt
class ExecutionEngine(object):
    """Abstract marker base class for order-execution engines."""
    pass
class SimulatedExecutionEngine(ExecutionEngine):
    """In-memory engine that fills queued orders once their execution window ends."""

    def __init__(self, data_handler: DataHandler):
        self.data_handler = data_handler
        self.orders = []

    def submit_order(self, order: OrderEvent):
        """Queue an order event, keeping the queue sorted by execution end time."""
        self.orders.append(order)
        self.orders.sort(key=lambda e: e.order.execution_end_time())

    def ping(self, state: dt.datetime):
        """Fill and dequeue every order whose execution window ended by ``state``."""
        fills = []
        while self.orders and self.orders[0].order.execution_end_time() <= state:
            order = self.orders.pop(0).order
            fills.append(FillEvent(order=order,
                                   filled_price=order.execution_price(self.data_handler),
                                   filled_units=order.execution_quantity(self.data_handler)))
        return fills
| 1,636 | 455 |
""" Utilities for OpenCOR
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-05-28
:Copyright: 2021, BioSimulators Team
:License: MIT
"""
from .data_model import KISAO_ALGORITHM_MAP
from biosimulators_utils.config import get_config, Config # noqa: F401
from biosimulators_utils.data_model import ValueType # noqa: F401
from biosimulators_utils.log.data_model import TaskLog # noqa: F401
from biosimulators_utils.report.data_model import VariableResults # noqa: F401
from biosimulators_utils.sedml.data_model import ( # noqa: F401
SedDocument, ModelLanguage, ModelAttributeChange, UniformTimeCourseSimulation, Algorithm, Task, RepeatedTask,
VectorRange, SubTask, DataGenerator, Variable)
from biosimulators_utils.sedml.io import SedmlSimulationWriter
from biosimulators_utils.sedml import validation
from biosimulators_utils.simulator.utils import get_algorithm_substitution_policy
from biosimulators_utils.utils.core import validate_str_value, raise_errors_warnings
from biosimulators_utils.warnings import warn, BioSimulatorsWarning
from kisao.data_model import AlgorithmSubstitutionPolicy, ALGORITHM_SUBSTITUTION_POLICY_LEVELS
from kisao.utils import get_preferred_substitute_algorithm_by_ids
from unittest import mock
import copy
import lxml.etree
import opencor
import os
import tempfile
__all__ = [
'validate_task',
'validate_variable_xpaths',
'validate_simulation',
'get_opencor_algorithm',
'get_opencor_parameter_value',
'build_opencor_sedml_doc',
'save_task_to_opencor_sedml_file',
'load_opencor_simulation',
'validate_opencor_simulation',
'get_results_from_opencor_simulation',
'log_opencor_execution',
'get_mock_libcellml',
]
def validate_task(task, variables, config=None):
    """ Validate that a simulation can be executed with OpenCOR

    Args:
        task (:obj:`Task`): request simulation task
        variables (:obj:`list` of :obj:`Variable`): variables that should be recorded
        config (:obj:`Config`, optional): BioSimulators common configuration

    Returns:
        :obj:`tuple:`:

            * :obj:`Task`: possibly alternate task that OpenCOR should execute
            * :obj:`lxml.etree._ElementTree`: element tree for model
            * :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it
    """
    config = config or get_config()
    model = task.model
    sim = task.simulation
    if config.VALIDATE_SEDML:
        # Each check raises on errors and emits warnings, tagged with the summary.
        raise_errors_warnings(validation.validate_task(task),
                              error_summary='Task `{}` is invalid.'.format(task.id))
        # OpenCOR only supports CellML models.
        raise_errors_warnings(validation.validate_model_language(model.language, ModelLanguage.CellML),
                              error_summary='Language for model `{}` is not supported.'.format(model.id))
        # Only attribute changes are supported among model changes.
        raise_errors_warnings(validation.validate_model_change_types(model.changes, (ModelAttributeChange,)),
                              error_summary='Changes for model `{}` are not supported.'.format(model.id))
        raise_errors_warnings(*validation.validate_model_changes(model),
                              error_summary='Changes for model `{}` are invalid.'.format(model.id))
        # Only uniform time courses are supported.
        raise_errors_warnings(validation.validate_simulation_type(sim, (UniformTimeCourseSimulation, )),
                              error_summary='{} `{}` is not supported.'.format(sim.__class__.__name__, sim.id))
        raise_errors_warnings(*validation.validate_simulation(sim),
                              error_summary='Simulation `{}` is invalid.'.format(sim.id))
        raise_errors_warnings(*validation.validate_data_generator_variables(variables),
                              error_summary='Data generator variables for task `{}` are invalid.'.format(task.id))
    # read model; TODO: support imports
    model_etree = lxml.etree.parse(model.source)
    # validate variables and map their ids to OpenCOR variable names
    opencor_variable_names = validate_variable_xpaths(variables, model_etree)
    # validate simulation (normalizes the time course for OpenCOR)
    opencor_simulation = validate_simulation(task.simulation)
    # check that OpenCOR can execute the request algorithm (or a similar one)
    opencor_algorithm = get_opencor_algorithm(task.simulation.algorithm, config=config)
    # create new task to manage configuration for OpenCOR; the caller's task
    # object is left untouched.
    opencor_task = copy.deepcopy(task)
    opencor_task.simulation = opencor_simulation
    opencor_task.simulation.algorithm = opencor_algorithm
    return opencor_task, model_etree, opencor_variable_names
def validate_variable_xpaths(sed_variables, model_etree):
    """ Get the names OpenCOR uses to refer to model variable

    Args:
        model_etree (:obj:`lxml.etree._ElementTree`): element tree for model
        sed_variables (:obj:`list` of :obj:`Variable`): SED variables

    Returns:
        :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it

    Raises:
        NotImplementedError: if a variable uses a symbol rather than a target
        ValueError: if a target matches zero or multiple elements, or is not a
            valid CellML observable
    """
    opencor_variable_names = {}
    for sed_variable in sed_variables:
        if not sed_variable.target:
            msg = 'Symbols are not supported.'
            raise NotImplementedError(msg)

        namespaces = copy.copy(sed_variable.target_namespaces)
        # XPath evaluation cannot accept a default (None-keyed) namespace.
        namespaces.pop(None, None)

        # Split an attribute target like `.../@initial_value` from the element part.
        obj_target, _, attrib_target = sed_variable.target.partition('/@')

        xml_objs = model_etree.xpath(obj_target, namespaces=namespaces)

        if len(xml_objs) == 0:
            msg = (
                'XPath targets of variables must reference unique observables. '
                'The target `{}` of variable `{}` does not match any model elements.'
            ).format(sed_variable.target, sed_variable.id)
            raise ValueError(msg)

        if len(xml_objs) > 1:
            msg = (
                'XPath targets of variables must reference unique observables. '
                'The target `{}` of variable `{}` matches multiple model elements.'
            ).format(sed_variable.target, sed_variable.id)
            raise ValueError(msg)

        xml_obj = xml_objs[0]

        # Walk up the ancestor chain collecting `name` attributes until the
        # CellML <model> element; every ancestor must be a named CellML element.
        names = []
        while True:
            name = xml_obj.attrib.get('name', None)
            names.append(name)
            xml_obj = xml_obj.getparent()
            ns, _, tag = xml_obj.tag[1:].partition('}')
            if not name or not ns.startswith('http://www.cellml.org/cellml/'):
                msg = 'Target `{}` of variable `{}` is not a valid observable.'.format(sed_variable.target, sed_variable.id)
                raise ValueError(msg)
            if tag == 'model':
                break

        if attrib_target:
            names.insert(0, attrib_target)

        # OpenCOR references variables as 'component/.../variable[/attribute]'.
        opencor_variable_names[sed_variable.id] = '/'.join(reversed(names))

    return opencor_variable_names
def validate_simulation(simulation):
    """ Validate a simulation

    Rescales the step count so the time course runs from the initial time
    (OpenCOR always starts there) while keeping the requested step density
    over the output window.

    Args:
        simulation (:obj:`UniformTimeCourseSimulation`): requested simulation

    Returns:
        :obj:`UniformTimeCourseSimulation`: simulation instructions for OpenCOR
    """
    total_span = simulation.output_end_time - simulation.initial_time
    output_span = simulation.output_end_time - simulation.output_start_time
    number_of_steps = total_span / output_span * simulation.number_of_steps
    output_start_time = simulation.initial_time

    if abs(number_of_steps - round(number_of_steps)) > 1e-8:
        msg = (
            'Number of steps must be an integer, not `{}`:'
            '\n Initial time: {}'
            '\n Output start time: {}'
            '\n Output end time: {}'
            '\n Number of steps (output start - end time) time: {}'
        ).format(
            number_of_steps, simulation.initial_time,
            simulation.output_start_time, simulation.output_end_time,
            simulation.number_of_steps,
        )
        raise NotImplementedError(msg)

    opencor_simulation = copy.deepcopy(simulation)
    opencor_simulation.number_of_steps = round(number_of_steps)
    opencor_simulation.output_start_time = output_start_time
    return opencor_simulation
def get_opencor_algorithm(requested_alg, config=None):
    """ Get a possibly alternative algorithm that OpenCOR should execute

    Args:
        requested_alg (:obj:`Algorithm`): requested algorithm
        config (:obj:`Config`, optional): configuration

    Returns:
        :obj:`Algorithm`: possibly alternative algorithm that OpenCOR should execute

    Raises:
        ValueError: under the strict (NONE) substitution policy, if a
            parameter value is invalid
        NotImplementedError: under the strict policy, if a parameter is not
            supported by the algorithm
    """
    exec_alg = copy.deepcopy(requested_alg)

    algorithm_substitution_policy = get_algorithm_substitution_policy(config=config)
    # Pick the requested algorithm or the closest supported substitute.
    exec_alg.kisao_id = get_preferred_substitute_algorithm_by_ids(
        requested_alg.kisao_id, KISAO_ALGORITHM_MAP.keys(),
        substitution_policy=algorithm_substitution_policy)

    if exec_alg.kisao_id == requested_alg.kisao_id:
        # Same algorithm: translate each parameter change into OpenCOR's
        # representation, dropping or rejecting unsupported ones depending
        # on how permissive the substitution policy is.
        alg_specs = KISAO_ALGORITHM_MAP[exec_alg.kisao_id]
        params_specs = alg_specs['parameters']
        # iterate over a copy since unsupported changes are removed in place
        for change in list(exec_alg.changes):
            param_specs = params_specs.get(change.kisao_id, None)
            if param_specs:
                is_valid, change.new_value = get_opencor_parameter_value(
                    change.new_value, param_specs['type'], param_specs.get('enum', None))
                if not is_valid:
                    if (
                        ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy]
                        > ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE]
                    ):
                        # permissive policy: warn and skip the bad value
                        warn('Unsupported value `{}` of {}-valued algorithm parameter `{}` (`{}`) was ignored.'.format(
                            change.new_value, param_specs['type'].name, param_specs['name'], change.kisao_id), BioSimulatorsWarning)
                        exec_alg.changes.remove(change)
                    else:
                        msg = '`{}` (`{}`) must a {}, not `{}`.'.format(
                            param_specs['name'], change.kisao_id, param_specs['type'].name, change.new_value)
                        raise ValueError(msg)
            else:
                if (
                    ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy]
                    > ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE]
                ):
                    # permissive policy: warn and drop the unknown parameter
                    warn('Unsupported algorithm parameter `{}` was ignored.'.format(
                        change.kisao_id), BioSimulatorsWarning)
                    exec_alg.changes.remove(change)
                else:
                    msg = '{} ({}) does not support parameter `{}`. {} support the following parameters:\n  {}'.format(
                        alg_specs['name'], alg_specs['kisao_id'], change.kisao_id, alg_specs['name'],
                        '\n  '.join(sorted('{}: {}'.format(param_kisao_id, param_specs['name'])
                                           for param_kisao_id, param_specs in params_specs.items()))
                    )
                    raise NotImplementedError(msg)
    else:
        # Substituted algorithm: the requested parameters don't apply to it.
        exec_alg.changes = []

    return exec_alg
def get_opencor_parameter_value(value, value_type, enum_cls=None):
    """ Get the OpenCOR representation of a value of a parameter

    Args:
        value (:obj:`str`): string-encoded parameter value
        value_type (:obj:`ValueType`): expected type of the value
        enum_cls (:obj:`type`): allowed values of the parameter

    Returns:
        :obj:`tuple`:

            * :obj:`bool`: whether the value is valid
            * :obj:`str`: OpenCOR representation of a value of a parameter
    """
    if not validate_str_value(value, value_type):
        return False, None

    # Free-form parameter (no enumeration of allowed values): pass through as-is.
    if not enum_cls:
        return True, value

    # Try the possible spellings of an enumerated value in order:
    # member name, member name with the KISAO prefix normalized, then raw value.
    lookups = (
        (KeyError, lambda: enum_cls[value]),
        (KeyError, lambda: enum_cls[value.replace('KISAO:', 'KISAO_')]),
        (ValueError, lambda: enum_cls(value)),
    )
    for expected_error, lookup in lookups:
        try:
            member = lookup()
        except expected_error:
            continue
        return True, member.value

    return False, None
def build_opencor_sedml_doc(task, variables, include_data_generators=False):
    """ Create an OpenCOR-compatible SED-ML document for a task and its output variables

    Args:
        task (:obj:`Task`): SED task
        variables (:obj:`list` of :obj:`Variable`): SED variables
        include_data_generators (:obj:`bool`, optional): whether to export data generators

    Returns:
        :obj:`SedDocument`: SED document
    """
    doc = SedDocument()

    # copy the model so the caller's task is not mutated; absolutize the source
    # path (it is re-relativized when the document is written to disk)
    model_copy = copy.deepcopy(task.model)
    model_copy.id = 'model'
    model_copy.source = os.path.abspath(model_copy.source)
    doc.models.append(model_copy)

    sim_copy = copy.deepcopy(task.simulation)
    sim_copy.id = 'simulation1'
    doc.simulations.append(sim_copy)

    # wrap the basic task in a single-iteration repeated task
    # NOTE(review): presumably OpenCOR expects this repeated-task structure —
    # confirm against the OpenCOR SED-ML importer
    basic_task = Task(id='task1', model=model_copy, simulation=sim_copy)
    repeated_task = RepeatedTask(
        id='repeatedTask',
        range=VectorRange(id="once", values=[1]),
        sub_tasks=[
            SubTask(order=1, task=basic_task),
        ],
        reset_model_for_each_iteration=True,
    )
    repeated_task.ranges = [repeated_task.range]
    doc.tasks.append(basic_task)
    doc.tasks.append(repeated_task)

    # optionally emit one trivial data generator per output variable,
    # each attached to the repeated task
    if include_data_generators:
        for variable in variables:
            doc.data_generators.append(
                DataGenerator(
                    id='data_generator_' + variable.id,
                    variables=[
                        Variable(id=variable.id, target=variable.target, target_namespaces=variable.target_namespaces, task=repeated_task),
                    ],
                    math=variable.id,
                )
            )
    return doc
def save_task_to_opencor_sedml_file(task, variables, include_data_generators=False):
    """ Save a SED task to an OpenCOR-compatible SED-ML file

    Args:
        task (:obj:`Task`): SED task
        variables (:obj:`list` of :obj:`Variable`): SED variables
        include_data_generators (:obj:`bool`, optional): whether to export data generators

    Returns:
        :obj:`str`: path to SED-ML file for the SED document; the caller is
            responsible for deleting this temporary file
    """
    doc = build_opencor_sedml_doc(task, variables, include_data_generators=include_data_generators)
    # create the temporary file and close its descriptor immediately so the
    # writer below can reopen the path
    fid, sed_filename = tempfile.mkstemp(suffix='.sedml')
    os.close(fid)
    # the model source must be relative to the SED-ML file's location
    doc.models[0].source = os.path.relpath(doc.models[0].source, os.path.dirname(sed_filename))
    # use a mocked version because libCellML cannot be installed into the OpenCOR docker image
    with mock.patch.dict('sys.modules', libcellml=get_mock_libcellml()):
        SedmlSimulationWriter().run(doc, sed_filename, validate_models_with_languages=False)
    return sed_filename
def load_opencor_simulation(task, variables, include_data_generators=False):
    """ Load an OpenCOR simulation

    Args:
        task (:obj:`Task`): SED task
        variables (:obj:`list` of :obj:`Variable`): SED variables
        include_data_generators (:obj:`bool`, optional): whether to export data generators

    Returns:
        :obj:`PythonQt.private.SimulationSupport.Simulation`: OpenCOR simulation

    Raises:
        :obj:`ValueError`: if OpenCOR reports the simulation as invalid
    """
    # save SED-ML to a file
    filename = save_task_to_opencor_sedml_file(task, variables, include_data_generators=include_data_generators)

    # Read the SED-ML file
    try:
        opencor_sim = opencor.open_simulation(filename)
    finally:
        # clean up temporary SED-ML file even if OpenCOR fails to open it
        os.remove(filename)

    validate_opencor_simulation(opencor_sim)

    return opencor_sim
def validate_opencor_simulation(sim):
    """ Validate an OpenCOR simulation

    Args:
        sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation

    Raises:
        :obj:`ValueError`: if the simulation is invalid
    """
    if sim.hasBlockingIssues() or not sim.valid():
        # OpenCOR issues contain markup; strip the tags by parsing each issue
        # inside a synthetic root element and keeping only the text
        msg = 'The task does not describe a valid simulation:\n\n  {}'.format(
            '\n\n  '.join(
                ''.join(lxml.etree.fromstring('<root>' + issue + '</root>').itertext())
                for issue in sim.issues()
            )
        )
        raise ValueError(msg)
def get_results_from_opencor_simulation(opencor_sim, sed_task, sed_variables, opencor_variable_names):
    """ Get the results of SED variables from an OpenCOR simulation

    Args:
        opencor_sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation
        sed_task (:obj:`Task`): requested SED task
        sed_variables (:obj:`list` of :obj:`Variable`): SED variables
        opencor_variable_names (:obj:`dict`): dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it

    Returns:
        :obj:`VariableResults`: results of the SED variables

    Raises:
        :obj:`ValueError`: if a variable does not map to any OpenCOR observable
    """
    opencor_results = opencor_sim.results()

    # OpenCOR partitions results by kind: variable of integration, states,
    # rates, constants, and algebraic variables
    opencor_voi_results = opencor_results.voi()
    opencor_states_results = opencor_results.states()
    opencor_rates_results = opencor_results.rates()
    opencor_constants_results = opencor_results.constants()
    opencor_algebraic_results = opencor_results.algebraic()

    sed_results = VariableResults()
    invalid_variables = []
    for sed_variable in sed_variables:
        opencor_name = opencor_variable_names[sed_variable.id]
        # keep only the final ``number_of_steps + 1`` points so that any
        # points recorded before the requested output start time are dropped
        if opencor_name == opencor_voi_results.uri():
            sed_results[sed_variable.id] = opencor_voi_results.values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_states_results:
            sed_results[sed_variable.id] = opencor_states_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_rates_results:
            sed_results[sed_variable.id] = opencor_rates_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_constants_results:
            sed_results[sed_variable.id] = opencor_constants_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_algebraic_results:
            sed_results[sed_variable.id] = opencor_algebraic_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        else:
            invalid_variables.append('{}: {}'.format(sed_variable.id, sed_variable.target))

    if invalid_variables:
        msg = (
            'The target of each variable must be a valid observable. '
            'The targets of the following variables are not valid observables.\n  {}'
        ).format('\n  '.join(invalid_variables))
        raise ValueError(msg)

    return sed_results
def log_opencor_execution(task, log):
    """ Log information about how OpenCOR was used to execute the simulation

    Args:
        task (:obj:`Task`): SED task
        log (:obj:`TaskLog`): execution log
    """
    algorithm = task.simulation.algorithm

    # record the executed algorithm and each of its parameter changes
    parameter_records = []
    for change in algorithm.changes:
        parameter_records.append({'kisaoID': change.kisao_id, 'value': change.new_value})

    log.algorithm = algorithm.kisao_id
    log.simulator_details = {
        'method': 'OpenCOR.SimulationSupport.Simulation.run',
        'algorithmParameters': parameter_records,
    }
def get_mock_libcellml():
    """ Get a mocked version of libCellML

    The real ``libcellml.Parser.parseModel`` takes the serialized model as an
    argument and ``libcellml.Validator.validateModel`` takes a model object;
    both mocks therefore accept one positional argument.

    Returns:
        :obj:`mock.Mock`: mocked libcellml module
    """
    return mock.Mock(
        Parser=lambda: mock.Mock(
            # BUG FIX: parseModel previously took no arguments, so calling it
            # with the model string raised TypeError; it must accept the model
            parseModel=lambda model: None,
            errorCount=lambda: 0,
            warningCount=lambda: 0,
        ),
        Validator=lambda: mock.Mock(
            validateModel=lambda model: None,
            errorCount=lambda: 0,
            warningCount=lambda: 0,
        ),
    )
| 19,578 | 5,816 |
from .jogadores import Jogador
class MeuJogador(Jogador):
    """Hunting-game strategy player.

    Defects against everyone when food is scarce; otherwise cooperates only
    with players whose reputation falls in the middle band (1/6, 5/6].
    """

    def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
        # Starving: defect ('d') against every opponent to conserve food.
        if comida_atual <= 5:
            return ['d'] * len(reputacoes_dos_jogadores)

        # Cooperate ('c') only with mid-reputation opponents; defect against
        # both very high (> 5/6) and very low (<= 1/6) reputations.
        escolhas = []
        for reputacao in reputacoes_dos_jogadores:
            if 0.1667 < reputacao <= 0.8333:
                escolhas.append('c')
            else:
                escolhas.append('d')
        return escolhas
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import norm
from scipy.optimize import brentq
from arpym.tools.transpose_square_root import transpose_square_root
def saddle_point_quadn(y, alpha, beta, gamma, mu, sigma2):
    """Saddle-point approximation of the cdf and pdf of a quadratic-normal
    variable ``Y = alpha + beta'X + X'gamma X`` with ``X ~ N(mu, sigma2)``.

    For details, see here.

    Parameters
    ----------
        y : array, shape(j_,)
            points at which to evaluate the cdf/pdf
        alpha : scalar
        beta : array, shape(n_,)
        gamma : array, shape(n_, n_)
        mu : array, shape(n_,)
        sigma2 : array, shape(n_, n_)

    Returns
    -------
        cdf : array, shape(j_,)
        pdf : array, shape(j_,)

    """

    # reshape inputs to column vectors; copies avoid mutating caller data
    y = np.asarray(y).copy().reshape(-1)
    beta = np.asarray(beta).copy().reshape(-1, 1)
    mu = np.asarray(mu).copy().reshape(-1, 1)
    j_ = len(y)

    # Step 1: Compute the eigenvalues and eigenvectors of l.T @ gamma @ l
    # NOTE(review): np.linalg.eig can return complex output for non-symmetric
    # input; presumably gamma is symmetric so l.T @ gamma @ l is too — confirm
    l = transpose_square_root(sigma2, 'Cholesky')
    lam, e = np.linalg.eig(l.T @ gamma @ l)
    lam = lam.reshape(-1, 1)

    # Step 2: Compute transformed parameters
    alpha_tilde = alpha + beta.T @ mu + mu.T @ gamma @ mu
    beta_tilde = beta + 2*gamma @ mu
    gamma_tilde = e.T @ l.T @ beta_tilde

    # Step 3: Compute the log-characteristic function and its derivatives

    # log-characteristic function (cumulant generating function evaluated at w)
    def c_y(w):
        return alpha_tilde * w - 0.5 * np.sum(np.log(1 - 2.*w*lam) -
                                              w**2 * gamma_tilde**2 /
                                              (1 - 2.*w*lam))

    # first derivative
    def c_y_prime(w):
        return alpha_tilde + np.sum(lam / (1 - 2.*w*lam) +
                                    gamma_tilde**2 * (w - w**2 * lam) /
                                    (1 - 2.*w*lam)**2)

    # second derivative
    def c_y_second(w):
        return np.array([np.sum(2. * (lam / (1 - 2.*w*lam))**2 +
                                gamma_tilde**2 / (1 - 2.*w*lam)**3)])

    # Step 4: Find w_hat numerically using Brent's method

    # the saddle point must lie where all 1 - 2*w*lam > 0; shrink the bracket
    # slightly inside the admissible interval determined by the extreme
    # eigenvalues
    lam_max = np.max(lam)
    lam_min = np.min(lam)
    if lam_max > 0:
        w_max = (1 - 1e-5) / (2 * lam_max)
    else:
        w_max = 1e20
    if lam_min < 0:
        w_min = (1 + 1e-5) / (2 * lam_min)
    else:
        w_min = -1e20
    y_min = c_y_prime(w_min)
    y_max = c_y_prime(w_max)

    # initialize
    w_hat = np.zeros(j_)
    c_y_w_hat = np.zeros(j_)  # c(w_hat)
    c_y_second_w_hat = np.zeros(j_)  # c''(w_hat)

    idx = np.argsort(y)
    w_last = w_min
    for j in range(j_):
        # clamp to the bracket when y falls outside the attainable range
        if y[idx[j]] <= y_min:
            w_hat[idx[j]] = w_min
        elif y[idx[j]] >= y_max:
            w_hat[idx[j]] = w_max
        else:
            # Brent’s method for finding the root of the function.
            # Since y is sorted and c_y_prime is a monotone increasing function
            # it is guaranteed that the solution w is in the interval
            # [w_last, w_max].
            w_hat[idx[j]] = brentq(lambda w: c_y_prime(w) - y[idx[j]],
                                   w_last, w_max)
            w_last = w_hat[idx[j]]

        c_y_w_hat[idx[j]] = c_y(w_hat[idx[j]])
        c_y_second_w_hat[idx[j]] = c_y_second(w_hat[idx[j]])

    # Step 5: Compute cdf and pdf (Lugannani–Rice style correction)
    # NOTE(review): r or u can be 0 when w_hat == 0 (y at the mean), which
    # would divide by zero below — confirm callers never hit that point
    r = np.sign(w_hat) * np.sqrt(2. * (w_hat * y - c_y_w_hat))
    u = w_hat * np.sqrt(c_y_second_w_hat)
    cdf = norm.cdf(r) - norm.pdf(r) * (1. / u - 1. / r)
    pdf = np.exp(c_y_w_hat - w_hat * y) / np.sqrt(2 * np.pi * c_y_second_w_hat)
    return np.squeeze(cdf), np.squeeze(pdf)
| 3,407 | 1,368 |
# -*- coding: utf-8 -*-
from dozer import Dozer

from .wsgi import application

# Wrap the WSGI application with Dozer's memory-profiling middleware so that
# leak/usage reports are available while the app is served.
application = Dozer(application)
| 114 | 39 |
"""
This package groups all the built-in node classes offered by the `abstract_codegen` package.
"""
| 101 | 28 |
import asyncio
import time

import pigpio

from .DHT22 import sensor
async def poll_once(gpio=24):
    """Take a single reading from a DHT22 sensor wired to a Raspberry Pi.

    :param int gpio: BCM GPIO pin the sensor's data line is connected to
        (defaults to 24, the previously hard-coded pin).
    :return: ``(humidity, temperature)`` tuple as reported by the driver.
    """
    pi = pigpio.pi()
    s = sensor(pi, gpio, LED=None, power=None, DHT11=False)
    try:
        s.trigger()
        # BUG FIX: this coroutine previously called the blocking time.sleep(),
        # which stalls the whole event loop; await asyncio.sleep() instead.
        # The DHT22 needs ~0.2 s to deliver a reading after being triggered.
        await asyncio.sleep(0.2)
        humidity = s.humidity()
        temperature = s.temperature()
    finally:
        # always release the driver and the pigpio connection, even if the
        # read fails
        s.cancel()
        pi.stop()
    return (humidity, temperature)
# import the necessary packages
from keras.preprocessing import image as image_utils
from imagenet_utils import decode_predictions
from imagenet_utils import preprocess_input
from vgg16 import VGG16
import numpy as np
import argparse
import cv2
from keras.utils import np_utils
import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
	help="path to the input image")
args = vars(ap.parse_args())

# load the original image via OpenCV so we can draw on it and display
# it to our screen later
orig = cv2.imread(args["image"])
#cv2.imshow("test",orig)

# load the input image using the Keras helper utility while ensuring
# that the image is resized to 224x224 pxiels, the required input
# dimensions for the network -- then convert the PIL image to a
# NumPy array
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(args["image"], target_size=(224, 224))
image = image_utils.img_to_array(image)

# our image is now represented by a NumPy array of shape (3, 224, 224),
# but we need to expand the dimensions to be (1, 3, 224, 224) so we can
# pass it through the network -- we'll also preprocess the image by
# subtracting the mean RGB pixel intensity from the ImageNet dataset
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)

# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")

# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
result = decode_predictions(preds, top=1)
# NOTE(review): decode_predictions(preds) is called again here (and a third
# time at the bottom as P); the decoded result could be computed once and
# reused
(inID, label, val) = decode_predictions(preds)[0][0]
print(result[0])
print(len(result))
#result1 = ([col.strip() for col in part] for part in result)
#print(result1)
#print(decode_predictions(preds)[0])
# display the predictions to our screen
print("ImageNet ID: {}, Label: {}".format(inID, label))
cv2.putText(orig, "Label: {}".format(label), (10, 30),
	cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
# show the annotated image with matplotlib (interactive mode off;
# plt.pause briefly renders before the blocking plt.show call)
plt.ioff()
plt.imshow(orig)
plt.pause(1)
plt.show()
#cv2.imshow("Classification", orig)
#cv2.waitKey(0)
P = decode_predictions(preds)
# NOTE(review): imagenetID/label/prob are unpacked here but never used below
(imagenetID, label, prob) = P[0][0]
#plt.show()
'''Tests for methods.py'''
from methods import FuncName, Method, TVList
from common import Capitalizer
from protocol import DirectReturn
def test_funcname():
    """FuncName with an explicit Capitalizer converts snake_case to PascalCase."""
    parser = FuncName(Capitalizer())
    assert parser.transform('foo_bar_baz') == 'FooBarBaz'
def test_funcname_default():
    """FuncName's default configuration also capitalizes snake_case names."""
    assert FuncName().transform('foo_bar_baz') == 'FooBarBaz'
def test_tvlist():
    """TVList with DirectReturn leaves the type/variable list unchanged."""
    parser = TVList(DirectReturn())
    assert parser.transform('int a, int b') == 'int a, int b'
def test_tvlist_default():
    """TVList's default configuration swaps each pair to Go-style `name type`."""
    assert TVList().transform('int a, int b') == 'a int, b int'
def test_method():
    """Method with DirectReturn components only moves the return type to the end."""
    parser = Method(DirectReturn(), DirectReturn(), DirectReturn())
    declaration = '    int foo(int a, int b);'
    assert parser.transform(declaration) == '    foo(int a, int b) int'
def test_method_default():
    """Method's default configuration capitalizes the name and rewrites args."""
    declaration = '    int foo(int a, int b);'
    assert Method().transform(declaration) == '    Foo(a int, b int) int'
def test_method_default_no_args():
    """A method with an empty argument list converts cleanly."""
    assert Method().transform('    uint32 get_type();') == '    GetType() uint32'
def test_method_void_type():
    """A void return type maps to an empty (trailing-space) return slot."""
    declaration = '    void foo(int a, int b);'
    assert Method().transform(declaration) == '    Foo(a int, b int) '
def test_method_empty_lines():
    """Blank and whitespace-only input lines produce no output."""
    assert Method().transform('\n\n\n  \n') == ''
def test_method_with_const():
    """A trailing `const` qualifier is dropped from the declaration."""
    parser = Method(DirectReturn(), DirectReturn(), DirectReturn())
    declaration = '    int foo(int a, int b) const ;'
    assert parser.transform(declaration) == '    foo(int a, int b) int'
| 1,534 | 543 |
from django.db import models
class Text(models.Model):
    """A single line of player dialogue."""
    # NOTE(review): `PlayerLine` breaks Django's snake_case field convention,
    # but renaming it would require a schema migration; left as-is.
    PlayerLine = models.CharField(max_length=1000)
    def __str__(self):
        # Display the stored line itself in admin/listing views.
        return self.PlayerLine
# stdlib
from typing import List, Sequence, Union
# 3rd party
import pytest
from coincidence.regressions import AdvancedDataRegressionFixture
from coincidence.selectors import min_version, not_windows, only_version
from domdf_python_tools.paths import PathPlus
from packaging.requirements import Requirement
from packaging.specifiers import Specifier, SpecifierSet
from pytest_regressions.data_regression import DataRegressionFixture
# this package
from shippinglabel.requirements import (
ComparableRequirement,
check_dependencies,
combine_requirements,
list_requirements,
parse_pyproject_dependencies,
parse_pyproject_extras,
parse_requirements,
read_requirements,
resolve_specifiers
)
class TestComparableRequirement:
	"""Tests for equality and ordering of :class:`ComparableRequirement`
	against other requirements (with/without markers) and plain strings."""

	@pytest.fixture(scope="class")
	def req(self):
		# reference requirement used by all comparison tests below
		return ComparableRequirement('pytest==6.0.0; python_version <= "3.9"')

	@pytest.mark.parametrize(
			"other",
			[
					ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
					ComparableRequirement("pytest==6.0.0"),
					ComparableRequirement("pytest"),
					ComparableRequirement("pytest[extra]"),
					Requirement('pytest==6.0.0; python_version <= "3.9"'),
					Requirement("pytest==6.0.0"),
					Requirement("pytest"),
					Requirement("pytest[extra]"),
					"pytest",
					]
			)
	def test_eq(self, req, other):
		"""Equality ignores specifiers/extras/markers and matches plain names."""
		assert req == req
		assert req == other

	@pytest.mark.parametrize(
			"other",
			[
					"pytest-rerunfailures",
					ComparableRequirement("pytest-rerunfailures"),
					ComparableRequirement("pytest-rerunfailures==1.2.3"),
					Requirement("pytest-rerunfailures"),
					Requirement("pytest-rerunfailures==1.2.3"),
					ComparableRequirement("pytest"),
					ComparableRequirement("pytest[extra]"),
					Requirement("pytest"),
					Requirement("pytest[extra]"),
					]
			)
	def test_gt(self, req, other):
		"""`other` sorts after `req`.

		NOTE(review): the assertion is ``req < other`` — the name reads from
		`other`'s perspective; consider renaming for clarity."""
		assert req < other

	@pytest.mark.parametrize(
			"other",
			[
					"apeye",
					ComparableRequirement("apeye"),
					ComparableRequirement("apeye==1.2.3"),
					Requirement("apeye"),
					Requirement("apeye==1.2.3"),
					]
			)
	def test_lt(self, req, other):
		"""`other` sorts before `req` (see naming note on test_gt)."""
		assert req > other

	@pytest.mark.parametrize(
			"other",
			[
					"pytest-rerunfailures",
					ComparableRequirement("pytest-rerunfailures"),
					ComparableRequirement("pytest-rerunfailures==1.2.3"),
					ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
					Requirement("pytest-rerunfailures"),
					Requirement("pytest-rerunfailures==1.2.3"),
					Requirement('pytest==6.0.0; python_version <= "3.9"'),
					ComparableRequirement("pytest==6.0.0"),
					ComparableRequirement("pytest"),
					ComparableRequirement("pytest[extra]"),
					Requirement("pytest==6.0.0"),
					Requirement("pytest"),
					Requirement("pytest[extra]"),
					"pytest",
					]
			)
	def test_ge(self, req, other):
		"""`req` sorts at or before `other`."""
		assert req <= other
		assert req <= req

	@pytest.mark.parametrize(
			"other",
			[
					"apeye",
					ComparableRequirement("apeye"),
					ComparableRequirement("apeye==1.2.3"),
					Requirement("apeye"),
					Requirement("apeye==1.2.3"),
					ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
					ComparableRequirement("pytest==6.0.0"),
					ComparableRequirement("pytest"),
					ComparableRequirement("pytest[extra]"),
					Requirement('pytest==6.0.0; python_version <= "3.9"'),
					Requirement("pytest==6.0.0"),
					Requirement("pytest"),
					Requirement("pytest[extra]"),
					"pytest",
					]
			)
	def test_le(self, req, other):
		"""`req` sorts at or after `other`."""
		assert req >= other
		assert req >= req
def test_combine_requirements():
	"""All specifiers for a single project are merged into one requirement."""
	specs = ["foo", "foo>2", "foo>2.5", "foo==3.2.1", "foo==3.2.3", "foo==3.2.5"]
	reqs = [ComparableRequirement(spec) for spec in specs]

	combined = combine_requirements(reqs)
	assert combined == [Requirement("foo==3.2.1,==3.2.3,==3.2.5,>2.5")]
	assert str(combined[0]) == "foo==3.2.1,==3.2.3,==3.2.5,>2.5"
	assert str(combined[0].specifier) == "==3.2.1,==3.2.3,==3.2.5,>2.5"
def test_combine_requirements_duplicates():
	"""Duplicates collapse; requirements with different markers stay separate,
	and the output preserves first-seen order of the distinct requirements."""
	reqs = [
			ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"'),
			ComparableRequirement("typing-extensions>=3.7.4.3"),
			ComparableRequirement("typing-extensions>=3.7.4.3"),
			ComparableRequirement("typing-extensions>=3.7.4.3"),
			ComparableRequirement("typing-extensions>=3.7.4.3"),
			ComparableRequirement("typing-extensions>=3.7.4.1"),
			ComparableRequirement("typing-extensions>=3.7.4"),
			ComparableRequirement('typing-extensions; python_version < "3.8"'),
			]

	combined_reqs = combine_requirements(reqs)
	assert len(combined_reqs) == 2
	assert combined_reqs[1] == ComparableRequirement("typing-extensions>=3.7.4.3")
	assert combined_reqs[0] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"')

	# rotating the input shows the output order follows the input order
	reqs.append(reqs.pop(0))

	combined_reqs = combine_requirements(reqs)
	assert len(combined_reqs) == 2
	assert combined_reqs[0] == ComparableRequirement("typing-extensions>=3.7.4.3")
	assert combined_reqs[1] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"')
def test_combine_requirements_differing_precision():
	"""`>=` pins with differing version precision keep only the highest bound."""
	reqs = [
			ComparableRequirement(spec)
			for spec in ("lockfile>=0.9", "lockfile>=0.9", "lockfile>=0.12.2")
			]
	assert combine_requirements(reqs) == [Requirement("lockfile>=0.12.2")]
@pytest.mark.parametrize(
		"reqs, combined",
		[
				# different markers are never merged
				(
						[
								ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
								ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"')
								],
						[
								ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
								ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"')
								],
						),
				# marker vs no-marker are also kept separate
				(
						[
								ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
								ComparableRequirement("numpy>=1.19.1"),
								],
						[
								ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
								ComparableRequirement("numpy>=1.19.1"),
								],
						),
				# without markers, overlapping specifiers collapse to the tighter one
				(
						[ComparableRequirement("numpy==1.19.3"), ComparableRequirement("numpy>=1.19.1")],
						[ComparableRequirement("numpy==1.19.3")],
						),
				(
						[ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy==1.19.1")],
						[ComparableRequirement("numpy==1.19.1")],
						),
				(
						[ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy<1.19.1")],
						[ComparableRequirement("numpy<1.19.1")],
						),
				(
						[ComparableRequirement("numpy>1.2.3"), ComparableRequirement("numpy>=1.2.2")],
						[ComparableRequirement("numpy>1.2.3")],
						),
				]
		)
def test_combine_requirements_markers(reqs, combined):
	"""Requirements with distinct environment markers are not merged together."""
	assert combine_requirements(reqs) == combined
@pytest.mark.parametrize(
		"specifiers, resolved",
		[
				# redundant lower bounds collapse; upper bounds are preserved
				([Specifier(">1.2.3"), Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">1.2.3,<2")),
				([Specifier(">1.2.3"), Specifier(">=1.2.2")], SpecifierSet(">1.2.3")),
				([Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">=1.2.2,<2")),
				([Specifier(">1.2.3"), Specifier("<2")], SpecifierSet(">1.2.3,<2")),
				# mirror-image cases for upper bounds
				([Specifier("<1.2.2"), Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<1.2.2,>2")),
				([Specifier("<1.2.2"), Specifier("<=1.2.3")], SpecifierSet("<1.2.2")),
				([Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<=1.2.3,>2")),
				([Specifier("<1.2.2"), Specifier(">2")], SpecifierSet("<1.2.2,>2")),
				]
		)
def test_resolve_specifiers(specifiers, resolved):
	"""resolve_specifiers drops redundant specifiers and keeps the tightest set."""
	assert resolve_specifiers(specifiers) == resolved
# Fixture data shared by the read/parse tests below.

# a clean, duplicate-free requirements list
requirements_a = [
		"autodocsumm>=0.2.0",
		"default-values>=0.2.0",
		"domdf-sphinx-theme>=0.1.0",
		"extras-require>=0.2.0",
		"repo-helper-sphinx-theme>=0.0.2",
		"seed-intersphinx-mapping>=0.1.1",
		"sphinx>=3.0.3",
		"ruamel-yaml>=0.16.12",
		"sphinx-click>=2.5.0",
		"sphinx-copybutton>=0.2.12",
		"sphinx-notfound-page>=0.5",
		"sphinx-prompt>=1.1.0",
		"sphinx-tabs>=1.1.13",
		"sphinx-toolbox>=1.7.1",
		"sphinxcontrib-autoprogram>=0.1.5",
		"sphinxcontrib-httpdomain>=1.7.0",
		"sphinxemoji>=0.1.6",
		"toctree-plus>=0.0.4",
		]

# as requirements_a, but with duplicated entries, conflicting versions
# (toctree-plus) and the dotted spelling "ruamel.yaml" to exercise
# name normalization and deduplication
requirements_b = [
		"autodocsumm>=0.2.0",
		"default-values>=0.2.0",
		"domdf-sphinx-theme>=0.1.0",
		"domdf-sphinx-theme>=0.1.0",
		"extras-require>=0.2.0",
		"repo-helper-sphinx-theme>=0.0.2",
		"seed-intersphinx-mapping>=0.1.1",
		"sphinx>=3.0.3",
		"sphinx-click>=2.5.0",
		"sphinx-copybutton>=0.2.12",
		"sphinx-copybutton>=0.2.12",
		"sphinx-notfound-page>=0.5",
		"sphinx-prompt>=1.1.0",
		"sphinx-tabs>=1.1.13",
		"sphinx-toolbox>=1.7.1",
		"ruamel.yaml>=0.16.12",
		"sphinxcontrib-autoprogram>=0.1.5",
		"sphinxcontrib-autoprogram>=0.1.5",
		"sphinxcontrib-httpdomain>=1.7.0",
		"sphinxemoji>=0.1.6",
		"toctree-plus>=0.0.4",
		"toctree-plus>=0.0.3",
		]

# one project with platform-dependent environment markers
requirements_c = [
		'numpy==1.19.3; platform_system == "Windows"',
		'numpy>=1.19.1; platform_system != "Windows"',
		]
@pytest.mark.parametrize(
		"requirements",
		[
				pytest.param(requirements_a, id='a'),
				pytest.param(requirements_b, id='b'),
				pytest.param(requirements_c, id='c'),
				]
		)
def test_read_requirements(
		tmp_pathplus,
		advanced_data_regression: AdvancedDataRegressionFixture,
		requirements: List[str],
		):
	"""read_requirements parses a requirements.txt file written to disk;
	the sorted string forms are checked against regression data."""
	(tmp_pathplus / "requirements.txt").write_lines(requirements)
	advanced_data_regression.check([
			str(x) for x in sorted(read_requirements(tmp_pathplus / "requirements.txt")[0])
			])
@pytest.mark.parametrize(
		"requirements",
		[
				# each fixture list is exercised as list, iterator, set and tuple
				# to confirm parse_requirements accepts any iterable
				pytest.param(requirements_a, id='a'),
				pytest.param(requirements_b, id='b'),
				pytest.param(requirements_c, id='c'),
				pytest.param(iter(requirements_a), id="iter(a)"),
				pytest.param(iter(requirements_b), id="iter(b)"),
				pytest.param(iter(requirements_c), id="iter(c)"),
				pytest.param(set(requirements_a), id="set(a)"),
				pytest.param(set(requirements_b), id="set(b)"),
				pytest.param(set(requirements_c), id="set(c)"),
				pytest.param(tuple(requirements_a), id="tuple(a)"),
				pytest.param(tuple(requirements_b), id="tuple(b)"),
				pytest.param(tuple(requirements_c), id="tuple(c)"),
				]
		)
def test_parse_requirements(
		tmp_pathplus: PathPlus,
		advanced_data_regression: AdvancedDataRegressionFixture,
		requirements: List[str],
		):
	"""parse_requirements accepts any iterable of requirement strings."""
	advanced_data_regression.check([str(x) for x in sorted(parse_requirements(requirements)[0])])
def test_read_requirements_invalid(
		tmp_pathplus: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture
		):
	"""Invalid requirement lines are skipped with a warning; comments are
	collected separately from the parsed requirements."""
	(tmp_pathplus / "requirements.txt").write_lines([
			"# another comment",
			"autodocsumm>=apples",
			"default-value---0.2.0",
			"domdf-sphinx-theme!!!0.1.0",
			"0.2.0",
			'',
			'',
			"https://bbc.co.uk",
			"toctree-plus>=0.0.4",
			"# a comment",
			])

	with pytest.warns(UserWarning) as record:
		requirements, comments = read_requirements(tmp_pathplus / "requirements.txt")

	# check that exactly three warnings were raised
	assert len(record) == 3

	# check that the message matches
	for idx, warning in enumerate([
			"Creating a LegacyVersion has been deprecated and will be removed in the next major release",
			"Ignored invalid requirement 'domdf-sphinx-theme!!!0.1.0'",
			"Ignored invalid requirement 'https://bbc.co.uk'",
			]):
		assert record[idx].message.args[0] == warning  # type: ignore

	advanced_data_regression.check([str(x) for x in sorted(requirements)])
	assert comments == ["# another comment", "# a comment"]
def test_sort_mixed_requirements():
	"""Plain strings and ComparableRequirement objects sort together by name."""
	mixed: Sequence[Union[str, ComparableRequirement]] = [
			"urllib3",
			ComparableRequirement("six==1.15.0"),
			"botocore",
			ComparableRequirement("requests>=2.19.1"),
			"python-dateutil",
			]

	expected = [
			"botocore",
			"python-dateutil",
			ComparableRequirement("requests>=2.19.1"),
			ComparableRequirement("six==1.15.0"),
			"urllib3",
			]

	assert sorted(mixed) == expected
def test_check_dependencies(capsys):
	"""check_dependencies reports missing modules; the optional flag controls
	whether a human-readable summary is printed to stdout."""
	deps = ["pytest", "domdf_python_tools", "madeup_module"]

	# prints_output=False: silent, returns only the missing modules
	missing_deps = check_dependencies(deps, False)
	assert isinstance(missing_deps, list)
	assert len(missing_deps) == 1
	assert missing_deps == ["madeup_module"]

	# default: prints the missing-module summary
	missing_deps = check_dependencies(deps)
	captured = capsys.readouterr()
	stdout = captured.out.split('\n')
	assert stdout[0] == "The following modules are missing:"
	assert stdout[1] == "['madeup_module']"
	assert stdout[2] == "Please check the documentation."
	assert stdout[3] == ''
	assert isinstance(missing_deps, list)
	assert len(missing_deps) == 1
	assert missing_deps == ["madeup_module"]

	# all modules present: prints the success message, returns empty list
	missing_deps = check_dependencies(["pytest"])
	captured = capsys.readouterr()
	stdout = captured.out.split('\n')
	assert stdout[0] == "All modules installed"
	assert stdout[1] == ''
	assert isinstance(missing_deps, list)
	assert len(missing_deps) == 0
	assert missing_deps == []
def test_comparable_requirement():
	"""Equality, hashing and container membership of ComparableRequirement,
	including the interaction with environment markers."""
	assert ComparableRequirement("foo") != ComparableRequirement("bar")
	assert ComparableRequirement("foo") == ComparableRequirement("foo")
	assert ComparableRequirement("foo>=1.2.3") == ComparableRequirement("foo >= 1.2.3")

	# factories return fresh (non-identical) but equal instances
	def req_with_marker():
		return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.8"')

	def req_without_marker():
		return ComparableRequirement("importlib-metadata>=1.5.0")

	def req_with_different_marker():
		return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.10"')

	assert req_with_marker() == req_with_marker()
	assert req_with_marker() is not req_with_marker()
	assert req_without_marker() is not req_without_marker()
	assert req_with_marker() != req_with_different_marker()

	# equality against plain strings and container membership (list/tuple)
	assert "importlib-metadata" in [req_with_marker()]
	assert req_without_marker() in [req_with_marker()]
	assert req_with_marker() in [req_with_marker()]
	assert "importlib-metadata" in (req_with_marker(), )
	assert req_without_marker() in (req_with_marker(), )
	assert req_with_marker() in (req_with_marker(), )

	# hashing: equal objects deduplicate in sets; the marker participates in
	# the hash, so set membership distinguishes marker vs no-marker
	assert {req_without_marker(), req_without_marker()} == {req_without_marker()}
	assert {req_with_marker(), req_with_marker()} == {req_with_marker()}
	assert hash(req_with_marker()) == hash(req_with_marker())
	assert hash(req_with_marker()) != hash(req_without_marker())
	assert req_without_marker() not in {req_with_marker()}
	assert req_with_marker() in {req_with_marker()}
	assert req_without_marker() != "123foo?"
# Per-interpreter pytest params: each case only runs on the matching Python
# version because the regression output files differ between versions.
only_36 = pytest.param("3.6", marks=only_version((3, 6), reason="Output differs on Python 3.6"))
only_37 = pytest.param("3.7", marks=only_version((3, 7), reason="Output differs on Python 3.7"))
only_38 = pytest.param("3.8", marks=only_version((3, 8), reason="Output differs on Python 3.8"))
min_38 = pytest.param("3.8+", marks=min_version((3, 8), reason="Output differs on Python 3.8+"))
only_39 = pytest.param("3.9", marks=only_version((3, 9), reason="Output differs on Python 3.9"))
only_310 = pytest.param("3.10", marks=only_version((3, 10), reason="Output differs on Python 3.10"))
@not_windows("Output differs on Windows")
@pytest.mark.parametrize("py_version", [
		only_36,
		only_37,
		only_38,
		only_39,
		only_310,
		])
@pytest.mark.parametrize(
		"library", [
				"shippinglabel",
				"apeye",
				"cachecontrol[filecache]",
				"domdf-python-tools",
				"domdf_python_tools",
				]
		)
@pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3])
# @pytest.mark.parametrize("depth", [3])
def test_list_requirements(
		data_regression: DataRegressionFixture,
		library,
		depth,
		py_version,
		):
	"""list_requirements resolves an installed library's dependency tree to the
	given depth (-1 = unlimited); output is checked against regression data.
	``py_version`` only selects which regression file applies."""
	data_regression.check(list(list_requirements(library, depth=depth)))
@not_windows("Output differs on Windows")
@pytest.mark.parametrize("py_version", [
		only_36,
		only_37,
		min_38,
		])
@pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3])
# @pytest.mark.parametrize("depth", [3])
def test_list_requirements_pytest(
		data_regression: DataRegressionFixture,
		depth,
		py_version,
		):
	"""As test_list_requirements, but for pytest, whose dependency tree is
	stable across 3.8+ (hence the min_38 param)."""
	data_regression.check(list(list_requirements("pytest", depth=depth)))
@pytest.fixture()
def pyproject_toml(tmp_pathplus: PathPlus):
	"""Write a pyproject.toml containing both PEP 621 ``[project]`` and flit
	``[tool.flit.metadata]`` dependency tables, and return its path, so the
	flavour-detection tests can exercise either parser."""
	filename = (tmp_pathplus / "pyproject.toml")
	filename.write_lines([
			"[build-system]",
			'requires = [ "setuptools>=40.6.0", "wheel>=0.34.2",]',
			'build-backend = "setuptools.build_meta"',
			'',
			"[project]",
			"dependencies = [",
			'  "httpx",',
			'  "gidgethub[httpx]>4.0.0",',
			"  \"django>2.1; os_name != 'nt'\",",
			"  \"django>2.0; os_name == 'nt'\"",
			']',
			'',
			"[project.optional-dependencies]",
			"test = [",
			'  "pytest < 5.0.0",',
			'  "pytest-cov[all]"',
			']',
			"[tool.flit.metadata]",
			"requires = [",
			'\t"requests >=2.6",',
			"\t\"configparser; python_version == '2.7'\",",
			']',
			'',
			"[tool.flit.metadata.requires-extra]",
			"test = [",
			'\t"pytest >=2.7.3",',
			'\t"pytest-cov",',
			']',
			])
	return filename
@pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"])
def test_parse_pyproject_dependencies(
		pyproject_toml: PathPlus,
		advanced_data_regression: AdvancedDataRegressionFixture,
		flavour: str,
		):
	"""parse_pyproject_dependencies reads the main dependency table for each
	supported flavour (auto-detected, PEP 621, or flit)."""
	deps = parse_pyproject_dependencies(pyproject_toml, flavour)  # type: ignore
	advanced_data_regression.check(sorted(str(x) for x in deps))
@pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"])
def test_parse_pyproject_extras(
		pyproject_toml: PathPlus,
		advanced_data_regression: AdvancedDataRegressionFixture,
		flavour: str,
		):
	"""parse_pyproject_extras reads the optional-dependency tables for each
	supported flavour (auto-detected, PEP 621, or flit)."""
	extras = parse_pyproject_extras(pyproject_toml, flavour)  # type: ignore
	advanced_data_regression.check({k: sorted(str(x) for x in v) for k, v in extras.items()})
| 17,468 | 7,689 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.context as context
from mindspore import Tensor, Parameter
import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class Net(nn.Cell):
    """Network combining UniformCandidateSampler with an embedding lookup,
    used to exercise sharding strategies for the sampler in parallel mode."""
    def __init__(self, embedding_weight, num_true, num_sampled, unique, range_max, seed, remove_accidential,
                 strategy1=None):
        super(Net, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled, unique, range_max, seed,
                                                 remove_accidential)
        # apply the sharding strategy to the sampler only when one is supplied
        if strategy1:
            self.sampler.shard(strategy1)
        self.embedding_table = Parameter(embedding_weight, "embedding_weight")
        self.gatherv2 = P.Gather()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum2 = P.ReduceSum()
        self.reduce_sum3 = P.ReduceSum()

    def construct(self, x):
        # NOTE(review): presumably out1 = sampled candidates, out2/out3 =
        # true/sampled expected counts — confirm against the
        # UniformCandidateSampler operator documentation
        out1, out2, out3 = self.sampler(x)
        lookup = self.gatherv2(self.embedding_table, out1, 0)
        # combine all three sampler outputs into a scalar so every output
        # participates in the compiled graph
        loss = out1 - out3
        loss = self.reduce_sum(loss, (0,))
        loss2 = self.reduce_sum2(lookup, (0, 1))
        loss3 = self.reduce_sum3(out2, (0, 1))
        loss4 = loss + loss2 + loss3
        return loss4
class Net2(nn.Cell):
    """Network whose UniformCandidateSampler output is returned directly."""

    def __init__(self, mul_weight, num_true, num_sampled, unique, range_max, seed, remove_accidential,
                 strategy1=None):
        super(Net2, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled, unique, range_max, seed,
                                                 remove_accidential)
        self.cast = P.Cast()
        self.weight = Parameter(mul_weight, "w1")
        self.mul = P.Mul()
        if strategy1:
            self.sampler.shard(strategy1)

    def construct(self, x):
        scaled = self.cast(self.mul(x, self.weight), ms.int32)
        _, true_expected, _ = self.sampler(scaled)
        return true_expected
# Shared fixtures for the parallel-compile tests below.
_w = Tensor(np.ones([48, 16]), dtype=ms.float32)   # multiplier weight for Net2
_w1 = Tensor(np.ones([96, 64]), dtype=ms.float32)  # embedding table for Net
_x = Tensor(np.ones([48, 16]), dtype=ms.int32)     # common network input (see compile_net)
def compile_net(net):
    """Wrap *net* for training and compile it once under graph mode."""
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
    opt = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    wrapped = TrainOneStepCell(net, opt)
    wrapped.set_auto_parallel()
    wrapped.set_train()
    _executor.compile(wrapped, _x)
    context.reset_auto_parallel_context()
def test_uniform_candidate_sampler_no_full_0d_split():
    """Partial (4-way) shard of the sampler input's first dim compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=((4, 1),))
    compile_net(net)
def test_uniform_candidate_sampler_no_full_1d_split():
    """Partial (4-way) shard of the sampler input's second dim compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=((1, 4),))
    compile_net(net)
def test_uniform_candidate_sampler_full_0d_split():
    """Full (8-way) shard of the sampler input's first dim compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=((8, 1),))
    compile_net(net)
def test_uniform_candidate_sampler_full_1d_split():
    """Full (8-way) shard of the sampler input's second dim compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=((1, 8),))
    compile_net(net)
def test_uniform_candidate_sampler_full_1d_unqiue_false():
    """Full second-dim shard with non-unique sampling compiles."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False, strategy1=((1, 8),))
    compile_net(net)
def test_uniform_candidate_sampler_auto_parllel():
    """Auto-parallel mode compiles without an explicit shard strategy."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False)
    compile_net(net)
def test_uniform_candidate_sampler_auto_parllel_unqiue_true():
    """Auto-parallel mode with unique sampling compiles."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False)
    compile_net(net)
def test_uniform_candidate_sampler_auto_parllel_remove_true():
    """Auto-parallel mode with remove_accidential=True compiles."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=True)
    compile_net(net)
def test_uniform_candidate_sampler_full_1d_remove_true():
    """remove_accidential=True combined with a sharded sampler must fail to compile."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=True, strategy1=((1, 8),))
    with pytest.raises(RuntimeError):
        compile_net(net)
def test_uniform_candidate_sampler_as_final():
    """A sharded sampler feeding the graph output directly must fail to compile."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    net = Net2(_w, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
               remove_accidential=False, strategy1=((1, 8),))
    with pytest.raises(RuntimeError):
        compile_net(net)
| 6,682 | 2,367 |
# Copyright (C) 2007 Matthew Neeley
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
### BEGIN NODE INFO
[info]
name = Data Vault
version = 3.0.2
description = Store and retrieve numeric data
[startup]
cmdline = %PYTHON% %FILE%
timeout = 20
[shutdown]
message = 987654321
timeout = 5
### END NODE INFO
"""
from __future__ import absolute_import
import os
import sys
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
import labrad.util
import labrad.wrappers
from data_vault import SessionStore
from data_vault.server import DataVault
@inlineCallbacks
def load_settings(cxn, name):
    """Load settings from registry with fallback to command line if needed.

    Looks up this node's data vault directory in the registry. If neither a
    node-specific entry nor a '__default__' entry exists, the user is prompted
    for a path, which is then stored in the registry for future runs.
    """
    path = ['', 'Servers', name, 'Repository']
    nodename = labrad.util.getNodeName()
    registry = cxn.registry
    yield registry.cd(path, True)
    _dirs, keys = yield registry.dir()
    if nodename in keys:
        datadir = yield registry.get(nodename)
    elif '__default__' in keys:
        datadir = yield registry.get('__default__')
    else:
        default_datadir = os.path.expanduser('~/.labrad/vault')
        print('Could not load repository location from registry.')
        print('Please enter data storage directory or hit enter to use')
        print('the default directory ({}):'.format(default_datadir))
        datadir = os.path.expanduser(input('>>>'))
        if not datadir:
            datadir = default_datadir
        if not os.path.exists(datadir):
            os.makedirs(datadir)
        # Record the choice both for this node and as the global default.
        yield registry.set(nodename, datadir)
        yield registry.set('__default__', datadir)
        print('Data location configured in the registry at {}: {}'.format(
            path + [nodename], datadir))
        print('To change this, edit the registry keys and restart the server.')
    returnValue(datadir)
def main(argv=sys.argv):
    """Resolve the data directory over LabRAD, start the Data Vault, run the reactor."""
    @inlineCallbacks
    def start():
        opts = labrad.util.parseServerOptions(name=DataVault.name)
        connection = yield labrad.wrappers.connectAsync(
            host=opts['host'], port=int(opts['port']), password=opts['password'])
        datadir = yield load_settings(connection, opts['name'])
        yield connection.disconnect()
        store = SessionStore(datadir, hub=None)
        vault = DataVault(store)
        store.hub = vault
        # runServer will not start the reactor itself, but will stop it once
        # the data vault shuts down.
        labrad.util.runServer(vault, run_reactor=False, stop_reactor=True)
    _ = start()
    reactor.run()


if __name__ == '__main__':
    main()
| 3,464 | 1,063 |
# Generated by Django 2.2.16 on 2020-11-07 00:19
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the custom ``db_table`` overrides on the two Discord models.

    ``table=None`` reverts each model to Django's default auto-generated
    table name (``appname_modelname``).
    """

    dependencies = [
        ('atlantisbot_api', '0003_auto_20201107_0018'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='discordingamename',
            table=None,
        ),
        migrations.AlterModelTable(
            name='discorduser',
            table=None,
        ),
    ]
| 446 | 160 |
import requests
import pandas
try:
# Use ujson if available.
import ujson as json
except Exception:
import json
class OpenTSDBResponseSerie(object):
    """
    A single OpenTSDB response serie, i.e. one element of the response array.

    Params:
        **kwargs : raw fields of one response serie (metric, tags, dps, ...)
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    def id(self):
        """
        Canonical identifier for this serie.

        Returns:
            metric{sorted=tag,key=value}, or just the metric name when no tags.
        """
        if not self.tags:
            return self.metric
        tag_str = ",".join(["%s=%s" % (key, self.tags[key])
                            for key in sorted(self.tags.keys())])
        return "%s{%s}" % (self.metric, tag_str)

    def alias(self, functOrStr):
        """
        User specified alias using lambda functions or string formatting over
        the serie's metadata.  This function fails silently: any error while
        evaluating the alias yields the canonical ``id`` instead.

        Params:
            functOrStr : lambda function or python string format.  Lambda
                functions must begin with '!', e.g. !lambda x: x...

        Return:
            Formatted alias on success, the serie ``id`` on failure.
        """
        metadata = self.__flattenedMetadata()
        computed = ""
        if functOrStr.startswith("!"):
            # NOTE: eval of caller-supplied code -- only safe for trusted input.
            try:
                computed = eval(functOrStr[1:])(metadata)
            except Exception:
                pass
        else:
            try:
                computed = functOrStr % (metadata)
            except Exception:
                pass
        if computed == "":
            return self.id
        return computed

    def __flattenedMetadata(self):
        """Flatten metric name and tags into one dict for alias formatting."""
        flat = {"metric": self.metric}
        for key, value in self.tags.items():
            flat["tags.%s" % (key)] = value
        return flat

    def datapoints(self, convertTime=False):
        """
        Convert the raw dps mapping.

        Params:
            convertTime : whether to convert epoch seconds to pandas datetimes

        Return:
            dict of {time: value}
        """
        if convertTime:
            return {pandas.to_datetime(int(ts), unit='s'): val
                    for ts, val in self.dps.items()}
        return {int(ts): val for ts, val in self.dps.items()}
class OpenTSDBResponse(object):
    """ Complete OpenTSDB response (the /api/query array of series). """

    def __init__(self, otsdbResp):
        """
        Params:
            otsdbResp : raw opentsdb response as a JSON str/bytes, or an
                        already-decoded list/tuple of serie dicts.

        Raises:
            RuntimeError : if otsdbResp is neither a string nor a list/tuple.
        """
        # Check list/tuple BEFORE strings: the old code evaluated
        # `isinstance(otsdbResp, unicode)` for non-str input, which raises
        # NameError on Python 3.  Also accept bytes (json.loads handles both).
        if isinstance(otsdbResp, (list, tuple)):
            self._series = [OpenTSDBResponseSerie(**s) for s in otsdbResp]
        elif isinstance(otsdbResp, (str, bytes)):
            self._series = [OpenTSDBResponseSerie(**s) for s in json.loads(otsdbResp)]
        else:
            raise RuntimeError("Invalid type: %s" % (type(otsdbResp)))

    @property
    def series(self):
        """
        Yield each serie lazily (iterator for better memory management).
        """
        for s in self._series:
            yield s

    def DataFrame(self, aliasTransform=None, convertTime=False):
        """
        Converts an OpenTSDB array response into a DataFrame.

        Params:
            convertTime : whether to convert epoch to pandas datetime
            aliasTransform : lambda function or string format to customize
                             serie name i.e. alias

        Return:
            OpenTSDB response DataFrame (one column per serie)
        """
        if aliasTransform is None:
            return pandas.DataFrame(dict(
                [(s.id, s.datapoints(convertTime)) for s in self.series]))
        return pandas.DataFrame(dict(
            [(s.alias(aliasTransform), s.datapoints(convertTime)) for s in self.series]))
class BaseClient(object):
    """Builds OpenTSDB /api/query URLs for a given host/port."""

    def __init__(self, host, port=4242, ssl=False):
        """
        Params:
            host : OpenTSDB hostname
            port : OpenTSDB port (default 4242)
            ssl  : use https when True
        """
        scheme = "https" if ssl else "http"
        self.url = "%s://%s:%d" % (scheme, host, port)

    def queryUrl(self, **kwargs):
        """Return the full query URL for the given query parameters."""
        return str("%s/api/query?%s" % (self.url, self.__urlEncodedParams(**kwargs)))

    def __urlEncodedParams(self, aggr="sum", rate=False, counter=False, end=None, **kwargs):
        """
        Encode start/end, aggregator, rate/counter flags and tags into the
        query-string portion of an OpenTSDB 'm=' query.

        Required kwargs: ``start`` and ``metric``.  ``tags`` is optional and
        defaults to no tag filter (previously a missing ``tags`` raised
        KeyError).
        """
        timeStr = "start=%s" % (kwargs["start"])
        if end is not None:
            timeStr += "&end=%s" % (end)
        if rate:
            prefix = "%s:rate:%s" % (aggr, kwargs["metric"])
        elif counter:
            prefix = "%s:rate{counter,,1}:%s" % (aggr, kwargs["metric"])
        else:
            prefix = "%s:%s" % (aggr, kwargs["metric"])
        tags = kwargs.get("tags", {})
        tagsStr = ",".join(["%s=%s" % (k, tags[k]) for k in sorted(tags.keys())])
        if tagsStr != "":
            return "%s&m=%s{%s}" % (timeStr, prefix, tagsStr)
        return "%s&m=%s" % (timeStr, prefix)
class Client(BaseClient):
    """HTTP client that executes OpenTSDB queries."""

    def query(self, **kwargs):
        """Run a query; return OpenTSDBResponse on 2xx/3xx, the raw body text otherwise."""
        resp = requests.get(self.queryUrl(**kwargs))
        if 200 <= resp.status_code < 400:
            return OpenTSDBResponse(resp.text)
        # error: hand the raw body back for the caller to inspect
        return resp.text
| 5,465 | 1,562 |
import os
from os.path import dirname
from unittest import TestCase
import pytest
import src.superannotate as sa
class TestCloneProject(TestCase):
    """Integration test: sa.clone_project copies settings, classes and workflow."""

    PROJECT_NAME_1 = "test_create_like_project_1"
    PROJECT_NAME_2 = "test_create_like_project_2"
    PROJECT_DESCRIPTION = "desc"
    PROJECT_TYPE = "Vector"
    IMAGE_QUALITY = "original"
    PATH_TO_URLS = "data_set/attach_urls.csv"

    def setUp(self, *args, **kwargs):
        # Delete leftovers first, then create a fresh source project.
        self.tearDown()
        self._project_1 = sa.create_project(
            self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )

    def tearDown(self) -> None:
        sa.delete_project(self.PROJECT_NAME_1)
        sa.delete_project(self.PROJECT_NAME_2)

    def test_create_like_project(self):
        """Fully configure the source project, clone it, verify every copied part."""
        # Attach images and create an annotation class with two attribute groups.
        _, _, _ = sa.attach_image_urls_to_project(
            self.PROJECT_NAME_1,
            os.path.join(dirname(dirname(__file__)), self.PATH_TO_URLS),
        )
        sa.create_annotation_class(
            self.PROJECT_NAME_1,
            "rrr",
            "#FFAAFF",
            [
                {
                    "name": "tall",
                    "is_multiselect": 0,
                    "attributes": [{"name": "yes"}, {"name": "no"}],
                },
                {
                    "name": "age",
                    "is_multiselect": 0,
                    "attributes": [{"name": "young"}, {"name": "old"}],
                },
            ],
        )
        sa.set_project_default_image_quality_in_editor(self.PROJECT_NAME_1,self.IMAGE_QUALITY)
        sa.set_project_workflow(
            self.PROJECT_NAME_1,
            [
                {
                    "step": 1,
                    "className": "rrr",
                    "tool": 3,
                    "attribute": [
                        {
                            "attribute": {
                                "name": "young",
                                "attribute_group": {"name": "age"},
                            }
                        },
                        {
                            "attribute": {
                                "name": "yes",
                                "attribute_group": {"name": "tall"},
                            }
                        },
                    ],
                }
            ],
        )
        # Clone, then compare the copy against the source configuration.
        new_project = sa.clone_project(
            self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
        )
        source_project = sa.get_project_metadata(self.PROJECT_NAME_1)
        self.assertEqual(new_project['upload_state'], source_project['upload_state'])
        new_settings = sa.get_project_settings(self.PROJECT_NAME_2)
        image_quality = None
        for setting in new_settings:
            if setting["attribute"].lower() == "imagequality":
                image_quality = setting["value"]
                break
        self.assertEqual(image_quality,self.IMAGE_QUALITY)
        self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
        self.assertEqual(new_project["type"].lower(), "vector")
        ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
        self.assertEqual(len(ann_classes), 1)
        self.assertEqual(ann_classes[0]["name"], "rrr")
        self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
        # The single workflow step and both of its attributes must be copied.
        new_workflow = sa.get_project_workflow(self.PROJECT_NAME_2)
        self.assertEqual(len(new_workflow), 1)
        self.assertEqual(new_workflow[0]["className"], "rrr")
        self.assertEqual(new_workflow[0]["tool"], 3)
        self.assertEqual(len(new_workflow[0]["attribute"]), 2)
        self.assertEqual(new_workflow[0]["attribute"][0]["attribute"]["name"], "young")
        self.assertEqual(
            new_workflow[0]["attribute"][0]["attribute"]["attribute_group"]["name"],
            "age",
        )
        self.assertEqual(new_workflow[0]["attribute"][1]["attribute"]["name"], "yes")
        self.assertEqual(
            new_workflow[0]["attribute"][1]["attribute"]["attribute_group"]["name"],
            "tall",
        )
class TestCloneProjectAttachedUrls(TestCase):
    """Integration test: cloning a Document project logs a workflow-copy warning."""

    PROJECT_NAME_1 = "TestCloneProjectAttachedUrls_1"
    PROJECT_NAME_2 = "TestCloneProjectAttachedUrls_2"
    PROJECT_DESCRIPTION = "desc"
    PROJECT_TYPE = "Document"

    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Capture log records so the deprecation warning can be asserted on.
        self._caplog = caplog

    def setUp(self, *args, **kwargs):
        # Delete leftovers first, then create a fresh source project.
        self.tearDown()
        self._project_1 = sa.create_project(
            self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )

    def tearDown(self) -> None:
        sa.delete_project(self.PROJECT_NAME_1)
        sa.delete_project(self.PROJECT_NAME_2)

    def test_create_like_project(self):
        """Clone a Document project; verify classes and the logged warning."""
        sa.create_annotation_class(
            self.PROJECT_NAME_1,
            "rrr",
            "#FFAAFF",
            [
                {
                    "name": "tall",
                    "is_multiselect": 0,
                    "attributes": [{"name": "yes"}, {"name": "no"}],
                },
                {
                    "name": "age",
                    "is_multiselect": 0,
                    "attributes": [{"name": "young"}, {"name": "old"}],
                },
            ],
        )
        new_project = sa.clone_project(
            self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
        )
        self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
        self.assertEqual(new_project["type"].lower(), "document")
        ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
        self.assertEqual(len(ann_classes), 1)
        self.assertEqual(ann_classes[0]["name"], "rrr")
        self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
        self.assertIn("Workflow copy is deprecated for Document projects.",self._caplog.text)
| 5,865 | 1,784 |
import pyamg
from . import gmg_base
class ClassicalAMG(gmg_base.GMG):
    """Ruge-Stuben classical AMG (via pyamg) presented through the GMG interface."""

    def __init__(self, A, max_levels=10,
                 presmoother="jacobi", presmooth_par={"omega": 2./3, "iterations": 2, "withrho": False},
                 postsmoother=None, postsmooth_par=None,
                 cycle="V"):
        super().__init__(A, cycle, presmoother, presmooth_par,
                         postsmoother, postsmooth_par, max_levels)
        # Build the multilevel hierarchy once, then install the configured smoothers.
        self._amg_solver = pyamg.classical.classical.ruge_stuben_solver(
            A.to_csr(), max_levels=max_levels, max_coarse=1)
        pyamg.relaxation.smoothing.change_smoothers(
            self._amg_solver,
            presmoother=(self._presmoother, self._presmooth_par),
            postsmoother=(self._postsmoother, self._postsmooth_par))

    def _V_cycle(self, rhs, x):
        """Run one V-cycle of the prebuilt pyamg solver."""
        return self._amg_solver.solve(b=rhs, x0=x, maxiter=1, cycle="V")

    def __str__(self):
        return str(self._amg_solver)
"""
Code to plot average nearest neighbor distance between fish in a school as a function of group size - one line per water temperature.
"""
# imports
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pickle
from matplotlib import cm
import argparse
#argparse
def boolean_string(s):
    """argparse type: map the literal strings 'True'/'False' to booleans."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
# create the parser object
parser = argparse.ArgumentParser()
# NOTE: argparse will throw an error if:
# - a flag is given with no value
# - the value does not match the type
# and if a flag is not given it will be filled with the default.
parser.add_argument('-a', '--a_string', default='annd', type=str)
#parser.add_argument('-s', '--a_string', default='annd_std', type=str)
parser.add_argument('-b', '--integer_b', default=3, type=int)
parser.add_argument('-c', '--float_c', default=1.5, type=float)
parser.add_argument('-v', '--verbose', default=True, type=boolean_string)
# NOTE(review): only --a_string is read later in this script; --integer_b,
# --float_c and --verbose appear unused here -- confirm before removing.
# Note that you assign a short name and a long name to each argument.
# You can use either when you call the program, but you have to use the
# long name when getting the values back from "args".
# get the arguments
args = parser.parse_args()
# ---------------------------------------------------------------------------
# Map each metric name (-a / --a_string) to its y-axis label and plot options.
# Replaces the original ~80-branch if-chain with a lookup table; behaviour is
# identical, including leaving y_label undefined for an unknown metric name.
#   xx : 0 -> group sizes start at 1; 1 -> group sizes start at 2
#   h  : vertical spacing (hspace) between the two subplots
# ---------------------------------------------------------------------------
xx = 0
h = 0.3

# value = (y_label, xx_override, h_override); None keeps the default.
_LABELS = {
    'annd': ('ANND (Body Length)', 1, None),
    'speed': ('Speed (Body Length/s)', None, None),
    'acceleration': ('Acceleration (Body Length/s' + r'$^2$)', None, None),
    'polarization': ('Polarization', 1, None),
    'spikes': ('Number of \n startles', None, 0.4),
    'accurate': ('Number of \n accurate startles', None, 0.4),
    'latency': ('Latency (frames)', None, None),
    'local_pol': ('Local polarization', 1, None),
    'local_pol_m': ('Local polarization', 1, None),
    'dtc': ('Distance to center \n (pixels)', None, None),
    'dtc_roi': ('Distance to center \n (pixels)', None, None),
    'dtc_roi_norm': ('Distance to center \n (Body Length)', None, None),
    'unmasked_startles': ('No. of startles \n per unit unmasked time', None, None),
    'max_loom_speed': ('Maximum speed during loom \n (Body Length/s)', None, None),
    'max_loom_speed_low_pass': ('Maximum speed during loom \n (Body Length/s)', None, None),
    'max_loom_acc': ('Maximum acceleration during loom \n (Body Length/s' + r'$^2$)', None, None),
    'max_loom_acc_low_pass': ('Maximum acceleration during loom \n (Body Length/s' + r'$^2$)', None, None),
    'ratio_max_loom_speed_low_pass': ('Ratio of maximum \n speed during loom \n to before loom', None, None),
    'ratio_max_loom_acc_low_pass': ('Ratio of maximum \n acceleration during loom \n to before loom', None, None),
    'max_non_loom_speed_low_pass': ('Maximum speed before loom \n (Body Length/s)', None, None),
    'max_non_loom_acc_low_pass': ('Maximum acceleration before loom \n (Body Length/s' + r'$^2$)', None, None),
    'unmasked_startles_ratio': ('Proportion of accurate startles', None, None),
    'new_masked_startles_ratio': ('Proportion of accurate startles', None, None),
    'prop_startles': ('Proportion of individuals \n that startle', 1, None),
    'prop_startles_new_mask': ('Proportion of individuals \n that startle', 1, None),
    'prop_startles_no_nan_new_mask': ('Proportion of individuals \n that startle', 1, None),
    'loom_startles': ('Number of startles \n per fish during loom', None, None),
    'loom_startles_normalized': ('Number of startles \n per fish during loom', None, None),
    'preloom_startles_normalized': ('Number of startles \n per fish before loom', None, None),
    'nonloom_startles_normalized': ('Number of startles \n per fish between looms', None, None),
    'ind_startle_speed': ('Maximum startle speed \n (Body Length/s)', None, None),
    'ind_median_speed': ('Median speed before loom \n (Body Length/s)', None, None),
    'ind_ratio_speed': ('Ratio of max startle speed \n to median speed before loom', None, None),
}

# Percentile families share one label pattern each; *_low_pass variants use
# identical labels to the unfiltered metrics.
for _p in (60, 70, 80, 90, 99, 100):
    _speed_label = '%dth percentile of speed \n (Body Length/s)' % _p
    _acc_label = '%dth percentile of \n acceleration \n (Body Length/s' % _p + r'$^2$)'
    for _suffix in ('', '_low_pass'):
        _LABELS['percentile_speed%s%d' % (_suffix, _p)] = (_speed_label, None, None)
        _LABELS['percentile_acc%s%d' % (_suffix, _p)] = (_acc_label, None, None)

for _p in (99, 90):
    _ls = '%dth percentile of \n speed during loom \n (Body Length/s)' % _p
    _la = '%dth percentile of \n acceleration during loom \n (Body Length/s' % _p + r'$^2$)'
    for _suffix in ('', '_low_pass'):
        _LABELS['loom_speed%d%s' % (_p, _suffix)] = (_ls, None, None)
        _LABELS['loom_acc%d%s' % (_p, _suffix)] = (_la, None, None)
    _LABELS['non_loom_speed%d_low_pass' % _p] = (
        '%dth percentile of \n speed before loom \n (Body Length/s)' % _p, None, None)
    _LABELS['non_loom_acc%d_low_pass' % _p] = (
        '%dth percentile of \n acceleration before loom \n (Body Length/s' % _p + r'$^2$)', None, None)

for _p in (99, 90, 50):
    _LABELS['ratio_loom_speed%d_low_pass' % _p] = (
        'Ratio of %dth percentile of \n speed during loom \n to before loom' % _p, None, None)
    _LABELS['ratio_loom_acc%d_low_pass' % _p] = (
        'Ratio of %dth percentile of \n acceleration during loom \n to before loom' % _p, None, None)

if args.a_string in _LABELS:
    y_label, _xx_override, _h_override = _LABELS[args.a_string]
    if _xx_override is not None:
        xx = _xx_override
    if _h_override is not None:
        h = _h_override
# NOTE: as before, an unknown -a value leaves y_label undefined and the
# script fails later at first use.
# ---------------------------------------------------------------------------
# Load the metric means and standard deviations, then draw two panels:
#   (a) metric vs group size, one line per water temperature
#   (b) metric vs temperature, one line per group size
# ---------------------------------------------------------------------------
in_dir1 = '../../output/temp_collective/roi/' + args.a_string + '.p'
in_dir2 = '../../output/temp_collective/roi/' + args.a_string + '_std.p'
out_dir = '../../output/temp_collective/roi_figures/' + args.a_string + '.png'
# Use context managers so the pickle file handles are closed promptly
# (the original `pickle.load(open(...))` left them dangling).
with open(in_dir1, 'rb') as f:
    annd_values = pickle.load(f)
with open(in_dir2, 'rb') as f:
    std_annd_values = pickle.load(f)
temperature = [9, 13, 17, 21, 25, 29]
if xx == 0:
    group = [1, 2, 4, 8, 16]
else:
    group = [2, 4, 8, 16]
x = 5  # number of group sizes to plot (5 covers group sizes up to 16)
# Plotting
lw = 1.25
fs = 12
colors = plt.cm.viridis(np.linspace(0, 1, 6))
plt.close('all')  # always start from a clean figure state
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(211)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=h)
# Panel (a): one line per temperature, shaded band = +/- one std.
for i in range(6):
    ax.plot(group[:x], annd_values[i, :x], label=str(temperature[i]) + r'$^{\circ}$C',
            linewidth=lw, color=colors[i])
    ax.fill_between(group[:x], annd_values[i, :x] - std_annd_values[i, :x],
                    annd_values[i, :x] + std_annd_values[i, :x], alpha=0.3, color=colors[i])
plt.xlabel('Group Size', size=fs)
plt.ylabel(y_label, size=fs)
plt.xscale('log', basex=2)  # NOTE(review): `basex` was renamed `base` in matplotlib >= 3.3
if xx == 0:
    plt.xticks(ticks=[1, 2, 4, 8, 16], labels=[1, 2, 4, 8, 16])
else:
    plt.xticks(ticks=[2, 4, 8, 16], labels=[2, 4, 8, 16])
ax.tick_params(labelsize=.9 * fs)
ax.set_title('a)', loc='left', fontsize=fs)
plt.legend(fontsize=fs, loc='upper right', title='Water Temperature', framealpha=0.5)
x = 6  # now the number of temperatures to plot
colors = plt.cm.viridis(np.linspace(0, 1, 5))  # 5 for gs upto 16
ax = fig.add_subplot(212)
# Panel (b): one line per group size (first four entries of `group`).
for i in range(4):
    ax.plot(temperature[0:x], annd_values[0:x, i], label=str(group[i]),
            linewidth=lw, color=colors[i])
    ax.fill_between(temperature[0:x], annd_values[0:x, i] - std_annd_values[0:x, i],
                    annd_values[0:x, i] + std_annd_values[0:x, i], alpha=0.3, color=colors[i])
plt.xlabel('Temperature ' + r'($^{\circ}$C)', size=fs)
plt.locator_params(axis='x', nbins=5)
plt.ylabel(y_label, size=fs)
plt.xticks(ticks=[9, 13, 17, 21, 25, 29], labels=[9, 13, 17, 21, 25, 29])
ax.tick_params(labelsize=.9 * fs)
ax.set_title('b)', loc='left', fontsize=fs)
plt.legend(fontsize=fs, loc='upper right', title='Group Size', framealpha=0.5)
fig.savefig(out_dir, dpi=300)
plt.show()
"""
#SPEED
in_dir1 = '../../output/temp_collective/roi/average_speed.p'
speed_values = pickle.load(open(in_dir1, 'rb')) # 'rb is for read binary
in_dir2 = '../../output/temp_collective/roi/speed_std.p'
out_dir = '../../output/temp_collective/roi_figures/speed.png'
std_speed = pickle.load(open(in_dir2, 'rb')) # 'rb is for read binary
temperature = [29,25,21,17,13,9]
group = [1,2,4,8,16]
x = 5
#Plotting
lw=1.25
fs=14
colors = plt.cm.viridis_r(np.linspace(0,1,6))
plt.close('all') # always start by cleaning up
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(211)
for i in range(6):
ax.plot(group[0:x], speed_values[i,0:x], label = str(temperature[i])+ r'$^{\circ}$C', linewidth = lw, color = colors[i])
ax.fill_between(group[0:x], speed_values[i,0:x] - std_speed[i,0:x], speed_values[i,0:x] + std_speed[i,0:x], alpha = 0.3, color = colors[i])
plt.xlabel('Group Size', size = 0.9*fs)
plt.ylabel('Speed (Body Length/s)', size = 0.9*fs)
ax.tick_params(labelsize=.8*fs)
ax.set_title('a)', loc='left', fontsize = fs)
plt.legend(fontsize=fs, loc='upper right', title = 'Water Temperature')
x=6
colors = plt.cm.viridis(np.linspace(0,1,5))
ax = fig.add_subplot(212)
for i in range(1,5):
ax.plot(temperature[0:x], speed_values[0:x,i], label = str(group[i]), linewidth = lw, color = colors[i])
ax.fill_between(temperature[0:x], speed_values[0:x,i] - std_speed[0:x,i], speed_values[0:x,i] + std_speed[0:x,i], alpha = 0.3, color = colors[i])
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = 0.9*fs)
plt.locator_params(axis='x', nbins=5)
plt.ylabel('Speed (Body Length/s)', size = 0.9*fs)
ax.tick_params(labelsize=.8*fs)
ax.set_title('b)', loc='left', fontsize = fs)
plt.legend(fontsize=fs, loc='upper right', title = 'Group Size')
#fig.suptitle('Average Nearest Neighbor Distance (ANND)', size = 1.5*fs)
fig.savefig(out_dir)
plt.show()
"""
| 13,601 | 5,733 |
"""
.. module:: tools.__init__
:synopsis: This package contains tools for handling results obtained with the
main SModelS code.
"""
| 144 | 39 |
"""Author: Brandon Trabucco.
Utility class for loading and managing locations in the robot's map.
"""
import json
import math
import rospy
from rt_msgs.msg import Odom
from std_msgs.msg import Header
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion
from image_caption_machine.msg import WorldPlace
from image_caption_machine.convert.message import convert_ros_message_to_dictionary
from image_caption_machine.convert.message import convert_dictionary_to_ros_message
class Place(object):
    """Utility class for managing physical named locations on the robot's map.
    """

    def __init__(self, name="default", pose_stamped=None,
            x=None, y=None,
            json=None, msg=None):
        """Initialize the class with default parameters.
        Args:
            name: str REQUIRED
            pose_stamped: PoseStamped; a zeroed pose is created when omitted
            x: float, overrides the pose's x position
            y: float, overrides the pose's y position
            json: {name: "...", pose_stamped: {...}}
            msg: WorldPlace message
        """
        self.name = name
        # Build a fresh default pose per instance: the old shared default
        # argument was mutated in place below whenever x/y were given,
        # contaminating every later Place() created without a pose
        # (mutable-default-argument bug).
        if pose_stamped is None:
            pose_stamped = PoseStamped(
                Header(0, rospy.Time(secs=0, nsecs=0), "None"),
                Pose(Point(0.0, 0.0, 0.0),
                     Quaternion(0.0, 0.0, 0.0, 0.0)))
        self.pose_stamped = pose_stamped
        if x is not None:
            self.pose_stamped.pose.position.x = x
        if y is not None:
            self.pose_stamped.pose.position.y = y
        if json is not None:
            self.json = json
        if msg is not None:
            self.msg = msg

    @property
    def json(self):
        """Serialize the place to a json-compatible dict.
        """
        return {"name": self.name, "pose_stamped":
            convert_ros_message_to_dictionary(self.pose_stamped)}

    @json.setter
    def json(self, val):
        """Load a serialized place dict into this object.
        """
        self.name = val["name"]
        self.pose_stamped = convert_dictionary_to_ros_message(
            "geometry_msgs/PoseStamped", val["pose_stamped"])

    @property
    def msg(self):
        """Utility to convert Place() to a WorldPlace message.
        """
        return WorldPlace(name=self.name, pose_stamped=self.pose_stamped)

    @msg.setter
    def msg(self, val):
        """Utility to load a WorldPlace message into this Place().
        """
        self.name = val.name
        self.pose_stamped = val.pose_stamped

    @property
    def x(self):
        """Helper to get the x position.
        """
        return self.pose_stamped.pose.position.x

    @property
    def y(self):
        """Helper to get the y position.
        """
        return self.pose_stamped.pose.position.y

    def to(self, other):
        """Euclidean distance in the map plane to another place.
        Args:
            other: Place() object
        """
        dx = self.x - other.x
        dy = self.y - other.y
        return math.sqrt((dx * dx) + (dy * dy))

    def __str__(self):
        """Helper to convert the object to string (its name).
        """
        return self.name
| 3,106 | 982 |
from rest_framework import serializers
from joplin_web.models import Folders, Notes, Tags, NoteTags, Version
class FoldersSerializer(serializers.ModelSerializer):
    """Serializer for Joplin folders.

    ``nb_notes`` is read-only; presumably annotated onto the queryset
    by the view (note count per folder) -- confirm against the caller.
    """
    nb_notes = serializers.IntegerField(read_only=True)
    class Meta:
        fields = ('id', 'title', 'parent_id', 'nb_notes', 'created_time')
        model = Folders
class NotesSerializer(serializers.ModelSerializer):
    """Serializer for Joplin notes.

    Reads expose the parent folder as a nested ``parent`` object;
    writes go through ``parent_id`` (primary key resolved against the
    'joplin' database alias, mapped onto the model's ``folders``
    relation).
    """
    parent = FoldersSerializer(read_only=True)
    parent_id = serializers.PrimaryKeyRelatedField(queryset=Folders.objects.using('joplin').all(),
                                                   source='folders',
                                                   write_only=True)
    class Meta:
        fields = ('id', 'parent_id', 'parent', 'title', 'body',
                  'is_todo', 'todo_due',
                  'created_time', 'updated_time',
                  'source', 'source_application',
                  'latitude', 'longitude', 'altitude',
                  'author')
        model = Notes
class TagsSerializer(serializers.ModelSerializer):
    """Serializer for Joplin tags; exposes every model field plus a
    read-only ``nb_notes`` count (presumably a queryset annotation --
    confirm against the view).
    """
    nb_notes = serializers.IntegerField(read_only=True)
    class Meta:
        fields = '__all__'
        model = Tags
class NoteTagsSerializer(serializers.ModelSerializer):
    """Serializer for the note<->tag relation.

    Nested ``note``/``tag`` objects are read-only; ``note_id`` and
    ``tag_id`` are the write-only primary-key fields (resolved against
    the 'joplin' database alias).
    """
    note = NotesSerializer(read_only=True)
    tag = TagsSerializer(read_only=True)
    note_id = serializers.PrimaryKeyRelatedField(
        queryset=Notes.objects.using('joplin').all(), source='notes', write_only=True)
    tag_id = serializers.PrimaryKeyRelatedField(
        queryset=Tags.objects.using('joplin').all(), source='tags', write_only=True)
    class Meta:
        fields = ('id', 'note_id', 'note', 'tag_id', 'tag', 'created_time',
                  'updated_time', 'user_created_time', 'user_updated_time',
                  'encryption_cipher_text', 'encryption_applied')
        model = NoteTags
class NoteTagsByNoteIdSerializer(serializers.ModelSerializer):
    """Slim serializer returning only the nested tag of a NoteTags row
    (used when listing the tags of a single note).
    """
    tag = TagsSerializer(read_only=True)
    class Meta:
        fields = ('tag',)
        model = NoteTags
class VersionSerializer(serializers.ModelSerializer):
    """Read-only serializer exposing the Joplin schema version number."""
    version = serializers.IntegerField()
    class Meta:
        fields = ('version', )
        read_only_fields = ('version', )
        model = Version
| 2,238 | 646 |
import datetime
from app.constants import Constants as c
from app.input import InputMonthly
from app.output import OutputFactory
from app.create import CreatorUtility, SolverMIP

# Wire the monthly creator up with its input source, output writer and
# MIP solver backend.
inp = InputMonthly()
out = OutputFactory()
solv = SolverMIP()
creator = CreatorUtility(inp, out, solv)
# Plan the whole of January 2019.
# BUG FIX: the end date was written date(2019, 31, 1) -- month=31 --
# which raises ValueError at runtime; the intended end date is
# 31 January 2019.
settings = {c.START: datetime.date(2019, 1, 1),
            c.END: datetime.date(2019, 1, 31),
            c.EXCEL_OUT: True}
creator.create(settings)
| 435 | 155 |
"""
NCL_sat_3.py
================
This script illustrates the following concepts:
- zooming into an orthographic projection
- plotting filled contour data on an orthographic map
- plotting lat/lon tick marks on an orthographic map
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/sat_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/sat_3_lg.png
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import geocat.datafiles as gdf
###############################################################################
# Import packages:
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
import xarray as xr
from geocat.viz import util as gvutil
###############################################################################
# Define a helper function for plotting lat/lon ticks on an orthographic plane
def plotOrthoTicks(coords, loc):
    """Draw lat/lon tick labels on the module-level orthographic axes ``ax``.

    Each entry in *coords* is a (lon, lat) pair; *loc* selects which
    label style and alignment to use. Unknown *loc* values are ignored,
    matching the original chained-``if`` behavior.
    """
    # (format template, which value to format, vertical align, horizontal align)
    styles = {
        'zero': ('{0}\N{DEGREE SIGN}', 'lon', 'bottom', 'center'),
        'left': ('{0}\N{DEGREE SIGN} N ', 'lat', 'center', 'right'),
        'right': ('{0}\N{DEGREE SIGN} N ', 'lat', 'center', 'left'),
        'top': ('{0}\N{DEGREE SIGN} W ', 'neg_lon', 'bottom', 'center'),
        'bottom': ('{0}\N{DEGREE SIGN} W ', 'neg_lon', 'top', 'center'),
    }
    if loc not in styles:
        return
    template, which, va, ha = styles[loc]
    for lon, lat in coords:
        value = {'lon': lon, 'lat': lat, 'neg_lon': -lon}[which]
        ax.text(lon,
                lat,
                template.format(value),
                va=va,
                ha=ha,
                transform=ccrs.PlateCarree())
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and
# load the data into xarrays
ds = xr.open_dataset(gdf.get('netcdf_files/h_avg_Y0191_D000.00.nc'),
                     decode_times=False)
# Extract a slice of the data
t = ds.T.isel(time=0, z_t=0)
###############################################################################
# Plot:
plt.figure(figsize=(8, 8))
# Create an axis with an orthographic projection
ax = plt.axes(projection=ccrs.Orthographic(central_longitude=-35,
                                           central_latitude=60),
              anchor='C')
# Set extent of map
ax.set_extent((-80, -10, 30, 80), crs=ccrs.PlateCarree())
# Add natural feature to map
ax.coastlines(resolution='110m')
ax.add_feature(cfeature.LAND, facecolor='lightgray', zorder=3)
ax.add_feature(cfeature.COASTLINE, linewidth=0.2, zorder=3)
ax.add_feature(cfeature.LAKES,
               edgecolor='black',
               linewidth=0.2,
               facecolor='white',
               zorder=4)
# plot filled contour data
# (zorder=1 keeps the contours below the land/lake features added above)
heatmap = t.plot.contourf(ax=ax,
                          transform=ccrs.PlateCarree(),
                          levels=80,
                          vmin=-1.5,
                          vmax=28.5,
                          cmap='RdGy',
                          add_colorbar=False,
                          zorder=1)
# Add color bar
# Ticks every 3 units, spanning the vmin/vmax range set above.
cbar_ticks = np.arange(-1.5, 31.5, 3)
cbar = plt.colorbar(heatmap,
                    orientation='horizontal',
                    extendfrac=[0, .1],
                    shrink=0.8,
                    aspect=14,
                    pad=0.05,
                    extendrect=True,
                    ticks=cbar_ticks)
cbar.ax.tick_params(labelsize=10)
# Get rid of black outline on colorbar
cbar.outline.set_visible(False)
# Set main plot title
main = r"$\bf{Example}$" + " " + r"$\bf{of}$" + " " + r"$\bf{Zooming}$" + \
    " " + r"$\bf{a}$" + " " + r"$\bf{Sat}$" + " " + r"$\bf{Projection}$"
# Set plot subtitles using NetCDF metadata
left = t.long_name
right = t.units
# Use geocat-viz function to create main, left, and right plot titles
title = gvutil.set_titles_and_labels(ax,
                                     maintitle=main,
                                     maintitlefontsize=16,
                                     lefttitle=left,
                                     lefttitlefontsize=14,
                                     righttitle=right,
                                     righttitlefontsize=14,
                                     xlabel="",
                                     ylabel="")
# Plot gridlines
gl = ax.gridlines(color='black', linewidth=0.2, zorder=2)
# Set frequency of gridlines in the x and y directions
gl.xlocator = mticker.FixedLocator(np.arange(-180, 180, 15))
gl.ylocator = mticker.FixedLocator(np.arange(-90, 90, 15))
# Manually plot tick marks.
# NCL has automatic tick mark placement on orthographic projections,
# Python's cartopy module does not have this functionality yet.
plotOrthoTicks([(0, 81.7)], 'zero')
plotOrthoTicks([(-80, 30), (-76, 20), (-88, 40), (-107, 50)], 'left')
plotOrthoTicks([(-9, 30), (-6, 40), (1, 50), (13, 60)], 'right')
plotOrthoTicks([(-120, 60), (-60, 82.5)], 'top')
plotOrthoTicks([(-75, 16.0), (-60, 25.0), (-45, 29.0), (-30, 29.5),
                (-15, 26.5)], 'bottom')
plt.tight_layout()
plt.show()
| 5,859 | 1,886 |
from bot import db
def save(data):
    """Upsert a Slack team's OAuth install record.

    :param data: mapping with ``team_id``, ``access_token``,
        ``user_id``, ``team_name`` and ``scope`` keys.
    :returns: the ``team_id`` of the inserted/updated row.
    """
    sql = """
    insert into clippingsbot.teams (
        team_id, access_token, user_id, team_name, scope
    ) values (
        :team_id, :access_token, :user_id, :team_name, :scope
    ) on conflict (team_id) do update
    set scope = excluded.scope,
        access_token = excluded.access_token,
        user_id = excluded.user_id,
        team_name = excluded.team_name
    returning team_id
    """
    return db.scalar(sql, **data)
def find(team_id):
    """Return the team row for *team_id*, or the driver's no-row value."""
    return db.find_one(
        'select * from clippingsbot.teams where team_id = :team_id',
        team_id = team_id)
def watch(team, channel_id, pattern, pattern_id):
    """Register *pattern* for a team channel (no-op when already present).

    ``display_pattern`` stores the user-visible spelling; ``pattern_id``
    identifies the canonical pattern.
    """
    sql = """
    insert into clippingsbot.team_patterns (
        team_id, channel_id, pattern_id, display_pattern
    )
    values (:team_id, :channel_id, :pattern_id, :pattern)
    on conflict (team_id, channel_id, pattern_id) do nothing
    """
    db.execute(
        sql, team_id=team['team_id'], channel_id=channel_id,
        pattern_id=pattern_id, pattern=pattern
    )
def find_patterns(team, channel_id):
    """Return all watched patterns for one channel of *team*."""
    sql = """
    select * from
        clippingsbot.team_patterns
    where team_id = :team_id and channel_id = :channel_id
    """
    return db.find(sql, team_id=team['team_id'], channel_id=channel_id)
def count_other_channel_patterns(team, channel_id):
    """Count the team's patterns registered in channels other than
    *channel_id*."""
    sql = """
    select count(*)
    from clippingsbot.team_patterns
    where team_id = :team_id and channel_id != :channel_id
    """
    return db.scalar(sql, team_id=team['team_id'], channel_id=channel_id)
def count_patterns(team):
    """Count every pattern the team watches across all channels."""
    sql = """
    select count(*) from clippingsbot.team_patterns where team_id = :team_id
    """
    return db.scalar(sql, team_id=team['team_id'])
def stop(team, channel_id, pattern):
    """Remove a watched pattern, matched case-insensitively against the
    stored ``display_pattern``."""
    sql = """
    delete from clippingsbot.team_patterns
    where team_id = :team_id and channel_id = :channel_id
        and lower(display_pattern) = lower(:pattern)
    """
    db.execute(sql, team_id=team['team_id'], channel_id=channel_id,
               pattern=pattern)
| 2,047 | 692 |
import typing
from operator import itemgetter
from http_types import HttpExchange
from jsonpath_rw import parse
from openapi_typed_2 import OpenAPIObject, convert_from_openapi, convert_to_openapi
from meeshkan.nlp.data_extractor import DataExtractor
from meeshkan.nlp.entity_extractor import EntityExtractor
from meeshkan.nlp.ids.id_classifier import IdClassifier, IdType
from meeshkan.nlp.operation_classifier import OperationClassifier
from meeshkan.nlp.spec_normalizer import SpecNormalizer
class SpecTransformer:
    """Post-processes an inferred OpenAPI spec using recorded traffic.

    Combines entity extraction, path normalization, id detection and
    operation classification, decorating the spec with
    ``x-meeshkan-*`` vendor extensions.
    """

    def __init__(
        self,
        extractor: EntityExtractor,
        path_analyzer,
        normalizer: SpecNormalizer,
        id_classifier: IdClassifier,
    ):
        self._extractor = extractor
        self._path_analyzer = path_analyzer
        self._normalizer = normalizer
        self._operation_classifier = OperationClassifier()
        self._id_classifier = id_classifier
        self._data_extractor = DataExtractor()

    def optimize_spec(
        self, spec: OpenAPIObject, recordings: typing.List[HttpExchange]
    ) -> OpenAPIObject:
        """Run the full optimization pipeline over *spec*.

        :param spec: inferred OpenAPI spec.
        :param recordings: recorded HTTP exchanges used as evidence.
        :returns: a new, optimized OpenAPIObject.
        """
        entity_paths = self._extractor.get_entity_from_spec(spec)
        spec_dict = convert_from_openapi(spec)
        datapaths, spec_dict = self._normalizer.normalize(spec_dict, entity_paths)
        grouped_records = self._data_extractor.group_records(spec_dict, recordings)
        spec_dict = self._replace_path_ids(spec_dict, grouped_records)
        spec_dict = self._operation_classifier.fill_operations(spec_dict)
        data = self._data_extractor.extract_data(datapaths, grouped_records)
        spec_dict = self._add_entity_ids(spec_dict, data)
        spec_dict = self._inject_data(spec_dict, data)
        return convert_to_openapi(spec_dict)

    def _replace_path_ids(self, spec, grouped_records):
        """Rename at most one id-like path parameter per path to ``{id}``
        and tag it with ``x-meeshkan-id-type``.
        """
        for pathname, path_record in grouped_records.items():
            # Walk the parameters right-to-left so the right-most one
            # is considered first; stop at the first id-like match.
            for param in reversed(path_record.path_args):
                res = self._id_classifier.by_values(path_record.path_arg_values[param])
                if res != IdType.UNKNOWN:
                    path_item = spec["paths"].pop(pathname)
                    for param_desc in path_item["parameters"]:
                        if param_desc["name"] == param:
                            param_desc["name"] = "id"
                            param_desc["x-meeshkan-id-type"] = res.value
                            break
                    pathname = pathname.replace("{{{}}}".format(param), "{id}")
                    spec["paths"][pathname] = path_item
                    break
        return spec

    def _add_entity_ids(self, spec_dict, data):
        """Pick the most id-like property of each entity schema and record
        it as ``x-meeshkan-id-path`` / ``x-meeshkan-id-type``.
        """
        for name, values in data.items():
            schema = spec_dict["components"]["schemas"][name]
            potential_ids = []
            # (Renamed from ``property``, which shadowed the builtin.)
            for prop_name in schema["properties"]:
                name_score = self._id_classifier.by_name(name, prop_name)
                if name_score > 0:
                    res = self._id_classifier.by_values(
                        (v[prop_name] for v in values if prop_name in v)
                    )
                    if res != IdType.UNKNOWN:
                        potential_ids.append((prop_name, res, name_score))
            if len(potential_ids) > 0:
                # Highest name score wins.
                best = max(potential_ids, key=itemgetter(2))
                schema["x-meeshkan-id-path"] = best[0]
                schema["x-meeshkan-id-type"] = best[1].value
        return spec_dict

    def _inject_data(self, spec_dict, data):
        """Attach recorded entity values under ``x-meeshkan-data``,
        de-duplicated by id value (last record wins).
        """
        spec_dict["x-meeshkan-data"] = {}
        for name, values in data.items():
            expr = parse(spec_dict["components"]["schemas"][name]["x-meeshkan-id-path"])
            injected_values = dict()
            for val in values:
                matches = expr.find(val)
                if len(matches) > 0:
                    injected_values[matches[0].value] = val
            spec_dict["x-meeshkan-data"][name] = list(injected_values.values())
        return spec_dict
| 3,947 | 1,150 |
from flask import render_template, request
from . import main
| 62 | 15 |
import os
import sys
import torch.nn as nn
if True:
    # The ``if True`` wrapper keeps the sys.path mangling visually and
    # lexically grouped with the import that depends on it (presumably
    # to stop import-sorting tools from hoisting it) -- confirm before
    # removing.
    DDLNN_HOME = os.environ['DDLNN_HOME']
    meta_rule_home = '{}/src/meta_rule/'.format(DDLNN_HOME)
    src_rule_home = '{}/dd_lnn/'.format(DDLNN_HOME)
    sys.path.append(meta_rule_home)
    sys.path.append(src_rule_home)
    from lnn_operators \
        import and_lukasiewicz, \
        and_lukasiewicz_unconstrained, and_lukasiewicz_lambda
EPS = 1e-10  # small constant added to activations (see forward_eval)
class SimpleAndLNN(nn.Module):
    """A single Lukasiewicz AND neuron wrapped as a torch module.

    One of three operator variants is instantiated depending on whether
    the weights are constrained and whether the lambda (penalty)
    formulation is used.
    """

    def __init__(self, arity=4, use_slack=True, alpha=0.95, constrained=True,
                 use_lambda=True):
        super().__init__()
        if use_lambda:
            assert constrained, \
                'Lambda LNN can only be used for constrained version'
        self.alpha = alpha
        self.use_slack = use_slack
        self.arity = arity
        self.constrained = constrained
        self.use_lambda = use_lambda
        # Select the operator variant, then build it with shared args.
        if not constrained:
            make_node = and_lukasiewicz_unconstrained
        elif use_lambda:
            make_node = and_lukasiewicz_lambda
        else:
            make_node = and_lukasiewicz
        self.and_node = make_node(alpha, arity, use_slack)

    def forward(self, x):
        """Evaluate the AND node on *x*; returns (prediction, slack)."""
        pred, slack = self.and_node(x)
        return pred, slack

    def extract_weights(self, normed=True, verbose=False):
        """Return (beta, argument weights) of the AND node.

        When *normed*, weights are scaled so the largest equals 1.
        """
        if not self.constrained:
            beta, wts = self.and_node.get_params()
        elif self.use_lambda:
            beta, wts = self.and_node.get_params()
        else:
            beta, wts, _slacks = self.and_node.cdd()
        if normed:
            wts = wts / wts.max()
        if verbose:
            print('beta : ' + str(beta.item()))
            print('argument weights : ' + str(wts.detach()))
        return beta, wts
class PolicyLNNTWC_SingleAnd(nn.Module):
    """Per-verb policy: one SimpleAndLNN model per admissible verb.

    ``admissible_verbs`` maps verb -> arity; the arity selects the
    input width via ``num_by_arity`` (default ``{1: 6, 2: 12}``).
    """

    def __init__(self,
                 admissible_verbs,
                 use_constraint=True,
                 num_by_arity=None):
        super().__init__()
        alpha = 0.95
        use_slack = True
        self.alpha = alpha
        self.use_slack = use_slack
        self.use_constraint = use_constraint
        self.admissible_verbs = admissible_verbs
        self.models = nn.ModuleDict()
        if num_by_arity is None:
            self.total_inputs = {1: 6, 2: 12}
        else:
            self.total_inputs = num_by_arity
        for v, arity in admissible_verbs.items():
            self.init_model_for_verb(v, self.total_inputs[arity])

    def init_model_for_verb(self, v, nb_inputs):
        """Create the LNN model for verb *v* with *nb_inputs* inputs.

        BUG FIX: ``use_slack`` was previously passed ``self.alpha``
        (0.95); being truthy it behaved like True, but the intended
        value is clearly the configured ``self.use_slack`` flag.
        """
        self.models[v] = \
            SimpleAndLNN(arity=nb_inputs, use_slack=self.use_slack,
                         alpha=self.alpha, constrained=self.use_constraint)

    def compute_constraint_loss(self, lnn_model_name='go', lam=0.0001):
        """Constraint penalty for one verb model; 0.0 when the model's
        AND node has no lambda set."""
        return \
            self.models[lnn_model_name].\
            and_node.compute_constraint_loss(lam=lam)\
            if self.models[lnn_model_name].and_node.lam else 0.0

    def forward_eval(self, x, lnn_model_name='go', split=True):
        """Evaluate one verb model on *x* and return a (1, N) row of
        activations; EPS keeps them strictly above zero."""
        out, _ = self.models[lnn_model_name](x)
        activations = out.view(1, -1) + EPS
        return activations
| 3,216 | 1,093 |
import moto
import boto3
@moto.mock_sns
def test_topic_should_receive_message_assertion():
    """Exercise custom ``should.receive`` assertions on an SNS topic.

    Runs against a moto-mocked SNS backend.  ``topic.should`` /
    ``topic.should_not`` are presumably injected onto boto3 resources
    by a sure-style assertion plugin -- confirm against the project's
    test setup.
    """
    arn = boto3.client("sns").create_topic(Name="foo")["TopicArn"]
    topic = boto3.resource("sns").Topic(arn)
    with topic.should.receive("foo"):
        topic.publish(Message="foo")
    with topic.should_not.receive("bar"):
        topic.publish(Message="foo")
| 359 | 131 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 21:22:49 2021
@author: Hrishikesh Terdalkar
"""
###############################################################################
# Package metadata.
__author__ = """Hrishikesh Terdalkar"""
__email__ = 'hrishikeshrt@linuxmail.org'
__version__ = '0.0.2'
###############################################################################
from .stopwatch import Stopwatch
| 423 | 144 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v3.1.3),
on June 24, 2019, at 16:21
If you publish work using this script please cite the PsychoPy publications:
Peirce, JW (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '3.1.3'
expName = 'stroop'  # from the Builder filename that created this script
expInfo = {'session': '01', 'participant': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
# NOTE(review): generated code; idiomatic form would be ``if not dlg.OK``.
if dlg.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' % (expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath='C:\\Users\\lpzdb\\pavloviaDemos\\stroop\\stroop.py',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file
endExpNow = False  # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
    size=[1920, 1080], fullscr=True, screen=0,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor='testMonitor', color='black', colorSpace='rgb',
    blendMode='avg', useFBO=True,
    units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
# NOTE(review): ``is not None`` would be the idiomatic test here.
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "instruct"
# (Trailing semicolons below are artifacts of the Builder code generator.)
instructClock = core.Clock()
instrText = visual.TextStim(win=win, name='instrText',
    text='OK. Ready for the real thing?\n\nRemember, ignore the word itself; press:\nLeft for red LETTERS\nDown for green LETTERS\nRight for blue LETTERS\n(Esc will quit)\n\nPress any key to continue',
    font='Arial',
    units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR',
    depth=0.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
word = visual.TextStim(win=win, name='word',
    text='default text',
    font='Arial',
    units='height', pos=[0, 0], height=0.15, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR',
    depth=0.0);
# Initialize components for Routine "thanks"
thanksClock = core.Clock()
thanksText = visual.TextStim(win=win, name='thanksText',
    text='This is the end of the experiment.\n\nThanks!',
    font='Arial',
    units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR',
    depth=0.0);
# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "instruct"-------
t = 0
instructClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
ready = keyboard.Keyboard()
# keep track of which components have finished
instructComponents = [instrText, ready]
for thisComponent in instructComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "instruct"-------
while continueRoutine:
    # get current time
    t = instructClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *instrText* updates
    if t >= 0 and instrText.status == NOT_STARTED:
        # keep track of start time/frame for later
        instrText.tStart = t  # not accounting for scr refresh
        instrText.frameNStart = frameN  # exact frame index
        win.timeOnFlip(instrText, 'tStartRefresh')  # time at next scr refresh
        instrText.setAutoDraw(True)
    # *ready* updates
    waitOnFlip = False
    if t >= 0 and ready.status == NOT_STARTED:
        # keep track of start time/frame for later
        ready.tStart = t  # not accounting for scr refresh
        ready.frameNStart = frameN  # exact frame index
        win.timeOnFlip(ready, 'tStartRefresh')  # time at next scr refresh
        ready.status = STARTED
        # keyboard checking is just starting
        win.callOnFlip(ready.clearEvents, eventType='keyboard')  # clear events on next screen flip
    if ready.status == STARTED and not waitOnFlip:
        theseKeys = ready.getKeys(keyList=None, waitRelease=False)
        if len(theseKeys):
            theseKeys = theseKeys[0]  # at least one key was pressed
            # check for quit:
            # NOTE(review): comparing a key object to "escape" presumably
            # relies on its __eq__ with strings -- generated code.
            if "escape" == theseKeys:
                endExpNow = True
            # a response ends the routine
            continueRoutine = False
    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in instructComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "instruct"-------
for thisComponent in instructComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
thisExp.addData('instrText.started', instrText.tStartRefresh)
thisExp.addData('instrText.stopped', instrText.tStopRefresh)
# the Routine "instruct" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=5, method='random',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('trialTypes.xls'),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
# NOTE(review): the exec below injects each condition column (e.g.
# letterColor, text, corrAns) as a module-level name -- generated code.
if thisTrial != None:
    for paramName in thisTrial:
        exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
    currentLoop = trials
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial:
            exec('{} = thisTrial[paramName]'.format(paramName))
    # ------Prepare to start Routine "trial"-------
    t = 0
    trialClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    word.setColor(letterColor, colorSpace='rgb')
    word.setText(text)
    resp = keyboard.Keyboard()
    # keep track of which components have finished
    trialComponents = [word, resp]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # -------Start Routine "trial"-------
    while continueRoutine:
        # get current time
        t = trialClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame
        # *word* updates (stimulus and response both start at t = 0.5 s)
        if t >= 0.5 and word.status == NOT_STARTED:
            # keep track of start time/frame for later
            word.tStart = t  # not accounting for scr refresh
            word.frameNStart = frameN  # exact frame index
            win.timeOnFlip(word, 'tStartRefresh')  # time at next scr refresh
            word.setAutoDraw(True)
        # *resp* updates
        waitOnFlip = False
        if t >= 0.5 and resp.status == NOT_STARTED:
            # keep track of start time/frame for later
            resp.tStart = t  # not accounting for scr refresh
            resp.frameNStart = frameN  # exact frame index
            win.timeOnFlip(resp, 'tStartRefresh')  # time at next scr refresh
            resp.status = STARTED
            # keyboard checking is just starting
            waitOnFlip = True
            win.callOnFlip(resp.clock.reset)  # t=0 on next screen flip
            win.callOnFlip(resp.clearEvents, eventType='keyboard')  # clear events on next screen flip
        if resp.status == STARTED and not waitOnFlip:
            theseKeys = resp.getKeys(keyList=['left', 'down', 'right'], waitRelease=False)
            if len(theseKeys):
                theseKeys = theseKeys[0]  # at least one key was pressed
                # check for quit:
                if "escape" == theseKeys:
                    endExpNow = True
                resp.keys = theseKeys.name  # just the last key pressed
                resp.rt = theseKeys.rt
                # was this 'correct'?
                if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
                    resp.corr = 1
                else:
                    resp.corr = 0
                # a response ends the routine
                continueRoutine = False
        # check for quit (typically the Esc key)
        if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
            core.quit()
        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished
        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()
    # -------Ending Routine "trial"-------
    for thisComponent in trialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    trials.addData('word.started', word.tStartRefresh)
    trials.addData('word.stopped', word.tStopRefresh)
    # check responses
    if resp.keys in ['', [], None]:  # No response was made
        resp.keys = None
        # was no response the correct answer?!
        if str(corrAns).lower() == 'none':
            resp.corr = 1;  # correct non-response
        else:
            resp.corr = 0;  # failed to respond (incorrectly)
    # store data for trials (TrialHandler)
    trials.addData('resp.keys',resp.keys)
    trials.addData('resp.corr', resp.corr)
    if resp.keys != None:  # we had a response
        trials.addData('resp.rt', resp.rt)
    trials.addData('resp.started', resp.tStartRefresh)
    trials.addData('resp.stopped', resp.tStopRefresh)
    # the Routine "trial" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    thisExp.nextEntry()
# completed 5 repeats of 'trials'
# get names of stimulus parameters
if trials.trialList in ([], [None], None):
    params = []
else:
    params = trials.trialList[0].keys()
# save data for this loop
trials.saveAsExcel(filename + '.xlsx', sheetName='trials',
    stimOut=params,
    dataOut=['n','all_mean','all_std', 'all_raw'])
# ------Prepare to start Routine "thanks"-------
t = 0
thanksClock.reset()  # clock
frameN = -1
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
# keep track of which components have finished
thanksComponents = [thanksText]
for thisComponent in thanksComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "thanks"------- (fixed 2-second routine)
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = thanksClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *thanksText* updates
    if t >= 0.0 and thanksText.status == NOT_STARTED:
        # keep track of start time/frame for later
        thanksText.tStart = t  # not accounting for scr refresh
        thanksText.frameNStart = frameN  # exact frame index
        win.timeOnFlip(thanksText, 'tStartRefresh')  # time at next scr refresh
        thanksText.setAutoDraw(True)
    frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75  # most of one frame period left
    if thanksText.status == STARTED and t >= frameRemains:
        # keep track of stop time/frame for later
        thanksText.tStop = t  # not accounting for scr refresh
        thanksText.frameNStop = frameN  # exact frame index
        win.timeOnFlip(thanksText, 'tStopRefresh')  # time at next scr refresh
        thanksText.setAutoDraw(False)
    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in thanksComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "thanks"-------
for thisComponent in thanksComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
thisExp.addData('thanksText.started', thanksText.tStartRefresh)
thisExp.addData('thanksText.stopped', thanksText.tStopRefresh)
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort()  # or data files will save again on exit
win.close()
core.quit()
| 16,604 | 4,999 |
from pydatastructsalgorithms import tree_list as tree
# r = tree.binary_tree(3)
# tree.insert_left(r, 4)
# tree.insert_left(r, 5)
# tree.insert_right(r, 6)
# tree.insert_right(r, 7)
# l = tree.get_left_child(r)
# tree.set_root_val(l, 9)
# tree.insert_left(l, 11)
# print(tree.get_right_child(tree.get_right_child(r)))
# x = tree.binary_tree('a')
# tree.insert_left(x, 'b')
# tree.insert_right(x, 'c')
# print x
# tree.insert_right(tree.get_right_child(x), 'd')
# print x
# tree.insert_left(tree.get_right_child(tree.get_right_child(x)), 'e')
# print x
def build_tree():
    """Build the sample binary tree (note: this module is Python 2).

             a
            / \
           b   c
            \ / \
            d e  f

    'd' is the right child of 'b'; 'e'/'f' are the children of 'c'.
    """
    r = tree.binary_tree('a')
    tree.insert_left(r, 'b')
    tree.insert_right(r, 'c')
    tree.insert_right(tree.get_left_child(r), 'd')
    tree.insert_left(tree.get_right_child(r), 'e')
    tree.insert_right(tree.get_right_child(r), 'f')
    return r
# Use the function-call form of print: for a single argument it prints
# identically on Python 2 and also runs on Python 3 (the py2-only
# ``print expr`` statement is a SyntaxError there).
print(build_tree())
from flask import _app_ctx_stack, jsonify
from choptop import app
def get_model():
    """Return the ChopTop model stored on the current Flask app context,
    or None when the attribute (or the context) is absent."""
    ctx = _app_ctx_stack.top
    return getattr(ctx, "ChopTop", None)
@app.route('/position')
def get_position():
    """Return the tracked finger position as a JSON response.

    Bug fix: the view was declared with a `self` parameter even though it is
    a plain module-level function; Flask invokes routed views with no
    positional arguments (this route has no URL parameters), so every
    request raised a TypeError.
    """
    choptop = get_model()
    return jsonify(choptop.finger_position)
# ----------------------- PATH ------------------------
ROOT_PATH = "."
DATA_PATH = "{}/../Datasets".format(ROOT_PATH)
FB15K_DATA_PATH = "{}/fb15k".format(DATA_PATH)
DB100K_DATA_PATH = "{}/db100k".format(DATA_PATH)
FB15K_SPARSE_DATA_PATH = "{}/fb15k-sparse".format(DATA_PATH)
LOG_PATH = "{}/log_dir".format(ROOT_PATH)
CHECKPOINT_PATH = "{}/checkpoint".format(ROOT_PATH)

# ----------------------- DATA ------------------------
# Maps a dataset key to the files that make it up.
DATASET = {}

# FB15k file locations.
FB15K_TRAIN_RAW = "{}/train.txt".format(FB15K_DATA_PATH)
FB15K_VALID_RAW = "{}/valid.txt".format(FB15K_DATA_PATH)
FB15K_TEST_RAW = "{}/test.txt".format(FB15K_DATA_PATH)
FB15K_TRAIN = "{}/digitized_train.txt".format(FB15K_DATA_PATH)
FB15K_VALID = "{}/digitized_valid.txt".format(FB15K_DATA_PATH)
FB15K_TEST = "{}/digitized_test.txt".format(FB15K_DATA_PATH)
FB15K_E2ID = "{}/e2id.txt".format(FB15K_DATA_PATH)
FB15K_R2ID = "{}/r2id.txt".format(FB15K_DATA_PATH)
FB15K_GNDS = "{}/groundings.txt".format(FB15K_DATA_PATH)

DATASET["fb15k"] = {
    "train_raw": FB15K_TRAIN_RAW,
    "valid_raw": FB15K_VALID_RAW,
    "test_raw": FB15K_TEST_RAW,
    "train": FB15K_TRAIN,
    "valid": FB15K_VALID,
    "test": FB15K_TEST,
    "e2id": FB15K_E2ID,
    "r2id": FB15K_R2ID,
    "groundings": FB15K_GNDS,
}

# DB100K file locations.
DB100K_TRAIN_RAW = "{}/train.txt".format(DB100K_DATA_PATH)
DB100K_VALID_RAW = "{}/valid.txt".format(DB100K_DATA_PATH)
DB100K_TEST_RAW = "{}/test.txt".format(DB100K_DATA_PATH)
DB100K_TRAIN = "{}/digitized_train.txt".format(DB100K_DATA_PATH)
DB100K_VALID = "{}/digitized_valid.txt".format(DB100K_DATA_PATH)
DB100K_TEST = "{}/digitized_test.txt".format(DB100K_DATA_PATH)
DB100K_E2ID = "{}/e2id.txt".format(DB100K_DATA_PATH)
DB100K_R2ID = "{}/r2id.txt".format(DB100K_DATA_PATH)
DB100K_GNDS = "{}/groundings.txt".format(DB100K_DATA_PATH)

DATASET["db100k"] = {
    "train_raw": DB100K_TRAIN_RAW,
    "valid_raw": DB100K_VALID_RAW,
    "test_raw": DB100K_TEST_RAW,
    "train": DB100K_TRAIN,
    "valid": DB100K_VALID,
    "test": DB100K_TEST,
    "e2id": DB100K_E2ID,
    "r2id": DB100K_R2ID,
    "groundings": DB100K_GNDS,
}

# FB15k-sparse file locations.
FB15K_SPARSE_TRAIN_RAW = "{}/train.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_VALID_RAW = "{}/valid.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_TEST_RAW = "{}/test.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_TRAIN = "{}/digitized_train.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_VALID = "{}/digitized_valid.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_TEST = "{}/digitized_test.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_E2ID = "{}/e2id.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_R2ID = "{}/r2id.txt".format(FB15K_SPARSE_DATA_PATH)
FB15K_SPARSE_GNDS = "{}/groundings.txt".format(FB15K_SPARSE_DATA_PATH)

DATASET["fb15k-sparse"] = {
    "train_raw": FB15K_SPARSE_TRAIN_RAW,
    "valid_raw": FB15K_SPARSE_VALID_RAW,
    "test_raw": FB15K_SPARSE_TEST_RAW,
    "train": FB15K_SPARSE_TRAIN,
    "valid": FB15K_SPARSE_VALID,
    "test": FB15K_SPARSE_TEST,
    "e2id": FB15K_SPARSE_E2ID,
    "r2id": FB15K_SPARSE_R2ID,
    "groundings": FB15K_SPARSE_GNDS,
}

# Dataset variants that only differ in which groundings file they use:
# one per tag "50", "55", ..., "100", plus the special "oneTime" tag.
groundings = [str(50 + step * 5) for step in range(11)] + ['oneTime']

for tag in groundings:
    DATASET["fb15k_" + tag] = {
        "train_raw": FB15K_TRAIN_RAW,
        "valid_raw": FB15K_VALID_RAW,
        "test_raw": FB15K_TEST_RAW,
        "train": FB15K_TRAIN,
        "valid": FB15K_VALID,
        "test": FB15K_TEST,
        "e2id": FB15K_E2ID,
        "r2id": FB15K_R2ID,
        "groundings": "{}/groundings_{}.txt".format(FB15K_DATA_PATH, tag),
    }

for tag in groundings:
    DATASET["db100k_" + tag] = {
        "train_raw": DB100K_TRAIN_RAW,
        "valid_raw": DB100K_VALID_RAW,
        "test_raw": DB100K_TEST_RAW,
        "train": DB100K_TRAIN,
        "valid": DB100K_VALID,
        "test": DB100K_TEST,
        "e2id": DB100K_E2ID,
        "r2id": DB100K_R2ID,
        "groundings": "{}/groundings_{}.txt".format(DB100K_DATA_PATH, tag),
    }

# ----------------------- PARAM -----------------------
RANDOM_SEED = 123
| 3,856 | 1,977 |
from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple
from Base import Telemetric, CONFIG
__all__ = ['start_app']
__version__ = "0.1.0"

# Child of the Base telemetry logger, namespaced for this web front-end.
LOGGER = Telemetric.LOGGER.getChild('WebApp')

APP = Flask(__name__)

# Bind address/port, overridable through the [WebApp] config section.
HOSTNAME = CONFIG.get('WebApp', 'HOST', fallback='0.0.0.0')
PORT = CONFIG.getint('WebApp', 'PORT', fallback=80)

import WebApp.Monitor
import WebApp.FileServer

# URL prefix -> WSGI sub-application, consumed by DispatcherMiddleware.
mounts = {
    '/Monitor': WebApp.Monitor.FLASK_APP,
    '/FileServer': WebApp.FileServer.FLASK_APP,
}
def start_app():
    """Mount the sub-applications and serve the combined WSGI app.

    Bug fix: the server start was wrapped in an `if __name__ == '__main__'`
    guard *inside* this function, so calling start_app() from an importing
    module built the dispatcher and silently returned without ever serving.
    The module-level guard below is the right place for that check.
    """
    application = DispatcherMiddleware(APP, mounts)
    for mount_path in mounts:
        # mount_path already begins with '/', so no extra slash is added.
        LOGGER.info(f'WebApp running on http://{HOSTNAME}:{PORT}{mount_path}')
    run_simple(
        hostname=HOSTNAME,
        port=PORT,
        application=application
    )


if __name__ == '__main__':
    start_app()
| 932 | 325 |
''' testsvg.py '''
import pygal

# Packet counters sampled every five minutes over one hour.
fa4_in_packets = [24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24, 21]
fa4_out_packets = [21, 24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24]

# Build a line chart with one x label per five-minute sample.
line_chart = pygal.Line()
line_chart.title = 'Input/Output Packets and Bytes'
line_chart.x_labels = [str(minute) for minute in range(5, 65, 5)]

# Add each series as a labelled line.
for label, series in (('InPackets', fa4_in_packets), ('OutPackets', fa4_out_packets)):
    line_chart.add(label, series)

# Write the rendered chart to an SVG file.
line_chart.render_to_file('test.svg')
| 682 | 318 |
import urllib.parse, urllib.request, json, ssl

# Authentication and API Requests
# LEARNING LAB 2 Cisco Kinetic for Cities
# The Initial login steps are the same as Learning Lab 1.
# You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE'

# Ignore invalid Certificates
ssl._create_default_https_context = ssl._create_unverified_context

############################### LEARNING LAB 2 CODE BEGINS HERE ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info
# The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information

print('Learning Lab 2 Starts Here:')

user_id = '86847897-ab35-489c-af17-6fbf301a6016'
access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c'

# Bug fix: `encoding` was used below without ever being defined, which
# raised a NameError on the first response; the API returns UTF-8 JSON.
encoding = 'utf-8'

# Define the required GET Headers needed by the CKC API
headers = {
    'authorization': "Bearer " + access_token,
    'Content-Type': "application/json"
}

# The URL with queryParms to request user details
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info'
print('\nGetting User Location Info: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers = headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Location Info:', results, '\n')

############################### LEARNING LAB 2 PART-2 ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer
# The access_token obtained as explained in Learning Lab 1 is used for authorization

# Define the required GET Headers needed by the CKC API
headers = {'authorization': "Bearer " + access_token }

# The URL with queryParms to request user details
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer'
print('\nGetting User capabilities: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers = headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Capabilities:', results, '\n')
| 2,573 | 839 |
import json
import uuid
import os
import docker
import time
from celery.utils.log import get_task_logger
from config import settings
from .language import LANGUAGE
from .status import ComputingStatus
logger = get_task_logger(__name__)
class Machine:
    """Runs one judged submission inside a Docker container and polls its
    CPU/memory usage against the configured limits."""

    # One Docker client shared by every Machine instance.
    client = docker.from_env()

    def __init__(self):
        self.container = None
        self.src_path = None
        self.stdout_path = None
        self.output_path = None
        self.start_time = None  # s
        self.time_limit = None  # ms
        self.memory_limit = None  # byte
        self.uuid = str(uuid.uuid4())
        # Scratch file the container writes its result summary into
        # (mounted at /judge/return in create()); created empty up front.
        self.temp_file_path = os.path.join(settings.BASE_DIR, 'tmp', self.uuid + '.log')
        f = open(self.temp_file_path, 'w')
        f.write('')
        f.close()
        self.status = ComputingStatus.PENDING

    def create(self, language,
               src_path, stdin_path, output_path, error_path,
               time_limit=1000, memory_limit=256 * 1024 * 1024):
        """Create (but do not start) the sandbox container.

        :param language: key understood by LANGUAGE.get_image_name/_source_name
        :param time_limit: CPU budget in milliseconds
        :param memory_limit: memory budget in bytes
        :raises Exception: if a container was already created for this Machine
        """
        if self.container:
            raise Exception('Container already exist')
        self.src_path = src_path
        self.output_path = output_path
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.container = self.client.containers.create(
            LANGUAGE.get_image_name(language),
            volumes={
                src_path: {'bind': '/judge/{}'.format(LANGUAGE.get_source_name(language)), 'mode': 'ro'},
                stdin_path: {'bind': '/judge/stdin', 'mode': 'ro'},
                # stdout_path: {'bind': '/judge/stdout', 'mode': 'ro'},
                output_path: {'bind': '/judge/userout', 'mode': 'rw'},
                error_path: {'bind': '/judge/usererr', 'mode': 'rw'},
                self.temp_file_path: {'bind': '/judge/return', 'mode': 'rw'}
            },
            # Slight headroom above the judged limit so the watcher below,
            # not the kernel OOM killer, decides the verdict (OOM kill is
            # disabled here).
            mem_limit=int(memory_limit / 0.95),
            memswap_limit=int(memory_limit / 0.95),
            oom_kill_disable=True,
        )

    def start(self):
        """Start the container and remember the wall-clock start time."""
        self.start_time = time.time()
        self.container.start()

    def stats(self):
        """Return one decoded stats snapshot from the Docker daemon."""
        return self.container.stats(decode=True, stream=False)

    def container_status(self):
        """Refresh and return the container's current status string."""
        self.container.reload()
        return self.container.status

    def _wait_for_computing(self):
        """Poll the running container until it exits or breaches a limit.

        Returns a dict with the final status, observed cpu (ms) and memory
        (bytes) peaks, an open handle on the user output file, and the
        parsed /judge/return summary (or None if it is missing/unparsable).
        """
        cpu_usage = 0
        memory_usage = 0
        logger.debug('judge machine compute: %s' % self.src_path)
        logger.debug('time_limit: %s', self.time_limit)
        for stats in self.container.stats(decode=True):
            time_used = time.time() - self.start_time
            # Wall-clock based lower bound on CPU ms (half of elapsed time)
            # — TODO(review): confirm the /2 factor is intentional.
            cpu_usage = max(cpu_usage, time_used / 2 * 1000)
            logger.debug('time_used: %s', time_used)
            logger.debug('cpu_usage: %s', cpu_usage)
            # stats = self.stats()
            logger.debug(json.dumps(stats, indent=2, sort_keys=True))
            if self.container_status() == 'exited':
                self.status = ComputingStatus.FINISHED
                break
            # total_usage is reported in ns; /1e6 converts to ms.
            cpu_usage = max(cpu_usage, stats['cpu_stats']['cpu_usage']['total_usage'] / 1e6)
            logger.debug('time_limit : %s' % self.time_limit)
            logger.debug('cpu_usage : %s' % cpu_usage)
            memory_usage = max(memory_usage, stats['memory_stats'].get('max_usage', 0))
            if cpu_usage > self.time_limit:
                self.status = ComputingStatus.TIME_LIMIT_EXCEED
                break
            logger.debug('memory_limit: %s' % self.memory_limit)
            logger.debug('memory_usage: %s' % memory_usage)
            if memory_usage >= self.memory_limit:
                self.status = ComputingStatus.MEMORY_LIMIT_EXCEED
                break
            # Hard wall-clock cutoff at twice the CPU limit.
            if time_used > self.time_limit * 2 / 1000:
                self.status = ComputingStatus.TIME_LIMIT_EXCEED
                self.container.stop(timeout=0)
                break
            time.sleep(0.5)
        try:
            result = json.load(open(self.temp_file_path, mode='r'))
        except:
            # Missing or malformed summary file — treat as no result.
            result = None
        return {
            'status': self.status,
            'cpu_usage': cpu_usage,
            'memory_usage': memory_usage,
            # NOTE(review): this file handle is handed to the caller and
            # never closed here.
            'output': open(self.output_path, mode='r'),
            'result': result,
        }

    def wait_for_computing(self):
        """Safe wrapper around _wait_for_computing: always destroys the
        container, and maps any exception to an ERROR result."""
        try:
            return self._wait_for_computing()
        except Exception as e:
            logger.error(e)
            return {
                'status': ComputingStatus.ERROR,
                'cpu_usage': 0,
                'memory_usage': 0,
                'output': None,
                'result': None,
            }
        finally:
            self.destroy()

    def destroy(self):
        """Stop and remove the container (idempotent)."""
        if self.container:
            self.container.stop(timeout=0)
            self.container.remove()
            self.container = None
| 4,780 | 1,442 |
__author__ = 'heddevanderheide' | 31 | 12 |
from functools import partial
import logging
from typing import Callable, Any, Iterable
from collections import defaultdict
from kombu import Connection
from kombu.mixins import ConsumerMixin
from classic.components import component
from .handlers import MessageHandler, SimpleMessageHandler
from .scheme import BrokerScheme
# Name the logger after the module's import path (standard logging
# convention); getLogger(__file__) created a logger named after the
# file-system path instead, which breaks hierarchical logger config.
logger = logging.getLogger(__name__)

# A handler callable that accepts one message body and returns nothing.
AnyCallable = Callable[[Any], None]
@component
class KombuConsumer(ConsumerMixin):
    """Kombu consumer that dispatches messages from registered queues to
    MessageHandler instances."""

    # Injected by the @component wiring.
    connection: Connection
    scheme: BrokerScheme

    def __attrs_post_init__(self):
        # handler -> list of queues it consumes from.
        self._handlers = defaultdict(list)

    def _get_queues(self, queue_names: Iterable[str]):
        """Resolve queue names against the broker scheme; every name must
        already be declared there."""
        queues = []
        for name in queue_names:
            assert name in self.scheme.queues, \
                f'Queue with name {name} do not exists in broker scheme!'
            queues.append(self.scheme.queues[name])
        return queues

    def register_handler(self, handler: MessageHandler, *queue_names: str):
        """Attach an existing handler to one or more queues."""
        queues = self._get_queues(queue_names)
        self._handlers[handler].extend(queues)

    def register_function(self,
                          function: AnyCallable,
                          *queue_names: str,
                          late_ack: bool = True):
        """Wrap a plain callable in a SimpleMessageHandler and register it."""
        handler = SimpleMessageHandler(
            function=function, late_ack=late_ack,
        )
        queues = self._get_queues(queue_names)
        self._handlers[handler].extend(queues)

    def get_consumers(self, consumer_cls, channel):
        # ConsumerMixin hook: build one consumer per registered handler,
        # binding the handler via partial so on_message stays static.
        consumers = []
        for handler, queues in self._handlers.items():
            on_message = partial(self.on_message, handler=handler)
            c = consumer_cls(
                queues=queues,
                callbacks=[on_message],
            )
            consumers.append(c)
        return consumers

    @staticmethod
    def on_message(body, message, handler):
        # NOTE(review): errors are logged and swallowed; ack/reject is left
        # entirely to the handler implementation.
        try:
            logger.info(f'Trying to call {handler}')
            handler.handle(message, body)
        except Exception as error:
            logger.error(error)

    def run(self, *args, **kwargs):
        """Start the consume loop (blocks)."""
        logger.info('Worker started')
        return super().run(*args, **kwargs)
| 2,160 | 598 |
from cmath import exp, pi
from math import log2
def vratLiche(a):
    """Return the elements of `a` at odd indices (1, 3, 5, ...).

    Extended slicing replaces the original index-scanning loop; list()
    keeps the return type a list for any sequence input.
    """
    return list(a[1::2])
def vratSude(a):
    """Return the elements of `a` at even indices (0, 2, 4, ...).

    Extended slicing replaces the original index-scanning loop; list()
    keeps the return type a list for any sequence input.
    """
    return list(a[0::2])
def roundComplex(vysl):
    """Round the real and imaginary part of every value to 5 decimals."""
    return [complex(round(z.real, 5), round(z.imag, 5)) for z in vysl]
def recursiveComplexFFT(n, prim, a):
    """Radix-2 Cooley-Tukey FFT, recursive form.

    :param n: length of `a`; must be a power of two
    :param prim: primitive n-th root of unity (e.g. exp(2j*pi/n))
    :param a: sample values
    :returns: list of n complex spectrum values, rounded to 5 decimals
        (a length-1 input is returned unrounded, as before)
    """
    if n == 1:
        return [a[0]]
    half = n // 2  # exact integer halving (was int(n/2))
    # Even/odd split via slicing instead of the former helper scan loops.
    evens = recursiveComplexFFT(half, prim * prim, a[0::2])
    odds = recursiveComplexFFT(half, prim * prim, a[1::2])
    spectrum = [0] * n
    for k in range(half):
        twiddle = prim ** k
        spectrum[k] = evens[k] + twiddle * odds[k]
        spectrum[half + k] = evens[k] - twiddle * odds[k]
    # Same 5-decimal rounding the original roundComplex() helper applied.
    return [complex(round(z.real, 5), round(z.imag, 5)) for z in spectrum]
def rev(i, k):
    """Return `i` with its k-bit binary representation reversed."""
    bits = format(i, '0{}b'.format(k))
    return int(bits[::-1], 2)
def iterativeComplexFFT(n, prim, a):
    """Radix-2 Cooley-Tukey FFT, iterative form with in-place butterflies.

    Same contract as recursiveComplexFFT: `n` must be a power of two and
    `prim` a primitive n-th root of unity; the spectrum is returned with
    both parts rounded to 5 decimals.
    """
    k = int(log2(n))
    # Bit-reversal permutation of the input (inlined rev() helper).
    A = [0] * n
    for i in range(n):
        A[i] = a[int(format(i, '0{}b'.format(k))[::-1], 2)]
    # prims[u-1] is the primitive (2**u)-th root of unity for stage u.
    prims = [0] * k
    prims[k - 1] = prim
    for i in range(k - 2, -1, -1):
        prims[i] = prims[i + 1] * prims[i + 1]
    for u in range(1, k + 1):
        m = 2 ** u  # butterfly span of this stage
        for start in range(0, n - m + 1, m):
            for j in range(m // 2):
                temp = (prims[u - 1] ** j) * A[start + j + m // 2]
                upper = A[start + j] + temp
                lower = A[start + j] - temp
                A[start + j] = upper
                A[start + j + m // 2] = lower
    # Same 5-decimal rounding the original roundComplex() helper applied.
    return [complex(round(z.real, 5), round(z.imag, 5)) for z in A]
# Demo: run both FFT implementations on the same sample vector and print
# the results so they can be compared by eye.
vektor = [1,1,2,2,5,2,4,7]  # sample input vector
n = len(vektor)
myPrim = exp((2j*pi)/n)  # primitive n-th root of unity
res = recursiveComplexFFT(n, myPrim, vektor)  # recursive FFT
print(res)
myPrim = exp((2j*pi)/n)
res2 = iterativeComplexFFT(n, myPrim, vektor)  # iterative FFT
print(res2)
#! /usr/bin/env python
# -*- coding:UTF-8 -*-
# 把对象pickle至文件
try:
import cPickle as pickle
except:
import pickle
import sys
class SimpleObject(object):
    """Holds a name together with its reversed spelling."""

    def __init__(self, name):
        self.name = name
        self.name_backwards = ''.join(reversed(name))
if __name__ == '__main__':
    # Pickle a few demo objects, one after another, into the file named on
    # the command line.
    data = []
    data.append(SimpleObject("pickle"))
    data.append(SimpleObject("cPickle"))
    data.append(SimpleObject("last"))

    filename = sys.argv[1]
    with open(filename, 'wb') as f:
        for o in data:
            # print() runs on both Python 2 and 3; the original
            # `print "..."` statement is a SyntaxError on Python 3.
            print("WRITING : %s (%s)" % (o.name, o.name_backwards))
            pickle.dump(o, f)
| 609 | 232 |
#!/usr/bin/env python
# -*- coding: utf-8; buffer-read-only: t -*-
__author__ = "Gregorio Ambrosio"
__contact__ = "gambrosio[at]uma.es"
__copyright__ = "Copyright 2021, Gregorio Ambrosio"
__date__ = "2021/02/22"
__license__ = "MIT"
import unittest
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import robotathome as rh
from robotathome import logger, set_log_level
class Test(unittest.TestCase):
    """Test class of toolbox module """

    # @unittest.skip("testing skipping")
    def setUp(self):
        """ The setUp() method allow you to define instructions that will be
        executed before and after each test method

        Examples:
            python -m unittest <testModule>.<className>.<function_name>
            $ cd ~/cloud/GIT/RobotAtHome_API/tests
            $ python -m unittest test_reader.Test.test_get_home_names
        """
        # we are testing: set the lowest log level
        rh.set_log_level('TRACE')
        logger.trace("*** Test.setUp")
        # Local references
        # Expected on-disk layout the paths below assume:
        '''
        /home/user
        └─── WORKSPACE
             ├─── R@H2-2.0.1
             │     └── files
             │          ├── rgbd
             │          └── scene
             └─────── rh.db
        '''
        self.rh_path = os.path.expanduser('~/WORKSPACE/R@H2-2.0.1')
        self.wspc_path = os.path.expanduser('~/WORKSPACE')
        self.rgbd_path = os.path.join(self.rh_path, 'files/rgbd')
        self.scene_path = os.path.join(self.rh_path, 'files/scene')
        self.db_filename = 'rh.db'
        try:
            self.rh = rh.RobotAtHome(rh_path = self.rh_path,
                                     rgbd_path = self.rgbd_path,
                                     scene_path = self.scene_path,
                                     wspc_path = self.wspc_path,
                                     db_filename = self.db_filename
                                     )
        except:
            # NOTE(review): bare except + os._exit hides the actual failure
            # reason; consider logging the exception and re-raising.
            logger.error("setUp: something was wrong")
            # exit without handling
            os._exit(1)

    def tearDown(self):
        """The tearDown() method allow you to define instructions that will be
        executed after each test method"""
        logger.trace("*** Test.tearDown")
        del self.rh

    def test_say_hello(self):
        """Testing of say_hello
        """
        logger.trace("*** Testing of say_hello()")
        logger.info("Running say_hello in _greetings.py")
        logger.info(rh.say_hello())

    def test_get_labeled_img(self):
        """Testing of get_labeled_img
        """
        logger.trace("*** Testing of get_labeled_img()")
        logger.info("Getting labeled image")
        # Opens an interactive window (plt.show) — not suitable for CI.
        id = 100000  # 100000 <= id < 200000
        [rgb_f, _] = self.rh.get_RGBD_files(id)
        labels = self.rh.get_RGBD_labels(id)
        [labeled_img, _] = rh.get_labeled_img(labels, rgb_f)
        plt.imshow(labeled_img)
        plt.show()

    def test_plot_labeled_img(self):
        """Testing of plot_labels
        """
        logger.trace("*** Testing of plot_labeled_img()")
        logger.info("Plotting RGB image patched with labels")
        set_log_level('INFO')
        id = 100000  # 100000 <= id < 200000
        [rgb_f, _] = self.rh.get_RGBD_files(id)
        labels = self.rh.get_RGBD_labels(id)
        logger.info("\nlabel names: \n{}", labels['name'])
        logger.info("\nlabel masks type: \n{}", type(labels['mask'].iat[0]))
        rh.plot_labeled_img(labels, rgb_f)

    def test_get_scan_xy(self):
        """ Docstring
        """
        id = 200000  # 0 <= id <= inf
        laser_scan = self.rh.get_laser_scan(id)
        xy = rh.get_scan_xy(laser_scan)
        print(xy)

    def test_plot_scan(self):
        """ Docstring
        """
        id = 200000  # 0 <= id <= inf
        laser_scan = self.rh.get_laser_scan(id)
        rh.plot_scan(laser_scan)

    def test_plot_scene(self):
        # Render the first scene file referenced in the database.
        scenes = self.rh.get_scenes()
        s_id = 0
        logger.info("\nScene file: \n{}", scenes.iloc[s_id].scene_file)
        rh.plot_scene(scenes.iloc[s_id].scene_file)


if __name__ == '__main__':
    unittest.main()
| 4,147 | 1,407 |
# Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738
import torch
import torch.nn.functional as F
from torch.distributions.dirichlet import _Dirichlet
def BetaSample(alpha, beta, sample_shape=torch.Size()):
    """Draw Beta(alpha, beta) samples via torch's internal Dirichlet op
    (a Beta is a 2-component Dirichlet; component 0 is the Beta sample)."""
    conc = torch.stack([alpha, beta], -1)
    conc = conc.expand(sample_shape + conc.shape)
    return _Dirichlet.apply(conc).select(-1, 0)
def DiffAugment(x, policy='', channels_first=True):
    """Apply the differentiable augmentations named in `policy` to batch x.

    :param x: image batch; assumed NCHW when channels_first, else NHWC —
        TODO(review): confirm with callers
    :param policy: comma-separated tokens matching AUGMENT_FNS keys
        (e.g. 'color,translation'); may also contain 'mixup'
    :param channels_first: if False, x is permuted to NCHW for the
        augment functions and permuted back afterwards
    """
    if policy:
        # Keep the pre-augmentation batch for the mixup branch below.
        x_ori = x.clone()
        if not channels_first:
            x = x.permute(0, 3, 1, 2)
        for p in policy.split(','):
            if p in list(AUGMENT_FNS.keys()):
                for f in AUGMENT_FNS[p]:
                    x = f(x)
        if not channels_first:
            x = x.permute(0, 2, 3, 1)
        x = x.contiguous()
        # mixup
        if 'mixup' in policy:
            # Run the same augmentation pipeline a second time (fresh
            # randomness) on the original batch, then blend the two results
            # with a Beta(0.1, 0.1)-distributed per-sample weight.
            if not channels_first:
                x1 = x_ori.permute(0, 3, 1, 2)
            else:
                x1 = x_ori.clone()
            for p in policy.split(','):
                if p in list(AUGMENT_FNS.keys()):
                    for f in AUGMENT_FNS[p]:
                        x1 = f(x1)
            if not channels_first:
                x1 = x1.permute(0, 2, 3, 1)
            x1 = x1.contiguous()
            #TODO
            alpha = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1
            beta = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1
            weight = BetaSample(alpha, beta)
            x = (1 - weight)*x1 + weight*x
            '''weight = torch.distributions.beta.Beta(alpha, beta).sample()
            weight = torch.max(weight, 1 - weight)
            x = (1 - weight)*x_ori + weight*x'''
    return x
def rand_brightness(x):
    """Add one uniform offset in [-0.5, 0.5) per sample to the whole image."""
    shift = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + shift
def rand_saturation(x):
    """Scale each pixel's deviation from its channel mean by U[0, 2)."""
    channel_mean = x.mean(dim=1, keepdim=True)
    gain = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - channel_mean) * gain + channel_mean
def rand_contrast(x):
    """Scale each sample's deviation from its own mean by U[0.5, 1.5)."""
    sample_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - sample_mean) * factor + sample_mean
def rand_translation(x, ratio=0.125):
    """Randomly shift each sample by up to `ratio` of its spatial size,
    padding the exposed border with zeros (NCHW layout implied by the
    size(2)/size(3) spatial indexing)."""
    # Maximum shift per axis, rounded to the nearest pixel.
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # One independent integer shift per sample, in [-shift, +shift].
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    # +1 accounts for the 1-pixel zero pad added below; clamping makes
    # out-of-range coordinates land in the padded border.
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    # Gather the shifted pixels (channels moved last for indexing, then back).
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x
def rand_cutout(x, ratio=0.5):
    """Zero out one randomly-placed rectangle of `ratio` x `ratio` of the
    spatial extent in every sample (NCHW layout implied by the
    size(2)/size(3) spatial indexing)."""
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # Random centre per sample; the offset range lets the cutout partially
    # hang over the image border.
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    # Clamp so over-hanging parts of the rectangle fold onto the border.
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    # Build a per-sample binary mask and apply it across all channels.
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x
def noise(x, sd=0.05):
    """Add elementwise Gaussian noise scaled by sd*sd.

    NOTE(review): the standard normal draw is multiplied by sd twice, so
    the effective std is sd**2 (0.0025 by default) — confirm this squaring
    is intended rather than a single factor of sd.
    """
    return x + torch.randn_like(x) * (sd * sd)
# Maps a policy token (as used in DiffAugment's `policy` string) to the
# ordered list of augmentation functions applied for that token.
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
    'noise': [noise],
}
| 4,483 | 1,892 |
import re
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class PlayBoard:
    """A 5x5 bingo board; drawn numbers are overwritten with None."""
    numbers: List[List[Optional[int]]]
def read_numbers() -> List[int]:
    """Parse the comma-separated draw order from the first line of data.txt."""
    with open("data.txt", "r") as handle:
        first_line = handle.readline()
    return [int(token) for token in first_line.split(",")]
def read_boards() -> List[PlayBoard]:
    """
    Reading each board defined by a new line then 5 lists of 5 ints.
    Given the data format, this divides equally by 6 for possible performant mapping.
    """
    with open("data.txt", "r") as handle:
        raw_lines = handle.readlines()[2:]
    tokenised = [re.split(r"\s+", line.strip()) for line in raw_lines]  # noqa
    boards: List[PlayBoard] = []
    # Boards occupy 5 data lines followed by one separator line.
    for start in range(0, len(raw_lines), 6):
        rows = [[int(cell) for cell in row] for row in tokenised[start : start + 5]]
        boards.append(PlayBoard(numbers=rows))
    return boards
def calculate_final_score(play_board: PlayBoard, number: int) -> int:
    """Sum remaining values on the play board, times the winning number."""
    unmarked_total = sum(cell for row in play_board.numbers for cell in row if cell)
    return unmarked_total * number
def check_board_and_return_optional_score(play_board: PlayBoard, number: int) -> Optional[int]:
    """Mark `number` on the board; return the final score if the board won.

    A cell is marked by overwriting it with None.  Returns None when no
    full row or column is complete after marking.
    """
    # Mark the number and test the row it appears in.  (The original
    # enumerated the rows but never used the index.)
    for row in play_board.numbers:
        if number in row:
            row[row.index(number)] = None
            if row == [None] * 5:
                return calculate_final_score(play_board=play_board, number=number)
    # Test every column for completion; the original expressed this with a
    # for/else whose else-branch only returned None.
    for col_idx in range(5):
        if all(row[col_idx] is None for row in play_board.numbers):
            return calculate_final_score(play_board=play_board, number=number)
    return None
def part_one() -> int:
    """Play every board in draw order and return the first winning score."""
    numbers, play_boards = read_numbers(), read_boards()
    scores = []
    for drawn in numbers:
        for board in play_boards:
            result = check_board_and_return_optional_score(play_board=board, number=drawn)
            if result:
                scores.append(result)
    return scores[0]
def part_two() -> int:
    """Return the final score of the LAST board to achieve its first win.

    Bug fix: the original never removed boards that had already won, so a
    finished board kept matching (and re-scoring) on every later draw and
    `game_results[-1]` reflected a stale winner re-scored with a later
    number instead of the last board's score at the moment it first won.
    """
    numbers, play_boards = read_numbers(), read_boards()
    last_score = None
    won = set()  # indices of boards that have already completed
    for number in numbers:
        for idx, play_board in enumerate(play_boards):
            if idx in won:
                continue
            score = check_board_and_return_optional_score(play_board=play_board, number=number)
            if score:
                won.add(idx)
                last_score = score
    return last_score
if __name__ == "__main__":
print("Day 4: Giant Squid")
print("-" * 80)
result_part_1 = part_one()
print(
f"Part 1: To guarantee victory against the giant squid, figure out which board will win first. "
f"What will your final score be if you choose that board?: {result_part_1}"
)
print("-" * 80)
result_part_2 = part_two()
print(
f"Part 2: Figure out which board will win last. Once it wins, what would its final score be?: {result_part_2}"
)
print("-" * 80)
| 3,000 | 944 |
import os

# Directory of static assets shipped next to this package.
static_path = os.path.join(os.path.dirname(__file__), "..", "static")
# %s is filled in with the API endpoint name — presumably by callers;
# verify against usage.
apiurl = "http://localhost:8000/api/%s"
# Where generated graphs are stored locally and the URL they are served at.
local_store = os.path.join(static_path, "graphs")
local_store_url = "http://localhost:8000/static/graphs"
# Identity/credentials for this node.
nodename = "lg"
nodepwd = "lg@home"
| 265 | 110 |
class loop():
    """Run a callable repeatedly under a 'for' or 'while' strategy.

    Keyword arguments:
        break_function: callable; a '_while' run ends once it returns True.
        pause_function: callable; the inner '_while' pass runs while it
            returns False (defaults to always-False, i.e. never pause).
        range: iterable that drives a '_for' run.
    """

    def __init__(self, _loop_type, **kwargs) -> None:
        self.type = _loop_type
        self.kwargs = kwargs
        self.break_function = self.kwargs.get("break_function")
        # Bug fix: _while() reads self.pause_function, but it was never
        # assigned anywhere, so every '_while' run raised AttributeError.
        self.pause_function = self.kwargs.get("pause_function", lambda: False)
        self.range = kwargs.get("range")
        # Resolve the runner once: dispatches to _for or _while.
        self.start = getattr(self, f"_{self.type}")
        self.counter = 0
        self.outPut_function = 0

    def _while(self, function, *ags, **kws):
        """Call `function` until break_function() is True; returns the
        (inner-pass iteration count, last return value) pair."""
        while not self.break_function():
            self.counter = 0
            while not self.pause_function():
                self.outPut_function = function(*ags, **kws)
                self.counter += 1
        return self.counter, self.outPut_function

    def _for(self, function, *args, **kwargs):
        """Call `function` once per element of self.range; returns the
        (last element yielded by range, last return value) pair."""
        self.counter = 0
        for step in self.range:
            self.outPut_function = function(*args, **kwargs)
            self.counter = step
        return self.counter, self.outPut_function

    def break_verify(self):
        # NOTE(review): the result is discarded; kept as-is for interface
        # compatibility.
        self.break_function()
import sys
import numpy as np
#############################################################
### ###
### Module for Python3 ###
### * Using Numpy ( + Cupy ? ) ###
### ###
#############################################################
class RayTriangleIntersection():
    ### https://pheema.hatenablog.jp/entry/ray-triangle-intersection
    """Segment-vs-triangle intersection test (see link above), vectorised
    with NumPy over many triangles at once."""

    def __init__(self):
        pass

    def calc_intersection(self, o, d, v0, v1, v2):
        """Count how many triangles the segment from `o` along `d` crosses.

        :param o: segment origin (length-3 vector)
        :param d: segment vector; its norm is used as the segment length
        :param v0, v1, v2: (N, 3) arrays of triangle vertices — the
            axis=1 reductions below rely on this layout
        :returns: number of triangles intersected within the segment

        Each numbered stage filters the candidate triangles in place, so
        the arrays shrink as tests fail.
        """
        # Triangle edge vectors from v0.
        e1 = np.subtract(v1, v0)
        e2 = np.subtract(v2, v0)
        ### https://www.it-swarm.dev/ja/python/python-numpy-machine-epsilon/1041749812/
        kEpsilon = np.finfo(float).eps
        alpha = np.cross(d, e2)
        # det = np.dot(e1, alpha)
        # Per-triangle determinant (row-wise dot product).
        det = np.sum(e1 * alpha, axis=1)
        # print("e1.shape : {}".format(e1.shape))
        # print("e2.shape : {}".format(e2.shape))
        # print("alpha.shape : {}".format(alpha.shape))
        # print("det.shape : {}".format(det.shape))
        # intersect_count = np.count_nonzero(det)
        ### True = InterSection
        ### (1) Check Parallel
        # Triangles whose determinant is (near) zero are parallel to d.
        bool_p = (-kEpsilon > det) | (det > kEpsilon)
        ### Remove (1)
        v0 = v0[bool_p]
        v1 = v1[bool_p]
        v2 = v2[bool_p]
        e1 = e1[bool_p]
        e2 = e2[bool_p]
        alpha = alpha[bool_p]
        det = det[bool_p]
        # print("det.shape (1) : {}".format(det.shape))
        det_inv = 1.0 / det
        r = np.subtract(o, v0)
        ### (2) Check u-Value in the Domain (0 <= u <= 1)
        # u = np.dot(alpha, r) * det_inv
        # First barycentric coordinate of the hit point.
        u = np.sum(alpha * r, axis=1) * det_inv
        bool_u = (0.0 < u) & (u < 1.0)
        ### Remove (2)
        v0 = v0[bool_u]
        v1 = v1[bool_u]
        v2 = v2[bool_u]
        e1 = e1[bool_u]
        e2 = e2[bool_u]
        alpha = alpha[bool_u]
        r = r[bool_u]
        u = u[bool_u]
        det = det[bool_u]
        det_inv = det_inv[bool_u]
        # print("det.shape (2) : {}".format(det.shape))
        beta = np.cross(r, e1)
        ### (3) Check v-Value in the Domain (0 <= v <= 1)
        ### and
        ### Check (u + v = 1)
        # v = np.dot(d, beta) * det_inv
        # Second barycentric coordinate; u + v must stay below 1.
        v = np.sum(d * beta, axis=1) * det_inv
        bool_v = (0.0 < v) & (u + v < 1.0)
        ### Remove (3)
        v0 = v0[bool_v]
        v1 = v1[bool_v]
        v2 = v2[bool_v]
        e1 = e1[bool_v]
        e2 = e2[bool_v]
        alpha = alpha[bool_v]
        beta = beta[bool_v]
        r = r[bool_v]
        u = u[bool_v]
        v = v[bool_v]
        det = det[bool_v]
        det_inv = det_inv[bool_v]
        # print("det.shape (3) : {}".format(det.shape))
        ### (4) Check t_value (t >= 0)
        # t = np.dot(e2, beta) * det_inv
        # Ray parameter of the hit; must lie ahead of the origin.
        t = np.sum(e2 * beta, axis=1) * det_inv
        bool_t = 0.0 < t
        ### Remove (4)
        v0 = v0[bool_t]
        v1 = v1[bool_t]
        v2 = v2[bool_t]
        e1 = e1[bool_t]
        e2 = e2[bool_t]
        alpha = alpha[bool_t]
        beta = beta[bool_t]
        r = r[bool_t]
        t = t[bool_t]
        u = u[bool_t]
        v = v[bool_t]
        det = det[bool_t]
        det_inv = det_inv[bool_t]
        # print("det.shape (4) : {}".format(det.shape))
        ### Intersett : True !!
        # intersect_val = [t, u, v]
        ### Barycenrinc_Coordinate >> XYZ
        ### ((1 - u - v) * v0) + (u * v1) + (v * v2)
        # Reconstruct the hit points in Cartesian coordinates.
        new_amp = 1.0 - u - v
        new_v0 = np.multiply(v0, new_amp[:, np.newaxis])
        new_v1 = np.multiply(v1, u[:, np.newaxis])
        new_v2 = np.multiply(v2, v[:, np.newaxis])
        intersect_pos = np.add(np.add(new_v0, new_v1), new_v2)
        ray_line = np.subtract(intersect_pos, o)
        # print("ray_line.shape : {}".format(ray_line.shape))
        ### (5) Check Line-Triangle Intersection
        ### Compare Length, Line-Length / Origin-IntersectPoint-Length
        # Keep only hits that fall within the segment's own length.
        line_length = np.linalg.norm(d)
        intersect_length = np.linalg.norm(ray_line, axis=1)
        # print("line_len : {}".format(line_length))
        # print("inter_len : {}".format(intersect_length))
        # print("inter_len.shape : {}".format(intersect_length.shape))
        bool_l = intersect_length < line_length
        # print(bool_l)
        intersect_count = np.count_nonzero(bool_l)
        return intersect_count
class DieRoll(object):
    """Roll object that parses roll string and calls appropriate function."""

    def __init__(self, roll_str, flag):
        """Parse a roll string like '2d6+3' and select the roll strategy."""
        flag_actions = {
            "a": self.advantage,
            "d": self.disadvantage
        }
        self.roll_str = roll = roll_str
        self.operator = "+"
        self.action = flag_actions[flag] if flag else self.roll_die
        self.modifier = 0
        self.message = ""
        # Split off an optional +N / -N modifier.
        for op in ("+", "-"):
            if op in roll:
                self.operator = op
                roll, raw_mod = roll.split(op)
                self.modifier = -int(raw_mod) if op == "-" else int(raw_mod)
        self.number, self.sides = (int(part) for part in roll.split("d"))
        self.min_roll = self.number
        self.max_roll = self.sides * self.number

    def print_results(self, roll_result, name=None):
        """Return result of roll."""
        roll_plus_mods = "{} {} {}".format(
            roll_result,
            self.operator,
            abs(self.modifier)
        )
        final_result = "*[ {} ]* _({} = {}) (min {}, max {}) {}_".format(
            roll_result + self.modifier,
            self.roll_str,
            roll_plus_mods,
            self.min_roll + self.modifier,
            self.max_roll + self.modifier,
            self.message
        )
        if name:
            final_result += " with {}".format(name)
        return final_result

    def roll_die(self):
        """Standard roll of die."""
        import random
        return sum(random.randint(1, self.sides) for _ in range(self.number))

    def advantage(self):
        """Roll twice, keep the higher total."""
        self.message = "with advantage"
        first, second = self.roll_die(), self.roll_die()
        return max(first, second)

    def disadvantage(self):
        """Roll twice, keep the lower total."""
        self.message = "with disadvantage"
        first, second = self.roll_die(), self.roll_die()
        return min(first, second)
import os, pickle
import functools
def load_or_make(creator):
    """
    Decorator that caches `creator`'s result as a pickle on disk.

    The wrapped function takes a filepath as its first argument.  If that
    file exists, the pickled data stored there is loaded and returned;
    otherwise creator(*args, **kwargs) runs, its result is pickled at
    filepath, and then returned.

    Usage:
        @load_or_make
        def data_creator(args):
            # code
            # return data
        my_data = data_creator(save_file_path, *args, **kwargs)
    """
    @functools.wraps(creator)
    def cached_creator(filepath, *args, **kwargs):
        if os.path.isfile(filepath):
            with open(filepath, 'rb') as pkl:
                return pickle.load(pkl)
        item = creator(*args, **kwargs)
        with open(filepath, 'wb') as pkl:
            pickle.dump(item, pkl)
        return item
    return cached_creator
| 1,099 | 345 |
from rest_framework import routers
from .api import AnimalViewSet
# Register the animal API endpoints on DRF's default router.
router = routers.DefaultRouter()
# 'animals' is the basename used to generate route names for the viewset.
router.register('api/animals', AnimalViewSet, 'animals')
# Expose the auto-generated URL patterns to Django's URL resolver.
urlpatterns = router.urls
from os import replace
from typing import List, Dict, Any, Callable
import os
import re
import json
import functools
# Sentinel type names used by the type-guessing code below.
ST_UNKNOWN = "*"
ST_BOOL = "bool"
ST_INT = "integer"
ST_STR = "string"
ST_FLOAT = "float"
ST_URL = "url"
ST_DATETIME = "datetime"
# Raw strings: the original non-raw patterns relied on invalid escape
# sequences like '\d', which emit a SyntaxWarning on modern Python.
REGEXP_URL = re.compile(r'^https?://.+$')
REGEXP_DATE = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$')
class TypeBase:
    """Root of the inferred-type hierarchy; types are leaves by default."""

    @property
    def isLeaf(self) -> bool:
        """True when this type carries no nested object structure."""
        return True
class NullType(TypeBase):
    """Type of JSON ``null`` (also used for values we cannot classify)."""

    def __repr__(self) -> str:
        return 'null'
class UniTypeHolder(TypeBase):
    """Wrapper holding exactly one inner type (array element / nullable target)."""

    def __init__(self, vtype: TypeBase) -> None:
        # A bare NullType may not be wrapped; None stands for "unknown".
        assert(type(vtype) != NullType)
        self._type = vtype

    @property
    def type(self) -> TypeBase:
        """The wrapped inner type (None when unknown)."""
        return self._type

    def replaceWithCommonObject(self, commonObject: 'CommonObjectType'):
        """Swap the inner type for a shared, named object type."""
        self._type = commonObject

    @property
    def isLeaf(self) -> bool:
        # Unknown inner type counts as non-leaf.
        return False if self._type is None else self._type.isLeaf
class Nullable(UniTypeHolder):
    """A type that may also be null; rendered with a trailing '?'."""

    def __repr__(self) -> str:
        return '{}?'.format(self._type)
class ValueType(TypeBase):
    """A scalar leaf type identified by its type name (e.g. "string")."""

    def __init__(self, typename: str) -> None:
        assert(type(typename) == str)
        self.__typename = typename

    def __eq__(self, other):
        # Equal only to another ValueType carrying the same name.
        if type(other) != ValueType:
            return False
        return self.__typename == other.__typename

    def __repr__(self) -> str:
        return '"%s"' % self.__typename

    @property
    def typename(self):
        return self.__typename
class ArrayType(UniTypeHolder):
    """An array whose elements share one inferred type (None when empty)."""

    def __repr__(self) -> str:
        if self._type is None:
            return '[]'
        return '[' + str(self._type) + ']'
class ObjectType(TypeBase):
    """An object type: a mapping from property names to inferred types."""

    def __init__(self, props) -> None:
        assert(type(props) == dict)
        self.__props = props

    @property
    def isLeaf(self) -> bool:
        return False

    @property
    def isPlain(self):
        """True when every property is a leaf (no nested objects/arrays)."""
        return all(prop.isLeaf for prop in self.__props.values())

    def get(self, v):
        return self.__props.get(v, None)

    def keys(self):
        return self.__props.keys()

    def items(self):
        return self.__props.items()

    def __repr__(self) -> str:
        body = ','.join('"%s":%s' % (key, value) for (key, value) in self.__props.items())
        return '{' + body + '}'

    @property
    def numOfKeys(self):
        return len(self.__props)

    def hasSameKeysOf(self, other) -> bool:
        assert(type(other) == ObjectType)
        return set(self.keys()) == set(other.keys())

    def containsAllKeysOf(self, other) -> bool:
        assert(type(other) == ObjectType)
        return set(self.keys()).issuperset(other.keys())

    def replaceWithCommonObject(self, key, commonObject: 'CommonObjectType'):
        """Replace the property at *key* with a shared, named object type."""
        self.__props[key] = commonObject
class CommonObjectType(TypeBase):
    """A named object type shared by several endpoints; rendered as "$Name"."""

    def __init__(self, typename, object: ObjectType) -> None:
        assert(type(object) == ObjectType)
        self.__typename = typename
        self.__object = object

    def __repr__(self) -> str:
        return '"$%s"' % self.__typename

    @property
    def typename(self):
        return self.__typename

    @property
    def object(self):
        return self.__object
def __guessTypeForValue(v):
    """Infer the leaf type of a scalar JSON value (never dict/list)."""
    assert(type(v) != dict and type(v) != list)
    if v is None:
        return NullType()
    typemap = {
        bool: ST_BOOL,
        int: ST_INT,
        str: ST_STR,
        float: ST_FLOAT,
    }
    vtype = typemap.get(type(v))
    if vtype is None:
        # Unrecognized scalar type: treat like null.
        return NullType()
    if vtype == ST_STR:
        # Recognize URLs first, but not templated ones containing '{'.
        if v.startswith('http://') or v.startswith('https://'):
            if '{' not in v:  # FIXME ???
                return ValueType(ST_URL)
        if REGEXP_DATE.match(v):
            return ValueType(ST_DATETIME)
    return ValueType(vtype)
def __guessTypeForArray(json) -> ArrayType:
    """Infer the element type of a JSON array.

    Arrays of dicts are merged key-by-key into a single ObjectType;
    arrays of uniform scalars use the first element's guessed type.
    Mixed scalar arrays are unsupported (assertion failure).
    """
    assert(type(json) == list)
    def aggregateArrayOfObjectType(array):
        # Union of all keys seen across the element ObjectTypes.
        keys = functools.reduce(lambda a, e: a.union(set(e.keys())), array, set())
        if len(keys) == 0:
            return ArrayType(None)
        merged = {}
        for obj in array:
            for key in keys:
                # NOTE: value is None when this element lacks the key.
                value = obj.get(key)
                if type(value) == ObjectType:
                    merged[key] = value
                #elif type(value) == ArrayType:
                #    merged[key] = aggregateArrayOfObjectType(value)
                elif key in merged:
                    # Reconcile this element's type with what was merged so far;
                    # object/null combinations become Nullable, equal scalar
                    # types are kept, everything else is silently ignored.
                    if type(merged[key]) == NullType and type(value) == NullType:
                        pass
                    elif type(merged[key]) == ObjectType and type(value) == NullType:
                        merged[key] = Nullable(merged[key])
                    elif type(merged[key]) == NullType and type(value) == ObjectType:
                        merged[key] = Nullable(value)
                    elif type(merged[key]) == type(value) and type(value) == ValueType and merged[key] == value:
                        pass
                    else:
                        pass
                        #merged[key] = merged[key].union(value)
                else:
                    merged[key] = value
        return ArrayType(ObjectType(merged))
    if all([type(i) == dict for i in json]):
        arr = [__guessTypeForDict(i) for i in json]
        return aggregateArrayOfObjectType(arr)
    # Scalar array: all elements must share one Python type.
    types = functools.reduce(lambda a, e: a.union(set([type(e)])), json, set())
    if len(types) == 1:
        return ArrayType(__guessTypeForValue(json[0]))
    assert(False)
def __guessTypeForDict(json) -> ObjectType:
    """Infer an ObjectType by guessing the type of every value."""
    assert(type(json) == dict)
    props = {key: guessType(value) for (key, value) in json.items()}
    return ObjectType(props)
def guessType(value) -> TypeBase:
    """Dispatch type inference on the JSON value's container kind."""
    if type(value) == dict:
        return __guessTypeForDict(value)
    if type(value) == list:
        return __guessTypeForArray(value)
    return __guessTypeForValue(value)
def collectNonNestedObjects(obj: TypeBase, path: str = '', collected_map: Dict[str, TypeBase] = None) -> Dict[str, TypeBase]:
    """Recursively collect every "plain" (non-nested) object type under *obj*.

    Keys of the returned dict are slash-separated paths; nullable hops get a
    trailing '?', array hops a trailing '/0'.

    Bug fix: the original declared ``collected_map=dict()`` — a mutable
    default argument shared across calls, so results leaked between
    successive invocations that relied on the default. A fresh dict is now
    created per call; callers that pass their own dict are unaffected.
    """
    if collected_map is None:
        collected_map = {}
    if obj.isLeaf:
        return collected_map
    if obj.isPlain:
        collected_map[path] = obj
        return collected_map
    assert(type(obj) == ObjectType)
    for key, value in obj.items():
        if type(value) == Nullable and type(value.type) == ObjectType:
            collectNonNestedObjects(value.type, path + '/' + key + '?', collected_map)
        elif type(value) == ObjectType:
            collectNonNestedObjects(value, path + '/' + key, collected_map)
        elif type(value) == ArrayType and type(value.type) == ObjectType:
            collectNonNestedObjects(value.type, path + '/' + key + '/0', collected_map)
    return collected_map
def exactMatch(a: ObjectType, b: ObjectType):
    """True when *a* is a non-empty plain object with exactly b's key set."""
    if a.numOfKeys == 0 or not a.isPlain:
        return False
    return a.hasSameKeysOf(b)
def similarMatch(a: ObjectType, b: ObjectType):
    """True when *a* is a plain object with more than 3 keys covering all of b's."""
    if not a.isPlain or a.numOfKeys <= 3:
        return False
    return a.containsAllKeysOf(b)
def bothMatch(a: ObjectType, b: ObjectType):
    """Either an exact key-set match or a (looser) similar match."""
    if exactMatch(a, b):
        return True
    return similarMatch(a, b)
class Endpoint:
    """One API endpoint: request metadata plus the inferred response type."""

    def __init__(self, request: Dict, response: TypeBase, rawResponse: str) -> None:
        self.__request = request        # parsed request spec (has at least 'name')
        self.__response = response      # inferred TypeBase tree (may be NullType)
        self.__rawResponse = rawResponse  # raw response text, or None if absent

    @property
    def request(self):
        return self.__request

    @property
    def response(self):
        return self.__response

    @property
    def rawResponse(self):
        return self.__rawResponse

    def replaceWithCommonObject(self, commonObject: CommonObjectType):
        """Substitute *commonObject* for every matching sub-object of the
        response tree; returns the number of replacements performed."""
        cond = lambda v: bothMatch(commonObject.object, v)
        def visitObject(obj: TypeBase):
            # Depth-first walk over an ObjectType, replacing matches in place.
            if obj.isLeaf:
                return 0
            if type(obj) != ObjectType:
                return 0
            assert(type(obj) == ObjectType)
            replaceCount = 0
            for key, value in obj.items():
                #print('  ', value)
                if type(value) == ObjectType:
                    if cond(value):
                        replaceCount += 1
                        obj.replaceWithCommonObject(key, commonObject)
                    elif not value.isPlain:
                        replaceCount += visitObject(value)
                elif type(value) == ArrayType and type(value.type) == ObjectType:
                    if cond(value.type):
                        replaceCount += 1
                        value.replaceWithCommonObject(commonObject)
                    else:
                        replaceCount += visitObject(value.type)
                elif type(value) == Nullable and type(value.type) == ObjectType:
                    if cond(value.type):
                        replaceCount += 1
                        value.replaceWithCommonObject(commonObject)
                    else:
                        replaceCount += visitObject(value.type)
            return replaceCount
        #print('>>>>', self.__request['name'])
        replaceCount = 0
        if type(self.__response) == ObjectType and cond(self.__response):
            # The whole response matches: replace it outright.
            replaceCount = 1
            self.__response = commonObject
        else:
            replaceCount = visitObject(self.__response)
        return replaceCount

    def nonNextedResponseObjects(self) -> Dict[str, TypeBase]:
        """Collect the plain sub-objects of the response, keyed by a type
        name derived from the collection path; None for array/missing
        responses."""
        def resolveTypename(path):
            # Last non-numeric path segment; strip a trailing '?' marker.
            n = [e for e in path.split('/') if not e.isdigit()][-1]
            if len(n) == 0:
                # Top-level object: synthesize "<RequestName>Response".
                return self.__request['name'] + 'Response'
            return n if n[-1] != '?' else n[:-1]
        if self.__response is None:
            return None
        if type(self.__response) == ArrayType:
            return None
        d = collectNonNestedObjects(self.__response, '', dict())
        return {resolveTypename(k):v for (k,v) in d.items() if len(v.keys()) > 0}

    def __repr__(self) -> str:
        return '%s = %s' % (self.__request['name'], self.__response)
class API:
    """A collection of endpoints; factors recurring response object shapes
    into shared, named common types."""

    def __init__(self, endpoints: List[Endpoint] = None) -> None:
        # Bug fix: the original used a mutable default ([]), so every API()
        # built without arguments shared (and mutated) one list.
        self.__endpoints = [] if endpoints is None else endpoints
        self.__commonObjects = []

    def endpoints(self) -> List[Endpoint]:
        return self.__endpoints

    def commonObjects(self) -> List[CommonObjectType]:
        return self.__commonObjects

    def __resolveTypename(self, typenameCanditates: List[str]):
        """Pick the shortest non-empty candidate name, capitalize it, and
        de-collide it against already-registered common object names."""
        exists = lambda name: any(filter(lambda e: e.typename == name, self.__commonObjects))
        def rename(name):
            # Append 'Axx'..'Zxx' until the name is free.
            for i in range(26):
                newTypename = name + chr(ord('A') + i) + 'xx'
                if not exists(newTypename):
                    return newTypename
            assert('Temporary typename exhausted' and False)
        filteredTypenameCanditates = sorted([e for e in typenameCanditates if len(e) > 0], key=functools.cmp_to_key(lambda a,b:len(a) - len(b)))
        typename = filteredTypenameCanditates[0]
        cappedTypename = typename[0].upper() + typename[1:]
        return rename(cappedTypename) if exists(cappedTypename) else cappedTypename

    def findAndRegisterSimilarObjects(self):
        """Repeatedly find object shapes shared by two or more endpoints,
        register each as a CommonObjectType, and substitute it into every
        endpoint's response tree."""
        def findSimilarObject(objects: List[ObjectType], matchFunction: Callable[[ObjectType, ObjectType], bool]) -> CommonObjectType:
            for (_, obj) in objects:
                # Skip shapes already covered by a registered common object.
                if any(filter(lambda e: matchFunction(e.object, obj), self.__commonObjects)): continue
                typenameCanditates = [n for (n,o) in objects if matchFunction(obj, o)]
                if len(typenameCanditates) >= 2:
                    return CommonObjectType(self.__resolveTypename(typenameCanditates), obj)
            return None
        for i in range(100000):  # hard iteration cap guarantees termination
            nonNestedObjects = []
            for e in self.__endpoints:
                objs = e.nonNextedResponseObjects()
                if objs is None:
                    continue
                nonNestedObjects += objs.items()
            # Prefer exact key-set matches over looser "similar" matches.
            sot = findSimilarObject(nonNestedObjects, exactMatch) or findSimilarObject(nonNestedObjects, similarMatch)
            if sot is None:
                break
            self.__commonObjects.append(sot)
            for e in self.__endpoints:
                e.replaceWithCommonObject(sot)

    @staticmethod
    def initWithDir(dir: str, lang: str):
        """Build an API from request specs in <dir>/api/<lang> and matching
        response samples in <dir>/response (samples are optional)."""
        endpoints = []
        path = os.path.join(dir, 'api', lang)
        for d in os.listdir(path):
            with open(os.path.join(path, d)) as req:
                req_json = json.load(req)
            res_text = None
            res_json = None
            try:
                with open(os.path.join(dir, 'response', d)) as res:
                    res_text = ''.join(res.readlines())
                    res_json = json.loads(res_text)
            except (OSError, IOError):
                pass  # best effort: a response file may legitimately not exist
            endpoint = Endpoint(req_json, guessType(res_json), res_text)
            endpoints.append(endpoint)
        return API(endpoints)
| 13,075 | 3,867 |
from setuptools import setup
from platform import system
SYSTEM = system()  # e.g. 'Windows', 'Linux', 'Darwin'
VERSION = '1.0.2'
# Pick the platform-appropriate launcher script.
if SYSTEM == 'Windows':
    scripts = ['grebot/grebot.bat']
else:
    scripts = ['grebot/grebot.sh']
setup(
    name='grebot',
    version=VERSION,
    packages=['grebot'],
    license='MIT',
    # NOTE(review): README.txt is read at import time; setup fails if absent.
    long_description=open('README.txt').read(),
    scripts=scripts,
    install_requires=['colorama']
)
| 393 | 136 |
import operator
from magicgui import magicgui
# Maps a UI label to (binary operator, name of the derived measurement column).
OPERATOR_DICTIONARY = {
    "Divide": (operator.truediv, "Measurement_Ratio"),
    "Multiply": (operator.mul, "Measurement_Product"),
    "Add": (operator.add, "Measurement_Sum"),
    "Subtract": (operator.sub, "Measurement_Difference"),
}
# Radio-button choices: every operation plus "None" to disable the math.
measurement_math_options = list(OPERATOR_DICTIONARY) + ["None"]
@magicgui(
    call_button="Set Options",
    position={
        "choices": ["upper_right", "upper_left", "lower_right", "lower_left", "center"]
    },
    size={"min": 0, "max": 1000},
    x_shift={"min": -1000, "max": 1000},
    y_shift={"min": -1000, "max": 1000},
)
def timestamp_options(
    start_time=0,
    step_time=1,
    prefix="T =",
    suffix="frame",
    position="upper_left",
    size=12,
    x_shift=12,
    y_shift=0,
):
    """
    Widget to choose timestamp options from when called.

    The chosen values remain stored on the magicgui widget; pressing
    "Set Options" only closes the dialog.
    """
    timestamp_options.close()
# used as a callback function in main widget file
def show_timestamp_options():
    """Re-open the (self-closing) timestamp options dialog."""
    timestamp_options.show()
@magicgui(
    call_button=False,
    Ok={"widget_type": "PushButton", "tooltip": "Press to load data"},
    frame={
        "choices": ["None"],
        "label": "Frame Column:",
        "tooltip": "Select frame column in input data",
    },
    track_id={
        "choices": ["None"],
        "label": "Object id Column:",
        "tooltip": "Select column representing object track ids in input data",  # noqa: E501
    },
    x_coordinates={
        "choices": ["None"],
        "label": "X Coordinate Column:",
        "tooltip": "Select x coordinate column in input data",
    },
    y_coordinates={
        "choices": ["None"],
        "label": "Y Coordinate Column:",
        "tooltip": "Select y coordinate column in input data",
    },
    z_coordinates={
        "choices": ["None"],
        "label": "Z Coordinate Column:",
        "tooltip": "Select z coordinate column in input data, select None if column does not exist",  # noqa: E501
    },
    measurment={
        "choices": ["None"],
        "label": "Measurement Column:",
        "tooltip": "Select measurement column in input data",
    },
    field_of_view_id={
        "choices": ["None"],
        "label": "Field of View/Position Column:",
        "tooltip": "Select fov column in input data, select None if column does not exist",  # noqa: E501
    },
    additional_filter={
        "choices": ["None"],
        "label": "Additional Filter Column:",
        "tooltip": "Select additional filter column, for example Well of a wellplate, select None if column does not exist",  # noqa: E501
    },
    second_measurment={
        "choices": ["None"],
        "label": "Second Measurement Column:",
        "visible": False,
        "tooltip": "Select second measurement",
    },
    measurement_math={
        "widget_type": "RadioButtons",
        "orientation": "horizontal",
        "choices": measurement_math_options,
        "label": "Math on first and \n second measurement:",
        "tooltip": "Choose operation to calculate the measurment to be used in arcos calculation on first and second measurement",  # noqa: E501
    },
)
def columnpicker(
    frame="None",
    track_id="None",
    x_coordinates="None",
    y_coordinates="None",
    z_coordinates="None",
    measurment="None",
    second_measurment="None",
    field_of_view_id="None",
    additional_filter="None",
    measurement_math="None",
    Ok=False,
):
    """Dialog with magicgui for selecting columns.

    The actual column choices are filled in at runtime by the caller;
    here every combobox starts with the single placeholder "None".
    """
    # Flip the button's bound value on each press; the main widget watches
    # this to know the user confirmed the selection.
    columnpicker.Ok.bind(not Ok)
def toggle_visible_second_measurment():
    """Show the second-measurement combobox only when a two-operand
    operation is selected in columnpicker.measurement_math."""
    curr_value = columnpicker.measurement_math.value
    if curr_value in ["None", "1/X"]:
        columnpicker.second_measurment.hide()
    else:
        columnpicker.second_measurment.show()
| 3,774 | 1,201 |
"""
Model classes - contains the primary objects that power pylibRETS.
"""
class MetadataSystem(object):
    """RETS METADATA-SYSTEM record; every field starts unset (None)."""

    def __init__(self):
        for field in ('GetSystemID', 'GetSystemDescription', 'GetComments',
                      'GetTimeZoneOffset', 'GetMetadataID', 'GetResourceVersion',
                      'GetResourceDate', 'GetForeignKeyVersion', 'GetForeignKeyDate',
                      'GetFilterVersion', 'GetFilterDate'):
            setattr(self, field, None)
class MetadataResource(object):
    """RETS METADATA-RESOURCE record; fields default to None."""

    def __init__(self):
        for field in ('ResourceID', 'StandardName', 'KeyField'):
            setattr(self, field, None)
class MetadataClass(object):
    """RETS METADATA-CLASS record; fields default to None."""

    def __init__(self):
        for field in ('ClassName', 'StandardName', 'Description', 'VisibleName',
                      'TableVersion', 'TableDate', 'UpdateVersion', 'UpdateDate',
                      'ClassTimeStamp', 'DeletedFlagField', 'DeletedFlagValue',
                      'HasKeyIndex', 'OffsetSupport'):
            setattr(self, field, None)
class MetadataTable(object):
    """RETS METADATA-TABLE (field) record; fields default to None."""

    def __init__(self):
        for field in ('SystemName', 'StandardName', 'LongName', 'DBName',
                      'ShortName', 'MaximumLength', 'DataType', 'Precision',
                      'Searchable', 'Interpretation', 'Alignment', 'UseSeparator',
                      'EditMaskID', 'LookupName', 'MaxSelect', 'Units', 'Index',
                      'Minimum', 'Maximum', 'Default', 'Required', 'SearchHelpID',
                      'Unique', 'UpdatesModTimeStamp', 'ForeignKey', 'ForeignField',
                      'KeyRetrievalQuery', 'KeyRetrievalSelect', 'InKeyIndex',
                      'FilterParentField', 'DefaultSearchOrder', 'Case'):
            setattr(self, field, None)
class MetadataLookup(object):
    """RETS METADATA-LOOKUP record; fields default to None."""

    def __init__(self):
        for field in ('LookupName', 'VisibleName', 'Version', 'Date',
                      'FilterID', 'NotShownByDefault'):
            setattr(self, field, None)
class MetadataLookupType(object):
    """RETS METADATA-LOOKUP_TYPE entry; fields default to None."""

    def __init__(self):
        for field in ('Value', 'LongValue', 'ShortValue'):
            setattr(self, field, None)
class MetadataObject(object):
    """RETS METADATA-OBJECT record; fields default to None."""

    def __init__(self):
        for field in ('ObjectType', 'MIMEType', 'VisibleName', 'Description',
                      'ObjectTimeStamp', 'ObjectCount', 'LocationAvailability',
                      'ObjectData', 'MaxFileSize'):
            setattr(self, field, None)
class LoginResponse(object):
    """Parsed RETS login response; every field starts unset (None)."""

    def __init__(self):
        for field in ('GetMemberName', 'GetUserInfo', 'GetBroker',
                      'GetMetadataVersion', 'GetMetadataTimestamp',
                      'GetMinMetadataTimestamp', 'GetOfficeList', 'GetBalance',
                      'GetTimeout', 'GetPasswordExpire', 'GetActionUrl',
                      'GetChangePasswordUrl', 'GetGetObjectUrl', 'GetLoginUrl',
                      'GetLoginCompleteUrl', 'GetLogoutUrl', 'GetSearchUrl',
                      'GetGetMetadataUrl', 'GetServerInformationUrl',
                      'GetUpdateUrl', 'GetPayloadListUrl', 'GetUserID',
                      'GetUserClass', 'GetUserLevel', 'GetAgentCode',
                      'GetBrokerCode', 'GetBrokerBranch', 'GetMetadataID',
                      'GetWarnPasswordExpirationDays', 'GetStandardNamesVersion',
                      'GetVendorName', 'GetServerProductName',
                      'GetServerProductVersion', 'GetOperatorName', 'GetRoleName',
                      'GetSupportContactInformation',
                      'GetSessionInformationTokens'):
            setattr(self, field, None)
def CreateCapabilityUrls(baseUrl):
    """Placeholder: build capability URLs from *baseUrl* (not implemented)."""
    pass
| 3,717 | 1,303 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# README.md is read at import time and used as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='uumarrty',
    version='0.0.1',
    url='https://github.com/michaelremington2/uumarrty',
    author='Michael Remington and Jeet Sukumaran',
    author_email='michaelremington2@gmail.com',
    license="LICENSE.txt",
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
    ],
    scripts=[
        "bin/simulate_uumarrty.py",
    ],
    test_suite = "tests",
    # src-layout: packages live under src/.
    package_dir={"": "src"},
    description="Agent based simulation of predator prey dynamics.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(where="src"),
    python_requires=">=3.6",
)
import angr
class gen_simproc4v(angr.SimProcedure):
    """Generic no-op angr SimProcedure hook for four-argument functions."""

    def run(self, arg1, ar2, arg3, arg4):
        # NOTE(review): 'ar2' looks like a typo for 'arg2'; the names are
        # unused so behavior is unaffected — confirm before renaming since
        # angr inspects run()'s signature.
        # Returning None makes the hooked call a no-op.
        return
#!/usr/bin/env python
#Boa:App:BoaApp
import wx
import matplotlib as _matplotlib
import pylab as _pylab
import _pylab_colorslider_frame as _pcf; reload(_pcf)
# Reuse an injected _prefs object if one exists; fall back to None so
# LoadColorMap/SaveColorMap degrade gracefully without preferences.
# NOTE(review): bare except also hides unrelated errors — confirm intent.
try: _prefs
except: _prefs = None
# Boa Constructor module registry (generated metadata).
modules ={u'pylab_colorslider_frame': [1,
 'Main frame of Application',
 u'pylab_colorslider_frame.py']}
class BoaApp(wx.App):
    # wx application bootstrap (Boa Constructor generated).
    def OnInit(self):
        # Create and show the main color-slider frame.
        self.main = _pcf.create(None)
        self.main.Show()
        self.SetTopWindow(self.main)
        return True
def main():
    # Run the wx event loop until the main frame closes.
    application = BoaApp(0)
    application.MainLoop()

if __name__ == '__main__':
    main()
#
# This class contains one color point and generates new slider gui's when it's time to modify
#
class ColorPoint:
    # One control point of the colormap; ShowSlider() pops up a GUI editor.
    color = None     # wx.Colour approaching the point from below
    color2 = None    # wx.Colour approaching the point from above
    position = 0.0   # location of the point within [min, max]
    min = 0.0 # in case the user modifies this
    max = 1.0 # in case the user modifies this
    parent = None    # owning GuiColorMap
    slider = None    # currently-open slider frame, or None

    def __init__(self, parent, position, red=0, green=0, blue=255, red2=0, green2=0, blue2=255):
        # just store the local variables
        self.parent = parent
        self.color = wx.Colour(red, green, blue)
        self.color2 = wx.Colour(red2,green2,blue2)
        self.position = position
        return

    def ShowSlider(self, position=[0,0]):
        """
        Creates a color slider GUI object, and pops it up. When the colorslider
        moves, this object's color data is updated.
        """
        # NOTE(review): mutable default [0,0] is only read (rebound to a
        # wx.Point below), so it is harmless here.
        # close/delete any old ones
        self.HideSlider()
        # find out if this is the "main" slider (that appears in the taskbar)
        n = None
        for i in range(len(self.parent.colorpoints)):
            if self == self.parent.colorpoints[i]: n=i
        # modify the style accordingly
        style = 0
        if not n==len(self.parent.colorpoints)-1:
            style = wx.FRAME_NO_TASKBAR|wx.CLIP_CHILDREN|wx.FRAME_FLOAT_ON_PARENT|wx.NO_BORDER
            size = wx.Size(351, 38)
            parent = self.parent.colorpoints[-1].slider # better make the last one first!
        else:
            style = wx.CLIP_CHILDREN|wx.CAPTION|wx.MINIMIZE_BOX|wx.CLOSE_BOX|wx.SYSTEM_MENU
            size = wx.Size(351, 40+35*(len(self.parent.colorpoints)-1))
            parent = wx.GetApp().GetTopWindow()
        # convert the coords to a real position
        position = wx.Point(position[0], position[1])
        # create the GUI object
        self.slider = _pcf.ColorSliderFrame(parent, self, style=style, size=size, position=position)
        # endpoints (first/last) may not have their position edited
        if n in [0, len(self.parent.colorpoints)-1]: self.slider.EnableStuff(False)
        self.slider.Show()

    def HideSlider(self):
        # destroy the slider frame if one is currently up
        if self.slider:
            self.slider.Hide()
            self.slider.Destroy()
            self.slider = None
#
# This class contains a list of color points and a link to a parent image.
# Its job is to update the parent image colormap
#
class GuiColorMap:
    # define the local variables of the class
    colorpoints = []
    image = None

    def __init__(self, image="top", colormap="_last"):
        """
        This class contains a list of color points defining a colormap. It is
        capable of providing GUI sliders to modify the colors and locations of
        the color points in the color map and updating the supplied image on
        the fly.
        To get the initial color from the supplied image, it assumes that
        the red, green, and blue channels have the same set of positions!
        To find the image, try gca().images[0]
        set colormap=None to try and import the current colormap
        """
        if image == "top":
            image = _pylab.gca().images[0]
        # store the reference to the image
        self.image = image
        # get the data for easier coding
        if colormap == None:
            # use the color map from the image if possible
            c = image.cmap._segmentdata
            cr = c['red']
            cg = c['green']
            cb = c['blue']
            # get the number of steps in this cmap
            N = len(cb)
            # loop over the number of entries and generate the list
            self.colorpoints = []
            # try to import the colormap from the image
            for n in range(N):
                # only simple maps where all channels share positions import cleanly
                if cr[n][0] == cb[n][0] and cr[n][0] == cg[n][0]:
                    self.colorpoints.append(ColorPoint(
                        self, cr[n][0],
                        cr[n][1]*255, cg[n][1]*255, cb[n][1]*255,
                        cr[n][2]*255, cg[n][2]*255, cb[n][2]*255))
                else:
                    print "This colormap is too complicated. Switching to default."
                    colormap = "default"
                    break;
        # if we need to, use the default map
        if not colormap == None:
            self.LoadColorMap(colormap)
        # may as well show these guys to the user too
        self.ShowSliders()

    def LoadColorMap(self, name="default"):
        # open the file "[spinmobpath]/colormaps/whatever.txt"
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "r")
            lines = f.readlines()
            f.close()
            # now loop over the colors (lines) and generate a list
            self.colorpoints = []
            for line in lines:
                # split the line by white space
                s = line.split()
                # now create a new color point
                # format: position r g b r2 g2 b2 (7 whitespace-separated values)
                if len(s) == 7:
                    self.colorpoints.append(ColorPoint(self, float(s[0]),
                        float(s[1]), float(s[2]), float(s[3]),
                        float(s[4]), float(s[5]), float(s[6])))
        # use the hard-coded default
        except:
            print "Could not load "+_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt"
            self.colorpoints = [ColorPoint(self, 0.0, 255, 255, 255, 255, 255, 255),
                                ColorPoint(self, 0.5, 0, 0, 255, 0, 0, 255),
                                ColorPoint(self, 1.0, 255, 0, 0, 255, 0, 0)]
        # now update
        self.UpdateImage()

    def SaveColorMap(self, name="_last"):
        # persist the current points in the same 7-column text format
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "w")
            # loop over the color points
            for c in self.colorpoints:
                f.write(str(c.position) + " " +
                        str(c.color.Red()) + " " + str(c.color.Green()) + " " + str(c.color.Blue()) + " " +
                        str(c.color2.Red())+ " " + str(c.color2.Green())+ " " + str(c.color2.Blue()) + "\n")
            f.close()
        except:
            print "Couldn't save last colormap!"

    def UpdateImage(self):
        """
        This takes the current values of the various color points, orders them,
        and updates the colormap of the parent image.
        """
        # first order the list according to the element positions
        # (selection-sort style; nudges duplicate positions apart by 0.0001)
        new_list = []
        while len(self.colorpoints):
            # find the minimum position
            x0 = 2.0
            n0 = 0
            for n in range(len(self.colorpoints)):
                # if this item is smaller than the previous record, store it
                if self.colorpoints[n].position < x0:
                    x0 = self.colorpoints[n].position
                    n0 = n
                # if it's equal to the previous record, make it a little bigger
                # next time around, this can be the new minimum
                elif self.colorpoints[n].position == x0:
                    self.colorpoints[n].position = x0 + 0.0001
                # if it's larger than 1, set it to 1 and knock off the best a little
                if self.colorpoints[n].position > 1.0:
                    self.colorpoints[n].position = 1.0
                    self.colorpoints[n0].position = 1.0-0.0001
            # now we have the minimum index
            new_list.append(self.colorpoints.pop(n0))
        # now set the new list
        self.colorpoints = new_list
        # now generate the colormap from the ordered list
        r = []
        g = []
        b = []
        for point in self.colorpoints:
            r.append((point.position, point.color.Red()/255.0, point.color2.Red()/255.0))
            g.append((point.position, point.color.Green()/255.0, point.color2.Green()/255.0))
            b.append((point.position, point.color.Blue()/255.0, point.color2.Blue()/255.0))
        # store the formatted dictionary
        c = {'red':r, 'green':g, 'blue':b}
        # now set the dang thing
        self.image.set_cmap(_matplotlib.colors.LinearSegmentedColormap('custom', c))
        _pylab.draw()
        self.SaveColorMap()

    def ShowSliders(self):
        """
        This will show all the sliders, tiling them to the right of the figure
        """
        # loop over the points in the list (last first, so it owns the taskbar entry)
        for n in range(len(self.colorpoints)-1,-1,-1): self.ShowSlider(n, "auto")

    def HideSliders(self):
        for p in self.colorpoints: p.HideSlider()

    def ShowSlider(self, n, position="auto"):
        """
        This will show the n'th slider at the specified screen position
        """
        try:
            if position == "auto":
                # get the figure position and size
                p = self.image.figure.canvas.Parent.GetPosition()
                w = self.image.figure.canvas.Parent.GetSize()[0]
                if n==len(self.colorpoints)-1:
                    position = [p[0]+w, p[1]+40*(len(self.colorpoints)-n-1)]
                else:
                    position = [p[0]+w+3, p[1]+65+35*(len(self.colorpoints)-n-2)]
        except:
            print "Can't position slider relative to anything but a wxAgg plot."
        if not hasattr(position, '__iter__'): position = [0,0]
        self.colorpoints[n].ShowSlider(position)

    def HideSlider(self, n):
        self.colorpoints[n].HideSlider()
| 10,370 | 3,367 |
from Data.Drawer import Drawer
from Data.Helper import *
from Pages.PageBase import PageBase
class Reboot(PageBase):
    """Menu page that reboots the device on a long button press."""

    def __init__(self, drawer: Drawer):
        PageBase.__init__(self, drawer)

    def UpdateCanvas(self):
        # Throttle redraws (CanUpdate gates on a 100 ms interval).
        if not self.CanUpdate(100):
            return
        self.drawer.ClearCanvas()
        self.drawer.WriteOnCanvas(".......Reboot.......", line=0)
        self.drawer.WriteOnCanvas(" Hold Button ", line=1)
        self.drawer.WriteOnCanvas(" To Reboot ", line=2)

    def OnLongPress(self):
        self.drawer.ClearCanvas()
        cmd = "sudo reboot now"
        print("REBOOT")
        # NOTE(review): `subprocess` is not imported in this file — presumably
        # supplied by the star import from Data.Helper; confirm.
        subprocess.Popen(cmd, shell = True)
from math import radians, sin, cos, tan
# Read an angle in degrees and report its sine, cosine and tangent.
angulo = float(input('Digite o ângulo que você deseja: '))
rad = radians(angulo)  # convert once; math functions expect radians
seno = sin(rad)
cosseno = cos(rad)
tangente = tan(rad)
print(f'O ângulo de {angulo} tem o SENO de {seno :.2f}!')
print(f'O ângulo de {angulo} tem o COSSENO de {cosseno :.2f}!')
print(f'O ângulo de {angulo} tem a TANGENTE de {tangente :.2f}!')
| 380 | 170 |
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import numpy as np
app = dash.Dash()

# Creating Data — fixed seed makes the scatter reproducible across runs.
np.random.seed(42)
random_x = np.random.randint(1, 101, 100)
random_y = np.random.randint(1, 101, 100)

# everything that we are going to be inserting will be inside this Div for html
# The Graph component is what will receive our Plotly figure
app.layout = html.Div([dcc.Graph(id='scatterplot',
                                 figure={'data': [
                                     go.Scatter(
                                         x=random_x,
                                         y=random_y,
                                         mode='markers',
                                         marker={
                                             'size': 12,
                                             'color': 'rgb(51,204,153)',
                                             'symbol': 'pentagon',
                                             'line': {'width': 2}
                                         }
                                     )],
                                     'layout': go.Layout(title='My Scatterplot',
                                                         xaxis={'title': 'Some X title'})}
                                 ),
                       dcc.Graph(id='scatterplot2',
                                 figure={'data': [
                                     go.Scatter(
                                         x=random_x,
                                         y=random_y,
                                         mode='markers',
                                         marker={
                                             'size': 12,
                                             'color': 'rgb(200,204,53)',
                                             'symbol': 'pentagon',
                                             'line': {'width': 2}
                                         }
                                     )],
                                     'layout': go.Layout(title='My Scatterplot 2',
                                                         xaxis={'title': 'Some X title'})}
                                 )])

if __name__ == '__main__':
    # Dev server on the default host/port.
    app.run_server()
__author__ = 'yinjun'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @param x, an integer
    # @return a ListNode
    def partition(self, head, x):
        """Stable-partition a linked list around the pivot *x*.

        Nodes with val < x come first, then the remaining nodes, each
        group keeping its original relative order. Improvement over the
        original: the existing nodes are relinked in place instead of
        allocating a brand-new copy of the list (O(1) extra space, node
        identity preserved).
        """
        less_head = less_tail = None  # sub-list of nodes with val < x
        ge_head = ge_tail = None      # sub-list of nodes with val >= x
        node = head
        while node is not None:
            nxt = node.next
            node.next = None  # detach so both sub-lists stay terminated
            if node.val < x:
                if less_tail is None:
                    less_head = less_tail = node
                else:
                    less_tail.next = node
                    less_tail = node
            else:
                if ge_tail is None:
                    ge_head = ge_tail = node
                else:
                    ge_tail.next = node
                    ge_tail = node
            node = nxt
        if less_tail is None:
            return ge_head
        less_tail.next = ge_head
        return less_head
| 685 | 242 |
"""
Faça um programa que leia o ano de nascimento de um jovem e informe, de acordo com sua idade
se ele ainda vai se alistar
se é a hora de se alistar
se já passou o tempo de alistar
o programa também deve falar o tempo que falta ou que passou
"""
import datetime
import time
# Military-enlistment age check: computes age from birth year and reports
# whether enlistment (at 18) is upcoming, due now, or overdue.
ano_nasc = int(input('Ano de Nascimento: '))
ano_atual = datetime.date.today().year
idade = ano_atual - ano_nasc
# Cosmetic pause to simulate the "calculation".
print('Um momento estamos fazendo o calculo.\n', end='')
time.sleep(1)
print('.')
time.sleep(1)
print('.')
time.sleep(1)
if idade == 18:
    print('Quem nasceu no ano de {} tem {} anos em {}'.format(ano_nasc, idade, ano_atual))
    print('Você tem que se alistar IMEDIATAMENTE')
elif idade > 18:
    print('Quem nasceu em {} tem {} anos em {}.'.format(ano_nasc, idade, ano_atual))
    print('Você já deveria ter se alistado há {} anos.'.format(idade-18))
    print('Seu alistamento deu-se em {}.'.format(ano_nasc+18))
else:
    print('Quem nasceu em {} tem {} anos em {}.'.format(ano_nasc, idade, ano_atual))
    print('Ainda faltam {} anos para seu alistamento.'.format(18-idade))
    print('Seu alistamento será em {}.'.format(ano_nasc+18))
| 1,134 | 427 |
"""
A class interfce to netvlad based whole image descriptor. To use the
pre-trained network in your application use this code and unit-test
Author : Manohar Kuse <mpkuse@connect.ust.hk>
Created : 20th Aug, 2018
"""
import cv2
import numpy as np
import os
import time
import code
import argparse
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
# TensorFlow version split, e.g. '1.12.0' -> major 1, minor 12.
TF_MAJOR_VERSION = int(tf.__version__.split('.')[0])
TF_MINOR_VERSION = int(tf.__version__.split('.')[1])
from CartWheelFlow import VGGDescriptor
from ColorLUT import ColorLUT
import TerminalColors
tcolor = TerminalColors.bcolors()  # ANSI color codes for console output
class WholeImageDescriptor:
    # Wraps a pre-trained NetVLAD-style whole-image descriptor network.
    # The TF graph and session are built once here and reused for every
    # get_descriptor() call.
    def __init__( self, NET_TYPE, PARAM_K, PARAM_model_restore ):
        self.NET_TYPE = NET_TYPE
        self.PARAM_K = PARAM_K
        self.PARAM_model_restore = PARAM_model_restore
        ## Create Network
        tf_x = tf.placeholder( 'float', [1,240,320,3], name='x' ) #this has to be 3 if training with color images
        is_training = tf.placeholder( tf.bool, [], name='is_training')
        vgg_obj = VGGDescriptor(K=PARAM_K, D=256, N=60*80, b=1)
        tf_vlad_word = vgg_obj.network(tf_x, is_training, net_type=NET_TYPE )
        ## Restore Model
        sess = tf.Session()
        print tcolor.OKGREEN,'Restore model from : ', PARAM_model_restore, tcolor.ENDC
        tensorflow_saver = tf.train.Saver()
        tensorflow_saver.restore( sess, PARAM_model_restore )
        # Keep graph handles for reuse in get_descriptor().
        self.tf_x = tf_x
        self.tf_vlad_word = tf_vlad_word
        self.is_training = is_training
        self.vgg_obj = vgg_obj
        self.sess = sess

    def get_descriptor( self, im ):
        """ im: 1x240x320x3 """
        assert( len(im.shape) == 4 )
        feed_dict = {self.tf_x : im,\
                     self.is_training:True,\
                     self.vgg_obj.initial_t: 0
                     }
        tff_vlad_word, tff_sm = self.sess.run( [self.tf_vlad_word, self.vgg_obj.nl_sm], feed_dict=feed_dict)
        # Hard cluster assignment per 60x80 cell from the soft-max scores.
        Assgn_matrix = np.reshape( tff_sm, [1,60,80,-1] ).argmax( axis=-1 ) #assuming batch size = 1
        return tff_vlad_word, Assgn_matrix
if __name__=='__main__':
    ## Network Params
    NET_TYPE = "resnet6"
    PARAM_K = 16
    PARAM_model_restore = './tfmodels/B_vgg/model-8000'
    # NOTE(review): the line below immediately overrides the one above.
    PARAM_model_restore = './tfmodels/D/model-8000'
    WID_net = WholeImageDescriptor( NET_TYPE, PARAM_K, PARAM_model_restore )
    ## Load Image
    INPUT_FILE_NAME = 'sample_images/a0.jpg'
    print 'Load Image : ', INPUT_FILE_NAME
    IM = cv2.resize( cv2.imread( INPUT_FILE_NAME), (320, 240) )
    im_batch = np.expand_dims( IM.astype('float32'), 0 )
    ## descriptor and association map
    ## tff_vlad_word : 1x4096
    ## Assgn_matrix : 1x60x80
    tff_vlad_word, Assgn_matrix = WID_net.get_descriptor( im_batch )
    ## Visualize Assgn_matrix - as a false color map
    colorLUT = ColorLUT()
    lut = colorLUT.lut( Assgn_matrix[0,:,:] )
    cv2.imshow( 'IM', IM )
    cv2.imshow( 'Assgn_matrix', cv2.resize( lut, (320,240) ) )
    cv2.waitKey(0)
| 3,040 | 1,176 |
import argparse
import os
import time
## Argparser
def str2slist(s):
    """argparse type: parse a comma-separated string into a list of strings.

    Fix: str.replace returns a new string — the original discarded the
    result, so embedded spaces were never actually stripped.
    """
    return s.replace(' ', '').split(',')
def str2ilist(s):
    """argparse type: parse a comma-separated string into a list of ints.

    Fix: str.replace returns a new string — the original discarded the
    result, so "1, 2" would crash int() on " 2".
    """
    return [int(c) for c in s.replace(' ', '').split(',')]
def str2bool(v):
    """argparse type: map 'true'/'True' to True and 'false'/'False' to False.

    :raises ValueError: for any other input. The original used assert(False),
        which silently returns None under ``python -O``; ValueError is what
        argparse expects from a type= callable and turns it into a clean
        usage error.
    """
    if v in ['true', 'True']:
        return True
    elif v in ['false', 'False']:
        return False
    raise ValueError('expected a boolean literal, got %r' % (v,))
# Module-level parser shared by importers; custom argument types are
# registered under the names 'bool', 'slist' and 'ilist' so argument
# specs can use type='bool' etc.
argparser = argparse.ArgumentParser()
argparser.register('type','bool',str2bool)
argparser.register('type','slist', str2slist)
argparser.register('type','ilist', str2ilist)
# Adopted from: http://stackoverflow.com/a/8412405
def rolling_window(l, w_size):
    """Yield every contiguous window of w_size items from l, each as a list."""
    last_start = len(l) - w_size
    for start in range(last_start + 1):
        yield list(l[start:start + w_size])
def striding_windows(l, w_size):
    """Yield consecutive non-overlapping chunks of l, w_size items at a time.

    The final chunk may be shorter than w_size.
    """
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + w_size]
        start += w_size
def check_and_create_dir(dir_path):
    """Create dir_path (with parents) if it does not already exist.

    EAFP fix: the original exists()-then-makedirs() pair could crash when
    two processes raced through the check; here makedirs is attempted
    directly and "already exists" is tolerated.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        # Directory already present (possibly created concurrently) is fine;
        # re-raise genuine failures such as permission errors.
        if not os.path.isdir(dir_path):
            raise
# Adopted from: https://stackoverflow.com/a/21894086
class bidict(dict):
    """A dict that also maintains `inverse`: a value -> [keys] mapping.

    Several keys may share a value, so the inverse maps to a list of keys.
    The inverse is kept in sync by __setitem__/__delitem__; mutating the
    dict through other means (update, pop, ...) would desynchronise it.
    """
    def __init__(self, *args, **kwargs):
        super(bidict, self).__init__(*args, **kwargs)
        self.inverse = {}
        # Seed the inverse from whatever the dict was constructed with.
        for key, value in self.items():
            self.inverse.setdefault(value,[]).append(key)
    def __setitem__(self, key, value):
        # Re-assignment: detach the key from its old value's bucket first.
        if key in self:
            self.inverse[self[key]].remove(key)
        super(bidict, self).__setitem__(key, value)
        self.inverse.setdefault(value,[]).append(key)
    def __delitem__(self, key):
        self.inverse.setdefault(self[key],[]).remove(key)
        # Drop the bucket entirely once the last key for that value is gone.
        if self[key] in self.inverse and not self.inverse[self[key]]:
            del self.inverse[self[key]]
        super(bidict, self).__delitem__(key)
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    for lo in range(0, len(l), n):
        hi = lo + n
        yield l[lo:hi]
class FtnTimer(object):
    """Accumulate elapsed time over repeated start()/end() pairs.

    Usage: call start() then end() around each measured section; get_result()
    reports the mean seconds per completed pair.
    """

    # Fix: time.clock() was removed in Python 3.8. Prefer the monotonic
    # high-resolution perf_counter (3.3+) and only fall back to clock on
    # legacy interpreters; the `or` keeps time.clock unevaluated elsewhere.
    _clock = staticmethod(getattr(time, 'perf_counter', None) or time.clock)

    def __init__(self):
        self.tot_time = 0   # summed duration of all completed intervals
        self.tot_cnt = 0    # number of completed start()/end() pairs
        self.curr_time = 0  # kept for backward compatibility (unused here)

    def start(self):
        """Mark the beginning of a timed interval."""
        self.start_time = self._clock()

    def end(self):
        """Close the interval opened by start() and fold it into the totals."""
        end_time = self._clock()
        self.tot_time += end_time - self.start_time
        self.tot_cnt += 1

    def get_result(self):
        """Return {'average_time': mean seconds per interval, or None if no
        interval was completed}."""
        if not self.tot_cnt:
            avg_time = None
        else:
            avg_time = self.tot_time / self.tot_cnt
        res = {
            'average_time': avg_time
        }
        return res
| 2,389 | 854 |
# Download data, unzip, etc.
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as st
# Set some parameters to apply to all plots. These can be overridden
# in each plot if desired
import matplotlib
# Plot size to 14" x 7"
matplotlib.rc('figure', figsize = (14, 7))
# Font size to 14
matplotlib.rc('font', size = 14)
# Do not display top and right frame lines
matplotlib.rc('axes.spines', top = False, right = False)
# Remove grid lines
matplotlib.rc('axes', grid = False)
# Set backgound color to white
matplotlib.rc('axes', facecolor = 'white')
# Single shared axes: lineplotCI/add_plot below all draw onto this one
# subplot so successive calls overlay their curves.
_, ax = plt.subplots()
# Define a function for the line plot with intervals
def lineplotCI(x_data, y_data, low_CI, upper_CI, minimum, maximum, x_label, y_label, title, color, file_name):
    """Draw one labelled curve with a shaded band and min/max quantile lines.

    Draws onto the module-level `ax`, so repeated calls overlay series.
    :param x_data: x coordinates shared by all series.
    :param y_data: central line, labelled with file_name in the legend.
    :param low_CI: lower edge of the shaded band.
    :param upper_CI: upper edge of the shaded band.
    :param minimum: thin line labelled '5% quantile'.
    :param maximum: thin line labelled '95% quantile'.
    """
    # Create the plot object
    # Plot the data, set the linewidth, color and transparency of the
    # line, provide a label for the legend
    ax.plot(x_data, y_data, lw = 3, color = color, alpha = 1, label = file_name)
    ax.plot(x_data, minimum, lw=1, color=color, alpha=1, label='5% quantile')
    ax.plot(x_data, maximum, lw=1, color=color, alpha=1, label='95% quantile')
    # Shade the confidence interval
    ax.fill_between(x_data, low_CI, upper_CI, color=color, alpha=0.1, label='25-75 quantile')
    # Label the axes and provide a title
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    # Display legend
    ax.legend(loc = 'best')
def add_plot(csv_name, color):
    """Load a CSV of reward curves and overlay mean +/- one std on `ax`.

    Statistics are taken per column (axis=0) over 400 columns — presumably
    rows are runs and columns are episodes; verify against the CSV writer.
    NOTE(review): the quantile/median/t-interval values below are only used
    by the commented-out quantile variant of the plot; kept so it can be
    re-enabled easily.
    """
    dataset = pd.read_csv(csv_name, header=None)
    mean = dataset.mean(axis=0)
    std = dataset.std(axis=0)
    upper = mean + std
    lower = mean - std
    upper_quantile = dataset.quantile(0.75)
    median = dataset.quantile(0.5)
    lower_quantile = dataset.quantile(0.25)
    max_quantile = dataset.quantile(0.95)
    min_quantile = dataset.quantile(0.05)
    lower_interval, upper_interval = st.t.interval(0.95, 99, loc=mean, scale=std)
    # Call the function to create plot
    # lineplotCI(x_data = list(range(0, 400))
    #            , y_data = median
    #            , low_CI=lower_quantile
    #            , upper_CI=upper_quantile
    #            , minimum = min_quantile
    #            , maximum = max_quantile
    #            , x_label='Episodes'
    #            , y_label='Value of Policy'
    #            , title='Value of policy over time'
    #            , color=color)
    lineplotCI(x_data=list(range(0, 400))
               , y_data=mean
               , low_CI=lower
               , upper_CI=upper
               , minimum=min_quantile
               , maximum=max_quantile
               , x_label='Episodes'
               , y_label='Value of Policy'
               , title='Value of policy over time'
               , file_name=csv_name
               , color=color)
# add_plot("q_learning_epsilon_rewards.csv", '#539caf')
# Overlay both learners' reward curves on the shared axes, then display.
add_plot("q_learning_epsilon_rewards.csv", '#999111')
add_plot("double_q_epsilon_rewards.csv", '#990a11')
plt.show()
__author__ = 'croman'
from pipeline import pipe
from lxml import etree
import rdflib
def ner(datasetfile, format):
    """Run the NER pipeline over a tweet dataset and print tagged results.

    :param datasetfile: path to the dataset ('xml' tweet dump or 'nif' n3/ttl).
    :param format: 'xml' or 'nif'; selects how tweets are extracted.

    Tweets are concatenated one per line, fed to pipe.main, and the
    token/tag output lines are re-grouped per tweet by accumulating
    character lengths (whitespace excluded).
    """
    tweets = ""
    tweetids = []
    if format == 'xml':
        dataset = etree.parse(datasetfile)
        for tweet in dataset.xpath('//Tweet'):
            tweetText = tweet.xpath('./TweetText/text()')[0]
            tweets += tweetText+"\n"
            tweetids.append(tweet.xpath('./TweetId/text()')[0])
        tweets = tweets.encode('utf-8')
    elif format == "nif":
        tweetdict = {}
        a = rdflib.Graph()
        a.parse(datasetfile, format='n3')
        # Subjects ending in ',' with an isString predicate carry the tweet
        # body; the tweet id is embedded in the subject URI after '.xml/'.
        for s, p, o in a:
            if s.endswith(',') and p.endswith('isString'):
                tweetid = s.split('#')[0].split('.xml/')[1]
                tweetdict[tweetid] = o
        # Sorted so ids and text lines stay aligned.
        for key in sorted(tweetdict):
            tweetids.append(key)
            tweets += tweetdict[key]+'\n'
        tweets = tweets.encode('utf-8')
    print tweets
    # Per-tweet character count (whitespace excluded) used to re-segment
    # the pipeline output below.
    indexes = []
    tweetlines = tweets.split('\n')
    for t in tweetlines:
        tweetlength = 0
        for word in t.split():
            tweetlength += len(word)
        print tweetlength
        indexes.append(tweetlength)
    options = {'log':'DEBUG', 'conf': 'pipeline/settings.py', 'text': tweets}
    results = pipe.main(options, [])
    print 'results: ' + results
    x = 0
    finalresults = ''
    resultslines = results.splitlines()
    finalresults = ''
    for i in indexes:
        print i
        length = 0
        tweetresult = ''
        print x
        print resultslines[x]
        # Consume result lines (token\ttag) until this tweet's character
        # budget is exhausted; blank lines are skipped.
        while length < i:
            if resultslines[x] != '':
                entity = resultslines[x].split('\t')
                print entity
                length += len(entity[0])
                tweetresult += entity[0]+'/'+entity[1]+' '
                x += 1
                #print 'x=', x
                print 'length: ', length
            else:
                print 'ok'
                x += 1
        print tweetresult
        finalresults += tweetresult[:-1]+' END\n'
    print finalresults
# Module runs the NIF dataset on import/execution.
ner("Mena Collection.ttl", "nif")
"""__author__ = 'croman'
from pipeline import pipe
from lxml import etree
import rdflib
def ner(datasetfile, format):
tweets = ""
tweetids = []
if format == 'xml':
dataset = etree.parse(datasetfile)
for tweet in dataset.xpath('//Tweet'):
tweetText = tweet.xpath('./TweetText/text()')[0]
tweets += tweetText+"\n"
tweetids.append(tweet.xpath('./TweetId/text()')[0])
tweets = tweets.encode('utf-8')
elif format == "nif":
tweetdict = {}
a = rdflib.Graph()
a.parse(datasetfile, format='n3')
for s, p, o in a:
if s.endswith(',') and p.endswith('isString'):
tweetid = s.split('#')[0].split('.xml/')[1]
tweetdict[tweetid] = o
for key in sorted(tweetdict):
tweetids.append(key)
tweets += tweetdict[key]+'\n'
tweets = tweets.encode('utf-8')
print tweets
indexes = []
tweetlines = tweets.split('\n')
for t in tweetlines:
tweetlength = 0
for word in t.split():
tweetlength += len(word)
indexes.append(tweetlength)
options = {'log':'DEBUG', 'conf': 'pipeline/settings.py', 'text': tweets}
results = pipe.main(options, [])
print results
x = 0
finalresults = ''
for i in indexes:
print i
resultslines = results.split('\n')
length = 0
while length < i:
entity = resultslines[x].split('\t')
print resultslines[x]
length += len(entity[0])
if len(entity)>1:
finalresults += entity[0]+'/'+entity[1]+' '
x += 1
print 'x=', x
print 'length: ', length
finalresults = finalresults[:-1]+' END\n'
print finalresults
ner("Mena Collection.ttl", "nif")""" | 3,953 | 1,244 |
import pandas as pd
import random
def generate_teams(n_teams=128, n_countries=3, csv_file="times.csv"):
    """Generate random teams, write them to csv_file and return the DataFrame.

    :param n_teams: number of teams to create.
    :param n_countries: nationality ids are drawn from [0, n_countries].
    :param csv_file: destination CSV path.
    :return: the generated pandas DataFrame.
    """
    # Build all rows first: DataFrame.append was deprecated in pandas 1.4
    # and removed in 2.0, and appending row-by-row is quadratic anyway.
    rows = [{
        'nome': 'Time ' + str(i),
        'estadio': random.randint(1000, 50000),
        'nacionalidade': random.randint(0, n_countries),
        'score': random.randint(0, 20),
    } for i in range(n_teams)]
    times = pd.DataFrame(rows, columns=['nome', 'estadio', 'nacionalidade', 'score'])
    times.to_csv(csv_file)
    return times
def generate_players(n_teams=128, n_countries=3, csv_file="jogadores.csv"):
    """Generate random players for every team, write csv_file, return the frame.

    Behaviour codes ('comportamento'):
        0 - sarrafeiro
        1 - caceteiro
        2 - cordeirinho
        3 - cavalheiro
        4 - fair play
    Position codes ('posicao') and squad counts:
        0 - goleiro  3
        1 - defensor 7
        2 - meio     7
        3 - atacante 7
    :param n_teams: number of teams (24 players each).
    :param n_countries: nationality ids are drawn from [0, n_countries].
    :param csv_file: destination CSV path.
    :return: the generated pandas DataFrame.
    """
    # Collect rows in a list: DataFrame.append was deprecated in pandas 1.4
    # and removed in 2.0, and per-row append is quadratic.
    numero_por_posicao = [3, 7, 7, 7]
    rows = []
    k = 0  # global running player index used in the name
    for i in range(n_teams):
        for p in range(4):
            for j in range(numero_por_posicao[p]):
                rows.append({
                    'nome': 'Jogador ' + str(k),
                    'nacionalidade': random.randint(0, n_countries),
                    'idade': random.randint(18, 45),
                    'estrela': random.randint(1, 100) > 95,  # ~5% stars
                    'time': i,
                    'posicao': p,
                    'comportamento': random.randint(0, 4),
                    'forca': random.randint(1, 50),
                })
                k = k + 1
    jogadores = pd.DataFrame(
        rows,
        columns=['nome', 'nacionalidade', 'idade', 'estrela', 'time', 'posicao', 'comportamento', 'forca'])
    jogadores.to_csv(csv_file)
    return jogadores
def generate_coaches(n_teams=128, csv_file="tecnicos.csv"):
    """Generate one random coach per team, write csv_file, return the frame.

    :param n_teams: number of coaches/teams.
    :param csv_file: destination CSV path.
    :return: the generated pandas DataFrame (added for consistency with
        generate_teams/generate_players; previous callers ignored None).
    """
    # List-of-dicts construction: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0.
    rows = [{
        'nome': 'Tecnico ' + str(i),
        'time': i,
        'idade': random.randint(30, 70),
        'comportamento': random.randint(0, 4),
    } for i in range(n_teams)]
    tecnicos = pd.DataFrame(rows, columns=['nome', 'time', 'idade', 'comportamento'])
    tecnicos.to_csv(csv_file)
    return tecnicos
if __name__ == "__main__":
    # Only the teams CSV is regenerated by default; players/coaches
    # generators are invoked manually when needed.
    generate_teams(csv_file="../../data/times.csv")
#!/usr/bin/python
"""
Ansible module for rpm-based systems determining existing package version information in a host.
"""
from ansible.module_utils.basic import AnsibleModule
# rpm bindings exist only on RPM-based hosts; remember the import failure
# so main() can report it through Ansible instead of crashing at import.
IMPORT_EXCEPTION = None
try:
    import rpm  # pylint: disable=import-error
except ImportError as err:
    IMPORT_EXCEPTION = err  # in tox test env, rpm import fails
class RpmVersionException(Exception):
    """Base exception class for package version problems.

    ``problem_pkgs`` carries the offending packages (a list of names or a
    dict of version details) alongside the human-readable message.
    """

    def __init__(self, message, problem_pkgs=None):
        super(RpmVersionException, self).__init__(message)
        self.problem_pkgs = problem_pkgs
def main():
    """Entrypoint for this Ansible module.

    Reads 'package_list' (list of {"name": ..., "version": ...} dicts),
    compares it against the host's rpm database and fails the task when
    packages are missing or at the wrong version.
    """
    module = AnsibleModule(
        argument_spec=dict(
            package_list=dict(type="list", required=True),
        ),
        supports_check_mode=True
    )
    # Without the rpm bindings we cannot inspect the host's package DB.
    if IMPORT_EXCEPTION:
        module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
    # determine the packages we will look for
    pkg_list = module.params['package_list']
    if not pkg_list:
        module.fail_json(msg="package_list must not be empty")
    # get list of packages available and complain if any
    # of them are missing or if any errors occur
    try:
        pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
        _check_pkg_versions(pkg_versions, _to_dict(pkg_list))
    except RpmVersionException as excinfo:
        module.fail_json(msg=str(excinfo))
    module.exit_json(changed=False)
def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
def _retrieve_expected_pkg_versions(expected_pkgs_dict):
    """Search for installed packages matching given pkg names
    and versions. Returns a dictionary: {pkg_name: [versions]}"""
    transaction = rpm.TransactionSet()
    found = {}
    for name in expected_pkgs_dict:
        matches = transaction.dbMatch("name", name)
        if not matches:
            continue
        # dbMatch may return near-misses; keep exact name matches only.
        for header in matches:
            if header['name'] != name:
                continue
            found.setdefault(name, []).append(header['version'])
    return found
def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
    """Validate installed versions against the expected ones.

    :param found_pkgs_dict: {pkg_name: [installed version strings]}
    :param expected_pkgs_dict: {pkg_name: {"name": ..., "version": ...}}
    :raises RpmVersionException: first for packages not installed at all,
        then for packages whose major.minor version does not match.
    """
    invalid_pkg_versions = {}
    not_found_pkgs = []
    for pkg_name, pkg in expected_pkgs_dict.items():
        if not found_pkgs_dict.get(pkg_name):
            not_found_pkgs.append(pkg_name)
            continue
        # Comparison is on the major.minor prefix only (see _parse_version).
        found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
        expected_version = _parse_version(pkg["version"])
        if expected_version not in found_versions:
            invalid_pkg_versions[pkg_name] = {
                "found_versions": found_versions,
                "required_version": expected_version,
            }
    if not_found_pkgs:
        raise RpmVersionException(
            '\n'.join([
                "The following packages were not found to be installed: {}".format('\n '.join([
                    "{}".format(pkg)
                    for pkg in not_found_pkgs
                ]))
            ]),
            not_found_pkgs,
        )
    if invalid_pkg_versions:
        raise RpmVersionException(
            '\n '.join([
                "The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
                    " \n{}\n Required version: {}\n Found versions: {}".format(
                        pkg_name,
                        pkg["required_version"],
                        ', '.join([version for version in pkg["found_versions"]]))
                    for pkg_name, pkg in invalid_pkg_versions.items()
                ]))
            ]),
            invalid_pkg_versions,
        )
def _parse_version(version_str):
segs = version_str.split('.')
if not segs or len(segs) <= 2:
return version_str
return '.'.join(segs[0:2])
if __name__ == '__main__':
main()
| 3,988 | 1,197 |
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CAMEO: Computer Aided Metabolic Engineering & Optimization
Cameo is a high-level python library developed to aid the in silico
strain design process in metabolic engineering projects. The library
provides a modular architecture that enables the efficient construction
of custom analysis workflows.
Example
-------
from cameo import load_model
# load a model from SBML format (can be found under cameo/tests/data)
model = load_model('EcoliCore.xml')
# optimize the model and print the objective value
solution = model.optimize()
print 'Objective value:', solution.f
# Determine a set of gene deletions that will optimize the production
# of a desired compound
from cameo.strain_design.heuristic import GeneKnockoutOptimization
from cameo.strain_design.heuristic.objective_functions import biomass_product_coupled_yield
from cameo.flux_analysis.simulation import fba
objective = biomass_product_coupled_yield("Ec_biomass_iJO1366_core_53p95M",
"EX_succ_lp_e_rp_", "EX_glc_lp_e_rp_")
optimization = GeneKnockoutOptimization(model=model, objective_function=of,
simulation_method=fba, heuristic_method=inspyred.ec.GA)
optimization.run(max_evaluations=2000, n=1,
mutation_rate=0.3, view=cameo.parallel.SequentialView(),
product="EX_succ_lp_e_rp_", num_elites=1)
"""
import os
import sys
from cameo import config
from cameo.util import get_system_info, in_ipnb
# Version-portable "is this module importable?" helper: imp on Python 2,
# importlib's find_loader/find_spec on Python 3 (find_loader was
# deprecated after 3.3 in favour of find_spec).
if sys.version_info[0] == 2:
    import imp

    def find_module(name):
        """Return True when *name* can be imported (Python 2 / imp)."""
        try:
            imp.find_module(name)
            return True
        except ImportError:
            return False
elif sys.version_info[0] == 3:
    if sys.version_info[1] <= 3:
        from importlib import find_loader as _find
    else:
        from importlib.util import find_spec as _find

    def find_module(name):
        """Return True when *name* can be imported (Python 3 / importlib)."""
        return _find(name) is not None
_cameo_path = __path__[0]
_cameo_data_path = os.path.join(_cameo_path, 'data')
# fix - if matplotlib is installed it is not possible to import cameo without importing matplotlib on jupyter notebook.
if find_module("matplotlib") and in_ipnb():
    from IPython import get_ipython
    ipython = get_ipython()
    ipython.magic("matplotlib inline")
system_info = get_system_info()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Public re-exports that make up the package's top-level API.
from cameo.io import load_model
from cameo import models
from .flux_analysis.analysis import flux_variability_analysis, phenotypic_phase_plane
from .flux_analysis.simulation import fba, pfba
# NOTE(review): this _version import/del block duplicates the one above —
# harmless but redundant; candidate for cleanup.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Keep the module namespace clean of setup-only helpers.
del os, sys, in_ipnb, get_system_info, find_module
| 3,335 | 1,050 |
# -*- coding:utf-8 -*-
import logging
import re
from time import sleep
import requests
import urllib3
from app.utils.spider_utils import getHtmlTree, verifyProxyFormat
from app.utils.web_request import WebRequest
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Module-wide logging: timestamped, with file/line/level in every record.
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s.%(msecs).03d - %(filename)s:%(lineno)d %(levelname)s]: %(message)s')
log = logging.getLogger(__name__)
class FetchFreeProxy(object):
    """Scrapers for public free-proxy listing sites.

    Each static method is a generator yielding candidate proxies as
    'ip:port' strings; callers validate them with verifyProxyFormat
    (see checkAllProxy / checkSingleProxy below).
    """

    @staticmethod
    def ip66(count=20):
        """
        66ip proxy site http://www.66ip.cn/
        :param count: number of proxies to request
        :return: generator of 'ip:port' strings
        """
        urls = [
            "http://www.66ip.cn/nmtq.php?getnum=60&isp=0&anonymoustype=0&start=&ports=&export=&ipaddress=&area=1&proxytype=2&api=66ip"
        ]
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
                   'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Accept-Language': 'zh-CN,zh;q=0.8'}
        try:
            import js2py
            session = requests.Session()
            session.verify = False
            # ----------------------------- 2019-08-16 earliest version
            # src = session.get("http://www.66ip.cn/", headers=headers).text
            #
            # src = src.split("</script>")[0] + '}'
            # src = src.replace("<script>", "function test() {")
            # src = src.replace("while(z++)try{eval(", ';var num=10;while(z++)try{var tmp=')
            # src = src.replace(");break}", ";num--;if(tmp.search('cookie') != -1 | num<0){return tmp}}")
            # ctx = js2py.eval_js(src)
            # src = ctx.test()
            # src = src[src.find("document.cookie="): src.find("};if((")]
            # src = src.replace("document.cookie=", "")
            # src = "function test() {var window={}; return %s }" % src
            # cookie = js2py.eval_js(src).test()
            # js_cookie = cookie.split(";")[0].split("=")[-1]
            # ----------------------------- 2019-08-16 newer site version requires cracking the cookie
            # content = ''.join(re.findall('<script>(.*?)</script>', content))
            # function_js = content.replace('eval', 'return')
            # function_content = "function getClearance(){" + function_js + "};"
            # self.context.execute(function_content)
            # # first-stage decode result
            # decoded_result = self.context.getClearance()
            # function_js_result = 'var a' + decoded_result.split('document.cookie')[1].split("Path=/;'")[
            #     0] + "Path=/;';return a;"
            # # s = re.sub(r'document.create.*?firstChild.href', '"{}"'.format(self.start_url), s)
            # function_content_result = "function getClearanceResult(){" + function_js_result + "};"
            # self.context.execute(function_content_result)
            # # second-stage decode result
            # decoded_content = self.context.getClearanceResult()
            # jsl_clearance = decoded_content.split(';')[0]
        except Exception as e:
            print(e)
            return
        for url in urls:
            try:
                # cookies={"__jsl_clearance": js_cookie}
                # NOTE(review): the url contains no '{}' placeholder, so
                # .format(count) is a no-op here — getnum is fixed at 60.
                html = session.get(url.format(count), headers=headers).text
                ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", html)
                for ip in ips:
                    yield ip.strip()
            except Exception as e:
                print(e)
                pass

    @staticmethod
    def goubanjia():
        """
        goubanjia http://www.goubanjia.com/
        :return: generator of 'ip:port' strings
        """
        url = "http://www.goubanjia.com/"
        tree = getHtmlTree(url)
        proxy_list = tree.xpath('//td[@class="ip"]')
        # The page mixes in hidden decoy digits, so a naive scrape picks up
        # extra numbers and dots; content inside <p style="display:none;">
        # must be filtered out.
        xpath_str = """.//*[not(contains(@style, 'display: none'))
                        and not(contains(@style, 'display:none'))
                        and not(contains(@class, 'port'))
                        ]/text()
                    """
        for each_proxy in proxy_list:
            try:
                # The ':' sits bare under the td while the digits live in
                # nested div/span/p elements: join the visible text pieces to
                # recover the ip, then decode the port separately.
                ip_addr = ''.join(each_proxy.xpath(xpath_str))
                # The port rendered in the HTML is a random decoy; the real
                # port is encoded in the letters after "port " in the class,
                # e.g.:
                # <span class="port CFACE">9054</span>
                # where CFACE decodes (base-10 digits from letters, /8) to 3128.
                port = 0
                for _ in each_proxy.xpath(".//span[contains(@class, 'port')]"
                                          "/attribute::class")[0]. \
                        replace("port ", ""):
                    port *= 10
                    port += (ord(_) - ord('A'))
                port /= 8
                yield '{}:{}'.format(ip_addr, int(port))
            except Exception as e:
                pass

    @staticmethod
    def kuaidaili():
        """
        kuaidaili https://www.kuaidaili.com
        """
        url_list = [
            'https://www.kuaidaili.com/free/inha/',
            'https://www.kuaidaili.com/free/intr/'
        ]
        for url in url_list:
            tree = getHtmlTree(url)
            proxy_list = tree.xpath('.//table//tr')
            sleep(1)  # must sleep, otherwise the second request returns no data
            for tr in proxy_list[1:]:
                yield ':'.join(tr.xpath('./td/text()')[0:2])

    @staticmethod
    def coderbusy():
        """
        CoderBusy proxy list https://proxy.coderbusy.com/
        :return: generator of 'ip:port' strings
        """
        urls = ['https://proxy.coderbusy.com/']
        for url in urls:
            tree = getHtmlTree(url)
            proxy_list = tree.xpath('.//table//tr')
            for tr in proxy_list[1:]:
                tr_data=tr.xpath('./td/text()')
                ip_port=tr_data[0:2]
                location=tr_data[-1].strip()
                # Keep only proxies hosted on major Chinese clouds/carriers.
                if location in ['腾讯云','阿里云','移动','联通','电信', '世纪互联']: yield ':'.join(ip_port)
                # yield ':'.join(tr.xpath('./td/text()')[0:2])

    @staticmethod
    def ip3366():
        """
        ip3366 cloud proxy http://www.ip3366.net/free/
        :return: generator of 'ip:port' strings
        """
        urls = ['http://www.ip3366.net/free/?stype=1',
                "http://www.ip3366.net/free/?stype=2"
                ]
        request = WebRequest()
        for url in urls:
            r = request.get(url, timeout=10)
            proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
            for proxy in proxies:
                yield ":".join(proxy)

    @staticmethod
    def jiangxianli(page_count=2):
        """
        http://ip.jiangxianli.com/?page=
        free proxy library
        :return: generator of 'ip:port' strings
        """
        for i in range(1, page_count + 1):
            url = 'http://ip.jiangxianli.com/?page={}'.format(i)
            html_tree = getHtmlTree(url)
            tr_list = html_tree.xpath("/html/body/div[1]/div/div[1]/div[2]/table/tbody/tr")
            if len(tr_list) == 0:
                continue
            for tr in tr_list:
                # td[2] is the ip, td[3] the port.
                yield tr.xpath("./td[2]/text()")[0] + ":" + tr.xpath("./td[3]/text()")[0]

    @staticmethod
    def data5u():
        '''
        data5u proxy site (10 free entries)
        :return: generator of 'ip:port' strings
        '''
        url_list = [
            'http://www.data5u.com/',
        ]
        for url in url_list:
            html_tree = getHtmlTree(url)
            ul_list = html_tree.xpath('//ul[@class="l2"]')
            for ul in ul_list:
                try:
                    yield ':'.join(ul.xpath('.//li/text()')[0:2])
                except Exception as e:
                    print(e)

    @staticmethod
    def xicidaili(page_count=1):
        """Scrape xicidaili's high-anonymity listing, page by page."""
        url_list = [
            'http://www.xicidaili.com/nn/',  # high anonymity
        ]
        for each_url in url_list:
            for i in range(1, page_count + 1):
                page_url = each_url + str(i)
                tree = getHtmlTree(page_url)
                proxy_list = tree.xpath('.//table[@id="ip_list"]//tr[position()>1]')
                for proxy in proxy_list:
                    try:
                        yield ':'.join(proxy.xpath('./td/text()')[0:2])
                    except Exception as e:
                        pass
# @staticmethod
# def proxylistplus():
# urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1']
# request = WebRequest()
# for url in urls:
# r = request.get(url)
# proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
# for proxy in proxies:
# yield ':'.join(proxy)
# @staticmethod
# def iphai():
# """
# IP海 http://www.iphai.com/free/ng
# :return:
# """
# urls = [
# 'http://www.iphai.com/free/ng',
# 'http://www.iphai.com/free/np',
# 'http://www.iphai.com/free/wg',
# 'http://www.iphai.com/free/wp'
# ]
# request = WebRequest()
# for url in urls:
# r = request.get(url, timeout=10)
# proxies = re.findall(r'<td>\s*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*?</td>[\s\S]*?<td>\s*?(\d+)\s*?</td>',
# r.text)
# for proxy in proxies:
# yield ":".join(proxy)
# @staticmethod
# def ip181(days=1):
# url = 'http://www.ip181.com/'
# html_tree = getHtmlTree(url)
# try:
# tr_list = html_tree.xpath('//tr')[1:]
# for tr in tr_list:
# yield ':'.join(tr.xpath('./td/text()')[0:2])
# except Exception as e:
# pass
# @staticmethod
# def mimiip():
# url_gngao = ['http://www.mimiip.com/gngao/%s' % n for n in range(1, 10)] # 国内高匿
# url_gnpu = ['http://www.mimiip.com/gnpu/%s' % n for n in range(1, 10)] # 国内普匿
# url_gntou = ['http://www.mimiip.com/gntou/%s' % n for n in range(1, 10)] # 国内透明
# url_list = url_gngao + url_gnpu + url_gntou
#
# request = WebRequest()
# for url in url_list:
# r = request.get(url)
# proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W].*<td>(\d+)</td>', r.text)
# for proxy in proxies:
# yield ':'.join(proxy)
# @staticmethod
# def xundaili():
# '''
# 讯代理
# :return:
# '''
# url = 'http://www.xdaili.cn/ipagent/freeip/getFreeIps?page=1&rows=10'
# request = WebRequest()
# try:
# res = request.get(url).json()
# for row in res['RESULT']['rows']:
# yield '{}:{}'.format(row['ip'], row['port'])
# except Exception as e:
# pass
# @staticmethod
# def cnproxy():
# urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218']
# request = WebRequest()
# for url in urls:
# r = request.get(url)
# proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W]<td>(\d+)</td>', r.text)
# for proxy in proxies:
# yield ':'.join(proxy)
# @staticmethod
# def proxylist():
# urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)]
# request = WebRequest()
# import base64
# for url in urls:
# r = request.get(url)
# proxies = re.findall(r"Proxy\('(.*?)'\)", r.text)
# for proxy in proxies:
# yield base64.b64decode(proxy).decode()
def checkAllProxy():
    """Run every FetchFreeProxy fetcher and log how many well-formed
    proxies each one yields.

    Returns:
        None
    """
    import inspect
    member_list = inspect.getmembers(FetchFreeProxy, predicate=inspect.isfunction)
    proxy_count_dict = dict()
    for func_name, func in member_list:
        log.debug(u"开始运行代理: {}".format(func_name))
        try:
            # Drain the generator, keeping only well-formed 'ip:port' strings.
            proxy_list = [_ for _ in func() if verifyProxyFormat(_)]
            proxy_count_dict[func_name] = len(proxy_list)
        except Exception as e:
            log.error(u"代理获取函数 {} 运行出错!".format(func_name))
            log.error(str(e))
    log.info(u"所有函数运行完毕 " + "***" * 5)
    # Summary pass: fetchers that errored report 0.
    for func_name, func in member_list:
        log.debug(u"函数: {n}, 获取到代理数: {c}".format(n=func_name, c=proxy_count_dict.get(func_name, 0)))
def checkSingleProxy(func):
    """Exercise one FetchFreeProxy fetcher and log each well-formed proxy.

    Args:
        func: one of the FetchFreeProxy static methods (a generator factory).
    Returns:
        None
    """
    func_name = getattr(func, '__name__', "None")
    log.info("start running func: {}".format(func_name))
    count = 0
    for proxy in func():
        if verifyProxyFormat(proxy):
            log.debug("{} fetch proxy: {}".format(func_name, proxy))
            count += 1
    log.debug("{n} completed, fetch proxy number: {c}".format(n=func_name, c=count))
if __name__ == '__main__':
# proxylistplus(FetchFreeProxy.proxylistplus)
print(checkSingleProxy(FetchFreeProxy.coderbusy))
| 12,967 | 4,678 |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode[]} lists
    # @return {ListNode}
    def mergeKLists(self, lists):
        """Merge k sorted singly-linked lists into one sorted list.

        Maintains a min-heap over the current head of each list: O(N log k)
        overall, instead of re-sorting the whole list of heads after every
        emitted node (O(N k log k)) as the original did.

        :param lists: list of ListNode heads (entries may be None).
        :return: head of the merged list, or None when all inputs are empty.
        """
        import heapq  # local import: this file has no import section

        live_heads = [node for node in lists if node]
        if not live_heads:
            return None
        # (val, tie, node): the monotonically increasing tie-breaker keeps
        # the heap from ever comparing ListNode objects (not orderable).
        heap = [(node.val, i, node) for i, node in enumerate(live_heads)]
        heapq.heapify(heap)
        tie = len(heap)
        dummy = ListNode(0)
        tail = dummy
        while heap:
            _, _, node = heapq.heappop(heap)
            tail.next = node
            tail = node
            if node.next:
                heapq.heappush(heap, (node.next.val, tie, node.next))
                tie += 1
        tail.next = None  # detach the last node from its old successor
        return dummy.next
| 786 | 231 |
def notas(*n, show=False):
    """Summarise a set of grades into a dict.

    :param n: grades, passed variadically; as a backward-compatible
        convenience a single list/tuple argument is also accepted and
        unpacked (the original crashed in sum() when given a list).
    :param show: when True, also include the student's situation.
    :return: dict with 'total', 'maior', 'menor', 'media' and, if show,
        'situação'.
    """
    if len(n) == 1 and isinstance(n[0], (list, tuple)):
        n = tuple(n[0])
    dados = dict()
    dados['total'] = len(n)
    dados['maior'] = max(n)
    dados['menor'] = min(n)
    dados['media'] = sum(n)/dados['total']
    if show:
        if dados['media'] >= 7:
            dados['situação'] = 'BOA !'
        elif 7 > dados['media'] > 5:
            dados['situação'] = 'RAZOAVEL !'
        elif dados['media'] <= 5:
            # Fixed: the original stored this under the misspelled key
            # 'situaçãos', so callers never saw the bad-grade situation.
            dados['situação'] = 'RUIM !'
    return dados
# --- interactive driver ----------------------------------------------------
user = list()   # grades typed by the user
t = bool()      # whether to include the situation in the summary
while True:
    user.append(float(input('Informe uma nota: ')))
    resp = ' '
    # Keep asking until a valid S/N answer arrives.
    while resp not in 'SsNn':
        resp = (str(input('Deseja continuar: [S/N] '))).strip()[0]
        if resp in 'Ss':
            break
        if resp in 'Nn':
            break
        print('\033[31m:<errozin>: Informe apenas os valores S ou N !\033[m')
    if resp in 'Nn':
        break
most = ' '
while most not in 'SsNn':
    most = (str(input('Deseja mostra a situação? [S/N] '))).strip()[0]
    if most in 'Ss':
        t = True
        break
    elif most in 'Nn':
        t = False
        break
    print('\033[31m:<errozin>: Informe apenas os valores S ou N ! \033[m')
# Fixed: the grades must be unpacked into notas(); passing the list as a
# single positional argument made sum()/max() fail inside the function.
tot = (notas(*user, show=t))
print(tot)
| 1,384 | 532 |
from nltk import sent_tokenize, word_tokenize, pos_tag, ne_chunk
# Toy question-detection experiment over a hard-coded two-sentence text.
sentence = 'Usually I go to the hospital when I am afraid. When I sould go there?'
sentences_splitted = sent_tokenize(sentence)
sentence_words_splitted = [word_tokenize(s) for s in sentences_splitted]
# NOTE(review): pos_tag/ne_chunk are applied to the raw sentence strings,
# not the tokenized word lists above — the usual pipeline is
# ne_chunk(pos_tag(word_tokenize(s))); confirm this is intentional.
question = [ne_chunk(pos_tag(s)) for s in sentences_splitted]
labeled_sentence = []
helping_verbs = ['is', 'am', 'are', 'was', 'were', 'be', 'being', 'been', 'has', 'have', 'had', 'do', 'does', 'did',
                 'will', 'shall', 'should', 'would']
# Keep sentences that look like questions: wh-word start, '?' ending, or a
# helping verb as the first word.
# NOTE(review): 'wh' in sentence[0] is case-sensitive, so capitalised
# sentence-initial 'When'/'Where' are not matched — confirm intended.
for sentence in sentence_words_splitted:
    if 'wh' in sentence[0] or '?' in sentence[-1] or sentence[0] in helping_verbs:  # First word is where, when, which, who, what... and not helping verbs in the first word
        labeled_sentence.append(sentence)
| 779 | 267 |
#!/usr/bin/env python3
'''
translator.py: 3 address code -> TAM translator.
@author: Hugo Araujo de Sousa [2013007463]
@email: hugosousa@dcc.ufmg.br
@DCC053 - Compiladores I - UFMG
'''
# TODO: Need to handle floating point literals.
# TAM does not provide arithmetic routines for floating point!?
import argparse as ap
from quadruple import Quadruple
from math import floor
# Global variables.
input_file = None   # three-address-code source handle (set elsewhere — presumably by the driver; verify)
output_file = None  # generated TAM output handle (set elsewhere)
# Sizes (in 2B words) of the grammar types.
TSIZES = {'int': 2, 'float': 4, 'char': 1, 'bool': 1}
MAX_SIZE = TSIZES['float']  # temporaries are allocated at the largest type size
# Stack top
ST = 0
# Code stack top
CT = 0
# Address, on the stack, of the variables.
addresses = {}
# Types of the variables.
types = {}
# Dictionary which returns the Quadruple by label.
labels = {}
# Instruction format: opcode, register, n, d, then a mnemonic comment.
INSTR = '{}\t{}\t{}\t{}\t; {}\n'
# Instruction buffer: (instruction string, originating Quadruple) pairs.
INSTR_BUFFER = []
################################################################################
def str2bool(string):
    ''' Converts a string to bool.
    @param string: String to be converted.
    @type string: String.
    @return: True for any capitalisation of 'true', False otherwise.
    @rtype: Bool.
    '''
    normalized = string.lower()
    return normalized == 'true'
def parse_arguments():
    ''' Add command line arguments to the program.
    @return: Command line arguments (INPUT_FILE, OUTPUT_FILE).
    @rtype: argparse.Namespace.
    '''
    arg_parser = ap.ArgumentParser()
    for arg_name, description in (('INPUT_FILE', 'Name of input file'),
                                  ('OUTPUT_FILE', 'Name of output file')):
        arg_parser.add_argument(arg_name, type=str, help=description)
    return arg_parser.parse_args()
def add_instr(instr, quad):
    ''' Buffer one generated instruction (printed to the output file later).
    @param instr: Formatted TAM instruction line.
    @type instr: String.
    @param quad: Quadruple that generated the instruction (None for
        allocation code emitted during declaration parsing).
    @type quad: Quadruple.
    '''
    global CT
    INSTR_BUFFER.append((instr, quad))
    # Track the address of the next instruction to emit.
    CT += 1
def read_decls():
    ''' Read the program's declarations.

    Consumes lines from the global input_file until a (near-)empty line,
    allocating stack space for each declared variable or array: addresses,
    types and the stack top ST are updated, and a PUSH instruction is
    buffered for each declaration.
    '''
    global ST, CT
    print('-------------------BEGIN INPUT-------------------')
    while True:
        line = input_file.readline()
        print(line.strip('\n'))
        # A line of 2 chars or fewer ends the declaration section.
        if len(line) <= 2:
            break
        else:
            line = line.replace('[', '')
            line = line.replace(']', '')
            args = line.split()
            if len(args) < 3:  # Simple variable: "<type> <name>"
                if args[1] not in addresses:
                    size = TSIZES[args[0]]
                    addresses[args[1]] = ST
                    ST += size
                    types[args[1]] = args[0]
            else:  # Array: "<count> <type> <name>"
                if args[2] not in addresses:
                    size = TSIZES[args[1]] * int(args[0])
                    addresses[args[2]] = ST
                    ST += size
                    types[args[2]] = args[1]
            # NOTE(review): executed for every declaration line; if a name
            # repeats, `size` still holds the previous declaration's value —
            # confirm duplicate declarations cannot occur in the input.
            add_instr(INSTR.format(10, 0, 0, size, 'PUSH ' + str(size)), None)
def build_quadruples():
    ''' Build quadruples from the isntruction in the source code.

    Parses the remainder of the global input_file line by line: leading
    'Ln:' labels are collected into `labels`, temporaries get stack space
    allocated on first sight, and each recognised statement becomes a
    Quadruple.
    @return quads: Quadruples built.
    @rtype quads: List of Quadruple.
    '''
    global CT, ST
    quads = []
    for line in input_file:  # Get all quadruples in source code
        print(line.strip('\n'))
        newQuad = None
        line_args = line.split()
        L = []
        if ':' in line_args[0]:  # Collect Quadruple labels
            # Labels look like 'L12:'; strip the leading letter of each.
            L = [int(x[1:]) for x in line_args[0].split(':') if x != '']
            del line_args[0]
        if len(line_args) != 0:  # Non empty quadruples
            if 'if' in line_args[0]:  # Conditional
                op = line_args[0]
                # 6 tokens => relational condition (a relop b); otherwise a
                # single boolean operand.
                if len(line_args) == 6:
                    cond = line_args[1:4]
                else:
                    cond = line_args[1:2]
                branch = int(line_args[-1][1:])
                newQuad = Quadruple(None, cond, None, op, branch)
            elif 'goto' == line_args[0]:  # Unconditional jump
                branch = int(line_args[1][1:])
                newQuad = Quadruple(None, None, None, line_args[0], branch)
            else:  # Operation
                dst = line_args[0]
                if dst not in addresses:  # Allocate memory for temporaries
                    addresses[dst] = ST
                    ST += MAX_SIZE
                    types[dst] = 'float'
                    add_instr(INSTR.format(10, 0, 0, MAX_SIZE,
                                           'PUSH ' + str(MAX_SIZE)), None)
                # Get operator and operands
                if line_args[1] == '[':  # Array indexing l-value
                    op = '[]='
                    op1 = line_args[2]
                    op2 = line_args[5]
                    newQuad = Quadruple(dst, op1, op2, op)
                else:
                    if len(line_args) == 3:  # Simple copy assignments
                        op1 = line_args[2]
                        newQuad = Quadruple(dst, op1, None, None)
                    elif len(line_args) == 5:  # Arithmetic
                        op = line_args[3]
                        op1 = line_args[2]
                        op2 = line_args[4]
                        newQuad = Quadruple(dst, op1, op2, op)
                    elif len(line_args) == 6:  # Array indexing r-value
                        op = '=[]'
                        op1 = line_args[2]
                        op2 = line_args[4]
                        newQuad = Quadruple(dst, op1, op2, op)
                    else:  # Unary
                        op = line_args[2]
                        op2 = line_args[3]
                        newQuad = Quadruple(dst, None, op2, op)
        if newQuad:
            quads.append(newQuad)
            for label in L:  # Each label points to their proper quadruple
                labels[label] = newQuad
    print('--------------------END INPUT--------------------')
    return quads
def _push_value(operand, quad):
    ''' Emit TAM instructions that push the value of *operand* on the stack.
    A known variable is pushed indirectly (LOADA of its static address
    followed by LOADI of its own size); any other token is treated as a
    literal -- 'true'/'false' via str2bool, otherwise numeric text
    truncated to int -- and pushed with LOADL.
    @param operand: Variable name or literal token.
    @param quad: Quadruple the emitted instructions belong to.
    '''
    if operand in addresses:  # Operand is a variable
        addr = addresses[operand]
        size = TSIZES[types[operand]]
        add_instr(INSTR.format(1, 4, 0, addr,
                               'LOADA ' + str(addr) + '[SB]'), quad)
        add_instr(INSTR.format(2, 0, size, 0,
                               'LOADI(' + str(size) + ')'), quad)
    else:  # Operand is a literal
        if operand == 'true' or operand == 'false':
            literal = int(str2bool(operand))
        else:
            literal = int(floor(float(operand)))
        add_instr(INSTR.format(3, 0, 0, literal,
                               'LOADL ' + str(literal)), quad)
def _store_top(dst, quad):
    ''' Emit TAM instructions storing the value on top of the stack into
    the static address of *dst* (LOADA of the address, then STOREI of the
    destination's size).
    @param dst: Destination variable name (must be present in addresses).
    @param quad: Quadruple the emitted instructions belong to.
    '''
    addr_dst = addresses[dst]
    dst_size = TSIZES[types[dst]]
    add_instr(INSTR.format(1, 4, 0, addr_dst,
                           'LOADA ' + str(addr_dst) + '[SB]'), quad)
    add_instr(INSTR.format(5, 0, dst_size, 0,
                           'STOREI(' + str(dst_size) + ')'), quad)
# Relational operator -> (primitive mnemonic, primitive displacement).
# 'eq'/'ne' additionally expect the operand size on the stack; that extra
# LOADL is emitted in translate() before calling the primitive.
_RELOPS = {'<': ('lt', 13), '<=': ('le', 14), '>=': ('ge', 15),
           '>': ('gt', 16), '==': ('eq', 17), '!=': ('ne', 18)}
def translate(quads):
    ''' Translate quadruples to TAM code.
    Types of quadruples:
    1. Conditional jump.
    2. Unconditional jump.
    3. Array indexing l-value assignment.
    4. Array indexing r-value assignment.
    5. Simple variable copy assignments.
    6. Arithmetic assignment.
    7. Unary assignment.
    Branch targets are emitted as '{}' placeholders and resolved later by
    backpatching(). A final HALT is appended after all quadruples.
    @param quads: Quadruples to translate.
    @type quads: List of Quadruple.
    '''
    for quad in quads:
        # Record the code address of this quadruple's first instruction so
        # backpatching can resolve branches that target it.
        quad.address = CT
        quad_type = quad.type
        if quad_type == 1:  # Conditional jump.
            cond = quad.op1
            if len(cond) == 3:  # Relational operation: op1 relop op2
                _push_value(cond[0], quad)
                _push_value(cond[2], quad)
                relop = cond[1]
                mnemo, d = _RELOPS[relop]
                if relop == '==' or relop == '!=':
                    # eq/ne primitives also take the operand size.
                    op_size = TSIZES[types[cond[0]]]
                    add_instr(INSTR.format(3, 0, 0, op_size,
                                           'LOADL ' + str(op_size)), quad)
                add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            else:  # Simple boolean condition
                _push_value(cond[0], quad)
            # 'if' branches when the condition is true (1); the negated
            # form branches when it is false (0).
            n = 1 if quad.operator == 'if' else 0
            add_instr(INSTR.format(14, 0, n, '{}',
                                   'JUMPIF(' + str(n) + ') {}[CB]'), quad)
        elif quad_type == 2:  # Unconditional jump.
            add_instr(INSTR.format(12, 0, 0, '{}', 'JUMP {}[CB]'), quad)
        elif quad_type == 3:  # Array indexing l-value assignment.
            _push_value(quad.op2, quad)  # r-value to store
            _push_value(quad.op1, quad)  # index (offset)
            # Element address = array base address + offset.
            addr_base = addresses[quad.dst]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                                   'LOADA ' + str(addr_base) + '[SB]'), quad)
            add_instr(INSTR.format(6, 2, 0, 8, 'add'), quad)
            # Store the r-value at the computed element address.
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(5, 0, dst_size, 0,
                                   'STOREI(' + str(dst_size) + ')'), quad)
        elif quad_type == 4:  # Array indexing r-value assignment.
            # BUG FIX: the original emitted LOADI with op1_size -- a stale
            # variable left over from an earlier branch -- when the index
            # was a variable; _push_value uses the index's own size.
            _push_value(quad.op2, quad)  # index (offset)
            # Element address = array base address + offset.
            addr_base = addresses[quad.op1]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                                   'LOADA ' + str(addr_base) + '[SB]'), quad)
            add_instr(INSTR.format(6, 2, 0, 8, 'add'), quad)
            # Dereference the element, then store it into the destination.
            op_size = TSIZES[types[quad.op1]]
            add_instr(INSTR.format(2, 0, op_size, 0,
                                   'LOADI(' + str(op_size) + ')'), quad)
            _store_top(quad.dst, quad)
        elif quad_type == 5:  # Simple variable copy assignment.
            _push_value(quad.op1, quad)
            _store_top(quad.dst, quad)
        elif quad_type == 6:  # Arithmetic assignment.
            _push_value(quad.op1, quad)
            _push_value(quad.op2, quad)
            # Select the arithmetic primitive; anything not +, - or * is
            # treated as division (same fallback as before).
            if quad.operator == '+':
                mnemo, d = 'add', 8
            elif quad.operator == '-':
                mnemo, d = 'sub', 9
            elif quad.operator == '*':
                mnemo, d = 'mult', 10
            else:
                mnemo, d = 'div', 11
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            _store_top(quad.dst, quad)
        elif quad_type == 7:  # Unary assignment: negation as 0 - op2.
            add_instr(INSTR.format(3, 0, 0, 0,
                                   'LOADL 0'), quad)
            _push_value(quad.op2, quad)
            add_instr(INSTR.format(6, 2, 0, 9, 'sub'), quad)
            _store_top(quad.dst, quad)
    add_instr(INSTR.format(15, 0, 0, 0, 'HALT'), None)
def backpatching():
    ''' Perform backpatching to assign labels.
    Every buffered instruction containing a '{}' placeholder is a branch;
    its target is looked up through its quadruple's label. A label mapped
    to None means "past the last quadruple", so the branch resolves to CT
    (the address of the final HALT).
    '''
    for i, (instruction, quadruple) in enumerate(INSTR_BUFFER):
        if '{}' in instruction:
            target = labels[quadruple.branch]
            # 'is None' (identity), not '== None': None is a singleton.
            branch_address = CT if target is None else target.address
            INSTR_BUFFER[i] = (
                instruction.format(branch_address, branch_address),
                quadruple)
def finish():
    ''' Finish the translation: close the input, flush every buffered
    instruction to the output file, then close the output. '''
    input_file.close()
    for instruction, _quad in INSTR_BUFFER:
        output_file.write(instruction)
    output_file.close()
def main():
    ''' Entry point: open the files named on the command line, read the
    declarations, build and translate the quadruples, resolve branch
    targets, and write out the TAM program. '''
    global input_file, output_file
    arguments = parse_arguments()
    input_file = open(arguments.INPUT_FILE, 'r')
    output_file = open(arguments.OUTPUT_FILE, 'w')
    read_decls()
    translate(build_quadruples())
    backpatching()
    finish()
################################################################################
# Guard the entry point so importing this module does not run the translator.
if __name__ == "__main__":
    main()
| 16,079 | 7,740 |
# encoding: utf-8
# Training / evaluation configuration flags (TensorFlow 1.x tf.app.flags API).
import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
# train settings
flags.DEFINE_integer('batch_size', 40, 'the number of images in a batch.')
flags.DEFINE_integer('training_data_type', 1, '0: directly feed, 1: tfrecords')
#flags.DEFINE_string('train_tfrecords', 'data/train_caltech_random.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_string('train_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_integer('image_height', 256, 'image height.')
flags.DEFINE_integer('image_width', 256, 'image width.')
flags.DEFINE_integer('image_depth', 3, 'image depth.')
flags.DEFINE_integer('crop_size', 227, 'crop size of image.')
flags.DEFINE_float('learning_rate', 1e-2, 'initial learning rate.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1, 'learning rate decay factor.')
flags.DEFINE_float('num_epochs_per_decay', 350.0, 'epochs after which learning rate decays.')
flags.DEFINE_float('moving_average_decay', 0.9999, 'decay to use for the moving average.')
flags.DEFINE_integer('num_examples_per_epoch_for_train', 400, 'the number of examples per epoch train.')
flags.DEFINE_integer('num_examples_per_epoch_for_eval', 400, 'the number of examples per epoch eval.')
flags.DEFINE_string('tower_name', 'tower', 'multiple GPU prefix.')
#flags.DEFINE_integer('num_classes', 10, 'the number of classes.')
flags.DEFINE_integer('num_classes', 5, 'the number of classes.')
flags.DEFINE_integer('num_threads', 8, 'the number of threads.')
flags.DEFINE_boolean('fine_tuning', False, 'fine tuning.')
flags.DEFINE_string('trained_model', 'trained_model/caffenet.npy', 'trained model to use fine tuning.')
# output logs settings
flags.DEFINE_string('train_dir', 'train', 'directory where to write event logs and checkpoint')
flags.DEFINE_integer('max_steps', 100000, 'the number of batches to run.')
flags.DEFINE_boolean('log_device_placement', False, 'whether to log device placement.')
# evaluate settings
flags.DEFINE_string('eval_dir', 'eval', 'directory where to write event logs.')
flags.DEFINE_string('eval_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for eval')
flags.DEFINE_string('checkpoint_dir', 'train', 'directory where to read model checkpoints.')
# NOTE: fixed a stray trailing comma here (it turned the statement into a
# discarded 1-tuple) and the "How to often" typo in the help text.
flags.DEFINE_integer('eval_interval_secs', 60*3, 'How often to run the eval.')
flags.DEFINE_integer('num_examples', 100, 'the number of examples to run.')
flags.DEFINE_boolean('run_once', False, 'whether to run eval only once.')