hexsha (stringlengths 40-40) | size (int64 1-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-239) | max_stars_repo_name (stringlengths 5-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-239) | max_issues_repo_name (stringlengths 5-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-239) | max_forks_repo_name (stringlengths 5-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 1-1.03M) | avg_line_length (float64 1-958k) | max_line_length (int64 1-1.03M) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
794c3f671b4c6e3fd613ab302640a225fb26e73b | 29,598 | py | Python | torch/nn/modules/container.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | ["Intel"] | 2 | 2022-02-14T13:56:03.000Z | 2022-02-14T13:56:05.000Z | torch/nn/modules/container.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | ["Intel"] | 1 | 2019-07-23T15:23:32.000Z | 2019-07-23T15:32:23.000Z | torch/nn/modules/container.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | ["Intel"] | 2 | 2019-07-23T14:37:31.000Z | 2019-07-23T14:47:13.000Z |
import warnings
from collections import OrderedDict, abc as container_abcs
from itertools import chain, islice
import operator
import torch
from .module import Module
from ..parameter import Parameter
from torch._jit_internal import _copy_to_script_wrapper
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
T = TypeVar('T', bound=Module)
class Container(Module):
def __init__(self, **kwargs: Any) -> None:
super(Container, self).__init__()
# DeprecationWarning is ignored by default <sigh>
warnings.warn("nn.Container is deprecated. All of it's functionality "
"is now implemented in nn.Module. Subclass that instead.")
for key, value in kwargs.items():
self.add_module(key, value)
class Sequential(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor. Alternatively, an ``OrderedDict`` of modules can be
passed in. The ``forward()`` method of ``Sequential`` accepts any
input and forwards it to the first module it contains. It then
"chains" outputs to inputs sequentially for each subsequent module,
finally returning the output of the last module.
The value a ``Sequential`` provides over manually calling a sequence
of modules is that it allows treating the whole container as a
single module, such that performing a transformation on the
``Sequential`` applies to each of the modules it stores (which are
each a registered submodule of the ``Sequential``).
What's the difference between a ``Sequential`` and a
:class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
sounds like--a list for storing ``Module`` s! On the other hand,
the layers in a ``Sequential`` are connected in a cascading way.
Example::
# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
nn.Conv2d(1,20,5),
nn.ReLU(),
nn.Conv2d(20,64,5),
nn.ReLU()
)
# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(1,20,5)),
('relu1', nn.ReLU()),
('conv2', nn.Conv2d(20,64,5)),
('relu2', nn.ReLU())
]))
"""
_modules: Dict[str, Module] # type: ignore[assignment]
@overload
def __init__(self, *args: Module) -> None:
...
@overload
def __init__(self, arg: 'OrderedDict[str, Module]') -> None:
...
def __init__(self, *args):
super(Sequential, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx) -> T:
"""Get the idx-th item of the iterator"""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError('index {} is out of range'.format(idx))
idx %= size
return next(islice(iterator, idx, None))
@_copy_to_script_wrapper
def __getitem__(self, idx) -> Union['Sequential', T]:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __dir__(self):
keys = super(Sequential, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
# NB: We can't really type check this function as the type of input
# may change dynamically (as is tested in
# TestScript.test_sequential_intermediary_types). Cannot annotate
# with Any as TorchScript expects a more precise type
def forward(self, input):
for module in self:
input = module(input)
return input
def append(self, module: Module) -> 'Sequential':
r"""Appends a given module to the end.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
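# Editor's note: the sketch below is illustrative and not part of the original
# file. It exercises the Sequential behaviours documented above (construction,
# integer indexing, slicing, append); the helper name is hypothetical and the
# function is never called at import time.
def _sequential_usage_sketch() -> 'Sequential':
    model = Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
    model.append(torch.nn.Softmax(dim=-1))   # registered as submodule "3"
    _first = model[0]                        # int indexing returns a single module
    _head = model[:2]                        # slicing returns a new Sequential
    _out = model(torch.randn(1, 4))          # forward() chains outputs to inputs
    return model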
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
Args:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
super(ModuleList, self).__init__()
if modules is not None:
self += modules
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
@_copy_to_script_wrapper
def __getitem__(self, idx: int) -> Union[Module, 'ModuleList']:
if isinstance(idx, slice):
return self.__class__(list(self._modules.values())[idx])
else:
return self._modules[self._get_abs_string_index(idx)]
def __setitem__(self, idx: int, module: Module) -> None:
idx = self._get_abs_string_index(idx)
return setattr(self, str(idx), module)
def __delitem__(self, idx: Union[int, slice]) -> None:
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
delattr(self, str(k))
else:
delattr(self, self._get_abs_string_index(idx))
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':
return self.extend(modules)
def __add__(self, other: Iterable[Module]) -> 'ModuleList':
combined = ModuleList()
for i, module in enumerate(chain(self, other)):
combined.add_module(str(i), module)
return combined
@_copy_to_script_wrapper
def __dir__(self):
keys = super(ModuleList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def insert(self, index: int, module: Module) -> None:
r"""Insert a given module before a given index in the list.
Args:
index (int): index to insert.
module (nn.Module): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
def append(self, module: Module) -> 'ModuleList':
r"""Appends a given module to the end of the list.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def extend(self, modules: Iterable[Module]) -> 'ModuleList':
r"""Appends modules from a Python iterable to the end of the list.
Args:
modules (iterable): iterable of modules to append
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleList.extend should be called with an "
"iterable, but got " + type(modules).__name__)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
return self
# remove forward altogether to fall back on Module's _forward_unimplemented
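# Editor's note: the sketch below is illustrative and not part of the original
# file. It shows the list-like ModuleList API documented above (append, extend,
# insert, negative indexing); the helper name is hypothetical and the function
# is never called at import time.
def _modulelist_usage_sketch() -> 'ModuleList':
    layers = ModuleList(torch.nn.Linear(8, 8) for _ in range(3))
    layers.append(torch.nn.ReLU())            # registered under the key "3"
    layers.extend([torch.nn.Linear(8, 2)])    # keys continue from the current length
    layers.insert(0, torch.nn.Flatten())      # existing entries shift up by one
    _last = layers[-1]                        # negative indices are supported
    return layers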
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
:class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ModuleDict.update`, the order of the merged
``OrderedDict``, ``dict`` (started from Python 3.6) or another
:class:`~torch.nn.ModuleDict` (the argument to
:meth:`~torch.nn.ModuleDict.update`).
Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict`` before Python version 3.6) does not
preserve the order of the merged mapping.
Args:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
super(ModuleDict, self).__init__()
if modules is not None:
self.update(modules)
@_copy_to_script_wrapper
def __getitem__(self, key: str) -> Module:
return self._modules[key]
def __setitem__(self, key: str, module: Module) -> None:
self.add_module(key, module)
def __delitem__(self, key: str) -> None:
del self._modules[key]
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[str]:
return iter(self._modules)
@_copy_to_script_wrapper
def __contains__(self, key: str) -> bool:
return key in self._modules
def clear(self) -> None:
"""Remove all items from the ModuleDict.
"""
self._modules.clear()
def pop(self, key: str) -> Module:
r"""Remove key from the ModuleDict and return its module.
Args:
key (string): key to pop from the ModuleDict
"""
v = self[key]
del self[key]
return v
@_copy_to_script_wrapper
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ModuleDict keys.
"""
return self._modules.keys()
@_copy_to_script_wrapper
def items(self) -> Iterable[Tuple[str, Module]]:
r"""Return an iterable of the ModuleDict key/value pairs.
"""
return self._modules.items()
@_copy_to_script_wrapper
def values(self) -> Iterable[Module]:
r"""Return an iterable of the ModuleDict values.
"""
return self._modules.values()
def update(self, modules: Mapping[str, Module]) -> None:
r"""Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(modules).__name__)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
# modules here can be a list with two items
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError("ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(m).__name__)
if not len(m) == 2:
raise ValueError("ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) +
"; 2 is required")
# modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]
# remove forward altogether to fall back on Module's _forward_unimplemented
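# Editor's note: the sketch below is illustrative and not part of the original
# file. It shows dict-style access and update() on a ModuleDict as documented
# above; the helper name is hypothetical and the function is never called at
# import time.
def _moduledict_usage_sketch() -> 'ModuleDict':
    heads = ModuleDict({'conv': torch.nn.Conv2d(3, 8, 3), 'pool': torch.nn.MaxPool2d(2)})
    heads['act'] = torch.nn.ReLU()                    # __setitem__ registers the module
    heads.update([('drop', torch.nn.Dropout(0.1))])   # iterable of (key, module) pairs
    _removed = heads.pop('pool')                      # pop() returns the removed module
    return heads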
class ParameterList(Module):
r"""Holds parameters in a list.
:class:`~torch.nn.ParameterList` can be used like a regular Python
list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
and will be visible by all :class:`~torch.nn.Module` methods.
Note that the constructor, assigning an element of the list, the
:meth:`~torch.nn.ParameterDict.append` method and the :meth:`~torch.nn.ParameterDict.extend`
method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.
Args:
parameters (iterable, optional): an iterable of elements to add to the list.
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
"""
def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
super(ParameterList, self).__init__()
self._size = 0
if values is not None:
self += values
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
@overload
def __getitem__(self, idx: int) -> Any:
...
@overload
def __getitem__(self: T, idx: slice) -> T:
...
def __getitem__(self, idx):
if isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
out = self.__class__()
for i in range(start, stop, step):
out.append(self[i])
return out
else:
idx = self._get_abs_string_index(idx)
return getattr(self, str(idx))
def __setitem__(self, idx: int, param: Any) -> None:
# Note that all other function that add an entry to the list part of
# the ParameterList end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the list part and thus won't
# call into this function.
idx = self._get_abs_string_index(idx)
if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
param = Parameter(param)
return setattr(self, str(idx), param)
def __len__(self) -> int:
return self._size
def __iter__(self) -> Iterator[Any]:
return iter(self[i] for i in range(len(self)))
def __iadd__(self, parameters: Iterable[Any]) -> 'ParameterList':
return self.extend(parameters)
def __dir__(self):
return list(range(self._size))
def append(self, value: Any) -> 'ParameterList':
"""Appends a given value at the end of the list.
Args:
value (Any): value to append
"""
new_idx = len(self)
self._size += 1
self[new_idx] = value
return self
def extend(self, values: Iterable[Any]) -> 'ParameterList':
"""Appends values from a Python iterable to the end of the list.
Args:
values (iterable): iterable of values to append
"""
# Tensor is an iterable but we never want to unpack it here
if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):
raise TypeError("ParameterList.extend should be called with an "
"iterable, but got " + type(values).__name__)
for value in values:
self.append(value)
return self
def extra_repr(self) -> str:
child_lines = []
for k, p in enumerate(self):
if isinstance(p, torch.Tensor):
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = '{} containing: [{} of size {}{}]'.format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
else:
child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, *args, **kwargs):
raise RuntimeError('ParameterList should not be called.')
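# Editor's note: the sketch below is illustrative and not part of the original
# file. It shows that plain tensors added to a ParameterList are wrapped into
# Parameter objects, as described above; the helper name is hypothetical and
# the function is never called at import time.
def _parameterlist_usage_sketch() -> 'ParameterList':
    params = ParameterList([Parameter(torch.randn(4, 4))])
    params.append(torch.randn(4))        # a plain Tensor is converted to a Parameter
    params.extend([torch.zeros(2, 2)])   # extend() routes every value through append()
    assert isinstance(params[1], Parameter)
    return params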
class ParameterDict(Module):
r"""Holds parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary, but Parameters it
contains are properly registered, and will be visible by all Module methods.
Other objects are treated as would be done by a regular Python dictionary
:class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
:meth:`~torch.nn.ParameterDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
will preserve their ordering.
Note that the constructor, assigning an element of the dictionary and the
:meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
:class:`~torch.nn.Parameter`.
Args:
values (iterable, optional): a mapping (dictionary) of
(string : Any) or an iterable of key-value pairs
of type (string, Any)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterDict({
'left': nn.Parameter(torch.randn(5, 10)),
'right': nn.Parameter(torch.randn(5, 10))
})
def forward(self, x, choice):
x = self.params[choice].mm(x)
return x
"""
def __init__(self, parameters: Any = None) -> None:
super(ParameterDict, self).__init__()
self._keys: Dict[str, None] = {}
if parameters is not None:
self.update(parameters)
def _key_to_attr(self, key: str) -> str:
if not isinstance(key, str):
raise TypeError("Index given to ParameterDict cannot be used as a key as it is "
f"not a string (type is '{type(key).__name__}'). Open an issue on "
"github if you need non-string keys.")
else:
# Use the key as-is so that `.named_parameters()` returns the right thing
return key
def __getitem__(self, key: str) -> Any:
attr = self._key_to_attr(key)
return getattr(self, attr)
def __setitem__(self, key: str, value: Any) -> None:
# Note that all other function that add an entry to the dictionary part of
# the ParameterDict end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the dictionary part and thus won't
# call into this function.
self._keys[key] = None
attr = self._key_to_attr(key)
if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
value = Parameter(value)
setattr(self, attr, value)
def __delitem__(self, key: str) -> None:
del self._keys[key]
attr = self._key_to_attr(key)
delattr(self, attr)
def __len__(self) -> int:
return len(self._keys)
def __iter__(self) -> Iterator[str]:
return iter(self._keys)
def __reversed__(self) -> Iterator[str]:
return reversed(list(self._keys))
def copy(self) -> 'ParameterDict':
"""Returns a copy of this :class:`~torch.nn.ParameterDict` instance.
"""
# We have to use an OrderedDict because the ParameterDict constructor
# behaves differently on plain dict vs OrderedDict
return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))
def __contains__(self, key: str) -> bool:
return key in self._keys
def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
"""If key is in the ParameterDict, return its value.
If not, insert `key` with a parameter `default` and return `default`.
`default` defaults to `None`.
Args:
key (string): key to set default for
default (Any): the parameter set to the key
"""
if key not in self:
self[key] = default
return self[key]
def clear(self) -> None:
"""Remove all items from the ParameterDict.
"""
for k in self._keys.copy():
del self[k]
def pop(self, key: str) -> Any:
r"""Remove key from the ParameterDict and return its parameter.
Args:
key (string): key to pop from the ParameterDict
"""
v = self[key]
del self[key]
return v
def popitem(self) -> Tuple[str, Any]:
"""Remove and return the last inserted `(key, parameter)` pair
from the ParameterDict
"""
k, _ = self._keys.popitem()
# We need the key in the _keys to be able to access/del
self._keys[k] = None
val = self[k]
del self[k]
return k, val
def get(self, key: str, default: Optional[Any] = None) -> Any:
r"""Return the parameter associated with key if present.
Otherwise return default if provided, None if not.
Args:
key (string): key to get from the ParameterDict
default (Parameter, optional): value to return if key not present
"""
return self[key] if key in self else default
def fromkeys(self, keys: Iterable[str], default: Optional[Any] = None) -> 'ParameterDict':
r"""Return a new ParameterDict with the keys provided
Args:
keys (iterable, string): keys to make the new ParameterDict from
default (Parameter, optional): value to set for all keys
"""
return ParameterDict(((k, default) for k in keys))
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ParameterDict keys.
"""
return self._keys.keys()
def items(self) -> Iterable[Tuple[str, Any]]:
r"""Return an iterable of the ParameterDict key/value pairs.
"""
return ((k, self[k]) for k in self._keys)
def values(self) -> Iterable[Any]:
r"""Return an iterable of the ParameterDict values.
"""
return (self[k] for k in self._keys)
def update(self, parameters: Union[Mapping[str, Any], 'ParameterDict']) -> None:
r"""Update the :class:`~torch.nn.ParameterDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
parameters (iterable): a mapping (dictionary) from string to
:class:`~torch.nn.Parameter`, or an iterable of
key-value pairs of type (string, :class:`~torch.nn.Parameter`)
"""
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError("ParametersDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(parameters).__name__)
if isinstance(parameters, (OrderedDict, ParameterDict)):
for key, parameter in parameters.items():
self[key] = parameter
elif isinstance(parameters, container_abcs.Mapping):
for key, parameter in sorted(parameters.items()):
self[key] = parameter
else:
for j, p in enumerate(parameters):
if not isinstance(p, container_abcs.Iterable):
raise TypeError("ParameterDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(p).__name__)
if not len(p) == 2:
raise ValueError("ParameterDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) +
"; 2 is required")
# parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
self[p[0]] = p[1] # type: ignore[assignment]
def extra_repr(self) -> str:
child_lines = []
for k, p in self.items():
if isinstance(p, torch.Tensor):
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = '{} containing: [{} of size {}{}]'.format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
else:
child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('ParameterDict should not be called.')
def __or__(self, other: 'ParameterDict') -> 'ParameterDict':
copy = self.copy()
copy.update(other)
return copy
def __ror__(self, other: 'ParameterDict') -> 'ParameterDict':
copy = other.copy()
copy.update(self)
return copy
def __ior__(self, other : 'ParameterDict') -> 'ParameterDict':
self.update(other)
return self
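# Editor's note: the sketch below is illustrative and not part of the original
# file. It shows dict-style access, merging with the | operator, and the
# Tensor-to-Parameter conversion described above; the helper name is
# hypothetical and the function is never called at import time.
def _parameterdict_usage_sketch() -> 'ParameterDict':
    weights = ParameterDict({'left': Parameter(torch.randn(5, 3))})
    weights['right'] = torch.randn(5, 3)     # plain Tensors are wrapped into Parameters
    merged = weights | ParameterDict({'bias': Parameter(torch.zeros(5))})
    _value = merged.get('left')              # dict-style lookup, None if missing
    return merged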
| 37.849105 | 110 | 0.590412 |
794c4095d63c8dab8431fbf3d7c42d6dcfed42ae | 2,205 | py | Python | packages/syft/src/syft/core/adp/ledger_store.py | BearerPipelineTest/PySyft | 5d4abfbc53c8d3a48f85606d43f7fd9b05a16b3d | ["Apache-2.0"] | null | null | null | packages/syft/src/syft/core/adp/ledger_store.py | BearerPipelineTest/PySyft | 5d4abfbc53c8d3a48f85606d43f7fd9b05a16b3d | ["Apache-2.0"] | null | null | null | packages/syft/src/syft/core/adp/ledger_store.py | BearerPipelineTest/PySyft | 5d4abfbc53c8d3a48f85606d43f7fd9b05a16b3d | ["Apache-2.0"] | null | null | null |
# future
from __future__ import annotations
# stdlib
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
# third party
from nacl.signing import VerifyKey
from pydantic import BaseSettings
import redis
# syft absolute
import syft as sy
# relative
from ...util import size_mb
from .abstract_ledger_store import AbstractDataSubjectLedger
from .abstract_ledger_store import AbstractLedgerStore
class DictLedgerStore(AbstractLedgerStore):
def __init__(self, *args: Tuple[Any, ...], **kwargs: Any) -> None:
self.kv_store: Dict[VerifyKey, AbstractDataSubjectLedger] = {}
def get(self, key: VerifyKey) -> AbstractDataSubjectLedger:
return self.kv_store[key]
def set(self, key: VerifyKey, value: AbstractDataSubjectLedger) -> None:
self.kv_store[key] = value
class RedisLedgerStore(AbstractLedgerStore):
def __init__(self, settings: Optional[BaseSettings] = None) -> None:
if settings is None:
raise Exception("RedisStore requires Settings")
self.settings = settings
try:
self.redis: redis.client.Redis = redis.Redis(
host=settings.REDIS_HOST,
port=self.settings.REDIS_PORT,
db=self.settings.LEDGER_DB_ID,
)
except Exception as e:
print("failed to load redis", e)
raise e
def get(self, key: VerifyKey) -> AbstractDataSubjectLedger:
try:
key_str = bytes(key).hex()
buf = self.redis.get(key_str)
if buf is None:
raise KeyError()
return sy.deserialize(buf, from_bytes=True)
except Exception as e:
print(f"Failed to get ledger from database. {e}")
raise e
def set(self, key: VerifyKey, value: AbstractDataSubjectLedger) -> None:
try:
key_str = bytes(key).hex()
buf = sy.serialize(value, to_bytes=True)
print(f"Saving DataSubjectLedger of size: {int(size_mb(buf))} MB")
self.redis.set(key_str, buf)
except Exception as e:
print(f"Failed to set ledger to database. {e}")
raise e
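# Editor's note: the sketch below is illustrative and not part of the original
# file. It exercises the in-memory DictLedgerStore with a freshly generated
# PyNaCl key; the helper name and the `ledger` argument are hypothetical, and
# the function is never called at import time.
def _dict_ledger_store_sketch(ledger: AbstractDataSubjectLedger) -> AbstractDataSubjectLedger:
    from nacl.signing import SigningKey

    verify_key: VerifyKey = SigningKey.generate().verify_key
    store = DictLedgerStore()
    store.set(key=verify_key, value=ledger)  # entries are keyed by the data owner's VerifyKey
    return store.get(key=verify_key)         # raises KeyError for an unknown key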
| 31.056338 | 78 | 0.642177 |
794c411997498fc1621b3c5bb8bdeabc988f489b | 2,991 | py | Python | tests/operators/vector/test_abs_ad_001.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | ["Apache-2.0"] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/operators/vector/test_abs_ad_001.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | ["Apache-2.0"] | null | null | null | tests/operators/vector/test_abs_ad_001.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from base import TestBase
from nose.plugins.attrib import attr
from test_run.abs_ad_run import abs_ad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_abs_ad_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, setdimArgs
("000_abs_input_1_1", abs_ad_run, ((1, 1), "float16")),
("001_abs_input_2_1", abs_ad_run, ((2, 1), "float16")),
("002_abs_input_2_2_2", abs_ad_run, ((2, 2, 2), "float16")),
("003_abs_input_1280_1280", abs_ad_run, ((1280, 1280), "float16")),
("004_abs_input_1280_30522", abs_ad_run, ((1280, 30522), "float16")),
("005_abs_input_8192_1024", abs_ad_run, ((8192, 1024), "float16")),
("006_abs_input_1280_1024", abs_ad_run, ((1280, 1024), "float16")),
("007_abs_input_64_128_1024", abs_ad_run, ((64, 128, 1024), "float16")),
("008_abs_input_16_16", abs_ad_run, ((16, 16), "float16")),
("009_abs_input_2_1", abs_ad_run, ((33, 1), "float16")),
]
self.testarg_cloud = [
# testflag,opfuncname,testRunArgs, setdimArgs
#("000_abs_input_1_1",abs_run,((1, 1), "float32"), ["rpc_cloud", "rpc_mini"]),
]
return
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_ascend_training
def test_run(self):
self.common_run(self.testarg[0:4])
@pytest.mark.level2
@pytest.mark.env_onecard
@pytest.mark.platform_x86_ascend_training
def test_debug(self):
self.common_run(self.testarg[4:8])
@pytest.mark.aicmodel
@pytest.mark.env_onecard
@pytest.mark.platform_x86_ascend_training
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
| 35.607143 | 90 | 0.614176 |
794c41f351aa0798145eca7eb93aefb85b5fe3cc | 323 | py | Python | examples/restorear.py | songfei9315/python-sdk | b0f83c5338d68f84decb18729802cd0034f52575 | ["MIT"] | 494 | 2015-01-08T09:56:30.000Z | 2022-03-28T03:15:22.000Z | examples/restorear.py | songfei9315/python-sdk | b0f83c5338d68f84decb18729802cd0034f52575 | ["MIT"] | 189 | 2015-01-26T06:46:19.000Z | 2022-03-28T04:06:16.000Z | examples/restorear.py | songfei9315/python-sdk | b0f83c5338d68f84decb18729802cd0034f52575 | ["MIT"] | 306 | 2015-01-24T05:56:05.000Z | 2021-12-21T12:15:57.000Z |
# -*- coding: utf-8 -*-
# flake8: noqa
from qiniu import Auth
from qiniu import BucketManager
access_key = ''
secret_key = ''
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
bucket_name = '13'
key = 'fb8539c39f65d74b4e70db9133c1e9d5.mp4'
ret,info = bucket.restoreAr(bucket_name,key,3)
print(ret)
print(info)
| 17.944444 | 46 | 0.733746 |
794c42ddc5f840fa3454cb3c0641854cd3280213 | 4,061 | py | Python | qa/rpc-tests/merkle_blocks.py | zevno/zevno-core | f546a48aaaf55c268633fcd8d04fc7c41c7b2bc8 | ["MIT"] | 1 | 2020-12-10T00:17:10.000Z | 2020-12-10T00:17:10.000Z | qa/rpc-tests/merkle_blocks.py | zevno/zevno-core | f546a48aaaf55c268633fcd8d04fc7c41c7b2bc8 | ["MIT"] | null | null | null | qa/rpc-tests/merkle_blocks.py | zevno/zevno-core | f546a48aaaf55c268633fcd8d04fc7c41c7b2bc8 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir))
self.nodes.append(start_node(1, self.options.tmpdir))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir))
self.nodes.append(start_node(3, self.options.tmpdir, ["-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
tx_fee = Decimal('0.00001')
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500 - tx_fee})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500 - tx_fee})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500 - tx_fee*2})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
# Doesn't apply to ZEVNO Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
| 46.147727 | 120 | 0.669293 |
794c433a5ef1e26fef7ba6c0b6ece751d199f659 | 5,746 | py | Python | mayan/apps/documents/settings.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 4 | 2019-02-17T08:35:42.000Z | 2019-03-28T06:02:11.000Z | mayan/apps/documents/settings.py | zhoubear/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 1 | 2018-10-11T13:01:34.000Z | 2018-10-11T13:01:34.000Z | mayan/apps/documents/settings.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 3 | 2019-01-29T13:21:57.000Z | 2019-10-27T03:20:15.000Z |
from __future__ import unicode_literals
import os
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from smart_settings import Namespace
from .literals import DEFAULT_LANGUAGE, DEFAULT_LANGUAGE_CODES
namespace = Namespace(name='documents', label=_('Documents'))
setting_documentimagecache_storage = namespace.add_setting(
global_name='DOCUMENTS_CACHE_STORAGE_BACKEND',
default='django.core.files.storage.FileSystemStorage', help_text=_(
'Path to the Storage subclass to use when storing the cached '
'document image files.'
), quoted=True
)
setting_documentimagecache_storage_arguments = namespace.add_setting(
global_name='DOCUMENTS_CACHE_STORAGE_BACKEND_ARGUMENTS',
default='{{location: {}}}'.format(
os.path.join(settings.MEDIA_ROOT, 'document_cache')
), help_text=_(
'Arguments to pass to the DOCUMENT_CACHE_STORAGE_BACKEND.'
), quoted=True,
)
setting_disable_base_image_cache = namespace.add_setting(
global_name='DOCUMENTS_DISABLE_BASE_IMAGE_CACHE', default=False,
help_text=_(
'Disables the first cache tier which stores high resolution, '
'non transformed versions of documents\'s pages.'
)
)
setting_disable_transformed_image_cache = namespace.add_setting(
global_name='DOCUMENTS_DISABLE_TRANSFORMED_IMAGE_CACHE', default=False,
help_text=_(
'Disables the second cache tier which stores medium to low '
'resolution, transformed (rotated, zoomed, etc) versions '
'of documents\' pages.'
)
)
setting_display_height = namespace.add_setting(
global_name='DOCUMENTS_DISPLAY_HEIGHT', default=''
)
setting_display_width = namespace.add_setting(
global_name='DOCUMENTS_DISPLAY_WIDTH', default='3600'
)
setting_favorite_count = namespace.add_setting(
global_name='DOCUMENTS_FAVORITE_COUNT', default=40,
help_text=_(
'Maximum number of favorite documents to remember per user.'
)
)
setting_fix_orientation = namespace.add_setting(
global_name='DOCUMENTS_FIX_ORIENTATION', default=False,
help_text=_(
'Detect the orientation of each of the document\'s pages '
'and create a corresponding rotation transformation to '
'display it rightside up. This is an experimental '
'feature and it is disabled by default.'
)
)
setting_language = namespace.add_setting(
global_name='DOCUMENTS_LANGUAGE', default=DEFAULT_LANGUAGE,
help_text=_('Default documents language (in ISO639-3 format).')
)
setting_language_codes = namespace.add_setting(
global_name='DOCUMENTS_LANGUAGE_CODES', default=DEFAULT_LANGUAGE_CODES,
help_text=_('List of supported document languages. In ISO639-3 format.')
)
settings_document_page_image_cache_time = namespace.add_setting(
global_name='DOCUMENTS_PAGE_IMAGE_CACHE_TIME', default='31556926',
help_text=_(
'Time in seconds that the browser should cache the supplied document '
'images. The default of 31556926 seconds corresponds to 1 year.'
)
)
setting_preview_height = namespace.add_setting(
global_name='DOCUMENTS_PREVIEW_HEIGHT', default=''
)
setting_preview_width = namespace.add_setting(
global_name='DOCUMENTS_PREVIEW_WIDTH', default='800'
)
setting_print_height = namespace.add_setting(
global_name='DOCUMENTS_PRINT_HEIGHT', default=''
)
setting_print_width = namespace.add_setting(
global_name='DOCUMENTS_PRINT_WIDTH', default='3600'
)
setting_recent_access_count = namespace.add_setting(
global_name='DOCUMENTS_RECENT_ACCESS_COUNT', default=40,
help_text=_(
'Maximum number of recently accessed (created, edited, viewed) '
'documents to remember per user.'
)
)
setting_recent_added_count = namespace.add_setting(
global_name='DOCUMENTS_RECENT_ADDED_COUNT', default=40,
help_text=_(
'Maximum number of recently created documents to show.'
)
)
setting_rotation_step = namespace.add_setting(
global_name='DOCUMENTS_ROTATION_STEP', default=90,
help_text=_(
'Amount in degrees to rotate a document page per user interaction.'
)
)
setting_storage_backend = namespace.add_setting(
global_name='DOCUMENTS_STORAGE_BACKEND',
default='django.core.files.storage.FileSystemStorage', help_text=_(
'Path to the Storage subclass to use when storing document '
'files.'
)
)
setting_storage_backend_arguments = namespace.add_setting(
global_name='DOCUMENTS_STORAGE_BACKEND_ARGUMENTS',
default='{{location: {}}}'.format(
os.path.join(settings.MEDIA_ROOT, 'document_storage')
), help_text=_('Arguments to pass to the DOCUMENT_STORAGE_BACKEND.')
)
setting_thumbnail_height = namespace.add_setting(
global_name='DOCUMENTS_THUMBNAIL_HEIGHT', default='', help_text=_(
'Height in pixels of the document thumbnail image.'
)
)
setting_thumbnail_width = namespace.add_setting(
global_name='DOCUMENTS_THUMBNAIL_WIDTH', default='800', help_text=(
'Width in pixels of the document thumbnail image.'
)
)
setting_zoom_max_level = namespace.add_setting(
global_name='DOCUMENTS_ZOOM_MAX_LEVEL', default=300,
help_text=_(
'Maximum amount in percent (%) to allow user to zoom in a document '
'page interactively.'
)
)
setting_zoom_min_level = namespace.add_setting(
global_name='DOCUMENTS_ZOOM_MIN_LEVEL', default=25,
help_text=_(
'Minimum amount in percent (%) to allow user to zoom out a document '
'page interactively.'
)
)
setting_zoom_percent_step = namespace.add_setting(
global_name='DOCUMENTS_ZOOM_PERCENT_STEP', default=25,
help_text=_(
'Amount in percent zoom in or out a document page per user '
'interaction.'
)
)
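# Editor's note: the block below is illustrative and not part of the original
# file. It shows how one more setting would be registered with the same
# namespace.add_setting pattern used throughout this module; the global name,
# default and help text are hypothetical examples only.
setting_example_placeholder = namespace.add_setting(
    global_name='DOCUMENTS_EXAMPLE_PLACEHOLDER', default=100,
    help_text=_(
        'Hypothetical setting shown only to illustrate how entries are '
        'registered in this namespace.'
    )
)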
| 37.070968 | 78 | 0.74417 |
794c43848c2e3bba779c4fee8979da2737b872ff | 1,511 | py | Python | python/ppml/src/bigdl/ppml/algorithms/hfl_nn.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | null | null | null | python/ppml/src/bigdl/ppml/algorithms/hfl_nn.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | null | null | null | python/ppml/src/bigdl/ppml/algorithms/hfl_nn.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.dllib.utils.common import JavaValue
class HflNN(JavaValue):
"""
HFL NN class, users could build custom NN structure in this class
"""
def __init__(self, jvalue, *args):
bigdl_type = "float"
super(JavaValue, self).__init__(jvalue, bigdl_type, *args)
def fit(self, x, y, epochs):
"""
:param x: data, could be Numpy NdArray or Pandas DataFrame
:param y: label, could be Numpy NdArray or Pandas DataFrame
:param epochs: training epochs
:return:
"""
pass
def evaluate(self, x, y):
"""
:param x: data, could be Numpy NdArray or Pandas DataFrame
:param y: label, could be Numpy NdArray or Pandas DataFrame
:return:
"""
pass
def predict(self, x):
"""
:param x: data, could be Numpy NdArray or Pandas DataFrame
:return:
"""
pass
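# Editor's note: the sketch below is illustrative and not part of the original
# file. It only shows the calling convention implied by the docstrings above;
# the model construction and the x/y inputs are hypothetical, and the stubbed
# methods currently return None.
def _hfl_nn_usage_sketch(model: HflNN, x, y):
    model.fit(x, y, epochs=5)         # x/y: Numpy NdArray or Pandas DataFrame
    _metrics = model.evaluate(x, y)   # stub in this version; returns None
    _preds = model.predict(x)         # stub in this version; returns None
    return _metrics, _preds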
| 29.627451 | 74 | 0.647915 |
794c4441c01ae050a9e680708e354fffd75de17f | 6,674 | py | Python | notebooks/CleanTweets.py | TamatiB/restitution_africa2021 | a5d640075813350386ff52180a51af2e1367a67f | ["Apache-2.0"] | null | null | null | notebooks/CleanTweets.py | TamatiB/restitution_africa2021 | a5d640075813350386ff52180a51af2e1367a67f | ["Apache-2.0"] | null | null | null | notebooks/CleanTweets.py | TamatiB/restitution_africa2021 | a5d640075813350386ff52180a51af2e1367a67f | ["Apache-2.0"] | null | null | null |
import emoji
import string
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet
import re
import time
nltk.download('stopwords')
nltk.download('punkt')
def clean_tweets(df,content,col_name,
general_clean=True,lemma=True,stem=False,remove_tag=True,remove_mention=True,
remove_emoji=False, remove_stopword=True,min_length=2, untokenized_return=True
):
'''
This function gives multiple options for cleaning tweets.
df: DataFrame that you will be changing
content: name of the column that holds the raw tweet text
col_name: new column name that will be added to the dataframe with the cleaned tweets
general_clean: a bool that removes links, images, punctuation and other leftover characters
lemma: a bool that will lemmatize your text
stem: a bool that will stem your text
remove_tag: a bool that will remove the entire hashtag from your text
remove_mention: a bool that will remove the entire mention from your text
remove_emoji: a bool that will remove hex-encoded emojis
remove_stopword: a bool that will remove stopwords from your text
min_length: an integer; words of this length or shorter are removed (defaults to 2)
untokenized_return: a bool that returns the text in string format
'''
start = time.time()
df1=df.copy()
df1.loc[:,col_name] = df1[content].apply(lambda x: min_word_length(str(x), min_length=min_length))  # use the caller-supplied min_length rather than a hard-coded 2
if remove_mention:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: remove_mentions(x))
if remove_tag:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: remove_tags(x))
if remove_stopword:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: remove_stopwords(x))
if general_clean:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: format_body(x))
if remove_emoji:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: remove_emojis(x))
if lemma:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: lemmatize(x))
if stem:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: stemmer(x))
if untokenized_return:
df1.loc[:,col_name] = df1[col_name].apply(lambda x: untokenize(x))
print("time taken to clean tweets: {}s. Use the [{}] column to perform your analysis/modeling on".format(time.time()-start,col_name))
return df1.copy()
def min_word_length(text,min_length):
'''
This function will remove all words whose length is less than or equal to the specified min_length
text: input text to the function
min_length: an integer specifying the maximum length of words to remove
eg: if min_length = 2
Before: "Hello this is a tweet"
After: "Hello this tweet"
'''
# Remove words of length <= min_length from the tweet
text = ' '.join(i for i in text.split() if not (i.isalpha() and len(i)<=min_length))
return text
def lemmatize(text):
'''
This will lemmatize the text using the WordNetLemmatizer using
parts of speech tagging for better lemmatizing and
return in a tokenized format
'''
lemmatizer = nltk.WordNetLemmatizer()
lemma = [lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in nltk.word_tokenize(text)]
return lemma
def stemmer(text):
'''
This will stem the text using the Porter Stemmer from NLTK
and returns in a tokenized format
'''
stemmer = nltk.PorterStemmer()
stem = [stemmer.stem(w) for w in nltk.word_tokenize(text)]
return stem
def remove_tags(text):
'''
This will remove the entire tag including the word
returns string format of the text
'''
# Remove hashtags since there is a column for this [the regex below removes the entire tag completely]
text = re.sub('\s([#][\w_-]+)','',text)
return text
def remove_mentions(text):
'''
This will remove the entire mention including the word
returns string format of the text
'''
# Remove mentions since there is a column for this [This below removes the entire mention completely]
text = re.sub(r"@(\w+)",' ',text,flags=re.MULTILINE)
return text
def remove_emojis(text):
'''
Removes the hex styled emojis
returns a string format of the text
'''
# Remove EMojis from the tweets
allchars = [word for word in text]
emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]
no_emoji = ([word for word in text.split() if not any(i in word for i in emoji_list)])
no_emoji_utf = ' '.join([word.encode("ascii", "ignore").decode() for word in no_emoji])
return no_emoji_utf
def remove_stopwords(text):
'''
Remove stopwords from the text
returns a string format of the text
'''
stopwords_eng = stopwords.words('english')
tokenized_doc = text.lower().split()
no_stopwords = " ".join([item for item in tokenized_doc if item not in stopwords_eng])
return no_stopwords
def untokenize(text):
'''
Untokenizes the tokenized text
returns a string format of the text
'''
untokenized = " ".join([item for item in text])
return untokenized
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
def format_body(text):
'''
This provides the general cleaning required for the text:
- Links
- Images
- Unusual characters that act as punctuation (curly quotes, long dashes, etc.)
- Normal Punctuation
- Removes ellipsis
- Removes \xa0 which is left over from the removal of some links
- Drops everything to lower case
Returns the text in a string format
'''
# Remove links
text = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+','',text, flags=re.MULTILINE)
text = re.sub('pic.twitter(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+','',text, flags=re.MULTILINE)
text = text.replace('-',' ')
text = text.replace('’','')
text = text.replace('“','')
text = text.replace('−','')
text = text.replace('”','')
text = text.replace('‘','')
# Remove punctuation
text =text.translate(str.maketrans(dict.fromkeys(string.punctuation)))
# df.loc[:,'CLEAN_CONTENT'] = df['CLEAN_CONTENT'].apply(lambda x :re.sub(r'[^\w\s]','',x))
# Remove Ellipsis
text = text.replace('…','')
# Remove weird part at the end of the URL
text = text.replace('\xa0','')
# Remove new lines
text = re.sub('\r\n|\r|\n',r' ',text)
# Make all lower case
text = text.lower()
return text
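# Editor's note: the block below is illustrative and not part of the original
# file. It runs the clean_tweets pipeline defined above on a tiny hypothetical
# DataFrame; it assumes pandas is installed and downloads the extra NLTK data
# (wordnet for lemmatize, the POS tagger for get_wordnet_pos) that the module
# does not fetch at the top.
if __name__ == "__main__":
    import pandas as pd

    nltk.download('wordnet')
    nltk.download('averaged_perceptron_tagger')
    demo_df = pd.DataFrame({
        "CONTENT": ["Hello @someone this is a #tweet with a link https://example.com !"]
    })
    cleaned = clean_tweets(demo_df, content="CONTENT", col_name="CLEAN_CONTENT")
    print(cleaned["CLEAN_CONTENT"].iloc[0])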
| 36.271739 | 140 | 0.649985 |
794c444bb7482a9f35cca341f59a931783ae0a43 | 3,148 | py | Python | baekjoon/23290.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | ["Apache-2.0"] | null | null | null | baekjoon/23290.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | ["Apache-2.0"] | null | null | null | baekjoon/23290.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | ["Apache-2.0"] | null | null | null |
import copy
def shark_mov(loc, path, num_fish, visited):
global max_fish, results
if len(path) == 3:
if num_fish == max_fish:
results.append(path)
elif num_fish > max_fish:
max_fish = num_fish
results = [path]
return None
r, c = loc
for i in range(1, 5):
nr, nc = r + s_mov[i][0], c + s_mov[i][1]
if 0 <= nr < 4 and 0 <= nc < 4:
n_path = path + str(i)
visited_ = copy.deepcopy(visited)
if visited_[nr][nc]:
tmp_fish = num_fish
else:
tmp_fish = num_fish + sum(board[nr][nc])
visited_[nr][nc] = True
shark_mov((nr, nc), n_path, tmp_fish, visited_)
M, S = map(int, input().split())
board = [[[0] * 8 for _ in range(4)] for _ in range(4)]
for _ in range(M):
r, c, d = map(int, input().split())
board[r - 1][c - 1][d - 1] += 1
smell = [[0] * 4 for _ in range(4)]
r, c = map(int, input().split())
shark = (r - 1, c - 1)
smell[r - 1][c - 1] = 1
# 0 ~ 7: left, up-left, up, up-right, right, down-right, down, down-left (clockwise)
mov =[(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]
# up 1, left 2, down 3, right 4
s_mov = [(0, 0), (-1, 0), (0, -1), (1, 0), (0, 1)]
for _ in range(S):
print('start')
for ele in board:
print(ele)
print()
# replication: copy the current board
board_ = copy.deepcopy(board)
# fish movement
for r in range(4):
for c in range(4):
for d in range(8):
if board_[r][c][d] > 0: # if there are fish facing direction d
for i in range(8):
dr, dc = mov[(d - i) % 8]
if 0 <= r + dr < 4 and 0 <= c + dc < 4 and smell[r + dr][c + dc] == 0:
tmp = board_[r][c][d]
board[r+dr][c+dc][(d - i) % 8] += tmp
board[r][c][d] -= tmp
break
print('smell')
for ele in smell:
print(ele)
print()
print('move')
for ele in board:
print(ele)
print()
# shark movement
max_fish = 0
results = []
visited = [[False] * 4 for _ in range(4)]
shark_mov(shark, '', 0, visited) # fish at the shark's initial position do not disappear
results.sort()
paths = results[0]
for path in paths:
r, c = shark[0] + s_mov[int(path)][0], shark[1] + s_mov[int(path)][1]
shark = (r, c)
if sum(board[r][c]) > 0:
smell[r][c] = 3
board[r][c] = [0] * 8
print('shark move')
print(results)
for ele in board:
print(ele)
print()
# clean up smells
for r in range(4):
for c in range(4):
if smell[r][c] > 0:
smell[r][c] -= 1
if smell[shark[0]][shark[1]] == 0:
smell[shark[0]][shark[1]] = 1
# replication: add the copied fish back into the board
for r in range(4):
for c in range(4):
for i in range(8):
board[r][c][i] += board_[r][c][i]
print('copy')
for ele in board:
print(ele)
print()
print('smell')
for ele in smell:
print(ele)
print()
out = 0
for r in range(4):
for c in range(4):
out += sum(board[r][c])
print(out)
| 27.373913 | 94 | 0.444409 |
794c4495c48dbd20a94b2e79aad619a741a0c18f | 918 | py | Python | plotly/validators/parcoords/__init__.py | Sandy1811/plotly.py | 32b67018b0c286a9bc099506c3c77186fb69446a | ["MIT"] | 1 | 2021-12-25T01:26:41.000Z | 2021-12-25T01:26:41.000Z | plotly/validators/parcoords/__init__.py | biswapanda/plotly.py | d4c965c2f978dc5c591d73ba8911ffe9ec874dab | ["MIT"] | null | null | null | plotly/validators/parcoords/__init__.py | biswapanda/plotly.py | d4c965c2f978dc5c591d73ba8911ffe9ec874dab | ["MIT"] | 1 | 2021-06-21T07:53:04.000Z | 2021-06-21T07:53:04.000Z |
from ._visible import VisibleValidator
from ._uid import UidValidator
from ._tickfont import TickfontValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._rangefont import RangefontValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._line import LineValidator
from ._legendgroup import LegendgroupValidator
from ._labelfont import LabelfontValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._domain import DomainValidator
from ._dimensiondefaults import DimensionValidator
from ._dimensions import DimensionsValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
| 39.913043 | 52 | 0.880174 |
794c46a3720ec42f2fe095b8f72c1a6b39a59295 | 1,057 | py | Python | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_paged.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | ["MIT"] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_paged.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/peer_express_route_circuit_connection_paged.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class PeerExpressRouteCircuitConnectionPaged(Paged):
"""
A paging container for iterating over a list of :class:`PeerExpressRouteCircuitConnection <azure.mgmt.network.v2019_02_01.models.PeerExpressRouteCircuitConnection>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PeerExpressRouteCircuitConnection]'}
}
def __init__(self, *args, **kwargs):
super(PeerExpressRouteCircuitConnectionPaged, self).__init__(*args, **kwargs)
| 37.75 | 175 | 0.622517 |
794c48bc3d30e6b3779e5a5a1afbbac23a84e700 | 7,433 | py | Python | src/util.py | jtwaleson/serverless-slack-quotes-bot | a97937977e7f050eda9e8fad9f2d4e982cc0c26a | ["MIT"] | 2 | 2020-06-21T13:05:48.000Z | 2020-06-21T19:53:04.000Z | src/util.py | jtwaleson/serverless-slack-quotes-bot | a97937977e7f050eda9e8fad9f2d4e982cc0c26a | ["MIT"] | null | null | null | src/util.py | jtwaleson/serverless-slack-quotes-bot | a97937977e7f050eda9e8fad9f2d4e982cc0c26a | ["MIT"] | null | null | null |
import base64
import boto3
import hashlib
import hmac
import os
import requests
import time
try:
SLACK_SIGNING_SECRET = os.environ["SLACK_SIGNING_SECRET"]
except KeyError:
raise Exception(
"Set SLACK_SIGNING_SECRET as an environment variable. "
"You can find this on the Slack 'App Settings' under "
"'Signing Secret'."
)
try:
BEARER_TOKEN = os.environ["SLACK_BEARER_TOKEN"]
except KeyError:
raise Exception(
"Set SLACK_BEARER_TOKEN as an environment variable. "
"You can find this on the Slack 'App Settings' under "
"'OAuth Tokens for Your Workspace'."
)
DATABASE_INDEX_NAME = "team-timestamp-index"
DATABASE_TABLE = "quotes"
SLACK_CALLBACK_HANDLERS = {
"command": {},
"block_actions": {},
"view_submission": {},
# "view_closed": {},
# "shortcut": {},
# "message_actions": {},
}
db_table = boto3.resource("dynamodb").Table(DATABASE_TABLE)
def _slack_callback_handler(callback_type, command):
global SLACK_CALLBACK_HANDLERS
if command in SLACK_CALLBACK_HANDLERS[callback_type]:
raise Exception(f"{callback_type} {command} is already registered")
def wrapper(handler):
SLACK_CALLBACK_HANDLERS[callback_type][command] = handler
return handler
return wrapper
def slack_command(command):
return _slack_callback_handler("command", command)
def slack_block_action(action):
return _slack_callback_handler("block_actions", action)
def slack_view_submission(action):
return _slack_callback_handler("view_submission", action)
def extract_key_from_payload(callback_type, data):
if callback_type == "block_actions":
if "actions" not in data:
raise Exception("data does not contain actions, help!")
if len(data["actions"]) > 1:
raise Exception("I can not yet handle more than one action per response")
return data["actions"][0]["action_id"]
elif callback_type == "view_submission":
return data["view"]["callback_id"]
raise Exception("Key could not be extracted")
def validate_signature(event):
print("validating signature")
# 1 - get data from request
request_timestamp = event["headers"]["x-slack-request-timestamp"]
provided_signature = event["headers"]["x-slack-signature"]
body = base64.b64decode(event["body"])
# 2 - validate timestamp
if abs(time.time() - int(request_timestamp)) > 60 * 5:
raise Exception(
"Message timestamp is stale, something is wrong. "
"Are you replaying an old request?"
)
# 3 - create signature
sig_basestring = str.encode("v0:" + request_timestamp + ":") + body
calculated_signature = (
"v0="
+ hmac.new(
str.encode(SLACK_SIGNING_SECRET),
msg=sig_basestring,
digestmod=hashlib.sha256,
).hexdigest()
)
# 4 - ensure calculated signature matches provided signature
if not hmac.compare_digest(calculated_signature, provided_signature):
raise Exception("Signature does not match, will not execute request")
def send_message(channel_id, blocks, text, post_at=None):
payload = {
"blocks": blocks,
"channel": channel_id,
"text": text,
}
if post_at is None:
print("sending message to", channel_id)
url = "https://slack.com/api/chat.postMessage"
else:
print("scheduling message to", channel_id, "at", post_at)
payload["post_at"] = post_at
url = "https://slack.com/api/chat.scheduleMessage"
r = requests.post(
url,
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json=payload,
)
r.raise_for_status()
if not r.json()["ok"]:
if "error" in r.json():
raise Exception(r.json()["error"])
raise Exception(r.json()["response_metadata"]["messages"])
if post_at is None:
return r.json()["ts"]
else:
return r.json()["scheduled_message_id"]
def delete_scheduled_message(channel_id, msg_id):
print("deleting scheduled message", channel_id, msg_id)
r = requests.post(
"https://slack.com/api/chat.deleteScheduledMessage",
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json={
"channel": channel_id,
"scheduled_message_id": msg_id,
},
)
if not r.json()["ok"]:
raise Exception(r.json()["response_metadata"]["messages"])
r.raise_for_status()
def update_message(blocks, channel_id, ts_id):
print("updating message to", channel_id)
r = requests.post(
"https://slack.com/api/chat.update",
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json={
"channel": channel_id,
"blocks": blocks,
"ts": ts_id,
},
)
if not r.json()["ok"]:
raise Exception(r.json()["response_metadata"]["messages"])
r.raise_for_status()
def open_view(trigger_id, blocks, text, submit_text, callback_id):
print("opening view", trigger_id, callback_id)
r = requests.post(
"https://slack.com/api/views.open",
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json={
"trigger_id": trigger_id,
"view": {
"type": "modal",
"callback_id": callback_id,
"title": {"type": "plain_text", "text": text},
"blocks": blocks,
"submit": {
"type": "plain_text",
"text": submit_text,
},
},
},
)
if not r.json()["ok"]:
raise Exception(r.json()["response_metadata"]["messages"])
r.raise_for_status()
def update_view(view, view_id):
print("updating view", view_id)
r = requests.post(
"https://slack.com/api/views.update",
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json={
"view": view,
"view_id": view_id,
},
)
if not r.json()["ok"]:
raise Exception(r.json()["response_metadata"]["messages"])
r.raise_for_status()
def get_all_scheduled_posts(channel):
past_cursors = set()
payload = {
"channel": channel,
}
while True:
print("getting", payload)
r = requests.get(
"https://slack.com/api/chat.scheduledMessages.list",
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {BEARER_TOKEN}",
},
json=payload,
)
r.raise_for_status()
data = r.json()
yield from data["scheduled_messages"]
del data["scheduled_messages"]
print(data)
if "next_cursor" in data["response_metadata"]:
payload["cursor"] = data["response_metadata"]["next_cursor"]
if len(payload["cursor"]) == 0 or payload["cursor"] in past_cursors:
return
past_cursors.add(payload["cursor"])
else:
return
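# Example (illustrative): the generator above follows Slack's cursor-based
# pagination and yields scheduled messages one page at a time, so callers can
# simply iterate. The channel id is a placeholder and the ``id`` field name
# follows Slack's chat.scheduledMessages.list response at the time of writing.
#
# for msg in get_all_scheduled_posts("C0123456789"):
#     delete_scheduled_message("C0123456789", msg["id"])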
| 29.14902
| 85
| 0.593973
|
794c48e06d03025e6627347b1e502910817f3d08
| 37,392
|
py
|
Python
|
src/sage/manifolds/point.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/point.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/point.py
|
LaisRast/sage
|
5fb2a6ea44400e469caee82748cf863ca0c5f724
|
[
"BSL-1.0"
] | null | null | null |
r"""
Points of Topological Manifolds
The class :class:`ManifoldPoint` implements points of a
topological manifold.
A :class:`ManifoldPoint` object can have coordinates in
various charts defined on the manifold. Two points are declared
equal if they have the same coordinates in the same chart.
AUTHORS:
- Eric Gourgoulhon, Michal Bejger (2013-2015) : initial version
REFERENCES:
- [Lee2011]_
- [Lee2013]_
EXAMPLES:
Defining a point in `\RR^3` by its spherical coordinates::
sage: M = Manifold(3, 'R^3', structure='topological')
sage: U = M.open_subset('U') # the domain of spherical coordinates
sage: c_spher.<r,th,ph> = U.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):periodic:\phi')
We construct the point in the coordinates in the default chart of ``U``
(``c_spher``)::
sage: p = U((1, pi/2, pi), name='P')
sage: p
Point P on the 3-dimensional topological manifold R^3
sage: latex(p)
P
sage: p in U
True
sage: p.parent()
Open subset U of the 3-dimensional topological manifold R^3
sage: c_spher(p)
(1, 1/2*pi, pi)
sage: p.coordinates(c_spher) # equivalent to above
(1, 1/2*pi, pi)
Computing the coordinates of ``p`` in a new chart::
sage: c_cart.<x,y,z> = U.chart() # Cartesian coordinates on U
sage: spher_to_cart = c_spher.transition_map(c_cart,
....: [r*sin(th)*cos(ph), r*sin(th)*sin(ph), r*cos(th)])
sage: c_cart(p) # evaluate P's Cartesian coordinates
(-1, 0, 0)
Points can be compared::
sage: p1 = U((1, pi/2, pi))
sage: p1 == p
True
sage: q = U((2, pi/2, pi))
sage: q == p
False
even if they were initially not defined within the same coordinate chart::
sage: p2 = U((-1,0,0), chart=c_cart)
sage: p2 == p
True
The `2\pi`-periodicity of the `\phi` coordinate is also taken into account
for the comparison::
sage: p3 = U((1, pi/2, 5*pi))
sage: p3 == p
True
sage: p4 = U((1, pi/2, -pi))
sage: p4 == p
True
"""
#*****************************************************************************
# Copyright (C) 2015 Eric Gourgoulhon <eric.gourgoulhon@obspm.fr>
# Copyright (C) 2015 Michal Bejger <bejger@camk.edu.pl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.element import Element
from sage.misc.decorators import options
from sage.symbolic.expression import Expression
from sage.rings.integer_ring import ZZ
class ManifoldPoint(Element):
r"""
Point of a topological manifold.
This is a Sage *element* class, the corresponding *parent* class
being :class:`~sage.manifolds.manifold.TopologicalManifold`
or :class:`~sage.manifolds.subset.ManifoldSubset`.
INPUT:
- ``parent`` -- the manifold subset to which the point belongs
- ``coords`` -- (default: ``None``) the point coordinates (as a tuple
or a list) in the chart ``chart``
- ``chart`` -- (default: ``None``) chart in which the coordinates are
given; if ``None``, the coordinates are assumed to refer to the
default chart of ``parent``
- ``name`` -- (default: ``None``) name given to the point
- ``latex_name`` -- (default: ``None``) LaTeX symbol to denote the point;
if ``None``, the LaTeX symbol is set to ``name``
- ``check_coords`` -- (default: ``True``) determines whether ``coords``
are valid coordinates for the chart ``chart``; for symbolic
coordinates, it is recommended to set ``check_coords`` to ``False``
EXAMPLES:
A point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: (a, b) = var('a b') # generic coordinates for the point
sage: p = M.point((a, b), name='P'); p
Point P on the 2-dimensional topological manifold M
sage: p.coordinates() # coordinates of P in the subset's default chart
(a, b)
Since points are Sage *elements*, the *parent* of which being the
subset on which they are defined, it is equivalent to write::
sage: p = M((a, b), name='P'); p
Point P on the 2-dimensional topological manifold M
A point is an element of the manifold subset in which it has
been defined::
sage: p in M
True
sage: p.parent()
2-dimensional topological manifold M
sage: U = M.open_subset('U', coord_def={c_xy: x>0})
sage: q = U.point((2,1), name='q')
sage: q.parent()
Open subset U of the 2-dimensional topological manifold M
sage: q in U
True
sage: q in M
True
By default, the LaTeX symbol of the point is deduced from its name::
sage: latex(p)
P
But it can be set to any value::
sage: p = M.point((a, b), name='P', latex_name=r'\mathcal{P}')
sage: latex(p)
\mathcal{P}
Points can be drawn in 2D or 3D graphics thanks to the
method :meth:`plot`.
"""
def __init__(self, parent, coords=None, chart=None, name=None,
latex_name=None, check_coords=True):
r"""
Construct a manifold point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,3), name='p'); p
Point p on the 2-dimensional topological manifold M
sage: TestSuite(p).run()
sage: U = M.open_subset('U', coord_def={X: x<0})
sage: q = U((-1,2), name='q'); q
Point q on the 2-dimensional topological manifold M
sage: TestSuite(q).run()
"""
if parent.is_empty():
raise TypeError(f'cannot define a point on the {parent} because it has been declared empty')
Element.__init__(self, parent)
parent._has_defined_points = True
self._manifold = parent.manifold() # a useful shortcut
self._coordinates = {} # dictionary of the point coordinates in various
# charts, with the charts as keys
if coords is not None:
if len(coords) != parent.manifold().dimension():
raise ValueError("the number of coordinates must be equal " +
"to the manifold's dimension")
from sage.manifolds.manifold import TopologicalManifold
if chart is None:
chart = parent._def_chart
elif isinstance(parent, TopologicalManifold):
if chart not in parent._atlas:
raise ValueError("the {} has not been".format(chart) +
"defined on the {}".format(parent))
if check_coords:
if not chart.valid_coordinates(*coords):
raise ValueError("the coordinates {}".format(coords) +
" are not valid on the {}".format(chart))
for schart in chart._supercharts:
self._coordinates[schart] = tuple(coords)
for schart in chart._subcharts:
if schart != chart:
if schart.valid_coordinates(*coords):
self._coordinates[schart] = tuple(coords)
self._name = name
if latex_name is None:
self._latex_name = self._name
else:
self._latex_name = latex_name
def _repr_(self):
r"""
Return a string representation of the point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3))
sage: p._repr_()
'Point on the 2-dimensional topological manifold M'
sage: p = M((2,-3), name='p')
sage: p._repr_()
'Point p on the 2-dimensional topological manifold M'
sage: repr(p) # indirect doctest
'Point p on the 2-dimensional topological manifold M'
"""
description = "Point"
if self._name is not None:
description += " " + self._name
description += " on the {}".format(self._manifold)
return description
def _latex_(self):
r"""
Return a LaTeX representation of the point.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3))
sage: p._latex_()
'\\mbox{Point on the 2-dimensional topological manifold M}'
sage: p = M((2,-3), name='p')
sage: p._latex_()
'p'
sage: p = M((2,-3), name='p', latex_name=r'\mathcal{P}')
sage: p._latex_()
'\\mathcal{P}'
sage: latex(p) # indirect doctest
\mathcal{P}
"""
if self._latex_name is None:
return r'\mbox{' + str(self) + r'}'
return self._latex_name
def coordinates(self, chart=None, old_chart=None):
r"""
Return the point coordinates in the specified chart.
If these coordinates are not already known, they are computed from
known ones by means of change-of-chart formulas.
An equivalent way to get the coordinates of a point is to let the
chart acting on the point, i.e. if ``X`` is a chart and ``p`` a
point, one has ``p.coordinates(chart=X) == X(p)``.
INPUT:
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
- ``old_chart`` -- (default: ``None``) chart from which the
coordinates in ``chart`` are to be computed; if ``None``, a chart
in which the point's coordinates are already known will be picked,
privileging the subset's default chart
EXAMPLES:
Spherical coordinates of a point on `\RR^3`::
sage: M = Manifold(3, 'M', structure='topological')
sage: c_spher.<r,th,ph> = M.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):\phi') # spherical coordinates
sage: p = M.point((1, pi/2, pi))
sage: p.coordinates() # coordinates in the manifold's default chart
(1, 1/2*pi, pi)
Since the default chart of ``M`` is ``c_spher``, it is equivalent to
write::
sage: p.coordinates(c_spher)
(1, 1/2*pi, pi)
An alternative way to get the coordinates is to let the chart act
on the point (from the very definition of a chart)::
sage: c_spher(p)
(1, 1/2*pi, pi)
A shortcut for ``coordinates`` is ``coord``::
sage: p.coord()
(1, 1/2*pi, pi)
Computing the Cartesian coordinates from the spherical ones::
sage: c_cart.<x,y,z> = M.chart() # Cartesian coordinates
sage: c_spher.transition_map(c_cart, [r*sin(th)*cos(ph),
....: r*sin(th)*sin(ph), r*cos(th)])
Change of coordinates from Chart (M, (r, th, ph)) to Chart (M, (x, y, z))
The computation is performed by means of the above change
of coordinates::
sage: p.coord(c_cart)
(-1, 0, 0)
sage: p.coord(c_cart) == c_cart(p)
True
Coordinates of a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: (a, b) = var('a b') # generic coordinates for the point
sage: P = M.point((a, b), name='P')
Coordinates of ``P`` in the manifold's default chart::
sage: P.coord()
(a, b)
Coordinates of ``P`` in a new chart::
sage: c_uv.<u,v> = M.chart()
sage: ch_xy_uv = c_xy.transition_map(c_uv, [x-y, x+y])
sage: P.coord(c_uv)
(a - b, a + b)
Coordinates of ``P`` in a third chart::
sage: c_wz.<w,z> = M.chart()
sage: ch_uv_wz = c_uv.transition_map(c_wz, [u^3, v^3])
sage: P.coord(c_wz, old_chart=c_uv)
(a^3 - 3*a^2*b + 3*a*b^2 - b^3, a^3 + 3*a^2*b + 3*a*b^2 + b^3)
Actually, in the present case, it is not necessary to specify
``old_chart='uv'``. Note that the first command erases all
the coordinates except those in the chart ``c_uv``::
sage: P.set_coord((a-b, a+b), c_uv)
sage: P._coordinates
{Chart (M, (u, v)): (a - b, a + b)}
sage: P.coord(c_wz)
(a^3 - 3*a^2*b + 3*a*b^2 - b^3, a^3 + 3*a^2*b + 3*a*b^2 + b^3)
sage: P._coordinates # random (dictionary output)
{Chart (M, (u, v)): (a - b, a + b),
Chart (M, (w, z)): (a^3 - 3*a^2*b + 3*a*b^2 - b^3,
a^3 + 3*a^2*b + 3*a*b^2 + b^3)}
"""
if chart is None:
dom = self.parent()
chart = dom._def_chart
def_chart = chart
else:
dom = chart.domain()
def_chart = dom._def_chart
if self not in dom:
raise ValueError("the point does not belong to the domain " +
"of {}".format(chart))
if chart not in self._coordinates:
# Check whether chart corresponds to a superchart of a chart
# in which the coordinates are known:
for ochart in self._coordinates:
if chart in ochart._supercharts or chart in ochart._subcharts:
self._coordinates[chart] = self._coordinates[ochart]
return self._coordinates[chart]
# If this point is reached, some change of coordinates must be
# performed
if old_chart is not None:
s_old_chart = old_chart
s_chart = chart
else:
# A chart must be found as a starting point of the computation
# The domain's default chart is privileged:
if (def_chart in self._coordinates
and (def_chart, chart) in dom._coord_changes):
old_chart = def_chart
s_old_chart = def_chart
s_chart = chart
else:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if (subchart, chart) in dom._coord_changes:
old_chart = ochart
s_old_chart = subchart
s_chart = chart
break
if old_chart is not None:
break
if old_chart is None:
# Some search involving the subcharts of chart is
# performed:
for schart in chart._subcharts:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if (subchart, schart) in dom._coord_changes:
old_chart = ochart
s_old_chart = subchart
s_chart = schart
break
if old_chart is not None:
break
if old_chart is not None:
break
if old_chart is None:
raise ValueError("the coordinates of {}".format(self) +
" in the {}".format(chart) + " cannot be computed " +
"by means of known changes of charts.")
else:
chcoord = dom._coord_changes[(s_old_chart, s_chart)]
self._coordinates[chart] = chcoord(*self._coordinates[old_chart])
return self._coordinates[chart]
coord = coordinates
def set_coordinates(self, coords, chart=None):
r"""
Sets the point coordinates in the specified chart.
Coordinates with respect to other charts are deleted, in order to
avoid any inconsistency. To keep them, use the method :meth:`add_coord`
instead.
INPUT:
- ``coords`` -- the point coordinates (as a tuple or a list)
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
EXAMPLES:
Setting coordinates to a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point()
We set the coordinates in the manifold's default chart::
sage: p.set_coordinates((2,-3))
sage: p.coordinates()
(2, -3)
sage: X(p)
(2, -3)
A shortcut for ``set_coordinates`` is ``set_coord``::
sage: p.set_coord((2,-3))
sage: p.coord()
(2, -3)
Let us introduce a second chart on the manifold::
sage: Y.<u,v> = M.chart()
sage: X_to_Y = X.transition_map(Y, [x+y, x-y])
If we set the coordinates of ``p`` in chart ``Y``, those in chart ``X``
are lost::
sage: Y(p)
(-1, 5)
sage: p.set_coord(Y(p), chart=Y)
sage: p._coordinates
{Chart (M, (u, v)): (-1, 5)}
"""
self._coordinates.clear()
self.add_coord(coords, chart)
set_coord = set_coordinates
def add_coordinates(self, coords, chart=None):
r"""
Adds some coordinates in the specified chart.
The previous coordinates with respect to other charts are kept. To
clear them, use :meth:`set_coord` instead.
INPUT:
- ``coords`` -- the point coordinates (as a tuple or a list)
- ``chart`` -- (default: ``None``) chart in which the coordinates
are given; if none are provided, the coordinates are assumed to
refer to the subset's default chart
.. WARNING::
If the point has already coordinates in other charts, it
is the user's responsibility to make sure that the coordinates
to be added are consistent with them.
EXAMPLES:
Setting coordinates to a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point()
We give the point some coordinates in the manifold's default chart::
sage: p.add_coordinates((2,-3))
sage: p.coordinates()
(2, -3)
sage: X(p)
(2, -3)
A shortcut for ``add_coordinates`` is ``add_coord``::
sage: p.add_coord((2,-3))
sage: p.coord()
(2, -3)
Let us introduce a second chart on the manifold::
sage: Y.<u,v> = M.chart()
sage: X_to_Y = X.transition_map(Y, [x+y, x-y])
If we add coordinates for ``p`` in chart ``Y``, those in chart ``X``
are kept::
sage: p.add_coordinates((-1,5), chart=Y)
sage: p._coordinates # random (dictionary output)
{Chart (M, (u, v)): (-1, 5), Chart (M, (x, y)): (2, -3)}
On the contrary, with the method :meth:`set_coordinates`, the
coordinates in charts different from ``Y`` would be lost::
sage: p.set_coordinates((-1,5), chart=Y)
sage: p._coordinates
{Chart (M, (u, v)): (-1, 5)}
"""
if len(coords) != self.parent().manifold()._dim:
raise ValueError("the number of coordinates must be equal to " +
"the manifold's dimension.")
if chart is None:
chart = self.parent()._def_chart
else:
if chart not in self.parent()._atlas:
raise ValueError("the {}".format(chart) + " has not been " +
"defined on the {}".format(self.parent()))
self._coordinates[chart] = coords
add_coord = add_coordinates
def __eq__(self, other):
r"""
Compares the current point with another one.
EXAMPLES:
Comparison with coordinates in the same chart::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: q = M((2,-3), chart=X)
sage: p == q
True
sage: q = M((-2,-3), chart=X)
sage: p == q
False
Comparison with coordinates of other in a subchart::
sage: U = M.open_subset('U', coord_def={X: x>0})
sage: XU = X.restrict(U)
sage: q = U((2,-3), chart=XU)
sage: p == q and q == p
True
sage: q = U((1,-3), chart=XU)
sage: p == q or q == p
False
Comparison requiring a change of chart::
sage: Y.<u,v> = U.chart()
sage: XU_to_Y = XU.transition_map(Y, (ln(x), x+y))
sage: XU_to_Y.inverse()(u,v)
(e^u, v - e^u)
sage: q = U((ln(2),-1), chart=Y)
sage: p == q and q == p
True
sage: q = U((ln(3),1), chart=Y)
sage: p == q or q == p
False
Comparison with periodic coordinates::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart('x y:period=2')
sage: p = M((0,1))
sage: q = M((0,3))
sage: p == q and q == p
True
sage: q = M((0,2))
sage: p == q or q == p
False
sage: Y.<u,v> = M.chart('u:(0,2*pi):periodic v')
sage: p = M((0,1), chart=Y)
sage: q = M((-4*pi,1), chart=Y)
sage: p == q and q == p
True
sage: q = M((3*pi,1), chart=Y)
sage: p == q or q == p
False
"""
if other is self:
return True
if not isinstance(other, ManifoldPoint):
return False
if other.parent().manifold() != self.parent().manifold():
return False
# Search for a common chart to compare the coordinates
common_chart = None
# the subset's default chart is privileged:
# FIXME: Make this a better test
if hasattr(self.parent(), '_def_chart'): # self.parent() is open
def_chart = self.parent()._def_chart
else:
def_chart = self.parent().manifold()._def_chart
if def_chart in self._coordinates and def_chart in other._coordinates:
common_chart = def_chart
else:
for chart in self._coordinates:
if chart in other._coordinates:
common_chart = chart
break
if common_chart is None:
# A common chart is searched via a coordinate transformation,
# privileging the default chart
if def_chart in self._coordinates:
try:
other.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if common_chart is None:
if def_chart in other._coordinates:
try:
self.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if common_chart is None:
# At this stage, a common chart is searched via a coordinate
# transformation from any chart
for chart in self._coordinates:
try:
other.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
else:
# Attempt a coordinate transformation in the reverse way:
for chart in other._coordinates:
try:
self.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
if common_chart is None:
return False
#!# Another option would be:
# raise ValueError("no common chart has been found to compare " +
# "{} and {}".format(self, other))
periods = common_chart.periods()
for ind, (xs, xo) in enumerate(zip(self._coordinates[common_chart],
other._coordinates[common_chart])):
diff = xs - xo
period = periods[ind]
if period is not None:
if not (diff/period in ZZ):
return False
else:
if isinstance(diff, Expression) and not diff.is_trivial_zero():
return False
elif not (diff == 0):
return False
return True
def __ne__(self, other):
r"""
Non-equality operator.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: q = M((0,1), chart=X)
sage: p != q
True
sage: p != M((2,-3), chart=X)
False
"""
return not (self == other)
def __hash__(self):
r"""
Return the hash of ``self``.
This hash function is set to constant on a given manifold, to fulfill
Python's credo::
p == q ==> hash(p) == hash(q)
This is necessary since ``p`` and ``q`` may be created in
different coordinate systems and nevertheless be equal.
.. TODO::
Find a better hash function.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M((2,-3), chart=X)
sage: hash(p) == hash(M)
True
"""
return hash(self.parent().manifold())
@options(size=10, color='black', label_color=None, fontsize=10, label_offset=0.1)
def plot(self, chart=None, ambient_coords=None, mapping=None,
label=None, parameters=None, **kwds):
r"""
For real manifolds, plot ``self`` in a Cartesian graph based
on the coordinates of some ambient chart.
The point is drawn in terms of two (2D graphics) or three (3D graphics)
coordinates of a given chart, called hereafter the *ambient chart*.
The domain of the ambient chart must contain the point, or its image
by a continuous manifold map `\Phi`.
INPUT:
- ``chart`` -- (default: ``None``) the ambient chart (see above); if
``None``, the ambient chart is set the default chart of
``self.parent()``
- ``ambient_coords`` -- (default: ``None``) tuple containing the 2
or 3 coordinates of the ambient chart in terms of which the plot
is performed; if ``None``, all the coordinates of the ambient
chart are considered
- ``mapping`` -- (default: ``None``)
:class:`~sage.manifolds.continuous_map.ContinuousMap`; continuous
manifold map `\Phi` providing the link between the current point
`p` and the ambient chart ``chart``: the domain of ``chart`` must
contain `\Phi(p)`; if ``None``, the identity map is assumed
- ``label`` -- (default: ``None``) label printed next to the point;
if ``None``, the point's name is used
- ``parameters`` -- (default: ``None``) dictionary giving the numerical
values of the parameters that may appear in the point coordinates
- ``size`` -- (default: 10) size of the point once drawn as a small
disk or sphere
- ``color`` -- (default: ``'black'``) color of the point
- ``label_color`` -- (default: ``None``) color to print the label;
if ``None``, the value of ``color`` is used
- ``fontsize`` -- (default: 10) size of the font used to print the
label
- ``label_offset`` -- (default: 0.1) determines the separation between
the point and its label
OUTPUT:
- a graphic object, either an instance of
:class:`~sage.plot.graphics.Graphics` for a 2D plot (i.e. based on
2 coordinates of the ambient chart) or an instance of
:class:`~sage.plot.plot3d.base.Graphics3d` for a 3D plot (i.e.
based on 3 coordinates of the ambient chart)
EXAMPLES:
Drawing a point on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: p = M.point((1,3), name='p')
sage: g = p.plot(X)
sage: print(g)
Graphics object consisting of 2 graphics primitives
sage: gX = X.plot(max_range=4) # plot of the coordinate grid
sage: g + gX # display of the point atop the coordinate grid
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(X)
gX = X.plot(max_range=4)
sphinx_plot(g+gX)
Actually, since ``X`` is the default chart of the open set in which
``p`` has been defined, it can be skipped in the arguments of
``plot``::
sage: g = p.plot()
sage: g + gX
Graphics object consisting of 20 graphics primitives
Call with some options::
sage: g = p.plot(chart=X, size=40, color='green', label='$P$',
....: label_color='blue', fontsize=20, label_offset=0.3)
sage: g + gX
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(chart=X, size=40, color='green', label='$P$', \
label_color='blue', fontsize=20, label_offset=0.3)
gX = X.plot(max_range=4)
sphinx_plot(g+gX)
Use of the ``parameters`` option to set a numerical value of some
symbolic variable::
sage: a = var('a')
sage: q = M.point((a,2*a), name='q')
sage: gq = q.plot(parameters={a:-2}, label_offset=0.2)
sage: g + gX + gq
Graphics object consisting of 22 graphics primitives
.. PLOT::
M = Manifold(2, 'M', structure='topological')
X = M.chart('x y'); x,y = X[:]
p = M.point((1,3), name='p')
g = p.plot(chart=X, size=40, color='green', label='$P$', \
label_color='blue', fontsize=20, label_offset=0.3)
var('a')
q = M.point((a,2*a), name='q')
gq = q.plot(parameters={a:-2}, label_offset=0.2)
gX = X.plot(max_range=4)
sphinx_plot(g+gX+gq)
The numerical value is used only for the plot::
sage: q.coord()
(a, 2*a)
Drawing a point on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', structure='topological')
sage: X.<x,y,z> = M.chart()
sage: p = M.point((2,1,3), name='p')
sage: g = p.plot()
sage: print(g)
Graphics3d Object
sage: gX = X.plot(number_values=5) # coordinate mesh cube
sage: g + gX # display of the point atop the coordinate mesh
Graphics3d Object
Call with some options::
sage: g = p.plot(chart=X, size=40, color='green', label='P_1',
....: label_color='blue', fontsize=20, label_offset=0.3)
sage: g + gX
Graphics3d Object
An example of plot via a mapping: plot of a point on a 2-sphere viewed
in the 3-dimensional space ``M``::
sage: S2 = Manifold(2, 'S^2', structure='topological')
sage: U = S2.open_subset('U') # the open set covered by spherical coord.
sage: XS.<th,ph> = U.chart(r'th:(0,pi):\theta ph:(0,2*pi):\phi')
sage: p = U.point((pi/4, pi/8), name='p')
sage: F = S2.continuous_map(M, {(XS, X): [sin(th)*cos(ph),
....: sin(th)*sin(ph), cos(th)]}, name='F')
sage: F.display()
F: S^2 → M
on U: (th, ph) ↦ (x, y, z) = (cos(ph)*sin(th), sin(ph)*sin(th), cos(th))
sage: g = p.plot(chart=X, mapping=F)
sage: gS2 = XS.plot(chart=X, mapping=F, number_values=9)
sage: g + gS2
Graphics3d Object
Use of the option ``ambient_coords`` for plots on a 4-dimensional
manifold::
sage: M = Manifold(4, 'M', structure='topological')
sage: X.<t,x,y,z> = M.chart()
sage: p = M.point((1,2,3,4), name='p')
sage: g = p.plot(X, ambient_coords=(t,x,y), label_offset=0.4) # the coordinate z is skipped
sage: gX = X.plot(X, ambient_coords=(t,x,y), number_values=5) # long time
sage: g + gX # 3D plot # long time
Graphics3d Object
sage: g = p.plot(X, ambient_coords=(t,y,z), label_offset=0.4) # the coordinate x is skipped
sage: gX = X.plot(X, ambient_coords=(t,y,z), number_values=5) # long time
sage: g + gX # 3D plot # long time
Graphics3d Object
sage: g = p.plot(X, ambient_coords=(y,z), label_offset=0.4) # the coordinates t and x are skipped
sage: gX = X.plot(X, ambient_coords=(y,z))
sage: g + gX # 2D plot
Graphics object consisting of 20 graphics primitives
.. PLOT::
M = Manifold(4, 'M', structure='topological')
X = M.chart('t x y z'); t,x,y,z = X[:]
p = M.point((1,2,3,4), name='p')
g = p.plot(X, ambient_coords=(y,z), label_offset=0.4)
gX = X.plot(X, ambient_coords=(y,z))
sphinx_plot(g+gX)
"""
from sage.plot.point import point2d
from sage.plot.text import text
from sage.plot.graphics import Graphics
from sage.plot.plot3d.shapes2 import point3d, text3d
from sage.manifolds.chart import Chart
if self._manifold.base_field_type() != 'real':
raise NotImplementedError('plot of points on manifolds over fields different'
' from the real field is not implemented')
# The ambient chart:
if chart is None:
chart = self.parent().default_chart()
elif not isinstance(chart, Chart):
raise TypeError("the argument 'chart' must be a coordinate chart")
# The effective point to be plotted:
if mapping is None:
eff_point = self
else:
eff_point = mapping(self)
# The coordinates of the ambient chart used for the plot:
if ambient_coords is None:
ambient_coords = chart[:]
elif not isinstance(ambient_coords, tuple):
ambient_coords = tuple(ambient_coords)
nca = len(ambient_coords)
if nca != 2 and nca != 3:
raise TypeError("invalid number of ambient coordinates: {}".format(nca))
# Extract the kwds options
size = kwds['size']
color = kwds['color']
label_color = kwds['label_color']
fontsize = kwds['fontsize']
label_offset = kwds['label_offset']
# The point coordinates:
coords = eff_point.coord(chart)
xx = chart[:]
xp = [coords[xx.index(c)] for c in ambient_coords]
if parameters is not None:
xps = [coord.substitute(parameters) for coord in xp]
xp = xps
xlab = [coord + label_offset for coord in xp]
if label_color is None:
label_color = color
resu = Graphics()
if nca == 2:
if label is None:
label = r'$' + self._latex_name + r'$'
resu += (point2d(xp, color=color, size=size) +
text(label, xlab, fontsize=fontsize, color=label_color))
else:
if label is None:
label = self._name
resu += (point3d(xp, color=color, size=size) +
text3d(label, xlab, fontsize=fontsize, color=label_color))
return resu
| 37.466934
| 117
| 0.527091
|
794c49e60fe9ac11ab32091da47d6c97c2072eb2
| 265
|
py
|
Python
|
utils/file-tester/tests/__init__.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 3
|
2020-10-15T10:29:59.000Z
|
2020-12-01T21:40:55.000Z
|
utils/file-tester/tests/__init__.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 40
|
2020-08-17T21:08:33.000Z
|
2021-02-02T19:56:09.000Z
|
utils/file-tester/tests/__init__.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 10
|
2021-06-10T16:27:27.000Z
|
2021-12-27T12:31:57.000Z
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
| 53
| 85
| 0.777358
|
794c4bc95b34104b3942c43908ac63e842eb2ef7
| 3,971
|
py
|
Python
|
tensorflow_gan/examples/mnist/data_provider_test.py
|
sanidhyamangal/gan
|
540ab76c04b5ad80cefa068e0f349b80ea4decb1
|
[
"Apache-2.0"
] | 1
|
2022-01-05T11:48:21.000Z
|
2022-01-05T11:48:21.000Z
|
tensorflow_gan/examples/mnist/data_provider_test.py
|
HabibMrad/gan-1
|
6a2bf12f968d0a913e8040121edc8bb6e0680a08
|
[
"Apache-2.0"
] | 1
|
2021-02-24T00:51:29.000Z
|
2021-02-24T00:51:29.000Z
|
tensorflow_gan/examples/mnist/data_provider_test.py
|
HabibMrad/gan-1
|
6a2bf12f968d0a913e8040121edc8bb6e0680a08
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.mnist import data_provider
mock = tf.test.mock
class DataProviderTest(tf.test.TestCase):
def setUp(self):
super(DataProviderTest, self).setUp()
mock_imgs = np.zeros([28, 28, 1], dtype=np.uint8)
mock_lbls = np.ones([], dtype=np.int64)
self.mock_ds = tf.data.Dataset.from_tensors({
'image': mock_imgs,
'label': mock_lbls
})
@mock.patch.object(data_provider, 'tfds', autospec=True)
def test_provide_dataset(self, mock_tfds):
batch_size = 5
mock_tfds.load.return_value = self.mock_ds
ds = data_provider.provide_dataset('test', batch_size)
self.assertIsInstance(ds, tf.data.Dataset)
output = tf.data.get_output_classes(ds)
self.assertIsInstance(output, dict)
self.assertSetEqual(set(output.keys()), set(['images', 'labels']))
self.assertEqual(output['images'], tf.Tensor)
self.assertEqual(output['labels'], tf.Tensor)
shapes = tf.data.get_output_shapes(ds)
self.assertIsInstance(shapes, dict)
self.assertSetEqual(set(shapes.keys()), set(['images', 'labels']))
self.assertIsInstance(shapes['images'], tf.TensorShape)
self.assertIsInstance(shapes['labels'], tf.TensorShape)
self.assertListEqual(shapes['images'].as_list(), [batch_size, 28, 28, 1])
self.assertListEqual(shapes['labels'].as_list(), [batch_size, 10])
types = tf.data.get_output_types(ds)
self.assertIsInstance(types, dict)
self.assertSetEqual(set(types.keys()), set(['images', 'labels']))
self.assertEqual(types['images'], tf.float32)
self.assertEqual(types['labels'], tf.float32)
next_batch = tf.data.make_one_shot_iterator(ds).get_next()
images = next_batch['images']
labels = next_batch['labels']
with self.cached_session() as sess:
images, labels = sess.run([images, labels])
self.assertEqual(images.shape, (batch_size, 28, 28, 1))
self.assertTrue(np.all(np.abs(images) <= 1))
self.assertEqual(labels.shape, (batch_size, 10))
@mock.patch.object(data_provider, 'tfds', autospec=True)
def test_provide_data(self, mock_tfds):
batch_size = 5
mock_tfds.load.return_value = self.mock_ds
images, labels = data_provider.provide_data('test', batch_size)
with self.cached_session() as sess:
sess.run(tf.tables_initializer())
images, labels = sess.run([images, labels])
self.assertTupleEqual(images.shape, (batch_size, 28, 28, 1))
self.assertTrue(np.all(np.abs(images) <= 1))
self.assertTupleEqual(labels.shape, (batch_size, 10))
@mock.patch.object(data_provider, 'tfds', autospec=True)
def test_provide_data_can_be_reinitialized(self, mock_tfds):
if tf.executing_eagerly():
      # Accessing properties or calling methods on the result of
      # self.session() is not supported in eager mode, so skip this test.
return
batch_size = 5
mock_tfds.load.return_value = self.mock_ds
images, labels = data_provider.provide_data('test', batch_size)
with self.session() as sess:
sess.run([images, labels])
sess.run([images, labels])
with self.session() as sess:
sess.run([images, labels])
sess.run([images, labels])
if __name__ == '__main__':
tf.test.main()
| 34.530435
| 77
| 0.709393
|
794c4bf962d6f2cbc52027178b58848d481f3719
| 12,930
|
py
|
Python
|
modules/networks.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | 3
|
2021-03-04T01:48:03.000Z
|
2021-12-16T06:55:10.000Z
|
modules/networks.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | null | null | null |
modules/networks.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | null | null | null |
# The MIT License
#
# Copyright (c) 2020 Vincent Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import torch
import torch.nn as nn
from modules.loss import GinormousCompositeLoss
from modules.layers import AdaptiveInstanceNorm2d, LayerNorm2d
class ResidualBlock(nn.Module):
''' Implements a residual block with (Adaptive) Instance Normalization '''
def __init__(self, channels, s_dim=None, h_dim=None):
super().__init__()
self.conv1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=3)
),
)
self.conv2 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=3)
),
)
self.use_style = s_dim is not None and h_dim is not None
if self.use_style:
self.norm1 = AdaptiveInstanceNorm2d(channels, s_dim, h_dim)
self.norm2 = AdaptiveInstanceNorm2d(channels, s_dim, h_dim)
else:
self.norm1 = nn.InstanceNorm2d(channels)
self.norm2 = nn.InstanceNorm2d(channels)
self.activation = nn.ReLU()
def forward(self, x, s=None):
x_id = x
x = self.conv1(x)
x = self.norm1(x, s) if self.use_style else self.norm1(x)
x = self.activation(x)
x = self.conv2(x)
x = self.norm2(x, s) if self.use_style else self.norm2(x)
return x + x_id
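# Shape sketch (illustrative): with both ``s_dim`` and ``h_dim`` given, the
# block applies AdaIN conditioned on a style code ``s`` and preserves the
# spatial shape of ``x``. The tensor sizes and the (N, s_dim, 1, 1) style
# layout are assumptions for illustration.
#
# block = ResidualBlock(channels=64, s_dim=8, h_dim=256)
# x = torch.randn(1, 64, 32, 32)
# s = torch.randn(1, 8, 1, 1)
# assert block(x, s=s).shape == x.shape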
class ContentEncoder(nn.Module):
''' Implements a MUNIT encoder for content '''
def __init__(self, base_channels=64, n_downsample=2, n_res_blocks=4):
super().__init__()
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=7)
),
nn.InstanceNorm2d(channels),
nn.ReLU(inplace=True),
]
# downsampling layers
for i in range(n_downsample):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.InstanceNorm2d(2 * channels),
nn.ReLU(inplace=True),
]
channels *= 2
# residual blocks with non-adaptive instance normalization
layers += [
ResidualBlock(channels) for _ in range(n_res_blocks)
]
self.layers = nn.Sequential(*layers)
self.out_channels = channels
def forward(self, x):
return self.layers(x)
@property
def channels(self):
return self.out_channels
class StyleEncoder(nn.Module):
''' Implements a MUNIT encoder for style '''
n_deepen_layers = 2
def __init__(self, base_channels=64, n_downsample=4, s_dim=8):
super().__init__()
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=7, padding=0)
),
nn.ReLU(inplace=True),
]
# downsampling layers
for i in range(self.n_deepen_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.ReLU(inplace=True),
]
channels *= 2
for i in range(n_downsample - self.n_deepen_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels, kernel_size=4, stride=2)
),
nn.ReLU(inplace=True),
]
# apply global pooling and pointwise convolution to style_channels
layers += [
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, s_dim, kernel_size=1),
]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class Decoder(nn.Module):
'''
Decoder Class
Values:
in_channels: number of channels from encoder output, a scalar
n_upsample: number of upsampling layers, a scalar
n_res_blocks: number of residual blocks, a scalar
s_dim: the dimension of the style tensor (s), a scalar
h_dim: the hidden dimension of the MLP, a scalar
'''
def __init__(self, in_channels, n_upsample=2, n_res_blocks=4, s_dim=8, h_dim=256):
super().__init__()
channels = in_channels
# residual blocks with adaptive instance norm
self.res_blocks = nn.ModuleList([
            ResidualBlock(channels, s_dim=s_dim, h_dim=h_dim) for _ in range(n_res_blocks)
])
# upsampling blocks
layers = []
for i in range(n_upsample):
layers += [
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d(2),
nn.utils.spectral_norm(
nn.Conv2d(channels, channels // 2, kernel_size=5)
),
LayerNorm2d(channels // 2),
]
channels //= 2
layers += [
nn.ReflectionPad2d(3),
nn.utils.spectral_norm(
nn.Conv2d(channels, 3, kernel_size=7)
),
nn.Tanh(),
]
self.layers = nn.Sequential(*layers)
def forward(self, x, s):
for res_block in self.res_blocks:
x = res_block(x, s=s)
x = self.layers(x)
return x
class Generator(nn.Module):
''' Implements a MUNIT generator '''
def __init__(
self,
base_channels: int = 64,
n_c_downsample: int = 2,
n_s_downsample: int = 4,
n_res_blocks: int = 4,
s_dim: int = 8,
h_dim: int = 256,
):
super().__init__()
self.c_enc = ContentEncoder(
base_channels=base_channels, n_downsample=n_c_downsample, n_res_blocks=n_res_blocks,
)
self.s_enc = StyleEncoder(
base_channels=base_channels, n_downsample=n_s_downsample, s_dim=s_dim,
)
self.dec = Decoder(
self.c_enc.channels, n_upsample=n_c_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
def encode(self, x):
content = self.c_enc(x)
style = self.s_enc(x)
return (content, style)
def decode(self, content, style):
return self.dec(content, style)
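# Illustrative round trip (not executed anywhere in this module): encoding an
# image yields a (content, style) pair, and decoding content from one image
# with the style of another is what produces a cross-domain translation. The
# input resolution below is an assumption.
#
# gen = Generator()
# x = torch.randn(1, 3, 256, 256)
# content, style = gen.encode(x)
# x_recon = gen.decode(content, style)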
class Discriminator(nn.Module):
''' Implements a MUNIT discriminator '''
def __init__(
self,
base_channels: int = 64,
n_layers: int = 3,
n_discriminators: int = 3,
):
super().__init__()
self.discriminators = nn.ModuleList([
self.patchgan_discriminator(base_channels, n_layers) for _ in range(n_discriminators)
])
self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
@staticmethod
def patchgan_discriminator(base_channels, n_layers):
'''
Function that constructs and returns one PatchGAN discriminator module.
'''
channels = base_channels
# input convolutional layer
layers = [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(3, channels, kernel_size=4, stride=2),
),
nn.LeakyReLU(0.2, inplace=True),
]
# hidden convolutional layers
for _ in range(n_layers):
layers += [
nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2)
),
nn.LeakyReLU(0.2, inplace=True),
]
channels *= 2
# output projection layer
layers += [
nn.utils.spectral_norm(
nn.Conv2d(channels, 1, kernel_size=1)
),
]
return nn.Sequential(*layers)
def forward(self, x):
outputs = []
for discriminator in self.discriminators:
outputs.append(discriminator(x))
x = self.downsample(x)
return outputs
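# Illustrative: the module applies each PatchGAN discriminator to a
# progressively average-pooled copy of the input, so a single call returns one
# patch map per scale (three with the defaults). Exact output sizes depend on
# ``n_layers`` and the input resolution, which is an assumption here.
#
# dis = Discriminator()
# outs = dis(torch.randn(1, 3, 256, 256))
# print([tuple(o.shape) for o in outs])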
class MUNIT(nn.Module):
''' Implements the MUNIT model in full '''
def __init__(
self,
gen_channels: int = 64,
n_c_downsample: int = 2,
n_s_downsample: int = 4,
n_res_blocks: int = 4,
s_dim: int = 8,
h_dim: int = 256,
dis_channels: int = 64,
n_layers: int = 3,
n_discriminators: int = 3,
scale_loss_weights_to_one: bool = True,
):
super().__init__()
self.gen_a = Generator(
base_channels=gen_channels, n_c_downsample=n_c_downsample, n_s_downsample=n_s_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
self.gen_b = Generator(
base_channels=gen_channels, n_c_downsample=n_c_downsample, n_s_downsample=n_s_downsample, n_res_blocks=n_res_blocks, s_dim=s_dim, h_dim=h_dim,
)
self.dis_a = Discriminator(
base_channels=dis_channels, n_layers=n_layers, n_discriminators=n_discriminators,
)
self.dis_b = Discriminator(
base_channels=dis_channels, n_layers=n_layers, n_discriminators=n_discriminators,
)
self.s_dim = s_dim
self.loss = GinormousCompositeLoss
self.scale_loss_weights_to_one = scale_loss_weights_to_one
def forward(self, x_a: torch.tensor, x_b: torch.tensor):
s_a = torch.randn(x_a.size(0), self.s_dim, 1, 1, device=x_a.device).to(x_a.dtype)
s_b = torch.randn(x_b.size(0), self.s_dim, 1, 1, device=x_b.device).to(x_b.dtype)
# encode real x and compute image reconstruction loss
x_a_loss, c_a, s_a_fake = self.loss.image_recon_loss(x_a, self.gen_a)
x_b_loss, c_b, s_b_fake = self.loss.image_recon_loss(x_b, self.gen_b)
# decode real (c, s) and compute latent reconstruction loss
c_b_loss, s_a_loss, x_ba = self.loss.latent_recon_loss(c_b, s_a, self.gen_a)
c_a_loss, s_b_loss, x_ab = self.loss.latent_recon_loss(c_a, s_b, self.gen_b)
# compute adversarial losses
gen_a_adv_loss = self.loss.adversarial_loss(x_ba, self.dis_a, False)
gen_b_adv_loss = self.loss.adversarial_loss(x_ab, self.dis_b, False)
# sum up losses for gen
gen_loss = (
10 * x_a_loss + c_b_loss + s_a_loss + gen_a_adv_loss + \
10 * x_b_loss + c_a_loss + s_b_loss + gen_b_adv_loss
)
if self.scale_loss_weights_to_one:
gen_loss = gen_loss * 0.1
# sum up losses for dis
dis_loss = (
self.loss.adversarial_loss(x_ba.detach(), self.dis_a, False) + \
self.loss.adversarial_loss(x_a.detach(), self.dis_a, True) + \
self.loss.adversarial_loss(x_ab.detach(), self.dis_b, False) + \
self.loss.adversarial_loss(x_b.detach(), self.dis_b, True)
)
return gen_loss, dis_loss, x_ab, x_ba
def infer(self, x_a: torch.tensor, x_b: torch.tensor, encode_style: bool = True):
self.eval()
if not encode_style:
            s_a = torch.ones(x_a.size(0), self.s_dim, 1, 1, device=x_a.device).to(x_a.dtype)
            s_b = torch.ones(x_b.size(0), self.s_dim, 1, 1, device=x_b.device).to(x_b.dtype)
c_a, _ = self.gen_a.encode(x_a)
c_b, _ = self.gen_b.encode(x_b)
else:
c_a, s_a = self.gen_a.encode(x_a)
c_b, s_b = self.gen_b.encode(x_b)
x_ba = self.gen_a.decode(c_b, s_a)
x_ab = self.gen_b.decode(c_a, s_b)
return x_ba, x_ab
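# Minimal usage sketch (illustrative; batch shapes are assumptions and the
# backward ordering is only one possible training arrangement):
#
# munit = MUNIT()
# x_a = torch.randn(1, 3, 256, 256)  # batch from domain A
# x_b = torch.randn(1, 3, 256, 256)  # batch from domain B
# gen_loss, dis_loss, x_ab, x_ba = munit(x_a, x_b)
# gen_loss.backward(retain_graph=True)  # gradients for both generators
# dis_loss.backward()                   # gradients for both discriminators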
| 33.324742
| 154
| 0.594509
|
794c4c555392f069064fb3b88f959e69ff233e1d
| 31,434
|
py
|
Python
|
detest/testlink_import/models.py
|
tahpee/detest
|
9cf3a434ee17d4867e8cc49bc501ae0f29535f59
|
[
"MIT"
] | null | null | null |
detest/testlink_import/models.py
|
tahpee/detest
|
9cf3a434ee17d4867e8cc49bc501ae0f29535f59
|
[
"MIT"
] | null | null | null |
detest/testlink_import/models.py
|
tahpee/detest
|
9cf3a434ee17d4867e8cc49bc501ae0f29535f59
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
# TestLink Open Source Project - http://testlink.sourceforge.net/
# This script is distributed under the GNU General Public License 2 or later.
# ---------------------------------------------------------------------------------------
# @filesource testlink_create_tables.sql
#
# SQL script - create all DB tables for MySQL
# tables are in alphabetic order
#
# ATTENTION: do not use a different naming convention, that one already in use.
#
# IMPORTANT NOTE:
# each NEW TABLE added here NEED TO BE DEFINED in object.class.php getDBTables()
#
# IMPORTANT NOTE - DATETIME or TIMESTAMP
# Extracted from MySQL Manual
#
# The TIMESTAMP column type provides a type that you can use to automatically
# mark INSERT or UPDATE operations with the current date and time.
# If you have multiple TIMESTAMP columns in a table, only the first one is updated automatically.
#
# Knowing this is clear that we can use in interchangable way DATETIME or TIMESTAMP
#
# Naming convention for column regarding date/time of creation or change
#
# Right or wrong from TL 1.7 we have used
#
# creation_ts
# modification_ts
#
# Then no other naming convention has to be used as:
# create_ts, modified_ts
#
# CRITIC:
# Because this file will be processed during installation doing text replaces
# to add TABLE PREFIX NAME, any NEW DDL CODE added must be respect present
# convention regarding case and spaces between DDL keywords.
#
# ---------------------------------------------------------------------------------------
# @internal revisions
#
# ---------------------------------------------------------------------------------------
# CREATE TABLE /*prefix*/assignment_types (
# `id` int(10) unsigned NOT NULL auto_increment,
# `fk_table` varchar(30) default '',
# `description` varchar(100) NOT NULL default 'unknown',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
# CREATE TABLE /*prefix*/assignment_status (
# `id` int(10) unsigned NOT NULL auto_increment,
# `description` varchar(100) NOT NULL default 'unknown',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
class Attachments(models.Model):
# CREATE TABLE /*prefix*/attachments (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL auto_increment,
fk_id = models.IntegerField() # `fk_id` int(10) unsigned NOT NULL default '0',
fk_table = models.CharField(max_length=250) # `fk_table` varchar(250) default '',
title = models.CharField(max_length=250) # `title` varchar(250) default '',
description = models.CharField(max_length=250) # `description` varchar(250) default '',
file_name = models.CharField(max_length=250) # `file_name` varchar(250) NOT NULL default '',
file_path = models.CharField(max_length=250) # `file_path` varchar(250) default '',
file_size = models.IntegerField() # `file_size` int(11) NOT NULL default '0',
file_type = models.CharField(max_length=250) # `file_type` varchar(250) NOT NULL default '',
date_added = models.DateTimeField() # `date_added` datetime NOT NULL default '0000-00-00 00:00:00',
content = models.TextField() # `content` longblob,
# `compression_type` int(11) NOT NULL default '0',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
class Builds(models.Model):
# CREATE TABLE /*prefix*/builds (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL auto_increment,
testplan_id = models.IntegerField() # `testplan_id` int(10) unsigned NOT NULL default '0',
name = models.CharField(max_length=100) # `name` varchar(100) NOT NULL default 'undefined',
notes = models.TextField() # `notes` text,
active = models.SmallIntegerField() # `active` tinyint(1) NOT NULL default '1',
is_open = models.SmallIntegerField() # `is_open` tinyint(1) NOT NULL default '1',
author_id = models.IntegerField() # `author_id` int(10) unsigned default NULL,
creation_ts = models.DateTimeField() # `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
release_date = models.DateField() # `release_date` date NULL,
closed_on_date = models.DateField() # `closed_on_date` date NULL,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/name (`testplan_id`,`name`),
# KEY /*prefix*/testplan_id (`testplan_id`)
# ) DEFAULT CHARSET=utf8 COMMENT='Available ';
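# Illustrative ORM usage (an assumption: these models are wired up to the
# legacy TestLink tables, e.g. via Meta.db_table/managed=False, which is not
# shown here). Field semantics follow the commented DDL above.
#
# open_builds = Builds.objects.filter(testplan_id=42, is_open=1)
# for build in open_builds:
#     print(build.name, build.creation_ts)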
#
#
# CREATE TABLE /*prefix*/cfield_build_design_values (
# `field_id` int(10) NOT NULL default '0',
# `node_id` int(10) NOT NULL default '0',
# `value` varchar(4000) NOT NULL default '',
# PRIMARY KEY (`field_id`,`node_id`),
# KEY /*prefix*/idx_cfield_build_design_values (`node_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/cfield_design_values (
# `field_id` int(10) NOT NULL default '0',
# `node_id` int(10) NOT NULL default '0',
# `value` varchar(4000) NOT NULL default '',
# PRIMARY KEY (`field_id`,`node_id`),
# KEY /*prefix*/idx_cfield_design_values (`node_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/cfield_execution_values (
# `field_id` int(10) NOT NULL default '0',
# `execution_id` int(10) NOT NULL default '0',
# `testplan_id` int(10) NOT NULL default '0',
# `tcversion_id` int(10) NOT NULL default '0',
# `value` varchar(4000) NOT NULL default '',
# PRIMARY KEY (`field_id`,`execution_id`,`testplan_id`,`tcversion_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/cfield_node_types (
# `field_id` int(10) NOT NULL default '0',
# `node_type_id` int(10) NOT NULL default '0',
# PRIMARY KEY (`field_id`,`node_type_id`),
# KEY /*prefix*/idx_custom_fields_assign (`node_type_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/cfield_testprojects (
# `field_id` int(10) unsigned NOT NULL default '0',
# `testproject_id` int(10) unsigned NOT NULL default '0',
# `display_order` smallint(5) unsigned NOT NULL default '1',
# `location` smallint(5) unsigned NOT NULL default '1',
# `active` tinyint(1) NOT NULL default '1',
# `required` tinyint(1) NOT NULL default '0',
# `required_on_design` tinyint(1) NOT NULL default '0',
# `required_on_execution` tinyint(1) NOT NULL default '0',
# PRIMARY KEY (`field_id`,`testproject_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/cfield_testplan_design_values (
# `field_id` int(10) NOT NULL default '0',
# `link_id` int(10) NOT NULL default '0' COMMENT 'point to testplan_tcversion id',
# `value` varchar(4000) NOT NULL default '',
# PRIMARY KEY (`field_id`,`link_id`),
# KEY /*prefix*/idx_cfield_tplan_design_val (`link_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# # new fields to display custom fields in new areas
# # test case linking to testplan (test plan design)
# CREATE TABLE /*prefix*/custom_fields (
# `id` int(10) NOT NULL auto_increment,
# `name` varchar(64) NOT NULL default '',
# `label` varchar(64) NOT NULL default '' COMMENT 'label to display on user interface' ,
# `type` smallint(6) NOT NULL default '0',
# `possible_values` varchar(4000) NOT NULL default '',
# `default_value` varchar(4000) NOT NULL default '',
# `valid_regexp` varchar(255) NOT NULL default '',
# `length_min` int(10) NOT NULL default '0',
# `length_max` int(10) NOT NULL default '0',
# `show_on_design` tinyint(3) unsigned NOT NULL default '1' COMMENT '1=> show it during specification design',
# `enable_on_design` tinyint(3) unsigned NOT NULL default '1' COMMENT '1=> user can write/manage it during specification design',
# `show_on_execution` tinyint(3) unsigned NOT NULL default '0' COMMENT '1=> show it during test case execution',
# `enable_on_execution` tinyint(3) unsigned NOT NULL default '0' COMMENT '1=> user can write/manage it during test case execution',
# `show_on_testplan_design` tinyint(3) unsigned NOT NULL default '0' ,
# `enable_on_testplan_design` tinyint(3) unsigned NOT NULL default '0' ,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/idx_custom_fields_name (`name`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/db_version (
# `version` varchar(50) NOT NULL default 'unknown',
# `upgrade_ts` datetime NOT NULL default '0000-00-00 00:00:00',
# `notes` text,
# PRIMARY KEY (`version`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/events (
# `id` int(10) unsigned NOT NULL auto_increment,
# `transaction_id` int(10) unsigned NOT NULL default '0',
# `log_level` smallint(5) unsigned NOT NULL default '0',
# `source` varchar(45) default NULL,
# `description` text NOT NULL,
# `fired_at` int(10) unsigned NOT NULL default '0',
# `activity` varchar(45) default NULL,
# `object_id` int(10) unsigned default NULL,
# `object_type` varchar(45) default NULL,
# PRIMARY KEY (`id`),
# KEY /*prefix*/transaction_id (`transaction_id`),
# KEY /*prefix*/fired_at (`fired_at`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/execution_bugs (
# `execution_id` int(10) unsigned NOT NULL default '0',
# `bug_id` varchar(64) NOT NULL default '0',
# PRIMARY KEY (`execution_id`,`bug_id`)
# ) DEFAULT CHARSET=utf8;
#
class Executions(models.Model):
# CREATE TABLE /*prefix*/executions (
id = models.IntegerField(primary_key=True) # id int(10) unsigned NOT NULL auto_increment,
build_id = models.IntegerField() # build_id int(10) NOT NULL default '0',
tester_id = models.IntegerField() # tester_id int(10) unsigned default NULL,
execution_ts = models.DateTimeField() # execution_ts datetime default NULL,
status = models.CharField(max_length=1) # status char(1) default NULL,
testplan_id = models.IntegerField() # testplan_id int(10) unsigned NOT NULL default '0',
tcversion_id = models.IntegerField() # tcversion_id int(10) unsigned NOT NULL default '0',
tcversion_number = models.SmallIntegerField() # tcversion_number smallint(5) unsigned NOT NULL default '1',
platform_id = models.IntegerField() # platform_id int(10) unsigned NOT NULL default '0',
execution_type = models.SmallIntegerField() # execution_type tinyint(1) NOT NULL default '1' COMMENT '1 -> manual, 2 -> automated',
execution_duration = models.DecimalField(max_digits=6, decimal_places=2) # execution_duration decimal(6,2) NULL COMMENT 'NULL will be considered as NO DATA Provided by user',
notes = models.TextField() # notes text,
# PRIMARY KEY (id),
# KEY /*prefix*/executions_idx1(testplan_id,tcversion_id,platform_id,build_id),
# KEY /*prefix*/executions_idx2(execution_type)
#
# ) DEFAULT CHARSET=utf8;
#
#
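# Hedged ORM sketch (not part of the original schema mirror): these classes map the
# TestLink tables, so typical use is read-only queries like the one below; per the
# schema comment, execution_type 1 means a manual run. Database routing and any
# unmanaged-table configuration are assumed to be set up elsewhere.
def example_manual_executions(testplan_id):
    return Executions.objects.filter(testplan_id=testplan_id, execution_type=1)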
class Execution_TCSteps(models.Model):
# CREATE TABLE /*prefix*/execution_tcsteps (
id = models.IntegerField(primary_key=True) # id int(10) unsigned NOT NULL auto_increment,
execution_id = models.IntegerField() # execution_id int(10) unsigned NOT NULL default '0',
tcstep_id = models.IntegerField() # tcstep_id int(10) unsigned NOT NULL default '0',
notes = models.TextField() # notes text,
status = models.CharField(max_length=1) # status char(1) default NULL,
# PRIMARY KEY (id),
# UNIQUE KEY /*prefix*/execution_tcsteps_idx1(`execution_id`,`tcstep_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/inventory (
# id int(10) unsigned NOT NULL auto_increment,
# `testproject_id` INT( 10 ) UNSIGNED NOT NULL ,
# `owner_id` INT(10) UNSIGNED NOT NULL ,
# `name` VARCHAR(255) NOT NULL ,
# `ipaddress` VARCHAR(255) NOT NULL ,
# `content` TEXT NULL ,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `modification_ts` TIMESTAMP NOT NULL,
# PRIMARY KEY (`id`),
# KEY /*prefix*/inventory_idx1 (`testproject_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/keywords (
# `id` int(10) unsigned NOT NULL auto_increment,
# `keyword` varchar(100) NOT NULL default '',
# `testproject_id` int(10) unsigned NOT NULL default '0',
# `notes` text,
# PRIMARY KEY (`id`),
# KEY /*prefix*/testproject_id (`testproject_id`),
# KEY /*prefix*/keyword (`keyword`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/milestones (
# id int(10) unsigned NOT NULL auto_increment,
# testplan_id int(10) unsigned NOT NULL default '0',
# target_date date NULL,
# start_date date NOT NULL default '0000-00-00',
# a tinyint(3) unsigned NOT NULL default '0',
# b tinyint(3) unsigned NOT NULL default '0',
# c tinyint(3) unsigned NOT NULL default '0',
# name varchar(100) NOT NULL default 'undefined',
# PRIMARY KEY (id),
# KEY /*prefix*/testplan_id (`testplan_id`),
# UNIQUE KEY /*prefix*/name_testplan_id (`name`,`testplan_id`)
# ) DEFAULT CHARSET=utf8;
class node_types(models.Model):
# CREATE TABLE /*prefix*/node_types (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL auto_increment,
description = models.CharField(max_length=100) # `description` varchar(100) NOT NULL default 'testproject',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
class nodes_hierarchy(models.Model):
# CREATE TABLE /*prefix*/nodes_hierarchy (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL auto_increment,
name = models.CharField(max_length=100) # `name` varchar(100) default NULL,
parent_id = models.IntegerField() # `parent_id` int(10) unsigned default NULL,
node_type_id = models.IntegerField() # `node_type_id` int(10) unsigned NOT NULL default '1',
node_order = models.IntegerField() # `node_order` int(10) unsigned default NULL,
# PRIMARY KEY (`id`),
# KEY /*prefix*/pid_m_nodeorder (`parent_id`,`node_order`)
# ) DEFAULT CHARSET=utf8;
class Platforms(models.Model):
# CREATE TABLE /*prefix*/platforms (
id = models.IntegerField(primary_key=True) # id int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
name = models.CharField(max_length=100) # name varchar(100) NOT NULL,
testproject_id = models.IntegerField() # testproject_id int(10) UNSIGNED NOT NULL,
notes = models.TextField() # notes text NOT NULL,
# PRIMARY KEY (id),
# UNIQUE KEY /*prefix*/idx_platforms (testproject_id,name)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/req_coverage (
# `req_id` int(10) NOT NULL,
# `testcase_id` int(10) NOT NULL,
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `review_requester_id` int(10) unsigned default NULL,
# `review_request_ts` TIMESTAMP NULL DEFAULT NULL,
# KEY /*prefix*/req_testcase (`req_id`,`testcase_id`)
# ) DEFAULT CHARSET=utf8 COMMENT='relation test case ** requirements';
#
# CREATE TABLE /*prefix*/req_specs (
# `id` int(10) unsigned NOT NULL,
# `testproject_id` int(10) unsigned NOT NULL,
# `doc_id` varchar(64) NOT NULL,
# PRIMARY KEY (`id`),
# KEY /*prefix*/testproject_id (`testproject_id`),
# UNIQUE KEY /*prefix*/req_spec_uk1(`doc_id`,`testproject_id`)
# ) DEFAULT CHARSET=utf8 COMMENT='Dev. Documents (e.g. System Requirements Specification)';
#
# CREATE TABLE /*prefix*/requirements (
# `id` int(10) unsigned NOT NULL,
# `srs_id` int(10) unsigned NOT NULL,
# `req_doc_id` varchar(64) NOT NULL,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/requirements_req_doc_id (`srs_id`,`req_doc_id`)
# ) DEFAULT CHARSET=utf8;
#
# CREATE TABLE /*prefix*/req_versions (
# `id` int(10) unsigned NOT NULL,
# `version` smallint(5) unsigned NOT NULL default '1',
# `revision` smallint(5) unsigned NOT NULL default '1',
# `scope` text,
# `status` char(1) NOT NULL default 'V',
# `type` char(1) default NULL,
# `active` tinyint(1) NOT NULL default '1',
# `is_open` tinyint(1) NOT NULL default '1',
# `expected_coverage` int(10) NOT NULL default '1',
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `modifier_id` int(10) unsigned default NULL,
# `modification_ts` datetime NOT NULL default '0000-00-00 00:00:00',
# `log_message` text,
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
# CREATE TABLE /*prefix*/req_relations (
# `id` int(10) unsigned NOT NULL auto_increment,
# `source_id` int(10) unsigned NOT NULL,
# `destination_id` int(10) unsigned NOT NULL,
# `relation_type` smallint(5) unsigned NOT NULL default '1',
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/rights (
# `id` int(10) unsigned NOT NULL auto_increment,
# `description` varchar(100) NOT NULL default '',
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/rights_descr (`description`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/risk_assignments (
# `id` int(10) unsigned NOT NULL auto_increment,
# `testplan_id` int(10) unsigned NOT NULL default '0',
# `node_id` int(10) unsigned NOT NULL default '0',
# `risk` char(1) NOT NULL default '2',
# `importance` char(1) NOT NULL default 'M',
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/risk_assignments_tplan_node_id (`testplan_id`,`node_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/role_rights (
# `role_id` int(10) NOT NULL default '0',
# `right_id` int(10) NOT NULL default '0',
# PRIMARY KEY (`role_id`,`right_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/roles (
# `id` int(10) unsigned NOT NULL auto_increment,
# `description` varchar(100) NOT NULL default '',
# `notes` text,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/role_rights_roles_descr (`description`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/testcase_keywords (
# `testcase_id` int(10) unsigned NOT NULL default '0',
# `keyword_id` int(10) unsigned NOT NULL default '0',
# PRIMARY KEY (`testcase_id`,`keyword_id`)
# ) DEFAULT CHARSET=utf8;
#
class TCVersions(models.Model):
# CREATE TABLE /*prefix*/tcversions (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL,
tc_external_id = models.IntegerField() # `tc_external_id` int(10) unsigned NULL,
version = models.SmallIntegerField() # `version` smallint(5) unsigned NOT NULL default '1',
layout = models.SmallIntegerField() # `layout` smallint(5) unsigned NOT NULL default '1',
status = models.SmallIntegerField() # `status` smallint(5) unsigned NOT NULL default '1',
summary = models.TextField() # `summary` text,
preconditions = models.TextField() # `preconditions` text,
importance = models.SmallIntegerField() # `importance` smallint(5) unsigned NOT NULL default '2',
author_id = models.IntegerField() # `author_id` int(10) unsigned default NULL,
creation_ts = models.DateTimeField() # `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updater_id = models.IntegerField() # `updater_id` int(10) unsigned default NULL,
modification_ts = models.DateTimeField() # `modification_ts` datetime NOT NULL default '0000-00-00 00:00:00',
active = models.SmallIntegerField() # `active` tinyint(1) NOT NULL default '1',
is_open = models.SmallIntegerField() # `is_open` tinyint(1) NOT NULL default '1',
execution_type = models.SmallIntegerField() # `execution_type` tinyint(1) NOT NULL default '1' COMMENT '1 -> manual, 2 -> automated',
estimated_duration = models.DecimalField(max_digits=6, decimal_places=2) # `estimated_exec_duration` decimal(6,2) NULL COMMENT 'NULL will be considered as NO DATA Provided by user',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
class TCSteps(models.Model):
# CREATE TABLE /*prefix*/tcsteps (
id = models.IntegerField(primary_key=True) # id int(10) unsigned NOT NULL,
step_number = models.IntegerField() # step_number INT NOT NULL DEFAULT '1',
actions = models.TextField() # actions TEXT,
expected_results = models.TextField() # expected_results TEXT,
active = models.SmallIntegerField() # active tinyint(1) NOT NULL default '1',
execution_type = models.SmallIntegerField() # execution_type tinyint(1) NOT NULL default '1' COMMENT '1 -> manual, 2 -> automated',
# PRIMARY KEY (id)
# ) DEFAULT CHARSET=utf8;
class Testplan_TCVersions(models.Model):
# CREATE TABLE /*prefix*/testplan_tcversions (
id = models.IntegerField(primary_key=True) # id int(10) unsigned NOT NULL auto_increment,
testplan_id = models.IntegerField() # testplan_id int(10) unsigned NOT NULL default '0',
tcversion_id = models.IntegerField() # tcversion_id int(10) unsigned NOT NULL default '0',
node_order = models.IntegerField() # node_order int(10) unsigned NOT NULL default '1',
urgency = models.SmallIntegerField() # urgency smallint(5) NOT NULL default '2',
platform_id = models.IntegerField() # platform_id int(10) unsigned NOT NULL default '0',
author_id = models.IntegerField() # author_id int(10) unsigned default NULL,
# creation_ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# PRIMARY KEY (id),
# UNIQUE KEY /*prefix*/testplan_tcversions_tplan_tcversion (testplan_id,tcversion_id,platform_id)
# ) DEFAULT CHARSET=utf8;
class TestPlans(models.Model):
# CREATE TABLE /*prefix*/testplans (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL,
testproject_id = models.IntegerField() # `testproject_id` int(10) unsigned NOT NULL default '0',
notes = models.TextField() # `notes` text,
active = models.SmallIntegerField() # `active` tinyint(1) NOT NULL default '1',
is_open = models.SmallIntegerField() # `is_open` tinyint(1) NOT NULL default '1',
is_public = models.SmallIntegerField() # `is_public` tinyint(1) NOT NULL default '1',
api_key = models.CharField(max_length=64, unique=True) # `api_key` varchar(64) NOT NULL default '829a2ded3ed0829a2dedd8ab81dfa2c77e8235bc3ed0d8ab81dfa2c77e8235bc',
# PRIMARY KEY (`id`),
# KEY /*prefix*/testplans_testproject_id_active (`testproject_id`,`active`),
# UNIQUE KEY /*prefix*/testplans_api_key (`api_key`)
# ) DEFAULT CHARSET=utf8;
class Testplan_Platforms(models.Model):
# CREATE TABLE /*prefix*/testplan_platforms (
id = models.IntegerField(primary_key=True) # id int(10) unsigned NOT NULL auto_increment,
testplan_id = models.IntegerField() # testplan_id int(10) unsigned NOT NULL,
platform_id = models.IntegerField() # platform_id int(10) unsigned NOT NULL,
# PRIMARY KEY (id),
# UNIQUE KEY /*prefix*/idx_testplan_platforms(testplan_id,platform_id)
# ) DEFAULT CHARSET=utf8 COMMENT='Connects a testplan with platforms';
#
class TestProjects(models.Model):
# CREATE TABLE /*prefix*/testprojects (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL,
notes = models.TextField() # `notes` text,
color = models.CharField(max_length=12) # `color` varchar(12) NOT NULL default '#9BD',
active = models.SmallIntegerField() # `active` tinyint(1) NOT NULL default '1',
option_reqs = models.SmallIntegerField() # `option_reqs` tinyint(1) NOT NULL default '0',
option_priority = models.SmallIntegerField() # `option_priority` tinyint(1) NOT NULL default '0',
option_automation = models.SmallIntegerField() # `option_automation` tinyint(1) NOT NULL default '0',
options = models.TextField() # `options` text,
prefix = models.CharField(max_length=16, unique=True) # `prefix` varchar(16) NOT NULL,
tc_counter = models.IntegerField() # `tc_counter` int(10) unsigned NOT NULL default '0',
is_public = models.SmallIntegerField() # `is_public` tinyint(1) NOT NULL default '1',
issue_tracker_enabled = models.SmallIntegerField() # `issue_tracker_enabled` tinyint(1) NOT NULL default '0',
reqmgr_integration_enabled = models.SmallIntegerField() # `reqmgr_integration_enabled` tinyint(1) NOT NULL default '0',
api_key = models.CharField(max_length=64, unique=True) # `api_key` varchar(64) NOT NULL default '0d8ab81dfa2c77e8235bc829a2ded3edfa2c78235bc829a27eded3ed0d8ab81d',
# PRIMARY KEY (`id`),
# KEY /*prefix*/testprojects_id_active (`id`,`active`),
# UNIQUE KEY /*prefix*/testprojects_prefix (`prefix`),
# UNIQUE KEY /*prefix*/testprojects_api_key (`api_key`)
# ) DEFAULT CHARSET=utf8;
#
class TestSuites(models.Model):
# CREATE TABLE /*prefix*/testsuites (
id = models.IntegerField(primary_key=True) # `id` int(10) unsigned NOT NULL,
details = models.TextField() # `details` text,
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/transactions (
# `id` int(10) unsigned NOT NULL auto_increment,
# `entry_point` varchar(45) NOT NULL default '',
# `start_time` int(10) unsigned NOT NULL default '0',
# `end_time` int(10) unsigned NOT NULL default '0',
# `user_id` int(10) unsigned NOT NULL default '0',
# `session_id` varchar(45) default NULL,
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/user_assignments (
# `id` int(10) unsigned NOT NULL auto_increment,
# `type` int(10) unsigned NOT NULL default '1',
# `feature_id` int(10) unsigned NOT NULL default '0',
# `user_id` int(10) unsigned default '0',
# `build_id` int(10) unsigned default '0',
# `deadline_ts` datetime NULL,
# `assigner_id` int(10) unsigned default '0',
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `status` int(10) unsigned default '1',
# PRIMARY KEY (`id`),
# KEY /*prefix*/user_assignments_feature_id (`feature_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/users (
# `id` int(10) unsigned NOT NULL auto_increment,
# `login` varchar(30) NOT NULL default '',
# `password` varchar(32) NOT NULL default '',
# `role_id` int(10) unsigned NOT NULL default '0',
# `email` varchar(100) NOT NULL default '',
# `first` varchar(30) NOT NULL default '',
# `last` varchar(30) NOT NULL default '',
# `locale` varchar(10) NOT NULL default 'en_GB',
# `default_testproject_id` int(10) default NULL,
# `active` tinyint(1) NOT NULL default '1',
# `script_key` varchar(32) NULL,
# `cookie_string` varchar(64) NOT NULL default '',
# `auth_method` varchar(10) NULL default '',
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/users_login (`login`),
# UNIQUE KEY /*prefix*/users_cookie_string (`cookie_string`)
# ) DEFAULT CHARSET=utf8 COMMENT='User information';
#
#
# CREATE TABLE /*prefix*/user_testproject_roles (
# `user_id` int(10) NOT NULL default '0',
# `testproject_id` int(10) NOT NULL default '0',
# `role_id` int(10) NOT NULL default '0',
# PRIMARY KEY (`user_id`,`testproject_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/user_testplan_roles (
# `user_id` int(10) NOT NULL default '0',
# `testplan_id` int(10) NOT NULL default '0',
# `role_id` int(10) NOT NULL default '0',
# PRIMARY KEY (`user_id`,`testplan_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/object_keywords (
# `id` int(10) unsigned NOT NULL auto_increment,
# `fk_id` int(10) unsigned NOT NULL default '0',
# `fk_table` varchar(30) default '',
# `keyword_id` int(10) unsigned NOT NULL default '0',
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
#
#
# # not used - group users for large companies
# CREATE TABLE /*prefix*/user_group (
# `id` int(10) unsigned NOT NULL auto_increment,
# `title` varchar(100) NOT NULL,
# `description` text,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/idx_user_group (`title`)
# ) DEFAULT CHARSET=utf8;
#
#
# # not used - group users for large companies
# CREATE TABLE /*prefix*/user_group_assign (
# `usergroup_id` int(10) unsigned NOT NULL,
# `user_id` int(10) unsigned NOT NULL,
# UNIQUE KEY /*prefix*/idx_user_group_assign (`usergroup_id`,`user_id`)
# ) DEFAULT CHARSET=utf8;
#
#
#
#
# # ----------------------------------------------------------------------------------
# # BUGID 4056
# # ----------------------------------------------------------------------------------
# CREATE TABLE /*prefix*/req_revisions (
# `parent_id` int(10) unsigned NOT NULL,
# `id` int(10) unsigned NOT NULL,
# `revision` smallint(5) unsigned NOT NULL default '1',
# `req_doc_id` varchar(64) NULL, /* it's OK to allow a simple update query on code */
# `name` varchar(100) NULL,
# `scope` text,
# `status` char(1) NOT NULL default 'V',
# `type` char(1) default NULL,
# `active` tinyint(1) NOT NULL default '1',
# `is_open` tinyint(1) NOT NULL default '1',
# `expected_coverage` int(10) NOT NULL default '1',
# `log_message` text,
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `modifier_id` int(10) unsigned default NULL,
# `modification_ts` datetime NOT NULL default '0000-00-00 00:00:00',
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/req_revisions_uidx1 (`parent_id`,`revision`)
# ) DEFAULT CHARSET=utf8;
#
#
#
# # ----------------------------------------------------------------------------------
# # TICKET 4661
# # ----------------------------------------------------------------------------------
# CREATE TABLE /*prefix*/req_specs_revisions (
# `parent_id` int(10) unsigned NOT NULL,
# `id` int(10) unsigned NOT NULL,
# `revision` smallint(5) unsigned NOT NULL default '1',
# `doc_id` varchar(64) NULL, /* it's OK to allow a simple update query on code */
# `name` varchar(100) NULL,
# `scope` text,
# `total_req` int(10) NOT NULL default '0',
# `status` int(10) unsigned default '1',
# `type` char(1) default NULL,
# `log_message` text,
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# `modifier_id` int(10) unsigned default NULL,
# `modification_ts` datetime NOT NULL default '0000-00-00 00:00:00',
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/req_specs_revisions_uidx1 (`parent_id`,`revision`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/issuetrackers
# (
# `id` int(10) unsigned NOT NULL auto_increment,
# `name` varchar(100) NOT NULL,
# `type` int(10) default 0,
# `cfg` text,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/issuetrackers_uidx1 (`name`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/testproject_issuetracker
# (
# `testproject_id` int(10) unsigned NOT NULL,
# `issuetracker_id` int(10) unsigned NOT NULL,
# PRIMARY KEY (`testproject_id`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/reqmgrsystems
# (
# `id` int(10) unsigned NOT NULL auto_increment,
# `name` varchar(100) NOT NULL,
# `type` int(10) default 0,
# `cfg` text,
# PRIMARY KEY (`id`),
# UNIQUE KEY /*prefix*/reqmgrsystems_uidx1 (`name`)
# ) DEFAULT CHARSET=utf8;
#
#
# CREATE TABLE /*prefix*/testproject_reqmgrsystem
# (
# `testproject_id` int(10) unsigned NOT NULL,
# `reqmgrsystem_id` int(10) unsigned NOT NULL,
# PRIMARY KEY (`testproject_id`)
# ) DEFAULT CHARSET=utf8;
#
# CREATE TABLE /*prefix*/text_templates (
# id int(10) unsigned NOT NULL,
# type smallint(5) unsigned NOT NULL,
# title varchar(100) NOT NULL,
# template_data text,
# author_id int(10) unsigned default NULL,
# creation_ts datetime NOT NULL default '1900-00-00 01:00:00',
# is_public tinyint(1) NOT NULL default '0',
# UNIQUE KEY idx_text_templates (type,title)
# ) DEFAULT CHARSET=utf8 COMMENT='Global Project Templates';
#
#
#
# CREATE TABLE /*prefix*/testcase_relations (
# `id` int(10) unsigned NOT NULL auto_increment,
# `source_id` int(10) unsigned NOT NULL,
# `destination_id` int(10) unsigned NOT NULL,
# `relation_type` smallint(5) unsigned NOT NULL default '1',
# `author_id` int(10) unsigned default NULL,
# `creation_ts` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
# PRIMARY KEY (`id`)
# ) DEFAULT CHARSET=utf8;
| 43.297521
| 168
| 0.683114
|
794c4cd95e60816eb37e9336d7f94b3442db6abf
| 7,437
|
py
|
Python
|
unit-tests/py/rspy/acroname.py
|
Moktarino/librealsense
|
fdd863f30b9e71d1c34f226aa0e62659ddb46dbb
|
[
"Apache-2.0"
] | 6,457
|
2016-01-21T03:56:07.000Z
|
2022-03-31T11:57:15.000Z
|
unit-tests/py/rspy/acroname.py
|
Moktarino/librealsense
|
fdd863f30b9e71d1c34f226aa0e62659ddb46dbb
|
[
"Apache-2.0"
] | 8,393
|
2016-01-21T09:47:28.000Z
|
2022-03-31T22:21:42.000Z
|
unit-tests/py/rspy/acroname.py
|
Moktarino/librealsense
|
fdd863f30b9e71d1c34f226aa0e62659ddb46dbb
|
[
"Apache-2.0"
] | 4,874
|
2016-01-21T09:20:08.000Z
|
2022-03-31T15:18:00.000Z
|
"""
Brainstem Acroname Hub
See documentation for brainstem here:
https://acroname.com/reference/python/index.html
"""
from rspy import log
if __name__ == '__main__':
import os, sys, getopt
def usage():
ourname = os.path.basename( sys.argv[0] )
print( 'Syntax: acroname [options]' )
print( ' Control the acroname USB hub' )
print( 'Options:' )
print( ' --enable Enable all ports' )
print( ' --recycle Recycle all ports' )
sys.exit(2)
try:
opts,args = getopt.getopt( sys.argv[1:], '',
longopts = [ 'help', 'recycle', 'enable' ])
except getopt.GetoptError as err:
print( '-F-', err ) # something like "option -a not recognized"
usage()
if args or not opts:
usage()
# See the end of the file for all the option handling
try:
import brainstem
except ModuleNotFoundError:
log.w( 'No acroname library is available!' )
raise
hub = None
class NoneFoundError( RuntimeError ):
"""
"""
def __init__( self, message = None ):
super().__init__( message or 'no Acroname module found' )
def discover():
"""
Return all Acroname module specs in a list. Raise NoneFoundError if one is not found!
"""
log.d( 'discovering Acroname modules ...' )
# see https://acroname.com/reference/_modules/brainstem/module.html#Module.discoverAndConnect
try:
log.debug_indent()
specs = brainstem.discover.findAllModules( brainstem.link.Spec.USB )
if not specs:
raise NoneFoundError()
for spec in specs:
log.d( '...', spec )
finally:
log.debug_unindent()
return specs
def connect( spec = None ):
"""
Connect to the hub. Raises RuntimeError on failure
"""
global hub
if not hub:
hub = brainstem.stem.USBHub3p()
if spec:
specs = [spec]
else:
specs = discover()
spec = specs[0]
result = hub.connectFromSpec( spec )
if result != brainstem.result.Result.NO_ERROR:
raise RuntimeError( "failed to connect to acroname (result={})".format( result ))
elif len(specs) > 1:
log.d( 'connected to', spec )
def is_connected():
try:
connect()
return True
except Exception:
return False
def disconnect():
global hub
hub.disconnect()
del hub
hub = None
def all_ports():
"""
:return: a list of all possible ports, even if currently unoccupied or disabled
"""
return range(8)
def ports():
"""
:return: a list of all ports currently occupied (and enabled)
"""
occupied_ports = []
for port in all_ports():
if port_power( port ) > 0.0:
occupied_ports.append( port )
return occupied_ports
def is_port_enabled( port ):
return port_state( port ) == "OK"
def port_state( port ):
if port < 0 or port > 7:
raise ValueError( "port number must be [0-7]" )
#
global hub
status = hub.usb.getPortState( port )
#
if status.value == 0:
return "Disabled"
if status.value == 11:
return "Disconnected"
if status.value > 100:
return "OK"
return "Unknown Error ({})".format( status.value )
def enable_ports( ports = None, disable_other_ports = False, sleep_on_change = 0 ):
"""
Set enable state to provided ports
:param ports: List of port numbers; if not provided, enable all ports
:param disable_other_ports: if True, the ports not in the list will be disabled
:param sleep_on_change: Number of seconds to sleep if any change is made
:return: True if no errors found, False otherwise
"""
global hub
result = True
changed = False
for port in all_ports():
#
if ports is None or port in ports:
if not is_port_enabled( port ):
action_result = hub.usb.setPortEnable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
else:
changed = True
#
elif disable_other_ports:
if is_port_enabled( port ):
action_result = hub.usb.setPortDisable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
else:
changed = True
#
if changed and sleep_on_change:
import time
time.sleep( sleep_on_change )
#
return result
def disable_ports( ports ):
"""
:param ports: List of port numbers
:return: True if no errors found, False otherwise
"""
global hub
result = True
for port in ports:
#
action_result = hub.usb.setPortDisable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
#
return result
def recycle_ports( portlist = None, timeout = 2 ):
"""
Disable and enable a port
:param timeout: how long to wait before re-enabling
:return: True if everything OK, False otherwise
"""
if portlist is None:
portlist = ports()
#
result = disable_ports( portlist )
#
import time
time.sleep( timeout )
#
result = enable_ports( portlist ) and result
#
return result
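# Minimal usage sketch (assumes an Acroname hub is attached and the brainstem library
# imports cleanly); this helper is illustrative and not part of the original rspy API.
def _example_power_cycle_all():
    connect() # discover and connect to the first hub found
    return recycle_ports( timeout = 2 ) # disable, wait 2 seconds, then re-enable the occupied ports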
def set_ports_usb2( portlist = None, timeout = 100e-3 ):
"""
Set USB ports to USB2
"""
if portlist is None:
portlist = ports()
#
recycle_ports( portlist, timeout = timeout )
#
global hub
for port in portlist:
hub.usb.setSuperSpeedDataEnable( port )
hub.usb.setHiSpeedDataEnable( port )
hub.usb.setSuperSpeedDataDisable( port )
def set_ports_usb3( portlist = None, timeout = 100e-3 ):
"""
Set USB ports to support USB3
"""
if portlist is None:
portlist = ports()
#
recycle_ports( portlist, timeout = timeout )
#
global hub
for port in portlist:
hub.usb.setSuperSpeedDataEnable( port )
hub.usb.setHiSpeedDataEnable( port )
hub.usb.setHiSpeedDataDisable( port )
def port_power( port ):
"""
"""
if port < 0 or port > 7:
raise ValueError( "port number must be [0-7]" )
#
global hub
micro_volt = hub.usb.getPortVoltage( port )
micro_curr = hub.usb.getPortCurrent( port )
volt = float(micro_volt.value) / 10.0 ** 6
amps = float(micro_curr.value) / 10.0 ** 6
#
return volt * amps
def get_port_from_usb( first_usb_index, second_usb_index ):
"""
Based on last two USB location index, provide the port number
"""
acroname_port_usb_map = {(4, 4): 0,
(4, 3): 1,
(4, 2): 2,
(4, 1): 3,
(3, 4): 4,
(3, 3): 5,
(3, 2): 6,
(3, 1): 7,
}
return acroname_port_usb_map[(first_usb_index, second_usb_index)]
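# Worked example (the location string is illustrative): a device whose USB location
# ends in "...4.3" has last two indices (4, 3), which the table above maps to port 1.
def _example_port_lookup():
    return get_port_from_usb( 4, 3 ) # -> 1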
if __name__ == '__main__':
for opt,arg in opts:
if opt in ('--enable',):
connect()
enable_ports() # so ports() will return all
elif opt in ('--recycle',):
connect()
enable_ports() # so ports() will return all
recycle_ports()
| 26.003497
| 97
| 0.573081
|
794c4d210b5bb5a185568c24775e111286c347b0
| 19,878
|
py
|
Python
|
rudolph/pipelines.py
|
WildGenie/ru-dolph
|
c80a320a60dcb60ccb66b86c3421e16e33235d97
|
[
"Apache-2.0"
] | null | null | null |
rudolph/pipelines.py
|
WildGenie/ru-dolph
|
c80a320a60dcb60ccb66b86c3421e16e33235d97
|
[
"Apache-2.0"
] | null | null | null |
rudolph/pipelines.py
|
WildGenie/ru-dolph
|
c80a320a60dcb60ccb66b86c3421e16e33235d97
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from glob import glob
from os.path import join
from datetime import datetime
import torch
import torchvision
import transformers
import more_itertools
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.transforms as T
from tqdm.auto import tqdm
from einops import rearrange
from . import utils
from .model.utils import get_attention_mask, get_t2t_attention_mask
def generate_codebooks(
text,
tokenizer,
model,
top_k, top_p, images_num,
image_prompts=None,
temperature=1.0, bs=8,
seed=None, use_cache=True,
):
"""
Autoregressively sample `images_num` codebooks of image tokens for `text`,
optionally keeping the fixed tokens given in `image_prompts`; returns a tensor
of shape (images_num, image_seq_length)
"""
if seed is not None:
utils.seed_everything(seed)
else:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
total_seq_length = l_text_seq_length + image_seq_length + r_text_seq_length
device = model.get_param('device')
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
codebooks = []
for chunk in more_itertools.chunked(range(images_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = torch.tril(torch.ones((chunk_bs, 1, total_seq_length, total_seq_length), device=device))
out = encoded.unsqueeze(0).repeat(chunk_bs, 1).to(device)
has_cache = False
if image_prompts is not None:
prompts_idx, prompts = image_prompts.image_prompts_idx, image_prompts.image_prompts
prompts = prompts.repeat(chunk_bs, 1)
for idx in tqdm(range(l_text_seq_length, l_text_seq_length + image_seq_length)):
idx -= l_text_seq_length
if image_prompts is not None and idx in prompts_idx:
out = torch.cat((out, prompts[:, idx].unsqueeze(1)), dim=-1)
else:
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, vocab_size:]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
codebooks.append(out[:, -image_seq_length:])
return torch.cat(codebooks)
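# Hedged usage sketch: `model` and `tokenizer` are assumed to be already-loaded
# ruDOLPH components (loading them is outside this module); the prompt and sampling
# parameters are illustrative, not recommended defaults.
def _example_generate_codebooks(model, tokenizer):
    # returns an (images_num, image_seq_length) tensor of image token ids; decoding
    # them back to pixels is done with the VAE elsewhere in the project
    return generate_codebooks('a drawing of a red cat', tokenizer, model,
                              top_k=1024, top_p=0.975, images_num=4)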
def generate_captions(
pil_img,
tokenizer,
model,
vae,
template='',
top_k=32, top_p=0.6, captions_num=128,
temperature=1.0, bs=64,
seed=None, use_cache=True,
):
if seed is None:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
img = image_transform(pil_img)
template = template.lower().strip()
template_encoded = tokenizer.encode_text(template, text_seq_length=r_text_seq_length)
template_size = (template_encoded != 0).sum() - 1 # eos
template_encoded = template_encoded[:template_size]
generated_tokens = []
for chunk in more_itertools.chunked(range(captions_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
masks = torch.ones(chunk_bs, r_text_seq_length, dtype=torch.int32)
attention_mask = get_attention_mask(masks, chunk_bs, l_text_seq_length, image_tokens_per_dim,
r_text_seq_length, device)
images = img.unsqueeze(0).repeat((chunk_bs, 1, 1, 1)).to(device)
image_input_ids = vae.get_codebook_indices(images)
out = torch.cat((
torch.zeros((chunk_bs, l_text_seq_length), dtype=torch.int64).to(device),
image_input_ids,
template_encoded.repeat(chunk_bs, 1).to(device),
), dim=1)
has_cache = False
for _ in tqdm(range(
l_text_seq_length + image_seq_length + template_size,
l_text_seq_length + image_seq_length + r_text_seq_length
)):
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, :vocab_size]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
indexes = torch.where(sample >= vocab_size - l_text_seq_length)
sample[indexes] = 3
out = torch.cat((out, sample), dim=-1)
generated_tokens.append(out[:, -r_text_seq_length:])
generated_tokens = torch.cat(generated_tokens)
texts = set()
for tokens in generated_tokens:
end = torch.where(tokens == 3)[0].shape[0] or tokens.shape[0]
text = tokenizer.decode_text(tokens[:end]).strip()
if text:
texts.add(text)
return list(texts)
def show(pil_images, nrow=4, size=14, save_dir=None, show=True):
"""
:param pil_images: list of images in PIL
:param nrow: number of rows
:param size: size of the images
:param save_dir: dir for separately saving of images, example: save_dir='./pics'
"""
if save_dir is not None:
os.makedirs(save_dir, exist_ok=True)
count = len(glob(join(save_dir, 'img_*.png')))
for i, pil_image in enumerate(pil_images):
pil_image.save(join(save_dir, f'img_{count+i}.png'))
pil_images = [pil_image.convert('RGB') for pil_image in pil_images]
imgs = torchvision.utils.make_grid(utils.pil_list_to_torch_tensors(pil_images), nrow=nrow)
if not isinstance(imgs, list):
imgs = [imgs.cpu()]
fig, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=(size, size))
for i, img in enumerate(imgs):
img = img.detach()
img = torchvision.transforms.functional.to_pil_image(img)
if save_dir is not None:
count = len(glob(join(save_dir, 'group_*.png')))
img.save(join(save_dir, f'group_{count+i}.png'))
if show:
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if show:
fig.show()
plt.show()
def self_reranking_by_text(
text,
codebooks,
tokenizer,
model,
bs=64,
):
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
device = model.get_param('device')
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[encoded != 0] = 1
ppl_text, ppl_image = [], []
for chunk in more_itertools.chunked(codebooks, bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = get_attention_mask(
mask.unsqueeze(0).repeat(chunk_bs, 1).to(device),
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
input_ids = torch.cat((
torch.zeros((chunk_bs, l_text_seq_length), dtype=torch.int64).to(device),
torch.stack(chunk),
encoded.unsqueeze(0).repeat(chunk_bs, 1).to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
image_logits = logits[:, vocab_size:,
l_text_seq_length:l_text_seq_length + image_seq_length - 1].contiguous().float()
r_text_logits = logits[:, :vocab_size, -r_text_seq_length:-1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_image.append(
ce_to_ppl(F.cross_entropy(
image_logits,
input_ids[:, l_text_seq_length + 1:l_text_seq_length + image_seq_length],
reduction='none',
))
)
ppl_text.append(
ce_to_ppl(F.cross_entropy(
r_text_logits,
input_ids[:, -(r_text_seq_length - 1):],
ignore_index=0,
reduction='none',
))
)
return torch.cat(ppl_text), torch.cat(ppl_image)
def self_reranking_by_image(
texts,
pil_img,
tokenizer,
model,
vae,
bs=64,
seed=42,
):
if seed is not None:
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
image_seq_length = model.get_param('image_seq_length')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
img = image_transform(pil_img)
ppl_text, ppl_image = [], []
for chunk in more_itertools.chunked(texts, bs):
chunk_bs = len(chunk)
with torch.no_grad():
chunk_encoded, masks = [], []
for text in chunk:
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[encoded != 0] = 1
chunk_encoded.append(encoded)
masks.append(mask)
chunk_encoded = torch.stack(chunk_encoded)
masks = torch.stack(masks)
attention_mask = get_attention_mask(
masks.to(device),
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
images = img.unsqueeze(0).repeat((chunk_bs, 1, 1, 1)).to(device)
image_input_ids = vae.get_codebook_indices(images)
input_ids = torch.cat((
chunk_encoded.to(device),
image_input_ids,
chunk_encoded.to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
image_logits = logits[:, vocab_size:,
l_text_seq_length:l_text_seq_length + image_seq_length - 1].contiguous().float()
l_text_logits = logits[:, :vocab_size, :l_text_seq_length - 1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_image.append(
ce_to_ppl(F.cross_entropy(
image_logits,
input_ids[:, l_text_seq_length + 1:l_text_seq_length + image_seq_length],
reduction='none',
))
)
ppl_text.append(
ce_to_ppl(F.cross_entropy(
l_text_logits,
input_ids[:, 1:l_text_seq_length],
ignore_index=0,
reduction='none',
))
)
ppl_text = torch.cat(ppl_text)
ppl_image = torch.cat(ppl_image)
return ppl_text, ppl_image
def zs_clf(pil_img, classes, model, tokenizer, vae, bs=8, template=None):
"""
classes - list of strings
template - prefix template
"""
template = template or '{}'
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
device = model.get_param('device')
image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.RandomResizedCrop(image_tokens_per_dim * 8,
scale=(1., 1.),
ratio=(1., 1.)),
T.ToTensor()
])
encoded, masks = [], []
for _class in classes:
text = template.format(_class).lower().strip()
class_encoded = tokenizer.encode_text(text, text_seq_length=r_text_seq_length)
encoded.append(class_encoded)
mask = torch.zeros(r_text_seq_length, dtype=torch.int64)
mask[class_encoded != 0] = 1
masks.append(mask)
encoded = torch.stack(encoded, 0)
masks = torch.stack(masks, 0)
with torch.no_grad():
img = image_transform(pil_img)
images = img.unsqueeze(0).to(device)
image_input_ids = vae.get_codebook_indices(images)
ppl_text, ppl_image = [], [] # noqa
for indexes in more_itertools.chunked(range(len(classes)), bs):
chunk_encoded = encoded[indexes]
chunk_masks = masks[indexes]
chunk_bs = chunk_encoded.shape[0]
attention_mask = get_attention_mask(chunk_masks, chunk_bs, l_text_seq_length,
image_tokens_per_dim, r_text_seq_length, device)
input_ids = torch.cat((
torch.zeros(l_text_seq_length, dtype=torch.int64).repeat(chunk_bs, 1).to(device),
image_input_ids.repeat(chunk_bs, 1),
chunk_encoded.to(device),
), dim=1)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
r_text_logits = logits[:, :vocab_size, -r_text_seq_length:-1].contiguous()
chunk_ppl_text = ce_to_ppl(F.cross_entropy(
r_text_logits[:, :, :],
input_ids[:, -(r_text_seq_length - 1):],
ignore_index=0,
reduction='none',
))
ppl_text.append(chunk_ppl_text)
ppl_text = torch.cat(ppl_text)
ppl_text = ppl_text / ppl_text.norm(dim=0, keepdim=True)
scores = ppl_text.softmax(0)
pred = scores.argmin().item()
return {
'label': pred,
'class': classes[pred],
'scores': scores.cpu().numpy(),
}
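# Hedged usage sketch: `pil_img` is a PIL.Image and `model`, `tokenizer`, `vae` are
# already-loaded ruDOLPH components; the class names and template are placeholders.
def _example_zero_shot(pil_img, model, tokenizer, vae):
    result = zs_clf(pil_img, ['cat', 'dog'], model, tokenizer, vae,
                    template='a photo of a {}')
    return result['class'], result['scores'] # best class name plus per-class scores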
def generate_texts(
tokenizer,
model,
template='',
top_k=32, top_p=0.8, texts_num=128,
temperature=1.0, bs=64,
seed=None, use_cache=True,
):
if seed is None:
seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))
utils.seed_everything(seed)
vocab_size = model.get_param('vocab_size')
image_tokens_per_dim = model.get_param('image_tokens_per_dim')
l_text_seq_length = model.get_param('l_text_seq_length')
r_text_seq_length = model.get_param('r_text_seq_length')
device = model.get_param('device')
template = template.lower().strip()
template_encoded = tokenizer.encode_text(template, text_seq_length=l_text_seq_length)
template_size = (template_encoded != 0).sum() - 1 # eos
template_encoded = template_encoded[:template_size]
generated_tokens = []
for chunk in more_itertools.chunked(range(texts_num), bs):
chunk_bs = len(chunk)
with torch.no_grad():
attention_mask = get_t2t_attention_mask(chunk_bs, l_text_seq_length, image_tokens_per_dim,
r_text_seq_length, device)
out = template_encoded.repeat(chunk_bs, 1).to(device)
has_cache = False
for _ in tqdm(range(template_size, l_text_seq_length)):
logits, has_cache = model(out, attention_mask,
has_cache=has_cache, use_cache=use_cache, return_loss=False)
logits = logits[:, -1, :vocab_size]
logits /= temperature
filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
sample = torch.multinomial(probs, 1)
indexes = torch.where(sample > vocab_size - l_text_seq_length)
sample[indexes] = 3
out = torch.cat((out, sample), dim=-1)
generated_tokens.append(out[:, :l_text_seq_length])
generated_tokens = torch.cat(generated_tokens)
texts = set()
for tokens in generated_tokens:
end = torch.where(tokens == 3)[0].shape[0] or tokens.shape[0]
text = tokenizer.decode_text(tokens[:end]).strip()
if text:
texts.add(text)
texts = list(texts)
ppl_text = []
for chunk in more_itertools.chunked(texts, bs):
chunk_bs = len(chunk)
with torch.no_grad():
chunk_encoded = []
for text in chunk:
text = text.lower().strip()
encoded = tokenizer.encode_text(text, text_seq_length=l_text_seq_length)
chunk_encoded.append(encoded)
chunk_encoded = torch.stack(chunk_encoded)
attention_mask = get_t2t_attention_mask(
chunk_bs, l_text_seq_length, image_tokens_per_dim, r_text_seq_length, device
)
input_ids = chunk_encoded.to(device)
logits, _ = model(input_ids, attention_mask, has_cache=False, use_cache=False, return_loss=False)
logits = rearrange(logits, 'b n c -> b c n')
l_text_logits = logits[:, :vocab_size, :l_text_seq_length - 1].contiguous().float()
input_ids = input_ids.contiguous().long()
ppl_text.append(
ce_to_ppl(F.cross_entropy(
l_text_logits,
input_ids[:, 1:l_text_seq_length],
ignore_index=0,
reduction='none',
))
)
ppl_text = torch.cat(ppl_text)
result = []
for idx in ppl_text.argsort():
idx = idx.item()
result.append({
'text': texts[idx],
'ppl': round(ppl_text[idx].item(), 2),
})
return result
def ce_to_ppl(ce):
indexes = torch.where(ce)
ce[indexes] = torch.exp(ce[indexes])
ppl = ce.sum(1) / torch.unique(indexes[0], return_counts=True)[1]
return ppl
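# Worked sketch of ce_to_ppl: zero entries mark padded positions and are skipped; the
# remaining per-token cross-entropies are exponentiated and averaged per row, giving
# one perplexity value per sequence. The numbers below are illustrative only.
def _example_ce_to_ppl():
    ce = torch.tensor([[0.5, 1.0, 0.0],   # two real tokens, one padded position
                       [2.0, 0.0, 0.0]])  # a single real token
    return ce_to_ppl(ce) # ~[(e**0.5 + e**1.0) / 2, e**2.0 / 1]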
| 38.374517
| 117
| 0.596237
|
794c4df286d489cb639298346e39553f3b57e99d
| 12,905
|
py
|
Python
|
ozopython/compiler.py
|
malakhajji565/Ozobot-Python
|
97ff7324039b0402fcdc4cd1c9ad05eddc5735ac
|
[
"MIT"
] | 32
|
2017-04-09T07:10:40.000Z
|
2022-03-08T08:09:34.000Z
|
ozopython/compiler.py
|
malakhajji565/Ozobot-Python
|
97ff7324039b0402fcdc4cd1c9ad05eddc5735ac
|
[
"MIT"
] | 1
|
2017-12-20T16:58:53.000Z
|
2018-02-09T15:26:44.000Z
|
ozopython/compiler.py
|
malakhajji565/Ozobot-Python
|
97ff7324039b0402fcdc4cd1c9ad05eddc5735ac
|
[
"MIT"
] | 10
|
2017-04-09T19:02:19.000Z
|
2022-03-22T20:58:19.000Z
|
from ast import *
builtins = [
'color',
'wait',
'move',
'rotate',
'wheels',
'random',
'get_surface_color',
'terminate',
'abs',
'follow_line_to_intersect_or_end',
'set_line_speed',
'pick_direction',
'move_straight_until_line',
'there_is_way',
'get_line_speed',
'get_intersect_or_line_end_color',
]
colors = {
'BLACK': 0,
'RED': 1,
'GREEN': 2,
'YELLOW': 3,
'BLUE': 4,
'MAGENTA': 5,
'CYAN': 6,
'WHITE': 7,
}
directions = {
'STRAIGHT': 1,
'LEFT': 2,
'RIGHT': 4,
'BACK': 8,
}
terminate = {
'OFF': 0,
'FOLLOW': 1,
'IDLE': 2,
}
VERSION = [0x01, 0x03]
KILL = [0x00, 0xAE]
class CompileException(BaseException):
def __init__(self, msg, node = None):
if node is None:
super(CompileException, self).__init__(msg)
else:
super(CompileException, self).__init__("{0}:{1}".format(node.lineno - 1, node.col_offset), msg)
class Compiler:
def __init__(self):
self.bytecode = []
self.variable_counter = 0x2a
self.variables = {}
self.functions = {}
self.compiled_functions = {}
def calc_checksum(self):
result = 0
for byte in self.bytecode:
result -= byte
if result < 0:
result += 256
self.bytecode.append(result)
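# Worked example with illustrative values: for bytecode [0x01, 0x7F, 0x02] the loop
# above yields 256 - (1 + 127 + 2) = 126; after appending it, the whole program sums
# to 0 modulo 256, the property a receiver can use to validate the program.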
def get_length_bytes(self):
div = len(self.bytecode) // 256
remainder = (len(self.bytecode)) % 256
first_byte = 3
second_byte = 219 - len(self.bytecode)
while second_byte < 0:
first_byte -= 1
second_byte += 256
if first_byte < 0:
raise CompileException('Maximum bytecode length exceeded')
# return [(219 - len(self.bytecode)) % 256, len(self.bytecode) // 256, (len(self.bytecode)) % 256]
return [first_byte, second_byte, div, remainder]
def compile(self, root):
self.compile_stmt(root)
if len(self.bytecode) == 0:
return []
if self.bytecode[-1] != 0xae:
self.bytecode.extend(KILL)
# compile functions
for index, value in enumerate(self.bytecode):
if type(value) == str:
if value in self.compiled_functions.keys():
jump_index = self.compiled_functions[value]
self.bytecode[index] = 0x90
self.bytecode[index + 1] = jump_index // 256
self.bytecode[index + 2] = jump_index % 256
else:
self.bytecode[index] = 0x90
self.bytecode[index + 1] = len(self.bytecode) // 256
self.bytecode[index + 2] = len(self.bytecode) % 256
self.compiled_functions[value] = len(self.bytecode)
for n in self.functions[value]:
self.compile_stmt(n)
self.push(0x91)
self.bytecode = [0x01] + self.get_length_bytes() + self.bytecode
self.calc_checksum()
return self.bytecode
def compile_stmt(self, node):
if type(node) == Module:
for n in node.body:
self.compile_stmt(n)
elif type(node) == Expr:
self.compile_expr(node.value)
elif type(node) == Assign:
self.assign(node.targets, node.value)
elif type(node) == If:
self.if_stmt(node)
elif type(node) == While:
self.while_loop(node)
elif type(node) == FunctionDef:
self.function_def(node)
else:
raise CompileException('Unsupported statement type %s.\n%s' % (str(type(node)), str(vars(node))), node)
def compile_expr(self, node):
if type(node) == Call:
self.call(node)
elif type(node) == Num:
self.num(node)
elif type(node) == Name:
self.get_var(node)
elif type(node) == NameConstant:
self.name_constant(node)
elif type(node) == BoolOp:
self.bool_op(node)
elif type(node) == Compare:
self.compare(node)
elif type(node) == UnaryOp:
self.unary_op(node)
elif type(node) == BinOp:
self.bin_op(node)
else:
raise CompileException('Unsupported expression type %s.\n%s' % (str(type(node)), str(vars(node))), node)
def assign(self, targets, value):
for target in targets:
if type(target) != Name:
raise CompileException('Values can only be assigned to variables', target)
if target.id in colors.keys():
raise CompileException('Variable name cannot be one of the built in colors', target)
if target.id in directions.keys():
raise CompileException('Variable name cannot be one of the built in directions', target)
if target.id in self.variables:
key = self.variables[target.id]
else:
key = self.variable_counter
self.variables[target.id] = key
self.variable_counter += 1
self.compile_expr(value)
self.bytecode.extend([key, 0x93])
def call(self, node):
if node.func.id in builtins:
getattr(self, node.func.id)(*node.args)
elif node.func.id in self.functions.keys():
self.push(node.func.id)
self.push(0x00)
self.push(0x00)
else:
raise CompileException("Unknown function call %s" % node.func.id, node)
def num(self, node):
value = node.n
if value > 127:
raise CompileException("Value %s outside of valid range" % value, node)
self.push(value)
def get_var(self, node):
if node.id in colors.keys():
self.push(colors[node.id])
elif node.id in directions.keys():
self.push(directions[node.id])
elif node.id in terminate.keys():
self.push(terminate[node.id])
else:
if node.id not in self.variables:
raise CompileException('Undefined variable %s.' % node.id, node)
key = self.variables[node.id]
self.bytecode.extend([key, 0x92])
def if_stmt(self, node):
self.compile_expr(node.test)
self.push(0x80)
self.push(0)
index = len(self.bytecode) - 1
self.push(0x97)
for n in node.body:
self.compile_stmt(n)
self.bytecode[index] = len(self.bytecode[index:]) + 1
if len(node.orelse) > 0:
self.bytecode[index] += 3
self.push(0xba)
self.push(0)
index = len(self.bytecode) - 1
self.push(0x97)
for n in node.orelse:
self.compile_stmt(n)
self.bytecode[index] = len(self.bytecode[index:]) + 1
def name_constant(self, node):
if type(node.value) != bool:
raise CompileException('Only boolean constant type is supported. %s' % type(node.value), node)
self.push(1 if node.value else 0)
def bool_op(self, node):
self.compile_expr(node.values[0])
for i in range(1, len(node.values)):
self.compile_expr(node.values[i])
if type(node.op) == And:
self.push(0xa2)
elif type(node.op) == Or:
self.push(0xa3)
else:
raise CompileException("Unknown operator %s" % type(node.op), node.op)
def compare(self, node):
self.compile_expr(node.left)
for i in range(len(node.ops)):
self.compile_expr(node.comparators[i])
self.compare_ops(node.ops[i])
def compare_ops(self, op):
if type(op) == Eq:
self.push(0xa4)
elif type(op) == NotEq:
self.push(0xa4)
self.push(0x8a)
elif type(op) == Lt:
self.push(0x9c)
self.push(0x8a)
elif type(op) == LtE:
self.push(0x9d)
self.push(0x8a)
elif type(op) == Gt:
self.push(0x9d)
elif type(op) == GtE:
self.push(0x9c)
else:
raise CompileException('Unsupported operator', op)
def unary_op(self, node):
if type(node.op) == Not:
self.compile_expr(node.operand)
self.push(0x8a)
elif type(node.op) == USub:
self.compile_expr(node.operand)
# self.bytecode[-1] -= 1
self.push(0x8b)
else:
raise CompileException('Unsupported operator', node.op)
def bin_op(self, node):
self.compile_expr(node.left)
self.compile_expr(node.right)
if type(node.op) == Add:
self.push(0x85)
elif type(node.op) == Sub:
self.push(0x86)
elif type(node.op) == Mult:
self.push(0x87)
elif type(node.op) == Div:
self.push(0x88)
elif type(node.op) == Mod:
self.push(0x89)
else:
raise CompileException('Unsupported operator', node.op)
def while_loop(self, node):
# Infinite loop
if type(node.test) == NameConstant and node.test.value:
jump_index = len(self.bytecode)
for n in node.body:
self.compile_stmt(n)
self.push(0xba)
self.push(256 - len(self.bytecode[jump_index:]) + 1)
elif type(node.test) == NameConstant and not node.test.value:
return
else:
jump_back_index = len(self.bytecode)
self.compile_expr(node.test)
self.push(0x80)
self.push(0)
jump_index = len(self.bytecode) - 1
self.push(0x97)
for n in node.body:
self.compile_stmt(n)
self.push(0xba)
self.push(256 - len(self.bytecode[jump_back_index:]) + 1)
self.bytecode[jump_index] = len(self.bytecode[jump_index:]) + 1
def function_def(self, node):
self.functions[node.name] = node.body
def move(self, distance, speed):
self.compile_expr(distance)
self.compile_expr(speed)
self.push(0x9e)
def wait(self, seconds, centisec):
self.compile_expr(seconds)
if self.bytecode[-1] == 0:
del self.bytecode[-1]
else:
self.bytecode.extend([0x64, 0x9b, 0x1, 0x86, 0x94, 0x0, 0x9d, 0x8a, 0x80, 0xf8, 0x97, 0x96])
self.compile_expr(centisec)
self.push(0x9b)
def color(self, red, green, blue):
self.compile_expr(red)
self.compile_expr(green)
self.compile_expr(blue)
self.push(0xb8)
def rotate(self, degree, speed):
self.compile_expr(degree)
self.compile_expr(speed)
self.push(0x98)
def wheels(self, left, right):
self.compile_expr(left)
self.compile_expr(right)
self.push(0x9f)
def random(self, low, high):
self.compile_expr(high)
self.compile_expr(low)
self.push(0x8c)
def get_surface_color(self):
self.push(0x0e)
self.push(0x92)
def terminate(self, value):
self.compile_expr(value)
self.push(0xae)
def abs(self, value):
self.compile_expr(value)
self.push(0xa8)
def follow_line_to_intersect_or_end(self):
self.bytecode.extend([0x01, 0xa0, 0xac, 0xad, 0x9a, 0x10, 0xa4, 0x80, 0xfd, 0x00, 0xa0, 0x01, 0x29, 0x93])
def set_line_speed(self, speed):
self.compile_expr(speed)
self.push(0x18)
self.push(0x93)
def move_straight_until_line(self, speed):
self.compile_expr(speed)
self.bytecode.extend([0x94, 0x94, 0x9f, 0xac, 0x08, 0x92, 0x80, 0xfa, 0x97, 0x96, 0x00, 0x00, 0x9f, 0xc6, 0x01, 0xa0, 0xac, 0xad, 0x9a, 0x10, 0xa4, 0x80, 0xfd, 0x97, 0x00, 0xa0, 0x01, 0x29, 0x93])
def pick_direction(self, direction):
if type(direction) != Name and direction.id not in directions.keys():
raise CompileException('Unsupported direction', direction)
self.compile_expr(direction)
self.bytecode.extend([0x94, 0x10, 0x92, 0x81, 0x8a, 0xb7, 0x29, 0x92, 0x8a, 0xb7, 0x1f, 0x93, 0x01, 0xa0, 0xad, 0x9a, 0x14, 0xa4, 0x80, 0xfd, 0x00, 0xa0, 0x00, 0x29, 0x93])
def there_is_way(self, direction):
if type(direction) != Name and direction.id not in directions.keys():
raise CompileException('Unsupported direction', direction)
self.push(0x10)
self.push(0x92)
self.compile_expr(direction)
self.push(0x81)
def get_line_speed(self):
self.push(0x18)
self.push(0x92)
def get_intersect_or_line_end_color(self):
self.push(0x0f)
self.push(0x92)
def push(self, byte):
self.bytecode.append(byte)
| 30.293427
| 204
| 0.556993
|
794c4eb95167cc71f893220c2568a77c6729699e
| 6,947
|
py
|
Python
|
FictionTools/amitools/amitools/rom/rombuilder.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 38
|
2021-06-18T12:56:15.000Z
|
2022-03-12T20:38:40.000Z
|
FictionTools/amitools/amitools/rom/rombuilder.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 2
|
2021-06-20T16:28:12.000Z
|
2021-11-17T21:33:56.000Z
|
FictionTools/amitools/amitools/rom/rombuilder.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 6
|
2021-06-18T18:18:36.000Z
|
2021-12-22T08:01:32.000Z
|
import os
import struct
from amitools.binfmt.Relocate import Relocate
from .kickrom import KickRomAccess
class RomEntryRaw:
def __init__(self, name, data, relocs=None):
self.name = name
self.data = data
self.relocs = relocs
def get_size(self):
return len(self.data)
def get_data(self, addr):
if self.relocs is None or len(self.relocs) == 0:
return self.data
else:
data = bytearray(self.data)
self._relocate(data, addr)
return data
def _relocate(self, data, addr):
for pos in self.relocs:
val = struct.unpack_from(">I", data, pos)[0]
val += addr
struct.pack_into(">I", data, pos, val)
class RomEntryBinImg:
def __init__(self, name, bin_img):
self.name = name
self.bin_img = bin_img
self.relocator = Relocate(bin_img)
def get_size(self):
return self.relocator.get_total_size()
def get_data(self, addr):
return self.relocator.relocate_one_block(addr)
class RomEntryRomHdr:
def __init__(self, name, skip, jmp_addr):
self.name = name
self.skip = skip
self.jmp_addr = jmp_addr
def get_size(self):
return self.skip + 8
def get_data(self, addr):
data = b"\xff" * self.skip
hdr = struct.pack(">II", 0x11114EF9, self.jmp_addr)
return data + hdr
class RomEntryPadding:
def __init__(self, skip, value=0):
self.skip = skip
self.value = value
def get_size(self):
return self.skip
def get_data(self, addr):
return bytes([self.value]) * self.skip
class RomBuilder:
def __init__(self, size=512, base_addr=0xF80000, fill_byte=0xFF):
self.size = size # in KiB
self.base_addr = base_addr
self.fill_byte = fill_byte
self.size_bytes = size * 1024
# state
self.modules = []
self.rom_off = 0
self.left_bytes = self.size_bytes
self.data_off = 0
self.error = None
def get_error(self):
return self.error
def get_data_offset(self):
return self.data_off
def get_rom_offset(self):
return self.rom_off + self.data_off
def get_bytes_left(self):
return self.left_bytes
def does_fit(self, num_bytes):
if num_bytes <= self.left_bytes:
return True
return False
def _add_entry(self, entry):
n = entry.get_size()
if not self.does_fit(n):
self.error = "module '%s' does not fit into ROM!" % (entry.name)
return None
# add entry
self.modules.append(entry)
self.data_off += n
self.left_bytes -= n
return entry
def build_file_list(self, names):
files = []
for mod in names:
# is an index file?
if mod.endswith(".txt"):
base_path = os.path.dirname(mod)
with open(mod, "r") as fh:
for line in fh:
name = line.strip()
if len(name) > 0:
f = os.path.join(base_path, name)
files.append(f)
else:
files.append(mod)
return files
def add_module(self, name, data, relocs=None):
e = RomEntryRaw(name, data, relocs)
return self._add_entry(e)
def add_bin_img(self, name, bin_img):
e = RomEntryBinImg(name, bin_img)
return self._add_entry(e)
def add_padding(self, skip, value=0):
e = RomEntryPadding(skip, value)
return self._add_entry(e)
def build_rom(self):
rom_data = bytearray(self.size_bytes)
# fill in modules
addr = self.base_addr + self.rom_off
off = self.rom_off
for mod in self.modules:
n = mod.get_size()
rom_data[off : off + n] = mod.get_data(addr)
off += n
addr += n
# fill empty space
fill = self.fill_byte
while off < self.size_bytes:
rom_data[off] = fill
off += 1
return rom_data
class KickRomBuilder(RomBuilder):
def __init__(self, size, kickety_split=True, rom_ver=None, **kw_args):
RomBuilder.__init__(self, size, **kw_args)
self.rom_ver = rom_ver
# do we need a rom header at 256k border? (the original ROMs do this)
if size == 512:
self.kickety_split = kickety_split
self.split_offset = 0x40000
else:
self.kickety_split = False
self.split_offset = None
# check size
if size not in (256, 512):
raise ValueError("KickROM size must be 256 or 512 KiB!")
# we need a footer
self.left_bytes -= KickRomAccess.FOOTER_SIZE
# extra rom header takes 8
if self.kickety_split:
self.left_bytes -= KickRomAccess.ROMHDR_SIZE
def cross_kickety_split(self, num_bytes):
if self.kickety_split:
new_off = self.data_off + num_bytes
return self.data_off < self.split_offset and new_off > self.split_offset
else:
return False
def add_kickety_split(self):
jump_addr = self.base_addr + 2
skip = self.split_offset - self.data_off
e = RomEntryRomHdr("KicketySplit", skip, jump_addr)
return self._add_entry(e)
def build_rom(self):
rom_data = RomBuilder.build_rom(self)
# add kick sum
kh = KickRomAccess(rom_data)
# ensure that first module brought the header
if not kh.check_header():
self.error = "First KickROM module does not contain RomHdr!"
return None
# write custom rev?
if self.rom_ver is not None:
kh.write_rom_ver_rev(self.rom_ver)
# write missing entries in footer
kh.write_ext_footer()
return rom_data
class ExtRomBuilder(RomBuilder):
def __init__(
self, size, rom_ver=None, add_footer=False, kick_addr=0xF80000, **kw_args
):
RomBuilder.__init__(self, size, **kw_args)
# kick addr for jump
self.kick_addr = kick_addr
# set ROM version
if rom_ver is None:
self.rom_ver = (45, 10)
else:
self.rom_ver = rom_ver
# add footer
self.add_footer = add_footer
if add_footer:
self.left_bytes -= KickRomAccess.FOOTER_SIZE
# account for header
self.left_bytes -= KickRomAccess.EXT_HEADER_SIZE
self.rom_off = KickRomAccess.EXT_HEADER_SIZE
def build_rom(self):
rom_data = RomBuilder.build_rom(self)
# write a header
kh = KickRomAccess(rom_data)
kh.write_ext_header(self.kick_addr + 2, self.rom_ver)
# write footer
if self.add_footer:
kh.write_ext_footer()
return rom_data
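
# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged example of driving the plain RomBuilder above: add one raw module
# plus some padding and emit the image. The module name and payload bytes are
# made-up placeholders. Because this file uses relative imports, run it with
# `python -m amitools.rom.rombuilder` rather than as a bare script.
if __name__ == "__main__":
    builder = RomBuilder(size=256, base_addr=0xF80000, fill_byte=0xFF)
    payload = b"\x4e\x75" * 16  # hypothetical 32-byte module contents
    if builder.add_module("demo.module", payload) is None:
        raise SystemExit(builder.get_error())
    builder.add_padding(16, value=0)
    image = builder.build_rom()
    print("ROM image: %d bytes, %d bytes still free"
          % (len(image), builder.get_bytes_left()))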
| 29.312236
| 84
| 0.582842
|
794c4f071d19ac38842117582aa6e29c0ca4b406
| 4,622
|
py
|
Python
|
theano/misc/tests/test_pycuda_example.py
|
royxue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | 2
|
2015-01-20T04:53:37.000Z
|
2015-01-20T04:53:40.000Z
|
theano/misc/tests/test_pycuda_example.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
theano/misc/tests/test_pycuda_example.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy
import theano
import theano.misc.pycuda_init
if not theano.misc.pycuda_init.pycuda_available:
from nose.plugins.skip import SkipTest
raise SkipTest("Pycuda not installed. Skip test of theano op"
" with pycuda code.")
import theano.sandbox.cuda as cuda_ndarray
if not cuda_ndarray.cuda_available:
from nose.plugins.skip import SkipTest
raise SkipTest('Optional package cuda disabled')
import theano
import theano.tensor as T
from theano.misc.pycuda_example import (PycudaElemwiseSourceModuleOp,
# PycudaElemwiseKernelOp,
PycudaElemwiseSourceModuleMakeThunkOp)
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode(
'FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def test_pycuda_elemwise_source_module():
for shape in [(5, 5), (10, 49), (50, 49), (500, 501)]:
for op in [theano.scalar.basic.mul, theano.scalar.basic.add]:
x = T.fmatrix('x')
y = T.fmatrix('y')
elemwise_op = theano.tensor.Elemwise(op)
pycuda_op = PycudaElemwiseSourceModuleOp(op)
pycuda_op_thunk = PycudaElemwiseSourceModuleMakeThunkOp(op)
f = theano.function([x, y], elemwise_op(x, y), mode=mode_with_gpu)
f2 = theano.function([x, y],
theano.sandbox.cuda.host_from_gpu(
pycuda_op(x, y)),
mode=mode_with_gpu)
mode_pycuda = mode_with_gpu.including("local_pycuda_gpu_elemwise")
f3 = theano.function([x, y], elemwise_op(x, y),
mode=mode_pycuda)
f4 = theano.function([x, y],
theano.sandbox.cuda.host_from_gpu(
pycuda_op_thunk(x, y)),
mode=mode_with_gpu)
assert any([isinstance(node.op, theano.sandbox.cuda.GpuElemwise)
for node in f.maker.fgraph.toposort()])
assert any([isinstance(node.op, PycudaElemwiseSourceModuleOp)
for node in f2.maker.fgraph.toposort()])
assert any([isinstance(node.op, PycudaElemwiseSourceModuleOp)
for node in f3.maker.fgraph.toposort()])
assert any([isinstance(node.op,
PycudaElemwiseSourceModuleMakeThunkOp)
for node in f4.maker.fgraph.toposort()])
val1 = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
val2 = numpy.asarray(numpy.random.rand(*shape), dtype='float32')
assert numpy.allclose(f(val1, val2), f2(val1, val2))
assert numpy.allclose(f(val1, val2), f3(val1, val2))
assert numpy.allclose(f(val1, val2), f4(val1, val2))
# print f(val1,val2)
# print f2(val1,val2)
"""
# commented out as it only works with old pycuda versions.
def test_pycuda_elemwise_kernel():
x = T.fmatrix('x')
y = T.fmatrix('y')
f = theano.function([x, y], x + y, mode=mode_with_gpu)
print f.maker.fgraph.toposort()
mode_pycuda = mode_with_gpu.including("local_pycuda_gpu_elemwise_kernel")
f2 = theano.function([x, y], x + y, mode=mode_pycuda)
print f2.maker.fgraph.toposort()
assert any([isinstance(node.op, theano.sandbox.cuda.GpuElemwise)
for node in f.maker.fgraph.toposort()])
assert any([isinstance(node.op, PycudaElemwiseKernelOp)
for node in f2.maker.fgraph.toposort()])
val1 = numpy.asarray(numpy.random.rand(5, 5), dtype='float32')
val2 = numpy.asarray(numpy.random.rand(5, 5), dtype='float32')
#val1 = numpy.ones((5,5))
#val2 = numpy.arange(25).reshape(5,5)
assert (f(val1, val2) == f2(val1, val2)).all()
print f(val1, val2)
print f2(val1, val2)
x3 = T.ftensor3('x')
y3 = T.ftensor3('y')
z3 = T.ftensor3('y')
f4 = theano.function([x3, y3, z3], x3 * y3 + z3, mode=mode_pycuda)
print f4.maker.fgraph.toposort()
assert any([isinstance(node.op, PycudaElemwiseKernelOp)
for node in f4.maker.fgraph.toposort()])
val1 = numpy.random.rand(2, 2, 2)
print val1
print f4(val1, val1, val1)
assert numpy.allclose(f4(val1, val1, val1), val1 * val1 + val1)
"""
| 42.796296
| 78
| 0.606015
|
794c4f537cf89372fd0a5148b01966fa709bd7a9
| 17,148
|
py
|
Python
|
titan_client/models/entities_schema_links_reports.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | 2
|
2021-08-23T08:41:44.000Z
|
2021-08-29T15:09:27.000Z
|
titan_client/models/entities_schema_links_reports.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | 1
|
2021-09-16T18:12:22.000Z
|
2021-09-22T16:12:11.000Z
|
titan_client/models/entities_schema_links_reports.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Titan API v1
# Introduction The Intel 471 API is organized around the principles of REST. Our API lets you gather results from our platform with anything that can send a HTTP request, including cURL and modern internet browsers. Access to this API requires an API token which is managed from your account settings. Intel 471 reserves the right to add fields to our API however we will provide backwards compatibility and older version support so that it will be possible to choose exact versions that provide a response with an older structure. This documentation tracks all API versions and it is possible to compare this version which has changes highlighted. Please consider not storing information provided by API locally as we constantly improving our data set and want you to have the most updated information. # Authentication Authenticate to the Intel 471 API by providing your API key in the request. Your API key carries many privileges so please do not expose them on public web resources. Authentication to the API occurs by providing your email address as the login and API key as password in the authorization header via HTTP Basic Auth. Your API key can be found in the [API](https://portal.intel471.com/api) section on the portal. # Accessing API ## Via internet browser Just open url: `https://api.intel471.com/v1/reports` Browser will ask for credentials, provide your email as login and API key as password. ## Via curl command line utility Type in terminal the following command: ``` curl -u <YOU EMAIL>:<YOUR API KEY> https://api.intel471.com/v1/reports ``` ## CURL usage examples This section covers some Watchers API requests. ### List watcher groups: Type in terminal the following command: *curl -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create watcher group: To create watcher group you need to pass a json body to request. Passing json body possible in two ways: #### Write json to request *curl -d'{\"name\": \"group_name\", \"description\": \"Description\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* #### Write json to file and call it *curl -d\"@json_file_name\" -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create free text search watcher: *curl -d'{\"type\": \"search\", \"freeTextPattern\": \"text to search\", \"notificationChannel\": \"website\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ### Create specific search watcher: *curl -d'{\"type\": \"search\", \"patterns\":[ { \"types\": \"Actor\" , \"pattern\": \"swisman\" } ], \"notificationChannel\": \"website\" }' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ## Via Python Execute the following script: ``` import urllib2, base64 username = \"<YOU EMAIL>\" apikey = \"<YOUR API KEY>\" request = urllib2.Request(\"https://api.intel471.com/v1/reports\") base64string = base64.encodestring('%s:%s' % (username, apikey)).replace('\\n', '') request.add_header(\"Authorization\", \"Basic %s\" % base64string) result = urllib2.urlopen(request) response_in_json = result.read() print response_in_json ``` # API integration best practice with your application When accessing our API from your application don't do AJAX calls directly from web browser to https://api.intel471.com/. We do not allow CORS requests from browser due to potential security issues. 
Instead we suggest you look to establish a kind of a server side proxy in your application which will pass requests to our API. For example: you can send a request from browser javascript to your server side, for instance to url `/apiproxy/actors?actor=hacker` which will be internally passed to `https://api.intel471.com/v1/actors?actor=hacker` (with authentication headers added) and response will be sent back to the browser. # Versioning support We are consistently improving our API and occasionally bring in changes to the API based on customer feedback. The current API version can be seen in the drop down boxes for each version. We are providing API backwards compatibility when possible. All requests are prefixed with the major version number, for example `/v1`: ``` https://api.intel471.com/v1/reports ``` Different major versions are not compatible and imply significant response structure changes. Minor versions differences might include extra fields in response or provide new request parameter support. To stick to the specific version, just add the following extra parameter to the request, for example: `?v=1.2.0`. If you specify a not existing version, it will be brought down to the nearest existing one. For example, parameter `?v=1.5.4` will call API of version 1.3.0 — the latest available; `?v=1.2.9` will awake version 1.2.0 and so on. Omitting the version parameter from your request means you will always use the latest version of the API. We highly recommend you always add the version parameter to be safe on API updates and code your integration in a way to accept possible future extra fields added to the response object. ``` https://api.intel471.com/v1/tags?prettyPrint - will return response for the latest API version (v.1.1.0) https://api.intel471.com/v1/tags?prettyPrint&v=1.1.0 - absolutely the same request with the version explicitly specified https://api.intel471.com/v1/reports?prettyPrint&v=1.0.0 - will return response compatible with the older version ``` # noqa: E501
The version of the OpenAPI document: 1.18.0
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from titan_client.configuration import Configuration
class EntitiesSchemaLinksReports(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admiralty_code': 'str',
'date_of_information': 'int',
'motivation': 'list[str]',
'portal_report_url': 'str',
'released': 'int',
'source_characterization': 'str',
'subject': 'str',
'uid': 'str'
}
attribute_map = {
'admiralty_code': 'admiraltyCode',
'date_of_information': 'dateOfInformation',
'motivation': 'motivation',
'portal_report_url': 'portalReportUrl',
'released': 'released',
'source_characterization': 'sourceCharacterization',
'subject': 'subject',
'uid': 'uid'
}
def __init__(self, admiralty_code=None, date_of_information=None, motivation=None, portal_report_url=None, released=None, source_characterization=None, subject=None, uid=None, local_vars_configuration=None): # noqa: E501
"""EntitiesSchemaLinksReports - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._admiralty_code = None
self._date_of_information = None
self._motivation = None
self._portal_report_url = None
self._released = None
self._source_characterization = None
self._subject = None
self._uid = None
self.discriminator = None
if admiralty_code is not None:
self.admiralty_code = admiralty_code
if date_of_information is not None:
self.date_of_information = date_of_information
if motivation is not None:
self.motivation = motivation
self.portal_report_url = portal_report_url
self.released = released
if source_characterization is not None:
self.source_characterization = source_characterization
self.subject = subject
self.uid = uid
@property
def admiralty_code(self):
"""Gets the admiralty_code of this EntitiesSchemaLinksReports. # noqa: E501
Code as described [here](http://en.wikipedia.org/wiki/Admiralty_code). All Fintel reports have admiraltyCode=`A1`. # noqa: E501
:return: The admiralty_code of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: str
"""
return self._admiralty_code
@admiralty_code.setter
def admiralty_code(self, admiralty_code):
"""Sets the admiralty_code of this EntitiesSchemaLinksReports.
Code as described [here](http://en.wikipedia.org/wiki/Admiralty_code). All Fintel reports have admiraltyCode=`A1`. # noqa: E501
:param admiralty_code: The admiralty_code of this EntitiesSchemaLinksReports. # noqa: E501
:type admiralty_code: str
"""
if (self.local_vars_configuration.client_side_validation and
admiralty_code is not None and not re.search(r'^[A-F][1-6]$', admiralty_code)): # noqa: E501
raise ValueError(r"Invalid value for `admiralty_code`, must be a follow pattern or equal to `/^[A-F][1-6]$/`") # noqa: E501
self._admiralty_code = admiralty_code
@property
def date_of_information(self):
"""Gets the date_of_information of this EntitiesSchemaLinksReports. # noqa: E501
Date of information as Epoch Time. # noqa: E501
:return: The date_of_information of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: int
"""
return self._date_of_information
@date_of_information.setter
def date_of_information(self, date_of_information):
"""Sets the date_of_information of this EntitiesSchemaLinksReports.
Date of information as Epoch Time. # noqa: E501
:param date_of_information: The date_of_information of this EntitiesSchemaLinksReports. # noqa: E501
:type date_of_information: int
"""
self._date_of_information = date_of_information
@property
def motivation(self):
"""Gets the motivation of this EntitiesSchemaLinksReports. # noqa: E501
Actor's `motivation`. `CC` for Cyber Crime, `CE` for Cyber Espionage, `HA` for Hacktivism. # noqa: E501
:return: The motivation of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: list[str]
"""
return self._motivation
@motivation.setter
def motivation(self, motivation):
"""Sets the motivation of this EntitiesSchemaLinksReports.
Actor's `motivation`. `CC` for Cyber Crime, `CE` for Cyber Espionage, `HA` for Hacktivism. # noqa: E501
:param motivation: The motivation of this EntitiesSchemaLinksReports. # noqa: E501
:type motivation: list[str]
"""
self._motivation = motivation
@property
def portal_report_url(self):
"""Gets the portal_report_url of this EntitiesSchemaLinksReports. # noqa: E501
URL to the report on the portal. # noqa: E501
:return: The portal_report_url of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: str
"""
return self._portal_report_url
@portal_report_url.setter
def portal_report_url(self, portal_report_url):
"""Sets the portal_report_url of this EntitiesSchemaLinksReports.
URL to the report on the portal. # noqa: E501
:param portal_report_url: The portal_report_url of this EntitiesSchemaLinksReports. # noqa: E501
:type portal_report_url: str
"""
if self.local_vars_configuration.client_side_validation and portal_report_url is None: # noqa: E501
raise ValueError("Invalid value for `portal_report_url`, must not be `None`") # noqa: E501
self._portal_report_url = portal_report_url
@property
def released(self):
"""Gets the released of this EntitiesSchemaLinksReports. # noqa: E501
Date the `report` was released. # noqa: E501
:return: The released of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: int
"""
return self._released
@released.setter
def released(self, released):
"""Sets the released of this EntitiesSchemaLinksReports.
Date the `report` was released. # noqa: E501
:param released: The released of this EntitiesSchemaLinksReports. # noqa: E501
:type released: int
"""
if self.local_vars_configuration.client_side_validation and released is None: # noqa: E501
raise ValueError("Invalid value for `released`, must not be `None`") # noqa: E501
self._released = released
@property
def source_characterization(self):
"""Gets the source_characterization of this EntitiesSchemaLinksReports. # noqa: E501
Characterization of the report's source. # noqa: E501
:return: The source_characterization of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: str
"""
return self._source_characterization
@source_characterization.setter
def source_characterization(self, source_characterization):
"""Sets the source_characterization of this EntitiesSchemaLinksReports.
Characterization of the report's source. # noqa: E501
:param source_characterization: The source_characterization of this EntitiesSchemaLinksReports. # noqa: E501
:type source_characterization: str
"""
self._source_characterization = source_characterization
@property
def subject(self):
"""Gets the subject of this EntitiesSchemaLinksReports. # noqa: E501
Report's `subject`. # noqa: E501
:return: The subject of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: str
"""
return self._subject
@subject.setter
def subject(self, subject):
"""Sets the subject of this EntitiesSchemaLinksReports.
Report's `subject`. # noqa: E501
:param subject: The subject of this EntitiesSchemaLinksReports. # noqa: E501
:type subject: str
"""
if self.local_vars_configuration.client_side_validation and subject is None: # noqa: E501
raise ValueError("Invalid value for `subject`, must not be `None`") # noqa: E501
self._subject = subject
@property
def uid(self):
"""Gets the uid of this EntitiesSchemaLinksReports. # noqa: E501
Unique report identifier. # noqa: E501
:return: The uid of this EntitiesSchemaLinksReports. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this EntitiesSchemaLinksReports.
Unique report identifier. # noqa: E501
:param uid: The uid of this EntitiesSchemaLinksReports. # noqa: E501
:type uid: str
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EntitiesSchemaLinksReports):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EntitiesSchemaLinksReports):
return True
return self.to_dict() != other.to_dict()
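
# --- Illustrative usage sketch (not part of the generated model) -------------
# A hedged example of constructing the model directly and dumping it to a
# dict. An explicit Configuration() is passed so the sketch does not rely on a
# globally configured default; every field value below is made up.
if __name__ == "__main__":
    example = EntitiesSchemaLinksReports(
        admiralty_code="A1",
        subject="Example report subject",
        portal_report_url="https://portal.intel471.com/report/example",
        released=1600000000,
        uid="0123456789abcdef",
        local_vars_configuration=Configuration(),
    )
    print(example.to_dict(serialize=True))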
| 50.884273
| 5,526
| 0.677922
|
794c4f9d605bf60f4d8ab3fde40dc8279e56fc47
| 37,098
|
py
|
Python
|
prophyc/tests/test_model.py
|
florczakraf/prophy
|
a42a6151a77b31afa05300fc2e1f52cc15a298cf
|
[
"MIT"
] | 14
|
2015-02-19T22:00:37.000Z
|
2020-11-30T03:03:55.000Z
|
prophyc/tests/test_model.py
|
florczakraf/prophy
|
a42a6151a77b31afa05300fc2e1f52cc15a298cf
|
[
"MIT"
] | 31
|
2015-06-22T11:11:10.000Z
|
2021-05-12T06:35:47.000Z
|
prophyc/tests/test_model.py
|
florczakraf/prophy
|
a42a6151a77b31afa05300fc2e1f52cc15a298cf
|
[
"MIT"
] | 16
|
2015-06-12T06:48:06.000Z
|
2019-11-26T22:48:13.000Z
|
# -*- coding: utf-8 -*-
import pytest
import prophyc # noqa
from prophyc import model
def test_node_eq():
a = model.Enum("name", [], "docstr 1")
b = model.Enum("name", [], "docstr 2")
c = model.Struct("name", [], "docstr 3")
assert a == b
assert a != c
def assert_repr_reproduces(object_):
""" The second assertion assumes that __eq__ checks actual equality. In fact - it skips docstring. """
assert repr(eval(repr(object_))) == repr(object_)
assert eval(repr(object_)) == object_
return True
def test_typedef_str():
typedef = model.Typedef("my_typedef", "u8")
assert str(typedef) == "typedef u8 my_typedef;"
def test_typedef_repr():
assert_repr_reproduces(model.Typedef('my_typedef', 'u8', docstring='comment'))
def test_struct_repr():
struct_a = model.Struct("MyStruct", [
model.StructMember("a", "u8"),
model.StructMember("b", "cint16_t"),
model.StructMember("c", "u32", size=3, docstring="no_docstring"),
], docstring="no_docstring_")
assert repr(struct_a) == """\
prophyc.model.Struct(
'MyStruct',
[
prophyc.model.StructMember('a', 'u8'),
prophyc.model.StructMember('b', 'cint16_t'),
prophyc.model.StructMember('c', 'u32', size=3, docstring='no_docstring'),
],
'no_docstring_',
)"""
assert_repr_reproduces(struct_a)
assert struct_a == eval(repr(struct_a))
assert_repr_reproduces(struct_a)
def test_struct_str():
struct_with_arrays = model.Struct("MyStruct", [
model.StructMember("sizer_field", "u8"),
model.StructMember("b", "UserDefinedType"),
model.StructMember("c", "UserDefinedType", optional=True),
model.StructMember("fixed_array", "u32", size=3, docstring="no_docstring"),
model.StructMember("dynamic_array", "u32", bound='num_of_dynamic_array'),
model.StructMember("limited_array", "r64", bound='num_of_limited_array', size=3),
model.StructMember("ext_sized_1", "i32", bound="sizer_field"),
model.StructMember("ext_sized_2", "i16", bound="sizer_field"),
model.StructMember("greedy_array", "u8", greedy=True),
], docstring="no_docstring_")
# todo: it's against the documentation
assert str(struct_with_arrays) == """\
struct MyStruct {
u8 sizer_field;
UserDefinedType b;
UserDefinedType* c;
u32 fixed_array[3];
u32 dynamic_array<@num_of_dynamic_array>;
r64 limited_array<3>;
i32 ext_sized_1<@sizer_field>;
i16 ext_sized_2<@sizer_field>;
u8 greedy_array<...>;
};
"""
def test_union_repr():
union = model.Union("MyUnion", [
model.UnionMember("a", "u8", 1),
model.UnionMember("b", "u16", 2),
model.UnionMember("c", "u32", 3, docstring="deff")
])
assert_repr_reproduces(union)
assert str(union) == """\
union MyUnion {
1: u8 a;
2: u16 b;
3: u32 c;
};
"""
def test_larger_model_str(larger_model):
processed, _ = model.evaluate_model(larger_model)
assert "\n".join(str(node) for node in processed) == """\
typedef i16 a;
typedef a c;
#include some_defs;
#include cplx;
union the_union {
0: IncludedStruct a;
1: cint16_t field_with_a_long_name;
2: cint32_t field_with_a_longer_name;
4090: i32 other;
};
enum E1 {
E1_A = '0';
E1_B_has_a_long_name = '1';
E1_C_desc = '2';
};
enum E2 {
E2_A = '0';
};
const CONST_A = '6';
const CONST_B = '0';
struct StructMemberKinds {
i16 member_without_docstring;
i16 ext_size;
cint16_t* optional_element;
cint16_t fixed_array[3];
cint16_t samples<@ext_size>;
r64 limited_array<4>;
cint16_t greedy<...>;
};
"""
def test_larger_model_repr(larger_model):
assert_repr_reproduces(larger_model)
NODES_CONSTRUCTORS = [
(model.Constant, ("ño", "value")),
(model.EnumMember, ("ño", "value")),
(model.Typedef, ("ño", "value")),
(model.StructMember, ("ño", "value")),
(model.UnionMember, ("ño", "tp_", 2)),
(model.Include, ("ño", [])),
(model.Struct, ("ño", [],)),
(model.Union, ("ño", [])),
(model.Enum, ("ño", [])),
]
@pytest.mark.parametrize("k, args", NODES_CONSTRUCTORS)
def test_unicode_handling(k, args):
a = k(*args, docstring="used weird letter from 'jalapeño' word")
b = k(*args, docstring="used weird letter from 'jalapeño' word")
assert a == b
assert a.docstring == b.docstring
def test_split_after():
generator = model.split_after([1, 42, 2, 3, 42, 42, 5], lambda x: x == 42)
assert [x for x in generator] == [[1, 42], [2, 3, 42], [42], [5]]
def test_model_sort_enums():
nodes = [
model.Typedef("B", "A"),
model.Typedef("C", "A"),
model.Enum("A", []),
]
model.topological_sort(nodes)
assert ["A", "B", "C"] == [node.name for node in nodes]
def test_model_sort_typedefs():
nodes = [
model.Typedef("A", "X"),
model.Typedef("C", "B"),
model.Typedef("B", "A"),
model.Typedef("E", "D"),
model.Typedef("D", "C"),
]
model.topological_sort(nodes)
assert [node.name for node in nodes] == ["A", "B", "C", "D", "E"]
assert [dep for node in nodes for dep in node.dependencies()] == ["X", "A", "B", "C", "D"]
def test_model_sort_structs():
nodes = [
model.Struct("C", [
model.StructMember("a", "B"),
model.StructMember("b", "A"),
model.StructMember("c", "D"),
]),
model.Struct("B", [
model.StructMember("a", "X"),
model.StructMember("b", "A"),
model.StructMember("c", "Y"),
]),
model.Struct("A", [
model.StructMember("a", "X"),
model.StructMember("b", "Y"),
model.StructMember("c", "Z"),
]),
]
model.topological_sort(nodes)
assert [node.name for node in nodes] == ["A", "B", "C"]
assert [tuple(node.dependencies()) for node in nodes] == [('X', 'Y', 'Z'), ('X', 'A', 'Y'), ('B', 'A', 'D')]
def test_model_sort_struct_with_two_deps():
nodes = [
model.Struct("C", [model.StructMember("a", "B")]),
model.Struct("B", [model.StructMember("a", "A")]),
model.Struct("A", [model.StructMember("a", "X")]),
]
model.topological_sort(nodes)
assert [node.name for node in nodes] == ["A", "B", "C"]
def test_model_sort_struct_with_multiple_dependencies():
nodes = [
model.Struct("D", [
model.StructMember("a", "A"),
model.StructMember("b", "B"),
model.StructMember("c", "C"),
]),
model.Struct("C", [
model.StructMember("a", "A"),
model.StructMember("b", "B"),
]),
model.Struct("B", [
model.StructMember("a", "A"),
]),
model.Typedef("A", "TTypeX"),
]
model.topological_sort(nodes)
assert [node.name for node in nodes] == ["A", "B", "C", "D"]
def test_model_sort_union():
nodes = [
model.Typedef("C", "B"),
model.Union("B", [
model.UnionMember("a", "A", "0"),
model.UnionMember("b", "A", "1"),
]),
model.Struct("A", [
model.StructMember("a", "X"),
]),
]
model.topological_sort(nodes)
assert [node.name for node in nodes] == ["A", "B", "C"]
def test_model_sort_constants():
nodes = [
model.Constant("C_C", "C_A + C_B"),
model.Constant("C_A", "1"),
model.Constant("C_B", "2"),
]
model.topological_sort(nodes)
assert nodes == [("C_A", "1"), ("C_B", "2"), ("C_C", "C_A + C_B")]
def test_cross_reference_structs():
nodes = [
model.Struct("A", [
model.StructMember("a", "u8"),
]),
model.Struct("B", [
model.StructMember("a", "A"),
model.StructMember("b", "u8"),
]),
model.Struct("C", [
model.StructMember("a", "A"),
model.StructMember("b", "B"),
model.StructMember("c", "NON_EXISTENT"),
]),
model.Struct("D", [
model.StructMember("a", "A"),
model.StructMember("b", "B"),
model.StructMember("c", "C"),
])
]
constants = model.cross_reference(nodes)
assert [n.name for n in nodes] == ['A', 'B', 'C', 'D']
definition_names = [[x.definition.name if x.definition else None for x in y.members] for y in nodes]
assert definition_names == [
[None],
['A', None],
['A', 'B', None],
['A', 'B', 'C'],
]
assert [tuple(n.dependencies()) for n in nodes] == [
('u8',),
('A', 'u8'),
('A', 'B', 'NON_EXISTENT'),
('A', 'B', 'C'),
]
assert constants == {}
def test_cross_reference_typedef():
nodes = [
model.Struct("A", [
model.StructMember("a", "u8"),
]),
model.Typedef("B", "A"),
model.Struct("C", [
model.StructMember("a", "A"),
model.StructMember("b", "B"),
]),
model.Typedef("D", "B"),
]
model.cross_reference(nodes)
assert nodes[1].definition.name == "A"
assert nodes[2].members[1].definition.definition.name == "A"
assert nodes[3].definition.name == "B"
assert nodes[3].definition.definition.name == "A"
def test_cross_symbols_from_includes():
nodes = [
model.Include('x', [
model.Include('y', [
model.Typedef('ala', 'u32')
]),
model.Struct('ola', [
model.StructMember('a', 'ala'),
]),
]),
model.Struct('ula', [
model.StructMember('a', 'ola'),
model.StructMember('b', 'ala'),
])
]
model.cross_reference(nodes)
assert nodes[1].members[0].definition == model.Struct('ola', [model.StructMember('a', 'ala')], '')
assert nodes[1].members[1].definition == model.Typedef('ala', 'u32')
# cross-reference only needs to link definitions of first level of nodes
assert nodes[0].members[1].members[0] == model.StructMember('a', 'ala', None)
assert nodes[0].members[1].members[0].definition is None
def test_cross_reference_array_size_from_includes():
nodes = [
model.Include('x', [
model.Include('y', [
model.Constant('NUM_HEX', '0xf'),
model.Constant('NUM_DEC', '3'),
]),
model.Enum('E', [
model.EnumMember('E1', 'NUM_HEX'),
model.EnumMember('E3', 'NUM_DEC'),
]),
]),
model.Struct('X', [
model.StructMember('x', 'u32', size='NUM_DEC'),
model.StructMember('y', 'u32', size='E1'),
model.StructMember('z', 'u32', size='UNKNOWN'),
model.StructMember('a', 'u32', size='E3'),
])
]
constants = model.cross_reference(nodes)
assert nodes[1].members[0].numeric_size == 3
assert nodes[1].members[1].numeric_size == 15
assert nodes[1].members[2].numeric_size is None
assert nodes[1].members[3].numeric_size == 3
assert constants == {
'E1': 15,
'E3': 3,
'NUM_DEC': 3,
'NUM_HEX': 15,
}
def test_cross_reference_numeric_size_of_expression():
nodes = [
model.Constant('A', 12),
model.Constant('B', 15),
model.Constant('C', 'A*B'),
model.Struct('X', [
model.StructMember('x', 'u32', size='C'),
]),
]
constants = model.cross_reference(nodes)
assert nodes[3].members[0].numeric_size == 180
assert constants == {'A': 12, 'B': 15, 'C': 180}
def test_cross_reference_expression_as_array_size():
nodes = [
model.Struct('X', [
model.StructMember('x', 'u32', size='2 * 3'),
]),
]
model.cross_reference(nodes)
assert nodes[0].members[0].numeric_size == 6
class WarnFake(object):
def __init__(self):
self.msgs = []
def __call__(self, msg):
self.msgs.append(msg)
def test_cross_reference_typedef_warnings():
nodes = [model.Typedef('X', 'Unknown')]
warn = WarnFake()
model.cross_reference(nodes, warn)
assert warn.msgs == ["type 'Unknown' not found"]
def test_cross_reference_struct_warnings():
nodes = [model.Struct('X', [model.StructMember('x', 'TypeUnknown', size='12 + NumUnknown')])]
warn = WarnFake()
model.cross_reference(nodes, warn)
assert warn.msgs == ["type 'TypeUnknown' not found", "numeric constant 'NumUnknown' not found"]
def test_cross_reference_union_warnings():
nodes = [model.Union('X', [model.UnionMember('x', 'TypeUnknown', '42')])]
warn = WarnFake()
model.cross_reference(nodes, warn)
assert warn.msgs == ["type 'TypeUnknown' not found"]
def test_cross_reference_no_warning_about_primitive_types():
warn = WarnFake()
model.cross_reference([model.Typedef('X', 'u8')], warn)
model.cross_reference([model.Typedef('X', 'u16')], warn)
model.cross_reference([model.Typedef('X', 'u32')], warn)
model.cross_reference([model.Typedef('X', 'u64')], warn)
model.cross_reference([model.Typedef('X', 'i8')], warn)
model.cross_reference([model.Typedef('X', 'i16')], warn)
model.cross_reference([model.Typedef('X', 'i32')], warn)
model.cross_reference([model.Typedef('X', 'i64')], warn)
model.cross_reference([model.Typedef('X', 'r32')], warn)
model.cross_reference([model.Typedef('X', 'r64')], warn)
model.cross_reference([model.Typedef('X', 'byte')], warn)
assert warn.msgs == []
def test_cross_reference_quadratic_complexity_include_performance_bug():
"""
If type and numeric definitions from includes are processed each time,
compilation times can skyrocket...
"""
FACTOR = 10
nodes = [model.Constant('X', 42), model.Typedef('Y', 'u8')] * FACTOR
for i in range(FACTOR):
nodes = [model.Include('inc%s' % i, nodes)] * FACTOR
nodes.append(model.Struct('Z', [model.StructMember('x', 'u8', size='X')]))
"""This line will kill your cpu if cross-referencing algorithm is quadratic"""
model.cross_reference(nodes)
assert nodes[-1].members[0].numeric_size == 42
def test_evaluate_kinds_arrays():
nodes = [
model.Struct("A", [
model.StructMember("a", "u8"),
model.StructMember("b", "u8", optional=True),
model.StructMember("c", "u8", size="5"),
model.StructMember("d_len", "u8"),
model.StructMember("d", "u8", bound="d_len", size="5"),
model.StructMember("e_len", "u8"),
model.StructMember("e", "u8", bound="e_len"),
model.StructMember("f", "u8", greedy=True),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
assert [x.kind for x in nodes[0].members] == [
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
]
def test_evaluate_kinds_struct_records():
nodes = [
model.Struct("Fix", [
model.StructMember("a", "u8"),
]),
model.Struct("Dyn", [
model.StructMember("a_len", "u8"),
model.StructMember("a", "u8", bound="a_len"),
]),
model.Struct("X", [
model.StructMember("a", "Dyn"),
model.StructMember("b_len", "u8"),
model.StructMember("b", "Fix", bound="b_len"),
model.StructMember("c", "Fix", greedy=True),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
assert [x.kind for x in nodes] == [
model.Kind.FIXED,
model.Kind.DYNAMIC,
model.Kind.UNLIMITED,
]
assert [x.kind for x in nodes[2].members] == [
model.Kind.DYNAMIC,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.FIXED,
]
def test_evaluate_kinds_with_typedefs():
nodes = [
model.Struct("Empty", []),
model.Struct("Dynamic", [
model.StructMember("a_len", "u8"),
model.StructMember("a", "u8", bound="a_len"),
]),
model.Struct("Fixed", [
model.StructMember("a", "u8", size="10"),
]),
model.Struct("Limited", [
model.StructMember("a_len", "u8"),
model.StructMember("a", "u8", bound="a_len", size="10"),
]),
model.Struct("Greedy", [
model.StructMember("a", "byte", greedy=True),
]),
model.Struct("DynamicWrapper", [
model.StructMember("a", "Dynamic"),
]),
model.Struct("GreedyWrapper", [
model.StructMember("a", "Greedy"),
]),
model.Struct("GreedyDynamic", [
model.StructMember("a", "Dynamic", greedy=True),
]),
model.Typedef("TU8", "u8"),
model.Typedef("TDynamic", "Dynamic"),
model.Typedef("TGreedy", "Greedy"),
model.Struct("TypedefedU8", [
model.StructMember("a", "TU8"),
]),
model.Struct("TypedefedDynamic", [
model.StructMember("a", "TDynamic"),
]),
model.Struct("TypedefedGreedy", [
model.StructMember("a", "TGreedy"),
]),
model.Typedef("TTDynamic", "TDynamic"),
model.Typedef("TTTDynamic", "TTDynamic"),
model.Struct("DeeplyTypedefed", [
model.StructMember("a", "TTTDynamic"),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
assert [x.kind for x in nodes if isinstance(x, model.Struct)] == [
model.Kind.FIXED,
model.Kind.DYNAMIC,
model.Kind.FIXED,
model.Kind.FIXED,
model.Kind.UNLIMITED,
model.Kind.DYNAMIC,
model.Kind.UNLIMITED,
model.Kind.UNLIMITED,
model.Kind.FIXED,
model.Kind.DYNAMIC,
model.Kind.UNLIMITED,
model.Kind.DYNAMIC,
]
def test_partition_fixed():
nodes = [
model.Struct("Fixed", [
model.StructMember("a", "u8"),
model.StructMember("b", "u8"),
model.StructMember("c", "u8"),
])
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
main, parts = model.partition(nodes[0].members)
assert [x.name for x in main] == ["a", "b", "c"]
assert [[x.name for x in part] for part in parts] == []
def test_partition_many_arrays():
nodes = [
model.Struct("ManyArrays", [
model.StructMember("num_of_a", "u8"),
model.StructMember("a", "u8", bound="num_of_a"),
model.StructMember("num_of_b", "u8"),
model.StructMember("b", "u8", bound="num_of_b"),
model.StructMember("num_of_c", "u8"),
model.StructMember("c", "u8", bound="num_of_c"),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
main, parts = model.partition(nodes[0].members)
assert [x.name for x in main] == ["num_of_a", "a"]
assert [[x.name for x in part] for part in parts] == [["num_of_b", "b"], ["num_of_c", "c"]]
def test_partition_many_arrays_mixed():
nodes = [
model.Struct("ManyArraysMixed", [
model.StructMember("num_of_a", "u8"),
model.StructMember("num_of_b", "u8"),
model.StructMember("a", "u8", bound="num_of_a"),
model.StructMember("b", "u8", bound="num_of_b"),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
main, parts = model.partition(nodes[0].members)
assert [x.name for x in main] == ["num_of_a", "num_of_b", "a"]
assert [[x.name for x in part] for part in parts] == [["b"]]
def test_partition_dynamic_struct():
nodes = [
model.Struct("Dynamic", [
model.StructMember("num_of_a", "u8"),
model.StructMember("a", "u8", bound="num_of_a"),
]),
model.Struct("X", [
model.StructMember("a", "u8"),
model.StructMember("b", "Dynamic"),
model.StructMember("c", "u8"),
])
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
main, parts = model.partition(nodes[1].members)
assert [x.name for x in main] == ["a", "b"]
assert [[x.name for x in part] for part in parts] == [["c"]]
def test_partition_many_dynamic_structs():
nodes = [
model.Struct("Dynamic", [
model.StructMember("num_of_a", "u8"),
model.StructMember("a", "u8", bound="num_of_a"),
]),
model.Struct("X", [
model.StructMember("a", "Dynamic"),
model.StructMember("b", "Dynamic"),
model.StructMember("c", "Dynamic"),
]),
]
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
main, parts = model.partition(nodes[1].members)
assert [x.name for x in main] == ["a"]
assert [[x.name for x in part] for part in parts] == [["b"], ["c"]]
def process(nodes, warn=None):
model.cross_reference(nodes)
model.evaluate_stiffness_kinds(nodes)
model.evaluate_sizes(nodes, **(warn and {'warn': warn} or {}))
return nodes
def process_with_warnings(nodes):
warnings = []
process(nodes, lambda warning: warnings.append(warning))
return nodes, warnings
def get_size_alignment_padding(node):
return (
isinstance(node, model.StructMember) and
(node.byte_size, node.alignment, node.padding) or
(node.byte_size, node.alignment)
)
def get_members_and_node(node):
return node.members + [node]
def test_evaluate_sizes_struct():
nodes = process([
model.Struct('X', [
model.StructMember('x', 'u16'),
model.StructMember('y', 'u8'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(2, 2, 0),
(1, 1, 1),
(4, 2),
]
def test_evaluate_sizes_nested_struct():
nodes = process([
model.Struct('U16', [
model.StructMember('x', 'u16'),
]),
model.Struct('X', [
model.StructMember('x', 'u8'),
model.StructMember('y', 'U16'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(1, 1, 1),
(2, 2, 0),
(4, 2),
]
def test_evaluate_sizes_fixed_array():
nodes = process([
model.Struct('X', [
model.StructMember('x', 'u32'),
model.StructMember('y', 'u8', size='3'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4, 0),
(3, 1, 1),
(8, 4),
]
def test_evaluate_sizes_dynamic_array():
nodes = process([
model.Struct('X', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u8', bound='num_of_x'),
]),
model.Struct('Y', [
model.StructMember('x', 'u8'),
model.StructMember('y', 'X'),
model.StructMember('z', 'u8'),
]),
model.Struct('Z', [
model.StructMember('x', 'X'),
model.StructMember('y', 'u64'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4, 0),
(0, 1, -4),
(4, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(1, 1, 3),
(4, 4, 0),
(1, 1, -4),
(12, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[2]))) == [
(4, 4, -8),
(8, 8, 0),
(16, 8),
]
def test_evaluate_sizes_limited_array():
nodes = process([
model.Struct('X', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u8', bound='num_of_x', size='2'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4, 0),
(2, 1, 2),
(8, 4),
]
def test_evaluate_sizes_greedy_array():
nodes = process([
model.Struct('X', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u8', greedy=True),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4, 0),
(0, 1, -4),
(4, 4),
]
def test_evaluate_sizes_partial_padding():
nodes = process([
model.Struct('D', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u32', bound='num_of_x'),
]),
model.Struct('X', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u8', bound='num_of_x'),
model.StructMember('y', 'u8'),
model.StructMember('z', 'u64'),
]),
model.Struct('Y', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u8', bound='num_of_x'),
model.StructMember('num_of_y', 'u32'),
model.StructMember('y', 'u64', bound='num_of_y'),
]),
model.Struct('Z', [
model.StructMember('d1', 'D'),
model.StructMember('x', 'u8'),
model.StructMember('d2', 'D'),
model.StructMember('y1', 'u8'),
model.StructMember('y2', 'u64'),
model.StructMember('d3', 'D'),
model.StructMember('z1', 'u8'),
model.StructMember('z2', 'u8'),
model.StructMember('z3', 'u16'),
]),
model.Struct('ZZZ', [
model.StructMember('num_of_x', 'u32'),
model.StructMember('x', 'u16', bound='num_of_x'),
model.StructMember('y', 'u16'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(4, 4, 0),
(0, 1, -8),
(1, 8, 7),
(8, 8, 0),
(24, 8),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[2]))) == [
(4, 4, 0),
(0, 1, -8),
(4, 8, 4),
(0, 8, 0),
(16, 8),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[3]))) == [
(4, 4, 0),
(1, 4, 3),
(4, 4, -8),
(1, 8, 7),
(8, 8, 0),
(4, 4, 0),
(1, 2, 0),
(1, 1, 0),
(2, 2, -8),
(40, 8),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[4]))) == [
(4, 4, 0),
(0, 2, 0),
(2, 2, -4),
(8, 4),
]
def test_evaluate_sizes_typedef():
nodes = process([
model.Typedef('T1', 'u32'),
model.Struct('X', [
model.StructMember('x', 'T1'),
]),
model.Typedef('T2', 'T1'),
model.Struct('Y', [
model.StructMember('x', 'T2'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(4, 4, 0),
(4, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[3]))) == [
(4, 4, 0),
(4, 4),
]
def test_evaluate_sizes_enum():
nodes = process([
model.Enum('E', [
model.EnumMember('E1', '1'),
]),
model.Struct('X', [
model.StructMember('x', 'E'),
model.StructMember('y', 'i8'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(4, 4, 0),
(1, 1, 3),
(8, 4),
]
def test_evaluate_sizes_floats():
nodes = process([
model.Struct('X', [
model.StructMember('x', 'r32'),
model.StructMember('y', 'r64'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4, 4),
(8, 8, 0),
(16, 8),
]
def test_evaluate_sizes_bytes():
nodes = process([
model.Struct('X', [
model.StructMember('x', 'byte'),
model.StructMember('y', 'byte', size=3),
model.StructMember('num_of_z', 'u32'),
model.StructMember('z', 'byte', bound='num_of_z'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(1, 1, 0),
(3, 1, 0),
(4, 4, 0),
(0, 1, -4),
(8, 4),
]
def test_evaluate_sizes_optional():
nodes = process([
model.Struct('X', [
model.StructMember('x', 'u32'),
]),
model.Struct('O1', [
model.StructMember('x', 'u8', optional=True),
model.StructMember('y', 'u16', optional=True),
model.StructMember('z', 'u32', optional=True),
model.StructMember('a', 'u64', optional=True),
]),
model.Struct('O2', [
model.StructMember('x', 'X', optional=True),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(5, 4, 3),
(6, 4, 2),
(8, 4, 0),
(16, 8, 0),
(40, 8),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[2]))) == [
(8, 4, 0),
(8, 4),
]
def test_evaluate_sizes_union():
nodes = process([
model.Union('X', [
model.UnionMember('x', 'u32', '1'),
model.UnionMember('y', 'u32', '2'),
model.UnionMember('z', 'u32', '3'),
]),
model.Union('Y', [
model.UnionMember('x', 'u64', '1'),
]),
model.Union('Z', [
model.UnionMember('x', 'X', '1'),
model.UnionMember('y', 'Y', '2'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(4, 4),
(4, 4),
(4, 4),
(8, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(8, 8),
(16, 8),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[2]))) == [
(8, 4),
(16, 8),
(24, 8),
]
def test_evaluate_sizes_union_with_padding():
nodes = process([
model.Union('X', [
model.UnionMember('x', 'u8', '1'),
]),
model.Union('Y', [
model.UnionMember('x', 'u8', '1'),
model.UnionMember('y', 'u64', '2'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(1, 1),
(8, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(1, 1),
(8, 8),
(16, 8),
]
def test_evaluate_sizes_empty():
nodes = process([
model.Struct('X', []),
model.Union('X', []),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [(0, 1)]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [(4, 4)]
def test_evaluate_sizes_unknown():
nodes, warnings = process_with_warnings([
model.Struct('X', [
model.StructMember('x', 'u8'),
model.StructMember('y', 'U'),
model.StructMember('z', 'u32'),
]),
model.Union('Y', [
model.UnionMember('x', 'u32', '1'),
model.UnionMember('y', 'U', '2'),
model.UnionMember('z', 'u32', '3'),
]),
model.Typedef('U16', 'U'),
model.Struct('Z', [
model.StructMember('x', 'U16'),
model.StructMember('y', 'Unknown'),
]),
])
assert warnings == [
'X::y has unknown type "U"',
'Y::y has unknown type "U"',
'Z::x has unknown type "U"',
'Z::y has unknown type "Unknown"',
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[0]))) == [
(1, 1, None),
(None, None, None),
(4, 4, None),
(None, None),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(4, 4),
(None, None),
(4, 4),
(None, None),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[3]))) == [
(None, None, None),
(None, None, None),
(None, None),
]
def test_evaluate_sizes_array_with_named_size():
nodes = process([
model.Constant('NUM', '3'),
model.Enum('E', [
model.EnumMember('E1', '1'),
model.EnumMember('E3', 'NUM'),
]),
model.Struct('X', [
model.StructMember('x', 'u32', size='NUM'),
model.StructMember('y', 'u32', size='E1'),
model.StructMember('z', 'u32', size='E3'),
]),
model.Struct('Y', [
model.StructMember('x', 'u32', size='UNKNOWN'),
model.StructMember('y', 'u32'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[2]))) == [
(12, 4, 0),
(4, 4, 0),
(12, 4, 0),
(28, 4),
]
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[3]))) == [
(None, None, None),
(4, 4, None),
(None, None),
]
def test_evaluate_sizes_with_include():
nodes = process([
model.Include('input', [
model.Enum('E', [
model.EnumMember('E1', '1'),
]),
]),
model.Struct('X', [
model.StructMember('x', 'E'),
model.StructMember('y', 'i8'),
]),
])
assert list(map(get_size_alignment_padding, get_members_and_node(nodes[1]))) == [
(4, 4, 0),
(1, 1, 3),
(8, 4),
]
def test_enum_str_and_repr():
enum = model.Enum('TheEnum', [
model.EnumMember('E1', 1),
model.EnumMember('E2', '2'),
model.EnumMember('E3', '3'),
])
assert str(enum) == """\
enum TheEnum {
E1 = 1;
E2 = '2';
E3 = '3';
};
"""
assert_repr_reproduces(enum)
def test_wrong_struct_members_type_definition():
expected_msg = "struct 'A' members must be a list, got str instead."
with pytest.raises(model.ModelError, match=expected_msg):
model.Struct("A", "string")
def test_wrong_struct_member_type():
expected_msg = "Each member of struct 'A' has to be a StructMember instance. Got str at index 1."
with pytest.raises(model.ModelError, match=expected_msg):
model.Struct("A", [
model.StructMember('field_name', 'u32'),
"string",
])
expected_msg = "Each member of struct 'A' has to be a StructMember instance. Got UnionMember at index 0."
with pytest.raises(model.ModelError, match=expected_msg):
model.Struct("A", [
model.UnionMember('field_name', 'u32', 2),
])
def test_wrong_union_member_type():
expected_msg = "Each member of union 'U' has to be a UnionMember instance. Got str at index 1."
with pytest.raises(model.ModelError, match=expected_msg):
model.Union("U", [
model.UnionMember('field_name', 'u32', 0),
"string",
])
expected_msg = "Each member of union 'U' has to be a UnionMember instance. Got StructMember at index 0."
with pytest.raises(model.ModelError, match=expected_msg):
model.Union("U", [
model.StructMember('field_name', 'u32'),
])
def test_duplicated_identifiers_struct():
with pytest.raises(model.ModelError, match="Duplicated 'field_name' identifier in struct A"):
model.Struct("A", [
model.StructMember('field_name', 'u32'),
model.StructMember('field_name', 'u16'),
])
def test_duplicated_identifiers_union():
with pytest.raises(model.ModelError, match="Duplicated 'field_name' identifier in union U"):
model.Union("U", [
model.UnionMember('field_name', 'u32', 0),
model.UnionMember('field_name', 'u16', 1),
])
def test_bad_creation():
with pytest.raises(model.ModelError, match="Got model node name of 'float' type, expected string."):
model.Struct(3.14159, [])
def test_bad_creation_typedef():
msg = "Typedef.definition should be string, Typedef, Enum, Struct or Union, got: float."
with pytest.raises(model.ModelError, match=msg):
model.Typedef("a", "b", 3.14159)
def test_not_implemented():
a = model.ModelNode("a", "b", "c")
with pytest.raises(NotImplementedError, match="To be overridden in ModelNode class."):
a.dependencies()
def test_not_implemented2():
with pytest.raises(NotImplementedError, match="Abstract method to be overriden in _Serializable"):
model._Serializable.calc_wire_stiffness()
def test_bad_attribute():
a = model.Include("this", [])
expected_message = "Use of value property is forbidden for Include. Use 'Include.members' instead."
with pytest.raises(model.ModelError, match=expected_message):
a.value
| 29.073668
| 112
| 0.548224
|
794c4fa9a81056eab6917bf11dced2d861f046e3
| 410
|
py
|
Python
|
pydantic_models.py
|
OmarThinks/flask_encryption_endpoint
|
f101b2ff313c0dc076d03175740165f8789cd600
|
[
"MIT"
] | null | null | null |
pydantic_models.py
|
OmarThinks/flask_encryption_endpoint
|
f101b2ff313c0dc076d03175740165f8789cd600
|
[
"MIT"
] | null | null | null |
pydantic_models.py
|
OmarThinks/flask_encryption_endpoint
|
f101b2ff313c0dc076d03175740165f8789cd600
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, constr
message_constraint = constr(max_length=1000000000000)
original_constraint = constr(max_length=1000000)
passphrase_constraint = constr(min_length=2, max_length=10000)
class DecryptionInputs(BaseModel):
    message: message_constraint
    passphrase: passphrase_constraint
class EncryptionInputs(BaseModel):
    original: original_constraint
    passphrase: passphrase_constraint
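# A minimal usage sketch (not part of the original module), relying only on
# pydantic's standard constructor-time validation; all field values below are
# placeholders.
from pydantic import ValidationError
def _example_validation():
    ok = EncryptionInputs(original="hello world", passphrase="secret")
    try:
        DecryptionInputs(message="ciphertext", passphrase="x")  # shorter than min_length=2
    except ValidationError as error:
        return ok, error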
| 21.578947
| 62
| 0.814634
|
794c501f4fef74a25da03344c253abe288192edb
| 1,347
|
py
|
Python
|
platypush/message/event/stt.py
|
RichardChiang/platypush
|
1777ebb0516118cdef20046a92caab496fa7c6cb
|
[
"MIT"
] | 228
|
2018-01-30T11:17:09.000Z
|
2022-03-24T11:22:26.000Z
|
platypush/message/event/stt.py
|
RichardChiang/platypush
|
1777ebb0516118cdef20046a92caab496fa7c6cb
|
[
"MIT"
] | 167
|
2017-12-11T19:35:38.000Z
|
2022-03-27T14:45:30.000Z
|
platypush/message/event/stt.py
|
BlackLight/runbullet
|
8d26c8634d2677b4402f0a21b9ab8244b44640db
|
[
"MIT"
] | 16
|
2018-05-03T07:31:56.000Z
|
2021-12-05T19:27:37.000Z
|
from platypush.message.event import Event
class SttEvent(Event):
""" Base class for speech-to-text events """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class SpeechStartedEvent(SttEvent):
"""
Event triggered when speech starts being detected.
"""
pass
class SpeechDetectedEvent(SttEvent):
"""
Event triggered when speech is detected.
"""
def __init__(self, speech: str, *args, **kwargs):
"""
:param speech: Speech detected, as a string
"""
super().__init__(*args, speech=speech.strip(), **kwargs)
class ConversationDetectedEvent(SpeechDetectedEvent):
"""
Event triggered when speech is detected after a hotword.
"""
pass
class HotwordDetectedEvent(SttEvent):
"""
Event triggered when a custom hotword is detected.
"""
def __init__(self, hotword: str = '', *args, **kwargs):
"""
:param hotword: The detected user hotword.
"""
super().__init__(*args, hotword=hotword, **kwargs)
class SpeechDetectionStartedEvent(SttEvent):
"""
Event triggered when the speech detection engine starts.
"""
pass
class SpeechDetectionStoppedEvent(SttEvent):
"""
Event triggered when the speech detection engine stops.
"""
pass
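# A minimal sketch (not part of platypush) of how a speech-to-text backend
# might construct these events; the text and hotword values are placeholders.
def _example_events():
    return [
        SpeechDetectionStartedEvent(),
        HotwordDetectedEvent(hotword='computer'),
        SpeechDetectedEvent(speech='  turn on the lights  '),  # stored stripped
        SpeechDetectionStoppedEvent(),
    ]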
# vim:sw=4:ts=4:et:
| 21.725806
| 64
| 0.637713
|
794c51a8d6a944a91c636ecc1d2871a104aba607
| 551
|
py
|
Python
|
Password/Password.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
Password/Password.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
Password/Password.py
|
Amirkhaksar/QueraQuestion
|
1042aef6a04cc798b0ed2847124ea1a45a007c47
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Language: python
ID: 17902
QLink: https://quera.org/problemset/17902/
Author: AmirZoyber
'''
import collections
# First line: number of dials; second line: the password, one digit per dial.
k = int(input())
password = [int(a) for a in input()]
# Each of the next k lines gives the current arrangement of one dial.
dials = [[int(a) for a in input()] for _ in range(k)]
rotations = 0
for digit, dial in zip(password, dials):
    wheel = collections.deque(dial)
    # Rotate towards the nearer side: positions 0-4 are reached faster by
    # rotating left (towards the front), positions 5-9 by rotating right.
    direction = -1 if dial.index(digit) <= 4 else 1
    while wheel[0] != digit:
        wheel.rotate(direction)
        rotations += 1
print(rotations)
| 22.958333
| 42
| 0.555354
|
794c52107fc12982da3927bbf59d01daf079bb50
| 23,242
|
py
|
Python
|
tensorflow/python/kernel_tests/random/stateless_random_ops_test.py
|
KosingZhu/tensorflow
|
7ac2521a4e609ddef0f0ea3ffc2e76102da934d7
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/kernel_tests/random/stateless_random_ops_test.py
|
govl-psb/tensorflow-1
|
60028072a1c3b4376e145b6fea8e4ccd3324377f
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/kernel_tests/random/stateless_random_ops_test.py
|
govl-psb/tensorflow-1
|
60028072a1c3b4376e145b6fea8e4ccd3324377f
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
# Note that in theory each test will reset the eager context and may choose to
# hide some devices, so we shouldn't cache this transient info. Tests in this
# file don't make those config changes, so caching is fine. It provides a good
# speed-up.
_cached_device = None
def get_device():
global _cached_device
if _cached_device is not None:
return _cached_device
# Precedence from high to low
for device_type in ('XLA_GPU', 'GPU', 'XLA_CPU', 'CPU'):
devices = config.list_logical_devices(device_type)
if devices:
_cached_device = devices[0]
return _cached_device
raise ValueError('Cannot find any suitable device. Available devices: %s' %
config.list_logical_devices())
BEFORE_EXPIRE = (2020, 10, 24)
AFTER_EXPIRE = (2020, 10, 26)
def invert_philox(key, value):
"""Invert the Philox bijection."""
key = np.array(key, dtype=np.uint32)
value = np.array(value, dtype=np.uint32)
step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
for n in range(10)[::-1]:
key0, key1 = key + n * step
v0 = value[3] * 0x991a7cdb & 0xffffffff
v2 = value[1] * 0x6d7cae67 & 0xffffffff
hi0 = v0 * 0xD2511F53 >> 32
hi1 = v2 * 0xCD9E8D57 >> 32
v1 = hi1 ^ value[0] ^ key0
v3 = hi0 ^ value[2] ^ key1
value = v0, v1, v2, v3
return np.array(value)
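def _example_preseed(seed=(7, 17)):
  # A minimal sketch (not part of the test suite) of how invert_philox is used
  # by _test_match below: invert the Philox bijection on (seed[0], 0, seed[1], 0)
  # under a fixed key, then pack the four 32-bit words into two 64-bit values.
  key = 0x3ec8f720, 0x02461e29
  preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
  return preseed[::2] | preseed[1::2] << 32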
SEEDS = ((7, 17), (11, 5), (2, 3))
SEED_TYPES = [dtypes.int32, dtypes.int64]
def float_cases(shape_dtypes=(None,)):
cases = (
# Uniform distribution, with and without range
('uniform', stateless.stateless_random_uniform, random_ops.random_uniform,
{}),
('uniform2', stateless.stateless_random_uniform,
random_ops.random_uniform, dict(minval=2.2, maxval=7.1)),
# Normal distribution, with and without mean+stddev
('normal', stateless.stateless_random_normal, random_ops.random_normal,
{}),
('normal2', stateless.stateless_random_normal, random_ops.random_normal,
dict(mean=2, stddev=3)),
# Truncated normal distribution, with and without mean+stddev
('trnorm', stateless.stateless_truncated_normal,
random_ops.truncated_normal, {}),
('trnorm2', stateless.stateless_truncated_normal,
random_ops.truncated_normal, dict(mean=3, stddev=4)),
)
# Explicitly passing in params because capturing cell variable from loop is
# problematic in Python
def wrap(op, dtype, shape, shape_dtype, seed, **kwargs):
device_type = get_device().device_type
# Some dtypes are not supported on some devices
if (dtype == dtypes.float16 and device_type in ('XLA_GPU', 'XLA_CPU') or
dtype == dtypes.bfloat16 and device_type == 'GPU'):
dtype = dtypes.float32
shape_ = (constant_op.constant(shape, dtype=shape_dtype)
if shape_dtype is not None else shape)
return op(seed=seed, shape=shape_, dtype=dtype, **kwargs)
def _name(a):
if hasattr(a, 'name'):
return a.name
else:
return a
for dtype in dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
for name, stateless_op, stateful_op, kwargs in cases:
yield (('%s_%s_%s_%s' %
(name, _name(dtype), shape, _name(shape_dtype))).replace(
' ', ''),
functools.partial(wrap, stateless_op, dtype, shape,
shape_dtype, **kwargs),
functools.partial(wrap, stateful_op, dtype, shape, shape_dtype,
**kwargs))
def int_cases(shape_dtypes=(None,), minval_maxval=None):
def wrap(op, minval, maxval, shape, shape_dtype, dtype, seed, **kwargs):
shape_ = (constant_op.constant(shape, dtype=shape_dtype)
if shape_dtype is not None else shape)
return op(
seed=seed, shape=shape_, minval=minval, maxval=maxval, dtype=dtype,
**kwargs)
if minval_maxval is None:
minval_maxval = ((2, 11111),)
for minval, maxval in minval_maxval:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
for dtype in dtypes.int32, dtypes.int64:
yield ('uniform_%s_%s' % (minval, maxval),
functools.partial(wrap, stateless.stateless_random_uniform,
minval, maxval, shape, shape_dtype, dtype),
functools.partial(wrap, random_ops.random_uniform, minval,
maxval, shape, shape_dtype, dtype))
def multinomial_cases():
num_samples = 10
def wrap(op, logits, logits_dtype, output_dtype, seed):
return op(seed=seed,
logits=constant_op.constant(logits, dtype=logits_dtype),
num_samples=num_samples, output_dtype=output_dtype)
for logits_dtype in np.float16, np.float32, np.float64:
for output_dtype in dtypes.int32, dtypes.int64:
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
yield ('multinomial',
functools.partial(wrap, stateless.stateless_multinomial, logits,
logits_dtype, output_dtype),
functools.partial(wrap, random_ops.multinomial, logits,
logits_dtype, output_dtype))
def gamma_cases():
def wrap(op, alpha, dtype, shape, seed):
return op(seed=seed, shape=shape,
alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)
for dtype in np.float16, np.float32, np.float64:
for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
yield ('gamma',
functools.partial(wrap, stateless.stateless_random_gamma, alpha,
dtype, (10,) + tuple(np.shape(alpha))),
functools.partial(wrap, random_ops.random_gamma, alpha, dtype,
(10,)))
def poisson_cases():
def wrap(op, lam, lam_dtype, out_dtype, shape, seed):
return op(seed=seed, shape=shape,
lam=constant_op.constant(lam_dtype(lam), dtype=lam_dtype),
dtype=out_dtype)
for lam_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
for out_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
for lam in ([[5.5, 1., 2.]], [[7.5, 10.5], [3.8, 8.2], [1.25, 9.75]]):
yield ('poisson',
functools.partial(wrap, stateless.stateless_random_poisson, lam,
lam_dtype, out_dtype,
(10,) + tuple(np.shape(lam))),
functools.partial(wrap, random_ops.random_poisson, lam,
lam_dtype, out_dtype, (10,)))
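def _determinism_example():
  # A minimal sketch (not part of the test suite) of the property verified
  # below: stateless ops are pure functions of the seed, so the same seed
  # always produces the same values.
  seed = constant_op.constant([7, 17], dtype=dtypes.int32)
  first = stateless.stateless_random_uniform(shape=[3], seed=seed)
  second = stateless.stateless_random_uniform(shape=[3], seed=seed)
  return first, second  # identical tensors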
class StatelessOpsTest(test.TestCase, parameterized.TestCase):
def _test_match(self, case, seed):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
key = 0x3ec8f720, 0x02461e29
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
with ops.device(get_device().name):
_, stateless_op, stateful_op = case
random_seed.set_random_seed(seed[0])
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)
self.assertAllEqual(stateful, pure)
def _test_match_stateless_cpu_gpu(self, case, seed):
# Stateless ops should produce the same result on CPUs and GPUs.
_, stateless_op, _ = case
with ops.device('CPU'):
result_cpu = stateless_op(seed=seed)
with ops.device(get_device().name):
result_gpu = stateless_op(seed=seed)
self.assertAllClose(result_cpu, result_gpu)
def _test_old_and_new_stateless_match(self, case, seed):
"""Tests that the new stateless ops match the old stateless ones."""
with ops.device(get_device().name):
_, stateless_op, _ = case
with compat.forward_compatibility_horizon(*BEFORE_EXPIRE):
old = stateless_op(seed=seed)
with compat.forward_compatibility_horizon(*AFTER_EXPIRE):
new = stateless_op(seed=seed)
self.assertAllClose(old, new)
def _test_explicit_alg(self, case, seed):
"""Tests that alg=philox and alg=None are the same (on CPU/GPU)."""
with ops.device(get_device().name):
_, stateless_op, _ = case
implicit_alg = stateless_op(seed=seed)
# All device types allowed in this test will result in Philox
explicit_alg = stateless_op(seed=seed, alg='philox')
self.assertAllClose(implicit_alg, explicit_alg)
def _test_determinism(self, case, seed_type):
# Stateless values should be equal iff the seeds are equal (roughly)
seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension
with self.test_session(), ops.device(get_device().name):
_, stateless_op, _ = case
if context.executing_eagerly():
values = [
(seed, stateless_op(seed=constant_op.constant(seed, seed_type)))
for seed in seeds]
else:
# Have this branch because the above branch is too slow in graph
# mode
seed_t = array_ops.placeholder(seed_type, shape=[2])
pure = stateless_op(seed=seed_t)
values = [
(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
]
for s0, v0 in values:
for s1, v1 in values:
if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:
self.assertEqual(s0 == s1, np.all(v0 == v1))
elif s0 == s1:
# Skip the s0 != s1 case because v0 and v1 can be either equal or
# unequal in that case due to bfloat16's low precision
self.assertAllEqual(v0, v1)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchFloat(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(int_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchInt(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchMultinomial(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchGamma(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testStatelessGammaCpuGpuMatch(self, case, seed):
if get_device().device_type != 'GPU':
# This test compares the numbers produced by the CPU and GPU kernel for
# stateless_random_gamma.
self.skipTest('This test requires GPU')
self._test_match_stateless_cpu_gpu(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchPoisson(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchFloat(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchInt(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgFloat(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgInt(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
float_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismFloat(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
int_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismInt(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismMultinomial(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismGamma(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismPoisson(self, case, seed_type):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@test_util.run_v2_only
def testGetKeyCounterAlg(self):
seed = [1, 2]
key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(
seed)
self.assertAllEqual(key.shape, [1])
self.assertAllEqual(counter.shape, [2])
alg = gen_stateless_random_ops_v2.stateless_random_get_alg()
self.assertAllEqual(alg.shape, [])
def assertDTypeEqual(self, a, b):
self.assertEqual(dtypes.as_dtype(a), dtypes.as_dtype(b))
def assertNoEqualPair(self, ls):
for i in range(len(ls)):
for j in range(i + 1, len(ls)):
self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testSplit(self, dtype):
"""Test for `split`."""
seed = constant_op.constant([1, 2], dtype=dtype)
new_seed = stateless.split(seed, 3)
self.assertEqual(new_seed.shape, [3, 2])
self.assertDTypeEqual(new_seed.dtype, dtype)
self.assertNoEqualPair([seed] + array_ops.unstack(new_seed))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testFoldIn(self, dtype):
"""Test for `fold_in`."""
orig_seed = constant_op.constant([1, 2], dtype='int32')
seed = stateless.fold_in(orig_seed, constant_op.constant(3, dtype=dtype))
new_seeds = []
new_seeds.append(seed)
seed = stateless.fold_in(seed, constant_op.constant(4, dtype=dtype))
new_seeds.append(seed)
for s in new_seeds:
self.assertEqual(s.shape, [2])
self.assertDTypeEqual(s.dtype, dtype)
self.assertNoEqualPair([math_ops.cast(orig_seed, dtype)] + new_seeds)
@test_util.run_v2_only
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
with self.assertRaisesWithPredicateMatch(
ValueError,
'minval must be a scalar; got a tensor of shape '):
@def_function.function
def f():
stateless.stateless_random_uniform(
shape=shape, seed=[1, 2], minval=array_ops.zeros(shape, 'int32'),
maxval=100, dtype='int32')
f()
with self.assertRaisesWithPredicateMatch(
ValueError,
'maxval must be a scalar; got a tensor of shape '):
@def_function.function
def f2():
stateless.stateless_random_uniform(
shape=shape, seed=[1, 2], minval=0,
maxval=array_ops.ones(shape, 'int32') * 100,
dtype='int32')
f2()
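def _seed_derivation_example():
  # A minimal sketch (not part of the test suite) of the helpers exercised in
  # testSplit and testFoldIn above: `split` derives several new seeds from one,
  # and `fold_in` mixes extra data (e.g. a step counter) into an existing seed.
  seed = constant_op.constant([1, 2], dtype='int32')
  child_seeds = stateless.split(seed, 3)  # shape [3, 2]
  step_seed = stateless.fold_in(seed, constant_op.constant(7, dtype='int32'))
  return child_seeds, step_seed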
if __name__ == '__main__':
config.set_soft_device_placement(False)
context.context().enable_xla_devices()
test.main()
| 43.770245
| 116
| 0.669478
|
794c524666831f2334fb52291a73ddc62f4e66b2
| 1,560
|
py
|
Python
|
wham/apis/instagram/models.py
|
PaulWay/django-wham
|
ee76d57e1f85e0bcefbb7a844539e4bb3bb68b1c
|
[
"MIT"
] | 101
|
2015-01-30T00:59:02.000Z
|
2021-03-28T20:29:09.000Z
|
wham/apis/instagram/models.py
|
PaulWay/django-wham
|
ee76d57e1f85e0bcefbb7a844539e4bb3bb68b1c
|
[
"MIT"
] | 6
|
2015-02-23T06:17:21.000Z
|
2021-06-10T19:40:41.000Z
|
wham/apis/instagram/models.py
|
PaulWay/django-wham
|
ee76d57e1f85e0bcefbb7a844539e4bb3bb68b1c
|
[
"MIT"
] | 18
|
2015-02-23T05:42:34.000Z
|
2021-02-03T16:21:02.000Z
|
from django.db import models
# https://api.instagram.com/v1/tags/djangocon/media/recent?client_id=c3cdcbff22f649f1a08bedf12be1ca86
from wham.fields import WhamCharField, WhamManyToManyField, WhamImageUrlField
from wham.models import WhamModel
class InstagramMeta:
base_url = 'https://api.instagram.com/v1/'
auth_for_public_get = 'API_KEY'
api_key_settings_name = 'INSTAGRAM_CLIENT_ID'
api_key_param = 'client_id'
class InstagramTag(WhamModel):
name = WhamCharField(max_length=255, primary_key=True)
posts = WhamManyToManyField(
# https://api.instagram.com/v1/tags/djangocon/media/recent
'InstagramPost',
related_name='tags',
wham_endpoint='tags/{{id}}/media/recent',
wham_results_path=('data',)
)
class Meta:
db_table = 'instagram_tag'
class WhamMeta(InstagramMeta):
endpoint = 'tags'
detail_base_result_path = ('data',) #can we make this less verbose??
def __unicode__(self):
return self.name
class InstagramPost(WhamModel):
id = WhamCharField(max_length=255, primary_key=True)
type = WhamCharField(max_length=10)
image_url = WhamImageUrlField(wham_result_path=('images', 'standard_resolution', 'url'))
class Meta:
db_table = 'instagram_media'
class WhamMeta(InstagramMeta):
class Search:
endpoint = 'tags/{{tag}}/media/recent'
results_path = ('data',)
fields = ('tag',)
def __unicode__(self):
return self.image_url
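def _example_lookup():
    # A minimal usage sketch (not part of the models module), assuming wham's
    # Django-style managers fetch missing rows from the Instagram API on
    # access; the tag name is a placeholder.
    tag = InstagramTag.objects.get(pk='djangocon')
    return [post.image_url for post in tag.posts.all()]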
| 27.368421
| 101
| 0.683974
|
794c5295f3cd97e5e8b257d51fbdc04bffee82d6
| 9,346
|
py
|
Python
|
sdk/python/pulumi_azure_native/apimanagement/v20170301/tag.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20170301/tag.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20170301/tag.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
tag_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] display_name: Tag name.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] tag_id: Tag identifier. Must be unique in the current API Management service instance.
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if tag_id is not None:
pulumi.set(__self__, "tag_id", tag_id)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="tagId")
def tag_id(self) -> Optional[pulumi.Input[str]]:
"""
Tag identifier. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "tag_id")
@tag_id.setter
def tag_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_id", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
tag_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Tag Contract details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] display_name: Tag name.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] tag_id: Tag identifier. Must be unique in the current API Management service instance.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Tag Contract details.
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
tag_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["tag_id"] = tag_id
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Tag"), pulumi.Alias(type_="azure-native:apimanagement:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:Tag"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:Tag"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:Tag")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Tag, __self__).__init__(
'azure-native:apimanagement/v20170301:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TagArgs.__new__(TagArgs)
__props__.__dict__["display_name"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
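def _example_usage():
    # A minimal usage sketch (not generated code): creating a tag on an
    # existing API Management service from inside a Pulumi program. All
    # resource names below are placeholders.
    return Tag("exampleTag",
               resource_group_name="example-rg",
               service_name="example-apim",
               display_name="Example tag",
               tag_id="example-tag")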
| 45.149758
| 1,315
| 0.652793
|
794c53acdeada557fdb53999ee592bbaa1698308
| 50,282
|
py
|
Python
|
tensorflow/python/distribute/tpu_strategy_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 8
|
2021-08-03T03:57:10.000Z
|
2021-12-13T01:19:02.000Z
|
tensorflow/python/distribute/tpu_strategy_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/tpu_strategy_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 1
|
2019-11-01T05:38:49.000Z
|
2019-11-01T05:38:49.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute import tpu_values
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy(enable_packed_var=False):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
strategy = tpu_lib.TPUStrategyV2(resolver)
strategy._enable_packed_variable_in_eager_mode = enable_packed_var
return strategy
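def _example_train_step(strategy, replica_inputs):
  # A minimal sketch (not part of the test suite) of the pattern most tests
  # below follow: wrap the per-replica computation in a tf.function, launch it
  # on every replica with strategy.run, and gather the per-replica results.
  @def_function.function
  def train_step(inputs):
    def step_fn(x):
      return x * 2.0
    return strategy.experimental_local_results(
        strategy.run(step_fn, args=(inputs,)))
  return train_step(replica_inputs)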
# TPU tests which don't use TPUStrategy.
class TPUTest(test.TestCase):
def test_multiple_initialize_system(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
with test.mock.patch.object(logging, "warning") as mock_log:
tpu_strategy_util.initialize_tpu_system(resolver)
self.assertRegex(str(mock_log.call_args), "already been initialized")
def test_tpu_tf_function_same_device(self):
with ops.device("/device:TPU:0"):
a = variables.Variable(1)
@function.defun_with_attributes(attributes={"_noinline": True})
def get_a_plus_one():
return a + 1
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def foo(x):
with ops.device("/device:TPU:0"):
b = x + get_a_plus_one()
return b + 1
result = foo(a)
self.assertAllEqual(4, result)
def test_tpu_return_int32(self):
with ops.device("/device:TPU:0"):
a = variables.Variable(0)
@def_function.function
def foo():
return a + 1
@def_function.function
def bar():
with ops.device("/device:TPU:1"):
return foo()
with ops.device("/device:CPU:0"):
result = bar() + 1
self.assertAllEqual(result, 2)
def test_tpu_output_device(self):
def foo():
return 1 + 1
func1 = function.defun_with_attributes(
foo, attributes={"_XlaMustCompile": False})
func2 = function.defun_with_attributes(
foo, attributes={
"_OutputsOnOpDevice": True,
"_XlaMustCompile": False
})
with ops.device("/device:TPU:0"):
ret1 = func1()
ret2 = func2()
self.assertAllEqual(ret1.backing_device,
"/job:localhost/replica:0/task:0/device:CPU:0")
self.assertAllEqual(ret2.backing_device,
"/job:localhost/replica:0/task:0/device:TPU:0")
def test_on_demand_op_with_dynamic_output(self):
if FLAGS.tpu_use_tfrt:
self.skipTest("Support dynamic output in TFRT, see b/192576400")
with ops.device("/device:TPU:0"):
where_output = array_ops.where([True, False, True])
self.assertAllEqual(where_output, [[0], [2]])
with ops.device("/device:TPU:0"):
repeat_output = array_ops.repeat(math_ops.range(2), [1, 4])
self.assertAllEqual(repeat_output, [0, 1, 1, 1, 1])
@parameterized.named_parameters([("PackedVar", True), ("", False)])
class TPUStrategyTest(test.TestCase, parameterized.TestCase):
def test_handle_in_cross_replica_context(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(1.0)
@def_function.function
def func():
self.assertEndsWith(v.handle.device, "device:TPU:0")
return v + 1.0
ret = func()
self.assertAllEqual(ret, 2.0)
def testStaticHashTableDatasetFnHostTrainingLoop(self, enable_packed_var):
self._dataset_fn_tracing_count = 0
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
vals = [0, 1, 2]
keys_tensor = constant_op.constant(
list(range(len(vals))), dtype=dtypes.int64)
vals_tensor = constant_op.constant(vals)
initializer = lookup_ops.KeyValueTensorInitializer(
keys_tensor, vals_tensor)
per_worker_table = lookup_ops.StaticHashTable(
initializer, default_value=-1)
@def_function.function
def dataset_fn(input_context):
tensor = constant_op.constant([0, 1, 3], dtype=dtypes.int64)
global_batch_size = 2
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch(
batch_size, drop_remainder=True)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
dataset = dataset.map(per_worker_table.lookup)
self._dataset_fn_tracing_count += 1
return dataset
dist_iterator = iter(
strategy.experimental_distribute_datasets_from_function(dataset_fn))
@def_function.function
def step_fn(inputs):
# inputs should be [0, 1, -1]
return math_ops.reduce_sum(inputs)
def train_steps(iterator, steps):
for _ in math_ops.range(steps):
strategy.run(step_fn, args=(next(iterator),))
train_steps(dist_iterator, steps=5)
self.assertEqual(self._dataset_fn_tracing_count, 1)
def test_function_compile_with_xla(self, enable_packed_var):
if FLAGS.tpu_use_tfrt:
self.skipTest(
"This test triggers _XlaCompile and XlaLaunch which are not "
"supported in tfrt yet. We should avoid using these kernels on TPU. "
"However, it is a workaround to support b/129842431. We need more "
"discussion about how to support it in the long term.")
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(1.0)
@def_function.function
def func():
return v.read_value() + 1.0
with ops.device("/device:TPU:0"):
self.assertAllEqual(func(), 2.0)
def test_sequential_runs(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
# Computation replicated to all cores.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=2)
strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
strategy._enable_packed_variable_in_eager_mode = enable_packed_var
# Computation on the 1st core.
device_assignment2 = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
strategy2 = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
def computation(x):
return math_ops.square(x)
@def_function.function
def train_step():
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([2., 2.],)))
outputs2 = strategy2.run(
computation, args=([outputs[0]],))
return outputs2
self.assertAllEqual([[16., 16.]], train_step())
def test_device_switch_case(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
a = variables.Variable(1)
inference_iteration = variables.Variable(-1)
def inference_fn(x, i):
return a + x + i
@def_function.function
def run_inference(x):
def do_inference(device, inference_fn, i):
with ops.device(device):
return inference_fn(x, i)
branch_fns = {
0: (lambda: do_inference("/device:TPU:0", inference_fn, 0)),
1: (lambda: do_inference("/device:TPU:1", inference_fn, 1)),
}
branch_index = inference_iteration.assign_add(1, use_locking=True) % 2
return control_flow_ops.switch_case(branch_index, branch_fns)
self.assertAllEqual(2., run_inference(1)) # Use TPU core 0.
self.assertAllEqual(3., run_inference(1)) # Use TPU core 1.
def test_recover_from_compilation_failures(self, enable_packed_var):
# TODO(b/148150981): Stop skipping this test once recovery works
# for non-local TPU.
if FLAGS.tpu:
self.skipTest("Recovery fails for non-local TPU, see b/148150981")
# Disable automatic outside compilation.
config.set_soft_device_placement(False)
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def compilation_failure_run():
def computation():
return random_ops.random_gamma([10], [0.5, 1.5])
return strategy.run(computation)
with self.assertRaises(errors.OpError):
compilation_failure_run()
@def_function.function
def good_run():
def computation():
return random_ops.random_normal([10])
return strategy.run(computation)
good_run()
def test_dynamic_shape_with_outside_compilation_failure(
self, enable_packed_var):
# Enable automatic outside compilation.
config.set_soft_device_placement(True)
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.from_tensors(("string", 1.0)).repeat().batch(
2, drop_remainder=False)
dataset = strategy.experimental_distribute_dataset(dataset)
iterator = iter(dataset)
@def_function.function
def train_fn(iterator):
def step_fn(inputs):
input0, input1 = inputs
return array_ops.size(input0), math_ops.reduce_sum(input1)
return strategy.experimental_local_results(
strategy.run(step_fn, args=(next(iterator),)))
with self.assertRaises(errors.InvalidArgumentError):
logging.info(train_fn(iterator))
def test_computation_on_subset_cores(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
all_core_strategy = tpu_lib.TPUStrategyV2(resolver)
all_core_strategy._enable_packed_variable_in_eager_mode = enable_packed_var
with all_core_strategy.scope():
v = variables.Variable(0.0,
aggregation=variables.VariableAggregation.MEAN)
# Computation on the 1st core.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
first_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
first_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
# Computation on the 2nd core.
device_assignment2 = device_assignment_lib.DeviceAssignment(
topology, [[[0, 0, 0, 1]]])
second_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
second_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
@def_function.function
def train_step():
def step_fn():
return v + 1.0
all_core_strategy.run(step_fn)
r1 = first_core_strategy.run(step_fn)
r2 = second_core_strategy.run(step_fn)
return r1 + r2
train_step()
self.assertAllEqual(2., train_step())
def test_worker_devices_on_subset_cores(self, enable_packed_var):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
# Strategy for the 1st core.
device_assignment = device_assignment_lib.DeviceAssignment.build(
topology, num_replicas=1)
first_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment)
first_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
# Strategy for the 2nd core.
device_assignment2 = device_assignment_lib.DeviceAssignment(
topology, [[[0, 0, 0, 1]]])
second_core_strategy = tpu_lib.TPUStrategyV2(
resolver, experimental_device_assignment=device_assignment2)
second_core_strategy._enable_packed_variable_in_eager_mode = (
enable_packed_var)
self.assertLen(first_core_strategy.extended.worker_devices, 1)
self.assertEndsWith(first_core_strategy.extended.worker_devices[0],
"device:TPU:0")
self.assertLen(second_core_strategy.extended.worker_devices, 1)
self.assertEndsWith(second_core_strategy.extended.worker_devices[0],
"device:TPU:1")
def test_control_output_in_while_body_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
v = variables.Variable(
0.0, aggregation=variables.VariableAggregation.MEAN)
@def_function.function
def train_step():
def step_fn():
v.assign_add(1)
for _ in math_ops.range(2):
strategy.run(step_fn)
train_step()
self.assertEqual(2.0, v.numpy())
def test_cluster_conditional_with_dynamic_shape(self, enable_packed_var):
if FLAGS.tpu_use_tfrt:
self.skipTest("Support dynamic output in TFRT, see b/192576400")
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def shape_list(tensor):
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dynamic_shape = array_ops.shape(input=tensor)
for index in non_static_indexes:
shape[index] = dynamic_shape[index]
return shape
def step_fn(condition):
where = array_ops.where(condition)
if array_ops.shape(where)[0] > 0:
tensor_shape = shape_list(where)
d1 = tensor_shape[0]
d2 = tensor_shape[1]
where = array_ops.reshape(where, [d1, d2])
return where
return strategy.run(step_fn, args=([True, False, True],))
outputs = strategy.experimental_local_results(train_step())
self.assertAllEqual(outputs[0].numpy(), [[0], [2]])
def test_cluster_in_graph_and_while_body_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def step_fn(prev):
s = prev + 1
return s
def init_fn():
return array_ops.zeros(shape=())
prev = strategy.run(init_fn)
for _ in math_ops.range(10):
prev = strategy.run(step_fn, args=(prev,))
return strategy.reduce(reduce_util.ReduceOp.SUM, prev, axis=None)
sum_val = train_step().numpy().astype(float)
self.assertEqual(sum_val, strategy.num_replicas_in_sync * 10)
def test_two_clusters_with_same_fn(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def foo(x):
return strategy.run(lambda x: x + 1, (x,))
@def_function.function
def bar(x):
foo(x)
return foo(x)
bar(1)
def test_tpu_variable_run_argument(self, enable_packed_var):
# TPUStrategy.run() casts inputs to Tensor, but has logic to preserve
# variables to avoid unintuitive errors.
# Here we test that a TPUDistributedVariable passed to TPUStrategy.run()
# remains a variable.
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
tpu_variable = variables.Variable(1)
def replica_step(first_arg, variable):
del first_arg # Just here to make sure we're not relying on arg position.
if variable is not None:
self.assertIsInstance(variable, tpu_values.TPUDistributedVariable)
@def_function.function
def step():
strategy.run(
replica_step, args=(
2,
tpu_variable,
))
step()
def test_tpu_run_arg_parsing(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
tpu_vars = [variables.Variable(1)]
def only_star_args(*args):
del args
def pos_and_star_args(first_arg, *args):
del first_arg
del args
def named_args(first_arg, second_arg):
del first_arg
del second_arg
def star_args_and_kw_only(*args, kw):
del args
del kw
# pylint:disable=function-redefined
@def_function.function
def step():
strategy.run(only_star_args, args=(2,))
step()
@def_function.function
def step():
strategy.run(named_args, kwargs={"first_arg": 2, "second_arg": 3})
step()
with self.assertRaisesRegex(TypeError, r"got multiple values for argument"):
@def_function.function
def step():
strategy.run(
named_args, args=(1,), kwargs={
"first_arg": 2,
"second_arg": 3
})
step()
with self.assertRaisesRegex(ValueError,
r"cannot handle Variables passed to \*args"):
@def_function.function
def step():
strategy.run(
only_star_args, args=(
2,
tpu_vars,
))
step()
@def_function.function
def step():
strategy.run(pos_and_star_args, args=(2, 3, 4))
step()
@def_function.function
def step():
strategy.run(star_args_and_kw_only, args=(2, 3), kwargs={"kw": tpu_vars})
step()
with self.assertRaisesRegex(ValueError,
r"mix of positional args and \*args"):
@def_function.function
def step():
strategy.run(pos_and_star_args, args=(tpu_vars, 3, 4))
step()
with self.assertRaisesRegex(ValueError, r"Too many positional arguments"):
@def_function.function
def step():
strategy.run(named_args, args=(2, 3, 4))
step()
class DummyClass:
@def_function.function
def method(self, arg_1):
del arg_1
def step(self):
strategy.run(self.method, args=(tpu_vars,))
DummyClass().step()
# pylint:enable=function-redefined
def test_using_external_variable_inside_tf_function(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
v = variables.Variable(2.0)
@def_function.function
def train_step(data):
def computation(inputs):
return inputs + v
return strategy.run(computation, args=(data,))
    expected_result = [[x + 2.] for x in range(0, strategy.num_replicas_in_sync)]
self.assertAllEqual(
expected_result,
strategy.experimental_local_results(train_step(next(input_iterator))))
# TODO(b/145574622): Remove this test once it is re-enabled in values_test.py.
def test_all_reduce_on_sync_on_read_variable(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync, output_type=dtypes.float32).batch(
strategy.num_replicas_in_sync, drop_remainder=True)
input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
with strategy.scope():
w = variables.Variable(
(0.,),
shape=(1,),
trainable=False,
synchronization=variables.VariableSynchronization.ON_READ,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
@def_function.function
def run(iterator):
def computation(x):
w.assign(x + w)
return w
def all_reduce(x):
ctx = distribution_strategy_context.get_replica_context()
return ctx.all_reduce("SUM", w) + x
outputs = strategy.run(computation, args=(next(iterator),))
outputs2 = strategy.experimental_local_results(
strategy.run(all_reduce, args=(outputs,)))
return outputs2
data = range(0, strategy.num_replicas_in_sync)
data_sum = sum(data)
expected_result = [
[x + data_sum] for x in range(0, strategy.num_replicas_in_sync)
]
self.assertAllEqual(expected_result, run(input_iterator))
self.assertAllEqual((0.,), w.read_value())
def test_run_output_on_device(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
def computation(x):
return math_ops.square(x)
@def_function.function
def train_step():
outputs = strategy.experimental_local_results(
strategy.run(computation, args=(2,)))
return outputs
results = train_step()
self.assertAllEqual([4., 4.], results)
self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:0",
results[0].backing_device)
self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:1",
results[1].backing_device)
def test_run_passing_and_returning_nones(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
# Note that this input None is nested.
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([1, [2, None]],)))
return outputs
results = train_step()
self.assertAllEqual(1, results[0][0])
self.assertAllEqual(2, results[0][1][0])
self.assertIsNone(results[0][1][1])
def test_run_passing_and_returning_empty_list(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
outputs = strategy.experimental_local_results(
strategy.run(computation, args=([],)))
return outputs
self.assertEqual([], train_step()[0])
def test_run_passing_and_returning_empty_dict(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
@def_function.function
def train_step():
def computation(x):
return x
outputs = strategy.experimental_local_results(
strategy.run(computation, args=({},)))
return outputs
self.assertEqual({}, train_step()[0])
def test_composite_input_output(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
with strategy.scope():
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
# Assumes dense_shape is (2, *)
looked_up = array_ops.gather(table, sparse.values)
segment_sum = math_ops.unsorted_segment_sum(
looked_up, sparse.indices[:, 0], 2)
return sparse, segment_sum
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(_):
return sparse_tensor.SparseTensor(
indices=array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64),
values=array_ops.constant([0, 0, 1], dtype=dtypes.int32),
dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64))
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
sparse, result = sparse_lookup(dataset)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(sparse[replica], sparse_tensor.SparseTensor)
self.assertAllEqual(sparse[replica].indices, [[0, 0], [1, 0], [1, 1]])
self.assertAllEqual(sparse[replica].values, [0, 0, 1])
self.assertAllEqual(sparse[replica].dense_shape, [2, 2])
self.assertAllEqual(result[replica], [[0.0, 1.0], [3.0, 8.0]])
def test_composite_input_non_flat_output(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
with strategy.scope():
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
# Assumes dense_shape is (2, *)
looked_up = array_ops.gather(table, sparse.values)
segment_sum = math_ops.unsorted_segment_sum(
looked_up, sparse.indices[:, 0], 2)
return {"sparse": sparse, "segment_sum": segment_sum}
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(_):
return sparse_tensor.SparseTensor(
indices=array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64),
values=array_ops.constant([0, 0, 1], dtype=dtypes.int32),
dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64))
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
output = sparse_lookup(dataset)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(output["sparse"][replica],
sparse_tensor.SparseTensor)
self.assertAllEqual(output["sparse"][replica].indices,
[[0, 0], [1, 0], [1, 1]])
self.assertAllEqual(output["sparse"][replica].values, [0, 0, 1])
self.assertAllEqual(output["sparse"][replica].dense_shape, [2, 2])
self.assertAllEqual(output["segment_sum"][replica],
[[0.0, 1.0], [3.0, 8.0]])
def test_composite_input_dynamic_shapes_outside_compilation(
self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
if strategy.num_replicas_in_sync != 2:
self.skipTest("Test assumes two replicas.")
table = variables.Variable(
initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32)
@def_function.function
def sparse_lookup(iterator):
def tpu_function(sparse):
lookup = tpu.outside_compilation(
embedding_ops.safe_embedding_lookup_sparse, table, sparse)
return math_ops.reduce_sum(lookup, axis=0)
return strategy.experimental_local_results(
strategy.run(tpu_function, args=(next(iterator),)))
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(2)
def make_sparse(i):
indices = array_ops.constant([[0, 0], [1, 0], [1, 1]],
dtype=dtypes.int64)[0:2 + i]
values = array_ops.constant([0, 0, 1], dtype=dtypes.int32)[0:2 + i]
shape = [
array_ops.constant([2], dtype=dtypes.int64),
array_ops.expand_dims(1 + i, axis=0)
]
dense_shape = array_ops.concat(shape, axis=0)
return sparse_tensor.SparseTensor(
indices=indices, values=values, dense_shape=dense_shape)
return dataset.map(make_sparse)
dataset = iter(
strategy.distribute_datasets_from_function(
dataset_fn,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
result = sparse_lookup(dataset)
self.assertAllEqual(result, [[0.0, 2.0], [1.5, 5.0]])
def test_composite_input_with_non_flat_components(self, enable_packed_var):
strategy = get_tpu_strategy(enable_packed_var)
class TestCompositeTypeSpec(type_spec.TypeSpec):
def __init__(self, component_type_spec):
self._component_type_spec = component_type_spec
@property
def value_type(self):
return TestComposite
def _to_components(self, value):
return value.values
def _from_components(self, components):
return TestComposite(components[0], components[1][0], components[1][1])
@property
def _component_specs(self):
return [self._component_type_spec,
[self._component_type_spec, self._component_type_spec]]
def _serialize(self):
return (self._component_type_spec,)
class TestComposite(composite_tensor.CompositeTensor):
def __init__(self, value1, value2, value3):
self.values = [value1, [value2, value3]]
@property
def _type_spec(self):
return TestCompositeTypeSpec(
tensor_spec.TensorSpec.from_tensor(self.values[0]))
def _shape_invariant_to_type_spec(self, shape):
return [shape, [shape, shape]]
@def_function.function
def test_fn(test_composite):
def tpu_function(composite):
return (composite,
composite.values[0] + (
composite.values[1][0] + composite.values[1][1])/2)
return nest.map_structure(
strategy.experimental_local_results,
strategy.run(tpu_function, args=(test_composite,)))
a = array_ops.constant([0.1])
b = array_ops.constant([1.2])
c = array_ops.constant([-0.4])
test_composite = TestComposite(a, b, c)
composite, result = test_fn(test_composite)
# All replicas return identical results.
for replica in range(strategy.num_replicas_in_sync):
self.assertIsInstance(composite[replica], TestComposite)
self.assertAllEqual(composite[replica].values[0], a)
self.assertAllEqual(composite[replica].values[1][0], b)
self.assertAllEqual(composite[replica].values[1][1], c)
self.assertAllEqual(result[replica], array_ops.constant([0.50000006]))
def test_per_device_tracing_of_mirrored_variables(self, enable_packed_var):
# Define trace_count as a list to avoid python scoping error
trace_count = [0]
strategy = get_tpu_strategy(enable_packed_var)
with strategy.scope():
variable = variables.Variable(0.0)
@def_function.function
def add_one():
trace_count[0] = trace_count[0] + 1
return math_ops.add(variable, constant_op.constant(1.0))
@def_function.function
def update_variable():
for device in set(strategy.extended.worker_devices):
with ops.device(device):
add_one()
with strategy.scope():
update_variable.get_concrete_function()
self.assertLen(strategy.extended.worker_devices, trace_count[0])
class TPUStrategyDataPrefetchTest(test.TestCase):
def test_prefetch_to_device_default(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
# Check default, should prefetch to TPU.
dataset_item = next(iter(strategy.experimental_distribute_dataset(dataset)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "TPU")
def test_prefetch_to_device_tpu(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
input_options = distribute_lib.InputOptions(
experimental_fetch_to_device=True)
dataset_item = next(iter(strategy.experimental_distribute_dataset(
dataset, options=input_options)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "TPU")
def test_prefetch_to_device_cpu(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
# Should be CPU when prefetch_to_device is False.
input_options = distribute_lib.InputOptions(
experimental_fetch_to_device=False)
dataset_item = next(iter(strategy.experimental_distribute_dataset(
dataset, options=input_options)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "CPU")
def test_prefetch_to_device_sparse_dataset(self):
strategy = get_tpu_strategy()
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
values=[1, 2, 3],
dense_shape=[2, 2]))
dataset = dataset.repeat()
dataset = dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.experimental_distribute_dataset(dataset))
def test_prefetch_to_device_ragged_dataset(self):
strategy = get_tpu_strategy()
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
ragged_tensor.RaggedTensor.from_row_splits(
values=[1, 2, 3],
row_splits=[0, 2, 3]))
dataset = dataset.repeat()
dataset = dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.experimental_distribute_dataset(dataset))
def test_prefetch_to_device_sparse_dataset_fn(self):
strategy = get_tpu_strategy()
def dataset_fn(ctx):
del ctx
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
values=[1, 2, 3],
dense_shape=[2, 2]))
dataset = dataset.repeat()
return dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.distribute_datasets_from_function(dataset_fn))
def test_prefetch_to_device_ragged_dataset_fn(self):
strategy = get_tpu_strategy()
def dataset_fn(ctx):
del ctx
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
ragged_tensor.RaggedTensor.from_row_splits(
values=[1, 2, 3],
row_splits=[0, 2, 3]))
dataset = dataset.repeat()
return dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.distribute_datasets_from_function(dataset_fn))
class TPUStrategyDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase):
def test_update_config_proto(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
strategy = tpu_lib.TPUStrategyV2(resolver)
config_proto = config_pb2.ConfigProto()
cluster_spec = server_lib.ClusterSpec({"worker": ["fake1", "fake2"]})
with test.mock.patch.object(
resolver, "cluster_spec", return_value=cluster_spec):
new_config = strategy.update_config_proto(config_proto)
# Verify cluster_def.
self.assertProtoEquals(cluster_spec.as_cluster_def(),
new_config.cluster_def)
# Verify isolate_session_state
self.assertTrue(new_config.isolate_session_state)
def test_make_input_fn_iterable(self):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
distribution = get_tpu_strategy()
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
self._test_input_fn_iterable(distribution, input_fn, expected_values)
def test_make_input_fn_iterator(self):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
distribution = get_tpu_strategy()
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator,
distribution.extended.worker_devices,
expected_values)
def test_num_replicas_in_sync(self):
strategy = get_tpu_strategy()
self.assertEqual(2, strategy.num_replicas_in_sync)
def test_call_and_merge_exceptions(self):
strategy = get_tpu_strategy()
self._test_call_and_merge_exceptions(strategy)
def test_numpy_dataset(self):
strategy = get_tpu_strategy()
self._test_numpy_dataset(strategy, run_in_function=True)
def test_global_step_update(self):
strategy = get_tpu_strategy()
self._test_global_step_update(strategy)
def test_run(self):
strategy = get_tpu_strategy()
self._test_run(strategy, run_in_function=True)
def test_summary_for_replica_zero_only(self):
strategy = get_tpu_strategy()
self._test_summary_for_replica_zero_only(strategy)
def test_all_reduce_sum(self):
strategy = get_tpu_strategy()
self._test_all_reduce_sum(strategy, run_in_function=True)
def test_all_reduce_sum_gradients(self):
strategy = get_tpu_strategy()
self._test_all_reduce_sum_gradients(strategy, run_in_function=True)
def test_all_reduce_sum_gradient_tape(self):
strategy = get_tpu_strategy()
self._test_all_reduce_sum_gradient_tape(strategy, run_in_function=True)
def test_all_reduce_mean(self):
strategy = get_tpu_strategy()
self._test_all_reduce_mean(strategy, run_in_function=True)
def test_all_reduce_mean_gradients(self):
strategy = get_tpu_strategy()
self._test_all_reduce_mean_gradients(strategy, run_in_function=True)
def test_all_reduce_mean_gradient_tape(self):
strategy = get_tpu_strategy()
self._test_all_reduce_mean_gradient_tape(strategy, run_in_function=True)
def test_reduce(self):
strategy = get_tpu_strategy()
inputs = strategy.make_input_fn_iterator(
lambda _: dataset_ops.Dataset.from_tensor_slices([2., 3.]))
self.evaluate(inputs.initialize())
per_replica_outputs = strategy.run(
def_function.function(math_ops.square), args=(next(inputs),))
with strategy.scope():
mean = strategy.reduce(reduce_util.ReduceOp.MEAN, per_replica_outputs,
axis=None)
self.assertEqual(6.5, self.evaluate(mean))
def test_constraint(self):
strategy = get_tpu_strategy()
with strategy.scope():
variable = variables.Variable(initial_value=2.,
constraint=lambda x: 0. * x + 1.)
self.assertEqual(variable.value().numpy(), 2)
@def_function.function
def update_variable():
variable.assign_add(1)
variable.assign(variable.constraint(variable))
update_variable()
self.assertEqual(variable.value().numpy(), 1)
def test_trainable_variables(self):
strategy = get_tpu_strategy()
self._test_trainable_variable(strategy)
def test_model_parallelism(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 0], [0, 0, 0, 1]]])
strategy = tpu_lib.TPUStrategyV2(
resolver,
experimental_device_assignment=device_assignment)
with strategy.scope():
v = variables.Variable(2.)
with strategy.extended.experimental_logical_device(1):
w = variables.Variable(3.)
self.assertLen(strategy.experimental_local_results(v), 1)
self.assertLen(strategy.experimental_local_results(w), 1)
self.assertEqual("/job:localhost/replica:0/task:0/device:TPU:0",
strategy.experimental_local_results(v)[0].device)
self.assertEqual("/job:localhost/replica:0/task:0/device:TPU:1",
strategy.experimental_local_results(w)[0].device)
logical_devices = []
@def_function.function
def f(x):
replica_ctx = distribution_strategy_context.get_replica_context()
with replica_ctx.experimental_logical_device(0):
y = v * x
with replica_ctx.experimental_logical_device(1):
z = w * y
logical_devices.append((y.device, z.device))
return z
result = strategy.run(f, args=(5.,))
self.assertEqual(
[("/device:TPU_REPLICATED_CORE:0", "/device:TPU_REPLICATED_CORE:1")],
logical_devices)
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(30., self.evaluate(result))
def test_model_parallelism_checkpointing(self):
class PartitionedModel(module.Module):
def __init__(self, v, w):
super(PartitionedModel, self).__init__()
assert distribution_strategy_context.has_strategy()
strategy = distribution_strategy_context.get_strategy()
with strategy.extended.experimental_logical_device(0):
self.v = variables.Variable(v)
with strategy.extended.experimental_logical_device(1):
self.w = variables.Variable(w)
def __call__(self, x):
replica_ctx = distribution_strategy_context.get_replica_context()
with replica_ctx.experimental_logical_device(0):
y = self.v * x
with replica_ctx.experimental_logical_device(1):
z = self.w * y
return z
def change_weights_op(self, v_new, w_new):
return control_flow_ops.group([self.v.assign(v_new),
self.w.assign(w_new)])
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 0], [0, 0, 0, 1]]])
strategy = tpu_lib.TPUStrategyV2(
resolver,
experimental_device_assignment=device_assignment)
with strategy.scope():
model = PartitionedModel(2., 3.)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = util.Checkpoint(model=model)
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
checkpoint.save(file_prefix=checkpoint_prefix)
self.evaluate(model.change_weights_op(1., 4.))
result = strategy.run(def_function.function(model), args=(5.0,))
self.assertEqual(20., self.evaluate(result))
status = checkpoint.restore(
checkpoint_management.latest_checkpoint(checkpoint_dir))
status.run_restore_ops(sess) # must run restore op in non-eager mode.
status.assert_consumed()
status.assert_existing_objects_matched()
result = strategy.run(def_function.function(model), args=(5.0,))
self.assertEqual(30., self.evaluate(result))
class DeviceAssignmentTest(test.TestCase):
def test_core_assignment(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 0]]])
self.assertAllEqual([[[0, 0, 0, 0]]], device_assignment.core_assignment)
self.assertEqual(1, device_assignment.num_cores_per_replica)
self.assertEqual(1, device_assignment.num_replicas)
self.assertEqual("/task:0/device:TPU:0", device_assignment.tpu_device())
self.assertEqual("/task:0/device:CPU:0", device_assignment.host_device())
def test_device_assignment_strategy_properties(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 0]]])
strategy = tpu_lib.TPUStrategyV2(
resolver,
experimental_device_assignment=device_assignment)
self.assertEqual(strategy.extended.num_hosts, 1)
self.assertEqual(strategy.num_replicas_in_sync, 1)
self.assertEqual(strategy.extended.num_replicas_per_host, 1) # pylint: disable=protected-access
def test_device_assignment_constants(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = device_assignment_lib.DeviceAssignment(
topology,
core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
self.assertAllEqual([[[0, 0, 0, 0]]], device_assignment.core_assignment)
self.assertEqual(1, device_assignment.num_cores_per_replica)
self.assertEqual(1, device_assignment.num_replicas)
self.assertEqual("/task:0/device:TPU:0", device_assignment.tpu_device())
self.assertEqual("/task:0/device:CPU:0", device_assignment.host_device())
def test_variables_mismatched_device_assignment(self):
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
topology = tpu_strategy_util.initialize_tpu_system(resolver)
strategy0 = tpu_lib.TPUStrategyV2(resolver)
self.assertEqual(
("/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:1"),
strategy0.extended.worker_devices)
with strategy0.scope():
v = variables.Variable(1.)
v1_assign_op = strategy0.experimental_local_results(v)[1].assign(42.)
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.evaluate(v1_assign_op)
self.assertAllEqual([1., 42.],
self.evaluate(
strategy0.experimental_local_results(v)))
# Second strategy has devices reversed relative to the first.
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=[[[0, 0, 0, 1]], [[0, 0, 0, 0]]])
strategy1 = tpu_lib.TPUStrategyV2(
resolver,
experimental_device_assignment=device_assignment)
self.assertEqual(
("/job:localhost/replica:0/task:0/device:TPU:1",
"/job:localhost/replica:0/task:0/device:TPU:0"),
strategy1.extended.worker_devices)
v_read = strategy1.run(def_function.function(v.read_value))
with self.cached_session():
self.assertAllEqual([42., 1.],
self.evaluate(
strategy0.experimental_local_results(v_read)))
if __name__ == "__main__":
test.main()
| 34.918056
| 100
| 0.697784
|
794c549b74d8f857609565884071d79da4f1342f
| 6,512
|
py
|
Python
|
addons14/project_timesheet_time_control/wizards/hr_timesheet_switch.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/project_timesheet_time_control/wizards/hr_timesheet_switch.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/project_timesheet_time_control/wizards/hr_timesheet_switch.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
# Copyright 2019 Tecnativa - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class HrTimesheetSwitch(models.TransientModel):
_name = "hr.timesheet.switch"
_inherit = "account.analytic.line"
_description = "Helper to quickly switch between timesheet lines"
running_timer_id = fields.Many2one(
comodel_name="account.analytic.line",
string="Previous timer",
ondelete="cascade",
readonly=True,
default=lambda self: self._default_running_timer_id(),
help="This timer is running and will be stopped",
)
running_timer_start = fields.Datetime(
string="Previous timer start",
related="running_timer_id.date_time",
readonly=True,
)
running_timer_duration = fields.Float(
string="Previous timer duration",
compute="_compute_running_timer_duration",
help="When the previous timer is stopped, it will save this duration.",
)
# Redefine the relation to avoid using the same table as the parent model
tag_ids = fields.Many2many(relation="hr_timesheet_switch_line_tag_rel")
@api.model
def _default_running_timer_id(self, employee=None):
"""Obtain running timer."""
employee = employee or self.env.user.employee_ids
# Find running work
running = self.env["account.analytic.line"].search(
[
("date_time", "!=", False),
("employee_id", "in", employee.ids),
("id", "not in", self.env.context.get("resuming_lines", [])),
("project_id", "!=", False),
("unit_amount", "=", 0),
]
)
if len(running) > 1:
raise UserError(
_(
"%d running timers found. Cannot know which one to stop. "
"Please stop them manually."
)
% len(running)
)
return running
@api.depends("date_time", "running_timer_id")
def _compute_running_timer_duration(self):
"""Compute duration of running timer when stopped."""
for one in self:
one.running_timer_duration = one._duration(
one.running_timer_id.date_time,
one.date_time,
)
@api.model
def _closest_suggestion(self):
"""Find most similar account.analytic.line record."""
try:
active = self.env[self.env.context["active_model"]].browse(
self.env.context["active_id"]
)
except KeyError:
# If we don't know where the user is, we don't know what to suggest
return self.env["account.analytic.line"].browse()
# If you're browsing another account.analytic.line, that's the match
if active._name == "account.analytic.line":
return active
# If browsing other models, prepare a search
domain = [("employee_id", "in", self.env.user.employee_ids.ids)]
if active._name == "project.task":
domain.append(("task_id", "=", active.id))
elif active._name == "project.project":
domain += [
("project_id", "=", active.id),
("task_id", "=", False),
]
else:
# No clues for other records, sorry
return self.env["account.analytic.line"].browse()
return self.env["account.analytic.line"].search(
domain,
order="date_time DESC",
limit=1,
)
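# Illustrative behaviour (assumed context values): opened from a project.task
# form, _closest_suggestion() returns that task's most recent timesheet line for
# the current user's employees; from a project.project form it returns the
# newest project line without a task; from any other model it returns an empty
# recordset, and an account.analytic.line context returns the record itself.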
@api.model
def default_get(self, fields_list):
"""Return defaults depending on the context where it is called."""
result = super().default_get(fields_list)
inherited = self._closest_suggestion()
assert inherited._name == "account.analytic.line"
# Inherit all possible fields from that account.analytic.line record
if inherited:
# Convert inherited to RPC-style values
_fields = set(fields_list) & set(inherited._fields) - {
# These fields must always be reset
"id",
"amount",
"date_time",
"date_time_end",
"date",
"is_task_closed",
"unit_amount",
# This field is from sale_timesheet, which is not among
# this module's dependencies; ignoring it will let you
# resume an invoiced AAL if that module is installed,
# and it doesn't hurt here
"timesheet_invoice_id",
# These fields are from the hr_timesheet_activity_begin_end
# module. Unless ignored, these fields will cause a validation
# error because time_stop - time_start must equal duration.
"time_start",
"time_stop",
}
inherited.read(_fields)
values = inherited._convert_to_write(inherited._cache)
for field in _fields:
result[field] = values[field]
return result
def action_switch(self):
"""Stop old timer, start new one."""
self.ensure_one()
# Stop old timer
self.with_context(
resuming_lines=self.ids,
stop_dt=self.date_time,
).running_timer_id.button_end_work()
# Start new timer
_fields = self.env["account.analytic.line"]._fields.keys()
self.read(_fields)
values = self._convert_to_write(self._cache)
new = self.env["account.analytic.line"].create(
{field: value for (field, value) in values.items() if field in _fields}
)
# Display created timer record if requested
if self.env.context.get("show_created_timer"):
form_view = self.env.ref("hr_timesheet.hr_timesheet_line_form")
return {
"res_id": new.id,
"res_model": new._name,
"type": "ir.actions.act_window",
"view_mode": "form",
"view_type": "form",
"views": [(form_view.id, "form")],
}
# Close wizard and reload view
return {
"type": "ir.actions.act_multi",
"actions": [
{"type": "ir.actions.act_window_close"},
{"type": "ir.actions.act_view_reload"},
],
}
| 38.994012
| 83
| 0.565418
|
794c54c969fc1f854789e081caef8e7655f187e6
| 1,420
|
py
|
Python
|
scripts/Load_PRODUCT_DIM.py
|
shauwang/aws-glue-workshop
|
af28372902fc2173a57f220152bfacc5ceeb8bb1
|
[
"Apache-2.0"
] | 25
|
2020-05-27T02:58:42.000Z
|
2021-12-19T23:19:06.000Z
|
scripts/Load_PRODUCT_DIM.py
|
shauwang/aws-glue-workshop
|
af28372902fc2173a57f220152bfacc5ceeb8bb1
|
[
"Apache-2.0"
] | 5
|
2021-01-19T02:45:17.000Z
|
2022-02-13T23:23:50.000Z
|
scripts/Load_PRODUCT_DIM.py
|
shauwang/aws-glue-workshop
|
af28372902fc2173a57f220152bfacc5ceeb8bb1
|
[
"Apache-2.0"
] | 28
|
2020-06-23T14:32:02.000Z
|
2022-01-23T07:36:43.000Z
|
##
## Glue Job : TKO_Load_Product_Dim.py
##
## Glue boilerplate code
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import boto3, json
args = getResolvedOptions(sys.argv, ['JOB_NAME','S3_BUCKET'])
print (args['JOB_NAME']+" START...")
if 'sc' not in vars(): sc = SparkContext()
glueContext = GlueContext(SparkContext.getOrCreate())
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## Glue boilerplate code
s3bucketname=args['S3_BUCKET']
db_name='salesdb'
table1='product_category'
table2='product'
output_dir=f"s3://%s/data/sales_analytics/supplier_dim/"%s3bucketname
print (output_dir)
# Read the Source Tables
table1_dyf = glueContext.create_dynamic_frame.from_catalog(database = db_name, table_name = table1)
table2_dyf = glueContext.create_dynamic_frame.from_catalog(database = db_name, table_name = table2)
#Join the Source Tables
product_dim_dyf = Join.apply(table1_dyf,table2_dyf,
'category_id', 'category_id').drop_fields(['category_id'])
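# Illustrative result (assumed salesdb schema): each product row is enriched
# with its category columns; the duplicate join key category_id is dropped above.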
# Write the denormalized PRODUCT_DIM table in Parquet
glueContext.write_dynamic_frame.from_options(frame = product_dim_dyf, connection_type = "s3", connection_options = {"path": output_dir}, format = "parquet")
## Glue boilerplate code
job.commit()
| 30.212766
| 156
| 0.773944
|
794c55fc5fc9f44e397c9ca96762aca5939c8e32
| 7,484
|
py
|
Python
|
salt/returners/slack_returner.py
|
aletourneau/salt
|
d7013a2f64eb4b79592220d76274bc5dde609e08
|
[
"Apache-2.0"
] | 1
|
2020-10-02T02:29:25.000Z
|
2020-10-02T02:29:25.000Z
|
salt/returners/slack_returner.py
|
aletourneau/salt
|
d7013a2f64eb4b79592220d76274bc5dde609e08
|
[
"Apache-2.0"
] | null | null | null |
salt/returners/slack_returner.py
|
aletourneau/salt
|
d7013a2f64eb4b79592220d76274bc5dde609e08
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Return salt data via slack
.. versionadded:: Lithium
The following fields can be set in the minion conf file::
slack.channel (required)
slack.api_key (required)
slack.from_name (required)
slack.profile (optional)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
slack.channel
slack.api_key
slack.from_name
Slack settings may also be configured as::
slack:
channel: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
alternative.slack:
channel: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack_profile:
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack:
profile: slack_profile
channel: RoomName
alternative.slack:
profile: slack_profile
channel: RoomName
To use the Slack returner, append '--return slack' to the salt command. ex:
.. code-block:: bash
salt '*' test.ping --return slack
To use the alternative configuration, append '--return_config alternative' to the salt command. ex:
salt '*' test.ping --return slack --return_config alternative
'''
import salt.returners
import pprint
import requests
import logging
from urlparse import urljoin as _urljoin
from requests.exceptions import ConnectionError
log = logging.getLogger(__name__)
__virtualname__ = 'slack'
def _get_options(ret=None):
'''
Get the slack options from salt.
'''
defaults = {'channel': '#general'}
attrs = {'slack_profile': 'profile',
'channel': 'channel',
'from_name': 'from_name',
'api_key': 'api_key',
}
profile_attr = 'slack_profile'
profile_attrs = {'from_jid': 'from_jid',
'api_key': 'api_key',
'api_version': 'api_key'
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
profile_attr=profile_attr,
profile_attrs=profile_attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
return _options
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
def _query(function, api_key=None, method='GET', data=None):
'''
Slack object method function to construct and execute on the API URL.
:param api_key: The Slack api key.
:param function: The Slack api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
'''
headers = {}
query_params = {}
if data is None:
data = {}
ret = {'message': '',
'res': True}
slack_functions = {
'rooms': {
'request': 'channels.list',
'response': 'channels',
},
'users': {
'request': 'users.list',
'response': 'members',
},
'message': {
'request': 'chat.postMessage',
'response': 'channel',
},
}
if not api_key:
try:
options = __salt__['config.option']('slack')
if not api_key:
api_key = options.get('api_key')
except (NameError, KeyError, AttributeError):
log.error('No Slack api key found.')
ret['message'] = 'No Slack api key found.'
ret['res'] = False
return ret
api_url = 'https://slack.com'
base_url = _urljoin(api_url, '/api/')
path = slack_functions.get(function).get('request')
url = _urljoin(base_url, path, False)
query_params['token'] = api_key
try:
result = requests.request(
method=method,
url=url,
headers=headers,
params=query_params,
data=data,
verify=True,
)
except ConnectionError as e:
ret['message'] = e
ret['res'] = False
return ret
if result.status_code == 200:
result = result.json()
response = slack_functions.get(function).get('response')
if 'error' in result:
ret['message'] = result['error']
ret['res'] = False
return ret
ret['message'] = result.get(response)
return ret
elif result.status_code == 204:
return True
else:
log.debug(url)
log.debug(query_params)
log.debug(data)
log.debug(result)
if 'error' in result:
ret['message'] = result['error']
ret['res'] = False
return ret
ret['message'] = result
return ret
def _post_message(channel,
message,
from_name,
api_key=None):
'''
Send a message to a Slack channel.
:param channel: The channel name or id to send the message to.
:param message: The message to send to the Slack channel.
:param from_name: Specify who the message is from.
:param api_key: The Slack api key, if not specified in the configuration.
:return: Boolean if message was sent successfully.
'''
parameters = dict()
parameters['channel'] = channel
parameters['from'] = from_name
parameters['text'] = message
result = _query(function='message',
api_key=api_key,
method='POST',
data=parameters)
log.debug('result {0}'.format(result))
if result:
return True
else:
return False
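# Illustrative usage (assumed values): _post_message('#general', 'highstate done',
# 'salt-bot', api_key='xoxb-example') issues a POST to the chat.postMessage
# endpoint via _query() and returns True when Slack accepts the message.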
def returner(ret):
'''
Send a Slack message with the data
'''
_options = _get_options(ret)
channel = _options.get('channel')
from_name = _options.get('from_name')
api_key = _options.get('api_key')
if not channel:
log.error('slack.channel not defined in salt config')
return
if not from_name:
log.error('slack.from_name not defined in salt config')
return
if not api_key:
log.error('slack.api_key not defined in salt config')
return
message = ('id: {0}\r\n'
'function: {1}\r\n'
'function args: {2}\r\n'
'jid: {3}\r\n'
'return: {4}\r\n').format(
ret.get('id'),
ret.get('fun'),
ret.get('fun_args'),
ret.get('jid'),
pprint.pformat(ret.get('return')))
slack = _post_message(channel,
message,
from_name,
api_key)
return slack
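# Illustrative output (assumed job values): for a minion "web01" running
# test.ping, the posted message body resembles
# "id: web01\r\nfunction: test.ping\r\nfunction args: []\r\njid: ...\r\nreturn: True".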
| 27.718519
| 101
| 0.548904
|
794c56d4c4433f30309b8d7b4fae60ad3839fc4c
| 21,071
|
py
|
Python
|
check_cuda/controllers.py
|
vtpl1/check_cuda
|
b519877456b46ab4bbd4b4fb3ab3a4ccbd38def1
|
[
"MIT"
] | null | null | null |
check_cuda/controllers.py
|
vtpl1/check_cuda
|
b519877456b46ab4bbd4b4fb3ab3a4ccbd38def1
|
[
"MIT"
] | null | null | null |
check_cuda/controllers.py
|
vtpl1/check_cuda
|
b519877456b46ab4bbd4b4fb3ab3a4ccbd38def1
|
[
"MIT"
] | null | null | null |
import ctypes
import logging
import os
import platform
from typing import Dict, List, Union
import psutil
import pynvml as N
from cpuinfo import get_cpu_info
from singleton_decorator.decorator import singleton
import yaml
from .models import (ChannelAndNnModel, CpuInfo, CpuStatus, GpuInfo, GpuStatus, ModelCount, NnModelInfo, NnModelMaxChannelInfo, NnModelMaxChannelInfoList, ProcessStatus,
SystemInfo, SystemStatus)
# Some constants taken from cuda.h
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
NOT_SUPPORTED = 'Not Supported'
MB = 1024 * 1024
def ConvertSMVer2Cores(major, minor):
# Returns the number of CUDA cores per multiprocessor for a given
# Compute Capability version. There is no way to retrieve that via
# the API, so it needs to be hard-coded.
# See _ConvertSMVer2Cores in helper_cuda.h in NVIDIA's CUDA Samples.
return {
(1, 0): 8, # Tesla
(1, 1): 8,
(1, 2): 8,
(1, 3): 8,
(2, 0): 32, # Fermi
(2, 1): 48,
(3, 0): 192, # Kepler
(3, 2): 192,
(3, 5): 192,
(3, 7): 192,
(5, 0): 128, # Maxwell
(5, 2): 128,
(5, 3): 128,
(6, 0): 64, # Pascal
(6, 1): 128,
(6, 2): 128,
(7, 0): 64, # Volta
(7, 2): 64,
(7, 5): 64, # Turing
}.get((major, minor), 0)
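# Illustrative (assumed device): a Turing GPU with compute capability 7.5 maps to
# 64 CUDA cores per SM, so a device reporting 40 multiprocessors has
# 40 * ConvertSMVer2Cores(7, 5) = 2560 cores; unlisted (major, minor) pairs return 0.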
LOGGER = logging.getLogger(__name__)
# def get_public_ip() -> str:
# 'https://api.ipify.org?format=json'
@singleton
class GpuInfoFromNvml(object):
def __init__(self):
self.__is_nvml_loaded = False
self.__gpu_processes: List[ProcessStatus] = []
print("Starting NVML")
try:
N.nvmlInit()
self.__is_nvml_loaded = True
except Exception as e:
print(e)
def __del__(self):
print("Shutting down NVML")
if self.__is_nvml_loaded:
if N:
N.nvmlShutdown()
def _decode(self, b):
if isinstance(b, bytes):
return b.decode('utf-8') # for python3, to unicode
return b
def get_gpu_status_by_gpu_id(self, index) -> Union[GpuStatus, None]:
gpu_status = None
if self.__is_nvml_loaded:
gpu_status = GpuStatus(index=index)
"""Get one GPU information specified by nvml handle"""
handle = N.nvmlDeviceGetHandleByIndex(index)
gpu_status.name = self._decode(N.nvmlDeviceGetName(handle))
gpu_status.uuid = self._decode(N.nvmlDeviceGetUUID(handle))
try:
gpu_status.temperature = N.nvmlDeviceGetTemperature(handle, N.NVML_TEMPERATURE_GPU)
except N.NVMLError:
gpu_status.temperature = None # Not supported
try:
gpu_status.fan_speed = N.nvmlDeviceGetFanSpeed(handle)
except N.NVMLError:
gpu_status.fan_speed = None # Not supported
try:
memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes
gpu_status.memory_used = memory.used // MB
gpu_status.memory_total = memory.total // MB
except N.NVMLError:
gpu_status.memory_used = None # Not supported
gpu_status.memory_total = None
try:
utilization = N.nvmlDeviceGetUtilizationRates(handle)
if utilization:
gpu_status.utilization_gpu = utilization.gpu
else:
gpu_status.utilization_gpu = None
except N.NVMLError:
gpu_status.utilization_gpu = None # Not supported
try:
utilization_enc = N.nvmlDeviceGetEncoderUtilization(handle)
if utilization_enc:
gpu_status.utilization_enc = utilization_enc[0]
else:
gpu_status.utilization_enc = None
except N.NVMLError:
gpu_status.utilization_enc = None # Not supported
try:
utilization_dec = N.nvmlDeviceGetDecoderUtilization(handle)
if utilization_dec:
gpu_status.utilization_dec = utilization_dec[0]
else:
gpu_status.utilization_dec = None
except N.NVMLError:
gpu_status.utilization_dec = None # Not supported
try:
nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle)
except N.NVMLError:
nv_comp_processes = None # Not supported
try:
nv_graphics_processes = N.nvmlDeviceGetGraphicsRunningProcesses(handle)
except N.NVMLError:
nv_graphics_processes = None # Not supported
if nv_comp_processes is None and nv_graphics_processes is None:
processes = None
else:
nv_comp_processes = nv_comp_processes or []
nv_graphics_processes = nv_graphics_processes or []
# A single process might run in both graphics and compute mode;
# however, we display the process only once
seen_pids = set()
for nv_process in nv_comp_processes + nv_graphics_processes:
if nv_process.pid in seen_pids:
continue
seen_pids.add(nv_process.pid)
try:
process = get_process_status_by_pid(nv_process.pid)
# Bytes to MBytes
# if drivers are not in TCC mode this will be None.
usedmem = nv_process.usedGpuMemory // MB if \
nv_process.usedGpuMemory else None
process.gpu_memory_usage_mib = usedmem
process.gpu_id = index
self.__gpu_processes.append(process)
except psutil.NoSuchProcess:
# TODO: add some reminder for NVML broken context
# e.g. nvidia-smi reset or reboot the system
pass
return gpu_status
def get_gpu_info_by_gpu_id(self, index) -> Union[GpuInfo, None]:
gpu_info = None
if self.__is_nvml_loaded:
gpu_info = GpuInfo(gpu_id=index)
"""Get one GPU information specified by nvml handle"""
handle = N.nvmlDeviceGetHandleByIndex(index)
gpu_info.name = self._decode(N.nvmlDeviceGetName(handle))
gpu_info.uuid = self._decode(N.nvmlDeviceGetUUID(handle))
try:
memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes
gpu_info.free_memory_mib = (memory.total - memory.used) // MB
gpu_info.total_memory_mib = memory.total // MB
except N.NVMLError:
gpu_info.free_memory_mib = None # Not supported
gpu_info.total_memory_mib = None
return gpu_info
def get_process_status_running_on_gpus(self) -> List[ProcessStatus]:
return self.__gpu_processes
def get_gpu_status(self) -> List[GpuStatus]:
gpu_list = []
if self.__is_nvml_loaded:
device_count = N.nvmlDeviceGetCount()
self.__gpu_processes.clear()
for index in range(device_count):
gpu_status = self.get_gpu_status_by_gpu_id(index)
if gpu_status:
gpu_list.append(gpu_status)
return gpu_list
def get_gpu_info(self) -> List[GpuInfo]:
gpu_list = []
if self.__is_nvml_loaded:
device_count = N.nvmlDeviceGetCount()
for index in range(device_count):
gpu_status = self.get_gpu_info_by_gpu_id(index)
if gpu_status:
gpu_list.append(gpu_status)
return gpu_list
@singleton
class GpuInfoFromCudaLib:
def __init__(self):
self.__cuda = None
self.__nvidia_device_list: List[GpuInfo] = []
def get_gpu_info(self) -> List[GpuInfo]:
if self.__cuda is None:
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
self.__cuda = ctypes.CDLL(libname)
LOGGER.info('Loading cuda libraries')
except OSError:
continue
else:
break
if self.__cuda is not None:
nGpus = ctypes.c_int()
name = b' ' * 100
cc_major = ctypes.c_int()
cc_minor = ctypes.c_int()
cores = ctypes.c_int()
threads_per_core = ctypes.c_int()
clockrate = ctypes.c_int()
freeMem = ctypes.c_size_t()
totalMem = ctypes.c_size_t()
result = ctypes.c_int()
device = ctypes.c_int()
context = ctypes.c_void_p()
error_str = ctypes.c_char_p()
is_continue = True
while is_continue:
is_continue = False
result = self.__cuda.cuInit(0)
if result != CUDA_SUCCESS:
self.__cuda.cuGetErrorString(result, ctypes.byref(error_str))
LOGGER.error("cuInit failed with error code %d: %s" % (result, error_str.value.decode()))
break
result = self.__cuda.cuDeviceGetCount(ctypes.byref(nGpus))
if result != CUDA_SUCCESS:
self.__cuda.cuGetErrorString(result, ctypes.byref(error_str))
LOGGER.error("cuDeviceGetCount failed with error code %d: %s" %
(result, error_str.value.decode()))
break
LOGGER.debug("Found %d device(s)." % nGpus.value)
for i in range(nGpus.value):
cuda_device_name = ''
cuda_compute_capability_major = 0
cuda_compute_capability_minor = 0
cuda_cores = 0
cuda_concurrent_threads = 0
cuda_gpu_clock_mhz = 0
cuda_memory_clock_mhz = 0
cuda_total_memory_mib = 0
cuda_free_memory_mib = 0
result = self.__cuda.cuDeviceGet(ctypes.byref(device), i)
if result != CUDA_SUCCESS:
self.__cuda.cuGetErrorString(result, ctypes.byref(error_str))
LOGGER.error("cuDeviceGet failed with error code %d: %s" %
(result, error_str.value.decode()))
break
LOGGER.debug("Nvidia Device: %d" % i)
if self.__cuda.cuDeviceGetName(ctypes.c_char_p(name), len(name), device) == CUDA_SUCCESS:
cuda_device_name = (name.split(b'\0', 1)[0].decode())
LOGGER.debug(" Name: %s" % cuda_device_name)
if self.__cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor),
device) == CUDA_SUCCESS:
cuda_compute_capability_major = cc_major.value
cuda_compute_capability_minor = cc_minor.value
LOGGER.debug(" Compute Capability: %d.%d" %
(cuda_compute_capability_major, cuda_compute_capability_minor))
if self.__cuda.cuDeviceGetAttribute(ctypes.byref(cores),
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
device) == CUDA_SUCCESS:
LOGGER.debug(" Multiprocessors: %d" % cores.value)
cuda_cores = cores.value * ConvertSMVer2Cores(cc_major.value, cc_minor.value)
LOGGER.debug(" CUDA Cores: %s" % (cuda_cores or "unknown"))
if self.__cuda.cuDeviceGetAttribute(ctypes.byref(threads_per_core),
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR,
device) == CUDA_SUCCESS:
cuda_concurrent_threads = cores.value * threads_per_core.value
LOGGER.debug(" Concurrent threads: %d" % (cuda_concurrent_threads))
if self.__cuda.cuDeviceGetAttribute(ctypes.byref(clockrate), CU_DEVICE_ATTRIBUTE_CLOCK_RATE,
device) == CUDA_SUCCESS:
cuda_gpu_clock_mhz = clockrate.value / 1000.
LOGGER.debug(" GPU clock: %g MHz" % (cuda_gpu_clock_mhz))
if self.__cuda.cuDeviceGetAttribute(ctypes.byref(clockrate),
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE,
device) == CUDA_SUCCESS:
cuda_memory_clock_mhz = clockrate.value / 1000.
LOGGER.debug(" Memory clock: %g MHz" % (cuda_memory_clock_mhz))
result = self.__cuda.cuCtxCreate(ctypes.byref(context), 0, device)
if result != CUDA_SUCCESS:
self.__cuda.cuGetErrorString(result, ctypes.byref(error_str))
LOGGER.error("cuCtxCreate failed with error code %d: %s" %
(result, error_str.value.decode()))
else:
result = self.__cuda.cuMemGetInfo(ctypes.byref(freeMem), ctypes.byref(totalMem))
if result == CUDA_SUCCESS:
cuda_total_memory_mib = totalMem.value / 1024**2
LOGGER.debug(" Total Memory: %ld MiB" % (cuda_total_memory_mib))
cuda_free_memory_mib = freeMem.value / 1024**2
LOGGER.debug(" Free Memory: %ld MiB" % (cuda_free_memory_mib))
else:
self.__cuda.cuGetErrorString(result, ctypes.byref(error_str))
LOGGER.error("cuMemGetInfo failed with error code %d: %s" %
(result, error_str.value.decode()))
self.__cuda.cuCtxDetach(context)
self.__nvidia_device_list.append(GpuInfo(
i,
cuda_device_name,
cuda_compute_capability_major,
cuda_compute_capability_minor,
cuda_cores,
cuda_concurrent_threads,
cuda_gpu_clock_mhz,
cuda_memory_clock_mhz,
cuda_total_memory_mib,
cuda_free_memory_mib,
))
return self.__nvidia_device_list
def get_process_status_by_pid(pid) -> ProcessStatus:
ps_process = psutil.Process(pid=pid)
process = _extract_process_info(ps_process)
return process
def get_process_status_by_name(name='python3') -> List[ProcessStatus]:
process_list = []
for ps_process in psutil.process_iter():
name_, exe, cmdline = "", "", []
try:
name_ = ps_process.name()
cmdline = ps_process.cmdline()
exe = ps_process.exe()
except (psutil.AccessDenied, psutil.ZombieProcess):
pass
except psutil.NoSuchProcess:
continue
if len(cmdline):
if name == name_ or cmdline[0] == name or os.path.basename(exe) == name:
process_list.append(_extract_process_info(ps_process))
return process_list
def get_process_status_running_on_gpus() -> List[ProcessStatus]:
return GpuInfoFromNvml().get_process_status_running_on_gpus()
def _extract_process_info(ps_process) -> ProcessStatus:
process = ProcessStatus()
try:
process.username = ps_process.username()
except psutil.AccessDenied:
pass
# cmdline returns the full path; as in `ps -o comm`, keep short command names.
_cmdline = None
try:
_cmdline = ps_process.cmdline()
except psutil.AccessDenied:
pass
if not _cmdline:
# sometimes, zombie or unknown (e.g. [kworker/8:2H])
process.command = '?'
process.full_command = ['?']
else:
process.command = os.path.basename(_cmdline[0])
process.full_command = _cmdline
try:
process.cpu_percent = ps_process.cpu_percent() / psutil.cpu_count()
process.cpu_memory_usage_mib = round((ps_process.memory_percent() / 100.0) *
psutil.virtual_memory().total // MB)
except psutil.AccessDenied:
pass
process.pid = ps_process.pid
return process
def get_process_status() -> List[ProcessStatus]:
ret = get_process_status_running_on_gpus()
if not len(ret):
ret = get_process_status_by_name()
return ret
def get_cpu_status() -> CpuStatus:
return CpuStatus(cpu_percent=psutil.cpu_percent(),
cpu_memory_usage_percent=psutil.virtual_memory().percent)
def get_gpu_status() -> List[GpuStatus]:
return GpuInfoFromNvml().get_gpu_status()
def get_gpu_info() -> List[GpuInfo]:
return GpuInfoFromNvml().get_gpu_info()
def get_system_status() -> SystemStatus:
return SystemStatus(cpu=get_cpu_status(), gpus=get_gpu_status(), processes=get_process_status())
def get_cpu() -> CpuInfo:
cpu = CpuInfo()
try:
cpu_info = get_cpu_info()
cpu.name = cpu_info["brand_raw"]
cpu.frequency = cpu_info["hz_advertised_friendly"]
cpu.arch = cpu_info["arch"]
cpu.bits = cpu_info["bits"]
cpu.count = cpu_info["count"]
cpu.vendor_id = cpu_info["vendor_id_raw"]
except AttributeError as e:
LOGGER.fatal(e)
return cpu
def get_system_info() -> SystemInfo:
return SystemInfo(host_name=platform.uname().node, os=platform.platform(), cpu=get_cpu(), gpus=get_gpu_info())
@singleton
class ChannelGpuManager:
"""
Assign GPUs to (channel, model) pairs round-robin and track per-pair usage counts.
"""
def __init__(self) -> None:
self.channel_to_gpu_map: Dict[ChannelAndNnModel, ModelCount] = {}
self.gpu_id_generator = 0
self.configuration_file_name = self.__class__.__name__ + ".yml"
self.model_list = self.__read_default_models()
self.number_of_gpus = len(get_gpu_status())
def __write_default_models(self) -> NnModelMaxChannelInfoList:
model_list = NnModelMaxChannelInfoList()
model_list.models.append(NnModelMaxChannelInfo(key=NnModelInfo(75, 416, 416), max_channel=2))
model_list.models.append(NnModelMaxChannelInfo(key=NnModelInfo(76, 416, 416), max_channel=3))
with open(self.configuration_file_name, 'w') as outfile:
yaml.dump(model_list.to_dict(), outfile)
return model_list
def __read_default_models(self) -> NnModelMaxChannelInfoList:
model_list = None
try:
with open(self.configuration_file_name, 'r') as infile:
model_list = NnModelMaxChannelInfoList.from_dict(yaml.safe_load(infile))
except FileNotFoundError:
pass
if not model_list:
model_list = self.__write_default_models()
if not len(model_list.models):
model_list = self.__write_default_models()
return model_list
def get_next_gpu_id(self) -> int:
ret = self.gpu_id_generator
if self.number_of_gpus:
self.gpu_id_generator = (self.gpu_id_generator + 1) % self.number_of_gpus
return ret
def get_gpu_id_for_the_channel(channel_id: int, purpose: int, width: int, height: int, media_tpe: int = 2) -> int:
candidate = ChannelAndNnModel(channel_id, NnModelInfo(purpose, width, height))
if candidate in ChannelGpuManager().channel_to_gpu_map.keys():
x = ChannelGpuManager().channel_to_gpu_map[candidate]
x.count = x.count + 1
else:
x = ModelCount(gpu_id=ChannelGpuManager().get_next_gpu_id())
ChannelGpuManager().channel_to_gpu_map[candidate] = x
print(candidate, x)
return x.gpu_id
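# Illustrative (assumed 2-GPU host): three distinct (channel, model) pairs are
# handed GPUs 0, 1, 0 by the round-robin counter; asking again for a pair that is
# already mapped only increments its ModelCount.count and reuses its original GPU.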
| 41.724752
| 169
| 0.557686
|
794c576f792f8301f9625e49cee825382ba1bfd3
| 3,715
|
py
|
Python
|
spredsheet_qt.py
|
riiy/learn_python
|
f087ff3d504bf7d73d1d45f56eafd6de5ec9b661
|
[
"Apache-2.0"
] | 1
|
2017-05-02T10:34:01.000Z
|
2017-05-02T10:34:01.000Z
|
spredsheet_qt.py
|
congminghaoxue/learn_python
|
f087ff3d504bf7d73d1d45f56eafd6de5ec9b661
|
[
"Apache-2.0"
] | null | null | null |
spredsheet_qt.py
|
congminghaoxue/learn_python
|
f087ff3d504bf7d73d1d45f56eafd6de5ec9b661
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import re
import sys
from collections import ChainMap
import math
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QMainWindow, QTableWidget,
QTableWidgetItem, QItemDelegate, QLineEdit)
cellre = re.compile(r'\b[A-Z][0-9]\b')
def cellname(i, j):
return f'{chr(ord("A")+j)}{i+1}'
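# Illustrative: cellname(0, 0) == "A1" and cellname(4, 2) == "C5"; cellre above
# matches such single-letter, single-digit references inside formulas.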
class SpreadSheetDelegate(QItemDelegate):
def __init__(self, parent=None):
super(SpreadSheetDelegate, self).__init__(parent)
def createEditor(self, parent, styleOption, index):
editor = QLineEdit(parent)
editor.editingFinished.connect(self.commitAndCloseEditor)
return editor
def commitAndCloseEditor(self):
editor = self.sender()
self.commitData.emit(editor)
self.closeEditor.emit(editor, QItemDelegate.NoHint)
def setEditorData(self, editor, index):
editor.setText(index.model().data(index, Qt.EditRole))
def setModelData(self, editor, model, index):
model.setData(index, editor.text())
class SpreadSheetItem(QTableWidgetItem):
def __init__(self, siblings):
super(SpreadSheetItem, self).__init__()
self.siblings = siblings
self.value = 0
self.deps = set()
self.reqs = set()
def formula(self):
return super().data(Qt.DisplayRole)
def data(self, role):
if role == Qt.EditRole:
return self.formula()
if role == Qt.DisplayRole:
return self.display()
return super(SpreadSheetItem, self).data(role)
def calculate(self):
formula = self.formula()
if formula is None or formula == '':
self.value = 0
return
currentreqs = set(cellre.findall(formula))
name = cellname(self.row(), self.column())
# Add this cell to the new requirement's dependents
for r in currentreqs - self.reqs:
self.siblings[r].deps.add(name)
# Remove this cell from the dependents of cells no longer referenced
for r in self.reqs - currentreqs:
self.siblings[r].deps.remove(name)
# Look up the values of our required cells
reqvalues = {r: self.siblings[r].value for r in currentreqs}
# Build an environment with these values and basic math functions
environment = ChainMap(math.__dict__, reqvalues)
# Note that eval is DANGEROUS and should not be used in production
self.value = eval(formula, {}, environment)
self.reqs = currentreqs
def propagate(self):
for d in self.deps:
self.siblings[d].calculate()
self.siblings[d].propagate()
def display(self):
self.calculate()
self.propagate()
return str(self.value)
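# Illustrative flow (assumed cell contents): if A1 holds "2" and B1 holds
# "A1 * 3", displaying B1 builds reqvalues = {"A1": 2}, evaluates the formula to
# 6, and records B1 in A1's deps so later edits to A1 propagate back into B1.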
class SpreadSheet(QMainWindow):
def __init__(self, rows, cols, parent=None):
super(SpreadSheet, self).__init__(parent)
self.rows = rows
self.cols = cols
self.cells = {}
self.create_widgets()
def create_widgets(self):
table = self.table = QTableWidget(self.rows, self.cols, self)
headers = [chr(ord('A') + j) for j in range(self.cols)]
table.setHorizontalHeaderLabels(headers)
table.setItemDelegate(SpreadSheetDelegate(self))
for i in range(self.rows):
for j in range(self.cols):
cell = SpreadSheetItem(self.cells)
self.cells[cellname(i, j)] = cell
self.table.setItem(i, j, cell)
self.setCentralWidget(table)
if __name__ == '__main__':
app = QApplication(sys.argv)
sheet = SpreadSheet(5, 5)
sheet.resize(520, 200)
sheet.show()
sys.exit(app.exec_())
| 28.143939
| 74
| 0.625034
|
794c578d6bd3227e03869efe7e0a01075c44ce5d
| 73,133
|
py
|
Python
|
nova/tests/test_quota.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_quota.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_quota.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import compute
from nova.compute import instance_types
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
from nova import volume
FLAGS = flags.FLAGS
class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
quota_gigabytes=20,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
# Apparently needed by the RPC tests...
self.network = self.start_service('network')
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry"""
if (topic == FLAGS.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
context,
msg['args']['request_spec'],
None)
return [scheduler_driver.encode_instance(instance)]
else:
return orig_rpc_call(context, topic, msg)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(QuotaIntegrationTestCase, self).tearDown()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
def test_too_many_instances(self):
instance_uuids = []
for i in range(FLAGS.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
for instance_uuid in instance_uuids:
db.instance_destroy(self.context, instance_uuid)
def test_too_many_cores(self):
instance = self._create_instance(cores=4)
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
db.instance_destroy(self.context, instance['uuid'])
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
metadata = {}
for i in range(FLAGS.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid,
metadata=metadata)
def _create_with_injected_files(self, files):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type, image_href=image_uuid,
injected_files=files)
def test_no_injected_files(self):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
instance_type=inst_type,
image_href=image_uuid)
def test_max_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
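# Test doubles for the quota-engine tests below: FakeContext stands in for a
# request context (project_id, quota_class, elevated() admin copy), while
# FakeDriver records every driver call in self.called and serves canned data,
# so the tests can assert exactly how QuotaEngine delegates to its driver.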
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_class=None, reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_class = by_class or {}
self.reservations = reservations or []
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_defaults(self, context, resources):
self.called.append(('get_defaults', context, resources))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages))
return resources
def limit_check(self, context, resources, values):
self.called.append(('limit_check', context, resources, values))
def reserve(self, context, resources, deltas, expire=None):
self.called.append(('reserve', context, resources, deltas, expire))
return self.reservations
def commit(self, context, reservations):
self.called.append(('commit', context, reservations))
def rollback(self, context, reservations):
self.called.append(('rollback', context, reservations))
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, None)
self.assertEqual(resource.default, -1)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, 10)
def test_with_flag_no_quota(self):
self.flags(quota_instances=-1)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, -1)
def test_quota_no_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 10)
def test_quota_with_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_no_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 20)
def test_quota_with_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
),
by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_override_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(quota_value, 20)
def test_quota_with_project_override_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(quota_value, 20)
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver))
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='nova.tests.test_quota.FakeDriver')
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, FakeDriver))
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual(quota_obj._resources, {})
self.assertEqual(quota_obj._driver, FakeDriver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(quota_obj._resources, dict(test_resource=resource))
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'),
]
quota_obj.register_resources(resources)
self.assertEqual(quota_obj._resources, dict(
test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2],
))
def test_sync_predeclared(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resource = quota.ReservableResource('test_resource', spam)
quota_obj.register_resource(resource)
self.assertEqual(resource.sync, spam)
def test_sync_multi(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resources = [
quota.ReservableResource('test_resource1', spam),
quota.ReservableResource('test_resource2', spam),
quota.ReservableResource('test_resource3', spam),
quota.ReservableResource('test_resource4', spam),
]
quota_obj.register_resources(resources[:2])
self.assertEqual(resources[0].sync, spam)
self.assertEqual(resources[1].sync, spam)
self.assertEqual(resources[2].sync, spam)
self.assertEqual(resources[3].sync, spam)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual(driver.called, [
('get_by_project', context, 'test_project', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual(driver.called, [
('get_by_class', context, 'test_class', 'test_resource'),
])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'),
]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual(driver.called, [
('get_defaults', context, quota_obj._resources),
])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
('get_class_quotas', context, quota_obj._resources,
'test_class', True),
('get_class_quotas', context, quota_obj._resources,
'test_class', False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_project_quotas', context, quota_obj._resources,
'test_project', None, True, True),
('get_project_quotas', context, quota_obj._resources,
'test_project', 'test_class', False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual(args, (True,))
self.assertEqual(kwargs, dict(foo='bar'))
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(result, 5)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
('limit_check', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
)),
])
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=[
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), 3600),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('commit', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
self.assertEqual(driver.called, [
('destroy_all_by_project', context, 'test_project'),
])
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual(driver.called, [
('expire', context),
])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(quota_obj.resources,
['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_volumes=10,
quota_gigabytes=1000,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_bytes=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
timeutils.set_time_override()
def tearDown(self):
timeutils.clear_time_override()
super(DbQuotaDriverTestCase, self).tearDown()
def test_get_defaults(self):
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
volumes=10,
gigabytes=500,
floating_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
gigabytes=50,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
volumes=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
def test_get_project_quotas(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
volumes=dict(
limit=10,
),
gigabytes=dict(
limit=50,
),
floating_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'volumes', 'gigabytes',
'floating_ips', 'security_groups'],
True)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
security_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules'], False)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
class FakeSession(object):
def begin(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
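    # As exercised by the tests below, quota_reserve refreshes a usage record
    # via the resource's sync function when it is missing, negative, stale
    # (max_age) or due for refresh (until_refresh), raises OverQuota when a
    # positive delta would exceed the quota, and otherwise creates one
    # reservation per resource and returns the reservation UUIDs.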
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
def make_sync(res_name):
def sync(context, project_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return sync
self.resources = {}
for res_name in ('instances', 'cores', 'ram'):
res = quota.ReservableResource(res_name, make_sync(res_name))
self.resources[res_name] = res
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
def fake_get_session():
return FakeSession()
def fake_get_quota_usages(context, session):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
reserved, until_refresh, session=None,
save=True):
quota_usage_ref = self._make_quota_usage(
project_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
timeutils.set_time_override()
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages_created, [
dict(resource='instances',
project_id='test_project',
in_use=0,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=0,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=0,
reserved=2 * 1024,
until_refresh=None),
])
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages_created['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages_created['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages_created['ram'],
delta=2 * 1024),
])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
self.init_usage('test_project', 'ram', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
self.init_usage('test_project', 'instances', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'cores', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'ram', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_no_refresh(self):
self.init_usage('test_project', 'instances', 3, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 3, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=3,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=3,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_unders(self):
self.init_usage('test_project', 'instances', 1, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=1,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=1 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=-2 * 1024),
])
def test_quota_reserve_overs(self):
self.init_usage('test_project', 'instances', 4, 0)
self.init_usage('test_project', 'cores', 8, 0)
self.init_usage('test_project', 'ram', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=4,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=8,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=10 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
self.init_usage('test_project', 'instances', 10, 0)
self.init_usage('test_project', 'cores', 20, 0)
self.init_usage('test_project', 'ram', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=10,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=20,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=20 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
project_id='test_project',
delta=-2 * 1024),
])
| 38.289529 | 79 | 0.521994 |
794c57cec824ac193511be8b15418c2a80267b82 | 18,868 | py | Python | tests/test_implementations/api_test/test_delete_many_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | ["MIT"] | null | null | null | tests/test_implementations/api_test/test_delete_many_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | ["MIT"] | null | null | null | tests/test_implementations/api_test/test_delete_many_api.py | fossabot/FastAPIQuickCRUD | 69226ec9959dfed41fdfe69f59d8c622bd3726fb | ["MIT"] | null | null | null |
import json
from collections import OrderedDict
from starlette.testclient import TestClient
from src.fastapi_quickcrud import sqlalchemy_to_pydantic
from src.fastapi_quickcrud.crud_router import crud_router_builder
from src.fastapi_quickcrud.crud_router import CrudService
from src.fastapi_quickcrud.misc.type import CrudMethods
from tests.test_implementations.api_test import get_transaction_session, app, UntitledTable256
UntitledTable256_service = CrudService(model=UntitledTable256)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_one_model = api_model[CrudMethods.UPSERT_ONE].__dict__
# assert create_one_model['requestModel'] or create_one_model['responseModel']
# create_one_request_model = deepcopy(create_one_model['requestModel'].__dict__['__fields__'])
# create_one_response_model = deepcopy(create_one_model['responseModel'].__dict__['__fields__'])
# Request Test
# assert create_one_request_model.pop('on_conflict', False)
# for k, v in create_one_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Test
# for k, v in create_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_create_one = crud_router_builder(db_session=get_transaction_session,
crud_service=UntitledTable256_service,
crud_models=UntitledTable256Model,
prefix="/test_creation_one",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.UPSERT_MANY,
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# create_many_model = api_model[CrudMethods.UPSERT_MANY].__dict__
# assert create_many_model['requestModel'] or create_many_model['responseModel']
# create_many_request_model = deepcopy(create_many_model['requestModel'].__dict__['__fields__'])
# create_many_response_model = deepcopy(create_many_model['responseModel'].__dict__['__fields__'])
#
# # Request Model Test
# assert create_many_request_model.pop('on_conflict', None)
# insert_many_model = create_many_request_model['insert'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in insert_many_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
#
# # Response Model Test
# for k, v in create_many_response_model.items():
# create_many_response_model_item = v.type_.__dict__['__fields__']
# for k, v in create_many_response_model_item.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_create_many = crud_router_builder(db_session=get_transaction_session,
crud_service=UntitledTable256_service,
crud_models=UntitledTable256Model,
prefix="/test_creation_many",
tags=["test"]
)
# Response Mode Test
# response_many = create_many_response_model['__root__'].sub_fields[0].outer_type_.__dict__['__fields__']
# for k, v in response_many.items():
# assert not v.required
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.POST_REDIRECT_GET
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# Model Test
# api_model = UntitledTable256Model.__dict__['POST']
# assert api_model
# post_redirect_get_model = api_model[CrudMethods.POST_REDIRECT_GET].__dict__
# assert post_redirect_get_model['requestModel'] or post_redirect_get_model['responseModel']
# post_redirect_get_request_model = deepcopy(post_redirect_get_model['requestModel'].__dict__['__fields__'])
# post_redirect_get_response_model = deepcopy(post_redirect_get_model['responseModel'].__dict__['__fields__'])
# Request Model Test
# for k, v in post_redirect_get_request_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# Response Model Test
# for k, v in post_redirect_get_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
# for k, v in post_redirect_get_response_model.items():
# assert v.required
test_post_and_redirect_get = crud_router_builder(db_session=get_transaction_session,
crud_service=UntitledTable256_service,
crud_models=UntitledTable256Model,
prefix="/test_post_direct_get",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.FIND_ONE
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_get_data = crud_router_builder(db_session=get_transaction_session,
crud_service=UntitledTable256_service,
crud_models=UntitledTable256Model,
prefix="/test",
tags=["test"]
)
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
crud_methods=[
CrudMethods.DELETE_MANY
],
exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
# # # Model Test
# api_model = UntitledTable256Model.__dict__['GET']
# assert api_model
# get_one_model = api_model[CrudMethods.FIND_ONE].__dict__
# assert get_one_model['requestModel'] or get_one_model['responseModel']
# get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__'])
# get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__'])
# primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']]
# assert not primary_key_of_get_sql_schema.required
# get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None)
# for k, v in get_one_request_model.items():
# assert not v.required
# # FIXME some thing may not require
# for k, v in get_one_response_model.items():
# sql_schema = UntitledTable256.__dict__[v.name].comparator
#
# if sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default:
# assert not v.required
# elif sql_schema.nullable:
# assert not v.required
# elif not sql_schema.nullable:
# assert v.required
# elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default:
# assert v.required
# else:
# print(f"{v.name=}")
# print(f"{v.required=}")
# print(f"{v.default=}")
test_delete_data = crud_router_builder(db_session=get_transaction_session,
crud_service=UntitledTable256_service,
crud_models=UntitledTable256Model,
prefix="/test_delete_many",
tags=["test"]
)
for router in [test_post_and_redirect_get, test_delete_data, test_create_one, test_create_many, test_get_data]:
    app.include_router(router)
client = TestClient(app)
primary_key_name = UntitledTable256.primary_key_of_table
unique_fields = UntitledTable256.unique_fields
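# A brief note on the test below (descriptive comment, added for readability): it POSTs three
# rows through the bulk-insert router, collects the returned primary keys, then issues a DELETE
# against /test_delete_many using range (``____from``/``____to``), list (``____list``) and
# string (``____str``) query filters that cover those rows, and finally asserts that the
# ``x-total-count`` response header reports 3 deleted rows.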
def test_create_many_and_delete_many():
headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
}
data = { "insert": [ { "bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z", "timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string", "array_value": [ 0 ],
"array_str__value": [ "string" ], "time_value": "18:18:18" , "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string", "time_value": "18:18:18",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "timetz_value": "18:18:18+00:00"},
{"bool_value": True, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0,
"float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0,
"json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string",
"timestamp_value": "2021-07-24T02:54:53.285Z",
"timestamptz_value": "2021-07-24T02:54:53.285Z",
"uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string",
"array_value": [0], "array_str__value": ["string"], "time_value": "18:18:18", "timetz_value": "18:18:18+00:00"},
] }
response = client.post('/test_creation_many', headers=headers, data=json.dumps(data))
assert response.status_code == 201
insert_response_data = response.json()
primary_key_list = [i[primary_key_name] for i in insert_response_data]
min_key = min(primary_key_list)
max_key = max(primary_key_list)
params = {"primary_key____from": min_key,
"primary_key____to": max_key,
"bool_value____list":True,
"char_value____str": 'string%',
"char_value____str_____matching_pattern": 'case_sensitive',
"date_value____from": "2021-07-22",
"date_value____to": "2021-07-25",
"float4_value____from": -1,
"float4_value____to": 2,
"float4_value____list": 0,
"float8_value____from": -1,
"float8_value____to": 2,
"float8_value____list": 0,
"int2_value____from": -1,
"int2_value____to": 9,
"int2_value____list": 0,
"int4_value____from": -1,
"int4_value____to": 9,
"int4_value____list": 0,
"int8_value____from": -1,
"int8_value____to": 9,
"int8_value____list": 0,
"interval_value____from": -1,
"interval_value____to": 9,
"interval_value____list": 0,
"numeric_value____from": -1,
"numeric_value____to": 9,
"numeric_value____list": 0,
"text_value____list": "string",
"time_value____from": '18:18:18',
"time_value____to": '18:18:18',
"time_value____list": '18:18:18',
"timestamp_value_value____from": "2021-07-24T02:54:53.285",
"timestamp_value_value____to": "2021-07-24T02:54:53.285",
"timestamp_value_value____list": "2021-07-24T02:54:53.285",
"timestamptz_value_value____from": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____to": "2021-07-24T02:54:53.285Z",
"timestamptz_value_value____list": "2021-07-24T02:54:53.285Z",
"uuid_value_value____list": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"time_value____from": '18:18:18+00:00',
"time_value____to": '18:18:18+00:00',
"time_value____list": '18:18:18+00:00',
"varchar_value____str": 'string',
"varchar_value____str_____matching_pattern": 'case_sensitive',
"varchar_value____list": 'string',
}
from urllib.parse import urlencode
query_string = urlencode(OrderedDict(**params))
response = client.delete(f'/test_delete_many?{query_string}')
assert response.status_code == 200
assert response.headers['x-total-count'] == '3'
| 49.783641
| 138
| 0.614374
|
794c57ea3bdc512efedfbfa8e36d3a18f6a6d484
| 50,260
|
py
|
Python
|
core/agents/acer_single_process.py
|
R3NI3/pytorch-rl
|
20b3b738ca400b1916197f27a91367878b09803c
|
[
"MIT"
] | 851
|
2017-04-09T19:01:27.000Z
|
2022-03-30T17:57:01.000Z
|
core/agents/acer_single_process.py
|
R3NI3/pytorch-rl
|
20b3b738ca400b1916197f27a91367878b09803c
|
[
"MIT"
] | 10
|
2017-05-12T14:15:54.000Z
|
2020-09-24T12:30:42.000Z
|
core/agents/acer_single_process.py
|
R3NI3/pytorch-rl
|
20b3b738ca400b1916197f27a91367878b09803c
|
[
"MIT"
] | 161
|
2017-04-12T16:11:30.000Z
|
2022-01-12T07:55:46.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import time
import math
import torch
from torch.autograd import Variable, grad, backward
import torch.nn.functional as F
from utils.helpers import ACER_On_Policy_Experience
from utils.distributions import sample_poisson, categorical_kl_div
from optims.helpers import adjust_learning_rate
from core.agent_single_process import AgentSingleProcess
class ACERSingleProcess(AgentSingleProcess):
def __init__(self, master, process_id=0):
super(ACERSingleProcess, self).__init__(master, process_id)
# lstm hidden states
if self.master.enable_lstm:
self._reset_on_policy_lstm_hidden_vb_episode() # clear up hidden state
self._reset_on_policy_lstm_hidden_vb_rollout() # detach the previous variable from the computation graph
self._reset_off_policy_lstm_hidden_vb() # clear up hidden state, since sampled batches won't be connected from previous batches
# # NOTE global variable pi
# if self.master.enable_continuous:
# self.pi_vb = Variable(torch.Tensor([math.pi]).type(self.master.dtype))
self.master.logger.warning("Registered ACER-SingleProcess-Agent #" + str(self.process_id) + " w/ Env (seed:" + str(self.env.seed) + ").")
# NOTE: to be called at the beginning of each new episode, clear up the hidden state
def _reset_on_policy_lstm_hidden_vb_episode(self, training=True): # seq_len, batch_size, hidden_dim
not_training = not training
if self.master.enable_continuous:
# self.on_policy_lstm_hidden_vb = (Variable(torch.zeros(2, self.master.hidden_dim).type(self.master.dtype), volatile=not_training),
# Variable(torch.zeros(2, self.master.hidden_dim).type(self.master.dtype), volatile=not_training))
pass
else:
# for self.model
self.on_policy_lstm_hidden_vb = (Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=not_training),
Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=not_training))
# for self.master.avg_model # NOTE: no grads are needed to compute on this model, so always volatile
self.on_policy_avg_lstm_hidden_vb = (Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=True),
Variable(torch.zeros(1, self.master.hidden_dim).type(self.master.dtype), volatile=True))
# NOTE: to be called at the beginning of each rollout, detach the previous variable from the graph
def _reset_on_policy_lstm_hidden_vb_rollout(self):
# for self.model
self.on_policy_lstm_hidden_vb = (Variable(self.on_policy_lstm_hidden_vb[0].data),
Variable(self.on_policy_lstm_hidden_vb[1].data))
# for self.master.avg_model
self.on_policy_avg_lstm_hidden_vb = (Variable(self.on_policy_avg_lstm_hidden_vb[0].data),
Variable(self.on_policy_avg_lstm_hidden_vb[1].data))
# NOTE: to be called before each off-policy learning phase
# NOTE: keeping it separate so as not to mess up the on_policy_lstm_hidden_vb if the current on-policy episode has not finished after the last rollout
def _reset_off_policy_lstm_hidden_vb(self, training=True):
not_training = not training
if self.master.enable_continuous:
pass
else:
# for self.model
self.off_policy_lstm_hidden_vb = (Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype), volatile=not_training),
Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype), volatile=not_training))
# for self.master.avg_model # NOTE: no grads are needed to be computed on this model
self.off_policy_avg_lstm_hidden_vb = (Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype)),
Variable(torch.zeros(self.master.batch_size, self.master.hidden_dim).type(self.master.dtype)))
    def _preprocessState(self, state, on_policy, is_volatile=False):
        if isinstance(state, list):
            state_vb = []
            for i in range(len(state)):
                if on_policy:
                    state_vb.append(Variable(torch.from_numpy(state[i]).unsqueeze(0).type(self.master.dtype), volatile=is_volatile))
                else:
                    state_vb.append(Variable(torch.from_numpy(state[i]).view(-1, self.master.state_shape).type(self.master.dtype), volatile=is_volatile))
        else:
            if on_policy:
                state_vb = Variable(torch.from_numpy(state).unsqueeze(0).type(self.master.dtype), volatile=is_volatile)
            else:
                state_vb = Variable(torch.from_numpy(state).view(-1, self.master.state_shape).type(self.master.dtype), volatile=is_volatile)
        return state_vb
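    # Shape note (added comment): on-policy states get a leading batch dimension of 1 via
    # unsqueeze(0), while off-policy replays are reshaped to [batch_size x state_shape];
    # `volatile` just disables graph construction for inference-only passes (old autograd API).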
def _forward(self, state_vb, on_policy=True):
if self.master.enable_continuous:
pass
else:
if self.master.enable_lstm:
if on_policy: # learn from the current experience
p_vb, q_vb, v_vb, self.on_policy_lstm_hidden_vb = self.model(state_vb, self.on_policy_lstm_hidden_vb)
avg_p_vb, _, _, self.on_policy_avg_lstm_hidden_vb = self.master.avg_model(state_vb, self.on_policy_avg_lstm_hidden_vb)
# then we also need to get an action for the next time step
if self.training:
action = p_vb.multinomial().data[0][0]
else:
action = p_vb.max(1)[1].data.squeeze().numpy()[0]
return action, p_vb, q_vb, v_vb, avg_p_vb
else: # learn from the sampled replays
p_vb, q_vb, v_vb, self.off_policy_lstm_hidden_vb = self.model(state_vb, self.off_policy_lstm_hidden_vb)
avg_p_vb, _, _, self.off_policy_avg_lstm_hidden_vb = self.master.avg_model(state_vb, self.off_policy_avg_lstm_hidden_vb)
return _, p_vb, q_vb, v_vb, avg_p_vb
else:
pass
class ACERLearner(ACERSingleProcess):
def __init__(self, master, process_id=0):
master.logger.warning("<===================================> ACER-Learner #" + str(process_id) + " {Env & Model & Memory}")
super(ACERLearner, self).__init__(master, process_id)
# NOTE: diff from pure on-policy methods like a3c, acer is capable of
# NOTE: off-policy learning and can make use of replay buffer
self.memory = self.master.memory_prototype(capacity = self.master.memory_params.memory_size // self.master.num_processes,
max_episode_length = self.master.early_stop)
self._reset_rollout()
        self.training = True    # choose actions by multinomial sampling
self.model.train(self.training)
# local counters
self.frame_step = 0 # local frame step counter
self.train_step = 0 # local train step counter
self.on_policy_train_step = 0 # local on-policy train step counter
self.off_policy_train_step = 0 # local off-policy train step counter
# local training stats
self.p_loss_avg = 0. # global policy loss
self.v_loss_avg = 0. # global value loss
self.entropy_loss_avg = 0. # global entropy loss
self.loss_counter = 0 # storing this many losses
self._reset_training_loggings()
# copy local training stats to global every prog_freq
self.last_prog = time.time()
def _reset_training_loggings(self):
self.p_loss_avg = 0.
self.v_loss_avg = 0.
self.entropy_loss_avg = 0.
self.loss_counter = 0
def _reset_rollout(self): # for storing the experiences collected through one rollout
self.rollout = ACER_On_Policy_Experience(state0 = [],
action = [],
reward = [],
state1 = [],
terminal1 = [],
policy_vb = [],
q0_vb = [],
value0_vb = [],
detached_avg_policy_vb = [],
detached_old_policy_vb = [])
def _get_QretT_vb(self, on_policy=True):
if on_policy:
if self.rollout.terminal1[-1]: # for terminal sT: Q_ret = 0
QretT_vb = Variable(torch.zeros(1, 1))
else: # for non-terminal sT: Qret = V(s_i; /theta)
sT_vb = self._preprocessState(self.rollout.state1[-1], on_policy, True) # bootstrap from last state
if self.master.enable_lstm:
_, _, QretT_vb, _ = self.model(sT_vb, self.on_policy_lstm_hidden_vb)# NOTE: only doing inference here
else:
_, _, QretT_vb = self.model(sT_vb) # NOTE: only doing inference here
# # NOTE: here QretT_vb.volatile=True since sT_vb.volatile=True
# # NOTE: if we use detach() here, it would remain volatile
# # NOTE: then all the follow-up computations would only give volatile loss variables
# QretT_vb = Variable(QretT_vb.data)
else:
sT_vb = self._preprocessState(self.rollout.state1[-1], on_policy, True) # bootstrap from last state
if self.master.enable_lstm:
_, _, QretT_vb, _ = self.model(sT_vb, self.off_policy_lstm_hidden_vb) # NOTE: only doing inference here
else:
_, _, QretT_vb = self.model(sT_vb) # NOTE: only doing inference here
# now we have to also set QretT_vb to 0 for terminal sT's
QretT_vb = ((1 - Variable(torch.from_numpy(np.array(self.rollout.terminal1[-1])).float())) * QretT_vb)
# NOTE: here QretT_vb.volatile=True since sT_vb.volatile=True
# NOTE: if we use detach() here, it would remain volatile
# NOTE: then all the follow-up computations would only give volatile loss variables
return Variable(QretT_vb.data)
def _1st_order_trpo(self, detached_policy_loss_vb, detached_policy_vb, detached_avg_policy_vb, detached_splitted_policy_vb=None):
on_policy = detached_splitted_policy_vb is None
        # KL divergence k = \delta_{\phi_{\theta}} DKL[ \pi(.|\phi_{\theta_a}) || \pi(.|\phi_{\theta}) ]
# kl_div_vb = F.kl_div(detached_policy_vb.log(), detached_avg_policy_vb, size_average=False) # NOTE: the built-in one does not work on batch
kl_div_vb = categorical_kl_div(detached_policy_vb, detached_avg_policy_vb)
# NOTE: k & g are wll w.r.t. the network output, which is detached_policy_vb
# NOTE: gradient from this part will not flow back into the model
# NOTE: that's why we are only using detached policy variables here
if on_policy:
k_vb = grad(outputs=kl_div_vb, inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0]
g_vb = grad(outputs=detached_policy_loss_vb, inputs=detached_policy_vb, retain_graph=False, only_inputs=True)[0]
else:
# NOTE NOTE NOTE !!!
# NOTE: here is why we cannot simply detach then split the policy_vb, but must split before detach
# NOTE: cos if we do that then the split cannot backtrace the grads computed in this later part of the graph
# NOTE: it would have no way to connect to the graphs in the model
k_vb = grad(outputs=(kl_div_vb.split(1, 0)), inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True)
g_vb = grad(outputs=(detached_policy_loss_vb.split(1, 0)), inputs=(detached_splitted_policy_vb), retain_graph=False, only_inputs=True)
k_vb = torch.cat(k_vb, 0)
g_vb = torch.cat(g_vb, 0)
kg_dot_vb = (k_vb * g_vb).sum(1, keepdim=True)
kk_dot_vb = (k_vb * k_vb).sum(1, keepdim=True)
z_star_vb = g_vb - ((kg_dot_vb - self.master.clip_1st_order_trpo) / kk_dot_vb).clamp(min=0) * k_vb
return z_star_vb
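    # In closed form, the update computed above is (a reading of the code, no behaviour change):
    #     z* = g - max(0, (k . g - delta) / ||k||^2) * k
    # i.e. the policy gradient g is corrected so that the linearised KL step along k stays
    # within the trust region radius given by `clip_1st_order_trpo`.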
    def _update_global_avg_model(self):
        for global_param, global_avg_param in zip(self.master.model.parameters(),
                                                   self.master.avg_model.parameters()):
            # NOTE: update the shared average model in place; simply rebinding the loop
            # NOTE: variable would leave self.master.avg_model unchanged
            global_avg_param.data.mul_(self.master.avg_model_decay).add_(
                (1 - self.master.avg_model_decay) * global_param.data)
def _backward(self, unsplitted_policy_vb=None):
on_policy = unsplitted_policy_vb is None
# preparation
rollout_steps = len(self.rollout.reward)
if self.master.enable_continuous:
pass
else:
action_batch_vb = Variable(torch.from_numpy(np.array(self.rollout.action)).view(rollout_steps, -1, 1).long()) # [rollout_steps x batch_size x 1]
if self.master.use_cuda:
action_batch_vb = action_batch_vb.cuda()
if not on_policy: # we save this transformation for on-policy
reward_batch_vb = Variable(torch.from_numpy(np.array(self.rollout.reward)).view(rollout_steps, -1, 1).float()) # [rollout_steps x batch_size x 1]
# NOTE: here we use the detached policies, cos when using 1st order trpo,
# NOTE: the policy losses are not directly backproped into the model
# NOTE: but only backproped up to the output of the network
# NOTE: and to make the code consistent, we also decouple the backprop
# NOTE: into two parts when not using trpo policy update
# NOTE: requires_grad of detached_policy_vb must be True, otherwise grad will not be able to
        # NOTE: flow between the two stages of backprop
if on_policy:
policy_vb = self.rollout.policy_vb
detached_splitted_policy_vb = None
detached_policy_vb = [Variable(self.rollout.policy_vb[i].data, requires_grad=True) for i in range(rollout_steps)] # [rollout_steps x batch_size x action_dim]
else: # NOTE: here rollout.policy_vb is already split by trajectories, we can safely detach and not causing trouble for feed in tuples into grad later
# NOTE: rollout.policy_vb: undetached, splitted -> what we stored during the fake _off_policy_rollout
# NOTE: policy_vb: undetached, batch -> 1. entropy, cos grad from entropy need to flow back through the whole graph 2. the backward of 2nd stage should be computed on this
# NOTE: detached_splitted_policy_vb: detached, splitted -> used as inputs in grad in _1st_order_trpo, cos this part of grad is not backproped into the model
# NOTE: detached_policy_vb: detached, batch -> to ease batch computation on the detached_policy_vb
policy_vb = unsplitted_policy_vb
detached_splitted_policy_vb = [[Variable(self.rollout.policy_vb[i][j].data, requires_grad=True) for j in range(self.master.batch_size)] for i in range(rollout_steps)] # (rollout_steps x (batch_size x [1 x action_dim]))
detached_policy_vb = [torch.cat(detached_splitted_policy_vb[i]) for i in range(rollout_steps)] # detached # we cat the splitted tuples for each timestep across trajectories to ease batch computation
detached_policy_log_vb = [torch.log(detached_policy_vb[i]) for i in range(rollout_steps)]
detached_policy_log_vb = [detached_policy_log_vb[i].gather(1, action_batch_vb[i]) for i in range(rollout_steps) ]
# NOTE: entropy is using the undetached policies here, cos we
# NOTE: backprop entropy_loss the same way as value_loss at once in the end
# NOTE: not decoupled into two stages as the other parts of the policy gradient
entropy_vb = [- (policy_vb[i].log() * policy_vb[i]).sum(1, keepdim=True).mean(0) for i in range(rollout_steps)]
if self.master.enable_1st_order_trpo:
z_star_vb = []
else:
policy_grad_vb = []
QretT_vb = self._get_QretT_vb(on_policy)
# compute loss
entropy_loss_vb = 0.
value_loss_vb = 0.
for i in reversed(range(rollout_steps)):
# 1. policy loss
if on_policy:
# importance sampling weights: always 1 for on-policy
rho_vb = Variable(torch.ones(1, self.master.action_dim))
# Q_ret = r_i + /gamma * Q_ret
QretT_vb = self.master.gamma * QretT_vb + self.rollout.reward[i]
else:
# importance sampling weights: /rho = /pi(|s_i) / /mu(|s_i)
rho_vb = detached_policy_vb[i].detach() / self.rollout.detached_old_policy_vb[i] # TODO: check if this detach is necessary
# Q_ret = r_i + /gamma * Q_ret
QretT_vb = self.master.gamma * QretT_vb + reward_batch_vb[i]
# A = Q_ret - V(s_i; /theta)
advantage_vb = QretT_vb - self.rollout.value0_vb[i]
# g = min(c, /rho_a_i) * /delta_theta * log(/pi(a_i|s_i; /theta)) * A
detached_policy_loss_vb = - (rho_vb.gather(1, action_batch_vb[i]).clamp(max=self.master.clip_trace) * detached_policy_log_vb[i] * advantage_vb.detach()).mean(0)
            if self.master.enable_bias_correction:  # and not on_policy: # NOTE: have to perform bias correction when off-policy
                # g = g + /sum_a [1 - c / /rho_a]_+ /pi(a|s_i; /theta) * /delta_theta * log(/pi(a|s_i; /theta)) * (Q(s_i, a; /theta) - V(s_i; /theta))
bias_correction_coefficient_vb = (1 - self.master.clip_trace / rho_vb).clamp(min=0) * detached_policy_vb[i]
detached_policy_loss_vb -= (bias_correction_coefficient_vb * detached_policy_vb[i].log() * (self.rollout.q0_vb[i].detach() - self.rollout.value0_vb[i].detach())).sum(1, keepdim=True).mean(0)
# 1.1 backprop policy loss up to the network output
if self.master.enable_1st_order_trpo:
if on_policy:
z_star_vb.append(self._1st_order_trpo(detached_policy_loss_vb, detached_policy_vb[i], self.rollout.detached_avg_policy_vb[i]))
else:
z_star_vb.append(self._1st_order_trpo(detached_policy_loss_vb, detached_policy_vb[i], self.rollout.detached_avg_policy_vb[i], detached_splitted_policy_vb[i]))
else:
policy_grad_vb.append(grad(outputs=detached_policy_loss_vb, inputs=detached_policy_vb[i], retain_graph=False, only_inputs=True)[0])
# entropy loss
entropy_loss_vb -= entropy_vb[i]
# 2. value loss
Q_vb = self.rollout.q0_vb[i].gather(1, action_batch_vb[i])
value_loss_vb += ((QretT_vb - Q_vb) ** 2 / 2).mean(0)
# we also need to update QretT_vb here
truncated_rho_vb = rho_vb.gather(1, action_batch_vb[i]).clamp(max=1)
QretT_vb = truncated_rho_vb * (QretT_vb - Q_vb.detach()) + self.rollout.value0_vb[i].detach()
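            # Together with the `QretT_vb = gamma * QretT_vb + r` line at the top of the loop,
            # these two updates implement the truncated-importance-weight retrace recursion
            #     Q_ret <- r + gamma * ( rho_bar * (Q_ret - Q(s, a)) + V(s) )
            # used by ACER, with rho_bar clamped at 1.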
# now we have all the losses ready, we backprop
self.model.zero_grad()
# 1.2 backprop the policy loss from the network output to the whole model
if self.master.enable_1st_order_trpo:
# NOTE: here need to use the undetached policy_vb, cos we need to backprop to the whole model
backward(variables=policy_vb, grad_variables=z_star_vb, retain_graph=True)
else:
# NOTE: here we can backprop both losses at once, but to make consistent
# NOTE: and avoid the need to keep track of another set of undetached policy loss
# NOTE: we also decouple the backprop of the policy loss into two stages
backward(variables=policy_vb, grad_variables=policy_grad_vb, retain_graph=True)
# 2. backprop the value loss and entropy loss
(value_loss_vb + self.master.beta * entropy_loss_vb).backward()
torch.nn.utils.clip_grad_norm(self.model.parameters(), self.master.clip_grad)
self._ensure_global_grads()
self.master.optimizer.step()
self.train_step += 1
self.master.train_step.value += 1
# update master.avg_model
self._update_global_avg_model()
# adjust learning rate if enabled
if self.master.lr_decay:
self.master.lr_adjusted.value = max(self.master.lr * (self.master.steps - self.master.train_step.value) / self.master.steps, 1e-32)
adjust_learning_rate(self.master.optimizer, self.master.lr_adjusted.value)
# log training stats
if self.master.enable_1st_order_trpo:
self.p_loss_avg += torch.cat(z_star_vb, 0).data.mean()
else:
self.p_loss_avg += torch.cat(policy_grad_vb, 0).data.mean()
self.v_loss_avg += value_loss_vb.data.numpy()
self.entropy_loss_avg += entropy_loss_vb.data.numpy()
self.loss_counter += 1
# NOTE: get action from current model, execute in env
# NOTE: then get ACER_On_Policy_Experience to calculate stats for backward
# NOTE: push them into replay buffer in the format of {s,a,r,s1,t1,p}
def _on_policy_rollout(self, episode_steps, episode_reward):
# reset rollout experiences
self._reset_rollout()
t_start = self.frame_step
# continue to rollout only if:
# 1. not running out of max steps of this current rollout, and
# 2. not terminal, and
# 3. not exceeding max steps of this current episode
# 4. master not exceeding max train steps
while (self.frame_step - t_start) < self.master.rollout_steps \
and not self.experience.terminal1 \
and (self.master.early_stop is None or episode_steps < self.master.early_stop):
# NOTE: here first store the last frame: experience.state1 as rollout.state0
self.rollout.state0.append(self.experience.state1)
# then get the action to take from rollout.state0 (experience.state1)
if self.master.enable_continuous:
pass
else:
action, p_vb, q_vb, v_vb, avg_p_vb = self._forward(self._preprocessState(self.experience.state1, on_policy=True), on_policy=True)
# then execute action in env to get a new experience.state1 -> rollout.state1
self.experience = self.env.step(action)
# push experience into rollout
self.rollout.action.append(action)
self.rollout.reward.append(self.experience.reward)
self.rollout.state1.append(self.experience.state1)
self.rollout.terminal1.append(self.experience.terminal1)
self.rollout.policy_vb.append(p_vb)
self.rollout.q0_vb.append(q_vb)
self.rollout.value0_vb.append(v_vb)
self.rollout.detached_avg_policy_vb.append(avg_p_vb.detach()) # NOTE
# also push into replay buffer if off-policy learning is enabled
if self.master.replay_ratio > 0:
if self.rollout.terminal1[-1]:
self.memory.append(self.rollout.state0[-1],
None,
None,
None)
else:
self.memory.append(self.rollout.state0[-1],
self.rollout.action[-1],
self.rollout.reward[-1],
self.rollout.policy_vb[-1].detach()) # NOTE: no graphs needed
episode_steps += 1
episode_reward += self.experience.reward
self.frame_step += 1
self.master.frame_step.value += 1
# NOTE: we put this condition in the end to make sure this current rollout won't be empty
if self.master.train_step.value >= self.master.steps:
break
return episode_steps, episode_reward
# NOTE: sample from replay buffer for a bunch of trajectories
# NOTE: then fake rollout on them to get ACER_On_Policy_Experience to get stats for backward
def _off_policy_rollout(self):
# reset rollout experiences
self._reset_rollout()
# first sample trajectories
trajectories = self.memory.sample_batch(self.master.batch_size, maxlen=self.master.rollout_steps)
# NOTE: we also store another set of undetached unsplitted policy_vb here to prepare for backward
unsplitted_policy_vb = []
# then fake the on-policy forward
for t in range(len(trajectories) - 1):
# we first get the data out of the sampled experience
            state0 = np.stack([trajectory.state0 for trajectory in trajectories[t]])
            action = np.expand_dims(np.stack([trajectory.action for trajectory in trajectories[t]]), axis=1)
            reward = np.expand_dims(np.stack([trajectory.reward for trajectory in trajectories[t]]), axis=1)
            state1 = np.stack([trajectory.state0 for trajectory in trajectories[t+1]])
            terminal1 = np.expand_dims(np.stack([1 if trajectory.action is None else 0 for trajectory in trajectories[t+1]]), axis=1) # NOTE: here is 0/1, in on-policy is False/True
detached_old_policy_vb = torch.cat([trajectory.detached_old_policy_vb for trajectory in trajectories[t]], 0)
# NOTE: here first store the last frame: experience.state1 as rollout.state0
self.rollout.state0.append(state0)
# then get its corresponding output variables to fake the on policy experience
if self.master.enable_continuous:
pass
else:
_, p_vb, q_vb, v_vb, avg_p_vb = self._forward(self._preprocessState(self.rollout.state0[-1], on_policy=False), on_policy=False)
# push experience into rollout
self.rollout.action.append(action)
self.rollout.reward.append(reward)
self.rollout.state1.append(state1)
self.rollout.terminal1.append(terminal1)
self.rollout.policy_vb.append(p_vb.split(1, 0)) # NOTE: must split before detach !!! otherwise graph is cut
self.rollout.q0_vb.append(q_vb)
self.rollout.value0_vb.append(v_vb)
self.rollout.detached_avg_policy_vb.append(avg_p_vb.detach()) # NOTE
self.rollout.detached_old_policy_vb.append(detached_old_policy_vb)
unsplitted_policy_vb.append(p_vb)
# also need to log some training stats here maybe
return unsplitted_policy_vb
def run(self):
# make sure processes are not completely synced by sleeping a bit
time.sleep(int(np.random.rand() * (self.process_id + 5)))
nepisodes = 0
nepisodes_solved = 0
episode_steps = None
episode_reward = None
should_start_new = True
while self.master.train_step.value < self.master.steps:
# NOTE: on-policy learning # NOTE: procedure same as a3c, outs differ a bit
# sync in every step
self._sync_local_with_global()
self.model.zero_grad()
# start of a new episode
if should_start_new:
episode_steps = 0
episode_reward = 0.
# reset on_policy_lstm_hidden_vb for new episode
if self.master.enable_lstm:
# NOTE: clear hidden state at the beginning of each episode
self._reset_on_policy_lstm_hidden_vb_episode()
# Obtain the initial observation by resetting the environment
self._reset_experience()
self.experience = self.env.reset()
assert self.experience.state1 is not None
# reset flag
should_start_new = False
if self.master.enable_lstm:
# NOTE: detach the previous hidden variable from the graph at the beginning of each rollout
self._reset_on_policy_lstm_hidden_vb_rollout()
# Run a rollout for rollout_steps or until terminal
episode_steps, episode_reward = self._on_policy_rollout(episode_steps, episode_reward)
if self.experience.terminal1 or \
self.master.early_stop and episode_steps >= self.master.early_stop:
nepisodes += 1
should_start_new = True
if self.experience.terminal1:
nepisodes_solved += 1
# calculate loss
self._backward() # NOTE: only train_step will increment inside _backward
self.on_policy_train_step += 1
self.master.on_policy_train_step.value += 1
# NOTE: off-policy learning
            # perform some off-policy training once enough experience has been collected
if self.master.replay_ratio > 0 and len(self.memory) >= self.master.replay_start:
# sample a number of off-policy episodes based on the replay ratio
for _ in range(sample_poisson(self.master.replay_ratio)):
# sync in every step
self._sync_local_with_global() # TODO: don't know if this is necessary here
self.model.zero_grad()
                    # reset off_policy_lstm_hidden_vb before this off-policy pass
                    if self.master.enable_lstm:
                        # NOTE: clear hidden state, since sampled batches are not connected to previous ones
self._reset_off_policy_lstm_hidden_vb()
unsplitted_policy_vb = self._off_policy_rollout() # fake rollout, just to collect net outs from sampled trajectories
# calculate loss
self._backward(unsplitted_policy_vb) # NOTE: only train_step will increment inside _backward
self.off_policy_train_step += 1
self.master.off_policy_train_step.value += 1
# copy local training stats to global at prog_freq, and clear up local stats
if time.time() - self.last_prog >= self.master.prog_freq:
self.master.p_loss_avg.value += self.p_loss_avg
self.master.v_loss_avg.value += self.v_loss_avg
self.master.entropy_loss_avg.value += self.entropy_loss_avg
self.master.loss_counter.value += self.loss_counter
self._reset_training_loggings()
self.last_prog = time.time()
class ACEREvaluator(ACERSingleProcess):
def __init__(self, master, process_id=0):
master.logger.warning("<===================================> ACER-Evaluator {Env & Model}")
super(ACEREvaluator, self).__init__(master, process_id)
self.training = False # choose actions w/ max probability
self.model.train(self.training)
self._reset_loggings()
self.start_time = time.time()
self.last_eval = time.time()
def _reset_loggings(self):
# training stats across all processes
self.p_loss_avg_log = []
self.v_loss_avg_log = []
self.entropy_loss_avg_log = []
# evaluation stats
self.entropy_avg_log = []
self.v_avg_log = []
self.steps_avg_log = []
self.steps_std_log = []
self.reward_avg_log = []
self.reward_std_log = []
self.nepisodes_log = []
self.nepisodes_solved_log = []
self.repisodes_solved_log = []
# placeholders for windows for online curve plotting
if self.master.visualize:
# training stats across all processes
self.win_p_loss_avg = "win_p_loss_avg"
self.win_v_loss_avg = "win_v_loss_avg"
self.win_entropy_loss_avg = "win_entropy_loss_avg"
# evaluation stats
self.win_entropy_avg = "win_entropy_avg"
self.win_v_avg = "win_v_avg"
self.win_steps_avg = "win_steps_avg"
self.win_steps_std = "win_steps_std"
self.win_reward_avg = "win_reward_avg"
self.win_reward_std = "win_reward_std"
self.win_nepisodes = "win_nepisodes"
self.win_nepisodes_solved = "win_nepisodes_solved"
self.win_repisodes_solved = "win_repisodes_solved"
def _eval_model(self):
self.last_eval = time.time()
eval_at_train_step = self.master.train_step.value
eval_at_frame_step = self.master.frame_step.value
eval_at_on_policy_train_step = self.master.on_policy_train_step.value
eval_at_off_policy_train_step = self.master.off_policy_train_step.value
# first grab the latest global model to do the evaluation
self._sync_local_with_global()
# evaluate
eval_step = 0
eval_entropy_log = []
eval_v_log = []
eval_nepisodes = 0
eval_nepisodes_solved = 0
eval_episode_steps = None
eval_episode_steps_log = []
eval_episode_reward = None
eval_episode_reward_log = []
eval_should_start_new = True
while eval_step < self.master.eval_steps:
if eval_should_start_new: # start of a new episode
eval_episode_steps = 0
eval_episode_reward = 0.
# reset lstm_hidden_vb for new episode
if self.master.enable_lstm:
# NOTE: clear hidden state at the beginning of each episode
self._reset_on_policy_lstm_hidden_vb_episode(self.training)
# Obtain the initial observation by resetting the environment
self._reset_experience()
self.experience = self.env.reset()
assert self.experience.state1 is not None
if not self.training:
if self.master.visualize: self.env.visual()
if self.master.render: self.env.render()
# reset flag
eval_should_start_new = False
if self.master.enable_lstm:
# NOTE: detach the previous hidden variable from the graph at the beginning of each step
# NOTE: not necessary here in evaluation but we do it anyways
self._reset_on_policy_lstm_hidden_vb_rollout()
# Run a single step
if self.master.enable_continuous:
pass
else:
eval_action, p_vb, _, v_vb, _ = self._forward(self._preprocessState(self.experience.state1, True, True), on_policy=True)
self.experience = self.env.step(eval_action)
if not self.training:
if self.master.visualize: self.env.visual()
if self.master.render: self.env.render()
if self.experience.terminal1 or \
self.master.early_stop and (eval_episode_steps + 1) == self.master.early_stop or \
(eval_step + 1) == self.master.eval_steps:
eval_should_start_new = True
eval_episode_steps += 1
eval_episode_reward += self.experience.reward
eval_step += 1
if eval_should_start_new:
eval_nepisodes += 1
if self.experience.terminal1:
eval_nepisodes_solved += 1
# This episode is finished, report and reset
# NOTE make no sense for continuous
if self.master.enable_continuous:
eval_entropy_log.append([0.5 * ((sig_vb * 2 * self.pi_vb.expand_as(sig_vb)).log() + 1).data.numpy()])
else:
eval_entropy_log.append([np.mean((-torch.log(p_vb.data.squeeze()) * p_vb.data.squeeze()).numpy())])
eval_v_log.append([v_vb.data.numpy()])
eval_episode_steps_log.append([eval_episode_steps])
eval_episode_reward_log.append([eval_episode_reward])
self._reset_experience()
eval_episode_steps = None
eval_episode_reward = None
# Logging for this evaluation phase
loss_counter = self.master.loss_counter.value
p_loss_avg = self.master.p_loss_avg.value / loss_counter if loss_counter > 0 else 0.
v_loss_avg = self.master.v_loss_avg.value / loss_counter if loss_counter > 0 else 0.
entropy_loss_avg = self.master.entropy_loss_avg.value / loss_counter if loss_counter > 0 else 0.
self.master._reset_training_loggings()
def _log_at_step(eval_at_step):
self.p_loss_avg_log.append([eval_at_step, p_loss_avg])
self.v_loss_avg_log.append([eval_at_step, v_loss_avg])
self.entropy_loss_avg_log.append([eval_at_step, entropy_loss_avg])
self.entropy_avg_log.append([eval_at_step, np.mean(np.asarray(eval_entropy_log))])
self.v_avg_log.append([eval_at_step, np.mean(np.asarray(eval_v_log))])
self.steps_avg_log.append([eval_at_step, np.mean(np.asarray(eval_episode_steps_log))])
self.steps_std_log.append([eval_at_step, np.std(np.asarray(eval_episode_steps_log))])
self.reward_avg_log.append([eval_at_step, np.mean(np.asarray(eval_episode_reward_log))])
self.reward_std_log.append([eval_at_step, np.std(np.asarray(eval_episode_reward_log))])
self.nepisodes_log.append([eval_at_step, eval_nepisodes])
self.nepisodes_solved_log.append([eval_at_step, eval_nepisodes_solved])
self.repisodes_solved_log.append([eval_at_step, (eval_nepisodes_solved/eval_nepisodes) if eval_nepisodes > 0 else 0.])
# logging
self.master.logger.warning("Reporting @ Step: " + str(eval_at_step) + " | Elapsed Time: " + str(time.time() - self.start_time))
self.master.logger.warning("Iteration: {}; lr: {}".format(eval_at_step, self.master.lr_adjusted.value))
self.master.logger.warning("Iteration: {}; on_policy_steps: {}".format(eval_at_step, eval_at_on_policy_train_step))
self.master.logger.warning("Iteration: {}; off_policy_steps: {}".format(eval_at_step, eval_at_off_policy_train_step))
self.master.logger.warning("Iteration: {}; p_loss_avg: {}".format(eval_at_step, self.p_loss_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; v_loss_avg: {}".format(eval_at_step, self.v_loss_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; entropy_loss_avg: {}".format(eval_at_step, self.entropy_loss_avg_log[-1][1]))
self.master._reset_training_loggings()
self.master.logger.warning("Evaluating @ Step: " + str(eval_at_train_step) + " | (" + str(eval_at_frame_step) + " frames)...")
self.master.logger.warning("Evaluation Took: " + str(time.time() - self.last_eval))
self.master.logger.warning("Iteration: {}; entropy_avg: {}".format(eval_at_step, self.entropy_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; v_avg: {}".format(eval_at_step, self.v_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; steps_avg: {}".format(eval_at_step, self.steps_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; steps_std: {}".format(eval_at_step, self.steps_std_log[-1][1]))
self.master.logger.warning("Iteration: {}; reward_avg: {}".format(eval_at_step, self.reward_avg_log[-1][1]))
self.master.logger.warning("Iteration: {}; reward_std: {}".format(eval_at_step, self.reward_std_log[-1][1]))
self.master.logger.warning("Iteration: {}; nepisodes: {}".format(eval_at_step, self.nepisodes_log[-1][1]))
self.master.logger.warning("Iteration: {}; nepisodes_solved: {}".format(eval_at_step, self.nepisodes_solved_log[-1][1]))
self.master.logger.warning("Iteration: {}; repisodes_solved: {}".format(eval_at_step, self.repisodes_solved_log[-1][1]))
if self.master.enable_log_at_train_step:
_log_at_step(eval_at_train_step)
else:
_log_at_step(eval_at_frame_step)
# plotting
if self.master.visualize:
self.win_p_loss_avg = self.master.vis.scatter(X=np.array(self.p_loss_avg_log), env=self.master.refs, win=self.win_p_loss_avg, opts=dict(title="p_loss_avg"))
self.win_v_loss_avg = self.master.vis.scatter(X=np.array(self.v_loss_avg_log), env=self.master.refs, win=self.win_v_loss_avg, opts=dict(title="v_loss_avg"))
self.win_entropy_loss_avg = self.master.vis.scatter(X=np.array(self.entropy_loss_avg_log), env=self.master.refs, win=self.win_entropy_loss_avg, opts=dict(title="entropy_loss_avg"))
self.win_entropy_avg = self.master.vis.scatter(X=np.array(self.entropy_avg_log), env=self.master.refs, win=self.win_entropy_avg, opts=dict(title="entropy_avg"))
self.win_v_avg = self.master.vis.scatter(X=np.array(self.v_avg_log), env=self.master.refs, win=self.win_v_avg, opts=dict(title="v_avg"))
self.win_steps_avg = self.master.vis.scatter(X=np.array(self.steps_avg_log), env=self.master.refs, win=self.win_steps_avg, opts=dict(title="steps_avg"))
# self.win_steps_std = self.master.vis.scatter(X=np.array(self.steps_std_log), env=self.master.refs, win=self.win_steps_std, opts=dict(title="steps_std"))
self.win_reward_avg = self.master.vis.scatter(X=np.array(self.reward_avg_log), env=self.master.refs, win=self.win_reward_avg, opts=dict(title="reward_avg"))
# self.win_reward_std = self.master.vis.scatter(X=np.array(self.reward_std_log), env=self.master.refs, win=self.win_reward_std, opts=dict(title="reward_std"))
self.win_nepisodes = self.master.vis.scatter(X=np.array(self.nepisodes_log), env=self.master.refs, win=self.win_nepisodes, opts=dict(title="nepisodes"))
self.win_nepisodes_solved = self.master.vis.scatter(X=np.array(self.nepisodes_solved_log), env=self.master.refs, win=self.win_nepisodes_solved, opts=dict(title="nepisodes_solved"))
self.win_repisodes_solved = self.master.vis.scatter(X=np.array(self.repisodes_solved_log), env=self.master.refs, win=self.win_repisodes_solved, opts=dict(title="repisodes_solved"))
self.last_eval = time.time()
# save model
self.master._save_model(eval_at_train_step, self.reward_avg_log[-1][1])
def run(self):
while self.master.train_step.value < self.master.steps:
if time.time() - self.last_eval > self.master.eval_freq:
self._eval_model()
# we also do a final evaluation after training is done
self._eval_model()
class ACERTester(ACERSingleProcess):
def __init__(self, master, process_id=0):
master.logger.warning("<===================================> ACER-Tester {Env & Model}")
super(ACERTester, self).__init__(master, process_id)
self.training = False # choose actions w/ max probability
self.model.train(self.training)
self._reset_loggings()
self.start_time = time.time()
def _reset_loggings(self):
# testing stats
self.steps_avg_log = []
self.steps_std_log = []
self.reward_avg_log = []
self.reward_std_log = []
self.nepisodes_log = []
self.nepisodes_solved_log = []
self.repisodes_solved_log = []
# placeholders for windows for online curve plotting
if self.master.visualize:
# evaluation stats
self.win_steps_avg = "win_steps_avg"
self.win_steps_std = "win_steps_std"
self.win_reward_avg = "win_reward_avg"
self.win_reward_std = "win_reward_std"
self.win_nepisodes = "win_nepisodes"
self.win_nepisodes_solved = "win_nepisodes_solved"
self.win_repisodes_solved = "win_repisodes_solved"
def run(self):
test_step = 0
test_nepisodes = 0
test_nepisodes_solved = 0
test_episode_steps = None
test_episode_steps_log = []
test_episode_reward = None
test_episode_reward_log = []
test_should_start_new = True
while test_nepisodes < self.master.test_nepisodes:
if test_should_start_new: # start of a new episode
test_episode_steps = 0
test_episode_reward = 0.
# reset lstm_hidden_vb for new episode
if self.master.enable_lstm:
# NOTE: clear hidden state at the beginning of each episode
self._reset_on_policy_lstm_hidden_vb_episode(self.training)
# Obtain the initial observation by resetting the environment
self._reset_experience()
self.experience = self.env.reset()
assert self.experience.state1 is not None
if not self.training:
if self.master.visualize: self.env.visual()
if self.master.render: self.env.render()
# reset flag
test_should_start_new = False
if self.master.enable_lstm:
# NOTE: detach the previous hidden variable from the graph at the beginning of each step
# NOTE: not necessary here in testing but we do it anyways
self._reset_on_policy_lstm_hidden_vb_rollout()
# Run a single step
if self.master.enable_continuous:
pass
else:
test_action, p_vb, _, v_vb, _ = self._forward(self._preprocessState(self.experience.state1, True, True), on_policy=True)
self.experience = self.env.step(test_action)
if not self.training:
if self.master.visualize: self.env.visual()
if self.master.render: self.env.render()
if self.experience.terminal1 or \
self.master.early_stop and (test_episode_steps + 1) == self.master.early_stop:
test_should_start_new = True
test_episode_steps += 1
test_episode_reward += self.experience.reward
test_step += 1
if test_should_start_new:
test_nepisodes += 1
if self.experience.terminal1:
test_nepisodes_solved += 1
# This episode is finished, report and reset
test_episode_steps_log.append([test_episode_steps])
test_episode_reward_log.append([test_episode_reward])
self._reset_experience()
test_episode_steps = None
test_episode_reward = None
self.steps_avg_log.append([test_nepisodes, np.mean(np.asarray(test_episode_steps_log))])
self.steps_std_log.append([test_nepisodes, np.std(np.asarray(test_episode_steps_log))]); del test_episode_steps_log
self.reward_avg_log.append([test_nepisodes, np.mean(np.asarray(test_episode_reward_log))])
self.reward_std_log.append([test_nepisodes, np.std(np.asarray(test_episode_reward_log))]); del test_episode_reward_log
self.nepisodes_log.append([test_nepisodes, test_nepisodes])
self.nepisodes_solved_log.append([test_nepisodes, test_nepisodes_solved])
self.repisodes_solved_log.append([test_nepisodes, (test_nepisodes_solved/test_nepisodes) if test_nepisodes > 0 else 0.])
# plotting
if self.master.visualize:
self.win_steps_avg = self.master.vis.scatter(X=np.array(self.steps_avg_log), env=self.master.refs, win=self.win_steps_avg, opts=dict(title="steps_avg"))
# self.win_steps_std = self.master.vis.scatter(X=np.array(self.steps_std_log), env=self.master.refs, win=self.win_steps_std, opts=dict(title="steps_std"))
self.win_reward_avg = self.master.vis.scatter(X=np.array(self.reward_avg_log), env=self.master.refs, win=self.win_reward_avg, opts=dict(title="reward_avg"))
# self.win_reward_std = self.master.vis.scatter(X=np.array(self.reward_std_log), env=self.master.refs, win=self.win_reward_std, opts=dict(title="reward_std"))
self.win_nepisodes = self.master.vis.scatter(X=np.array(self.nepisodes_log), env=self.master.refs, win=self.win_nepisodes, opts=dict(title="nepisodes"))
self.win_nepisodes_solved = self.master.vis.scatter(X=np.array(self.nepisodes_solved_log), env=self.master.refs, win=self.win_nepisodes_solved, opts=dict(title="nepisodes_solved"))
self.win_repisodes_solved = self.master.vis.scatter(X=np.array(self.repisodes_solved_log), env=self.master.refs, win=self.win_repisodes_solved, opts=dict(title="repisodes_solved"))
# logging
self.master.logger.warning("Testing Took: " + str(time.time() - self.start_time))
self.master.logger.warning("Testing: steps_avg: {}".format(self.steps_avg_log[-1][1]))
self.master.logger.warning("Testing: steps_std: {}".format(self.steps_std_log[-1][1]))
self.master.logger.warning("Testing: reward_avg: {}".format(self.reward_avg_log[-1][1]))
self.master.logger.warning("Testing: reward_std: {}".format(self.reward_std_log[-1][1]))
self.master.logger.warning("Testing: nepisodes: {}".format(self.nepisodes_log[-1][1]))
self.master.logger.warning("Testing: nepisodes_solved: {}".format(self.nepisodes_solved_log[-1][1]))
self.master.logger.warning("Testing: repisodes_solved: {}".format(self.repisodes_solved_log[-1][1]))
| 60.995146
| 234
| 0.635177
|
794c589b8a28e633c4e9ee80e045b9811ab02773
| 18,069
|
py
|
Python
|
pywikibot/comms/http.py
|
Partlo/RoboCade
|
89c49b3f793b96aeb9e75672fd150872eb52aa11
|
[
"MIT"
] | null | null | null |
pywikibot/comms/http.py
|
Partlo/RoboCade
|
89c49b3f793b96aeb9e75672fd150872eb52aa11
|
[
"MIT"
] | null | null | null |
pywikibot/comms/http.py
|
Partlo/RoboCade
|
89c49b3f793b96aeb9e75672fd150872eb52aa11
|
[
"MIT"
] | null | null | null |
"""
Basic HTTP access interface.
This module handles communication between the bot and the HTTP threads.
This module is responsible for
- Setting up a connection pool
- Providing a (blocking) interface for HTTP requests
- Translate site objects with query strings into URLs
- URL-encoding all data
- Basic HTTP error handling
This module creates and uses its own ``requests.Session`` object.
The session is closed if the module terminates.
If required you can use your own Session object passing it to the
``http.session`` variable::
from pywikibot.comms import http
session = requests.Session()
http.session = session
:py:obj:`flush()` can be called to close the session object.
"""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import atexit
import codecs
import re
import sys
from contextlib import suppress
from http import HTTPStatus, cookiejar
from string import Formatter
from typing import Optional, Union
from urllib.parse import quote, urlparse
from warnings import warn
import requests
import pywikibot
from pywikibot import config
from pywikibot.backports import Tuple
from pywikibot.exceptions import (
FatalServerError,
Server414Error,
Server504Error,
)
from pywikibot.logging import critical, debug, error, log, warning
from pywikibot.tools import (
deprecated,
deprecated_args,
file_mode_checker,
issue_deprecation_warning,
)
try:
import requests_oauthlib
except ImportError as e:
requests_oauthlib = e
# The error message for failed SSL certificate verification
# 'certificate verify failed' is a commonly detectable string
SSL_CERT_VERIFY_FAILED_MSG = 'certificate verify failed'
_logger = 'comms.http'
cookie_file_path = config.datafilepath('pywikibot.lwp')
file_mode_checker(cookie_file_path, create=True)
cookie_jar = cookiejar.LWPCookieJar(cookie_file_path)
try:
cookie_jar.load(ignore_discard=True)
except cookiejar.LoadError:
debug('Loading cookies failed.', _logger)
else:
debug('Loaded cookies from file.', _logger)
session = requests.Session()
session.cookies = cookie_jar
# Prepare flush on quit
def flush():
"""Close the session object. This is called when the module terminates."""
log('Closing network session.')
session.close()
if hasattr(sys, 'last_type'):
critical('Exiting due to uncaught exception {}'.format(sys.last_type))
log('Network session closed.')
atexit.register(flush)
USER_AGENT_PRODUCTS = {
'python': 'Python/' + '.'.join(str(i) for i in sys.version_info),
'http_backend': 'requests/' + requests.__version__,
'pwb': 'Pywikibot/' + pywikibot.__version__,
}
class _UserAgentFormatter(Formatter):
"""User-agent formatter to load version/revision only if necessary."""
def get_value(self, key, args, kwargs):
"""Get field as usual except for version and revision."""
# This is the Pywikibot version; also map it to {revision} at present.
if key in ('version', 'revision'):
return pywikibot.version.getversiondict()['rev']
return super().get_value(key, args, kwargs)
_USER_AGENT_FORMATTER = _UserAgentFormatter()
def user_agent_username(username=None):
"""
Reduce username to a representation permitted in HTTP headers.
To achieve that, this function:
1) replaces spaces (' ') with '_'
2) encodes the username as 'utf-8' and if the username is not ASCII
3) URL encodes the username if it is not ASCII, or contains '%'
"""
if not username:
return ''
username = username.replace(' ', '_') # Avoid spaces or %20.
try:
username.encode('ascii') # just test, but not actually use it
except UnicodeEncodeError:
username = quote(username.encode('utf-8'))
else:
# % is legal in the default $wgLegalTitleChars
# This is so that ops know the real pywikibot will not
# allow a useragent in the username to allow through a hand-coded
# percent-encoded value.
if '%' in username:
username = quote(username)
return username
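# Illustrative behaviour of user_agent_username (example values assumed, not from a config):
#   user_agent_username('John Doe')  -> 'John_Doe'
#   user_agent_username('Ädmin')     -> '%C3%84dmin'   (non-ASCII names are percent-encoded)
#   user_agent_username('50%off')    -> '50%25off'     (a literal '%' is re-quoted)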
def user_agent(site=None, format_string: str = None) -> str:
"""
Generate the user agent string for a given site and format.
:param site: The site for which this user agent is intended. May be None.
:type site: BaseSite
:param format_string: The string to which the values will be added using
str.format. Is using config.user_agent_format when it is None.
:return: The formatted user agent
"""
values = USER_AGENT_PRODUCTS.copy()
values.update(dict.fromkeys(['script', 'script_product'],
pywikibot.bot.calledModuleName()))
script_comments = []
if config.user_agent_description:
script_comments.append(config.user_agent_description)
values['family'] = ''
values['code'] = ''
values['lang'] = '' # TODO: use site.lang, if known
values['site'] = ''
username = ''
if site:
script_comments.append(str(site))
# TODO: there are several ways of identifying a user, and username
        # is not the best for an HTTP header if the username isn't ASCII.
if site.username():
username = user_agent_username(site.username())
script_comments.append('User:' + username)
values.update({
'family': site.family.name,
'code': site.code,
'lang': site.code, # TODO: use site.lang, if known
'site': str(site),
})
values['username'] = username
values['script_comments'] = '; '.join(script_comments)
format_string = format_string or config.user_agent_format
formatted = _USER_AGENT_FORMATTER.format(format_string, **values)
# clean up after any blank components
    formatted = formatted.replace('()', '').replace('  ', ' ').strip()
return formatted
@deprecated('pywikibot.comms.http.fake_user_agent', since='20161205')
def get_fake_user_agent():
"""
Return a fake user agent depending on `fake_user_agent` option in config.
Deprecated, use fake_user_agent() instead.
:rtype: str
"""
if isinstance(config.fake_user_agent, str):
return config.fake_user_agent
if config.fake_user_agent is False:
return user_agent()
return fake_user_agent()
def fake_user_agent() -> str:
"""Return a fake user agent."""
try:
from fake_useragent import UserAgent
except ImportError:
raise ImportError( # Actually complain when fake_useragent is missing.
'fake_useragent must be installed to get fake UAs.')
return UserAgent().random
@deprecated_args(body='data')
def request(site,
uri: Optional[str] = None,
headers: Optional[dict] = None,
**kwargs) -> requests.Response:
"""
Request to Site with default error handling and response decoding.
See :py:obj:`requests.Session.request` for additional parameters.
The optional uri is a relative uri from site base uri including the
document root '/'.
:param site: The Site to connect to
:type site: pywikibot.site.BaseSite
:param uri: the URI to retrieve
:keyword charset: Either a valid charset (usable for str.decode()) or None
        to automatically choose the charset from the returned header (defaults
to latin-1)
:type charset: CodecInfo, str, None
:return: The received data Response
"""
kwargs.setdefault('verify', site.verify_SSL_certificate())
old_validation = kwargs.pop('disable_ssl_certificate_validation', None)
if old_validation is not None:
issue_deprecation_warning('disable_ssl_certificate_validation',
instead='verify',
since='20201220')
kwargs.update(verify=not old_validation)
if not headers:
headers = {}
format_string = None
else:
format_string = headers.get('user-agent')
headers['user-agent'] = user_agent(site, format_string)
baseuri = site.base_url(uri)
r = fetch(baseuri, headers=headers, **kwargs)
site.throttle.retry_after = int(r.headers.get('retry-after', 0))
return r
def get_authentication(uri: str) -> Optional[Tuple[str, str]]:
"""
Retrieve authentication token.
:param uri: the URI to access
:return: authentication token
"""
parsed_uri = requests.utils.urlparse(uri)
netloc_parts = parsed_uri.netloc.split('.')
netlocs = [parsed_uri.netloc] + ['.'.join(['*'] + netloc_parts[i + 1:])
for i in range(len(netloc_parts))]
for path in netlocs:
if path in config.authenticate:
if len(config.authenticate[path]) in [2, 4]:
return config.authenticate[path]
warn('config.authenticate["{path}"] has invalid value.\n'
'It should contain 2 or 4 items, not {length}.\n'
'See {url}/OAuth for more info.'
.format(path=path,
length=len(config.authenticate[path]),
url=pywikibot.__url__))
return None
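# For example (illustrative values, not part of the original module): for the uri
# 'https://en.wikipedia.org/w/index.php' the keys checked against config.authenticate
# would be, in order: ['en.wikipedia.org', '*.wikipedia.org', '*.org', '*'].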
def error_handling_callback(response):
"""
Raise exceptions and log alerts.
:param response: Response returned by Session.request().
:type response: :py:obj:`requests.Response`
"""
# TODO: do some error correcting stuff
if isinstance(response, requests.exceptions.SSLError):
if SSL_CERT_VERIFY_FAILED_MSG in str(response):
raise FatalServerError(str(response))
if isinstance(response, Exception):
with suppress(Exception):
# request exception may contain response and request attribute
error('An error occurred for uri ' + response.request.url)
raise response from None
if response.status_code == HTTPStatus.GATEWAY_TIMEOUT:
raise Server504Error('Server {} timed out'
.format(urlparse(response.url).netloc))
if response.status_code == HTTPStatus.REQUEST_URI_TOO_LONG:
raise Server414Error('Too long GET request')
# TODO: shall it raise? this might break some code, TBC
# response.raise_for_status()
# HTTP status 207 is also a success status for Webdav FINDPROP,
# used by the version module.
if response.status_code not in (HTTPStatus.OK, HTTPStatus.MULTI_STATUS):
warning('Http response status {}'.format(response.status_code))
@deprecated_args(body='data')
def fetch(uri: str, method: str = 'GET', headers: Optional[dict] = None,
default_error_handling: bool = True,
use_fake_user_agent: Union[bool, str] = False, **kwargs):
"""
HTTP request.
See :py:obj:`requests.Session.request` for parameters.
:param uri: URL to send
:param method: HTTP method of the request (default: GET)
:param headers: dictionary of headers of the request
:param default_error_handling: Use default error handling
:param use_fake_user_agent: Set to True to use fake UA, False to use
pywikibot's UA, str to specify own UA. This behaviour might be
overridden by domain in config.
:keyword charset: Either a valid charset (usable for str.decode()) or None
        to automatically choose the charset from the returned header (defaults
to latin-1)
:type charset: CodecInfo, str, None
:keyword verify: verify the SSL certificate (default is True)
:type verify: bool or path to certificates
:keyword callbacks: Methods to call once data is fetched
:type callbacks: list of callable
:rtype: :py:obj:`requests.Response`
"""
# Change user agent depending on fake UA settings.
# Set header to new UA if needed.
headers = headers or {}
headers.update(config.extra_headers.copy() or {})
def assign_fake_user_agent(use_fake_user_agent, uri):
uri_domain = urlparse(uri).netloc
use_fake_user_agent = config.fake_user_agent_exceptions.get(
uri_domain, use_fake_user_agent)
if use_fake_user_agent is False:
return user_agent()
if use_fake_user_agent is True:
return fake_user_agent()
if use_fake_user_agent and isinstance(use_fake_user_agent, str):
return use_fake_user_agent # Custom UA.
raise ValueError('Invalid parameter: '
'use_fake_user_agent={}'.format(use_fake_user_agent))
def assign_user_agent(user_agent_format_string):
if not user_agent_format_string or '{' in user_agent_format_string:
return user_agent(None, user_agent_format_string)
# do nothing, it is already a UA
return user_agent_format_string
# If not already specified.
if 'user-agent' not in headers:
# Get fake UA exceptions from `fake_user_agent_exceptions` config.
headers['user-agent'] = assign_fake_user_agent(use_fake_user_agent,
uri)
# Already specified.
else:
headers['user-agent'] = assign_user_agent(headers.get('user-agent'))
callbacks = kwargs.pop('callbacks', [])
# error_handling_callback will be executed first.
if default_error_handling:
callbacks.insert(0, error_handling_callback)
charset = kwargs.pop('charset', None)
auth = get_authentication(uri)
if auth is not None and len(auth) == 4:
if isinstance(requests_oauthlib, ImportError):
warn(str(requests_oauthlib), ImportWarning)
error('OAuth authentication not supported: {}'
.format(requests_oauthlib))
auth = None
else:
auth = requests_oauthlib.OAuth1(*auth)
timeout = config.socket_timeout
old_validation = kwargs.pop('disable_ssl_certificate_validation', None)
if old_validation is not None:
issue_deprecation_warning('disable_ssl_certificate_validation',
instead='verify',
since='20201220')
kwargs.update(verify=not old_validation)
try:
# Note that the connections are pooled which mean that a future
# HTTPS request can succeed even if the certificate is invalid and
# verify=True, when a request with verify=False happened before
response = session.request(method, uri,
headers=headers, auth=auth, timeout=timeout,
**kwargs)
except Exception as e:
response = e
else:
response.encoding = _decide_encoding(response, charset)
for callback in callbacks:
callback(response)
return response
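# Minimal usage sketch for fetch() (illustrative; the URL and parameters are
# assumptions, not taken from pywikibot). ``charset`` and ``verify`` are the
# keyword arguments documented in the docstring above; remaining kwargs such as
# ``params`` are forwarded to requests.Session.request().
# r = fetch('https://www.mediawiki.org/w/api.php',
#           params={'action': 'query', 'meta': 'siteinfo', 'format': 'json'},
#           charset='utf-8', verify=True)
# print(r.status_code, r.encoding)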
def _get_encoding_from_response_headers(response) -> Optional[str]:
"""Return charset given by the response header."""
content_type = response.headers.get('content-type')
if not content_type:
return None
m = re.search('charset=(?P<charset>.*?$)', content_type)
if m:
header_encoding = m.group('charset')
elif 'json' in content_type:
# application/json | application/sparql-results+json
header_encoding = 'utf-8'
elif 'xml' in content_type:
header = response.content[:100].splitlines()[0] # bytes
m = re.search(
br'encoding=(["\'])(?P<encoding>.+?)\1', header)
if m:
header_encoding = m.group('encoding').decode('utf-8')
else:
header_encoding = 'utf-8'
else:
header_encoding = None
return header_encoding
def _decide_encoding(response, charset) -> Optional[str]:
"""Detect the response encoding."""
def _try_decode(content, encoding):
"""Helper function to try decoding."""
if encoding is None:
return None
try:
content.decode(encoding)
except LookupError:
pywikibot.warning('Unknown or invalid encoding {!r}'
.format(encoding))
except UnicodeDecodeError as e:
pywikibot.warning('{} found in {}'.format(e, content))
else:
return encoding
return None # let chardet do the job
header_encoding = _get_encoding_from_response_headers(response)
if header_encoding is None:
pywikibot.log('Http response does not contain a charset.')
if charset is None:
charset = response.request.headers.get('accept-charset')
# No charset requested, or in request headers or response headers.
# Defaults to latin1.
if charset is None and header_encoding is None:
return _try_decode(response.content, 'latin1')
if charset is None and header_encoding is not None:
return _try_decode(response.content, header_encoding)
if charset is not None and header_encoding is None:
return _try_decode(response.content, charset)
# Both charset and header_encoding are available.
try:
header_codecs = codecs.lookup(header_encoding)
except LookupError:
header_codecs = None
try:
charset_codecs = codecs.lookup(charset)
except LookupError:
charset_codecs = None
if header_codecs and charset_codecs and header_codecs != charset_codecs:
pywikibot.warning(
'Encoding "{}" requested but "{}" received in the '
'response header.'.format(charset, header_encoding))
_encoding = _try_decode(response.content, header_encoding) \
or _try_decode(response.content, charset)
return _encoding
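# Self-contained sketch (illustrative, not part of pywikibot) of the precedence
# implemented above: when both a caller-supplied charset and a header encoding
# are available, the header encoding is tried first and wins if it decodes the
# body. The helper name is hypothetical.
def _encoding_precedence_demo(content: bytes, header_encoding, charset):
    """Mirror the final branch of _decide_encoding() without any pywikibot state."""
    for candidate in (header_encoding, charset):
        if candidate is None:
            continue
        try:
            content.decode(candidate)
        except (LookupError, UnicodeDecodeError):
            continue
        return candidate
    return None  # nothing decodes; leave the decision to the caller
# _encoding_precedence_demo('café'.encode('utf-8'), 'utf-8', 'latin-1') -> 'utf-8'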
| 35.153696
| 80
| 0.643478
|
794c591674ac127c841213bf2df626ae02933e05
| 41,964
|
py
|
Python
|
pylib/cqlshlib/test/test_cqlsh_completion.py
|
jeffreyflukman/cassandra
|
d26f142b34681d047fe010c8ec9097add0b44d2a
|
[
"Apache-2.0"
] | 1
|
2018-09-20T16:17:29.000Z
|
2018-09-20T16:17:29.000Z
|
pylib/cqlshlib/test/test_cqlsh_completion.py
|
jeffreyflukman/cassandra
|
d26f142b34681d047fe010c8ec9097add0b44d2a
|
[
"Apache-2.0"
] | null | null | null |
pylib/cqlshlib/test/test_cqlsh_completion.py
|
jeffreyflukman/cassandra
|
d26f142b34681d047fe010c8ec9097add0b44d2a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
import unittest
import sys
BEL = '\x07' # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'
# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5
completion_separation_re = re.compile(r'\s+')
@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
class CqlshCompletionCase(BaseTestCase):
def setUp(self):
self.cqlsh_runner = testrun_cqlsh(cqlver=None, env={'COLUMNS': '100000'})
self.cqlsh = self.cqlsh_runner.__enter__()
def tearDown(self):
self.cqlsh_runner.__exit__(None, None, None)
def _get_completions(self, inputstring, split_completed_lines=True):
"""
Get results of tab completion in cqlsh. Returns a bare string if a
string completes immediately. Otherwise, returns a set of all
whitespace-separated tokens in the offered completions by default, or a
list of the lines in the offered completions if split_completed_lines is
False.
"""
self.cqlsh.send(inputstring)
self.cqlsh.send(TAB)
immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
immediate = immediate.replace(' \b', '')
self.assertEqual(immediate[:len(inputstring)], inputstring)
immediate = immediate[len(inputstring):]
immediate = immediate.replace(BEL, '')
if immediate:
return immediate
self.cqlsh.send(TAB)
choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
if choice_output == BEL:
choice_output = ''
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
choice_lines = choice_output.splitlines()
if choice_lines:
# ensure the last line of the completion is the prompt
prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
msg = ('Double-tab completion '
'does not print prompt for input "{}"'.format(inputstring))
self.assertRegexpMatches(choice_lines[-1], prompt_regex, msg=msg)
choice_lines = [line.strip() for line in choice_lines[:-1]]
choice_lines = [line for line in choice_lines if line]
if split_completed_lines:
                # One token set per completion line (built as a list so the
                # emptiness check below also works on Python 3, where map() is lazy).
                completed_lines = [set(completion_separation_re.split(line.strip()))
                                   for line in choice_lines]
if not completed_lines:
return set()
completed_tokens = set.union(*completed_lines)
return completed_tokens - {''}
else:
return choice_lines
assert False
def _trycompletions_inner(self, inputstring, immediate='', choices=(),
other_choices_ok=False,
split_completed_lines=True):
"""
        Test tab completion in cqlsh. Enters the text in inputstring, then
simulates a tab keypress to see what is immediately completed (this
should only happen when there is only one completion possible). If
there is an immediate completion, the new text is expected to match
'immediate'. If there is no immediate completion, another tab keypress
is simulated in order to get a list of choices, which are expected to
match the items in 'choices' (order is not important, but case is).
"""
completed = self._get_completions(inputstring,
split_completed_lines=split_completed_lines)
if immediate:
msg = 'cqlsh completed %r, but we expected %r' % (completed, immediate)
self.assertEqual(completed, immediate, msg=msg)
return
if other_choices_ok:
self.assertEqual(set(choices), completed.intersection(choices))
else:
self.assertEqual(set(choices), set(completed))
def trycompletions(self, inputstring, immediate='', choices=(),
other_choices_ok=False, split_completed_lines=True):
try:
self._trycompletions_inner(inputstring, immediate, choices,
other_choices_ok=other_choices_ok,
split_completed_lines=split_completed_lines)
finally:
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
def strategies(self):
return self.module.CqlRuleSet.replication_strategies
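# Illustrative, stand-alone sketch (not part of the original test suite): how
# the double-tab output captured above is turned into the token set that gets
# compared against ``choices``. The sample output string is made up.
def _tokenize_completion_output_demo(choice_output):
    """Hypothetical module-level version of the splitting in _get_completions."""
    lines = [line.strip() for line in choice_output.splitlines() if line.strip()]
    completed_lines = [set(completion_separation_re.split(line)) for line in lines]
    if not completed_lines:
        return set()
    return set.union(*completed_lines) - {''}
# _tokenize_completion_output_demo('ALTER   BEGIN\nCREATE  DROP')
# -> {'ALTER', 'BEGIN', 'CREATE', 'DROP'}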
class TestCqlshCompletion(CqlshCompletionCase):
cqlver = '3.1.6'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'EXPAND', 'SERIAL', 'TRUNCATE',
'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 'CLS'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
self.trycompletions('INSERT INTO ',
choices=('twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'system.',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.',
'songs'),
other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table',
immediate=' ')
self.trycompletions('INSERT INTO twenty_rows_composite_table ',
choices=['(', 'JSON'])
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ',
choices=(')', ','))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ',
immediate='c ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ',
choices=(',', ')'))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)',
immediate=' VALUES ( ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL',
immediate='UES ( ')
self.trycompletions(
'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (',
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'",
immediate=', ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs',"),
['<value for b (text)>'],
split_completed_lines=False)
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam')"),
immediate=' ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') "),
choices=[';', 'USING', 'IF'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam');"),
choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY',
'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP',
'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING',
'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING',
'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit',
'CLEAR', 'CLS'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') US"),
immediate='ING T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING"),
immediate=' T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING T"),
choices=['TTL', 'TIMESTAMP'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TT"),
immediate='L ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TI"),
immediate='MESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"),
immediate='ND TTL ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"),
immediate='ND TIMESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "),
choices=[])
def test_complete_in_update(self):
self.trycompletions("UPD", immediate="ATE ")
self.trycompletions("UPDATE ",
choices=['twenty_rows_table',
'users', 'has_all_types', 'system.',
'ascii_with_special_chars',
'empty_composite_table', 'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs'],
other_choices_ok=True)
self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET'])
self.trycompletions("UPDATE empty_table S",
immediate='ET lonelycol = ')
self.trycompletions("UPDATE empty_table SET lon",
immediate='elycol = ')
self.trycompletions("UPDATE empty_table SET lonelycol",
immediate=' = ')
self.trycompletions("UPDATE empty_table U", immediate='SING T')
self.trycompletions("UPDATE empty_table USING T",
choices=["TTL", "TIMESTAMP"])
self.trycompletions("UPDATE empty_table SET lonelycol = ",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eg",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'",
choices=[',', 'WHERE'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel",
immediate='ykey ')
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ",
choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '['])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ",
choices=['AND', 'IF', ';'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ",
choices=[',', ')'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ",
choices=['=', '<=', '>=', '<', '>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ",
choices=[';', 'AND', 'IF'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ",
choices=['EXISTS', '<quotedName>', '<identifier>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.'])
def test_complete_in_delete(self):
self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
self.trycompletions('DELETE a ', choices=['FROM', '[', '.', ','])
self.trycompletions('DELETE a [',
choices=['<wholenumber>', 'false', '-', '<uuid>',
'<pgStringLiteral>', '<float>', 'TOKEN',
'<identifier>', '<quotedStringLiteral>',
'{', '[', 'NULL', 'true', '<blobLiteral>'])
self.trycompletions('DELETE a, ',
choices=['<identifier>', '<quotedName>'])
self.trycompletions('DELETE a FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_auth.', 'system_distributed.',
'system_schema.', 'system_traces.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM twenty_rows_composite_table ',
choices=['USING', 'WHERE'])
self.trycompletions('DELETE FROM twenty_rows_composite_table U',
immediate='SING TIMESTAMP ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ',
immediate='WHERE ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ',
choices=['a', 'b', 'TOKEN('])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ',
choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(',
immediate='a ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a',
immediate=' ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ',
choices=[')', ','])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ',
choices=['>=', '<=', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ',
choices=['false', 'true', '<pgStringLiteral>',
'token(', '-', '<float>', 'TOKEN',
'<identifier>', '<uuid>', '{', '[', 'NULL',
'<quotedStringLiteral>', '<blobLiteral>',
'<wholenumber>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) '),
choices=['AND', 'IF', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF '),
choices=['EXISTS', '<identifier>', '<quotedName>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b '),
choices=['>=', '!=', '<=', 'IN', '=', '<', '>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 '),
choices=['AND', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 AND '),
choices=['<identifier>', '<quotedName>'])
self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE "
"b = 'eggs'"),
choices=['AND', 'IF', ';'])
def test_complete_in_batch(self):
pass
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF'))
self.trycompletions('create keyspace moo ',
"WITH replication = {'class': '")
self.trycompletions('create keyspace "12SomeName" with ',
"replication = {'class': '")
self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
choices=('AND', ';'))
self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
"replication = {'class': '")
self.trycompletions("create keyspace moo with replication", " = {'class': '")
self.trycompletions("create keyspace moo with replication=", " {'class': '")
self.trycompletions("create keyspace moo with replication={", "'class':'")
self.trycompletions("create keyspace moo with replication={'class'", ":'")
self.trycompletions("create keyspace moo with replication={'class': ", "'")
self.trycompletions("create keyspace moo with replication={'class': '", "",
choices=self.strategies())
# ttl is an "unreserved keyword". should work
self.trycompletions("create keySPACE ttl with replication ="
"{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
self.trycompletions("create keyspace ttl with replication ="
"{'class':'SimpleStrategy',", " 'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', ", "'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', 'repl", "ication_factor'")
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': ", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1, ",
'', choices=())
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1} ",
'', choices=('AND', ';'))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'NetworkTopologyStrategy', ", '',
choices=('<dc_name>',))
self.trycompletions("create keyspace \"PB and J\" with replication={"
"'class': 'NetworkTopologyStrategy'", ', ')
self.trycompletions("create keyspace PBJ with replication={"
"'class': 'NetworkTopologyStrategy'} and ",
"durable_writes = '")
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
"pleStrategy'")
def test_complete_in_drop(self):
self.trycompletions('DR', immediate='OP ')
self.trycompletions('DROP ',
choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION',
'INDEX', 'KEYSPACE', 'ROLE', 'TABLE',
'TRIGGER', 'TYPE', 'USER', 'MATERIALIZED'])
def test_complete_in_drop_keyspace(self):
self.trycompletions('DROP K', immediate='EYSPACE ')
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DROP KEYSPACE ',
choices=['IF', quoted_keyspace])
self.trycompletions('DROP KEYSPACE ' + quoted_keyspace,
choices=[';'])
self.trycompletions('DROP KEYSPACE I',
immediate='F EXISTS ' + quoted_keyspace + ';')
def create_columnfamily_table_template(self, name):
"""Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. Since
they're synonyms, they should have the same completion behavior, so this
test avoids duplication between tests for the two statements."""
prefix = 'CREATE ' + name + ' '
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions(prefix + '',
choices=['IF', quoted_keyspace, '<new_table_name>'])
self.trycompletions(prefix + 'IF ',
immediate='NOT EXISTS ')
self.trycompletions(prefix + 'IF NOT EXISTS ',
choices=['<new_table_name>', quoted_keyspace])
self.trycompletions(prefix + 'IF NOT EXISTS new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace, choices=['.', '('])
self.trycompletions(prefix + quoted_keyspace + '( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + quoted_keyspace + '.',
choices=['<new_table_name>'])
self.trycompletions(prefix + quoted_keyspace + '.new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace + '.new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a ine',
immediate='t ')
self.trycompletions(prefix + ' new_table (col_a int ',
choices=[',', 'PRIMARY'])
self.trycompletions(prefix + ' new_table (col_a int P',
immediate='RIMARY KEY ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ',
choices=[')', ','])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,',
choices=['<identifier>', '<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)',
immediate=' ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ',
choices=[';', 'WITH'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W',
immediate='ITH ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'speculative_write_threshold', 'cdc'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'speculative_write_threshold', 'cdc'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ',
immediate='= ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ',
choices=['<float_between_0_and_1>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ',
immediate="= {'class': '")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': '",
choices=['SizeTieredCompactionStrategy',
'LeveledCompactionStrategy',
'DateTieredCompactionStrategy',
'TimeWindowCompactionStrategy'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'S",
immediate="izeTieredCompactionStrategy'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'",
choices=['}', ','])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', ",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', '",
choices=['bucket_high', 'bucket_low', 'class',
'enabled', 'max_threshold',
'min_sstable_size', 'min_threshold',
'tombstone_compaction_interval',
'tombstone_threshold',
'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'}",
choices=[';', 'AND'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'} AND ",
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'speculative_write_threshold', 'cdc'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'DateTieredCompactionStrategy', '",
choices=['base_time_seconds', 'max_sstable_age_days',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'max_window_size_seconds', 'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'TimeWindowCompactionStrategy', '",
choices=['compaction_window_unit', 'compaction_window_size',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
def test_complete_in_create_columnfamily(self):
self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
self.trycompletions('CREATE CO', immediate='LUMNFAMILY ')
self.create_columnfamily_table_template('COLUMNFAMILY')
def test_complete_in_create_table(self):
self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE'])
self.trycompletions('CREATE TA', immediate='BLE ')
self.create_columnfamily_table_template('TABLE')
def test_complete_in_describe(self):
"""
Tests for Cassandra-10733
"""
self.trycompletions('DES', immediate='C')
# quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DESCR', immediate='IBE ')
self.trycompletions('DESC TABLE ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_distributed.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC TYPE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
self.trycompletions('DESC FUNCTION ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'fbestband',
'fbestsong',
'fmax',
'fmin',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC AGGREGATE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'aggmin',
'aggmax',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
# Unfortunately these commented tests will not work. This is due to the keyspace name containing quotes;
# cqlsh auto-completes a DESC differently when the keyspace contains quotes. I'll leave the
# test here though in case we ever change this script to test using keyspace names without
# quotes
# self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".',
choices=['twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'songs'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".',
choices=['address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f',
choices=['fbestband',
'fbestsong',
'fmax',
'fmin'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".', immediate='aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm',
choices=['aggmin',
'aggmax'],
other_choices_ok=True)
def test_complete_in_drop_columnfamily(self):
pass
def test_complete_in_truncate(self):
pass
def test_complete_in_alter_columnfamily(self):
pass
def test_complete_in_use(self):
pass
def test_complete_in_create_index(self):
pass
def test_complete_in_drop_index(self):
pass
| 52.259029
| 119
| 0.509151
|
794c5920c867bb2a19f24184f5d218d7b12a4b8a
| 463
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_wound_strength_c.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_wound_strength_c.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_wound_strength_c.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/chemistry/shared_medpack_wound_strength_c.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.235294
| 89
| 0.736501
|
794c5978eec62d1314599c70b192749497444e67
| 8,968
|
py
|
Python
|
satchmo/apps/product/modules/custom/migrations/0002_update_contenttypes.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 1
|
2020-04-23T10:32:05.000Z
|
2020-04-23T10:32:05.000Z
|
satchmo/apps/product/modules/custom/migrations/0002_update_contenttypes.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/product/modules/custom/migrations/0002_update_contenttypes.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 1
|
2020-04-23T10:32:11.000Z
|
2020-04-23T10:32:11.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from product.migrations import UpdateContentTypeMigration
class Migration(UpdateContentTypeMigration):
_app_label = 'custom'
depends_on = (
('product', '0011_split_products'),
)
needed_by = (
('product', '0012_update_contenttypes'),
)
models = {
'custom.customproduct': {
'Meta': {'object_name': 'CustomProduct'},
'deferred_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'downpayment': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'option_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.OptionGroup']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'custom.customtextfield': {
'Meta': {'unique_together': "(('slug', 'products'),)", 'object_name': 'CustomTextField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'price_change': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '14', 'decimal_places': '6', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custom_text_fields'", 'to': "orm['custom.CustomProduct']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'custom.customtextfieldtranslation': {
'Meta': {'unique_together': "(('customtextfield', 'languagecode', 'version'),)", 'object_name': 'CustomTextFieldTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'customtextfield': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['custom.CustomTextField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.category': {
'Meta': {'unique_together': "(('site', 'slug'),)", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'related_categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_categories'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'product.optiongroup': {
'Meta': {'object_name': 'OptionGroup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'product.product': {
'Meta': {'unique_together': "(('site', 'sku'), ('site', 'slug'))", 'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'also_purchased': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'also_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'height_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_in_stock': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'length_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_items': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'shipclass': ('django.db.models.fields.CharField', [], {'default': "'DEFAULT'", 'max_length': '10'}),
'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'taxClass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.TaxClass']", 'null': 'True', 'blank': 'True'}),
'taxable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'total_sold': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'weight_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'width_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'product.taxclass': {
'Meta': {'object_name': 'TaxClass'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['custom']
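    # Note (illustrative, not generated by South): the ``models`` dict above is a
    # frozen snapshot of the ORM at the time the migration was created. Data
    # migrations reach it through the ``orm`` argument, e.g. (hypothetical):
    # def forwards(self, orm):
    #     for product in orm['custom.CustomProduct'].objects.all():
    #         ...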
| 78.666667
| 198
| 0.562556
|
794c59fb0c14d93bd8f7e64d2b510711d33d98b8
| 2,032
|
py
|
Python
|
odk_aggregation_tool/gui/utils.py
|
lindsay-stevens/odk_aggregation_tool
|
8f02a8cf7b11fba89a519e025d25c2dbf7f83b06
|
[
"MIT"
] | null | null | null |
odk_aggregation_tool/gui/utils.py
|
lindsay-stevens/odk_aggregation_tool
|
8f02a8cf7b11fba89a519e025d25c2dbf7f83b06
|
[
"MIT"
] | null | null | null |
odk_aggregation_tool/gui/utils.py
|
lindsay-stevens/odk_aggregation_tool
|
8f02a8cf7b11fba89a519e025d25c2dbf7f83b06
|
[
"MIT"
] | null | null | null |
import os
WRAP_CHARS = "\" \r\n\t"
def not_empty(variable_name, path):
if len(path) == 0:
valid = False
msg = "{0} is empty. Please either:\n" \
"- Enter the path, or\n" \
"- Select the path using the 'Browse...' button."
msg = msg.format(variable_name)
else:
valid = True
msg = None
return valid, msg
def folder_exists(variable_name, path):
if os.path.isdir(path):
valid = True
msg = None
else:
valid = False
msg = "{0} does not correspond to an existing directory.\n" \
"Please check the path and try again."
msg = msg.format(variable_name)
return valid, msg
def clean_path(path):
if path is None:
path = ""
else:
if len(path) > 0:
            path = os.path.normpath(path.strip(WRAP_CHARS))
return path
def validations(variable_name, cleaned_path):
yield not_empty(variable_name=variable_name, path=cleaned_path)
yield folder_exists(variable_name=variable_name, path=cleaned_path)
def validate_path(variable_name, path):
"""
Check if the input path is valid, and raise ValueError(s) if not.
    :param variable_name: str. Name of the variable to state in error messages.
    :param path: str. Path to check.
    :return: str. Valid path to do further work with.
"""
cleaned_path = clean_path(path=path)
kw = {"variable_name": variable_name, "path": cleaned_path}
checks = [(not_empty, kw), (folder_exists, kw)]
for func, kwargs in checks:
valid, message = func(**kwargs)
if not valid:
raise ValueError("Input Error.\n\n{0}".format(message))
return cleaned_path
def format_output(header, content):
"""
    Format the result header and content, separated by double newlines.
"""
if content is not None:
if type(content) == str:
content = [content]
return "\n\n".join([header, *content])
else:
return header
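# Usage sketch (illustrative; the header and content strings are made up).
# validate_path() raises ValueError with a user-facing message when any check
# fails; format_output() joins a header and its content with blank lines.
if __name__ == '__main__':
    print(format_output("Aggregation finished.", ["3 forms read", "1 file written"]))
    # try:
    #     xlsform_dir = validate_path("XLSForm directory", '"C:/forms" ')
    # except ValueError as error:
    #     print(format_output("Aggregation not started.", str(error)))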
| 27.459459
| 75
| 0.609744
|
794c5b8bab11f92474615cce40bb701e69b55f9f
| 1,902
|
py
|
Python
|
tensorflow/contrib/summary/summary_test_util.py
|
yasunakacho/tensorflow
|
cf36c3fdefda3c874cd8cebb779744c5035bb435
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/summary/summary_test_util.py
|
yasunakacho/tensorflow
|
cf36c3fdefda3c874cd8cebb779744c5035bb435
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/summary/summary_test_util.py
|
yasunakacho/tensorflow
|
cf36c3fdefda3c874cd8cebb779744c5035bb435
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import gfile
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, "Found not exactly one file in logdir: %s" % files
return events_from_file(os.path.join(logdir, files[0]))
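# Usage sketch (illustrative; '/tmp/my_logdir' is a hypothetical path). Each
# returned tf.Event proto carries fields such as ``wall_time``, ``step`` and an
# optional ``summary`` whose tagged values can be inspected directly:
# for event in events_from_logdir('/tmp/my_logdir'):
#     for value in event.summary.value:
#         print(event.step, value.tag)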
| 30.190476
| 80
| 0.715563
|
794c5ccbf02c148f987e2682df813674b8f27779
| 7,943
|
py
|
Python
|
models/modules.py
|
nikhilsu/Mixed-modal-learning
|
4e18877cd010665324d46885530e81226cfc1821
|
[
"MIT"
] | null | null | null |
models/modules.py
|
nikhilsu/Mixed-modal-learning
|
4e18877cd010665324d46885530e81226cfc1821
|
[
"MIT"
] | null | null | null |
models/modules.py
|
nikhilsu/Mixed-modal-learning
|
4e18877cd010665324d46885530e81226cfc1821
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import time
from tensorflow.contrib.rnn import GRUCell
from util.infolog import log
def prenet(inputs, is_training, layer_sizes, scope=None):
x = inputs
drop_rate = 0.5 if is_training else 0.0
with tf.variable_scope(scope or 'prenet'):
for i, size in enumerate(layer_sizes):
dense = tf.layers.dense(x, units=size, activation=tf.nn.relu, name='dense_%d' % (i + 1))
x = tf.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i + 1))
return x
def encoder_cbhg(inputs, input_lengths, is_training, depth):
input_channels = inputs.get_shape()[2]
return cbhg(
inputs,
input_lengths,
is_training,
scope='encoder_cbhg',
K=16,
projections=[128, input_channels],
depth=depth)
def post_cbhg(inputs, input_dim, is_training, depth):
return cbhg(
inputs,
None,
is_training,
scope='post_cbhg',
K=8,
projections=[256, input_dim],
depth=depth)
def cbhg(inputs, input_lengths, is_training, scope, K, projections, depth):
with tf.variable_scope(scope):
with tf.variable_scope('conv_bank'):
# Convolution bank: concatenate on the last axis to stack channels from all convolutions
conv_outputs = tf.concat(
[conv1d(inputs, k, 128, tf.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K + 1)],
axis=-1
)
# Maxpooling:
maxpool_output = tf.layers.max_pooling1d(
conv_outputs,
pool_size=2,
strides=1,
padding='same')
# Two projection layers:
proj1_output = conv1d(maxpool_output, 3, projections[0], tf.nn.relu, is_training, 'proj_1')
proj2_output = conv1d(proj1_output, 3, projections[1], None, is_training, 'proj_2')
# Residual connection:
highway_input = proj2_output + inputs
half_depth = depth // 2
assert half_depth * 2 == depth, 'encoder and postnet depths must be even.'
# Handle dimensionality mismatch:
if highway_input.shape[2] != half_depth:
highway_input = tf.layers.dense(highway_input, half_depth)
# 4-layer HighwayNet:
for i in range(4):
highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1), half_depth)
rnn_input = highway_input
# Bidirectional RNN
outputs, states = tf.nn.bidirectional_dynamic_rnn(
GRUCell(half_depth),
GRUCell(half_depth),
rnn_input,
sequence_length=input_lengths,
dtype=tf.float32)
return tf.concat(outputs, axis=2) # Concat forward and backward
def highwaynet(inputs, scope, depth):
with tf.variable_scope(scope):
H = tf.layers.dense(
inputs,
units=depth,
activation=tf.nn.relu,
name='H')
T = tf.layers.dense(
inputs,
units=depth,
activation=tf.nn.sigmoid,
name='T',
bias_initializer=tf.constant_initializer(-1.0))
return H * T + inputs * (1.0 - T)
def conv1d(inputs, kernel_size, channels, activation, is_training, scope):
with tf.variable_scope(scope):
conv1d_output = tf.layers.conv1d(
inputs,
filters=channels,
kernel_size=kernel_size,
activation=activation,
padding='same')
return tf.layers.batch_normalization(conv1d_output, training=is_training)
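# Minimal graph-mode wiring sketch (illustrative; shapes, sizes and names are
# assumptions, not taken from this repository). A prenet feeds the CBHG encoder
# defined above and yields a [batch, time, depth] representation:
# inputs = tf.placeholder(tf.float32, [None, None, 256])    # [B, T, embedding]
# lengths = tf.placeholder(tf.int32, [None])                 # [B]
# prenet_out = prenet(inputs, is_training=True, layer_sizes=[256, 128])
# encoded = encoder_cbhg(prenet_out, lengths, is_training=True, depth=256)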
VGG_MEAN = [103.939, 116.779, 123.68]
# noinspection PyMethodMayBeStatic
class Vgg19:
def __init__(self, vgg19_npy_path):
self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()
def build(self, rgb):
"""
        Load variables from the npy file to build the VGG19 graph.
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
"""
start_time = time.time()
log('Building VGG19. Started at: %ds' % start_time)
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
self.pool3 = self.max_pool(self.conv3_4, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
self.pool4 = self.max_pool(self.conv4_4, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
assert self.fc6.get_shape().as_list()[1:] == [4096]
self.relu6 = tf.nn.relu(self.fc6)
self.fc7 = self.fc_layer(self.relu6, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
self.fc8 = self.fc_layer(self.relu7, "fc8")
log("finished building VGG19 in %ds" % (time.time() - start_time))
return self.fc8
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
def get_conv_filter(self, name):
return tf.constant(self.data_dict[name][0], name="filter")
def get_bias(self, name):
return tf.constant(self.data_dict[name][1], name="biases")
def get_fc_weight(self, name):
return tf.constant(self.data_dict[name][0], name="weights")
def vgg19_pretrained_last_fc(rgb_input, model_path):
return Vgg19(model_path).build(rgb_input)
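# Usage sketch (illustrative; the weights path is hypothetical). The helper
# above expects RGB images shaped [batch, 224, 224, 3] scaled to [0, 1] and
# returns the fc8 logits of the pre-trained VGG19:
# images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# logits = vgg19_pretrained_last_fc(images, 'weights/vgg19.npy')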
| 34.534783
| 108
| 0.605187
|
794c5cdfa250b8fe50f6e12fe65941dbb958c50c
| 6,421
|
py
|
Python
|
lib/services/server/ncloud_server/model/stop_server_instances_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 12
|
2018-11-20T04:30:49.000Z
|
2021-11-09T12:34:26.000Z
|
lib/services/server/ncloud_server/model/stop_server_instances_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 1
|
2019-01-24T15:56:15.000Z
|
2019-05-31T07:56:55.000Z
|
lib/services/server/ncloud_server/model/stop_server_instances_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 6
|
2018-06-29T03:45:50.000Z
|
2022-03-18T01:51:45.000Z
|
# coding: utf-8
"""
server
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_server.model.server_instance import ServerInstance # noqa: F401,E501
class StopServerInstancesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'return_code': 'str',
'return_message': 'str',
'total_rows': 'int',
'server_instance_list': 'list[ServerInstance]'
}
attribute_map = {
'request_id': 'requestId',
'return_code': 'returnCode',
'return_message': 'returnMessage',
'total_rows': 'totalRows',
'server_instance_list': 'serverInstanceList'
}
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, server_instance_list=None): # noqa: E501
"""StopServerInstancesResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._return_code = None
self._return_message = None
self._total_rows = None
self._server_instance_list = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if return_code is not None:
self.return_code = return_code
if return_message is not None:
self.return_message = return_message
if total_rows is not None:
self.total_rows = total_rows
if server_instance_list is not None:
self.server_instance_list = server_instance_list
@property
def request_id(self):
"""Gets the request_id of this StopServerInstancesResponse. # noqa: E501
:return: The request_id of this StopServerInstancesResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this StopServerInstancesResponse.
:param request_id: The request_id of this StopServerInstancesResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def return_code(self):
"""Gets the return_code of this StopServerInstancesResponse. # noqa: E501
:return: The return_code of this StopServerInstancesResponse. # noqa: E501
:rtype: str
"""
return self._return_code
@return_code.setter
def return_code(self, return_code):
"""Sets the return_code of this StopServerInstancesResponse.
:param return_code: The return_code of this StopServerInstancesResponse. # noqa: E501
:type: str
"""
self._return_code = return_code
@property
def return_message(self):
"""Gets the return_message of this StopServerInstancesResponse. # noqa: E501
:return: The return_message of this StopServerInstancesResponse. # noqa: E501
:rtype: str
"""
return self._return_message
@return_message.setter
def return_message(self, return_message):
"""Sets the return_message of this StopServerInstancesResponse.
:param return_message: The return_message of this StopServerInstancesResponse. # noqa: E501
:type: str
"""
self._return_message = return_message
@property
def total_rows(self):
"""Gets the total_rows of this StopServerInstancesResponse. # noqa: E501
:return: The total_rows of this StopServerInstancesResponse. # noqa: E501
:rtype: int
"""
return self._total_rows
@total_rows.setter
def total_rows(self, total_rows):
"""Sets the total_rows of this StopServerInstancesResponse.
:param total_rows: The total_rows of this StopServerInstancesResponse. # noqa: E501
:type: int
"""
self._total_rows = total_rows
@property
def server_instance_list(self):
"""Gets the server_instance_list of this StopServerInstancesResponse. # noqa: E501
:return: The server_instance_list of this StopServerInstancesResponse. # noqa: E501
:rtype: list[ServerInstance]
"""
return self._server_instance_list
@server_instance_list.setter
def server_instance_list(self, server_instance_list):
"""Sets the server_instance_list of this StopServerInstancesResponse.
:param server_instance_list: The server_instance_list of this StopServerInstancesResponse. # noqa: E501
:type: list[ServerInstance]
"""
self._server_instance_list = server_instance_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StopServerInstancesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
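# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): constructing the
# response model by hand and inspecting it. In real use the SDK's ApiClient
# deserializes API responses into this class; the values below are
# illustrative only.
if __name__ == "__main__":
    response = StopServerInstancesResponse(
        request_id="example-request-id",
        return_code="0",
        return_message="success",
        total_rows=1,
    )
    print(response.to_dict())
    print(response)  # __repr__ falls back to the pretty-printed dict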
| 29.726852
| 137
| 0.62389
|
794c5cfc8345aff5dd102eae8c6a5fce80f563f5
| 2,072
|
py
|
Python
|
setup.py
|
mlodic/client-python
|
bd312c29744449919c422ea24c4ee409434b3a14
|
[
"Apache-2.0"
] | 42
|
2019-06-26T19:07:26.000Z
|
2022-03-24T00:44:53.000Z
|
setup.py
|
mlodic/client-python
|
bd312c29744449919c422ea24c4ee409434b3a14
|
[
"Apache-2.0"
] | 178
|
2019-06-23T23:04:04.000Z
|
2022-03-30T18:26:38.000Z
|
setup.py
|
mlodic/client-python
|
bd312c29744449919c422ea24c4ee409434b3a14
|
[
"Apache-2.0"
] | 55
|
2019-07-11T17:16:34.000Z
|
2022-03-30T16:33:26.000Z
|
#!/usr/bin/python3
# coding: utf-8
import os
import pathlib
import sys
from setuptools import setup
from setuptools.command.install import install
VERSION = "5.1.3"
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
# Get requirements from files
requirements = (HERE / "requirements.txt").read_text().split("\n")
requirements_test = (HERE / "test-requirements.txt").read_text().split("\n")
class VerifyVersionCommand(install):
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name="pycti",
version=VERSION,
python_requires=">=3.7",
description="Python API client for OpenCTI.",
long_description=README,
long_description_content_type="text/markdown",
author="OpenCTI",
author_email="contact@opencti.io",
maintainer="OpenCTI",
url="https://github.com/OpenCTI-Platform/client-python",
license="Apache",
packages=["pycti", "pycti.api", "pycti.connector", "pycti.entities", "pycti.utils"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Natural Language :: French",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
],
include_package_data=True,
install_requires=requirements,
cmdclass={"verify": VerifyVersionCommand},
extras_require={
"dev": requirements_test + requirements,
"doc": ["autoapi", "sphinx_rtd_theme", "sphinx-autodoc-typehints"],
}, # Optional
)
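# ---------------------------------------------------------------------------
# Hedged usage notes (not part of the original setup.py). Typical invocations,
# assuming a CircleCI environment for the "verify" step:
#
#   pip install .                              # install pycti and its runtime requirements
#   pip install ".[dev]"                       # additionally install the test requirements
#   CIRCLE_TAG=5.1.3 python setup.py verify    # succeeds only if the tag matches VERSION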
| 31.393939
| 88
| 0.647201
|
794c5e394d88cbda10902153f4691fbc062458c3
| 28
|
py
|
Python
|
tests/__init__.py
|
fabaff/python-iceportal
|
8c722606ab3fdc6b4687927b02fcc4e1038fb00d
|
[
"MIT"
] | 1
|
2019-12-26T18:36:54.000Z
|
2019-12-26T18:36:54.000Z
|
tests/__init__.py
|
fabaff/python-iceportal
|
8c722606ab3fdc6b4687927b02fcc4e1038fb00d
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
fabaff/python-iceportal
|
8c722606ab3fdc6b4687927b02fcc4e1038fb00d
|
[
"MIT"
] | null | null | null |
"""Tests for the client."""
| 14
| 27
| 0.607143
|
794c5fac2975efb116bf195f23b55fbdedd53ba0
| 7,505
|
py
|
Python
|
src/datadog_api_client/v2/model/security_monitoring_signal_attributes.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v2/model/security_monitoring_signal_attributes.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v2/model/security_monitoring_signal_attributes.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class SecurityMonitoringSignalAttributes(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'attributes': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'message': (str,), # noqa: E501
'tags': ([bool, date, datetime, dict, float, int, list, str, none_type],), # noqa: E501
'timestamp': (datetime,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'attributes': 'attributes', # noqa: E501
'message': 'message', # noqa: E501
'tags': 'tags', # noqa: E501
'timestamp': 'timestamp', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SecurityMonitoringSignalAttributes - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            attributes ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): A JSON object of attributes in the security signal. [optional]  # noqa: E501
            message (str): The message in the security signal defined by the rule that generated the signal. [optional]  # noqa: E501
            tags ([bool, date, datetime, dict, float, int, list, str, none_type]): An array of tags associated with the security signal. [optional]  # noqa: E501
            timestamp (datetime): The timestamp of the security signal. [optional]  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 42.885714
| 173
| 0.593471
|
794c5fe16a7fb3be1eb6875a7681459c7d3ec6e2
| 6,432
|
py
|
Python
|
S4/S4 Library/simulation/objects/components/get_put_component_mixin.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/objects/components/get_put_component_mixin.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/objects/components/get_put_component_mixin.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from animation.posture_manifest import AnimationParticipant, PostureManifestEntry, MATCH_ANY, PostureManifest, SlotManifest
from carry.carry_utils import PARAM_CARRY_TRACK
from interactions.constraint_variants import TunableConstraintVariant
from interactions.constraints import Anywhere
from interactions.utils.animation_reference import TunableAnimationReference
from objects.components import ComponentMetaclass
from postures.posture_specs import PostureSpecVariable
from postures.posture_state_spec import PostureStateSpec
from sims4.tuning.tunable import HasTunableFactory, TunableVariant, TunableList, AutoFactoryInit, HasTunableSingletonFactory
import interactions.constraints
import sims4.log
logger = sims4.log.Logger('GetPutComponent', default_owner='tastle')
class GenericAnimation(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'constraints': TunableList(description='\n The list of constraints that the Sim will fulfill before\n using the generic animation.\n \n Example:\n A cone constraint and a facing constraint to get the\n Sim to stand in front of and facing the object.\n ', tunable=TunableConstraintVariant())}
def get_access_animation_factory(self, is_put):
if is_put:
return GetPutComponentMixin.GENERIC_PUT_ANIMATION
else:
return GetPutComponentMixin.GENERIC_GET_ANIMATION
def get_access_constraint(self, put, inventory_owner):
entries = []
entries.append(PostureManifestEntry(None, MATCH_ANY, MATCH_ANY, MATCH_ANY, MATCH_ANY, MATCH_ANY, inventory_owner))
surface_posture_manifest = PostureManifest(entries)
surface_posture_state_spec = PostureStateSpec(surface_posture_manifest, SlotManifest().intern(), PostureSpecVariable.ANYTHING)
constraint_total = interactions.constraints.Constraint(debug_name='Required Surface For Generic Get Put', posture_state_spec=surface_posture_state_spec)
for constraint_factory in self.constraints:
constraint = constraint_factory.create_constraint(None, target=inventory_owner)
constraint_total = constraint_total.intersect(constraint)
return constraint_total
class CustomAnimation(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'get': TunableAnimationReference(callback=None), 'put': TunableAnimationReference(callback=None)}
def get_access_animation_factory(self, is_put):
if is_put:
return self.put
else:
return self.get
def get_access_constraint(self, is_put, inventory_owner):
animation_factory = self.get_access_animation_factory(is_put)
if animation_factory is None:
return
constraint = animation_factory().get_constraint()
return constraint
class GetPutComponentMixin(HasTunableFactory, metaclass=ComponentMetaclass):
GENERIC_GET_ANIMATION = TunableAnimationReference()
GENERIC_PUT_ANIMATION = TunableAnimationReference()
GENERIC_CONSTRAINT = Anywhere()
@classmethod
def register_tuned_animation(cls, *_, **__):
pass
@classmethod
def add_auto_constraint(cls, participant_type, tuned_constraint, **kwargs):
cls.GENERIC_CONSTRAINT = cls.GENERIC_CONSTRAINT.intersect(tuned_constraint)
FACTORY_TUNABLES = {'get_put': TunableVariant(description='\n This controls the behavior of a Sim who wants to get from or\n put to the component owner.\n ', default='none', locked_args={'none': None}, generic=GenericAnimation.TunableFactory(), custom=CustomAnimation.TunableFactory())}
def __init__(self, *args, get_put=None, **kwargs):
super().__init__(*args, **kwargs)
self._get_put = get_put
@property
def has_get_put(self):
return self._get_put is not None
def _get_access_constraint(self, sim, is_put, carry_target, resolver=None):
if self._get_put is None:
return
constraint = self._get_put.get_access_constraint(is_put, self.owner)
def constraint_resolver(animation_participant, default=None):
if resolver is not None:
result = resolver(animation_participant, default=default)
if result is not default:
return result
if animation_participant == AnimationParticipant.ACTOR:
return sim
if animation_participant in (AnimationParticipant.CARRY_TARGET, AnimationParticipant.TARGET):
return carry_target
elif animation_participant == AnimationParticipant.SURFACE:
return self.owner
return default
concrete_constraint = constraint.apply_posture_state(None, constraint_resolver)
return concrete_constraint
def get_surface_target(self, sim):
inv_owner = self.owner
body_target = sim.posture.target
if body_target is not None and body_target.inventory_component is not None and body_target.inventory_component.inventory_type == inv_owner.inventory_component.inventory_type:
return body_target
return inv_owner
    def _get_access_animation(self, is_put: bool):
if self._get_put is None:
return
animation_factory = self._get_put.get_access_animation_factory(is_put)
if animation_factory is None:
return
def append_animations(arb, sim, carry_target, carry_track, animation_context, surface_height):
asm = sim.posture.get_registered_asm(animation_context, animation_factory.asm_key, None, use_cache=False)
asm.set_parameter('surfaceHeight', surface_height)
result = sim.posture.setup_asm_interaction(asm, sim, None, animation_factory.actor_name, None, carry_target=carry_target, carry_target_name=animation_factory.carry_target_name, surface_target=self.get_surface_target(sim), carry_track=carry_track)
if not result:
logger.error("Couldn't setup get-put asm {} for {}. {}", asm, self.owner, result)
asm.set_actor_parameter(animation_factory.carry_target_name, carry_target, PARAM_CARRY_TRACK, carry_track.name.lower())
animation_factory.append_to_arb(asm, arb)
animation_factory.append_exit_to_arb(asm, arb)
return append_animations
| 54.05042
| 428
| 0.725435
|
794c5feeb1f47bb6cd8d15bf6c5d8b9bce55a9bc
| 1,801
|
py
|
Python
|
2021/python/dec13/solution.py
|
razzlestorm/advent-of-code
|
288488539ed64e078368ac012b7f794faa4776ba
|
[
"MIT"
] | null | null | null |
2021/python/dec13/solution.py
|
razzlestorm/advent-of-code
|
288488539ed64e078368ac012b7f794faa4776ba
|
[
"MIT"
] | null | null | null |
2021/python/dec13/solution.py
|
razzlestorm/advent-of-code
|
288488539ed64e078368ac012b7f794faa4776ba
|
[
"MIT"
] | null | null | null |
"""
from pathlib import Path
from collections import defaultdict
from typing import List
FILE_DIR = Path(__file__).parent
if __name__ == "__main__":
DATA = (FILE_DIR / "input.txt").read_text().strip()
lines = [x.strip() for x in DATA.split("\n")]
divide = lines.index("")
dots = set(tuple(map(int, line.split(",")))
for line in lines[: divide])
print(len(dots))
for instruction in lines[divide + 1 :]:
axis, position = instruction.split()[2].split("=")
position = int(position)
update = set()
for x, y in dots:
if axis == "x" and x > position:
update.add((2 * position - x, y))
elif axis == "y" and y > position:
update.add((x, 2 * position - y))
else:
update.add((x, y))
dots = update
xmin = min(x for x, y in dots)
xmax = max(x for x, y in dots)
ymin = min(y for x, y in dots)
ymax = max(y for x, y in dots)
for y in range(ymin, ymax + 1):
print("".join("#" if (x, y) in dots else "."
for x in range(xmin, xmax + 1)))
print(len(dots))
"""
## Blatantly stolen from: https://www.reddit.com/r/adventofcode/comments/rf7onx/comment/hodsguh/?utm_source=share&utm_medium=web2x&context=3
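# Rough reading of the compact solution below (comments only; logic untouched):
#   P       - the dot coordinates parsed from the "x,y" lines
#   F       - the fold instructions as (is_y_fold, position) pairs
#   m(F, a) - one past the smallest fold position on an axis, used as the
#             extent of the folded sheet when displaying (1e4 if never folded)
#   b(n, f) - maps an original coordinate onto the folded sheet; this works
#             because every puzzle fold is at the midpoint, so repeated folds
#             reduce to a periodic reflection
# Assumed usage: python solution.py input.txt, where input.txt contains the
# dot lines, a blank line, then the "fold along x=.../y=..." instructions.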
import sys
L = open(sys.argv[1]).read().splitlines()
P = [tuple(map(int,p.split(','))) for p in L if ',' in p]
F = [('y' in p,int(p[13:])) for p in L[len(P)+1:]]
m = lambda n,Y: min([v+1 for y,v in n if Y^y] or [1e4])
def f(P,F):
b = lambda n,f: abs((n//f)%2*(f-2)-n%f)
return set((b(x,m(F,1)),b(y,m(F,0))) for x,y in P)
N=f(P,F)
print("Part 1: {:d}".format(len(f(P,F[:1]))))
print("Part 2: \n{:s}".format('\n'.join(''.join(" #"[(x,y) in N] for x in range(m(F,1))) for y in range(m(F,0)))))
| 33.351852
| 140
| 0.546918
|
794c60bc826ef483227b494d5890546471327b23
| 1,776
|
py
|
Python
|
gas_bear/__main__.py
|
Jennypies/gas_bear
|
fc604086cb9d726c04ddfd78339a84c50747b801
|
[
"MIT"
] | null | null | null |
gas_bear/__main__.py
|
Jennypies/gas_bear
|
fc604086cb9d726c04ddfd78339a84c50747b801
|
[
"MIT"
] | null | null | null |
gas_bear/__main__.py
|
Jennypies/gas_bear
|
fc604086cb9d726c04ddfd78339a84c50747b801
|
[
"MIT"
] | null | null | null |
import logging
import bme680
import time
import csv
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(format="[%(levelname)s %(name)s] %(message)s", level=logging.INFO)
sensor = bme680.BME680(bme680.I2C_ADDR_PRIMARY)
sensor.set_humidity_oversample(bme680.OS_2X)
sensor.set_pressure_oversample(bme680.OS_4X)
sensor.set_temperature_oversample(bme680.OS_8X)
sensor.set_filter(bme680.FILTER_SIZE_3)
sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
sensor.set_gas_heater_temperature(320)
sensor.set_gas_heater_duration(150)
sensor.select_gas_heater_profile(0)
logger.info("Initial sensor config complete")
while True:
logger.info("Polling sensors")
if sensor.get_sensor_data():
logger.info("Sensor data collected")
output = [
sensor.data.temperature,
sensor.data.pressure,
sensor.data.humidity
]
if sensor.data.heat_stable:
logger.info("Gas sensor data collected")
output.append(sensor.data.gas_resistance)
else:
logger.warning("Gas sensor data not heat stable")
output.append(0)
else:
logger.warning("No data from sensors")
output = [0, 0, 0, 0]
output.insert(0, time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
with open('data.csv', mode='a+', newline='') as f:
writer = csv.writer(f)
writer.writerow(output)
f.close()
logger.info("Waiting for a bit...")
time.sleep(300)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
logger.info("gas sensing has stopped")
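# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the original module): reading back the
# rows the loop above appends to data.csv. The column order is taken from the
# writer: timestamp, temperature, pressure, humidity, gas resistance.
def read_log(path="data.csv"):
    rows = []
    with open(path, newline="") as f:
        for timestamp, temperature, pressure, humidity, gas in csv.reader(f):
            rows.append(
                (timestamp, float(temperature), float(pressure), float(humidity), float(gas))
            )
    return rows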
| 28.645161
| 90
| 0.610923
|
794c6199596bec821b452a3b4e637e36de2e00b0
| 47,670
|
py
|
Python
|
gammapy/datasets/tests/test_map.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/datasets/tests/test_map.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/datasets/tests/test_map.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
from regions import CircleSkyRegion
from gammapy.data import GTI
from gammapy.datasets import Datasets, MapDataset, MapDatasetOnOff
from gammapy.datasets.map import MapEvaluator
from gammapy.irf import (
EDispKernelMap,
EDispMap,
EffectiveAreaTable2D,
EnergyDependentMultiGaussPSF,
PSFMap,
PSFKernel
)
from gammapy.makers.utils import make_map_exposure_true_energy
from gammapy.maps import Map, MapAxis, WcsGeom, WcsNDMap, RegionGeom, RegionNDMap
from gammapy.modeling import Fit
from gammapy.modeling.models import (
FoVBackgroundModel,
GaussianSpatialModel,
Models,
PointSpatialModel,
PowerLawSpectralModel,
SkyModel,
ConstantSpectralModel
)
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
@pytest.fixture
def geom():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_etrue():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3, name="energy_true")
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_image():
energy = np.logspace(-1.0, 1.0, 2)
axis = MapAxis.from_edges(energy, name="energy", unit=u.TeV, interp="log")
return WcsGeom.create(
skydir=(0, 0), binsz=0.02, width=(2, 2), frame="galactic", axes=[axis]
)
def get_exposure(geom_etrue):
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
aeff = EffectiveAreaTable2D.read(filename, hdu="EFFECTIVE AREA")
exposure_map = make_map_exposure_true_energy(
pointing=SkyCoord(1, 0.5, unit="deg", frame="galactic"),
livetime=1 * u.hr,
aeff=aeff,
geom=geom_etrue,
)
return exposure_map
def get_psf():
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
table_psf = psf.to_energy_dependent_table_psf(theta=0.5 * u.deg)
psf_map = PSFMap.from_energy_dependent_table_psf(table_psf)
return psf_map
@pytest.fixture
def sky_model():
spatial_model = GaussianSpatialModel(
lon_0="0.2 deg", lat_0="0.1 deg", sigma="0.2 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
return SkyModel(
spatial_model=spatial_model, spectral_model=spectral_model, name="test-model"
)
def get_map_dataset(geom, geom_etrue, edisp="edispmap", name="test", **kwargs):
"""Returns a MapDatasets"""
# define background model
background = Map.from_geom(geom)
background.data += 0.2
psf = get_psf()
exposure = get_exposure(geom_etrue)
e_reco = geom.axes["energy"]
e_true = geom_etrue.axes["energy_true"]
if edisp == "edispmap":
edisp = EDispMap.from_diagonal_response(energy_axis_true=e_true)
elif edisp == "edispkernelmap":
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=e_reco, energy_axis_true=e_true
)
else:
edisp = None
# define fit mask
center = SkyCoord("0.2 deg", "0.1 deg", frame="galactic")
circle = CircleSkyRegion(center=center, radius=1 * u.deg)
mask_fit = geom.region_mask([circle])
mask_fit = Map.from_geom(geom, data=mask_fit)
models = FoVBackgroundModel(dataset_name=name)
return MapDataset(
models=models,
exposure=exposure,
background=background,
psf=psf,
edisp=edisp,
mask_fit=mask_fit,
name=name,
**kwargs,
)
@requires_data()
def test_map_dataset_str(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
assert "MapDataset" in str(dataset)
assert "(frozen)" in str(dataset)
assert "background" in str(dataset)
dataset.mask_safe = None
assert "MapDataset" in str(dataset)
@requires_data()
def test_fake(sky_model, geom, geom_etrue):
"""Test the fake dataset"""
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert np.all(npred.data >= 0) # npred must be positive
dataset.counts = npred
real_dataset = dataset.copy()
dataset.fake(314)
assert real_dataset.counts.data.shape == dataset.counts.data.shape
assert_allclose(real_dataset.counts.data.sum(), 9525.299054, rtol=1e-5)
assert_allclose(dataset.counts.data.sum(), 9723)
@requires_data()
def test_different_exposure_unit(sky_model, geom):
energy_range_true = np.logspace(2, 4, 3)
axis = MapAxis.from_edges(
energy_range_true, name="energy_true", unit="GeV", interp="log"
)
geom_gev = geom.to_image().to_cube([axis])
dataset = get_map_dataset(geom, geom_gev, edisp="None")
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert_allclose(npred.data[0, 50, 50], 6.086019)
@pytest.mark.parametrize(("edisp_mode"), ["edispmap", "edispkernelmap"])
@requires_data()
def test_to_spectrum_dataset(sky_model, geom, geom_etrue, edisp_mode):
dataset_ref = get_map_dataset(geom, geom_etrue, edisp=edisp_mode)
bkg_model = FoVBackgroundModel(dataset_name=dataset_ref.name)
dataset_ref.models = [sky_model, bkg_model]
dataset_ref.counts = dataset_ref.npred_background() * 0.0
dataset_ref.counts.data[1, 50, 50] = 1
dataset_ref.counts.data[1, 60, 50] = 1
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset_ref.gti = gti
on_region = CircleSkyRegion(center=geom.center_skydir, radius=0.05 * u.deg)
spectrum_dataset = dataset_ref.to_spectrum_dataset(on_region)
spectrum_dataset_corrected = dataset_ref.to_spectrum_dataset(
on_region, containment_correction=True
)
mask = np.ones_like(dataset_ref.counts, dtype="bool")
mask[1, 40:60, 40:60] = 0
dataset_ref.mask_safe = Map.from_geom(dataset_ref.counts.geom, data=mask)
spectrum_dataset_mask = dataset_ref.to_spectrum_dataset(on_region)
assert np.sum(spectrum_dataset.counts.data) == 1
assert spectrum_dataset.data_shape == (2, 1, 1)
assert spectrum_dataset.background.geom.axes[0].nbin == 2
assert spectrum_dataset.exposure.geom.axes[0].nbin == 3
assert spectrum_dataset.exposure.unit == "m2s"
assert spectrum_dataset.edisp.get_edisp_kernel().energy_axis.nbin == 2
assert spectrum_dataset.edisp.get_edisp_kernel().energy_axis_true.nbin == 3
assert_allclose(spectrum_dataset.edisp.exposure_map.data[1], 3.070884e09, rtol=1e-5)
assert np.sum(spectrum_dataset_mask.counts.data) == 0
assert spectrum_dataset_mask.data_shape == (2, 1, 1)
assert spectrum_dataset_corrected.exposure.unit == "m2s"
assert_allclose(spectrum_dataset.exposure.data[1], 3.070884e09, rtol=1e-5)
assert_allclose(spectrum_dataset_corrected.exposure.data[1], 2.035899e09, rtol=1e-5)
@requires_data()
def test_info_dict(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["exposure_min"].value, 8.32e8, rtol=1e-3)
assert_allclose(info_dict["exposure_max"].value, 1.105e10, rtol=1e-3)
assert info_dict["exposure_max"].unit == "m2 s"
assert info_dict["name"] == "test"
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["sqrt_ts"], 74.024180, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["ontime"].value, 3600)
assert info_dict["name"] == "test"
def get_fermi_3fhl_gc_dataset():
counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
background = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz"
)
bkg_model = FoVBackgroundModel(dataset_name="fermi-3fhl-gc")
exposure = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz"
)
return MapDataset(
counts=counts,
background=background,
models=[bkg_model],
exposure=exposure,
name="fermi-3fhl-gc",
)
@requires_data()
def test_resample_energy_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
new_axis = MapAxis.from_edges([10, 35, 100] * u.GeV, interp="log", name="energy")
grouped = dataset.resample_energy_axis(energy_axis=new_axis)
assert grouped.counts.data.shape == (2, 200, 400)
assert grouped.counts.data[0].sum() == 28581
assert_allclose(
grouped.npred_background().data.sum(axis=(1, 2)),
[25074.366386, 2194.298612],
rtol=1e-5,
)
assert_allclose(grouped.exposure.data, dataset.exposure.data, rtol=1e-5)
axis = grouped.counts.geom.axes[0]
npred = dataset.npred()
npred_grouped = grouped.npred()
assert_allclose(npred.resample_axis(axis=axis).data.sum(), npred_grouped.data.sum())
@requires_data()
def test_to_image_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
dataset_im = dataset.to_image()
assert dataset_im.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(dataset_im.npred_background().data.sum(), 28548.625, rtol=1e-5)
assert_allclose(dataset_im.exposure.data, dataset.exposure.data, rtol=1e-5)
npred = dataset.npred()
npred_im = dataset_im.npred()
assert_allclose(npred.data.sum(), npred_im.data.sum())
def test_to_image_mask_safe():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.5, width=(1, 1), frame="icrs", axes=[axis]
)
dataset = MapDataset.create(geom)
    # Check mask_safe handling
data = np.array([[[False, True], [True, True]], [[False, False], [True, True]]])
dataset.mask_safe = WcsNDMap.from_geom(geom=geom, data=data)
dataset_im = dataset.to_image()
assert dataset_im.mask_safe.data.dtype == bool
desired = np.array([[False, True], [True, True]])
assert (dataset_im.mask_safe.data == desired).all()
# Check that missing entries in the dataset do not break
dataset_copy = dataset.copy()
dataset_copy.exposure = None
dataset_im = dataset_copy.to_image()
assert dataset_im.exposure is None
dataset_copy = dataset.copy()
dataset_copy.counts = None
dataset_im = dataset_copy.to_image()
assert dataset_im.counts is None
@requires_data()
def test_downsample():
dataset = get_fermi_3fhl_gc_dataset()
downsampled = dataset.downsample(2)
assert downsampled.counts.data.shape == (11, 100, 200)
assert downsampled.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(
downsampled.npred_background().data.sum(axis=(1, 2)),
dataset.npred_background().data.sum(axis=(1, 2)),
rtol=1e-5,
)
assert_allclose(downsampled.exposure.data[5, 50, 100], 3.318082e11, rtol=1e-5)
with pytest.raises(ValueError):
dataset.downsample(2, axis_name="energy")
@requires_data()
def test_map_dataset_fits_io(tmp_path, sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
hdulist = dataset.to_hdulist()
actual = [hdu.name for hdu in hdulist]
desired = [
"PRIMARY",
"COUNTS",
"COUNTS_BANDS",
"EXPOSURE",
"EXPOSURE_BANDS",
"BACKGROUND",
"BACKGROUND_BANDS",
"EDISP",
"EDISP_BANDS",
"EDISP_EXPOSURE",
"EDISP_EXPOSURE_BANDS",
"PSF",
"PSF_BANDS",
"PSF_EXPOSURE",
"PSF_EXPOSURE_BANDS",
"MASK_SAFE",
"MASK_SAFE_BANDS",
"MASK_FIT",
"MASK_FIT_BANDS",
"GTI",
]
assert actual == desired
dataset.write(tmp_path / "test.fits")
dataset_new = MapDataset.read(tmp_path / "test.fits")
assert dataset_new.mask.data.dtype == bool
assert_allclose(dataset.counts.data, dataset_new.counts.data)
assert_allclose(
dataset.npred_background().data, dataset_new.npred_background().data
)
assert_allclose(dataset.edisp.edisp_map.data, dataset_new.edisp.edisp_map.data)
assert_allclose(dataset.psf.psf_map.data, dataset_new.psf.psf_map.data)
assert_allclose(dataset.exposure.data, dataset_new.exposure.data)
assert_allclose(dataset.mask_fit.data, dataset_new.mask_fit.data)
assert_allclose(dataset.mask_safe.data, dataset_new.mask_safe.data)
assert dataset.counts.geom == dataset_new.counts.geom
assert dataset.exposure.geom == dataset_new.exposure.geom
assert dataset.npred_background().geom == dataset_new.npred_background().geom
assert dataset.edisp.edisp_map.geom == dataset_new.edisp.edisp_map.geom
assert_allclose(
dataset.gti.time_sum.to_value("s"), dataset_new.gti.time_sum.to_value("s")
)
# To test io of psf and edisp map
stacked = MapDataset.create(geom)
stacked.write(tmp_path / "test-2.fits", overwrite=True)
stacked1 = MapDataset.read(tmp_path / "test-2.fits")
assert stacked1.psf.psf_map is not None
assert stacked1.psf.exposure_map is not None
assert stacked1.edisp.edisp_map is not None
assert stacked1.edisp.exposure_map is not None
assert stacked.mask.data.dtype == bool
assert_allclose(stacked1.psf.psf_map, stacked.psf.psf_map)
assert_allclose(stacked1.edisp.edisp_map, stacked.edisp.edisp_map)
@requires_dependency("iminuit")
@requires_dependency("matplotlib")
@requires_data()
def test_map_fit(sky_model, geom, geom_etrue):
dataset_1 = get_map_dataset(geom, geom_etrue, name="test-1")
dataset_2 = get_map_dataset(geom, geom_etrue, name="test-2")
datasets = Datasets([dataset_1, dataset_2])
models = Models(datasets.models)
models.insert(0, sky_model)
models["test-1-bkg"].spectral_model.norm.value = 0.5
models["test-model"].spatial_model.sigma.frozen = True
datasets.models = models
dataset_2.counts = dataset_2.npred()
dataset_1.counts = dataset_1.npred()
models["test-1-bkg"].spectral_model.norm.value = 0.49
models["test-2-bkg"].spectral_model.norm.value = 0.99
fit = Fit(datasets)
result = fit.run()
assert result.success
assert "minuit" in repr(result)
npred = dataset_1.npred().data.sum()
assert_allclose(npred, 7525.790688, rtol=1e-3)
assert_allclose(result.total_stat, 21659.2139, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)
assert_allclose(pars["index"].value, 3, rtol=1e-2)
assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)
# background norm 1
assert_allclose(pars[8].value, 0.5, rtol=1e-2)
assert_allclose(pars[8].error, 0.015811, rtol=1e-2)
# background norm 2
assert_allclose(pars[11].value, 1, rtol=1e-2)
assert_allclose(pars[11].error, 0.02147, rtol=1e-2)
# test mask_safe evaluation
mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)
stat = fit.datasets.stat_sum()
assert_allclose(stat, 14823.579908, rtol=1e-5)
region = sky_model.spatial_model.to_region()
initial_counts = dataset_1.counts.copy()
with mpl_plot_check():
dataset_1.plot_residuals(kwargs_spectral=dict(region=region))
# check dataset has not changed
assert initial_counts == dataset_1.counts
# test model evaluation outside image
dataset_1.models[0].spatial_model.lon_0.value = 150
dataset_1.npred()
assert not dataset_1._evaluators["test-model"].contributes
@requires_dependency("iminuit")
@requires_data()
def test_map_fit_one_energy_bin(sky_model, geom_image):
energy_axis = geom_image.axes["energy"]
geom_etrue = geom_image.to_image().to_cube([energy_axis.copy(name="energy_true")])
dataset = get_map_dataset(geom_image, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
sky_model.spectral_model.index.value = 3.0
sky_model.spectral_model.index.frozen = True
dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.value = 0.5
dataset.counts = dataset.npred()
# Move a bit away from the best-fit point, to make sure the optimiser runs
sky_model.parameters["sigma"].value = 0.21
dataset.models[f"{dataset.name}-bkg"].parameters["norm"].frozen = True
fit = Fit([dataset])
result = fit.run()
assert result.success
npred = dataset.npred().data.sum()
assert_allclose(npred, 16538.124036, rtol=1e-3)
assert_allclose(result.total_stat, -34844.125047, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.001689, rtol=1e-2)
assert_allclose(pars["sigma"].value, 0.2, rtol=1e-2)
assert_allclose(pars["sigma"].error, 0.00092, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 8.127593e-14, rtol=1e-2)
def test_create():
# tests empty datasets created
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
e_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 3), name="energy", unit=u.TeV, interp="log"
)
e_true = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), name="energy_true", unit=u.TeV, interp="log"
)
geom = WcsGeom.create(binsz=0.02, width=(2, 2), axes=[e_reco])
empty_dataset = MapDataset.create(
geom=geom, energy_axis_true=e_true, rad_axis=rad_axis
)
assert empty_dataset.counts.data.shape == (2, 100, 100)
assert empty_dataset.exposure.data.shape == (3, 100, 100)
assert empty_dataset.psf.psf_map.data.shape == (3, 50, 10, 10)
assert empty_dataset.psf.exposure_map.data.shape == (3, 1, 10, 10)
assert isinstance(empty_dataset.edisp, EDispKernelMap)
assert empty_dataset.edisp.edisp_map.data.shape == (3, 2, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (3, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 300)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
def test_create_with_migra(tmp_path):
# tests empty datasets created
migra_axis = MapAxis(nodes=np.linspace(0.0, 3.0, 51), unit="", name="migra")
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
e_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 3), name="energy", unit=u.TeV, interp="log"
)
e_true = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), name="energy_true", unit=u.TeV, interp="log"
)
geom = WcsGeom.create(binsz=0.02, width=(2, 2), axes=[e_reco])
empty_dataset = MapDataset.create(
geom=geom, energy_axis_true=e_true, migra_axis=migra_axis, rad_axis=rad_axis
)
empty_dataset.write(tmp_path / "test.fits")
dataset_new = MapDataset.read(tmp_path / "test.fits")
assert isinstance(empty_dataset.edisp, EDispMap)
assert empty_dataset.edisp.edisp_map.data.shape == (3, 50, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (3, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 300)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
assert isinstance(dataset_new.edisp, EDispMap)
assert dataset_new.edisp.edisp_map.data.shape == (3, 50, 10, 10)
def test_stack(sky_model):
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3)
geom = WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.05,
width=(2, 2),
frame="icrs",
axes=[axis],
)
axis_etrue = MapAxis.from_energy_bounds(
"0.1 TeV", "10 TeV", nbin=5, name="energy_true"
)
geom_etrue = WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.05,
width=(2, 2),
frame="icrs",
axes=[axis_etrue],
)
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=axis, energy_axis_true=axis_etrue, geom=geom
)
edisp.exposure_map.quantity = (
1e0 * u.m ** 2 * u.s * np.ones(edisp.exposure_map.data.shape)
)
bkg1 = Map.from_geom(geom)
bkg1.data += 0.2
cnt1 = Map.from_geom(geom)
cnt1.data = 1.0 * np.ones(cnt1.data.shape)
exp1 = Map.from_geom(geom_etrue)
exp1.quantity = 1e7 * u.m ** 2 * u.s * np.ones(exp1.data.shape)
mask1 = Map.from_geom(geom)
mask1.data = np.ones(mask1.data.shape, dtype=bool)
mask1.data[0][:][5:10] = False
dataset1 = MapDataset(
counts=cnt1,
background=bkg1,
exposure=exp1,
mask_safe=mask1,
name="dataset-1",
edisp=edisp,
meta_table=Table({"OBS_ID": [0]}),
)
bkg2 = Map.from_geom(geom)
bkg2.data = 0.1 * np.ones(bkg2.data.shape)
cnt2 = Map.from_geom(geom)
cnt2.data = 1.0 * np.ones(cnt2.data.shape)
exp2 = Map.from_geom(geom_etrue)
exp2.quantity = 1e7 * u.m ** 2 * u.s * np.ones(exp2.data.shape)
mask2 = Map.from_geom(geom)
mask2.data = np.ones(mask2.data.shape, dtype=bool)
mask2.data[0][:][5:10] = False
mask2.data[1][:][10:15] = False
dataset2 = MapDataset(
counts=cnt2,
background=bkg2,
exposure=exp2,
mask_safe=mask2,
name="dataset-2",
edisp=edisp,
meta_table=Table({"OBS_ID": [1]}),
)
background_model2 = FoVBackgroundModel(dataset_name="dataset-2")
background_model1 = FoVBackgroundModel(dataset_name="dataset-1")
dataset1.models = [background_model1, sky_model]
dataset2.models = [background_model2, sky_model]
dataset1.stack(dataset2)
dataset1.models = [sky_model]
npred_b = dataset1.npred()
assert_allclose(npred_b.data.sum(), 1459.985035, 1e-5)
assert_allclose(dataset1.npred_background().data.sum(), 1360.00, 1e-5)
assert_allclose(dataset1.counts.data.sum(), 9000, 1e-5)
assert_allclose(dataset1.mask_safe.data.sum(), 4600)
assert_allclose(dataset1.exposure.data.sum(), 1.6e11)
assert_allclose(dataset1.meta_table["OBS_ID"][0], [0, 1])
@requires_data()
def test_npred_sig(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
pwl = PowerLawSpectralModel()
gauss = GaussianSpatialModel(
lon_0="0.0 deg", lat_0="0.0 deg", sigma="0.5 deg", frame="galactic"
)
model1 = SkyModel(pwl, gauss)
bkg = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [bkg, sky_model, model1]
assert_allclose(dataset.npred().data.sum(), 9676.047906, rtol=1e-3)
assert_allclose(dataset.npred_signal().data.sum(), 5676.04790, rtol=1e-3)
assert_allclose(dataset.npred_signal(model=model1).data.sum(), 150.7487, rtol=1e-3)
def test_stack_npred():
pwl = PowerLawSpectralModel()
gauss = GaussianSpatialModel(sigma="0.2 deg")
model = SkyModel(pwl, gauss)
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=5)
axis_etrue = MapAxis.from_energy_bounds(
"0.1 TeV", "10 TeV", nbin=11, name="energy_true"
)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.05, width=(2, 2), frame="icrs", axes=[axis],
)
dataset_1 = MapDataset.create(
geom,
energy_axis_true=axis_etrue,
name="dataset-1",
gti=GTI.create("0 min", "30 min"),
)
dataset_1.psf = None
dataset_1.exposure.data += 1
dataset_1.mask_safe.data = geom.energy_mask(energy_min=1 * u.TeV)
dataset_1.background.data += 1
bkg_model_1 = FoVBackgroundModel(dataset_name=dataset_1.name)
dataset_1.models = [model, bkg_model_1]
dataset_2 = MapDataset.create(
geom,
energy_axis_true=axis_etrue,
name="dataset-2",
gti=GTI.create("30 min", "60 min"),
)
dataset_2.psf = None
dataset_2.exposure.data += 1
dataset_2.mask_safe.data = geom.energy_mask(energy_min=0.2 * u.TeV)
dataset_2.background.data += 1
bkg_model_2 = FoVBackgroundModel(dataset_name=dataset_2.name)
dataset_2.models = [model, bkg_model_2]
npred_1 = dataset_1.npred()
npred_1.data[~dataset_1.mask_safe.data] = 0
npred_2 = dataset_2.npred()
npred_2.data[~dataset_2.mask_safe.data] = 0
stacked_npred = Map.from_geom(geom)
stacked_npred.stack(npred_1)
stacked_npred.stack(npred_2)
stacked = MapDataset.create(geom, energy_axis_true=axis_etrue, name="stacked")
stacked.stack(dataset_1)
stacked.stack(dataset_2)
npred_stacked = stacked.npred()
assert_allclose(npred_stacked.data, stacked_npred.data)
def to_cube(image):
    # introduce a fake energy axis for now
axis = MapAxis.from_edges([1, 10] * u.TeV, name="energy")
geom = image.geom.to_cube([axis])
return WcsNDMap.from_geom(geom=geom, data=image.data)
@pytest.fixture
def images():
"""Load some `counts`, `counts_off`, `acceptance_on`, `acceptance_off" images"""
filename = "$GAMMAPY_DATA/tests/unbundled/hess/survey/hess_survey_snippet.fits.gz"
return {
"counts": to_cube(WcsNDMap.read(filename, hdu="ON")),
"counts_off": to_cube(WcsNDMap.read(filename, hdu="OFF")),
"acceptance": to_cube(WcsNDMap.read(filename, hdu="ONEXPOSURE")),
"acceptance_off": to_cube(WcsNDMap.read(filename, hdu="OFFEXPOSURE")),
"exposure": to_cube(WcsNDMap.read(filename, hdu="EXPGAMMAMAP")),
"background": to_cube(WcsNDMap.read(filename, hdu="BACKGROUND")),
}
def test_npred_psf_after_edisp():
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"0.8 TeV", "15 TeV", nbin=6, name="energy_true"
)
geom = WcsGeom.create(width=4 * u.deg, binsz=0.02, axes=[energy_axis])
dataset = MapDataset.create(geom=geom, energy_axis_true=energy_axis_true)
dataset.background.data += 1
dataset.exposure.data += 1e12
dataset.mask_safe.data += True
dataset.psf = PSFMap.from_gauss(
energy_axis_true=energy_axis_true, sigma=0.2 * u.deg
)
model = SkyModel(
spectral_model=PowerLawSpectralModel(),
spatial_model=PointSpatialModel(),
name="test-model",
)
model.apply_irf["psf_after_edisp"] = True
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [bkg_model, model]
npred = dataset.npred()
assert_allclose(npred.data.sum(), 129553.858658)
def get_map_dataset_onoff(images, **kwargs):
"""Returns a MapDatasetOnOff"""
mask_geom = images["counts"].geom
mask_data = np.ones(images["counts"].data.shape, dtype=bool)
mask_safe = Map.from_geom(mask_geom, data=mask_data)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
return MapDatasetOnOff(
counts=images["counts"],
counts_off=images["counts_off"],
acceptance=images["acceptance"],
acceptance_off=images["acceptance_off"],
exposure=images["exposure"],
mask_safe=mask_safe,
gti=gti,
**kwargs,
)
@requires_data()
def test_map_dataset_on_off_fits_io(images, tmp_path):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
hdulist = dataset.to_hdulist()
actual = [hdu.name for hdu in hdulist]
desired = [
"PRIMARY",
"COUNTS",
"COUNTS_BANDS",
"EXPOSURE",
"EXPOSURE_BANDS",
"MASK_SAFE",
"MASK_SAFE_BANDS",
"GTI",
"COUNTS_OFF",
"COUNTS_OFF_BANDS",
"ACCEPTANCE",
"ACCEPTANCE_BANDS",
"ACCEPTANCE_OFF",
"ACCEPTANCE_OFF_BANDS",
]
assert actual == desired
dataset.write(tmp_path / "test.fits")
dataset_new = MapDatasetOnOff.read(tmp_path / "test.fits")
assert dataset_new.mask.data.dtype == bool
assert_allclose(dataset.counts.data, dataset_new.counts.data)
assert_allclose(dataset.counts_off.data, dataset_new.counts_off.data)
assert_allclose(dataset.acceptance.data, dataset_new.acceptance.data)
assert_allclose(dataset.acceptance_off.data, dataset_new.acceptance_off.data)
assert_allclose(dataset.exposure.data, dataset_new.exposure.data)
assert_allclose(dataset.mask_safe, dataset_new.mask_safe)
    assert np.all(dataset.mask_safe.data == dataset_new.mask_safe.data)
assert dataset.mask_safe.geom == dataset_new.mask_safe.geom
assert dataset.counts.geom == dataset_new.counts.geom
assert dataset.exposure.geom == dataset_new.exposure.geom
assert_allclose(
dataset.gti.time_sum.to_value("s"), dataset_new.gti.time_sum.to_value("s")
)
def test_create_onoff(geom):
# tests empty datasets created
migra_axis = MapAxis(nodes=np.linspace(0.0, 3.0, 51), unit="", name="migra")
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
energy_axis = geom.axes["energy"].copy(name="energy_true")
empty_dataset = MapDatasetOnOff.create(geom, energy_axis, migra_axis, rad_axis)
assert_allclose(empty_dataset.counts.data.sum(), 0.0)
assert_allclose(empty_dataset.counts_off.data.sum(), 0.0)
assert_allclose(empty_dataset.acceptance.data.sum(), 0.0)
assert_allclose(empty_dataset.acceptance_off.data.sum(), 0.0)
assert empty_dataset.psf.psf_map.data.shape == (2, 50, 10, 10)
assert empty_dataset.psf.exposure_map.data.shape == (2, 1, 10, 10)
assert empty_dataset.edisp.edisp_map.data.shape == (2, 50, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (2, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 200)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
@requires_data()
def test_map_dataset_onoff_str(images):
dataset = get_map_dataset_onoff(images)
assert "MapDatasetOnOff" in str(dataset)
@requires_data()
def test_stack_onoff(images):
dataset = get_map_dataset_onoff(images)
stacked = dataset.copy()
stacked.stack(dataset)
assert_allclose(stacked.counts.data.sum(), 2 * dataset.counts.data.sum())
assert_allclose(stacked.counts_off.data.sum(), 2 * dataset.counts_off.data.sum())
assert_allclose(
stacked.acceptance.data.sum(), dataset.data_shape[1] * dataset.data_shape[2]
)
assert_allclose(np.nansum(stacked.acceptance_off.data), 2.925793e+08, rtol=1e-5)
assert_allclose(stacked.exposure.data, 2.0 * dataset.exposure.data)
def test_dataset_cutout_aligned(geom):
dataset = MapDataset.create(geom)
kwargs = {"position": geom.center_skydir, "width": 1 * u.deg}
geoms = {name: geom.cutout(**kwargs) for name, geom in dataset.geoms.items()}
cutout = MapDataset.from_geoms(**geoms, name="cutout")
assert dataset.counts.geom.is_aligned(cutout.counts.geom)
assert dataset.exposure.geom.is_aligned(cutout.exposure.geom)
assert dataset.edisp.edisp_map.geom.is_aligned(cutout.edisp.edisp_map.geom)
assert dataset.psf.psf_map.geom.is_aligned(cutout.psf.psf_map.geom)
def test_stack_onoff_cutout(geom_image):
# Test stacking of cutouts
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=3, name="energy_true"
)
dataset = MapDatasetOnOff.create(geom_image, energy_axis_true=energy_axis_true)
dataset.gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
kwargs = {"position": geom_image.center_skydir, "width": 1 * u.deg}
geoms = {name: geom.cutout(**kwargs) for name, geom in dataset.geoms.items()}
dataset_cutout = MapDatasetOnOff.from_geoms(**geoms, name="cutout-dataset")
dataset_cutout.gti = GTI.create(
[0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00"
)
dataset_cutout.mask_safe.data += True
dataset_cutout.counts.data += 1
dataset_cutout.counts_off.data += 1
dataset_cutout.exposure.data += 1
dataset.stack(dataset_cutout)
assert_allclose(dataset.counts.data.sum(), 2500)
assert_allclose(dataset.counts_off.data.sum(), 2500)
assert_allclose(dataset.alpha.data.sum(), 0)
assert_allclose(dataset.exposure.data.sum(), 7500)
assert dataset_cutout.name == "cutout-dataset"
def test_datasets_io_no_model(tmpdir):
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=2)
geom = WcsGeom.create(npix=(5, 5), axes=[axis])
dataset_1 = MapDataset.create(geom, name="dataset_1")
dataset_2 = MapDataset.create(geom, name="dataset_2")
datasets = Datasets([dataset_1, dataset_2])
datasets.write(filename=tmpdir / "datasets.yaml")
filename_1 = tmpdir / "dataset_1.fits"
assert filename_1.exists()
filename_2 = tmpdir / "dataset_2.fits"
assert filename_2.exists()
@requires_data()
def test_map_dataset_on_off_to_spectrum_dataset(images):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
on_region = CircleSkyRegion(
center=dataset.counts.geom.center_skydir, radius=0.1 * u.deg
)
spectrum_dataset = dataset.to_spectrum_dataset(on_region)
assert spectrum_dataset.counts.data[0] == 8
assert spectrum_dataset.data_shape == (1, 1, 1)
assert spectrum_dataset.counts_off.data[0] == 33914
assert_allclose(spectrum_dataset.alpha.data[0], 0.0002143, atol=1e-7)
excess_map = images["counts"] - images["background"]
excess_true = excess_map.get_spectrum(on_region, np.sum).data[0]
excess = spectrum_dataset.excess.data[0]
assert_allclose(excess, excess_true, rtol=1e-3)
assert spectrum_dataset.name != dataset.name
@requires_data()
def test_map_dataset_on_off_to_spectrum_dataset_weights():
e_reco = MapAxis.from_bounds(1, 10, nbin=3, unit="TeV", name="energy")
geom = WcsGeom.create(
skydir=(0, 0), width=(2.5, 2.5), binsz=0.5, axes=[e_reco], frame="galactic"
)
counts = Map.from_geom(geom)
counts.data += 1
counts_off = Map.from_geom(geom)
counts_off.data += 2
acceptance = Map.from_geom(geom)
acceptance.data += 1
acceptance_off = Map.from_geom(geom)
acceptance_off.data += 4
weights = Map.from_geom(geom, dtype="bool")
weights.data[1:, 2:4, 2] = True
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
mask_safe=weights,
gti=gti,
)
on_region = CircleSkyRegion(
center=dataset.counts.geom.center_skydir, radius=1.5 * u.deg
)
spectrum_dataset = dataset.to_spectrum_dataset(on_region)
assert_allclose(spectrum_dataset.counts.data[:, 0, 0], [0, 2, 2])
assert_allclose(spectrum_dataset.counts_off.data[:, 0, 0], [0, 4, 4])
assert_allclose(spectrum_dataset.acceptance.data[:, 0, 0], [0, 0.08, 0.08])
assert_allclose(spectrum_dataset.acceptance_off.data[:, 0, 0], [0, 0.32, 0.32])
assert_allclose(spectrum_dataset.alpha.data[:, 0, 0], [0, 0.25, 0.25])
@requires_data()
def test_map_dataset_on_off_cutout(images):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
cutout_dataset = dataset.cutout(
images["counts"].geom.center_skydir, ["1 deg", "1 deg"]
)
assert cutout_dataset.counts.data.shape == (1, 50, 50)
assert cutout_dataset.counts_off.data.shape == (1, 50, 50)
assert cutout_dataset.acceptance.data.shape == (1, 50, 50)
assert cutout_dataset.acceptance_off.data.shape == (1, 50, 50)
assert cutout_dataset.name != dataset.name
def test_map_dataset_on_off_fake(geom):
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
energy_true_axis = geom.axes["energy"].copy(name="energy_true")
empty_dataset = MapDataset.create(geom, energy_true_axis, rad_axis=rad_axis)
empty_dataset = MapDatasetOnOff.from_map_dataset(
empty_dataset, acceptance=1, acceptance_off=10.0
)
empty_dataset.acceptance_off.data[0, 50, 50] = 0
background_map = Map.from_geom(geom, data=1)
empty_dataset.fake(background_map, random_state=42)
assert_allclose(empty_dataset.counts.data[0, 50, 50], 0)
assert_allclose(empty_dataset.counts.data.mean(), 0.99445, rtol=1e-3)
assert_allclose(empty_dataset.counts_off.data.mean(), 10.00055, rtol=1e-3)
@requires_data()
def test_map_dataset_on_off_to_image():
axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV")
geom = WcsGeom.create(npix=(10, 10), binsz=0.05, axes=[axis])
counts = Map.from_geom(geom, data=np.ones((2, 10, 10)))
counts_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off *= 2
dataset = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
)
image_dataset = dataset.to_image()
assert image_dataset.counts.data.shape == (1, 10, 10)
assert image_dataset.acceptance_off.data.shape == (1, 10, 10)
assert_allclose(image_dataset.acceptance, 2)
assert_allclose(image_dataset.acceptance_off, 4)
assert_allclose(image_dataset.counts_off, 2)
assert image_dataset.name != dataset.name
# Try with a safe_mask
mask_safe = Map.from_geom(geom, data=np.ones((2, 10, 10), dtype="bool"))
mask_safe.data[0] = 0
dataset.mask_safe = mask_safe
image_dataset = dataset.to_image()
assert_allclose(image_dataset.acceptance, 1)
assert_allclose(image_dataset.acceptance_off, 2)
assert_allclose(image_dataset.counts_off, 1)
def test_map_dataset_geom(geom, sky_model):
e_true = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=5, name="energy_true")
dataset = MapDataset.create(geom, energy_axis_true=e_true)
dataset.counts = None
dataset.background = None
npred = dataset.npred()
assert npred.geom == geom
dataset.mask_safe = None
dataset.mask_fit = None
with pytest.raises(ValueError):
dataset._geom
@requires_data()
def test_names(geom, geom_etrue, sky_model):
m = Map.from_geom(geom)
m.quantity = 0.2 * np.ones(m.data.shape)
background_model1 = FoVBackgroundModel(dataset_name="test")
assert background_model1.name == "test-bkg"
c_map1 = Map.from_geom(geom)
c_map1.quantity = 0.3 * np.ones(c_map1.data.shape)
model1 = sky_model.copy()
assert model1.name != sky_model.name
model1 = sky_model.copy(name="model1")
assert model1.name == "model1"
model2 = sky_model.copy(name="model2")
dataset1 = MapDataset(
counts=c_map1,
models=Models([model1, model2, background_model1]),
exposure=get_exposure(geom_etrue),
background=m,
name="test",
)
dataset2 = dataset1.copy()
assert dataset2.name != dataset1.name
assert dataset2.models is None
dataset2 = dataset1.copy(name="dataset2")
assert dataset2.name == "dataset2"
assert dataset2.models is None
def test_stack_dataset_dataset_on_off():
axis = MapAxis.from_edges([1, 10] * u.TeV, name="energy")
geom = WcsGeom.create(width=1, axes=[axis])
gti = GTI.create([0 * u.s], [1 * u.h])
dataset = MapDataset.create(geom, gti=gti)
dataset_on_off = MapDatasetOnOff.create(geom, gti=gti)
dataset_on_off.mask_safe.data += True
dataset_on_off.acceptance_off += 5
dataset_on_off.acceptance += 1
dataset_on_off.counts_off += 1
dataset.stack(dataset_on_off)
assert_allclose(dataset.npred_background().data, 0.166667, rtol=1e-3)
@requires_data()
def test_info_dict_on_off(images):
dataset = get_map_dataset_onoff(images)
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 4299, rtol=1e-3)
assert_allclose(info_dict["excess"], -22.52295, rtol=1e-3)
assert_allclose(info_dict["exposure_min"].value, 1.739467e08, rtol=1e-3)
assert_allclose(info_dict["exposure_max"].value, 3.4298378e09, rtol=1e-3)
assert_allclose(info_dict["npred"], 4321.518, rtol=1e-3)
assert_allclose(info_dict["counts_off"], 20407510.0, rtol=1e-3)
assert_allclose(info_dict["acceptance"], 4272.7075, rtol=1e-3)
assert_allclose(info_dict["acceptance_off"], 20175596.0, rtol=1e-3)
assert_allclose(info_dict["alpha"], 0.000169, rtol=1e-3)
assert_allclose(info_dict["ontime"].value, 3600)
def test_slice_by_idx():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=17)
axis_etrue = MapAxis.from_energy_bounds(
"0.1 TeV", "10 TeV", nbin=31, name="energy_true"
)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.5, width=(2, 2), frame="icrs", axes=[axis],
)
dataset = MapDataset.create(geom=geom, energy_axis_true=axis_etrue, binsz_irf=0.5)
slices = {"energy": slice(5, 10)}
sub_dataset = dataset.slice_by_idx(slices)
assert sub_dataset.counts.geom.data_shape == (5, 4, 4)
assert sub_dataset.mask_safe.geom.data_shape == (5, 4, 4)
assert sub_dataset.npred_background().geom.data_shape == (5, 4, 4)
assert sub_dataset.exposure.geom.data_shape == (31, 4, 4)
assert sub_dataset.edisp.edisp_map.geom.data_shape == (31, 5, 4, 4)
assert sub_dataset.psf.psf_map.geom.data_shape == (31, 66, 4, 4)
axis = sub_dataset.counts.geom.axes["energy"]
assert_allclose(axis.edges[0].value, 0.387468, rtol=1e-5)
slices = {"energy_true": slice(5, 10)}
sub_dataset = dataset.slice_by_idx(slices)
assert sub_dataset.counts.geom.data_shape == (17, 4, 4)
assert sub_dataset.mask_safe.geom.data_shape == (17, 4, 4)
assert sub_dataset.npred_background().geom.data_shape == (17, 4, 4)
assert sub_dataset.exposure.geom.data_shape == (5, 4, 4)
assert sub_dataset.edisp.edisp_map.geom.data_shape == (5, 17, 4, 4)
assert sub_dataset.psf.psf_map.geom.data_shape == (5, 66, 4, 4)
axis = sub_dataset.counts.geom.axes["energy"]
assert_allclose(axis.edges[0].value, 0.1, rtol=1e-5)
axis = sub_dataset.exposure.geom.axes["energy_true"]
assert_allclose(axis.edges[0].value, 0.210175, rtol=1e-5)
@requires_dependency("matplotlib")
def test_plot_residual_onoff():
axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV")
geom = WcsGeom.create(npix=(10, 10), binsz=0.05, axes=[axis])
counts = Map.from_geom(geom, data=np.ones((2, 10, 10)))
counts_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off *= 2
dataset = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
)
with mpl_plot_check():
dataset.plot_residuals_spatial()
def test_to_map_dataset():
axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV")
geom = WcsGeom.create(npix=(10, 10), binsz=0.05, axes=[axis])
counts = Map.from_geom(geom, data=np.ones((2, 10, 10)))
counts_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
acceptance_off *= 2
dataset_onoff = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
)
dataset = dataset_onoff.to_map_dataset(name="ds")
assert dataset.name == "ds"
assert_allclose(dataset.npred_background().data.sum(), 100)
assert isinstance(dataset, MapDataset)
assert dataset.counts == dataset_onoff.counts
def test_downsample_onoff():
axis = MapAxis.from_energy_bounds(1, 10, 4, unit="TeV")
geom = WcsGeom.create(npix=(10, 10), binsz=0.05, axes=[axis])
counts = Map.from_geom(geom, data=np.ones((4, 10, 10)))
counts_off = Map.from_geom(geom, data=np.ones((4, 10, 10)))
acceptance = Map.from_geom(geom, data=np.ones((4, 10, 10)))
acceptance_off = Map.from_geom(geom, data=np.ones((4, 10, 10)))
acceptance_off *= 2
dataset_onoff = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
)
downsampled = dataset_onoff.downsample(2, axis_name="energy")
assert downsampled.counts.data.shape == (2, 10, 10)
assert downsampled.counts.data.sum() == dataset_onoff.counts.data.sum()
assert downsampled.counts_off.data.sum() == dataset_onoff.counts_off.data.sum()
assert_allclose(downsampled.alpha.data, 0.5)
def test_compute_flux_spatial():
center = SkyCoord("0 deg", "0 deg", frame="galactic")
region = CircleSkyRegion(center=center, radius=0.1 * u.deg)
nbin = 2
energy_axis_true = MapAxis.from_energy_bounds(".1 TeV", "10 TeV", nbin=nbin, name="energy_true")
spectral_model = ConstantSpectralModel()
spatial_model = PointSpatialModel(lon_0=0 * u.deg, lat_0=0 * u.deg, frame="galactic")
models = SkyModel(spectral_model=spectral_model, spatial_model=spatial_model)
model = Models(models)
exposure_region = RegionNDMap.create(region, axes=[energy_axis_true])
exposure_region.data += 1.0
exposure_region.unit = "m2 s"
geom = RegionGeom(region, axes=[energy_axis_true])
psf = PSFKernel.from_gauss(geom.to_wcs_geom(), sigma="0.1 deg")
evaluator = MapEvaluator(model=model[0], exposure=exposure_region, psf=psf)
flux = evaluator.compute_flux_spatial()
assert_allclose(flux.value, [0.39677402, 0.39677402], atol=0.001)
| 34.319654
| 100
| 0.68842
|
794c61f491b72fcfbe531e61cb32e9014377360c
| 33,908
|
py
|
Python
|
python/pyspark/pandas/utils.py
|
asranasinghe/spark
|
6eee25b2d587016acdc49966510b50edc42053f5
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2016-05-26T10:38:18.000Z
|
2021-06-12T12:49:29.000Z
|
python/pyspark/pandas/utils.py
|
asranasinghe/spark
|
6eee25b2d587016acdc49966510b50edc42053f5
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8
|
2015-10-02T00:38:43.000Z
|
2021-06-16T00:54:07.000Z
|
python/pyspark/pandas/utils.py
|
asranasinghe/spark
|
6eee25b2d587016acdc49966510b50edc42053f5
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-05-20T10:44:27.000Z
|
2017-09-19T20:11:06.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
def combine_frames(
this: "DataFrame",
*args: Union["DataFrame", "Series"],
how: str = "full",
preserve_order_column: bool = False
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
It returns a DataFrame that has the prefixes `this_` and `that_` to distinguish
the column names from both DataFrames.
It internally performs a join operation which can be expensive in general.
So, if `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
assert not same_anchor(this, args[0]), "We don't need to combine. All series are in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Tuple]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Tuple]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
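# Editor's illustrative sketch (not part of the original module): what the frame
# returned by `combine_frames` looks like. `psdf1` and `psdf2` are hypothetical
# pandas-on-Spark DataFrames; the point is the extra 'this'/'that' column-label level
# and that the call only succeeds while 'compute.ops_on_diff_frames' is enabled.
def _combine_frames_example():  # pragma: no cover
    from pyspark.pandas.config import set_option, reset_option

    set_option("compute.ops_on_diff_frames", True)
    try:
        psdf1 = ps.DataFrame({"a": [1, 2, 3]})
        psdf2 = ps.DataFrame({"b": [4, 5, 6]})
        combined = combine_frames(psdf1, psdf2)
        # Columns from `psdf1` are labelled ('this', 'a') and columns from `psdf2`
        # are labelled ('that', 'b'); the two frames are joined on their indexes.
        return combined._internal.column_labels
    finally:
        reset_option("compute.ops_on_diff_frames")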
def align_diff_frames(
resolve_func: Callable[["DataFrame", List[Tuple], List[Tuple]], Tuple["Series", Tuple]],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
Otherwise, they are returned as-is.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply = []
this_columns_to_apply = []
additional_that_columns = []
columns_to_keep = []
column_labels_to_keep = []
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(F.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set)
column_labels_applied = list(column_labels_set)
else:
columns_applied = []
column_labels_applied = []
applied = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
) # type: DataFrame
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in the order of the original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
# After that, we add the remaining columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> spark.SparkSession:
if conf is None:
conf = dict()
builder = spark.SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(
pairs: Dict[str, Any], *, spark: Optional[spark.SparkSession] = None
) -> Iterator[None]:
"""
A convenient context manager to set `value` to the Spark SQL configuration `key` and
then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
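# Editor's illustrative sketch (not part of the original module): typical use of the
# `sql_conf` context manager defined above, with the Arrow configuration constant
# declared earlier in this module. Any Spark SQL key/value pair works the same way.
def _sql_conf_example():  # pragma: no cover
    with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
        # The configuration holds the new value only inside this block ...
        pass
    # ... and is restored (or unset, if it had no previous value) on exit.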
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, removes the ones that are not supported if they
are simply the default value (i.e. most likely the user didn't explicitly specify it). It
throws a TypeError if the user explicitly specifies an argument that is not supported by the
pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Makes a copy since whatever passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
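# Editor's illustrative sketch (not part of the original module): the calling
# convention described in the docstring above. `ps_like_head` is a hypothetical
# pandas-on-Spark style method that captures locals() first and then delegates to the
# matching pandas function.
def _validate_and_invoke_example():  # pragma: no cover
    def ps_like_head(self, n=5):
        # locals() is captured at the top so it only contains the input parameters.
        args = locals()
        pser = pd.Series([1, 2, 3])
        return validate_arguments_and_invoke_function(pser, ps_like_head, pd.Series.head, args)

    # Unsupported arguments left at their defaults would simply be dropped; explicitly
    # passing an argument unknown to the installed pandas version raises TypeError.
    return ps_like_head(object(), n=2)  # equivalent to pd.Series([1, 2, 3]).head(n=2)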
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
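# Editor's illustrative sketch (not part of the original module): how `lazy_property`
# behaves. `Circle` is a hypothetical class; the first access computes the value and
# caches it on the instance under the "_lazy_"-prefixed attribute name.
def _lazy_property_example():  # pragma: no cover
    class Circle:
        def __init__(self, radius):
            self.radius = radius
            self.computations = 0

        @lazy_property
        def area(self):
            self.computations += 1
            return 3.14159 * self.radius ** 2

    circle = Circle(2.0)
    circle.area  # computed here and stored as `circle._lazy_area`
    circle.area  # served from the cache; `circle.computations` is still 1
    del circle.area  # the deleter drops the cache, so the next access recomputes
    return circle.computations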
def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Tuple]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Union[Any, Tuple]]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if name is None:
name = ("__none__",)
elif is_list_like(name):
name = tuple([str(n) for n in name])
else:
name = (str(name),)
return ("(%s)" % ", ".join(name)) if len(name) > 1 else name[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
Check whether the given tuple can be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
Check the given value is like a name.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(
Dict[Optional[Union[int, str]], int], {None: none_axis, "index": 0, "columns": 1}
).get(axis, axis)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
@overload
def verify_temp_column_name(df: spark.DataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(
df: "DataFrame", column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
...
def verify_temp_column_name(
df: Union["DataFrame", spark.DataFrame], column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, spark.DataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: spark.Column, right: spark.Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(F.lit(0), F.lit(0))
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 1)
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore
def compare_null_first(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNull() | right.isNull() | comp(left, right)
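# Editor's illustrative sketch (not part of the original module): the null-ordering
# semantics of the compare_* helpers above, evaluated on literal columns with an
# ascending "<" comparator. compare_null_first treats null as the smallest value,
# compare_null_last as the largest; compare_disallow_null rejects and
# compare_allow_null accepts comparisons involving null.
def _compare_null_ordering_example():  # pragma: no cover
    less_than = lambda left, right: left < right
    null_col = F.lit(None).cast("int")
    one_col = F.lit(1)
    return default_session().range(1).select(
        compare_null_first(null_col, one_col, less_than).alias("nulls_first"),  # True
        compare_null_last(null_col, one_col, less_than).alias("nulls_last"),  # False
        compare_disallow_null(null_col, one_col, less_than).alias("disallow_null"),  # False
        compare_allow_null(null_col, one_col, less_than).alias("allow_null"),  # True
    )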
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 35.394572
| 100
| 0.625664
|
794c622c32acd2cc224632bf9ef75ae1d1195141
| 1,974
|
py
|
Python
|
tests/test_reg.py
|
jddixon/fieldz
|
0503f776f053d9711ea4f17d53c17fd732679b02
|
[
"MIT"
] | null | null | null |
tests/test_reg.py
|
jddixon/fieldz
|
0503f776f053d9711ea4f17d53c17fd732679b02
|
[
"MIT"
] | null | null | null |
tests/test_reg.py
|
jddixon/fieldz
|
0503f776f053d9711ea4f17d53c17fd732679b02
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# test_reg.py
# import time
import unittest
# from rnglib import SimpleRNG
from wireops.enum import FieldTypes
from fieldz.reg import NodeReg
from fieldz.enum import CoreTypes
# TESTS --------------------------------------------------------------
class TestReg(unittest.TestCase):
def test_node_reg(self):
test_reg = NodeReg()
nbr_coretypes = len(CoreTypes)
self.assertEqual(nbr_coretypes, 6)
nbr_fieldtypes = len(FieldTypes)
self.assertEqual(nbr_fieldtypes, 18)
# DEBUG
print("test_node_reg: nbr_coretypes is %d" % nbr_coretypes)
print("test_node_reg: nbr_fieldtypes is %d" % nbr_fieldtypes)
print(
"test_node_reg: test_reg.next_reg_id is %d" %
test_reg.next_reg_id)
# END
self.assertEqual(
len(FieldTypes) + nbr_coretypes,
test_reg.next_reg_id)
# verify that all fieldTypes are defined in the registry, each
# with the proper index (vBool through fBytes32 at FieldTypes.MAX_NDX)
for ndx, ftype in enumerate(FieldTypes):
name = test_reg[ndx].qual_name
# DEBUG
print('%2u %s' % (ndx, name))
# END
self.assertEqual(ftype.sym, name)
for ndx, coretype in enumerate(CoreTypes):
i = ndx + nbr_fieldtypes
name = test_reg[i].qual_name
# DEBUG
print('%2u %s' % (i, name))
# END
self.assertEqual(coretype.sym, name)
# XXX FIGURE THIS OUT
# self.assertEqual(test_reg.name2reg_id(name), i)
self.assertEqual(nbr_fieldtypes + nbr_coretypes, len(test_reg))
# print "DEBUG: len(test_reg) is %u" % len(test_reg)
# print "DEBUG: next_reg_id is %u" % test_reg.next_reg_id
self.assertEqual(len(test_reg), test_reg.next_reg_id)
if __name__ == '__main__':
unittest.main()
| 29.909091
| 78
| 0.598784
|
794c6337c3bc31f6a638917d7acf8c6201d09a77
| 11,478
|
py
|
Python
|
flask_flatpages/flatpages.py
|
blackdog308/Flask-FlatPages
|
d618de221bbc192693322df54bbf56c0abe64cfd
|
[
"BSD-3-Clause"
] | null | null | null |
flask_flatpages/flatpages.py
|
blackdog308/Flask-FlatPages
|
d618de221bbc192693322df54bbf56c0abe64cfd
|
[
"BSD-3-Clause"
] | null | null | null |
flask_flatpages/flatpages.py
|
blackdog308/Flask-FlatPages
|
d618de221bbc192693322df54bbf56c0abe64cfd
|
[
"BSD-3-Clause"
] | null | null | null |
"""
=========================
flask_flatpages.flatpages
=========================
Flatpages extension.
"""
import operator
import os
from itertools import takewhile
import six
from flask import abort
from werkzeug.utils import cached_property, import_string
from .page import Page
from .utils import force_unicode, pygmented_markdown
if six.PY3:
from inspect import getfullargspec
else:
from inspect import getargspec as getfullargspec
class FlatPages(object):
"""A collection of :class:`Page` objects."""
#: Default configuration for FlatPages extension
default_config = (
('root', 'pages'),
('extension', '.html'),
('encoding', 'utf-8'),
('html_renderer', pygmented_markdown),
('markdown_extensions', ['codehilite']),
('extension_configs', {}),
('auto_reload', 'if debug'),
('case_insensitive', False),
('instance_relative', False)
)
def __init__(self, app=None, name=None):
"""Initialize FlatPages extension.
:param app: Your application. Can be omitted if you call
:meth:`init_app` later.
:type app: A :class:`~flask.Flask` instance
:param name: The name for this FlatPages instance. Used for looking
up config values using
'FLATPAGES_%s_%s' % (name.upper(), key)
By default, no name is used, so configuration is
done by specifying config values using
'FLATPAGES_%s' % (key)
Typically, you only need to set this parameter if you
want to use multiple :class:`FlatPages` instances within the
same Flask application.
:type name: string
.. versionchanged:: 0.6
New parameter `name` to support multiple FlatPages instances.
"""
self.name = name
if name is None:
self.config_prefix = 'FLATPAGES'
else:
self.config_prefix = '_'.join(('FLATPAGES', name.upper()))
#: dict of filename: (page object, mtime when loaded)
self._file_cache = {}
if app:
self.init_app(app)
def __iter__(self):
"""Iterate on all :class:`Page` objects."""
return six.itervalues(self._pages)
def config(self, key):
"""Read actual configuration from Flask application config.
:param key: Lowercase config key from :attr:`default_config` tuple
"""
return self.app.config['_'.join((self.config_prefix, key.upper()))]
def get(self, path, default=None):
"""
Return the :class:`Page` object at ``path``.
Returns ``default`` if there is no such page.
"""
# This may trigger the property. Do it outside of the try block.
pages = self._pages
try:
return pages[path]
except KeyError:
return default
def get_or_404(self, path):
"""
Return the :class:`Page` object at ``path``.
Raise Flask's 404 error if there is no such page.
"""
page = self.get(path)
if not page:
abort(404)
return page
def init_app(self, app):
"""
Use to initialize an application.
Useful for passing an app later and for app factory patterns.
:param app: your application
:type app: a :class:`~flask.Flask` instance
"""
# Store default config to application
for key, value in self.default_config:
config_key = '_'.join((self.config_prefix, key.upper()))
app.config.setdefault(config_key, value)
# Register function to forget all pages if necessary
app.before_request(self._conditional_auto_reset)
# And finally store application to current instance and current
# instance to application
if 'flatpages' not in app.extensions:
app.extensions['flatpages'] = {}
app.extensions['flatpages'][self.name] = self
self.app = app
def reload(self):
"""Forget all pages.
All pages will be reloaded next time they're accessed.
"""
try:
# This will "unshadow" the cached_property.
# The property will be re-executed on next access.
del self.__dict__['_pages']
except KeyError:
pass
@property
def root(self):
"""Full path to the directory where pages are looked for.
This corresponds to the `FLATPAGES_%(name)s_ROOT` config value,
interpreted as relative to the app's root directory, or as relative
to the app's instance folder if `FLATPAGES_%(name)s_INSTANCE_RELATIVE`
is set to `True`.
"""
if self.config('instance_relative'):
root_dir = os.path.join(self.app.instance_path,
self.config('root'))
else:
root_dir = os.path.join(self.app.root_path, self.config('root'))
return force_unicode(root_dir)
def _conditional_auto_reset(self):
"""Reset if configured to do so on new requests."""
auto = self.config('auto_reload')
if auto == 'if debug':
auto = self.app.debug
if auto:
self.reload()
def _load_file(self, path, filename, rel_path):
"""
Load file from file system and cache it.
We store the result as a tuple of the :class:`Page` object and the file `mtime`.
"""
mtime = os.path.getmtime(filename)
cached = self._file_cache.get(filename)
if cached and cached[1] == mtime:
page = cached[0]
else:
encoding = self.config('encoding')
if six.PY3:
with open(filename, encoding=encoding) as handler:
content = handler.read()
else:
with open(filename) as handler:
content = handler.read().decode(encoding)
page = self._parse(content, path, rel_path)
self._file_cache[filename] = (page, mtime)
return page
@cached_property
def _pages(self):
"""
Walk the page root directory and return a dict of pages.
Returns a dictionary of pages keyed by their path.
"""
def _walker():
"""
Walk over directory and find all possible flatpages.
Returns files which end with the string or sequence given by
``FLATPAGES_%(name)s_EXTENSION``.
"""
for cur_path, _, filenames in os.walk(self.root):
rel_path = cur_path.replace(self.root, '').lstrip(os.sep)
path_prefix = tuple(rel_path.split(os.sep)) if rel_path else ()
for name in filenames:
if not name.endswith(extension):
continue
full_name = os.path.join(cur_path, name)
name_without_extension = [name[:-len(item)]
for item in extension
if name.endswith(item)][0]
path = u'/'.join(path_prefix + (name_without_extension, ))
if self.config('case_insensitive'):
path = path.lower()
yield (path, full_name, rel_path)
# Read extension from config
extension = self.config('extension')
# Support for multiple extensions
if isinstance(extension, six.string_types):
if ',' in extension:
extension = tuple(extension.split(','))
else:
extension = (extension, )
elif isinstance(extension, (list, set)):
extension = tuple(extension)
# FlatPage extension should be a string or a sequence
if not isinstance(extension, tuple):
raise ValueError(
'Invalid value for FlatPages extension. Should be a string or '
'a sequence, got {0} instead: {1}'.
format(type(extension).__name__, extension)
)
pages = {}
for path, full_name, rel_path in _walker():
if path in pages:
raise ValueError(
'Multiple pages found which correspond to the same path. '
'This error can arise when using multiple extensions.')
pages[path] = self._load_file(path, full_name, rel_path)
return pages
def _parse(self, content, path, rel_path):
"""Parse a flatpage file, i.e. read and parse its meta data and body.
:return: initialized :class:`Page` instance.
"""
lines = iter(content.split('\n'))
# Read lines until an empty line is encountered.
meta = '\n'.join(takewhile(operator.methodcaller('strip'), lines))
# The rest is the content. `lines` is an iterator so it continues
# where `itertools.takewhile` left it.
content = '\n'.join(lines)
# Now we are ready to get the HTML renderer function
html_renderer = self.config('html_renderer')
# If function is not callable yet, import it
if not callable(html_renderer):
html_renderer = import_string(html_renderer)
# Make it possible to pass custom arguments to the renderer function
html_renderer = self._smart_html_renderer(html_renderer)
# Assign the relative path (to root) for use in the page object
folder = rel_path
# Initialize and return Page instance
return Page(path, meta, content, html_renderer, folder)
def _smart_html_renderer(self, html_renderer):
"""
Wrapper to enable rendering functions with differing signatures.
We stay backwards compatible by using reflection, i.e. we inspect the
given rendering function's signature in order to find out how many
arguments the function takes.
.. versionchanged:: 0.6
Support for HTML renderer functions with signature
``f(body, flatpages, page)``, where ``page`` is an instance of
:class:`Page`.
.. versionchanged:: 0.5
Support for HTML renderer functions with signature
``f(body, flatpages)``, where ``flatpages`` is an instance of
:class:`FlatPages`.
"""
def wrapper(page):
"""Wrap HTML renderer function.
Pass arguments to the renderer based on the number of arguments.
* 1 argument -> page body
* 2 arguments -> page body, flatpages instance
* 3 arguments -> page body, flatpages instance, page instance
"""
body = page.body
try:
args_length = len(getfullargspec(html_renderer).args)
except TypeError:
return html_renderer(body)
if args_length == 1:
return html_renderer(body)
elif args_length == 2:
return html_renderer(body, self)
elif args_length == 3:
return html_renderer(body, self, page)
raise ValueError(
'HTML renderer function {0!r} not supported by '
'Flask-FlatPages, wrong number of arguments: {1}.'.
format(html_renderer, args_length)
)
return wrapper
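# Editor's illustrative sketch (not part of the original module): the three HTML
# renderer signatures dispatched by `_smart_html_renderer`. The renderer bodies are
# hypothetical; only the number of accepted arguments matters for the dispatch.
def _example_html_renderers():  # pragma: no cover
    def render_body(body):
        # f(body): receives only the page body.
        return "<p>%s</p>" % body

    def render_with_flatpages(body, flatpages):
        # f(body, flatpages): also receives the FlatPages instance (supported since 0.5).
        return "<p>%s</p>" % body

    def render_with_page(body, flatpages, page):
        # f(body, flatpages, page): also receives the Page being rendered (since 0.6).
        return "<p>%s</p>" % body

    return render_body, render_with_flatpages, render_with_page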
| 33.95858
| 79
| 0.571354
|
794c639e5d20656ef44130fd65349221cb90bc6f
| 392
|
py
|
Python
|
citymngmt/migrations/0036_alter_company_color.py
|
NazarenoCavazzon/mibus-api
|
1213971d410d9183d8fc1d51eb996bd5ce7158bf
|
[
"MIT"
] | null | null | null |
citymngmt/migrations/0036_alter_company_color.py
|
NazarenoCavazzon/mibus-api
|
1213971d410d9183d8fc1d51eb996bd5ce7158bf
|
[
"MIT"
] | null | null | null |
citymngmt/migrations/0036_alter_company_color.py
|
NazarenoCavazzon/mibus-api
|
1213971d410d9183d8fc1d51eb996bd5ce7158bf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-11 02:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('citymngmt', '0035_auto_20211011_0001'),
]
operations = [
migrations.AlterField(
model_name='company',
name='color',
field=models.TextField(default='#30b618'),
),
]
| 20.631579
| 54
| 0.596939
|
794c646de1379f2e13d1c95d19cd113962518b4d
| 2,129
|
py
|
Python
|
scripts/quest/q21201e.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 2
|
2020-04-15T03:16:07.000Z
|
2020-08-12T23:28:32.000Z
|
scripts/quest/q21201e.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/quest/q21201e.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 3
|
2020-08-25T06:55:25.000Z
|
2020-12-01T13:07:43.000Z
|
# 21201 - [Job Adv] (Lv.30) Aran
sm.setSpeakerID(1201002)
sm.sendNext("First you promise to defeat the Black Mage and make me a famous weapon, then you abandon me for hundreds of years, and now you're telling me you don't remember who I am? What the...?! Do you think I'll let you get away with that? You're the one who begged and pined for me!")
sm.setPlayerAsSpeaker()
sm.sendNext("I did tell #p1203000# to make a polearm for me if I could prove my worth.")
sm.setSpeakerID(1201002)
sm.sendNext("After all that begging, shouldn't you treat me with a little more love and respect? Ya know, a weapon like me's rare and a wonderful thing. I am the ultimate #p1201001# that can help you defeat the Black Mage. How could you ditch me for hundreds of years...")
sm.setPlayerAsSpeaker()
sm.sendNext("Hey, I never begged for you.")
sm.setSpeakerID(1201002)
sm.sendNext("What? You never begged for me? Ha! #p1203000# told me you got on your knees, begged for me in tears, and... Wait a sec. Aran! Did you just remember who I am?!")
sm.setPlayerAsSpeaker()
sm.sendNext("Maybe a little bit...")
sm.setSpeakerID(1201002)
sm.sendNext("Aran, it is you! *Sniff sniff* Wait, *ahem* I didn't get emotional, it's just allergies. I know the Black Mage has stripped you of your abilities so you probably don't even have the strength to lift me... but at least you remember me! I'm glad that your memory's starting to return.")
if sm.sendAskYesNo("Even though you've lost your memory, you're still my master. You endured some very tough training in the past, and I'm sure your body still remembers the skills that got you through those hard times. Alright, I'll restore your abilities!"):
if not sm.canHold(1142130):
sm.sendSayOkay("Please make some space in your equipment inventory.")
sm.dispose()
sm.completeQuest(parentID)
sm.giveItem(1142130)
sm.jobAdvance(2110)
sm.sendNext("Your level isn't what it used to be back in your glory days, so I can't restore all of your old abilities. But the few that I can restore should help you level up faster. Now hurry up and train so you can return to the old you.")
| 88.708333
| 297
| 0.743542
|
794c64aa04347323d0856ec4fcf0dc784d86dd53
| 2,974
|
py
|
Python
|
MouseJiggle.py
|
Allencheng01/MouseJiggle
|
4cf3bbf3e78fe53850d69ec9f78c2d1940eeb07f
|
[
"BSD-2-Clause"
] | 2
|
2021-01-15T20:44:04.000Z
|
2021-02-09T16:56:50.000Z
|
MouseJiggle.py
|
Allencheng01/MouseJiggle
|
4cf3bbf3e78fe53850d69ec9f78c2d1940eeb07f
|
[
"BSD-2-Clause"
] | null | null | null |
MouseJiggle.py
|
Allencheng01/MouseJiggle
|
4cf3bbf3e78fe53850d69ec9f78c2d1940eeb07f
|
[
"BSD-2-Clause"
] | null | null | null |
import os, sys
import ctypes
import time
import multiprocessing
# # see http://msdn.microsoft.com/en-us/library/ms646260(VS.85).aspx for details
# ctypes.windll.user32.SetCursorPos(100, 20)
# ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
# ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
# Flag                    (value)  Meaning
# MOUSEEVENTF_ABSOLUTE   (0x8000)  The dx and dy parameters contain normalized absolute coordinates. If not set, those parameters contain relative data: the change in position since the last reported position. This flag can be set, or not set, regardless of what kind of mouse or mouse-like device, if any, is connected to the system.
# MOUSEEVENTF_LEFTDOWN   (0x0002)  The left button is down.
# MOUSEEVENTF_LEFTUP     (0x0004)  The left button is up.
# MOUSEEVENTF_MIDDLEDOWN (0x0020)  The middle button is down.
# MOUSEEVENTF_MIDDLEUP   (0x0040)  The middle button is up.
# MOUSEEVENTF_MOVE       (0x0001)  Movement occurred.
# MOUSEEVENTF_RIGHTDOWN  (0x0008)  The right button is down.
# MOUSEEVENTF_RIGHTUP    (0x0010)  The right button is up.
# MOUSEEVENTF_WHEEL      (0x0800)  The wheel has been moved, if the mouse has a wheel. The amount of movement is specified in dwData.
# MOUSEEVENTF_XDOWN      (0x0080)  An X button was pressed.
# MOUSEEVENTF_XUP        (0x0100)  An X button was released.
# MOUSEEVENTF_WHEEL      (0x0800)  The wheel button is rotated.
# MOUSEEVENTF_HWHEEL     (0x01000) The wheel button is tilted.
MOUSEEVENTF_MOVE = 0x0001 # mouse move
MOUSEEVENTF_LEFTDOWN = 0x0002 # left button down
MOUSEEVENTF_LEFTUP = 0x0004 # left button up
MOUSEEVENTF_RIGHTDOWN = 0x0008 # right button down
MOUSEEVENTF_RIGHTUP = 0x0010 # right button up
MOUSEEVENTF_MIDDLEDOWN = 0x0020 # middle button down
MOUSEEVENTF_MIDDLEUP = 0x0040 # middle button up
MOUSEEVENTF_WHEEL = 0x0800 # wheel button rolled
MOUSEEVENTF_ABSOLUTE = 0x8000 # absolute move
SM_CXSCREEN = 0
SM_CYSCREEN = 1
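# Editor's illustrative sketch (not part of the original script, and not called below):
# moving the cursor to an absolute pixel position with mouse_event. With
# MOUSEEVENTF_ABSOLUTE the dx/dy arguments are normalized to the 0..65535 range across
# the primary screen, so pixel coordinates are scaled by the screen size reported by
# GetSystemMetrics.
def move_cursor_absolute(x_pixels, y_pixels):
    screen_w = ctypes.windll.user32.GetSystemMetrics(SM_CXSCREEN)
    screen_h = ctypes.windll.user32.GetSystemMetrics(SM_CYSCREEN)
    dx = int(x_pixels * 65535 / screen_w)
    dy = int(y_pixels * 65535 / screen_h)
    ctypes.windll.user32.mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, dx, dy, 0, 0)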
class POINT(ctypes.Structure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
def MouseListener():
pos = POINT()
ctypes.windll.user32.GetCursorPos(ctypes.byref(pos))
x = pos.x
y = pos.y
while True:
time.sleep(10/1000)
ctypes.windll.user32.GetCursorPos(ctypes.byref(pos))
if x != pos.x or y != pos.y:
x, y = pos.x, pos.y
print(x,' , ', y)
pass
def main():
# p = multiprocessing.Process(target=MouseListener)
# p.start()
dx = 5
dy = -5
while True:
dx *= -1
dy *= -1
ctypes.windll.user32.mouse_event(MOUSEEVENTF_MOVE, dx, dy, 0, 0)
time.sleep(2)
# ctypes.windll.user32.keybd_event(0x31, 0, 0, 0) # Key Down
# ctypes.windll.user32.keybd_event(0x31, 0, 0x0002, 0) # Key Up
# p.join()
pass
if __name__ == "__main__":
main()
pass
| 29.445545
| 390
| 0.686281
|
794c64b94fe8173ca94ab83acac47a9fe6c5a045
| 4,609
|
py
|
Python
|
Athos/HelperScripts/FindAccuracy_Porthos.py
|
mpc-msri-dev/EzPC
|
a489c49d5c92f51df0277a7e5751e1b8baeb0bc1
|
[
"MIT"
] | null | null | null |
Athos/HelperScripts/FindAccuracy_Porthos.py
|
mpc-msri-dev/EzPC
|
a489c49d5c92f51df0277a7e5751e1b8baeb0bc1
|
[
"MIT"
] | null | null | null |
Athos/HelperScripts/FindAccuracy_Porthos.py
|
mpc-msri-dev/EzPC
|
a489c49d5c92f51df0277a7e5751e1b8baeb0bc1
|
[
"MIT"
] | null | null | null |
'''
Authors: Nishant Kumar.
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Use this script to find accuracy once the full exploration has been done on relevant scale factors.
# NOTE: The ground truth labels are in [1, 1000].
# Resnet outputs labels in [0,1000] -- class 0 is extraneous and is the other category.
# For DenseNet and SqueezeNet, the labels after argmax are in [0,999]:
# so either we have to subtract 1 from the ground truth labels, or add a +1 while outputting
# from the code for SqNet/DenseNet. Choosing to go with the former.
# So, in summary, when running resnet, use the last parameter as 1, while for SqueezeNet/DenseNet use it as 0.
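# Example invocations (the label file and output directory names below are just placeholders):
#   python3 FindAccuracy_Porthos.py groundTruthLabels.txt ./inference_outputs 1   # ResNet
#   python3 FindAccuracy_Porthos.py groundTruthLabels.txt ./inference_outputs 0   # SqueezeNet/DenseNet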
import os, sys
import numpy as np
if (len(sys.argv) < 4):
print("Usage : python3 FindAccuracy_Porthos.py <groundTruthLabelsFileName> <inferenceOutputDirectory> <lowerBoundOfOutputLabels>")
exit(1)
# Change following parameters accordingly
ScalesToCheck = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
# ScalesToCheck = [11]
numImages = 96
topK = 5
groundTruthLabelsFileName = sys.argv[1]
inferenceOutputDirectory = sys.argv[2]
lowerBoundOfOutputLabels = int(sys.argv[3])
if (lowerBoundOfOutputLabels != 0 and lowerBoundOfOutputLabels != 1):
print("lowerBoundOfOutputLabels should be either 0 or 1. Exiting.", file=sys.stderr)
exit(1)
with open(groundTruthLabelsFileName, 'r') as ff:
groundTruthLabels = ff.readlines()
groundTruthLabels = list(map(lambda x : int(x.rstrip()), groundTruthLabels)) #For imagenet, this is in [1,1000]
if (lowerBoundOfOutputLabels==0):
groundTruthLabels = list(map(lambda x : x-1, groundTruthLabels)) #If the labels in the output start from 0,
# subtract 1 from the ground truth labels.
def parseInferenceOutputFile(predictions, i, outputFileName):
with open(outputFileName, 'r') as ff:
lines = ff.readlines()
lines = list(map(lambda x : x.rstrip(), lines))
lines = list(filter(lambda x : x!='', lines))
if(len(lines)!=2):
print("Error in parsing : "+outputFileName)
assert(False)
imgCounter = None
for line in lines:
if (line.startswith('Answer for')):
imgCounter = int(line.split('=')[1].split(':')[0]) #This is assumed to be 0-indexed
else:
assert(imgCounter is not None)
preds = line.split()
# print(imgCounter,preds)
preds = np.array(list(map(lambda x : int(x), preds)))
topKPredsIdx = np.argpartition(preds, -1*topK)[-1*topK:]
topKPredsIdx = topKPredsIdx[np.argsort(preds[topKPredsIdx])]
for i, val in enumerate(topKPredsIdx):
predictions[imgCounter][i] = val
def calculateAccuracy(predictions):
global groundTruthLabels
top1CorrectPred = 0
topKCorrectPred = 0
for i in range(numImages):
if not(predictions[i][0]):
continue
if (groundTruthLabels[i] == predictions[i][-1]):
top1CorrectPred += 1
if (groundTruthLabels[i] in predictions[i]):
topKCorrectPred += 1
return (top1CorrectPred/(1.0*numImages), topKCorrectPred/(1.0*numImages))
for curScale in ScalesToCheck:
predictions = [[None]*topK for _ in range(numImages)]
for i in range(numImages):
curFileName = os.path.join(inferenceOutputDirectory, 'stderr_' + str(curScale) + '_' + str(i) +'_proc_1.outp')
parseInferenceOutputFile(predictions, i, curFileName)
for i in range(numImages):
for j in range(topK):
assert(predictions[i][j] is not None)
(top1Acc, topKAcc) = calculateAccuracy(predictions)
print("curScale = " + str(curScale) + ", top1Acc = " + str(top1Acc) + ", topKAcc = " + str(topKAcc))
| 42.284404
| 132
| 0.722933
|
794c6530522843d34631b9c8c1497d78e079adae
| 352
|
py
|
Python
|
gpu_bdb/setup.py
|
beckernick/tpcx-bb
|
f6a398d03e7aee4fd190606e6eb0fb1fbea3ea23
|
[
"Apache-2.0"
] | 62
|
2020-05-14T13:33:02.000Z
|
2020-10-29T13:28:26.000Z
|
gpu_bdb/setup.py
|
beckernick/tpcx-bb
|
f6a398d03e7aee4fd190606e6eb0fb1fbea3ea23
|
[
"Apache-2.0"
] | 104
|
2020-07-01T21:07:42.000Z
|
2020-11-13T16:36:04.000Z
|
gpu_bdb/setup.py
|
beckernick/tpcx-bb
|
f6a398d03e7aee4fd190606e6eb0fb1fbea3ea23
|
[
"Apache-2.0"
] | 21
|
2020-05-14T14:44:40.000Z
|
2020-11-07T12:08:28.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION.
from setuptools import find_packages, setup
qnums = [str(i).zfill(2) for i in range(1, 31)]
setup(
name="bdb_tools",
version="0.2",
author="RAPIDS",
packages=["benchmark_runner", "bdb_tools"],
package_data={"benchmark_runner": ["benchmark_config.yaml"]},
include_package_data=True,
)
| 25.142857
| 65
| 0.6875
|
794c6553a0df6730a4e8be18c8748f74ae505fdf
| 1,463
|
py
|
Python
|
TS_data_prophet_plotly.py
|
mmuratardag/DS_SpA_W07_Time_Series_Analysis
|
d3dc95d32f4e0a2efd927eaa9069289baefce7c6
|
[
"MIT"
] | null | null | null |
TS_data_prophet_plotly.py
|
mmuratardag/DS_SpA_W07_Time_Series_Analysis
|
d3dc95d32f4e0a2efd927eaa9069289baefce7c6
|
[
"MIT"
] | null | null | null |
TS_data_prophet_plotly.py
|
mmuratardag/DS_SpA_W07_Time_Series_Analysis
|
d3dc95d32f4e0a2efd927eaa9069289baefce7c6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import geopandas as gpd
import json
import plotly.express as px
df = pd.read_csv('berlin_weather_forecast.csv')
df['date'] = pd.to_datetime(df['date'].astype(str))
df_gb = df.groupby(['name','year','week'])['predicted_tempreature'].mean().reset_index()
df_gb['predicted_tempreature'] = df_gb['predicted_tempreature'].round(2)
#df_gb['predicted_tempreature'] = df_gb['predicted_tempreature']*100
#df_gb['predicted_tempreature'].min(), df_gb['predicted_tempreature'].max() ### (-235.0, 2186.0)
df_gb = df_gb[df_gb['year']>= 2021]
df_gb = df_gb[df_gb['week']<= 10]
filename = "berlin_bezirke.geojson"
file = open(filename)
geo_json_file = gpd.read_file(file)
json_file = geo_json_file.to_json()
converted_json = json.loads(json_file)
interactive_map = px.choropleth_mapbox(
mapbox_style = 'open-street-map',
data_frame = df_gb,
geojson = converted_json,
featureidkey = 'properties.name',
locations = 'name',
color = 'predicted_tempreature',
center = {"lat": 52.5200, "lon": 13.4050},
zoom = 10,
animation_frame = 'week',
animation_group = 'name',
color_continuous_scale = "thermal",
range_color = (-2.35, 22),
color_continuous_midpoint = 12,
title = "Predicted Weekly Average Tempratures for each Berlin District. The period is from the 1st week of January 2021 to the 1st week of March 2021.")
interactive_map.write_html("berlin_interactive_map.html", include_plotlyjs='cdn')
| 34.833333
| 156
| 0.71907
|
794c659c6b6c83d2837dde1f5ae9417a2d1e1c1e
| 348
|
py
|
Python
|
heuristics/conditions/named_conditions/__init__.py
|
TeamJumpstart/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 10
|
2021-04-18T17:54:02.000Z
|
2021-07-26T19:58:41.000Z
|
heuristics/conditions/named_conditions/__init__.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-21T15:13:41.000Z
|
2021-04-21T15:13:41.000Z
|
heuristics/conditions/named_conditions/__init__.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-20T09:42:50.000Z
|
2021-04-20T09:42:50.000Z
|
from heuristics.conditions.named_conditions.endgame_condition import EndgameCondition
from heuristics.conditions.named_conditions.lategame_condition import LategameCondition
from heuristics.conditions.named_conditions.midgame_condition import MidgameCondition
__all__ = [
"EndgameCondition",
"MidgameCondition",
"LategameCondition",
]
| 34.8
| 87
| 0.847701
|
794c664cdb397711551dee070ea292192e627c41
| 8,240
|
py
|
Python
|
bgexplorer/modeleditor/widgets.py
|
bloer/bgexplorer
|
c87b79b6eee85441628b696051ad9f2e95246694
|
[
"BSD-2-Clause"
] | null | null | null |
bgexplorer/modeleditor/widgets.py
|
bloer/bgexplorer
|
c87b79b6eee85441628b696051ad9f2e95246694
|
[
"BSD-2-Clause"
] | 74
|
2017-10-17T20:49:31.000Z
|
2022-03-11T17:52:27.000Z
|
bgexplorer/modeleditor/widgets.py
|
bloer/bgexplorer
|
c87b79b6eee85441628b696051ad9f2e95246694
|
[
"BSD-2-Clause"
] | null | null | null |
"""
@file widgets.py
@author: bloer
Defines a few custom widgets for rendering dynamic elements
"""
#python 2/3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import json
import types
from collections import namedtuple
from wtforms.widgets import (html_params, HTMLString, HiddenInput, TextInput,
CheckboxInput, RadioInput)
from wtforms.utils import unset_value
from flask_bootstrap import is_hidden_field_filter
def is_hidden_field(field):
return (is_hidden_field_filter(field)
or isinstance(field.widget, HiddenInput)
or getattr(field.widget,'input_type', None) == 'hidden')
class TableRow(object):
"""Render a FormField as a row in a table"""
def __call__(self, field, **kwargs):
html = []
kwargs.setdefault('id', field.id)
kwargs.setdefault('data-prefix', field.name)
render_kw = kwargs.pop('render_kw',{})
html.append("<tr %s>"%html_params(**kwargs))
for subfield in field:
html.append('<td data-column="%s"'%subfield.short_name)
if is_hidden_field(subfield):
html.append(' class="hide"')
html.append('>%s</td>'%subfield(**render_kw))
#add remove button
html.append('<td data-column="delete">'
'<a onclick="$(this).parents(\'tr\')'
'.fadeOut(function(){$(this).remove();});">')
html.append('<span class="text-danger linklike glyphicon '
'glyphicon-remove"></span>')
html.append('</a></td>')
html.append("</tr>")
return HTMLString(''.join(html))
class SortableTable(object):
"""
Create a table with sortable rows that returns the data as
a JSON string
"""
def __call__(self, field, **kwargs):
html = []
id = kwargs.setdefault('id', field.id)
#we need a bound subfield to make the table columns
boundform = field.unbound_field.bind(
form=None, prefix=field._prefix, _meta=field.meta,
translations=field._translations,
name=field.short_name,
id=field.id+'-template')
boundform.process(None, unset_value)
#now make the table
#Flask_Bootstrap wants to give this form-control class...
kwargs['class'] = kwargs.get('class','').replace('form-control','')
kwargs['class'] += ' '+kwargs.get('_class','')
html.append("<table %s>"%html_params(**kwargs));
html.append("<thead><tr>")
#should inherit from FieldList...
for subfield in boundform:
if not is_hidden_field(subfield):
html.append('<th title="%s">%s</th>'%(subfield.description,
subfield.label))
#one more to hold the remove button
html.append("<th></th>");
html.append("</tr></thead>")
#now loop through the subforms
html.append('<tbody class="sortable">')
for entry in field:
html.append(TableRow()(entry, **{'data-prefix':field.short_name}))
#make a fake hidden form for cloning
html.append(TableRow()(boundform, render_kw={'disabled':'disabled'},
**{'class':'hide template',
'data-prefix':field.short_name}))
html.append("</tbody></table>")
#add an 'add new' button
html.append('<button type="button" class="btn text-primary" ')
html.append('onclick="addrow(\'#%s\')" >'%id)
html.append('<span class="glyphicon glyphicon-plus"></span>')
html.append(' Add')
html.append('</button>')
html.append(
"""<script type="text/javascript">
function setindex(index, row){
row = $(row);
row.data('index',index);
row.attr('id', row.data('prefix')+'-'+index);
var inputs = row.find("[name]").attr("disabled",false);
inputs.each(function(){
var name = row.data('prefix') + '-' + index + '-'
+ $(this).parents("td").data('column');
$(this).attr('id',name).attr('name',name);
});
}
function addrow(obj, valsFrom){
obj = $(obj);
var row = obj.find("tr.template").clone()
.removeClass('hide').removeClass('template');
if(valsFrom){
valsFrom=$(valsFrom);
//copy data-* for each column we care about
row.children("td").each(function(){
var name=$(this).data('column');
if(!name) return;
var copyval = valsFrom.data(name);
if(!copyval) return;
$(this).find("input, select").val(copyval);
$(this).children().css("display","none");
$("<p class='form-control-static'>")
.text(copyval)
.appendTo($(this));
});
$("<div>")
.attr("class","alert alert-success")
.css("position","absolute")
.css("width",valsFrom.width())
.css("height",valsFrom.height())
.appendTo("body")
.offset(valsFrom.offset())
.animate({
'left':obj.offset().left,
'top':obj.offset().top + obj.height(),
},function(){
obj.children('tbody').append(row);
$(this).remove();
});
}
var index = obj.find("tbody tr").not('.template').size();
setindex(index, row);
if(!valsFrom)
obj.children('tbody').append(row);
}
function sortIndices(container){
$(container).children().not('.template').each(setindex);
}
</script>
""")
return HTMLString(''.join(html))
class InputChoices(TextInput):
def __init__(self, choices=None):
self.choices = choices or []
def __call__(self, field, **kwargs):
html = []
html.append('<div class="dropdown">')
html.append('<div class="input-group" data-toggle="dropdown">')
html.append(super().__call__(field, **kwargs))
html.append('<div class="input-group-btn">')
html.append('<button class="btn dropdown-toggle hide" type="button">')
#html.append('<span class="caret" style="width:0.5em;"></span>')
html.append('</button></div>')
html.append('</div>')
html.append('<ul class="dropdown-menu">')
for choice in self.choices:
html.append('<li><a href="javascript:void(0)" onclick='
'"$(this).parents(\'.dropdown\').find(\'input\')'
'.val($(this).text());">'
'%s</a></li>'%choice)
html.append('</ul></div>')
return HTMLString(''.join(html))
class StaticIfExists(object):
""" If the value is already defined, render it as static
only create a non-hidden input if _value is empty
Args:
default (widget): Widget to render if value is not set
"""
def __init__(self, default=TextInput()):
self.default = default
def __call__(self, field, **kwargs):
value = field.data
if hasattr(field,'_value'):
value = field._value()
if not value or value == str(None):
return self.default(field, **kwargs)
else:
if not hasattr(field, '_value'):
field._value = types.MethodType(lambda self: self.data, field)
if hasattr(field,'link'):
value = '<a href="%s">%s</a>'%(field.link, value)
return HiddenInput()(field, **kwargs)+\
HTMLString('<p class="form-control-static">'+value+'</p')
| 40.392157
| 78
| 0.514442
|
794c669636f8510b4e6d8f39606ee1efdc196d5d
| 17,524
|
py
|
Python
|
ckanext/stats/stats.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 1
|
2019-11-03T11:35:38.000Z
|
2019-11-03T11:35:38.000Z
|
ckanext/stats/stats.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 135
|
2019-07-03T19:58:12.000Z
|
2020-02-14T19:57:33.000Z
|
ckanext/stats/stats.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 3
|
2019-09-11T10:04:59.000Z
|
2020-01-30T15:55:50.000Z
|
# encoding: utf-8
import datetime
from ckan.common import config
from six import text_type
from sqlalchemy import Table, select, join, func, and_
import ckan.plugins as p
import ckan.model as model
cache_enabled = p.toolkit.asbool(config.get('ckanext.stats.cache_enabled', 'True'))
if cache_enabled:
from pylons import cache
our_cache = cache.get_cache('stats', type='dbm')
DATE_FORMAT = '%Y-%m-%d'
def table(name):
return Table(name, model.meta.metadata, autoload=True)
def datetime2date(datetime_):
return datetime.date(datetime_.year, datetime_.month, datetime_.day)
class Stats(object):
@classmethod
def top_rated_packages(cls, limit=10):
# NB Not using sqlalchemy as sqla 0.4 doesn't work using both group_by
# and apply_avg
package = table('package')
rating = table('rating')
sql = select([package.c.id, func.avg(rating.c.rating), func.count(rating.c.rating)], from_obj=[package.join(rating)]).\
where(and_(package.c.private==False, package.c.state=='active')). \
group_by(package.c.id).\
order_by(func.avg(rating.c.rating).desc(), func.count(rating.c.rating).desc()).\
limit(limit)
res_ids = model.Session.execute(sql).fetchall()
res_pkgs = [(model.Session.query(model.Package).get(text_type(pkg_id)), avg, num) for pkg_id, avg, num in res_ids]
return res_pkgs
@classmethod
def most_edited_packages(cls, limit=10):
package_revision = table('package_revision')
package = table('package')
s = select([package_revision.c.id, func.count(package_revision.c.revision_id)], from_obj=[package_revision.join(package)]).\
where(and_(package.c.private==False, package.c.state=='active', )).\
group_by(package_revision.c.id).\
order_by(func.count(package_revision.c.revision_id).desc()).\
limit(limit)
res_ids = model.Session.execute(s).fetchall()
res_pkgs = [(model.Session.query(model.Package).get(text_type(pkg_id)), val) for pkg_id, val in res_ids]
return res_pkgs
@classmethod
def largest_groups(cls, limit=10):
member = table('member')
package = table('package')
j = join(member, package,
member.c.table_id == package.c.id)
s = select([member.c.group_id, func.count(member.c.table_id)]).\
select_from(j).\
group_by(member.c.group_id).\
where(and_(member.c.group_id!=None, member.c.table_name=='package', package.c.private==False, package.c.state=='active')).\
order_by(func.count(member.c.table_id).desc()).\
limit(limit)
res_ids = model.Session.execute(s).fetchall()
res_groups = [(model.Session.query(model.Group).get(text_type(group_id)), val) for group_id, val in res_ids]
return res_groups
@classmethod
def top_tags(cls, limit=10, returned_tag_info='object'): # by package
assert returned_tag_info in ('name', 'id', 'object')
tag = table('tag')
package_tag = table('package_tag')
package = table('package')
if returned_tag_info == 'name':
from_obj = [package_tag.join(tag)]
tag_column = tag.c.name
else:
from_obj = None
tag_column = package_tag.c.tag_id
j = join(package_tag, package,
package_tag.c.package_id == package.c.id)
s = select([tag_column, func.count(package_tag.c.package_id)],
from_obj=from_obj).\
select_from(j).\
where(and_(package_tag.c.state=='active', package.c.private == False, package.c.state == 'active' ))
s = s.group_by(tag_column).\
order_by(func.count(package_tag.c.package_id).desc()).\
limit(limit)
res_col = model.Session.execute(s).fetchall()
if returned_tag_info in ('id', 'name'):
return res_col
elif returned_tag_info == 'object':
res_tags = [(model.Session.query(model.Tag).get(text_type(tag_id)), val) for tag_id, val in res_col]
return res_tags
@classmethod
def top_package_creators(cls, limit=10):
userid_count = \
model.Session.query(model.Package.creator_user_id,
func.count(model.Package.creator_user_id))\
.filter(model.Package.state == 'active')\
.filter(model.Package.private == False)\
.group_by(model.Package.creator_user_id) \
.order_by(func.count(model.Package.creator_user_id).desc())\
.limit(limit).all()
user_count = [
(model.Session.query(model.User).get(text_type(user_id)), count)
for user_id, count in userid_count
if user_id]
return user_count
class RevisionStats(object):
@classmethod
def package_addition_rate(cls, weeks_ago=0):
week_commenced = cls.get_date_weeks_ago(weeks_ago)
return cls.get_objects_in_a_week(week_commenced,
type_='package_addition_rate')
@classmethod
def package_revision_rate(cls, weeks_ago=0):
week_commenced = cls.get_date_weeks_ago(weeks_ago)
return cls.get_objects_in_a_week(week_commenced,
type_='package_revision_rate')
@classmethod
def get_date_weeks_ago(cls, weeks_ago):
'''
@param weeks_ago: specify how many weeks ago to give count for
(0 = this week so far)
'''
date_ = datetime.date.today()
return date_ - datetime.timedelta(days=
datetime.date.weekday(date_) + 7 * weeks_ago)
@classmethod
def get_week_dates(cls, weeks_ago):
'''
@param weeks_ago: specify how many weeks ago to give count for
(0 = this week so far)
'''
package_revision = table('package_revision')
revision = table('revision')
today = datetime.date.today()
date_from = datetime.datetime(today.year, today.month, today.day) -\
datetime.timedelta(days=datetime.date.weekday(today) + \
7 * weeks_ago)
date_to = date_from + datetime.timedelta(days=7)
return (date_from, date_to)
@classmethod
def get_date_week_started(cls, date_):
assert isinstance(date_, datetime.date)
if isinstance(date_, datetime.datetime):
date_ = datetime2date(date_)
return date_ - datetime.timedelta(days=datetime.date.weekday(date_))
@classmethod
def get_package_revisions(cls):
'''
@return: Returns list of revisions and date of them, in
format: [(id, date), ...]
'''
package_revision = table('package_revision')
revision = table('revision')
s = select([package_revision.c.id, revision.c.timestamp], from_obj=[package_revision.join(revision)]).order_by(revision.c.timestamp)
res = model.Session.execute(s).fetchall() # [(id, datetime), ...]
return res
@classmethod
def get_new_packages(cls):
'''
@return: Returns list of new pkgs and date when they were created, in
format: [(id, date_ordinal), ...]
'''
def new_packages():
# Can't filter by time in select because 'min' function has to
# be 'for all time' else you get first revision in the time period.
package_revision = table('package_revision')
revision = table('revision')
s = select([package_revision.c.id, func.min(revision.c.timestamp)], from_obj=[package_revision.join(revision)]).group_by(package_revision.c.id).order_by(func.min(revision.c.timestamp))
res = model.Session.execute(s).fetchall() # [(id, datetime), ...]
res_pickleable = []
for pkg_id, created_datetime in res:
res_pickleable.append((pkg_id, created_datetime.toordinal()))
return res_pickleable
if cache_enabled:
week_commences = cls.get_date_week_started(datetime.date.today())
key = 'all_new_packages_%s' + week_commences.strftime(DATE_FORMAT)
new_packages = our_cache.get_value(key=key,
createfunc=new_packages)
else:
new_packages = new_packages()
return new_packages
@classmethod
def get_deleted_packages(cls):
'''
@return: Returns list of deleted pkgs and date when they were deleted, in
format: [(id, date_ordinal), ...]
'''
def deleted_packages():
# Can't filter by time in select because 'min' function has to
# be 'for all time' else you get first revision in the time period.
package_revision = table('package_revision')
revision = table('revision')
s = select([package_revision.c.id, func.min(revision.c.timestamp)], from_obj=[package_revision.join(revision)]).\
where(package_revision.c.state==model.State.DELETED).\
group_by(package_revision.c.id).\
order_by(func.min(revision.c.timestamp))
res = model.Session.execute(s).fetchall() # [(id, datetime), ...]
res_pickleable = []
for pkg_id, deleted_datetime in res:
res_pickleable.append((pkg_id, deleted_datetime.toordinal()))
return res_pickleable
if cache_enabled:
week_commences = cls.get_date_week_started(datetime.date.today())
key = 'all_deleted_packages_%s' + week_commences.strftime(DATE_FORMAT)
deleted_packages = our_cache.get_value(key=key,
createfunc=deleted_packages)
else:
deleted_packages = deleted_packages()
return deleted_packages
@classmethod
def get_num_packages_by_week(cls):
def num_packages():
new_packages_by_week = cls.get_by_week('new_packages')
deleted_packages_by_week = cls.get_by_week('deleted_packages')
first_date = (min(datetime.datetime.strptime(new_packages_by_week[0][0], DATE_FORMAT),
datetime.datetime.strptime(deleted_packages_by_week[0][0], DATE_FORMAT))).date()
cls._cumulative_num_pkgs = 0
new_pkgs = []
deleted_pkgs = []
def build_weekly_stats(week_commences, new_pkg_ids, deleted_pkg_ids):
num_pkgs = len(new_pkg_ids) - len(deleted_pkg_ids)
new_pkgs.extend([model.Session.query(model.Package).get(id).name for id in new_pkg_ids])
deleted_pkgs.extend([model.Session.query(model.Package).get(id).name for id in deleted_pkg_ids])
cls._cumulative_num_pkgs += num_pkgs
return (week_commences.strftime(DATE_FORMAT),
num_pkgs, cls._cumulative_num_pkgs)
week_ends = first_date
today = datetime.date.today()
new_package_week_index = 0
deleted_package_week_index = 0
weekly_numbers = [] # [(week_commences, num_packages, cumulative_num_pkgs])]
while week_ends <= today:
week_commences = week_ends
week_ends = week_commences + datetime.timedelta(days=7)
if datetime.datetime.strptime(new_packages_by_week[new_package_week_index][0], DATE_FORMAT).date() == week_commences:
new_pkg_ids = new_packages_by_week[new_package_week_index][1]
new_package_week_index += 1
else:
new_pkg_ids = []
if datetime.datetime.strptime(deleted_packages_by_week[deleted_package_week_index][0], DATE_FORMAT).date() == week_commences:
deleted_pkg_ids = deleted_packages_by_week[deleted_package_week_index][1]
deleted_package_week_index += 1
else:
deleted_pkg_ids = []
weekly_numbers.append(build_weekly_stats(week_commences, new_pkg_ids, deleted_pkg_ids))
# just check we got to the end of each count
assert new_package_week_index == len(new_packages_by_week)
assert deleted_package_week_index == len(deleted_packages_by_week)
return weekly_numbers
if cache_enabled:
week_commences = cls.get_date_week_started(datetime.date.today())
key = 'number_packages_%s' + week_commences.strftime(DATE_FORMAT)
num_packages = our_cache.get_value(key=key,
createfunc=num_packages)
else:
num_packages = num_packages()
return num_packages
@classmethod
def get_by_week(cls, object_type):
cls._object_type = object_type
def objects_by_week():
if cls._object_type == 'new_packages':
objects = cls.get_new_packages()
def get_date(object_date):
return datetime.date.fromordinal(object_date)
elif cls._object_type == 'deleted_packages':
objects = cls.get_deleted_packages()
def get_date(object_date):
return datetime.date.fromordinal(object_date)
elif cls._object_type == 'package_revisions':
objects = cls.get_package_revisions()
def get_date(object_date):
return datetime2date(object_date)
else:
raise NotImplementedError()
first_date = get_date(objects[0][1]) if objects else datetime.date.today()
week_commences = cls.get_date_week_started(first_date)
week_ends = week_commences + datetime.timedelta(days=7)
week_index = 0
weekly_pkg_ids = [] # [(week_commences, [pkg_id1, pkg_id2, ...])]
pkg_id_stack = []
cls._cumulative_num_pkgs = 0
def build_weekly_stats(week_commences, pkg_ids):
num_pkgs = len(pkg_ids)
cls._cumulative_num_pkgs += num_pkgs
return (week_commences.strftime(DATE_FORMAT),
pkg_ids, num_pkgs, cls._cumulative_num_pkgs)
for pkg_id, date_field in objects:
date_ = get_date(date_field)
if date_ >= week_ends:
weekly_pkg_ids.append(build_weekly_stats(week_commences, pkg_id_stack))
pkg_id_stack = []
week_commences = week_ends
week_ends = week_commences + datetime.timedelta(days=7)
pkg_id_stack.append(pkg_id)
weekly_pkg_ids.append(build_weekly_stats(week_commences, pkg_id_stack))
today = datetime.date.today()
while week_ends <= today:
week_commences = week_ends
week_ends = week_commences + datetime.timedelta(days=7)
weekly_pkg_ids.append(build_weekly_stats(week_commences, []))
return weekly_pkg_ids
if cache_enabled:
week_commences = cls.get_date_week_started(datetime.date.today())
key = '%s_by_week_%s' % (cls._object_type, week_commences.strftime(DATE_FORMAT))
objects_by_week_ = our_cache.get_value(key=key,
createfunc=objects_by_week)
else:
objects_by_week_ = objects_by_week()
return objects_by_week_
@classmethod
def get_objects_in_a_week(cls, date_week_commences,
type_='new-package-rate'):
'''
@param type: Specifies what to return about the specified week:
"package_addition_rate" number of new packages
"package_revision_rate" number of package revisions
"new_packages" a list of the packages created
in a tuple with the date.
"deleted_packages" a list of the packages deleted
in a tuple with the date.
@param dates: date range of interest - a tuple:
(start_date, end_date)
'''
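        # Usage sketch: date_week_commences must be a week-start date such as the ones
        # produced by get_date_weeks_ago / get_date_week_started; for example,
        #   RevisionStats.get_objects_in_a_week(RevisionStats.get_date_weeks_ago(1),
        #                                       type_='package_addition_rate')
        # would return the number of packages first created in the previous week, while an
        # unknown week raises TypeError('Week specified is outside range').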
assert isinstance(date_week_commences, datetime.date)
if type_ in ('package_addition_rate', 'new_packages'):
object_type = 'new_packages'
elif type_ == 'deleted_packages':
object_type = 'deleted_packages'
elif type_ == 'package_revision_rate':
object_type = 'package_revisions'
else:
raise NotImplementedError()
objects_by_week = cls.get_by_week(object_type)
date_wc_str = date_week_commences.strftime(DATE_FORMAT)
object_ids = None
for objects_in_a_week in objects_by_week:
if objects_in_a_week[0] == date_wc_str:
object_ids = objects_in_a_week[1]
break
if object_ids is None:
raise TypeError('Week specified is outside range')
assert isinstance(object_ids, list)
if type_ in ('package_revision_rate', 'package_addition_rate'):
return len(object_ids)
elif type_ in ('new_packages', 'deleted_packages'):
return [ model.Session.query(model.Package).get(pkg_id) \
for pkg_id in object_ids ]
| 46.981233
| 196
| 0.606597
|
794c669e7e2d777573d1f216393803f752a944b1
| 10,840
|
py
|
Python
|
fline/models/models/object_detection/map_net.py
|
asromahin/fline
|
a34243878093b3b883607557eeaf968ef4b8acf6
|
[
"MIT"
] | 5
|
2021-07-01T08:19:51.000Z
|
2022-03-28T06:09:55.000Z
|
fline/models/models/object_detection/map_net.py
|
asromahin/fline
|
a34243878093b3b883607557eeaf968ef4b8acf6
|
[
"MIT"
] | 1
|
2022-03-21T02:42:44.000Z
|
2022-03-28T06:10:57.000Z
|
fline/models/models/object_detection/map_net.py
|
asromahin/fline
|
a34243878093b3b883607557eeaf968ef4b8acf6
|
[
"MIT"
] | null | null | null |
import math  # used by bbox_iou below for the CIoU penalty term
import torch
import segmentation_models_pytorch as smp
from fline.models.models.segmentation.fpn import TimmFPN
from fline.models.encoders.timm import TimmEncoder
from fline.models.models.research.extractor import VectorsFromMask, VectorsFromMaskV2
from fline.models.models.research.connect_net import ConnectNet
from fline.models.models.research.connected_components import ConnectedComponents
from fline.losses.object_detection.iou import IouDotsLoss
class MapNetTimm(torch.nn.Module):
def __init__(
self,
backbone_name,
#features_size=64,
classes=1,
device='cpu',
):
super(MapNetTimm, self).__init__()
self.device = device
self.backbone_name = backbone_name
self.model = TimmFPN(
backbone_name=backbone_name,
#activation=None,
#classes=features_size,
)
features_size = self.model.out_feature_channels
self.out_classes = torch.nn.Conv2d(
in_channels=features_size,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
)
self.out_bbox = torch.nn.Conv2d(
in_channels=features_size,
out_channels=4,
kernel_size=3,
stride=1,
padding=1,
)
self.softmax = torch.nn.Softmax2d()
def forward(self, x, left_top_points=None, right_bottom_points=None):
encoded = self.model(x)
points = self.out_classes(encoded)
points = self.softmax(points)
bboxes = self.out_bbox(encoded)
pos_mask = make_pos_mask(encoded.shape[-2:], self.device)
bboxes[:,:2,:,:] += pos_mask
return points, bboxes
class MapNetSMP(torch.nn.Module):
def __init__(
self,
backbone_name,
features_size=64,
classes=1,
device='cpu',
):
super(MapNetSMP, self).__init__()
self.device = device
self.backbone_name = backbone_name
self.model = smp.FPN(
encoder_name=backbone_name,
#activation=None,
classes=features_size,
)
#features_size = self.model.out_feature_channels
self.out_classes = torch.nn.Conv2d(
in_channels=features_size,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
)
self.out_bbox = torch.nn.Conv2d(
in_channels=features_size,
out_channels=4,
kernel_size=3,
stride=1,
padding=1,
)
self.softmax = torch.nn.Softmax2d()
def forward(self, x, left_top_points=None, right_bottom_points=None):
encoded = self.model(x)
points = self.out_classes(encoded)
points = self.softmax(points)
bboxes = self.out_bbox(encoded)
pos_mask = make_pos_mask(encoded.shape[-2:], self.device)
bboxes[:,:2,:,:] += pos_mask
return points, bboxes
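# make_pos_mask builds a (2, H, W) grid of (approximately) normalized x/y coordinates in [0, 1);
# it is added onto the first two bbox channels above, so the network effectively predicts box
# centres as offsets from each spatial cell's own position.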
def make_pos_mask(shape, device):
res = torch.arange(shape[0]*shape[1], device=device).reshape(shape)
x = ((res % shape[1])/shape[1]).unsqueeze(dim=0)
y = ((res / shape[1])/shape[0]).unsqueeze(dim=0)
res_mask = torch.cat([x, y], dim=0)
return res_mask
# class MapLoss(torch.nn.Module):
# def __init__(self, device):
# super(MapLoss, self).__init__()
# self.loss = IouDotsLoss(device)
# self.device = device
# self.mse = torch.nn.MSELoss()
# self.limit = 32
#
# def forward(self, points, bboxes, target_bboxes):
# res_loss = None
# for b in range(points.shape[0]):
# mask = points[b].argmax(dim=0) == 1
# ph = torch.zeros(points[b].shape, device=self.device)
# ph[0, :, :] = 1
# #if res_loss is None:
# # res_loss = self.mse(points[b], ph)
# #else:
# # res_loss += self.mse(points[b], ph)
# #print(points.shape, mask.shape, bboxes.shape)
# pred_bboxes = bboxes[b, :, mask]
# #print(pred_bboxes.shape)
# pred_bboxes = pred_bboxes.transpose(0, 1)
# for i, target_bbox in enumerate(target_bboxes[b]):
# target_loss = None
# target_ind = None
# # add_loss = len(pred_bboxes) - self.limit
# dif_bboxes = torch.abs(pred_bboxes[:,:2] - target_bbox[:2])
#
# pred_bboxes = pred_bboxes[:self.limit]
# pred_bboxes_ind = list(range(len(pred_bboxes)))
# for j in pred_bboxes_ind:
# box = pred_bboxes[j]
# #print(box.shape, target_bbox.shape)
# cur_loss = self.loss(box.unsqueeze(dim=0), target_bbox.unsqueeze(dim=0))
# if target_loss is None:
# target_loss = cur_loss
# target_ind = j
# elif cur_loss < target_loss:
# target_loss = cur_loss
# target_ind = j
# if target_loss is not None:
# if res_loss is None:
# res_loss = target_loss
# else:
# res_loss += target_loss
# pred_bboxes_ind.remove(target_ind)
# else:
# res_loss += 1
# #for i in pred_bboxes_ind:
# res_loss += len(pred_bboxes_ind)
# #res_loss += add_loss
# return res_loss/points.shape[0]
class MapLoss(torch.nn.Module):
def __init__(self, device):
super(MapLoss, self).__init__()
self.loss = IouDotsLoss(device)
self.device = device
self.mse = torch.nn.CrossEntropyLoss()
self.limit = 32
def forward(self, points, bboxes, target_bboxes):
res_loss = None
for b in range(points.shape[0]):
mask = points[b].argmax(dim=0) == 1
for i, target_bbox in enumerate(target_bboxes[b]):
x1 = int((target_bbox[0] - target_bbox[2] / 2) * mask.shape[1])
x2 = int((target_bbox[0] + target_bbox[2] / 2) * mask.shape[1])
y1 = int((target_bbox[1] - target_bbox[3] / 2) * mask.shape[0])
y2 = int((target_bbox[1] + target_bbox[3] / 2) * mask.shape[0])
cmask = torch.zeros_like(points[b:b+1], dtype=torch.bool, device=self.device)
                cmask[0, 1, y1:y2, x1:x2] = 1  # index 0: cmask has a singleton batch dim from points[b:b+1]
cmask[:, 0] = ~cmask[:, 1]
# closs = self.mse(points[b:b+1], cmask.to(dtype=torch.float32).argmax(dim=1))
# if res_loss is None:
# res_loss = closs
# else:
# res_loss += closs
kmask = cmask[0, 1] * mask
# print(mask.shape, cmask.shape)
pred_bboxes = bboxes[b, :, kmask]
pred_bboxes = pred_bboxes.transpose(0, 1)
cur_iou = bbox_iou(target_bbox, pred_bboxes)
if len(cur_iou) > 0:
cur_loss = 1-cur_iou.mean()
else:
cur_loss = self.mse(points[b:b + 1], cmask.to(dtype=torch.float32).argmax(dim=1))
if res_loss is None:
res_loss = cur_loss
else:
res_loss += cur_loss
# for i in pred_bboxes_ind:
#res_loss += len(pred_bboxes_ind)
# res_loss += add_loss
return res_loss / points.shape[0]
#
# class MapLoss(torch.nn.Module):
# def __init__(self, device):
# super(MapLoss, self).__init__()
# self.loss = IouDotsLoss(device)
# self.device = device
# self.mse = torch.nn.CrossEntropyLoss()
# #self.limit = 1000
#
# def forward(self, points, bboxes, target_bboxes):
# res_loss = None
# for b in range(points.shape[0]):
# mask = points[b].argmax(dim=0) == 1
# pred_bboxes = bboxes[b, :, mask]
# pred_bboxes = pred_bboxes.transpose(0, 1)
# for box in target_bboxes[b]:
# #print(box.shape, pred_bboxes.shape)
# cur_loss = 1-bbox_iou(box, pred_bboxes).max()
# if res_loss is None:
# res_loss = cur_loss
# else:
# res_loss += cur_loss
# if cur_loss is None:
# res_loss += 1
# return res_loss / points.shape[0]
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
# convex (smallest enclosing box) width
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * \
torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
| 38.853047
| 108
| 0.535701
|
794c671aaf3bd021bb604cd8ebede6943861d255
| 1,658
|
py
|
Python
|
vendimia/users/models.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | null | null | null |
vendimia/users/models.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | 5
|
2020-02-11T23:26:24.000Z
|
2022-01-13T00:39:54.000Z
|
vendimia/users/models.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from save_the_change.mixins import SaveTheChange
from django.contrib.auth.models import User
from users.midlerdamanager import MidlerManager,BorradoLogico
from django.db import models
class Perfil(SaveTheChange, models.Model):
user = models.OneToOneField(User) # Solo tener 1 perfil
avatar = models.ImageField(upload_to='AvatarUser', blank=True)
fecha_registro = models.DateTimeField(auto_now_add=True)
is_active = models.DateTimeField(blank=True, null=True)
origin = models.IntegerField(blank=True, null=True)
objects = MidlerManager()
def delete(self):
BorradoLogico(self)
class Meta:
verbose_name = 'Perfil'
verbose_name_plural = 'Perfiles'
def __unicode__(self):
return '%s' % (self.user)
class Clientes(SaveTheChange, models.Model):
nombre = models.CharField(max_length=40, blank=False, null=False)
apellido_paterno = models.CharField(max_length=13, blank=False, null=False)
apellido_materno = models.CharField(max_length=13, blank=False, null=False)
rfc = models.CharField(max_length=13, blank=True, null=False)
fecha_registro = models.DateTimeField(auto_now_add=True)
is_active = models.DateTimeField(blank=True, null=True)
origin = models.IntegerField(blank=True, null=True)
objects = MidlerManager()
usuario_registro = models.ForeignKey(User)
def delete(self):
BorradoLogico(self)
class Meta:
ordering = ["-id"]
verbose_name = 'Clientes'
verbose_name_plural = 'Clientes'
def __unicode__(self):
return '%s' % (self.nombre)
| 39.47619
| 79
| 0.717732
|
794c6786c27ebaba4e7e35d2c74622169ef50485
| 5,649
|
py
|
Python
|
kwueBackend/kwue/controllers/food.py
|
bounswe/bounswe2016group4
|
cbc8201aa86049b81f20ef44ee37eb065a469d46
|
[
"Apache-2.0"
] | 6
|
2016-02-14T18:04:48.000Z
|
2016-12-18T20:09:15.000Z
|
kwueBackend/kwue/controllers/food.py
|
bounswe/bounswe2016group4
|
cbc8201aa86049b81f20ef44ee37eb065a469d46
|
[
"Apache-2.0"
] | 113
|
2016-02-14T18:06:57.000Z
|
2021-06-10T17:57:12.000Z
|
kwueBackend/kwue/controllers/food.py
|
bounswe/bounswe2016group4
|
cbc8201aa86049b81f20ef44ee37eb065a469d46
|
[
"Apache-2.0"
] | 1
|
2017-02-15T18:48:55.000Z
|
2017-02-15T18:48:55.000Z
|
from django.shortcuts import render
from kwue.DB_functions.food_db_functions import *
from kwue.helper_functions.nutrition_helpers import request_nutrition
import json
from django.http import HttpResponse
from kwue.DB_functions.tag_db_functions import *
from django.views.decorators.csrf import csrf_exempt
def get_food(req):
food_id = req.GET.dict()['food_id']
food_dict = db_retrieve_food(food_id).__dict__
del food_dict['_state'] # alptekin fix FacePalm
tag_list = return_tags(food_id, "Food")
food_dict['tag_list'] = tag_list
food_dict['comments'] = db_get_comments(food_id)
food_json = json.dumps(food_dict)
return HttpResponse(food_json, content_type="application/json")
def get_food_page(req):
user_id = req.session['user_id']
if user_id == -2:
user_type = 0
user_name = 'Guest'
else:
user = db_retrieve_user(user_id)
user_type = user.user_type
user_name = user.user_name
food_id = req.GET.dict()['food_id']
food_dict = db_retrieve_food(food_id).__dict__
del food_dict['_state'] # alptekin fix FacePalm
tag_list = return_tags(food_id, "Food")
food_dict['tag_list'] = tag_list
food_dict['comments'] = db_get_comments(food_id)
food_dict['user_name'] = user_name
food_dict['user_type'] = user_type
food_dict['user_id'] = user_id
return render(req, 'kwue/food.html', food_dict)
@csrf_exempt
def add_food(req):
user_id = 0
if req.session.has_key('user_id'):
user_id = req.session['user_id']
else:
user_id = req.POST.dict()['user_id']
food_dict = req.POST.dict()
food_dict['food_owner'] = user_id
# get nutrition values from api
ingredients = json.loads(food_dict['ingredients'])
food_recipe = ""
ingredient_list = []
for ingredient in ingredients:
food_recipe += ingredient["value"] + " " + ingredient["ingredient"] + "\n"
ingredient_list.append(ingredient["ingredient"])
try:
nutrition_dict = request_nutrition(food_recipe, food_dict['number_of_servings'])
except:
nutrition_dict = request_nutrition(food_recipe)
food_dict['food_recipe'] = food_recipe
is_success = False
reason = ""
if nutrition_dict is not None:
# insert food
new_food_id = db_insert_food(food_dict, nutrition_dict, ingredient_list)
if new_food_id:
# add tags
tag_dict = {}
tag_dict['generic_id'] = new_food_id
tag_dict["type"] = "Food"
tag_list = json.loads(food_dict['food_tags'])
for tag_item in tag_list:
tag_dict['tag_name'] = tag_item['tag_name']
tag_dict['tag_id'] = tag_item['tag_id']
tag_dict['tag_label'] = tag_item['tag_label']
tag_dict['tag_description'] = tag_item['tag_description']
db_insert_tag(tag_dict)
#print(req.session['username'] + " has added a food successfully.")
is_success = True
else:
reason = 'Adding food failed.'
else:
reason = 'Nutritional value calculation failed.'
return HttpResponse(json.dumps({'is_success': is_success, 'reason': reason}), content_type='application/json')
def get_add_food_page(req):
user_id = req.session['user_id']
if user_id == -2:
return render(req, 'kwue/home.html', {'recommendations': db_retrieve_all_foods(), 'user_type': 0, 'user_name': 'Guest'})
else:
user = db_retrieve_user(user_id)
user_type = user.user_type
user_name = user.user_name
return render(req, 'kwue/add_food.html', {'user_type': user_type, 'user_name': user_name, 'user_id': user_id})
def get_my_food_page(req):
user_id = req.session['user_id']
user = db_retrieve_user(user_id)
foods = db_get_user_foods(user_id)
return render(req, 'kwue/my_foods.html', {'my_foods': foods, 'user_name': user.user_name, 'user_type': user.user_type, 'user_id': user_id})
@csrf_exempt
def get_nutritional_values(req):
ingredients = json.loads(req.POST.dict()['ingredients'])
food_recipe = ""
for ingredient in ingredients:
food_recipe += ingredient["value"] + " " + ingredient["ingredient"] + "\n"
nutrition_dict = request_nutrition(food_recipe)
return HttpResponse(json.dumps(nutrition_dict), content_type='application/json')
# def remove_food(req):
# food_id = req.GET.dict()['food_id']
# is_success = False
# reason = ""
# if db_delete_food(food_id):
# is_success = True
# else:
# reason = 'Removing food failed.'
# return HttpResponse(json.dumps({'is_success': is_success, 'reason': reason}), content_type='application/json')
@csrf_exempt
def rate_food(req):
rate_dict = req.POST.dict()
is_success = False
reason = ""
if db_rate_food(rate_dict['food_id'], rate_dict['rate_value']):
is_success = True
else:
reason = 'Rating food failed.'
return HttpResponse(json.dumps({'is_success': is_success, 'reason': reason}), content_type='application/json')
@csrf_exempt
def comment_food(req):
user_id = 0
if req.session.has_key('user_id'):
user_id = req.session['user_id']
else:
user_id = req.POST.dict()['user_id']
comment_dict = req.POST.dict()
is_success = False
reason = ""
if db_comment_food(comment_dict['food_id'], user_id, comment_dict['comment_text']):
is_success = True
else:
reason = 'Commenting food failed.'
return HttpResponse(json.dumps({'is_success': is_success, 'reason': reason}), content_type='application/json')
| 34.445122
| 143
| 0.663657
|
794c678b121352bf3da9f74bb60572b9e9083ce3
| 1,327
|
py
|
Python
|
tests/getl/common/test_delta_table.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 8
|
2020-06-10T09:00:17.000Z
|
2021-06-07T18:02:19.000Z
|
tests/getl/common/test_delta_table.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 5
|
2020-07-03T10:39:25.000Z
|
2021-08-30T14:52:47.000Z
|
tests/getl/common/test_delta_table.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 1
|
2020-05-28T07:53:48.000Z
|
2020-05-28T07:53:48.000Z
|
"""Testing the module delta tables."""
from pyspark.sql import types as T
from getl.common.delta_table import DeltaTable
def create_dataframe(spark_session, data):
schema = T.StructType(
[
T.StructField("file_path", T.StringType(), True),
T.StructField("file_desc", T.StringType(), True),
]
)
return spark_session.createDataFrame(data, schema)
def test_upsert_all(spark_session, tmp_dir):
"""Correct parameters are passed to the upsert all fuction."""
# ARRANGE
create_dataframe(
spark_session,
[
("path/to/file1", "about stuff"),
("path/to/file2", "gloomhaven is a nice place"),
],
).write.save(tmp_dir, format="delta")
update_df = create_dataframe(
spark_session,
[
("path/to/file2", "gloomhaven is a bad place"),
("path/to/file3", "my little haven"),
],
)
delta_table = DeltaTable(path=tmp_dir, spark=spark_session)
# ACT
dataframe = delta_table.upsert_all(
update_df, merge_statement="source.file_path = updates.file_path"
)
    # ASSERT
assert dataframe.collect()[0][1] == "gloomhaven is a bad place"
assert dataframe.collect()[1][1] == "my little haven"
assert dataframe.collect()[2][1] == "about stuff"
| 27.645833
| 73
| 0.617935
|
794c679866791c77bc99e83c5b45b1b86df08a91
| 6,681
|
py
|
Python
|
yongheegram/images/views.py
|
devyonghee/yongheegram
|
eb2d4b459e28a900f087a9039a3a2fdd77e7748d
|
[
"MIT"
] | null | null | null |
yongheegram/images/views.py
|
devyonghee/yongheegram
|
eb2d4b459e28a900f087a9039a3a2fdd77e7748d
|
[
"MIT"
] | 10
|
2020-09-06T22:56:25.000Z
|
2022-02-18T07:57:17.000Z
|
yongheegram/images/views.py
|
devyonghee/yongheegram
|
eb2d4b459e28a900f087a9039a3a2fdd77e7748d
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from yongheegram.users import models as user_models
from yongheegram.users import serializers as user_serializers
from yongheegram.notifications import views as notifications_views
class Images(APIView):
    def get(self, request, format=None):
user = request.user
following_users = user.following.all()
image_list = []
for following_user in following_users:
user_images = following_user.images.all()[:2]
for image in user_images:
image_list.append(image)
my_images = user.images.all()[:2]
for image in my_images:
image_list.append(image)
sorted_list = sorted(image_list, key=lambda image: image.created_at, reverse=True)
serializer = serializers.ImageSerializer(sorted_list, many=True, context={'request': request})
return Response(serializer.data)
def post(self, request, format=None):
user = request.user
serializer = serializers.InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LikeImage(APIView):
def get(self, request, image_id, format=None):
likes = models.Like.objects.filter(image_id=image_id)
like_creators_ids = likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creators_ids)
serializer = user_serializers.ListUserSerializer(users, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisiting_like = models.Like.objects.get(
creator=user,
image=found_image
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.Like.DoesNotExist:
new_like = models.Like.objects.create(
creator=user,
image=found_image
)
notifications_views.create_notifications(user, found_image.creator, 'like', found_image)
new_like.save()
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisiting_like = models.Like.objects.get(
creator=user,
image=found_image
)
preexisiting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user, image=found_image)
notifications_views.create_notifications(
user, found_image.creator, 'comment', found_image, serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Comment(APIView):
def delete(self, request, comment_id, format=None):
user = request.user
try:
comment = models.Comment.objects.get(id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
class Search(APIView):
    def get(self, request, format=None):
hashtags = request.query_params.get('hashtags', None)
if hashtags is not None:
hashtags = hashtags.split(',')
images = models.Image.objects.filter(tags__name__in=hashtags).distinct()
serializer = serializers.CountImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = models.Image.objects.get(id=image_id, creator=user)
return image
except models.Image.DoesNotExist:
return None
def get(self, request, image_id, format=None):
user = request.user
try:
image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.ImageSerializer(image, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
    def put(self, request, image_id, format=None):
user = request.user
image = self.find_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_204_NO_CONTENT)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, image_id, format=None):
user = request.user
image = self.find_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 31.366197
| 104
| 0.662027
|
794c6831ef14a531f2efc3b935d3cf9dbfa71517
| 169
|
py
|
Python
|
notes/urls.py
|
charliealpha094/Django-notes
|
4f7f2eb0872f61280e705a39bf3bc5bd727e72ae
|
[
"MIT"
] | null | null | null |
notes/urls.py
|
charliealpha094/Django-notes
|
4f7f2eb0872f61280e705a39bf3bc5bd727e72ae
|
[
"MIT"
] | null | null | null |
notes/urls.py
|
charliealpha094/Django-notes
|
4f7f2eb0872f61280e705a39bf3bc5bd727e72ae
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('document.urls')),
]
| 16.9
| 39
| 0.680473
|
794c6887555a1953c7a7a7c9fe145aefc2484b49
| 5,077
|
py
|
Python
|
sdk/python/pulumi_aws_native/iot/get_provisioning_template.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/iot/get_provisioning_template.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/iot/get_provisioning_template.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetProvisioningTemplateResult',
'AwaitableGetProvisioningTemplateResult',
'get_provisioning_template',
'get_provisioning_template_output',
]
@pulumi.output_type
class GetProvisioningTemplateResult:
def __init__(__self__, description=None, enabled=None, pre_provisioning_hook=None, provisioning_role_arn=None, tags=None, template_arn=None, template_body=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if pre_provisioning_hook and not isinstance(pre_provisioning_hook, dict):
raise TypeError("Expected argument 'pre_provisioning_hook' to be a dict")
pulumi.set(__self__, "pre_provisioning_hook", pre_provisioning_hook)
if provisioning_role_arn and not isinstance(provisioning_role_arn, str):
raise TypeError("Expected argument 'provisioning_role_arn' to be a str")
pulumi.set(__self__, "provisioning_role_arn", provisioning_role_arn)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if template_arn and not isinstance(template_arn, str):
raise TypeError("Expected argument 'template_arn' to be a str")
pulumi.set(__self__, "template_arn", template_arn)
if template_body and not isinstance(template_body, str):
raise TypeError("Expected argument 'template_body' to be a str")
pulumi.set(__self__, "template_body", template_body)
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="preProvisioningHook")
def pre_provisioning_hook(self) -> Optional['outputs.ProvisioningTemplateProvisioningHook']:
return pulumi.get(self, "pre_provisioning_hook")
@property
@pulumi.getter(name="provisioningRoleArn")
def provisioning_role_arn(self) -> Optional[str]:
return pulumi.get(self, "provisioning_role_arn")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.ProvisioningTemplateTag']]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="templateArn")
def template_arn(self) -> Optional[str]:
return pulumi.get(self, "template_arn")
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> Optional[str]:
return pulumi.get(self, "template_body")
class AwaitableGetProvisioningTemplateResult(GetProvisioningTemplateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProvisioningTemplateResult(
description=self.description,
enabled=self.enabled,
pre_provisioning_hook=self.pre_provisioning_hook,
provisioning_role_arn=self.provisioning_role_arn,
tags=self.tags,
template_arn=self.template_arn,
template_body=self.template_body)
def get_provisioning_template(template_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProvisioningTemplateResult:
"""
Creates a fleet provisioning template.
"""
__args__ = dict()
__args__['templateName'] = template_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:iot:getProvisioningTemplate', __args__, opts=opts, typ=GetProvisioningTemplateResult).value
return AwaitableGetProvisioningTemplateResult(
description=__ret__.description,
enabled=__ret__.enabled,
pre_provisioning_hook=__ret__.pre_provisioning_hook,
provisioning_role_arn=__ret__.provisioning_role_arn,
tags=__ret__.tags,
template_arn=__ret__.template_arn,
template_body=__ret__.template_body)
@_utilities.lift_output_func(get_provisioning_template)
def get_provisioning_template_output(template_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProvisioningTemplateResult]:
"""
Creates a fleet provisioning template.
"""
...
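A minimal usage sketch for the lookup above, assuming the package is imported as pulumi_aws_native inside a Pulumi program and that a provisioning template named "example-template" already exists (the import alias and template name are illustrative, not part of the generated module):
import pulumi
import pulumi_aws_native as aws_native

# blocking form: returns an AwaitableGetProvisioningTemplateResult
tmpl = aws_native.iot.get_provisioning_template(template_name="example-template")
pulumi.export("templateArn", tmpl.template_arn)

# output form: accepts pulumi.Input values and returns a pulumi.Output of the same result
tmpl_out = aws_native.iot.get_provisioning_template_output(template_name="example-template")
pulumi.export("templateBody", tmpl_out.template_body)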
| 40.616
| 165
| 0.703171
|
794c69d34d2942d7b0019a05e06542dcc9bd0980
| 2,400
|
py
|
Python
|
signals/mask_util.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
signals/mask_util.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
signals/mask_util.py
|
davmre/sigvisa
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import numpy.ma as ma
def grow_mask(mask, n):
N = len(mask)
return [mask[max(0, i - n):min(N, i + n + 1)].any() for i in range(N)]
def mask_blocks(mask):
"""
    Return a list of masked blocks (contiguous portions of the signal in which the mask is True).
    Returns an empty list if the mask is scalar (e.g. np.ma.nomask) or empty.
"""
blocks = []
block_start = 0
try:
in_block = mask[0]
    except (IndexError, TypeError):  # scalar or empty mask has no first element
return []
for i in range(len(mask)):
if in_block and not mask[i]: # end of a block
blocks.append((block_start, i))
in_block = False
if not in_block and mask[i]: # start of a block
in_block = True
block_start = i
if in_block: # special case for blocks reaching the end of the mask
blocks.append((block_start, i + 1))
return blocks
def mirror_missing(m):
"""
Fills in missing values by mirroring the values to either side.
"""
data = m.filled(m.mean())
mask = m.mask
try:
blocks = mask_blocks(mask)
except IndexError:
return ma.masked_array(data, mask)
for i, block in enumerate(blocks):
start = block[0]
end = block[1]
n = end - start
# we can copy forward into each block an amount of signal
# equal to the time since the end of the previous block.
forward_copy = start if i == 0 else (start - blocks[i - 1][1])
# we can copy backwards into each block an amount of signal
# equal to the time until the start of the next block.
backward_copy = (len(data)) - end if i == (len(blocks) - 1) else (blocks[i + 1][0] - end)
max_copy = max(forward_copy, backward_copy)
if forward_copy >= n / 2:
if backward_copy >= n / 2.0:
forward_copy = int(np.floor(n / 2.0))
backward_copy = int(np.ceil(n / 2.0))
max_copy = backward_copy
else:
forward_copy = min(forward_copy, n - backward_copy)
elif backward_copy >= n / 2:
backward_copy = min(backward_copy, n - forward_copy)
for k in range(max_copy):
if k < forward_copy:
data[start + k] = data[start - k - 1]
if k < backward_copy:
data[end - k - 1] = data[end + k]
return ma.masked_array(data, mask)
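A short usage sketch of the helpers above on a toy masked signal (the values and mask are illustrative):
import numpy as np
import numpy.ma as ma

# toy signal with one masked gap in the middle
sig = ma.masked_array(np.arange(10.0), mask=[0, 0, 0, 1, 1, 1, 0, 0, 0, 0])
m = np.asarray(sig.mask)

print(mask_blocks(m))   # [(3, 6)]: one contiguous masked block
print(grow_mask(m, 1))  # the block widened by one sample on each side
filled = mirror_missing(sig)
print(filled.data)      # the gap filled by mirroring neighbouring samples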
| 28.235294
| 97
| 0.5625
|
794c6a4a36b4bb6e20dbfca7eb77af604b9ab2ce
| 3,532
|
py
|
Python
|
scripts/joint_recorder.py
|
maciej-przybylski/baxter_examples
|
4ec2263231d0daa7c942616462a43a9a133b2629
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/joint_recorder.py
|
maciej-przybylski/baxter_examples
|
4ec2263231d0daa7c942616462a43a9a133b2629
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/joint_recorder.py
|
maciej-przybylski/baxter_examples
|
4ec2263231d0daa7c942616462a43a9a133b2629
|
[
"BSD-3-Clause"
] | 3
|
2021-02-23T06:44:08.000Z
|
2022-03-09T22:29:23.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import rospy
import baxter_interface
from baxter_examples import JointRecorder
from baxter_interface import CHECK_VERSION
def main():
"""RSDK Joint Recorder Example
Record timestamped joint and gripper positions to a file for
later play back.
Run this example while moving the robot's arms and grippers
to record a time series of joint and gripper positions to a
new csv file with the provided *filename*. This example can
be run in parallel with any other example or standalone
(moving the arms in zero-g mode while pressing the cuff
buttons to open/close grippers).
You can later play the movements back using one of the
*_file_playback examples.
"""
epilog = """
Related examples:
joint_position_file_playback.py; joint_trajectory_file_playback.py.
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__,
epilog=epilog)
required = parser.add_argument_group('required arguments')
required.add_argument(
'-f', '--file', dest='filename', required=True,
help='the file name to record to'
)
parser.add_argument(
'-r', '--record-rate', type=int, default=100, metavar='RECORDRATE',
help='rate at which to record (default: 100)'
)
args = parser.parse_args(rospy.myargv()[1:])
print("Initializing node... ")
rospy.init_node("rsdk_joint_recorder")
print("Getting robot state... ")
rs = baxter_interface.RobotEnable(CHECK_VERSION)
print("Enabling robot... ")
rs.enable()
recorder = JointRecorder(args.filename, args.record_rate)
rospy.on_shutdown(recorder.stop)
print("Recording. Press Ctrl-C to stop.")
recorder.record()
print("\nDone.")
if __name__ == '__main__':
main()
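A minimal programmatic sketch of the same recording flow as main() above, assuming a ROS master is reachable, the Baxter workspace is sourced, and the robot has already been enabled (the filename and rate are illustrative):
import rospy
from baxter_examples import JointRecorder

rospy.init_node("joint_recorder_sketch")
recorder = JointRecorder("joints.csv", 100)  # output file and record rate in Hz
rospy.on_shutdown(recorder.stop)             # close the file cleanly on shutdown
recorder.record()                            # blocks until the node is shut down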
| 37.978495
| 77
| 0.72735
|
794c6ae4b73b01880eb61d7c6a402fb16db407c1
| 699
|
py
|
Python
|
day61/problem.py
|
Nitin-Diwakar/100-days-of-code
|
637006b111054ab9a3b81867d3395fefc6584f02
|
[
"MIT"
] | 1
|
2021-04-24T11:51:00.000Z
|
2021-04-24T11:51:00.000Z
|
day61/problem.py
|
Nitin-Diwakar/100-days-of-code
|
637006b111054ab9a3b81867d3395fefc6584f02
|
[
"MIT"
] | null | null | null |
day61/problem.py
|
Nitin-Diwakar/100-days-of-code
|
637006b111054ab9a3b81867d3395fefc6584f02
|
[
"MIT"
] | 1
|
2021-07-13T17:59:38.000Z
|
2021-07-13T17:59:38.000Z
|
class Roman:
def __init__(self, num):
self.num = num
def int_to_roman(self):
number = [
1000, 900, 500, 400,
100, 90, 50, 40,
10, 9, 5, 4,
1
]
symbol = [
"M", "CM", "D", "CD",
"C", "XC", "L", "XL",
"X", "IX", "V", "IV",
"I"
]
roman_num = ""
i = 0
while self.num > 0:
for _ in range(self.num // number[i]):
roman_num += symbol[i]
self.num -= number[i]
i += 1
return roman_num
all_roman = Roman(15)
print(all_roman.int_to_roman())
| 21.181818
| 51
| 0.363376
|
794c6af7191f65ba23f8111c5b2aef50d1eefe66
| 337
|
py
|
Python
|
modules/persons/application/get/query/person/person_getter_query.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 6
|
2020-08-09T23:41:08.000Z
|
2021-03-16T22:05:40.000Z
|
modules/persons/application/get/query/person/person_getter_query.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-02T02:59:38.000Z
|
2020-10-02T02:59:38.000Z
|
modules/persons/application/get/query/person/person_getter_query.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 2
|
2021-03-16T22:05:43.000Z
|
2021-04-30T06:35:25.000Z
|
# -*- coding: utf-8 -*-
from modules.shared.domain.bus.query import Query
class PersonGetterQuery(Query):
"""
Get Person Query
"""
def __init__(self, id):
self.__id = id
@property
    def id(self):
        return self.__id
    @id.setter
    def id(self, value):
        raise Exception("You can't assign directly")
| 15.318182
| 52
| 0.58457
|
794c6bf1ba095b131bfb815c8e7257672f9e3643
| 49
|
py
|
Python
|
tests/components/lyric/__init__.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/lyric/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/lyric/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the Honeywell Lyric integration."""
| 24.5
| 48
| 0.734694
|
794c6bfa9d5ebc9f1dd72903a748b90a0dbd1dd6
| 111,821
|
py
|
Python
|
google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.featurestore_service import pagers
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import entity_type
from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1.types import feature
from google.cloud.aiplatform_v1.types import feature as gca_feature
from google.cloud.aiplatform_v1.types import featurestore
from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore
from google.cloud.aiplatform_v1.types import featurestore_service
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport
from .client import FeaturestoreServiceClient
class FeaturestoreServiceAsyncClient:
"""The service that handles CRUD and List for resources for
Featurestore.
"""
_client: FeaturestoreServiceClient
DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT
entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path)
parse_entity_type_path = staticmethod(
FeaturestoreServiceClient.parse_entity_type_path
)
feature_path = staticmethod(FeaturestoreServiceClient.feature_path)
parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path)
featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path)
parse_featurestore_path = staticmethod(
FeaturestoreServiceClient.parse_featurestore_path
)
common_billing_account_path = staticmethod(
FeaturestoreServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
FeaturestoreServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
FeaturestoreServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
FeaturestoreServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
FeaturestoreServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path)
parse_common_project_path = staticmethod(
FeaturestoreServiceClient.parse_common_project_path
)
common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path)
parse_common_location_path = staticmethod(
FeaturestoreServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeaturestoreServiceAsyncClient: The constructed client.
"""
return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeaturestoreServiceAsyncClient: The constructed client.
"""
return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return FeaturestoreServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> FeaturestoreServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeaturestoreServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(FeaturestoreServiceClient).get_transport_class,
type(FeaturestoreServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the featurestore service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.FeaturestoreServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = FeaturestoreServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_featurestore(
self,
request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None,
*,
parent: str = None,
featurestore: gca_featurestore.Featurestore = None,
featurestore_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Featurestore in a given project and
location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_featurestore():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateFeaturestoreRequest(
parent="parent_value",
featurestore_id="featurestore_id_value",
)
# Make the request
operation = client.create_featurestore(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]):
The request object. Request message for
[FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore].
parent (:class:`str`):
Required. The resource name of the Location to create
Featurestores. Format:
                ``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`):
Required. The Featurestore to create.
This corresponds to the ``featurestore`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
featurestore_id (:class:`str`):
Required. The ID to use for this Featurestore, which
will become the final component of the Featurestore's
resource name.
This value may be up to 60 characters, and valid
characters are ``[a-z0-9_]``. The first character cannot
be a number.
The value must be unique within the project and
location.
This corresponds to the ``featurestore_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing,
storing, and serving ML features. The Featurestore is
a top-level container for your features and their
values.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, featurestore, featurestore_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.CreateFeaturestoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if featurestore is not None:
request.featurestore = featurestore
if featurestore_id is not None:
request.featurestore_id = featurestore_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_featurestore,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_featurestore.Featurestore,
metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata,
)
# Done; return the response.
return response
async def get_featurestore(
self,
request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> featurestore.Featurestore:
r"""Gets details of a single Featurestore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_featurestore():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeaturestoreRequest(
name="name_value",
)
# Make the request
response = client.get_featurestore(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]):
The request object. Request message for
[FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore].
name (:class:`str`):
Required. The name of the
Featurestore resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Featurestore:
Vertex AI Feature Store provides a
centralized repository for organizing,
storing, and serving ML features. The
Featurestore is a top-level container
for your features and their values.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.GetFeaturestoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_featurestore,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_featurestores(
self,
request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFeaturestoresAsyncPager:
r"""Lists Featurestores in a given project and location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_featurestores():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturestoresRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_featurestores(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]):
The request object. Request message for
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
parent (:class:`str`):
Required. The resource name of the Location to list
Featurestores. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager:
Response message for
[FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.ListFeaturestoresRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_featurestores,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListFeaturestoresAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_featurestore(
self,
request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None,
*,
featurestore: gca_featurestore.Featurestore = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the parameters of a single Featurestore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_featurestore():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UpdateFeaturestoreRequest(
)
# Make the request
operation = client.update_featurestore(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]):
The request object. Request message for
[FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore].
featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`):
Required. The Featurestore's ``name`` field is used to
identify the Featurestore to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
This corresponds to the ``featurestore`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Field mask is used to specify the fields to be
overwritten in the Featurestore resource by the update.
The fields specified in the update_mask are relative to
the resource, not the full request. A field will be
overwritten if it is in the mask. If the user does not
provide a mask then only the non-empty fields present in
the request will be overwritten. Set the update_mask to
``*`` to override all fields.
Updatable fields:
- ``labels``
- ``online_serving_config.fixed_node_count``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing,
storing, and serving ML features. The Featurestore is
a top-level container for your features and their
values.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([featurestore, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.UpdateFeaturestoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if featurestore is not None:
request.featurestore = featurestore
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_featurestore,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("featurestore.name", request.featurestore.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_featurestore.Featurestore,
metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata,
)
# Done; return the response.
return response
async def delete_featurestore(
self,
request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None,
*,
name: str = None,
force: bool = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single Featurestore. The Featurestore must not contain
any EntityTypes or ``force`` must be set to true for the request
to succeed.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_featurestore():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeaturestoreRequest(
name="name_value",
)
# Make the request
operation = client.delete_featurestore(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]):
The request object. Request message for
[FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore].
name (:class:`str`):
Required. The name of the Featurestore to be deleted.
Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
force (:class:`bool`):
If set to true, any EntityTypes and
Features for this Featurestore will also
be deleted. (Otherwise, the request will
only work if the Featurestore has no
EntityTypes.)
This corresponds to the ``force`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, force])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.DeleteFeaturestoreRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if force is not None:
request.force = force
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_featurestore,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def create_entity_type(
self,
request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None,
*,
parent: str = None,
entity_type: gca_entity_type.EntityType = None,
entity_type_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new EntityType in a given Featurestore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_entity_type():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateEntityTypeRequest(
parent="parent_value",
entity_type_id="entity_type_id_value",
)
# Make the request
operation = client.create_entity_type(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]):
The request object. Request message for
[FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType].
parent (:class:`str`):
Required. The resource name of the Featurestore to
create EntityTypes. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`):
The EntityType to create.
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_type_id (:class:`str`):
Required. The ID to use for the EntityType, which will
become the final component of the EntityType's resource
name.
This value may be up to 60 characters, and valid
characters are ``[a-z0-9_]``. The first character cannot
be a number.
The value must be unique within a featurestore.
This corresponds to the ``entity_type_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and
                about which information needs to be stored. For example,
                driver is an entity type, and driver0 is an instance of
                the entity type driver.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity_type, entity_type_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.CreateEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entity_type is not None:
request.entity_type = entity_type
if entity_type_id is not None:
request.entity_type_id = entity_type_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_entity_type.EntityType,
metadata_type=featurestore_service.CreateEntityTypeOperationMetadata,
)
# Done; return the response.
return response
async def get_entity_type(
self,
request: Union[featurestore_service.GetEntityTypeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> entity_type.EntityType:
r"""Gets details of a single EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_entity_type():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEntityTypeRequest(
name="name_value",
)
# Make the request
response = client.get_entity_type(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]):
The request object. Request message for
[FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType].
name (:class:`str`):
Required. The name of the EntityType resource. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.EntityType:
An entity type is a type of object in
a system that needs to be modeled and
                about which information needs to be
                stored. For example, driver is an
                entity type, and driver0 is an
                instance of the entity type driver.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.GetEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_entity_types(
self,
request: Union[featurestore_service.ListEntityTypesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntityTypesAsyncPager:
r"""Lists EntityTypes in a given Featurestore.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_entity_types():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEntityTypesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]):
The request object. Request message for
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes].
parent (:class:`str`):
Required. The resource name of the Featurestore to list
EntityTypes. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager:
Response message for
[FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.ListEntityTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entity_types,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntityTypesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_entity_type(
self,
request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None,
*,
entity_type: gca_entity_type.EntityType = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_entity_type.EntityType:
r"""Updates the parameters of a single EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_entity_type():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UpdateEntityTypeRequest(
)
# Make the request
response = client.update_entity_type(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]):
The request object. Request message for
[FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType].
entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`):
Required. The EntityType's ``name`` field is used to
identify the EntityType to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Field mask is used to specify the fields to be
overwritten in the EntityType resource by the update.
The fields specified in the update_mask are relative to
the resource, not the full request. A field will be
overwritten if it is in the mask. If the user does not
provide a mask then only the non-empty fields present in
the request will be overwritten. Set the update_mask to
``*`` to override all fields.
Updatable fields:
- ``description``
- ``labels``
- ``monitoring_config.snapshot_analysis.disabled``
- ``monitoring_config.snapshot_analysis.monitoring_interval``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.EntityType:
An entity type is a type of object in
a system that needs to be modeled and
                about which information needs to be
                stored. For example, driver is an
                entity type, and driver0 is an
                instance of the entity type driver.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entity_type, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.UpdateEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entity_type is not None:
request.entity_type = entity_type
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entity_type.name", request.entity_type.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_entity_type(
self,
request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None,
*,
name: str = None,
force: bool = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single EntityType. The EntityType must not have any
Features or ``force`` must be set to true for the request to
succeed.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_entity_type():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEntityTypeRequest(
name="name_value",
)
# Make the request
operation = client.delete_entity_type(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]):
The request object. Request message for
                [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType].
name (:class:`str`):
Required. The name of the EntityType to be deleted.
Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
force (:class:`bool`):
If set to true, any Features for this
EntityType will also be deleted.
(Otherwise, the request will only work
if the EntityType has no Features.)
This corresponds to the ``force`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, force])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.DeleteEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if force is not None:
request.force = force
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
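    # A hedged sketch of the two calling conventions enforced by the flattened-
    # parameter check above; ``client`` stands for an instance of this class and
    # the resource names are placeholders.
    #
    #   request = featurestore_service.DeleteEntityTypeRequest(
    #       name="projects/.../entityTypes/my-entity-type", force=True,
    #   )
    #   operation = await client.delete_entity_type(request=request)
    #
    #   # ...or pass the individual fields instead (never both at once):
    #   operation = await client.delete_entity_type(
    #       name="projects/.../entityTypes/my-entity-type", force=True,
    #   )
    #   await operation.result()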
async def create_feature(
self,
request: Union[featurestore_service.CreateFeatureRequest, dict] = None,
*,
parent: str = None,
feature: gca_feature.Feature = None,
feature_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Feature in a given EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_feature():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
feature = aiplatform_v1.Feature()
feature.value_type = "BYTES"
request = aiplatform_v1.CreateFeatureRequest(
parent="parent_value",
feature=feature,
feature_id="feature_id_value",
)
# Make the request
operation = client.create_feature(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]):
The request object. Request message for
[FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature].
parent (:class:`str`):
Required. The resource name of the EntityType to create
a Feature. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
feature (:class:`google.cloud.aiplatform_v1.types.Feature`):
Required. The Feature to create.
This corresponds to the ``feature`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
feature_id (:class:`str`):
Required. The ID to use for the Feature, which will
become the final component of the Feature's resource
name.
This value may be up to 60 characters, and valid
characters are ``[a-z0-9_]``. The first character cannot
be a number.
The value must be unique within an EntityType.
This corresponds to the ``feature_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type.
For example, apple is an entity type, and color is a
feature that describes apple.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, feature, feature_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.CreateFeatureRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if feature is not None:
request.feature = feature
if feature_id is not None:
request.feature_id = feature_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_feature,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_feature.Feature,
metadata_type=featurestore_service.CreateFeatureOperationMetadata,
)
# Done; return the response.
return response
async def batch_create_features(
self,
request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None,
*,
parent: str = None,
requests: Sequence[featurestore_service.CreateFeatureRequest] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a batch of Features in a given EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_batch_create_features():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateFeatureRequest()
requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
request = aiplatform_v1.BatchCreateFeaturesRequest(
parent="parent_value",
requests=requests,
)
# Make the request
operation = client.batch_create_features(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]):
The request object. Request message for
[FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
parent (:class:`str`):
Required. The resource name of the EntityType to create
the batch of Features under. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]`):
Required. The request message specifying the Features to
create. All Features must be created under the same
parent EntityType. The ``parent`` field in each child
request message can be omitted. If ``parent`` is set in
a child request, then the value must match the
``parent`` value in this request message.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse`
Response message for
[FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.BatchCreateFeaturesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_create_features,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
featurestore_service.BatchCreateFeaturesResponse,
metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata,
)
# Done; return the response.
return response
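    # A hedged sketch of the rule documented above that child CreateFeatureRequests
    # may omit ``parent``; ``client`` and the resource names are placeholders.
    #
    #   child = featurestore_service.CreateFeatureRequest(
    #       feature=gca_feature.Feature(value_type="BYTES"), feature_id="f1",
    #   )
    #   operation = await client.batch_create_features(
    #       parent="projects/.../entityTypes/my-entity-type", requests=[child],
    #   )
    #   await operation.result()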
async def get_feature(
self,
request: Union[featurestore_service.GetFeatureRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feature.Feature:
r"""Gets details of a single Feature.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_feature():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeatureRequest(
name="name_value",
)
# Make the request
response = client.get_feature(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]):
The request object. Request message for
[FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature].
name (:class:`str`):
Required. The name of the Feature resource. Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Feature:
Feature Metadata information that
describes an attribute of an entity
type. For example, apple is an entity
type, and color is a feature that
describes apple.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.GetFeatureRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_feature,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_features(
self,
request: Union[featurestore_service.ListFeaturesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFeaturesAsyncPager:
r"""Lists Features in a given EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_features():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_features(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]):
The request object. Request message for
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
parent (:class:`str`):
                Required. The resource name of the EntityType to list
                Features from. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager:
Response message for
[FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.ListFeaturesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_features,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListFeaturesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
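    # A hedged sketch of consuming the pager returned above; ``client`` is assumed
    # to be an instance of this class and the parent is a placeholder. Iterating
    # the pager resolves additional pages automatically.
    #
    #   pager = await client.list_features(
    #       parent="projects/.../entityTypes/my-entity-type",
    #   )
    #   async for feature in pager:
    #       print(feature.name)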
async def update_feature(
self,
request: Union[featurestore_service.UpdateFeatureRequest, dict] = None,
*,
feature: gca_feature.Feature = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_feature.Feature:
r"""Updates the parameters of a single Feature.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_feature():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
feature = aiplatform_v1.Feature()
feature.value_type = "BYTES"
request = aiplatform_v1.UpdateFeatureRequest(
feature=feature,
)
# Make the request
response = client.update_feature(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]):
The request object. Request message for
[FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature].
feature (:class:`google.cloud.aiplatform_v1.types.Feature`):
Required. The Feature's ``name`` field is used to
identify the Feature to be updated. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
This corresponds to the ``feature`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Field mask is used to specify the fields to be
overwritten in the Features resource by the update. The
fields specified in the update_mask are relative to the
resource, not the full request. A field will be
overwritten if it is in the mask. If the user does not
provide a mask then only the non-empty fields present in
the request will be overwritten. Set the update_mask to
``*`` to override all fields.
Updatable fields:
- ``description``
- ``labels``
- ``monitoring_config.snapshot_analysis.disabled``
- ``monitoring_config.snapshot_analysis.monitoring_interval``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Feature:
Feature Metadata information that
describes an attribute of an entity
type. For example, apple is an entity
type, and color is a feature that
describes apple.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([feature, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.UpdateFeatureRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if feature is not None:
request.feature = feature
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_feature,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("feature.name", request.feature.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_feature(
self,
request: Union[featurestore_service.DeleteFeatureRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single Feature.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_feature():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeatureRequest(
name="name_value",
)
# Make the request
operation = client.delete_feature(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]):
The request object. Request message for
[FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature].
name (:class:`str`):
                Required. The name of the Feature to be deleted.
Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.DeleteFeatureRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_feature,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def import_feature_values(
self,
request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None,
*,
entity_type: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Imports Feature values into the Featurestore from a
source storage.
The progress of the import is tracked by the returned
operation. The imported features are guaranteed to be
visible to subsequent read operations after the
operation is marked as successfully done.
If an import operation fails, the Feature values
returned from reads and exports may be inconsistent. If
consistency is required, the caller must retry the same
import request again and wait till the new operation
returned is marked as successfully done.
There are also scenarios where the caller can cause
inconsistency.
- Source data for import contains multiple distinct
Feature values for the same entity ID and timestamp.
- Source is modified during an import. This includes
adding, updating, or removing source data and/or
metadata. Examples of updating metadata include but are
not limited to changing storage location, storage class,
or retention policy.
- Online serving cluster is under-provisioned.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_import_feature_values():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
avro_source = aiplatform_v1.AvroSource()
avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
feature_specs = aiplatform_v1.FeatureSpec()
feature_specs.id = "id_value"
request = aiplatform_v1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
entity_type="entity_type_value",
feature_specs=feature_specs,
)
# Make the request
operation = client.import_feature_values(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]):
The request object. Request message for
[FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues].
entity_type (:class:`str`):
Required. The resource name of the EntityType grouping
the Features for which values are being imported.
Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse`
Response message for
[FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entity_type])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.ImportFeatureValuesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entity_type is not None:
request.entity_type = entity_type
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.import_feature_values,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entity_type", request.entity_type),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
featurestore_service.ImportFeatureValuesResponse,
metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata,
)
# Done; return the response.
return response
async def batch_read_feature_values(
self,
request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None,
*,
featurestore: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Batch reads Feature values from a Featurestore.
This API enables batch reading Feature values, where
each read instance in the batch may read Feature values
of entities from one or more EntityTypes. Point-in-time
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_batch_read_feature_values():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
csv_read_instances = aiplatform_v1.CsvSource()
csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2']
destination = aiplatform_v1.FeatureValueDestination()
destination.bigquery_destination.output_uri = "output_uri_value"
entity_type_specs = aiplatform_v1.EntityTypeSpec()
entity_type_specs.entity_type_id = "entity_type_id_value"
entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
featurestore="featurestore_value",
destination=destination,
entity_type_specs=entity_type_specs,
)
# Make the request
operation = client.batch_read_feature_values(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]):
The request object. Request message for
[FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues].
featurestore (:class:`str`):
Required. The resource name of the Featurestore from
which to query Feature values. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
This corresponds to the ``featurestore`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse`
Response message for
[FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([featurestore])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.BatchReadFeatureValuesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if featurestore is not None:
request.featurestore = featurestore
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_read_feature_values,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("featurestore", request.featurestore),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
featurestore_service.BatchReadFeatureValuesResponse,
metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata,
)
# Done; return the response.
return response
async def export_feature_values(
self,
request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None,
*,
entity_type: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports Feature values from all the entities of a
target EntityType.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_export_feature_values():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
destination = aiplatform_v1.FeatureValueDestination()
destination.bigquery_destination.output_uri = "output_uri_value"
feature_selector = aiplatform_v1.FeatureSelector()
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.ExportFeatureValuesRequest(
entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
# Make the request
operation = client.export_feature_values(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]):
The request object. Request message for
[FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues].
entity_type (:class:`str`):
Required. The resource name of the EntityType from which
to export Feature values. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse`
Response message for
[FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entity_type])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.ExportFeatureValuesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entity_type is not None:
request.entity_type = entity_type
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_feature_values,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entity_type", request.entity_type),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
featurestore_service.ExportFeatureValuesResponse,
metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata,
)
# Done; return the response.
return response
async def search_features(
self,
request: Union[featurestore_service.SearchFeaturesRequest, dict] = None,
*,
location: str = None,
query: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchFeaturesAsyncPager:
r"""Searches Features matching a query in a given
project.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_search_features():
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchFeaturesRequest(
location="location_value",
)
# Make the request
page_result = client.search_features(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]):
The request object. Request message for
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].
location (:class:`str`):
Required. The resource name of the Location to search
Features. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``location`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
query (:class:`str`):
Query string that is a conjunction of field-restricted
queries and/or field-restricted filters.
Field-restricted queries and filters can be combined
using ``AND`` to form a conjunction.
A field query is in the form FIELD:QUERY. This
implicitly checks if QUERY exists as a substring within
Feature's FIELD. The QUERY and the FIELD are converted
to a sequence of words (i.e. tokens) for comparison.
This is done by:
- Removing leading/trailing whitespace and tokenizing
the search value. Characters that are not one of
alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
asterisk ``*`` are treated as delimiters for tokens.
``*`` is treated as a wildcard that matches
characters within a token.
- Ignoring case.
- Prepending an asterisk to the first and appending an
asterisk to the last token in QUERY.
A QUERY must be either a singular token or a phrase. A
phrase is one or multiple words enclosed in double
quotation marks ("). With phrases, the order of the
words is important. Words in the phrase must be matching
in order and consecutively.
Supported FIELDs for field-restricted queries:
- ``feature_id``
- ``description``
- ``entity_type_id``
Examples:
- ``feature_id: foo`` --> Matches a Feature with ID
containing the substring ``foo`` (eg. ``foo``,
``foofeature``, ``barfoo``).
- ``feature_id: foo*feature`` --> Matches a Feature
with ID containing the substring ``foo*feature`` (eg.
``foobarfeature``).
- ``feature_id: foo AND description: bar`` --> Matches
a Feature with ID containing the substring ``foo``
and description containing the substring ``bar``.
Besides field queries, the following exact-match filters
are supported. The exact-match filters do not support
wildcards. Unlike field-restricted queries, exact-match
filters are case-sensitive.
- ``feature_id``: Supports = comparisons.
- ``description``: Supports = comparisons. Multi-token
filters should be enclosed in quotes.
- ``entity_type_id``: Supports = comparisons.
- ``value_type``: Supports = and != comparisons.
- ``labels``: Supports key-value equality as well as
key presence.
- ``featurestore_id``: Supports = comparisons.
Examples:
- ``description = "foo bar"`` --> Any Feature with
description exactly equal to ``foo bar``
- ``value_type = DOUBLE`` --> Features whose type is
DOUBLE.
- ``labels.active = yes AND labels.env = prod`` -->
Features having both (active: yes) and (env: prod)
labels.
- ``labels.env: *`` --> Any Feature which has a label
with ``env`` as the key.
This corresponds to the ``query`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager:
Response message for
[FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([location, query])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = featurestore_service.SearchFeaturesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if location is not None:
request.location = location
if query is not None:
request.query = query
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.search_features,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.SearchFeaturesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
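    # A hedged sketch of a search using the query grammar documented above; the
    # location and query values are placeholders.
    #
    #   pager = await client.search_features(
    #       location="projects/my-project/locations/us-central1",
    #       query='feature_id: foo AND value_type = DOUBLE',
    #   )
    #   async for feature in pager:
    #       print(feature.name)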
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeaturestoreServiceAsyncClient",)
| 41.739828
| 186
| 0.613776
|
794c6ca47fc672fa81dffbe5e834d72e490f6e71
| 1,898
|
py
|
Python
|
aliyun-python-sdk-vs/aliyunsdkvs/request/v20181212/DescribeVsTopDomainsByFlowRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-vs/aliyunsdkvs/request/v20181212/DescribeVsTopDomainsByFlowRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-vs/aliyunsdkvs/request/v20181212/DescribeVsTopDomainsByFlowRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class DescribeVsTopDomainsByFlowRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'DescribeVsTopDomainsByFlow')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_Limit(self):
return self.get_query_params().get('Limit')
def set_Limit(self,Limit):
self.add_query_param('Limit',Limit)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
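# Illustrative usage sketch (assumes the standard aliyunsdkcore AcsClient flow;
# the credentials, region and time range below are placeholders):
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient
    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-shanghai")
    request = DescribeVsTopDomainsByFlowRequest()
    request.set_StartTime("2018-12-10T00:00:00Z")
    request.set_EndTime("2018-12-11T00:00:00Z")
    request.set_Limit(10)
    # do_action_with_exception sends the request and returns the raw response body.
    print(client.do_action_with_exception(request))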
| 33.892857
| 78
| 0.75922
|
794c6cdb643dc7b310ceadd73a26dad2655fe339
| 12,378
|
py
|
Python
|
Python/klampt/control/simrobotinterface.py
|
smeng9/Klampt
|
7ff91bead90ac04280eff310623338fd10aaba79
|
[
"BSD-3-Clause"
] | 238
|
2015-01-09T15:21:27.000Z
|
2022-03-30T22:48:45.000Z
|
Python/klampt/control/simrobotinterface.py
|
smeng9/Klampt
|
7ff91bead90ac04280eff310623338fd10aaba79
|
[
"BSD-3-Clause"
] | 89
|
2015-08-26T16:56:42.000Z
|
2022-03-29T23:45:46.000Z
|
Python/klampt/control/simrobotinterface.py
|
smeng9/Klampt
|
7ff91bead90ac04280eff310623338fd10aaba79
|
[
"BSD-3-Clause"
] | 84
|
2015-01-10T18:41:52.000Z
|
2022-03-30T03:32:50.000Z
|
"""Used for testing code that works with the Klamp't Robot Interface Layer on a
simulated robot. Defines a variety of RobotInterfaceBase interfaces that work
with Klamp't simulations.
For each of the classes in this module, if you provide the simulator argument
then this will automatically update your simulation upon each startStep() /
endStep() pair. Otherwise, you will have to step the simulation manually.
"""
from .robotinterface import RobotInterfaceBase
from klampt.model.robotinfo import RobotInfo
from klampt import RobotModel,Simulator,SimRobotController
import functools
class _SimControlInterface(RobotInterfaceBase):
def __init__(self,sim_controller,simulator=None,robotInfo=None):
assert isinstance(sim_controller,SimRobotController)
self.sim_controller = sim_controller
self.robot = sim_controller.model()
if simulator is not None:
assert isinstance(simulator,Simulator),"If you want to simulate, pass a klampt.Simulator object"
self.simulator = simulator
else:
self.simulator = None
self._status = 'disconnected'
self.robotInfo = robotInfo
if robotInfo is not None:
assert isinstance(robotInfo,RobotInfo)
RobotInterfaceBase.__init__(self,name=self.__class__.__name__)
def initialize(self):
self._status = 'ok'
return True
def klamptModel(self):
return self.robot
@functools.lru_cache(maxsize=None)
def parts(self):
if self.robotInfo is None:
return RobotInterfaceBase.parts(self)
res = {None:list(range(self.numJoints()))}
        for (k,v) in self.robotInfo.parts.items():
res[k] = self.robotInfo.toIndices(v)
return res
def controlRate(self):
return 1.0/self.sim_controller.getRate()
def sensors(self):
sensorNames = []
index = 0
while True:
s = self.sim_controller.sensor(index)
sname = s.name()
if len(sname) > 0:
sensorNames.append(sname)
else:
break
index += 1
return sensorNames
def enabledSensors(self):
return self.sensors()
def hasSensor(self,sensor):
return len(self.sim_controller.sensor(sensor).type()) > 0
def enableSensor(self,sensor,enabled=True):
if not enabled:
raise NotImplementedError("Can't disable a simulation sensor")
return True
def sensorMeasurements(self,name):
return self.sim_controller.sensor(name).getMeasurements()
def endStep(self):
if self.simulator is not None:
self.simulator.simulate(self.sim_controller.getRate())
if self.simulator.getStatus() >= Simulator.STATUS_UNSTABLE:
self._status = self.simulator.getStatusString()
def status(self):
return self._status
class SimPositionControlInterface(_SimControlInterface):
"""Adapts a SimRobotController to the RobotInterfaceBase class in position
control mode.
Only implements setPosition, sensedPosition, and commandedPosition; you
should use :class:`RobotInterfaceCompleter` to fill in move-to control,
cartesian control, velocity control, etc.
"""
def __init__(self,sim_controller,simulator=None,robotInfo=None):
_SimControlInterface.__init__(self,sim_controller,simulator,robotInfo)
def setPosition(self,q):
self.sim_controller.setPIDCommand(q,[0]*len(q))
def sensedPosition(self):
return self.configFromKlampt(self.sim_controller.getSensedConfig())
def commandedPosition(self):
return self.configFromKlampt(self.sim_controller.getCommandedConfig())
class SimVelocityControlInterface(_SimControlInterface):
"""Adapts a SimRobotController to the RobotInterfaceBase class in velocity
control mode.
Only implements setVelocity, sensedPosition, and commandedPosition; you
should use :class:`RobotInterfaceCompleter` to fill in move-to control,
cartesian control, position control, etc.
"""
def __init__(self,sim_controller,simulator=None,robotInfo=None):
_SimControlInterface.__init__(self,sim_controller,simulator,robotInfo)
def setVelocity(self,v,ttl=None):
if ttl is None:
ttl = 1.0
self.sim_controller.setVelocity(self.velocityToKlampt(v),ttl)
def sensedPosition(self):
return self.configFromKlampt(self.sim_controller.getSensedConfig())
def commandedPosition(self):
return self.configFromKlampt(self.sim_controller.getCommandedConfig())
class SimMoveToControlInterface(_SimControlInterface):
"""Adapts a SimRobotController to the RobotInterfaceBase class in move-to
control mode.
Only implements moveToPosition, sensedPosition, and commandedPosition; you
should use :class:`RobotInterfaceCompleter` to fill in position control,
cartesian control, velocity control, etc.
"""
def __init__(self,sim_controller,simulator=None,robotInfo=None):
_SimControlInterface.__init__(self,sim_controller,simulator,robotInfo)
def moveToPosition(self,q,speed=1.0):
assert speed == 1.0,"Can't accept non-max speed commands yet"
self.sim_controller.setMilestone(self.configToKlampt(q))
def sensedPosition(self):
return self.configFromKlampt(self.sim_controller.getSensedConfig())
def commandedPosition(self):
return self.configFromKlampt(self.sim_controller.getCommandedConfig())
def isMoving(self,part=None,joint_idx=None):
assert part is None
return self.sim_controller.remainingTime() > 0
class SimFullControlInterface(_SimControlInterface):
"""Adapts a SimRobotController to the RobotInterfaceBase class, accepting
position control, move to control, velocity control, and torque control
modes.
You should use :class:`RobotInterfaceCompleter` to fill in move-to control,
cartesian control, velocity control, etc.
"""
def __init__(self,sim_controller,simulator=None,robotInfo=None):
_SimControlInterface.__init__(self,sim_controller,simulator,robotInfo)
def setPosition(self,q):
self.sim_controller.setPIDCommand(q,[0]*len(q))
def setVelocity(self,v,ttl=None):
if ttl is None:
ttl = 0.1
self.sim_controller.setVelocity(v,ttl)
def setTorque(self,t,ttl=None):
if ttl is not None:
raise NotImplementedError("Can't set TTL on torque commands yet")
self.sim_controller.setTorque(t)
def setPID(self,q,dq,t=None):
if t is not None:
self.sim_controller.setPIDCommand(q,dq,t)
else:
self.sim_controller.setPIDCommand(q,dq)
def setPIDGains(self,kP,kD,kI):
self.sim_controller.setPIDGains(kP,kD,kI)
def setPiecewiseLinear(self,ts,qs,relative=True):
if not relative:
raise NotImplementedError("Can't accept absolute-time piecewise linear commands")
if len(ts)==0: return
assert ts[0] >= 0,"First timing needs to be nonnegative"
self.sim_controller.setLinear(self.configToKlampt(qs[0]),ts[0])
tlast = ts[0]
for (t,q) in zip(ts[1:],qs[1:]):
if t < tlast: raise ValueError("Invalid timing, not monotonic")
self.sim_controller.addLinear(self.configToKlampt(q),t-tlast)
tlast = t
def setPiecewiseCubic(self,ts,qs,vs,relative=True):
if not relative:
raise NotImplementedError("Can't accept absolute-time piecewise cubic commands")
if len(ts)==0: return
assert ts[0] >= 0,"First timing needs to be nonnegative"
self.sim_controller.setCubic(self.configToKlampt(qs[0]),self.velocityToKlampt(vs[0]),ts[0])
tlast = ts[0]
for (t,q,v) in zip(ts[1:],qs[1:],vs[1:]):
if t < tlast: raise ValueError("Invalid timing, not monotonic")
self.sim_controller.addCubic(self.configToKlampt(q),self.velocityToKlampt(v),t-tlast)
tlast = t
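    # Illustrative call of the piecewise commands above (placeholder values):
    # timestamps are relative to "now", nonnegative and monotonically
    # non-decreasing, e.g.
    #   interface.setPiecewiseLinear([0.5, 1.0, 2.0], [q1, q2, q3])
    # sends a three-segment linear trajectory that reaches q3 two seconds from now.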
def moveToPosition(self,q,speed=1.0):
assert speed == 1.0,"Can't accept non-max speed commands yet"
self.sim_controller.setMilestone(self.configToKlampt(q))
def isMoving(self,part=None,joint_idx=None):
assert part is None
return self.sim_controller.remainingTime() > 0
def sensedPosition(self):
return self.configFromKlampt(self.sim_controller.getSensedConfig())
def sensedVelocity(self):
return self.velocityFromKlampt(self.sim_controller.getSensedVelocity())
def sensedTorque(self):
try:
return self.sim_controller.getSensedTorque()
except Exception:
raise NotImplementedError()
def commandedPosition(self):
return self.configFromKlampt(self.sim_controller.getCommandedConfig())
def commandedVelocity(self):
return self.velocityFromKlampt(self.sim_controller.getCommandedVelocity())
    def commandedTorque(self):
        try:
            return self.sim_controller.getCommandedTorque()
        except Exception:
            raise NotImplementedError()
class KinematicSimControlInterface(RobotInterfaceBase):
"""A very basic control interface that just sets the robot's config to the
last setPosition command. Can also perform kinematic simulation of
simulators.
Also performs joint limit testing and self collision checking. These change
the status of the interface to non-'ok' error codes.
"""
def __init__(self,robot,robotInfo=None):
assert isinstance(robot,RobotModel)
self.robot = robot
self._status = 'ok'
self.robotInfo = robotInfo
if robotInfo is not None:
assert isinstance(robotInfo,RobotInfo)
q0 = robot.getConfig()
self.q = self.configFromKlampt(robot.getConfig())
qmin,qmax = robot.getJointLimits()
for i in range(robot.numDrivers()):
if robot.driver(i).getType() == 'affine':
links = robot.driver(i).getAffectedLinks()
scale,offset = robot.driver(i).getAffineCoeffs()
for l,s in zip(links,scale):
if s < 0:
qmin[l],qmax[l] = qmax[l],qmin[l]
self.qmin,self.qmax = self.configFromKlampt(qmin),self.configFromKlampt(qmax)
robot.setConfig(q0)
RobotInterfaceBase.__init__(self,name=self.__class__.__name__)
def klamptModel(self):
return self.robot
@functools.lru_cache(maxsize=None)
def parts(self):
if self.robotInfo is None:
return RobotInterfaceBase.parts(self)
res = {None:list(range(self.numJoints()))}
        for (k,v) in self.robotInfo.parts.items():
res[k] = self.robotInfo.toIndices(v)
return res
def controlRate(self):
return 200.0
def sensors(self):
sensorNames = []
index = 0
while True:
s = self.robot.sensor(index)
sname = s.name()
if len(sname) > 0:
sensorNames.append(sname)
else:
break
index += 1
return sensorNames
def enabledSensors(self):
return self.sensors()
def sensorMeasurements(self,name):
self.robot.setConfig(self.configToKlampt(self.q))
return self.robot.sensor(name).getMeasurements()
def endStep(self):
pass
def status(self):
return self._status
def setPosition(self,q):
if self._status != 'ok':
return
if len(q) != len(self.q):
raise ValueError("Invalid position command")
self.q = q
if any(v < a or v > b for (v,a,b) in zip(q,self.qmin,self.qmax)):
for i,(v,a,b) in enumerate(zip(q,self.qmin,self.qmax)):
if v < a or v > b:
self._status = 'joint %d limit violation: %f <= %f <= %f'%(i,a,v,b)
self.robot.setConfig(self.configToKlampt(self.q))
if self.robot.selfCollides():
self._status = 'self collision'
def reset(self):
self._status = 'ok'
return True
def sensedPosition(self):
return self.q
def commandedPosition(self):
return self.q
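# Illustrative usage sketch (assumes a placeholder world file and that the base
# class supplies the startStep()/endStep() stepping pair described in the module
# docstring):
if __name__ == '__main__':
    from klampt import WorldModel
    world = WorldModel()
    if world.readFile("my_robot_world.xml"):    # placeholder path
        sim = Simulator(world)
        # Passing the simulator makes endStep() advance the physics automatically.
        interface = SimMoveToControlInterface(sim.controller(0), sim)
        interface.initialize()
        q = interface.sensedPosition()
        for step in range(100):
            interface.startStep()
            if step == 0:
                interface.moveToPosition(q)    # command a move to the current config
            interface.endStep()
        print("controller status:", interface.status())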
| 36.192982
| 108
| 0.66287
|
794c6ed1dac196b4be74a4af0a2a24bee5faa66a
| 6,165
|
py
|
Python
|
src/purplebook/DummyGraphics.py
|
katieteixeira/purplebook
|
7493d6af5899ae639c5ffdca54227d11daa4b857
|
[
"MIT"
] | 9
|
2016-04-05T00:30:10.000Z
|
2019-01-11T18:02:33.000Z
|
GEOL351/CoursewareModules/DummyGraphics.py
|
CommonClimate/teaching_notebooks
|
576108231d5bbca8cbe6636752317f823b59429c
|
[
"MIT"
] | null | null | null |
GEOL351/CoursewareModules/DummyGraphics.py
|
CommonClimate/teaching_notebooks
|
576108231d5bbca8cbe6636752317f823b59429c
|
[
"MIT"
] | 2
|
2021-01-24T01:04:34.000Z
|
2021-08-25T13:56:07.000Z
|
#----------Section 3: Plotting utilities-------------------------------
#This is a dummy graphics routine, to import if a graphics driver
#is not found. It is the fallback import if the import of ClimateGraphics
#fails in ClimateUtilities. This dummy routine allows courseware
#scripts to be run without the user needing to comment out plot commands.
#It also can be used as a template for customizing the plotter interface
#to work with you locally favored Python graphics driver (e.g. MatPlotLib).
#----------------------------------------------------------------------
#A dummy class useful for passing parameters.
#Just make an instance, then add new members
#at will. Not sure why this also has to be defined
#here, since it's defined in ClimateUtilities, but
#it seems to be necessary
class Dummy:
pass
# A little class to make resources for Ngl plot options
class resource:
def __init__(self):
self.trYReverse = False
self.trXReverse = False
self.trXLog = False
self.trYLog = False
self.nglFrame = False
self.nglDraw = False
#ToDo: Missing data code resource, line styles, line colors
# A little class for use as a return object from plot(), so
# the user has an easy way to delete a window or save a plot
# to an eps file.
class plotObj:
def __init__(self,workstation,plot,WorkstationResources = None):
self.workstation = workstation
self.plot = plot
self.WorkstationResources = WorkstationResources
#Deletes a plot window
def delete(self):
#Deletes a plot window, cleans up
pass
def save(self,filename = 'plot'):
#Saves a plot to a file
pass
#Makes a line plot from a Curve object
def plot(c):
print "Plotting not implemented"
#Set axis options according to information
#in the curve object c.
#
#c.reverseY:
# if True, reverse the Y axis
#c.reverseX:
# if True, reverse the X axis
#c.XlogAxis:
# if True, use a logarithmic X axis
#c.YlogAxis:
# if True, use a logarithmic Y axis
#
#Customize Line styles and colors here
#
    #Set the plot title
#c.PlotTitle:
# String containing the plot title
#Axis labels
#X and Y axis labels
#c.Xlabel:
# String containing the X axis label
#c.Ylabel:
# String containing the Y axis label
#
#Interchange the X and Y axes
if c.switchXY:
pass
        #If True, exchange the axes
# Legends, for multicurve plot
legends = []
for id in c.listVariables():
if not id == c.Xid:
if len(c.label[id]) > 0:
legends.append(c.label[id])
else:
legends.append(id)
#
#Suppress line drawing and just plot symbol for scatter plot curves
#Customize plotting symbols and marker sizes if desired
for id in c.listVariables():
if not id == c.Xid:
if c.scatter[id]:
#If True, treat this data column as a scatter
#plot and don't draw lines
pass
else:
#If False, draw lines (default)
pass
#
#Initialize the plot window here, if necessary.
#w is the handle to the plot window
w = None
    if c.switchXY:
        #Put in the command for doing the line plot here,
        #but switch the order of the axes.
        pass
    else:
        #Put in the command for doing the line plot here.
        #Do the plot with the X and Y axes in the usual
        #order
        pass
#
#Now draw the plot
#Depending on your graphics software, the preceding
#command may already have drawn the plot. In some graphics
#packages, after the plot is created, another command needs
    #to be executed in order to display it. Either way, the
#variable plotHandle below is a handle referring to the plot.
#It is used to build a plotObj that can be used for further
#manipulation of the plot such as saving or deleting. In some
#graphics packages, which give control over such things from
#menus in the plot window, the use of a plotObj for this may
#be unnecessary.
plotHandle = None
return plotObj(w,plotHandle) #So that user can delete window or save plot
# A basic contour plotter, which will plot a contour plot
# of a numpy array. The x and y scales can optionally
# be specified using keyword arguments x and y. For example,
# if we want the x scale to be the array (or list) lat, and
# the y scale to be the array (or list) lon, we would call
# contour as contour(A,x=lat,y=lon).
def contour(A,**kwargs):
print "Plotting not implemented"
#The following allows an expert user to pass
# options directly to the plotter.
if 'resource' in kwargs.keys():
r = kwargs['resource']
else:
r = Dummy()
#
r.cnFillOn = True #Use color fill
if 'x' in kwargs.keys():
#Set the X array for the contour plot
XArray = kwargs['x']
if 'y' in kwargs.keys():
#Set the Y array for the contour plot
YArray = kwargs['y']
#
# Now create the plot
rw = Dummy()
#Set the color map
if 'colors' in kwargs.keys():
if (kwargs['colors'] == 'gray') or (kwargs['colors'] == 'grey') :
#Set the default greyscale
#(Substitute the appropriate command for your driver)
rw.wkColorMap = 'gsdtol'
else:
rw.wkColorMap = kwargs['colors']
else:
#Default rainbow color table
rw.wkColorMap = "temp1"
#Open/initialize a plot window
w = None
#Make the plot. plotHandle is the handle returned
#by the plotter, used for further manipulation of the
#plot. (Redundant for some kinds of plotting packages)
plotHandle = None
#Now draw the plot, if your driver needs this as a separate step
#(Insert command for drawing the plot here, e.g.
#ShowPlot(plotHandle).
#
#Return a plotObj with the necessary data for further
#manipulation.
return plotObj(w,plotHandle,rw) #So user can delete or save plot
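#Minimal usage sketch (an assumption about caller code, not part of the
#original module). A real Curve object normally comes from ClimateUtilities;
#here a Dummy instance stands in with just the attributes plot() consults.
if __name__ == '__main__':
    c = Dummy()
    c.switchXY = False
    c.Xid = 'x'
    c.label = {'x': 'x', 'T': 'Temperature'}
    c.scatter = {'x': False, 'T': False}
    c.listVariables = lambda: ['x', 'T']
    p = plot(c)        #prints the "not implemented" notice in this dummy driver
    p.save('example')  #a no-op here; a real driver would write an image file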
| 34.830508
| 77
| 0.630008
|
794c6f02c7bf380516055625cea0e5b4962d07f8
| 7,324
|
py
|
Python
|
main.py
|
jing-interactive/irobot
|
032fc6ae069328eaccf343e885d4915835f2340e
|
[
"MIT"
] | 1
|
2016-11-19T15:37:48.000Z
|
2016-11-19T15:37:48.000Z
|
main.py
|
vnm-interactive/irobot
|
032fc6ae069328eaccf343e885d4915835f2340e
|
[
"MIT"
] | null | null | null |
main.py
|
vnm-interactive/irobot
|
032fc6ae069328eaccf343e885d4915835f2340e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
main.py
'''
# NOTE: this example requires PyAudio because it uses the Microphone class
import argparse
import speech_recognition as sr
from pythonosc import udp_client
import io, os, subprocess, wave, aifc, math, audioop
import collections, threading
import platform, stat
import json, hashlib, hmac, time, base64, random, uuid
import tempfile, shutil
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
import requests
class MyRecognizer(sr.Recognizer):
    # Baidu Speech Recognition API
# From https://github.com/Uberi/speech_recognition/pull/170/commits/8058cef1bb5f7c1a0fdc89e527b26a7c81de03aa
def recognize_baidu(self, audio_data, *, language = "zh", key = None, secret_key = None, show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Baidu Speech Recognition API.
The Baidu Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Baidu at any time**.
        The Baidu speech recognition API is accessed with a POST request.
        Currently the API only supports whole-utterance recognition, i.e. the complete audio clip must be uploaded at once.
        Audio data can be uploaded in two ways: implicitly or explicitly.
        The raw recording format currently only supports mono audio sampled at 8k/16k with 16-bit depth.
        Supported compressed formats: pcm (uncompressed), wav, opus, speex, amr, x-flac.
        Supported languages: Mandarin Chinese (zh), Cantonese (ct), English (en).
        Production endpoint: http://vop.baidu.com/server_api
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the key isn't valid, the quota for the key is maxed out, or there is no internet connection.
"""
assert isinstance(audio_data, sr.AudioData), "`audio_data` must be audio data"
assert key is None or isinstance(key, str), "`key` must be `None` or a string"
assert secret_key is None or isinstance(secret_key, str), "`secret_key` must be `None` or a string"
# Using Rain's default keys of baidu asr api
if key is None: key = "QrhsINLcc3Io6w048Ia8kcjS"
if secret_key is None: secret_key = "e414b3ccb7d51fef12f297ffea9ec41d"
access_token = get_token_baidu(key, secret_key)
mac_address = uuid.UUID(int=uuid.getnode()).hex[-12:]
flac_data, sample_rate = audio_data.get_flac_data(), audio_data.sample_rate
url_post_base = "http://vop.baidu.com/server_api"
data = {
"format": "x-flac",
"lan": language,
"token": access_token,
"len": len(flac_data),
"rate": sample_rate,
"speech": base64.b64encode(flac_data).decode('UTF-8'),
"cuid": mac_address,
"channel": 1,
}
json_data = json.dumps(data).encode('UTF-8')
headers = {"Content-Type": "application/json", "Content-Length": len(json_data)}
# Obtain audio transcription results
try:
response = requests.post(url_post_base, data=json.dumps(data), headers=headers)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
if int(response.json()['err_no']) != 0:
return 'err_msg'
else:
results = response.json()['result'][0].split(",")
for item in results:
if item != "":
return item
return 'err_msg'
# Get token from baidu
def get_token_baidu(app_key, secret_key):
url_get_base = "https://openapi.baidu.com/oauth/2.0/token"
url = url_get_base + "?grant_type=client_credentials" + "&client_id=" + app_key + "&client_secret=" + secret_key
response = urlopen(url)
response_text = response.read().decode('UTF-8')
json_result = json.loads(response_text)
return json_result['access_token']
def main():
'''
main()
'''
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="127.0.0.1",
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=3000,
help="The port the OSC server is listening on")
parser.add_argument("--filename", default="./words.txt",
help="The filename that wil contain the recognized words.")
parser.add_argument("--back_volume", type=int, default=-1,
help="Background volume.")
args = parser.parse_args()
client = udp_client.SimpleUDPClient(args.ip, args.port)
rec = MyRecognizer()
mic = sr.Microphone()
try:
if args.back_volume == -1:
print("A moment of silence, please...")
with mic as source:
rec.adjust_for_ambient_noise(source)
else:
rec.energy_threshold = args.back_volume
print("Set minimum energy threshold to {}".format(rec.energy_threshold))
while True:
print("Say something!")
with mic as source:
audio = rec.listen(source)
print("Got it! Now to recognize it...")
try:
style = "sphinx"
if style == "google":
value = rec.recognize_google(audio)
elif style == "bing":
value = rec.recognize_bing(audio, key="0211831985124fdbb41fe2161bc1cd10", language="zh-CN")
elif style == "baidu":
value = rec.recognize_baidu(audio, key="KS7NnNQetwOkanR5x92OHVxB", secret_key="7e87ec1ff0c9c8c9bbe99a1115cc2464")
elif style == "sphinx":
value = rec.recognize_sphinx(audio, language="zh-CN")
else:
value = rec.recognize_sphinx(audio)
if value == "":
print("Found nothing!")
continue
# we need some special handling here to correctly print unicode
# characters to standard output
if str is bytes: # this version of Python uses bytes for strings (Python 2)
value = u"{}".format(value).encode("utf-8")
print("You said", value)
with open(args.filename, 'w', encoding='utf8') as f:
                    f.write(value)
client.send_message("/say", value)
except sr.UnknownValueError:
print("Oops! Didn't catch that")
except sr.RequestError as err:
print("Uh oh! Couldn't request results from; {0}".format(err))
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| 45.490683
| 252
| 0.612507
|
794c6f7638f1aa23159b60ef5b712325ad63541a
| 543
|
py
|
Python
|
_note_/_unittest_.py
|
By2048/_python_
|
be57738093676a1273e6f69232723669e408986e
|
[
"MIT"
] | 2
|
2017-02-16T14:50:33.000Z
|
2018-02-03T01:49:06.000Z
|
_note_/_unittest_.py
|
By2048/_python_
|
be57738093676a1273e6f69232723669e408986e
|
[
"MIT"
] | null | null | null |
_note_/_unittest_.py
|
By2048/_python_
|
be57738093676a1273e6f69232723669e408986e
|
[
"MIT"
] | null | null | null |
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == '__main__':
unittest.main()
| 23.608696
| 69
| 0.620626
|
794c700836c2e0adf35203b22ba5a254e0bdb92c
| 1,978
|
py
|
Python
|
samples/sample_uploads/sample_uploads_uber.py
|
jlangdev/falconpy
|
75d5824ad5d36c430b709110d7719a3f3fc7e9b0
|
[
"Unlicense"
] | 111
|
2020-11-19T00:44:18.000Z
|
2022-03-03T21:02:32.000Z
|
samples/sample_uploads/sample_uploads_uber.py
|
jlangdev/falconpy
|
75d5824ad5d36c430b709110d7719a3f3fc7e9b0
|
[
"Unlicense"
] | 227
|
2020-12-05T03:02:27.000Z
|
2022-03-22T14:12:42.000Z
|
samples/sample_uploads/sample_uploads_uber.py
|
jlangdev/falconpy
|
75d5824ad5d36c430b709110d7719a3f3fc7e9b0
|
[
"Unlicense"
] | 47
|
2020-11-23T21:00:14.000Z
|
2022-03-28T18:30:19.000Z
|
# ____ _ _ _ _ _
# / ___| __ _ _ __ ___ _ __ | | ___ | | | |_ __ | | ___ __ _ __| |___
# \___ \ / _` | '_ ` _ \| '_ \| |/ _ \ | | | | '_ \| |/ _ \ / _` |/ _` / __|
# ___) | (_| | | | | | | |_) | | __/ | |_| | |_) | | (_) | (_| | (_| \__ \
# |____/ \__,_|_| |_| |_| .__/|_|\___| \___/| .__/|_|\___/ \__,_|\__,_|___/
# |_| |_|
#
#
# _ _ _ ____ _
# | | | | |__ ___ _ __ / ___| | __ _ ___ ___
# | | | | '_ \ / _ \ '__| | | | |/ _` / __/ __|
# | |_| | |_) | __/ | | |___| | (_| \__ \__ \
# \___/|_.__/ \___|_| \____|_|\__,_|___/___/
#
# These examples show how to interact with the Sample Uploads API using the Uber class.
#
import json
# Import the Uber Class
from falconpy import api_complete as FalconSDK
# Grab our config parameters
with open('../config.json', 'r') as file_config:
config = json.loads(file_config.read())
# Create an instance of the Uber class
falcon = FalconSDK.APIHarness(creds={
"client_id": config["falcon_client_id"],
"client_secret": config["falcon_client_secret"]
}
)
# Define our file
FILENAME = "testfile.jpg"
# Open the file for binary read, this will be our payload
PAYLOAD = open(FILENAME, 'rb').read()
# Upload the file using the Sample Uploads API, name this file "newfile.jpg" in the API
response = falcon.command('UploadSampleV3', file_name="newfile.jpg", data=PAYLOAD, content_type="application/octet-stream")
# Grab the SHA256 unique identifier for the file we just uploaded
sha = response["body"]["resources"][0]["sha256"]
# Download a copy of this file, use the SHA256 ID to retrieve it
response = falcon.command("GetSampleV3", ids=sha)
# Save the result to a new file
open('uberclass.jpg', 'wb').write(response)
# Delete the file from the API
response = falcon.command("DeleteSampleV3", ids=sha)
# Print the results of our delete command
print(json.dumps(response, indent=4))
| 41.208333
| 123
| 0.581395
|
794c701b0b9b3d8501dacfed622482e082617c09
| 7,123
|
py
|
Python
|
openfl/component/director/experiment.py
|
ssg-research/openfl
|
b60cbfbdad595e653c94cee23fd35add993b94b0
|
[
"Apache-2.0"
] | 297
|
2021-01-13T08:49:35.000Z
|
2022-03-31T15:06:43.000Z
|
openfl/component/director/experiment.py
|
ssg-research/openfl
|
b60cbfbdad595e653c94cee23fd35add993b94b0
|
[
"Apache-2.0"
] | 265
|
2021-02-02T09:57:33.000Z
|
2022-03-30T22:51:55.000Z
|
openfl/component/director/experiment.py
|
ssg-research/openfl
|
b60cbfbdad595e653c94cee23fd35add993b94b0
|
[
"Apache-2.0"
] | 81
|
2021-01-18T07:52:36.000Z
|
2022-03-26T18:55:54.000Z
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Experiment module."""
import asyncio
import logging
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Union
from openfl.federated import Plan
from openfl.transport import AggregatorGRPCServer
from openfl.utilities.workspace import ExperimentWorkspace
logger = logging.getLogger(__name__)
class Status:
"""Experiment's statuses."""
PENDING = 'pending'
FINISHED = 'finished'
IN_PROGRESS = 'in_progress'
FAILED = 'failed'
class Experiment:
"""Experiment class."""
def __init__(
self, *,
name: str,
archive_path: Union[Path, str],
collaborators: List[str],
sender: str,
init_tensor_dict: dict,
plan_path: Union[Path, str] = 'plan/plan.yaml',
users: Iterable[str] = None,
) -> None:
"""Initialize an experiment object."""
self.name = name
if isinstance(archive_path, str):
archive_path = Path(archive_path)
self.archive_path = archive_path
self.collaborators = collaborators
self.sender = sender
self.init_tensor_dict = init_tensor_dict
if isinstance(plan_path, str):
plan_path = Path(plan_path)
self.plan_path = plan_path
self.users = set() if users is None else set(users)
self.status = Status.PENDING
self.aggregator = None
async def start(
self, *,
tls: bool = True,
root_certificate: Union[Path, str] = None,
private_key: Union[Path, str] = None,
certificate: Union[Path, str] = None,
):
"""Run experiment."""
self.status = Status.IN_PROGRESS
try:
logger.info(f'New experiment {self.name} for '
f'collaborators {self.collaborators}')
with ExperimentWorkspace(self.name, self.archive_path):
aggregator_grpc_server = self._create_aggregator_grpc_server(
tls=tls,
root_certificate=root_certificate,
private_key=private_key,
certificate=certificate,
)
self.aggregator = aggregator_grpc_server.aggregator
await self._run_aggregator_grpc_server(
aggregator_grpc_server=aggregator_grpc_server,
)
self.status = Status.FINISHED
            logger.info(f'Experiment "{self.name}" finished successfully.')
except Exception as e:
self.status = Status.FAILED
            logger.error(f'Experiment "{self.name}" failed with error: {e}.')
def _create_aggregator_grpc_server(
self, *,
tls: bool = True,
root_certificate: Union[Path, str] = None,
private_key: Union[Path, str] = None,
certificate: Union[Path, str] = None,
) -> AggregatorGRPCServer:
plan = Plan.parse(plan_config_path=Path(self.plan_path))
plan.authorized_cols = list(self.collaborators)
logger.info('🧿 Starting the Aggregator Service.')
aggregator_grpc_server = plan.interactive_api_get_server(
tensor_dict=self.init_tensor_dict,
root_certificate=root_certificate,
certificate=certificate,
private_key=private_key,
tls=tls,
)
return aggregator_grpc_server
@staticmethod
async def _run_aggregator_grpc_server(aggregator_grpc_server: AggregatorGRPCServer) -> None:
"""Run aggregator."""
logger.info('🧿 Starting the Aggregator Service.')
grpc_server = aggregator_grpc_server.get_server()
grpc_server.start()
logger.info('Starting Aggregator gRPC Server')
try:
while not aggregator_grpc_server.aggregator.all_quit_jobs_sent():
# Awaiting quit job sent to collaborators
await asyncio.sleep(10)
except KeyboardInterrupt:
pass
finally:
grpc_server.stop(0)
# Temporary solution to free RAM used by TensorDB
aggregator_grpc_server.aggregator.tensor_db.clean_up(0)
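# Illustrative construction (a sketch with placeholder values, not taken from
# this module): the director would typically build an Experiment like
#
#   experiment = Experiment(
#       name='exp_1',
#       archive_path='workspace.zip',
#       collaborators=['col_1', 'col_2'],
#       sender='director',
#       init_tensor_dict={},
#   )
#
# and hand it to an ExperimentsRegistry (defined below).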
class ExperimentsRegistry:
"""ExperimentsList class."""
def __init__(self) -> None:
"""Initialize an experiments list object."""
self.__active_experiment_name = None
self.__pending_experiments = []
self.__archived_experiments = []
self.__dict = {}
@property
def active_experiment(self) -> Union[Experiment, None]:
"""Get active experiment."""
if self.__active_experiment_name is None:
return None
return self.__dict[self.__active_experiment_name]
@property
def pending_experiments(self) -> List[str]:
"""Get queue of not started experiments."""
return self.__pending_experiments
def add(self, experiment: Experiment) -> None:
"""Add experiment to queue of not started experiments."""
self.__dict[experiment.name] = experiment
self.__pending_experiments.append(experiment.name)
def remove(self, name: str) -> None:
"""Remove experiment from everywhere."""
if self.__active_experiment_name == name:
self.__active_experiment_name = None
if name in self.__pending_experiments:
self.__pending_experiments.remove(name)
if name in self.__archived_experiments:
self.__archived_experiments.remove(name)
if name in self.__dict:
del self.__dict[name]
def __getitem__(self, key: str) -> Experiment:
"""Get experiment by name."""
return self.__dict[key]
def get(self, key: str, default=None) -> Experiment:
"""Get experiment by name."""
return self.__dict.get(key, default)
def get_user_experiments(self, user: str) -> List[Experiment]:
"""Get list of experiments for specific user."""
return [
exp
for exp in self.__dict.values()
if user in exp.users
]
def __contains__(self, key: str) -> bool:
"""Check if experiment exists."""
return key in self.__dict
def finish_active(self) -> None:
"""Finish active experiment."""
self.__archived_experiments.insert(0, self.__active_experiment_name)
self.__active_experiment_name = None
@asynccontextmanager
async def get_next_experiment(self):
"""Context manager.
        On enter, take the next experiment from pending_experiments.
        On exit, move the finished experiment to archived_experiments.
"""
while True:
if self.active_experiment is None and self.pending_experiments:
break
await asyncio.sleep(10)
try:
self.__active_experiment_name = self.pending_experiments.pop(0)
yield self.active_experiment
finally:
self.finish_active()
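# Minimal usage sketch (an assumption about the calling director code, not
# part of this module): drain the registry one experiment at a time.
async def _example_serve(registry: ExperimentsRegistry) -> None:
    """Illustrative loop; assumes TLS handling happens elsewhere."""
    while True:
        async with registry.get_next_experiment() as experiment:
            await experiment.start(tls=False)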
| 34.245192
| 96
| 0.62221
|
794c71bff13999d2032e725932a3d5bf45bef754
| 454
|
py
|
Python
|
lippukala_tests/test_pos.py
|
kcsry/lippukala
|
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
|
[
"MIT"
] | 1
|
2019-03-04T15:35:39.000Z
|
2019-03-04T15:35:39.000Z
|
lippukala_tests/test_pos.py
|
kcsry/lippukala
|
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
|
[
"MIT"
] | 8
|
2016-03-26T10:07:16.000Z
|
2020-12-10T09:06:36.000Z
|
lippukala_tests/test_pos.py
|
kcsry/lippukala
|
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
|
[
"MIT"
] | null | null | null |
from lippukala.models import Code
from .utils import _create_test_order
def test_pos_view(admin_client):
for x in range(5):
_create_test_order()
admin_client.get("/pos/")
codes = admin_client.get("/pos/", {"json": "1"}).json()["codes"]
code_id = codes[0]["id"]
admin_client.post("/pos/", {"use": code_id, "station": "hurp"})
code = Code.objects.get(pk=code_id)
assert code.is_used
assert code.used_at == "hurp"
| 28.375
| 68
| 0.64978
|
794c72b7ea8eb9dad5f3c1be820121d48d17b481
| 6,789
|
py
|
Python
|
examples/pwr_run/checkpointing/debug/v100_only/job28.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/debug/v100_only/job28.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/debug/v100_only/job28.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a DenseNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.004
args_model = 'densenet169'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_v100_only/' + job_name + '*'
total_epochs = 46
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_v100_only/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
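# Illustrative trigger (an assumption about the scheduler side, not part of
# this script): the scheduler checkpoints a running job by sending SIGTERM,
# e.g. os.kill(<job_pid>, signal.SIGTERM), which invokes terminateProcess()
# above before the process exits.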
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.873239
| 118
| 0.703786
|
794c7375e89b8820e30b6fd608f1f70c4905169b
| 62,272
|
py
|
Python
|
src/asm6502.py
|
jsissom/py6502
|
241afc7eebb7e64c40d7783401b30fdc00962c5e
|
[
"BSD-2-Clause"
] | 26
|
2017-04-15T05:27:24.000Z
|
2022-03-21T13:46:13.000Z
|
src/asm6502.py
|
KrisKennaway/py6502
|
241afc7eebb7e64c40d7783401b30fdc00962c5e
|
[
"BSD-2-Clause"
] | 9
|
2016-07-11T07:19:40.000Z
|
2022-01-27T18:01:57.000Z
|
src/asm6502.py
|
KrisKennaway/py6502
|
241afc7eebb7e64c40d7783401b30fdc00962c5e
|
[
"BSD-2-Clause"
] | 7
|
2017-04-16T08:23:51.000Z
|
2022-02-20T15:21:14.000Z
|
import re
class asm6502():
def __init__(self, debug=0):
# print "65C02 Assembler"
self.debuglevel = debug
self.text_of_lines = list() # of strings
self.lines = list() # parsed lines (symbol, opcode, addrmode, value
self.symbols = list() # of (name,line#) tuples
self.labeldict = dict()
self.labellist = list()
self.opcodelist = list()
self.opcodedict = dict()
self.addressmodes = dict()
self.addressmmodelist = list()
self.object_code = list() # 64 K entries to cover whole memory map
for i in xrange(0, 65536):
self.object_code.append(-1) # -1 indicate location not populated
self.littleendian = True # Use le and be directives to change this
self.genopcodelist() # generate the tables
self.build_opcode_map()
self.build_encoding_table()
# some handy lookups
self.decimal_digits = "0123456789"
self.hex_digits = "abcdefABCDEF0123456789"
self.octal_digits = "01234567"
self.letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
self.allstuff = list()
self.line = 1
def clear_state(self):
self.text_of_lines = list() # of strings
self.lines = list() # parsed lines (symbol, opcode, addrmode, value
self.symbols = list() # of (name,line#) tuples
self.labeldict = dict()
self.labellist = list()
self.opcodelist = list()
self.opcodedict = dict()
self.addressmodes = dict()
self.addressmmodelist = list()
self.littleendian = True # Use le and be directives to change this
self.allstuff = list()
self.line = 1
def info(self, linenumber, text):
self.debug(1, "INFO: Line %d :%s" % (linenumber, text))
def warning(self, linenumber, linetext, text):
print "WARNING: Line %d :%s" % (linenumber, text)
print " " + linetext
def strip_comments(self, thestring):
self.debug(3, "string passed to strip_comments()=%s" % thestring)
position = thestring.find(';')
if (position == -1):
return (thestring, "")
else:
return (thestring[:position].rstrip(), thestring[position:].rstrip())
def debug(self, level=0, astring="No String Given"):
if (level > self.debuglevel):
pass
else:
print " DEBUG(%d):%s" % (level, astring)
# find a label at the front. Strip it and return the symbol
def strip_label(self, thestring, linenumber):
position = thestring.find(':')
if (position == -1):
return ("", thestring.strip())
else:
labelstr = thestring[:position].strip()
returnstr = thestring[position + 1:].strip()
position = labelstr.find(' ')
if (position == -1):
self.labeldict[labelstr] = linenumber
self.labellist.append((linenumber, labelstr))
self.debug(2, "Line %d Label %s found at line %d" % (linenumber, labelstr, linenumber))
return (labelstr, returnstr)
else:
labelstr = labelstr[:position]
self.warning(linenumber=linenumber, linetext=thestring,
text="More than one thing in the label field. Ignoring everything between the first space and the colon")
            self.labellist.append((linenumber, labelstr))
            self.labeldict[labelstr] = linenumber
self.info(linenumber, text="Label %s found at line %d" % (labelstr, linenumber))
return (labelstr, returnstr)
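    # Illustrative behavior (not executed): strip_label("start: lda #$10", 7)
    # returns ("start", "lda #$10") and records the label "start" against
    # line 7; a line without a colon comes back as ("", <stripped line>).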
# Consider the next thing an opcode
# strip it and return the opcode with the remainder of the line
def strip_opcode(self, thestring, linenumber):
mystring = thestring.strip()
noopcode = False
noremainder = False
if len(mystring) == 0:
opcodestr = ""
remainderstr = ""
noopcode = True
noremainder = True
elif ' ' in mystring:
position = thestring.find(' ')
opcodestr = thestring[:position].strip()
remainderstr = thestring[position + 1:].strip()
noopcode = False
noremainder = False
else:
opcodestr = mystring
remainderstr = ""
noopcode = False
noremainder = True
if noopcode:
# print "no opcode or remainder"
return (("", ""))
else:
if noremainder:
# print "opcode %s but no remainder" % opcodestr
return ((opcodestr, ""))
else:
# print "opcode %s with remainder %s" % (opcodestr,remainderstr)
return ((opcodestr, remainderstr))
def check_opcode(self, opcode_in, linenumber):
opcode = opcode_in.lower()
if opcode == "":
self.debug(3, "check_opcode returning null")
return None
elif opcode in self.validopcodes:
self.opcodelist.append((linenumber, opcode))
self.debug(3, "check_opcode found %s in validopcodes" % opcode)
return opcode
elif opcode in self.validdirectives:
self.opcodelist.append((linenumber, opcode))
self.debug(3, "check_opcode found %s in validirectives" % opcode)
return opcode
else:
self.debug(3, "check_opcode could not find opcode %s " % opcode)
self.warning(linenumber=linenumber, linetext="", text="unknown opcode %s" % opcode)
return None
def identify_addressmodeformat(self, remainderstr, linenumber):
# remove all whitespace
thestring = remainderstr.replace(" ", "")
if (thestring == ""):
premode = "nothing"
value = ""
elif thestring[0] == "#":
# It's immediate
premode = "immediate"
value = thestring[1:]
elif (thestring == "a") or (thestring == "A"):
premode = "accumulator"
value = ""
elif re.search("""^\((.*),[xX]\)$""", thestring):
premode = "bracketedindexedx"
b = re.search("""^\((.*),[xX]\)$""", thestring)
value = b.group(1)
elif re.search("""^\((.*)\),[yY]$""", thestring):
premode = "bracketedcommay"
b = re.search("""^\((.*)\),[yY]$""", thestring)
value = b.group(1)
elif re.search("""^(.*),[xX]$""", thestring):
b = re.search("""^(.*),[xX]$""", thestring)
value = b.group(1)
premode = "numbercommax"
elif re.search("""^(.*),[yY]$""", thestring):
b = re.search("""^(.*),[yY]$""", thestring)
value = b.group(1)
premode = "numbercommay"
elif (thestring[0] == '$') or (thestring[0] == '@') \
or (thestring[0] == '%') \
or (thestring[0] == '&') \
or (thestring[0] in self.decimal_digits):
premode = "number"
value = thestring
        elif ((thestring[0] in self.letters) and (thestring != "A") and (thestring != "a")):
premode = "number"
value = thestring
elif (thestring[0] == "+") or (thestring[0] == "-"):
premode = "offset"
value = thestring
elif re.search("""^\((.*),[xX]\)$""", thestring):
premode = "bracketedindexedx"
b = re.search("""^\((.*),[xX]\)$""", thestring)
value = b.group(1)
elif re.search("""^\((.*)\),[yY]$""", thestring):
premode = "bracketedcommay"
b = re.search("""^\((.*)\),[yY]$""", thestring)
value = b.group(1)
elif re.search("""^\(.*\)$""", thestring):
premode = "bracketed"
value = thestring[1:-1]
elif thestring[0] in self.letters:
premode = "name"
value = thestring
else:
self.warning(linenumber, linetext=remainderstr, text="Can\'t make sense of address mode %s" % remainderstr)
premode = "nothing"
value = ""
self.debug(2, "premode = %s, value = %s" % (premode, value))
# We've classified the basic formats in premode
# some formats mean different things with different instructions
# E.G. a number is an offset with a branch but absolute with a load
# So we need to cross check the combinations of instruction with format
# to derive the actual address mode and whether or not it is allowed.
return (premode, value)
# Address mode format name applied
# implicit ~ "implicit"
# immediate #num ~ "immediate"
# accumulator A ~ "accumulator"
# absolute $2000 ~ "absolute"
# zero page $20 ~ "zeropage"
# absolute indexed x $5000,X ~ "absolutex"
# absolute indexed y $5000,y ~ "absolutey"
# zeropage indexed x $20,X ~ "zeropagex"
# zeropage indexed y $20,Y ~ "zeropagey"
# relative +10 (or label) ~ "relative"
# zeropage indexed indirect x ($20,X) ~ "zeropageindexedindirectx"
# zeropage indexed indirect y ($20),Y ~ "zeropageindexedindirecty"
# absolute indexed indirect ($5000,X) - only JMP ~ "absoluteindexedindirect"
# zeropage indirect ($20) ~ "zeropageindirect"
# absolute indirect ($5000) - only JMP ~ "absoluteindirect"
#
# names are numbers..
def identify_addressmode(self, opcode, premode, value, linenumber):
if (opcode in self.implicitopcodes) and (premode == "nothing"):
return "implicit"
if (opcode in self.immediateopcodes) and (premode == "immediate"):
return "immediate"
if (opcode in self.accumulatoropcodes) and (premode == "accumulator"):
return "accumulator"
if (opcode in self.accumulatoropcodes) and (premode == "nothing"):
return "accumulator"
if (opcode == "jmp"):
if (premode == "bracketed"):
return "absoluteindirect"
if (premode == "bracketedindexedx"):
return "absoluteindexedindirect"
if (premode == "number"):
return "absolute"
return "UNDECIDED"
if (opcode in self.zeropageopcodes) and (premode == "number") and (self.decode_value(value) != -1):
if (self.decode_value(value) < 256):
return "zeropage"
if (opcode in self.relativeopcodes) and ((premode == "number") or (premode == "offset")):
return "relative"
if (opcode in self.absoluteopcodes) and (premode == "number"):
return "absolute"
self.debug(3, "IDENTIFY_ADDRESSMODE for zeropagex opcode=%s premode=%s" % (opcode, premode))
if (opcode in self.zeropagexopcodes):
self.debug(3, "opcode was in zeropagexopcodes")
else:
self.debug(3, "opcode wasnt in zeropagexopcodes")
if (opcode in self.zeropagexopcodes) and (premode == "numbercommax"):
self.debug(3, "IDENTIFY_ADDRESSMODE (opcode was in self.zeropagexopcodes) and (premode was== numbercommax)")
self.debug(3, "IDENTIFY_ADDRESSMODE decoded value = 0x%x" % self.decode_value(value))
if (self.decode_value(value) < 256):
return "zeropagex"
if (opcode in self.zeropageyopcodes) and (premode == "numbercommay"):
if (self.decode_value(value) < 256):
return "zeropagey"
if (opcode in self.absolutexopcodes) and (premode == "numbercommax"):
return "absolutex"
if (opcode in self.absoluteyopcodes) and (premode == "numbercommay"):
return "absolutey"
if (opcode in self.zeropageyopcodes) and (premode == "numbercommay"):
return "zeropagey"
if (opcode in self.zeropageindexedindirectxopcodes) and (premode == "bracketedindexedx"):
return "zeropageindexedindirectx"
if (opcode in self.zeropageindexedindirectyopcodes) and (premode == "bracketedcommay"):
return "zeropageindexedindirecty"
if (opcode in self.zeropageindirectopcodes) and (premode == "bracketed"):
if (self.decode_value(value) < 256):
return "zeropageindirect"
self.debug(2, "INFO: GOT TO END OF IDENTIFY_ADDRESSMODE: Line %d opcode:%s premode:%s" % (
linenumber, opcode, premode))
return "UNDECIDED"
def decode_extraquadwords(self, linenumber, linetext, s):
newstring = "["
for c in s:
if c == "$":
newstring = newstring + "0x"
elif c == "@":
newstring = newstring + "0"
else:
newstring = newstring + c
newstring = newstring + "]"
thelist = eval(newstring)
newlist = list()
for i in thelist:
if type(i) == int:
a = i & 0x00ff
b = (((i & 0x000000000000ff00) >> 8) & 0x000000ff)
c = (((i & 0x0000000000ff0000) >> 16) & 0x000000ff)
d = (((i & 0x00000000ff000000) >> 24) & 0x000000ff)
e = (((i & 0x000000ff00000000) >> 32) & 0x000000ff)
f = (((i & 0x0000ff0000000000) >> 40) & 0x000000ff)
g = (((i & 0x00ff000000000000) >> 48) & 0x000000ff)
h = (((i & 0xff00000000000000) >> 56) & 0x000000ff)
if (self.littleendian == True):
newlist.append(a)
newlist.append(b)
newlist.append(c)
newlist.append(d)
newlist.append(e)
newlist.append(f)
newlist.append(g)
newlist.append(h)
else:
                    newlist.append(h)
                    newlist.append(g)
newlist.append(f)
newlist.append(e)
newlist.append(d)
newlist.append(c)
newlist.append(b)
newlist.append(a)
else:
self.warning(linenumber, linetext, "Can't parse word string %s" % newstring)
emptylist = list()
return emptylist
return newlist
def decode_extradoublewords(self, linenumber, linetext, s):
newstring = "["
for c in s:
if c == "$":
newstring = newstring + "0x"
elif c == "@":
newstring = newstring + "0"
else:
newstring = newstring + c
newstring = newstring + "]"
thelist = eval(newstring)
newlist = list()
for i in thelist:
if type(i) == int:
a = i & 0x00ff
b = (((i & 0x0000ff00) >> 8) & 0x000000ff)
c = (((i & 0x00ff0000) >> 16) & 0x000000ff)
d = (((i & 0xff000000) >> 24) & 0x000000ff)
if (self.littleendian == True):
newlist.append(a)
newlist.append(b)
newlist.append(c)
newlist.append(d)
else:
newlist.append(d)
newlist.append(c)
newlist.append(b)
newlist.append(a)
else:
self.warning(linenumber, linetext, "Can't parse word string %s" % newstring)
emptylist = list()
return emptylist
return newlist
# Just count the number of bytes without working out what they are
def count_extrabytes(self, opcode, operand):
count = len(operand.split(','))
if opcode == "db":
return count
elif opcode == "dw":
return count * 2
elif opcode == "ddw":
return count * 4
elif opcode == "dqw":
return count * 8
else:
return None
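    # Illustrative counts (not executed): count_extrabytes("dw", "$1000,$2000")
    # returns 4 (two two-byte words) and count_extrabytes("dqw", "1") returns 8.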
def decode_extrawords(self, linenumber, linetext, s):
csl = s.split(',')
newlist = list()
for theword in csl:
if theword[0] == '&':
label = theword[1:]
value = self.symbols[label]
elif theword[0] == '$':
value = eval("0x" + theword[1:])
elif theword[0] == '@':
value = eval("0" + theword[1:])
else:
value = eval(theword)
if type(value) == int:
a = value & 0x00ff
b = (((value & 0xff00) >> 8) & 0x00ff)
if (self.littleendian == True):
newlist.append(a)
newlist.append(b)
else:
newlist.append(b)
newlist.append(a)
else:
self.warning(linenumber, linetext, "Can't parse word string %s" % newstring)
emptylist = list()
return emptylist
return newlist
def decode_extrabytes(self, linenumber, linetext, s):
newstring = "["
for c in s:
if c == "$":
newstring = newstring + "0x"
elif c == "@":
newstring = newstring + "0"
else:
newstring = newstring + c
newstring = newstring + "]"
# Now parse the list
thelist = eval(newstring)
newlist = list()
for i in thelist:
if type(i) == int:
newlist.append(i)
else:
self.warning(linenumber, linetext, "Can't parse byte string %s" % newstring)
emptylist = list()
return emptylist
return newlist
def decode_value(self, s):
if (s[0] == '$'):
ns = int(s[1:], 16)
return ns
if (s[0] == '@'):
ns = int(s[1:], 8)
return ns
if (s[0] == '%'):
ns = int(s[1:], 2)
return ns
if (s[0] in self.decimal_digits):
ns = int(s)
return ns
return (-1)
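    # Illustrative conversions (not executed): decode_value("$1F") == 31,
    # decode_value("@17") == 15, decode_value("%1010") == 10,
    # decode_value("1000") == 1000; anything else returns -1.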
# Address mode format name applied
# implicit ~ "implicit"
# immediate #num ~ "immediate"
# accumulator A ~ "accumulator"
# absolute $2000 ~ "absolute"
# zero page $20 ~ "zeropage"
# absolute indexed x $5000,X ~ "absolutex"
# absolute indexed y $5000,y ~ "absolutey"
# zeropage indexed x $20,X ~ "zeropagex"
# zeropage indexed y $20,Y ~ "zeropagey"
# relative +10 (or label) ~ "relative"
# zeropage indexed indirect x ($20,X) ~ "zeropageindexedindirectx"
# zeropage indexed indirect y ($20),Y ~ "zeropageindexedindirecty"
# absolute indexed indirect ($5000,X) - only JMP ~ "absoluteindexedindirect"
# zeropage indirect ($20) ~ "zeropageindirect"
# absolute indirect ($5000) - only JMP ~ "absoluteindirect"
def genopcodelist(self):
self.modeswithlowbytevalue = \
["immediate", "absolute", "zeropage", "absolutex", "absolutey", \
"zeropagex", "zeropagey", "zeropageindexedindirectx", "zeropageindexedindirecty" \
"absoluteindexedindirect", "zeropageindirect",
"absoluteindirect"]
self.modeswithhighbytevalue = \
["absolute", "absolutex", "absolutey", \
"absoluteindexedindirect", "absoluteindirect"]
self.validdirectives = \
["db", "dw", "ddw", "dqw", "str", "org", "le", "be"]
self.validopcodes = \
["adc", "and", "asl", "bcc", "bcs", "beq", "bit", "bmi", "bne", \
"bpl", "bra", "brk", "bvc", "bvs", "clc", "cld", "cli", "clv", \
"cmp", "cpx", "cpy", "dea", "dec", "dex", "dey", "eor", "inc", "ina", "inx", \
"iny", "jmp", "jsr", "lda", "ldx", "ldy", "lsr", "nop", "ora", \
"pha", "php", "phx", "phy", "pla", "plp", "plx", "ply", "rol", \
"ror", "rti", "rts", "sbc", "sec", "sed", "sei", "sta", "stx", \
"sty", "stz", "tax", "tay", "trb", "tsb", "tsx", "txa", "txs", \
"tya"]
self.implicitopcodes = \
["brk", "clc", "cld", "cli", "clv", "dex", "dey", "inx", "iny", "nop", \
"pha", "php", "phx", "phy", "pla", "plp", "plx", "ply", "rti", "rts", \
"sec", "sed", "sei", "tax", "tay", "trb", "tsb", "tsx", "txa", "txs", \
"tya"]
self.immediateopcodes = \
["adc", "and", "bit", "cmp", "cpx", "cpy", "eor", "lda", "ldx", \
"ldy", "ora", "sbc"]
self.accumulatoropcodes = \
["asl", "dea", "dec", "ina", "inc", "lsr", "rol", "ror"]
self.zeropageopcodes = \
["adc", "and", "asl", "bit", "cmp", "cpx", "cpy", "dec", "eor", "inc", \
"lda", "ldx", "ldy", "lsr", "ora", "rol", "ror", "sbc", "sta", "stx", \
"sty", "stz", "trb", "tsb"]
self.absoluteopcodes = \
["adc", "and", "asl", "bit", "cmp", "cpx", "cpy", "dec", "eor", "inc", \
"jmp", "jsr", "lda", "ldx", "ldy", "lsr", "ora", "rol", "ror", "sbc", \
"sta", "stx", "sty", "stz", "trb", "tsb"]
self.absolutexopcodes = \
["adc", "and", "asl", "bit", "cmp", "dec", "eor", "inc", \
"lda", "lsr", "ora", "rol", "ror", "sbc", \
"sta", "stz", "ldy"]
self.absoluteyopcodes = \
["adc", "and", "cmp", "eor", \
"lda", "ldx", "ora", "sbc", "sta"]
self.zeropagexopcodes = \
["adc", "and", "cmp", "eor", "lda", "dec", "bit", "asl", "ldy", \
"ora", "sbc", "sta", "sty", "ror", "rol", "lsr", "inc", "stz"]
self.zeropageyopcodes = \
["ldx", "stx"]
self.relativeopcodes = \
["bmi", "bne", "bpl", "bra", "bvc", "bvs", "bcc", "bcs", "beq"]
self.zeropageindexedindirectxopcodes = \
["adc", "and", "cmp", "eor", "lda", "ora", "sbc", "sta"]
self.zeropageindexedindirectyopcodes = \
["adc", "and", "cmp", "eor", "lda", "ora", "sbc", "sta"]
self.zeropageindirectopcodes = \
["adc", "and", "cmp", "eor", "lda", "ora", "sbc", "sta"]
def build_opcode_map(self):
self.map = dict()
for opcode in self.validopcodes:
self.map[opcode] = list()
if opcode in self.implicitopcodes:
self.map[opcode].append("implicit")
if opcode in self.immediateopcodes:
self.map[opcode].append("immediate")
if opcode in self.accumulatoropcodes:
self.map[opcode].append("accumulator")
if opcode in self.zeropageopcodes:
self.map[opcode].append("zeropage")
if opcode in self.absoluteopcodes:
self.map[opcode].append("absolute")
if opcode in self.absolutexopcodes:
self.map[opcode].append("absolutex")
if opcode in self.absoluteyopcodes:
self.map[opcode].append("absolutey")
if opcode in self.zeropagexopcodes:
self.map[opcode].append("zeropagex")
if opcode in self.zeropageyopcodes:
self.map[opcode].append("zeropagey")
if opcode in self.relativeopcodes:
self.map[opcode].append("relative")
if opcode in self.zeropageindexedindirectxopcodes:
self.map[opcode].append("zeropageindexedindirectx")
if opcode in self.zeropageindexedindirectyopcodes:
self.map[opcode].append("zeropageindexedindirecty")
if opcode in self.zeropageindirectopcodes:
self.map[opcode].append("zeropageindirect")
def build_encoding_table(self):
self.hexcodes = dict()
self.hexcodes[0x00] = ("brk", "implicit")
self.hexcodes[0x10] = ("bpl", "relative")
self.hexcodes[0x20] = ("jsr", "absolute")
self.hexcodes[0x30] = ("bmi", "relative")
self.hexcodes[0x40] = ("rti", "implicit")
self.hexcodes[0x50] = ("bvc", "relative")
self.hexcodes[0x60] = ("rts", "implicit")
self.hexcodes[0x70] = ("bvs", "relative")
self.hexcodes[0x80] = ("bra", "relative")
self.hexcodes[0x90] = ("bcc", "relative")
self.hexcodes[0xA0] = ("ldy", "immediate")
self.hexcodes[0xB0] = ("bcs", "relative")
self.hexcodes[0xC0] = ("cpy", "immediate")
self.hexcodes[0xD0] = ("bne", "relative")
self.hexcodes[0xE0] = ("cpx", "immediate")
self.hexcodes[0xF0] = ("beq", "relative")
self.hexcodes[0x01] = ("ora", "zeropageindexedindirectx")
self.hexcodes[0x11] = ("ora", "zeropageindexedindirecty")
self.hexcodes[0x21] = ("and", "zeropageindexedindirectx")
self.hexcodes[0x31] = ("and", "zeropageindexedindirecty")
self.hexcodes[0x41] = ("eor", "zeropageindexedindirectx")
self.hexcodes[0x51] = ("eor", "zeropageindexedindirecty")
self.hexcodes[0x61] = ("adc", "zeropageindexedindirectx")
self.hexcodes[0x71] = ("adc", "zeropageindexedindirecty")
self.hexcodes[0x81] = ("sta", "zeropageindexedindirectx")
self.hexcodes[0x91] = ("sta", "zeropageindexedindirecty")
self.hexcodes[0xA1] = ("lda", "zeropageindexedindirectx")
self.hexcodes[0xB1] = ("lda", "zeropageindexedindirecty")
self.hexcodes[0xC1] = ("cmp", "zeropageindexedindirectx")
self.hexcodes[0xD1] = ("cmp", "zeropageindexedindirecty")
self.hexcodes[0xE1] = ("sbc", "zeropageindexedindirectx")
self.hexcodes[0xF1] = ("sbc", "zeropageindexedindirecty")
self.hexcodes[0x02] = ("", "")
self.hexcodes[0x12] = ("ora", "zeropageindirect")
self.hexcodes[0x22] = ("", "")
self.hexcodes[0x32] = ("and", "zeropageindirect")
self.hexcodes[0x42] = ("", "")
self.hexcodes[0x52] = ("eor", "zeropageindirect")
self.hexcodes[0x62] = ("", "")
self.hexcodes[0x72] = ("adc", "zeropageindirect")
self.hexcodes[0x82] = ("", "")
self.hexcodes[0x92] = ("sta", "zeropageindirect")
self.hexcodes[0xA2] = ("ldx", "immediate")
self.hexcodes[0xB2] = ("lda", "zeropageindirect")
self.hexcodes[0xC2] = ("", "")
self.hexcodes[0xD2] = ("cmp", "zeropageindirect")
self.hexcodes[0xE2] = ("", "")
self.hexcodes[0xF2] = ("sbc", "zeropageindirect")
self.hexcodes[0x03] = ("", "")
self.hexcodes[0x13] = ("", "")
self.hexcodes[0x23] = ("", "")
self.hexcodes[0x33] = ("", "")
self.hexcodes[0x43] = ("", "")
self.hexcodes[0x53] = ("", "")
self.hexcodes[0x63] = ("", "")
self.hexcodes[0x73] = ("", "")
self.hexcodes[0x83] = ("", "")
self.hexcodes[0x93] = ("", "")
self.hexcodes[0xA3] = ("", "")
self.hexcodes[0xB3] = ("", "")
self.hexcodes[0xC3] = ("", "")
self.hexcodes[0xD3] = ("", "")
self.hexcodes[0xE3] = ("", "")
self.hexcodes[0xF3] = ("", "")
self.hexcodes[0x04] = ("tsb", "zeropage")
self.hexcodes[0x14] = ("trb", "zeropage")
self.hexcodes[0x24] = ("bit", "zeropage")
self.hexcodes[0x34] = ("bit", "zeropagex")
self.hexcodes[0x44] = ("", "")
self.hexcodes[0x54] = ("", "")
self.hexcodes[0x64] = ("stz", "zeropage")
self.hexcodes[0x74] = ("stz", "zeropagex")
self.hexcodes[0x84] = ("sty", "zeropage")
self.hexcodes[0x94] = ("sty", "zeropagex")
self.hexcodes[0xA4] = ("ldy", "zeropage")
self.hexcodes[0xB4] = ("ldy", "zeropagex")
self.hexcodes[0xC4] = ("cpy", "zeropage")
self.hexcodes[0xD4] = ("", "")
self.hexcodes[0xE4] = ("cpx", "zeropage")
self.hexcodes[0xF4] = ("", "")
self.hexcodes[0x05] = ("ora", "zeropage")
self.hexcodes[0x15] = ("ora", "zeropagex")
self.hexcodes[0x25] = ("and", "zeropage")
self.hexcodes[0x35] = ("and", "zeropagex")
self.hexcodes[0x45] = ("eor", "zeropage")
self.hexcodes[0x55] = ("eor", "zeropagex")
self.hexcodes[0x65] = ("adc", "zeropage")
self.hexcodes[0x75] = ("adc", "zeropagex")
self.hexcodes[0x85] = ("sta", "zeropage")
self.hexcodes[0x95] = ("sta", "zeropagex")
self.hexcodes[0xA5] = ("lda", "zeropage")
self.hexcodes[0xB5] = ("lda", "zeropagex")
self.hexcodes[0xC5] = ("cmp", "zeropage")
self.hexcodes[0xD5] = ("cmp", "zeropagex")
self.hexcodes[0xE5] = ("sbc", "zeropage")
self.hexcodes[0xF5] = ("sbc", "zeropagex")
self.hexcodes[0x06] = ("asl", "zeropage")
self.hexcodes[0x16] = ("asl", "zeropagex")
self.hexcodes[0x26] = ("rol", "zeropage")
self.hexcodes[0x36] = ("rol", "zeropagex")
self.hexcodes[0x46] = ("lsr", "zeropage")
self.hexcodes[0x56] = ("lsr", "zeropagex")
self.hexcodes[0x66] = ("ror", "zeropage")
self.hexcodes[0x76] = ("ror", "zeropagex")
self.hexcodes[0x86] = ("stx", "zeropage")
self.hexcodes[0x96] = ("stx", "zeropagey")
self.hexcodes[0xA6] = ("ldx", "zeropage")
self.hexcodes[0xB6] = ("ldx", "zeropagey")
self.hexcodes[0xC6] = ("dec", "zeropage")
self.hexcodes[0xD6] = ("dec", "zeropagex")
self.hexcodes[0xE6] = ("inc", "zeropage")
self.hexcodes[0xF6] = ("inc", "zeropagex")
self.hexcodes[0x07] = ("", "")
self.hexcodes[0x17] = ("", "")
self.hexcodes[0x27] = ("", "")
self.hexcodes[0x37] = ("", "")
self.hexcodes[0x47] = ("", "")
self.hexcodes[0x57] = ("", "")
self.hexcodes[0x67] = ("", "")
self.hexcodes[0x77] = ("", "")
self.hexcodes[0x87] = ("", "")
self.hexcodes[0x97] = ("", "")
self.hexcodes[0xA7] = ("", "")
self.hexcodes[0xB7] = ("", "")
self.hexcodes[0xC7] = ("", "")
self.hexcodes[0xD7] = ("", "")
self.hexcodes[0xE7] = ("", "")
self.hexcodes[0xF7] = ("", "")
self.hexcodes[0x08] = ("php", "implicit")
self.hexcodes[0x18] = ("clc", "implicit")
self.hexcodes[0x28] = ("plp", "implicit")
self.hexcodes[0x38] = ("sec", "implicit")
self.hexcodes[0x48] = ("pha", "implicit")
self.hexcodes[0x58] = ("cli", "implicit")
self.hexcodes[0x68] = ("pla", "implicit")
self.hexcodes[0x78] = ("sei", "implicit")
self.hexcodes[0x88] = ("dey", "implicit")
self.hexcodes[0x98] = ("tya", "implicit")
self.hexcodes[0xA8] = ("tay", "implicit")
self.hexcodes[0xB8] = ("clv", "implicit")
self.hexcodes[0xC8] = ("iny", "implicit")
self.hexcodes[0xD8] = ("cld", "implicit")
self.hexcodes[0xE8] = ("inx", "implicit")
self.hexcodes[0xF8] = ("sed", "implicit")
self.hexcodes[0x09] = ("ora", "immediate")
self.hexcodes[0x19] = ("ora", "absolutey")
self.hexcodes[0x29] = ("and", "immediate")
self.hexcodes[0x39] = ("and", "absolutey")
self.hexcodes[0x49] = ("eor", "immediate")
self.hexcodes[0x59] = ("eor", "absolutey")
self.hexcodes[0x69] = ("adc", "immediate")
self.hexcodes[0x79] = ("adc", "absolutey")
self.hexcodes[0x89] = ("bit", "immediate")
self.hexcodes[0x99] = ("sta", "absolutey")
self.hexcodes[0xA9] = ("lda", "immediate")
self.hexcodes[0xB9] = ("lda", "absolutey")
self.hexcodes[0xC9] = ("cmp", "immediate")
self.hexcodes[0xD9] = ("cmp", "absolutey")
self.hexcodes[0xE9] = ("sbc", "immediate")
self.hexcodes[0xF9] = ("sbc", "absolutey")
self.hexcodes[0x0A] = ("asl", "accumulator")
self.hexcodes[0x1A] = ("ina", "accumulator")
self.hexcodes[0x2A] = ("rol", "accumulator")
self.hexcodes[0x3A] = ("dea", "accumulator")
self.hexcodes[0x4A] = ("lsr", "accumulator")
self.hexcodes[0x5A] = ("phy", "implicit")
self.hexcodes[0x6A] = ("ror", "accumulator")
self.hexcodes[0x7A] = ("ply", "implicit")
self.hexcodes[0x8A] = ("txa", "implicit")
self.hexcodes[0x9A] = ("txs", "implicit")
self.hexcodes[0xAA] = ("tax", "implicit")
self.hexcodes[0xBA] = ("tsx", "implicit")
self.hexcodes[0xCA] = ("dex", "implicit")
self.hexcodes[0xDA] = ("phx", "implicit")
self.hexcodes[0xEA] = ("nop", "implicit")
self.hexcodes[0xFA] = ("plx", "implicit")
self.hexcodes[0x0B] = ("", "")
self.hexcodes[0x1B] = ("", "")
self.hexcodes[0x2B] = ("", "")
self.hexcodes[0x3B] = ("", "")
self.hexcodes[0x4B] = ("", "")
self.hexcodes[0x5B] = ("", "")
self.hexcodes[0x6B] = ("", "")
self.hexcodes[0x7B] = ("", "")
self.hexcodes[0x8B] = ("", "")
self.hexcodes[0x9B] = ("", "")
self.hexcodes[0xAB] = ("", "")
self.hexcodes[0xBB] = ("", "")
self.hexcodes[0xCB] = ("", "")
self.hexcodes[0xDB] = ("", "")
self.hexcodes[0xEB] = ("", "")
self.hexcodes[0xFB] = ("", "")
self.hexcodes[0x0C] = ("tsb", "absolute")
self.hexcodes[0x1C] = ("trb", "absolute")
self.hexcodes[0x2C] = ("bit", "absolute")
self.hexcodes[0x3C] = ("bit", "absolutex")
self.hexcodes[0x4C] = ("jmp", "absolute")
self.hexcodes[0x5C] = ("", "")
self.hexcodes[0x6C] = ("jmp", "absoluteindirect")
self.hexcodes[0x7C] = ("jmp", "absoluteindexedindirect")
self.hexcodes[0x8C] = ("sty", "absolute")
self.hexcodes[0x9C] = ("stz", "absolute")
self.hexcodes[0xAC] = ("ldy", "absolute")
self.hexcodes[0xBC] = ("ldy", "absolutex")
self.hexcodes[0xCC] = ("cpy", "absolute")
self.hexcodes[0xDC] = ("", "")
self.hexcodes[0xEC] = ("cpx", "absolute")
self.hexcodes[0xFC] = ("", "")
self.hexcodes[0x0D] = ("ora", "absolute")
self.hexcodes[0x1D] = ("ora", "absolutex")
self.hexcodes[0x2D] = ("and", "absolute")
self.hexcodes[0x3D] = ("and", "absolutex")
self.hexcodes[0x4D] = ("eor", "absolute")
self.hexcodes[0x5D] = ("eor", "absolutex")
self.hexcodes[0x6D] = ("adc", "absolute")
self.hexcodes[0x7D] = ("adc", "absolutex")
self.hexcodes[0x8D] = ("sta", "absolute")
self.hexcodes[0x9D] = ("sta", "absolutex")
self.hexcodes[0xAD] = ("lda", "absolute")
self.hexcodes[0xBD] = ("lda", "absolutex")
self.hexcodes[0xCD] = ("cmp", "absolute")
self.hexcodes[0xDD] = ("cmp", "absolutex")
self.hexcodes[0xED] = ("sbc", "absolute")
self.hexcodes[0xFD] = ("sbc", "absolutex")
self.hexcodes[0x0E] = ("asl", "absolute")
self.hexcodes[0x1E] = ("asl", "absolutex")
self.hexcodes[0x2E] = ("rol", "absolute")
self.hexcodes[0x3E] = ("rol", "absolutex")
self.hexcodes[0x4E] = ("lsr", "absolute")
self.hexcodes[0x5E] = ("lsr", "absolutex")
self.hexcodes[0x6E] = ("ror", "absolute")
self.hexcodes[0x7E] = ("ror", "absolutex")
self.hexcodes[0x8E] = ("stx", "absolute")
self.hexcodes[0x9E] = ("stz", "absolutex")
self.hexcodes[0xAE] = ("ldx", "absolute")
self.hexcodes[0xBE] = ("ldx", "absolutey")
self.hexcodes[0xCE] = ("dec", "absolute")
self.hexcodes[0xDE] = ("dec", "absolutex")
self.hexcodes[0xEE] = ("inc", "absolute")
self.hexcodes[0xFE] = ("inc", "absolutex")
self.hexcodes[0x0F] = ("", "")
self.hexcodes[0x1F] = ("", "")
self.hexcodes[0x2F] = ("", "")
self.hexcodes[0x3F] = ("", "")
self.hexcodes[0x4F] = ("", "")
self.hexcodes[0x5F] = ("", "")
self.hexcodes[0x6F] = ("", "")
self.hexcodes[0x7F] = ("", "")
self.hexcodes[0x8F] = ("", "")
self.hexcodes[0x9F] = ("", "")
self.hexcodes[0xAF] = ("", "")
self.hexcodes[0xBF] = ("", "")
self.hexcodes[0xCF] = ("", "")
self.hexcodes[0xDF] = ("", "")
self.hexcodes[0xEF] = ("", "")
self.hexcodes[0xFF] = ("", "")
self.otherhexcodes = dict() # Make another list for synonyms
for hexval in xrange(256):
self.otherhexcodes[hexval] = ("", "")
self.otherhexcodes[0x1A] = ("inc", "accumulator")
self.otherhexcodes[0x3A] = ("dec", "accumulator")
self.otherhexcodes[0x90] = ("blt", "relative")
self.otherhexcodes[0xB0] = ("bge", "relative")
self.hexmap = dict()
for hexval in xrange(256):
op, mode = self.hexcodes[hexval]
astring = op + mode
if len(astring) > 1:
self.hexmap[astring] = hexval
op, mode = self.otherhexcodes[hexval]
astring = op + mode
if len(astring) > 1:
self.hexmap[astring] = hexval
# implicit ~ "implicit"
# immediate #num ~ "immediate"
# accumulator A ~ "accumulator"
# absolute $2000 ~ "absolute"
# zero page $20 ~ "zeropage"
# absolute indexed x $5000,X ~ "absolutex"
# absolute indexed y $5000,y ~ "absolutey"
# zeropage indexed x $20,X ~ "zeropagex"
# zeropage indexed y $20,Y ~ "zeropagey"
# relative +10 (or label) ~ "relative"
# zeropage indexed indirect x ($20,X) ~ "zeropageindexedindirectx"
# zeropage indexed indirect y ($20),Y ~ "zeropageindexedindirecty"
# absolute indexed indirect ($5000,X) - only JMP ~ "absoluteindexedindirect"
# zeropage indirect ($20) ~ "zeropageindirect"
# absolute indirect ($5000) - only JMP ~ "absoluteindirect"
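    # Illustrative example: per the table above, 0xAD maps to ("lda", "absolute"),
    # so a line such as "lda $2000" assembles to three bytes, the opcode plus a
    # two-byte operand, matching addrmode_length("absolute") == 2 below (the
    # returned length counts operand bytes only, not the opcode byte).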
def addrmode_length(self, addrmode):
if addrmode == "implicit":
return 0
if addrmode == "immediate":
return 1
if addrmode == "accumulator":
return 0
if addrmode == "absolute":
return 2
if addrmode == "zeropage":
return 1
if addrmode == "absolutex":
return 2
if addrmode == "absolutey":
return 2
if addrmode == "zeropagex":
return 1
if addrmode == "zeropagey":
return 1
if addrmode == "relative":
return 1
if addrmode == "zeropageindexedindirectx":
return 1
if addrmode == "zeropageindexedindirecty":
return 1
if addrmode == "absoluteindexedindirect":
return 2
if addrmode == "zeropageindirect":
return 1
if addrmode == "absoluteindirect":
return 2
def firstpasstext(self, thetuple):
(offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value, comment,
extrabytes, num_extrabytes, linetext) = thetuple
a = ("%d" % linenumber).ljust(4)
if (labelstring != None):
b = (": %s" % labelstring).ljust(10)
else:
b = " "
if (opcode_val == None):
c = " "
else:
if (opcode_val > -1):
c = "%02X " % opcode_val
else:
c = "?? "
if (lowbyte == None):
d = " "
else:
if (lowbyte > -1):
d = "%02X " % lowbyte
else:
d = "?? "
if (highbyte == None):
e = " "
else:
if (highbyte > -1):
e = "%02X " % highbyte
else:
e = "?? "
# Print the opcode in 4 spaces
if (opcode == None):
f = " "
else:
f = opcode.ljust(4)
# Either print the operand in 10 spaces or print 10 spaces
# when there is no operand
if (operand == None):
g = " "
else:
if (len(operand) > 0):
g = operand.ljust(10)
else:
g = " "
h = comment
astring = a + b + c + d + e + f + g + h
self.debug(1, astring)
return astring
def secondpasstext(self, thetuple):
(offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value, comment,
extrabytes, num_extrabytes, linetext) = thetuple
a = ("%d " % linenumber).ljust(5)
aa = ("%04X " % offset)
if (labelstring != None) and (labelstring != ""):
b = (": %s:" % labelstring).ljust(10)
else:
b = ": "
if (opcode_val == None):
c = " "
else:
if (opcode_val > -1):
c = "%02X " % opcode_val
else:
c = "?? "
if (lowbyte == None):
d = " "
else:
if (lowbyte > -1):
d = "%02X " % lowbyte
else:
d = "?? "
if (highbyte == None):
e = " "
else:
if (highbyte > -1):
e = "%02X " % highbyte
else:
e = "?? "
# Print the opcode in 4 spaces
if (opcode == None):
f = " "
else:
f = opcode.ljust(4)
if (operand == None):
g = " "
else:
if (len(operand) > 0):
g = operand.ljust(10)
else:
g = " "
h = comment
astring = a + aa + b + c + d + e + f + g + h
self.debug(1, astring)
self.debug(2, thetuple)
        # If there are extra bytes from a db, dw, ddw, dqw or text directive,
# print the resulting hex bytes on the next line.
if (extrabytes != None) and (len(extrabytes) > 1):
hexchars = ""
index = 0
for index in range(0, len(extrabytes) - 1):
hexchars = hexchars + "%02X " % extrabytes[index]
hexchars = hexchars + "%02X" % extrabytes[len(extrabytes) - 1]
bytestring = a + aa + ": " + hexchars
self.debug(1, bytestring)
return astring + "\n" + bytestring
return astring
# Separate out the label, opcode, operand and comment fields.
# Identify the address mode as we go along
# The results end up in self.allstuff in a tuple per entry
# -1 in fields indicates a value not known yet
# None in a field indicates that it doesn't exist
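    # Example (illustrative, assuming the strip_* and identify_* helpers used
    # below behave as their names suggest): a source line like
    #     start: lda $2000 ; fetch the flag
    # should yield labelstring "start", opcode "lda", addressmode "absolute",
    # opcode_val 0xAD, lowbyte 0x00 and highbyte 0x20, with the comment text
    # carried through unchanged.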
def parse_line(self, thestring):
linenumber = self.line
self.line += 1
thetext = "LINE #" + ("%d" % linenumber).ljust(5) + (": %s" % thestring)
self.debug(2, thetext)
mystring, comment = self.strip_comments(thestring)
labelstring, mystring = self.strip_label(mystring, linenumber)
opcode_anycase, operand = self.strip_opcode(mystring, linenumber)
opcode = self.check_opcode(opcode_anycase, linenumber)
premode, value = self.identify_addressmodeformat(operand, linenumber)
addressmode = self.identify_addressmode(opcode, premode, value, linenumber)
self.debug(3, "PARSE LINE: opcode=%s addressmode=%s" % (str(opcode), addressmode))
if (opcode != None) and (addressmode != "UNDECIDED"):
astring = opcode + addressmode
self.debug(3, "PARSE LINE 2 astring=%s" % astring)
if astring in self.hexmap:
self.debug(3, "PARSE LINE 3 astring=%s self.hexmap[astring]=0x%x" % (astring, self.hexmap[astring]))
opcode_val = self.hexmap[astring]
else:
opcode_val = None
else:
opcode_val = None
astring = ""
if (self.addrmode_length(addressmode) == 0):
lowbyte = None
highbyte = None
elif (self.addrmode_length(addressmode) == 1) and (self.decode_value(value) != -1):
lowbyte = self.decode_value(value) & 0x00FF
highbyte = None
elif (self.addrmode_length(addressmode) == 2) and (self.decode_value(value) != -1):
lowbyte = self.decode_value(value) & 0x00FF
highbyte = ((self.decode_value(value) & 0xFF00) >> 8) & 0x00FF
elif (self.addrmode_length(addressmode) == 1) and (self.decode_value(value) == -1):
lowbyte = -1
highbyte = None
elif (self.addrmode_length(addressmode) == 2) and (self.decode_value(value) == -1):
lowbyte = -1
highbyte = -1
else:
lowbyte = None
highbyte = None
offset = -1
# Handle switches between little endian and big endian
if (opcode == "le"):
self.littleendian = True
if (opcode == "be"):
self.littleendian = False
# interpret extra bytes from the db, dw, ddw, dqw directives.
extrabytes = list()
if (opcode == "db" or opcode == "dw" or opcode == "ddw" or opcode == "dqw"):
num_extrabytes = self.count_extrabytes(opcode, operand)
else:
num_extrabytes = None
# We are moving the extrabytes parsing to pass 3, so we can
# add label addresses into DWs and have the label defined when we need it.
#
# if (opcode=="db") and (operand != None) and (len(operand) > 0):
# extrabytes = self.decode_extrabytes(linenumber, thestring, operand)
# elif (opcode=="dw") and (operand != None) and (len(operand) > 0):
# extrabytes = self.decode_extrawords(linenumber, thestring, operand)
# elif (opcode=="ddw") and (operand != None) and (len(operand) > 0):
# extrabytes = self.decode_extradoublewords(linenumber, thestring, operand)
# elif (opcode=="dqw") and (operand != None) and (len(operand) > 0):
# extrabytes = self.decode_extraquadwords(linenumber, thestring, operand)
linetext = thestring
thetuple = (
offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value, comment,
extrabytes, num_extrabytes, linetext)
self.allstuff.append(thetuple)
self.firstpasstext(thetuple)
self.debug(2, "addressmode = %s" % addressmode)
self.debug(2, str(self.allstuff[linenumber - 1]))
self.debug(2, "-----------------------")
# Perform the three passes of the assembly
def assemble(self, lines):
self.clear_state()
# First pass, parse each line for label, opcode, operand and comments
self.debug(1, "First Pass")
for line in lines:
self.parse_line(line)
# Second pass, compute the offsets and populate the symbol table
self.debug(1, "Second Pass")
self.symbols = dict()
# Default to 0x0000. ORG directive overrides
self.address = 0x0000
# Add the offset to each line by counting the opcodes and operands
for i in xrange(len(self.allstuff)):
tuple = self.allstuff[i]
(offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value,
comment, extrabytes, num_extrabytes, linetext) = tuple
# Handle ORG directive
if (opcode == "org"):
newaddr = self.decode_value(value)
if (newaddr != -1):
self.address = newaddr & 0x00ffff
offset = self.address
if (opcode_val != None):
self.address += 1
if (lowbyte != None):
self.address += 1
if (highbyte != None):
self.address += 1
# self.address += len(extrabytes)
if type(num_extrabytes) == int:
self.address += num_extrabytes
# If there is a label, we now know its address. So store it in the symbol table
if (labelstring != None) and (labelstring != ""):
self.symbols[labelstring] = offset
tuple = (
offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value,
comment, extrabytes, num_extrabytes, linetext)
self.allstuff[i] = tuple
self.secondpasstext(tuple)
# Print out the symbol table
self.debug(1, "Symbol Table")
for label in self.symbols:
offset = self.symbols[label]
astring = (("%s" % label).ljust(10)) + (" = " + "$%04X" % offset)
self.debug(1, astring)
# Third pass
# Go through filling in the unknown values from the symbol table
self.debug(1, "Third Pass")
self.listing = list()
self.instruction_map = [None] * 65536 # A map for where the instructions are so the debugger can know
# where the start byte of real instructions are.
# The opcode is entered in the location
# non instruction locations are set to None.
for i in xrange(len(self.allstuff)):
tuple = self.allstuff[i]
(offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value,
comment, extrabytes, num_extrabytes, linetext) = tuple
# Compute the offset for relative branches
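            # Worked example: a branch instruction at offset $1000 whose target
            # label resolves to $1010 gives start = $1002 and delta = 14, so the
            # operand byte becomes 0x0E; a delta outside -128..127 triggers the
            # warning below.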
if (lowbyte == -1) and (addressmode == "relative"):
destination = self.symbols[value]
start = offset + 2 # Delta is relative to the first byte after the branch instruction
delta = destination - start
lowbyte = delta & 0x00ff
if (delta > 127) or (delta < -128):
self.warning(linenumber, "", "branch can't reach destination, delta is %d" % delta)
elif (lowbyte == -1) and (
(addressmode in self.modeswithlowbytevalue) or (addressmode in self.modeswithhighbytevalue)):
if (value in self.symbols):
newvalue = self.symbols[value]
lowbyte = newvalue & 0x00ff
if (highbyte == -1) and (addressmode in self.modeswithhighbytevalue):
if (value in self.symbols):
newvalue = self.symbols[value]
highbyte = ((newvalue & 0xff00) >> 8) & 0x00ff
# populate the extrabytes lists
if (opcode == "db") and (operand != None) and (len(operand) > 0):
extrabytes = self.decode_extrabytes(linenumber, linetext, operand)
elif (opcode == "dw") and (operand != None) and (len(operand) > 0):
extrabytes = self.decode_extrawords(linenumber, linetext, operand)
elif (opcode == "ddw") and (operand != None) and (len(operand) > 0):
extrabytes = self.decode_extradoublewords(linenumber, linetext, operand)
elif (opcode == "dqw") and (operand != None) and (len(operand) > 0):
extrabytes = self.decode_extraquadwords(linenumber, linetext, operand)
tuple = (
offset, linenumber, labelstring, opcode_val, lowbyte, highbyte, opcode, operand, addressmode, value,
comment, extrabytes, num_extrabytes, linetext)
self.allstuff[i] = tuple
line = self.secondpasstext(tuple)
self.listing.append(line)
# Fill in the instruction map
# This makes it easy for an interactive disassembler to
# know what is instruction code and what is data.
# By signaling which are operand bytes, it's easy to
# disassemble backwards from the current position
# None = Not an instruction or operand
# positive numbers < 256 = an opcode
# -1 = first operand byte
            # -2 = second operand byte
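            # Example: a three-byte "lda $2000" placed at offset $1000 stores
            # 0xAD at instruction_map[0x1000], -1 at [0x1001] and -2 at [0x1002];
            # locations never written stay None.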
if opcode_val != None:
self.instruction_map[offset] = opcode_val
if self.addrmode_length(addressmode) > 0:
self.instruction_map[offset + 1] = -1 # -1 signals the first operand byte
if self.addrmode_length(addressmode) > 1:
self.instruction_map[offset + 2] = -2 # -2 signals the second operand byte
# write generated bytes to object code map
addr = offset
if (opcode_val != None) and (opcode_val != -1):
self.object_code[addr] = opcode_val
addr = addr + 1
if (lowbyte != None):
self.object_code[addr] = lowbyte
addr = addr + 1
if (highbyte != None):
self.object_code[addr] = highbyte
addr = addr + 1
if (extrabytes != None):
for i in extrabytes:
self.object_code[addr] = i
addr = addr + 1
listingtext = list()
listingtext.append("LISTING")
listingtext += self.listing
symboltext = list()
symboltext.append("SYMBOL TABLE")
for label in self.symbols:
offset = self.symbols[label]
astring = (("%s" % label).ljust(10)) + (" = " + "$%04X" % offset)
symboltext.append(astring)
# print "LISTING"
# for i in self.listing:
# print i
#
# print
# print "SYMBOL TABLE"
# for label in self.symbols:
# offset = self.symbols[label]
# astring=(("%s" % label).ljust(10)) +(" = "+"$%04X" % offset)
# print astring
#
# print
# self.print_object_code()
return (listingtext, symboltext)
def print_object_code(self):
print "OBJECT CODE"
# Insert a star when there are empty spots in the memory map
i = 0
astring = ""
printed_a_star = 0
while (i < 65536):
if self.object_code[i] != -1:
printed_a_star = 0
astring = "%04X: %02X" % (i, self.object_code[i])
localrun = 1
i = i + 1
if (i < 65536):
nextval = self.object_code[i]
while (nextval != -1) and (localrun < 16):
astring = astring + " %02X" % self.object_code[i]
i = i + 1
localrun = localrun + 1
if (i < 65536):
nextval = self.object_code[i]
else:
nextval = -1
print astring
else:
print astring
else:
if (printed_a_star == 0):
print "*"
printed_a_star = 1
i = i + 1
def srecord_checksum(self, astring):
checksum = 0
for i in xrange(len(astring) / 2):
hexpair = "0x" + astring[(i * 2):(i * 2) + 2]
bytevalue = eval(hexpair)
checksum = checksum + bytevalue
checksum = checksum & 0x0ff
checksum = checksum ^ 0xff
return "%02x" % checksum
def str2asciibytes(self, astring):
ascii = ""
for c in astring:
num = ord(c)
ascii += "%02x" % num
return ascii
def srecords(self, version, revision, module_name, comment):
# print "S19 FORMAT OUTPUT"
# print
i = 0
astring = ""
theoutput = list()
bytelist = list()
bytecount = 0
address = 0
# Make the Header Record
if len(module_name) > 20:
modname_trimmed = module_name[:20]
else:
modname_trimmed = module_name.ljust(20)
if (len(comment) > 36):
comment_trimmed = comment[:36]
else:
comment_trimmed = comment
text = "%02x%02x" % (version, revision)
        text = text + self.str2asciibytes(modname_trimmed + comment_trimmed)
addr = "0000"
countedpart = addr + text
length = "%02x" % (len(addr + text))
checksum = self.srecord_checksum(length + addr + text)
header = "S0" + length + addr + text + checksum
theoutput.append(header)
last_addr = 0
while (i < 65536):
if self.object_code[i] != -1:
address = i
values = list()
values.append(self.object_code[i])
localrun = 1
i = i + 1
if (i < 65536):
nextval = self.object_code[i]
while (nextval != -1) and (localrun < 16):
values.append(nextval)
last_addr = i
i = i + 1
localrun = localrun + 1
if (i < 65536):
nextval = self.object_code[i]
else:
nextval = -1
# We reached 16 bytes, or hit the end or hit -1 So
# Output the data record
data = ""
for value in values:
data = ("%02X" % value) + data
addr = "%02x%02x" % (((address >> 8) & 0xff), (address & 0xff))
length = "%02x" % (len(addr + text))
checksum = self.srecord_checksum(length + addr + data)
record = "S1" + length + addr + data + checksum
theoutput.append(record)
else:
i = i + 1
# Output the count
record_count = len(theoutput)
data = "%02x%02x" % (((record_count >> 8) & 0xff), (record_count & 0xff))
length = "03"
checksum = self.srecord_checksum(length + data)
record = "S5" + length + data + checksum
theoutput.append(record)
# Output the terminator
length = "03"
addr = "%02x%02x" % (((last_addr >> 8) & 0xff), (last_addr & 0xff))
checksum = self.srecord_checksum(length + addr)
record = "S9" + length + addr + checksum
theoutput.append(record)
return (theoutput)
def print_srecords(self, version, revision, module_name, comment):
lines = self.srecords(version, revision, module_name, comment)
for line in lines:
print line
def intelhex(self):
# print "INTEL HEX FORMAT OUTPUT"
# print
# Insert a star when there are empty spots in the memory map
i = 0
astring = ""
theoutput = list()
bytelist = list()
bytecount = 0
address = 0
datarecord = "00"
eofrecord = ":00000001FF"
while (i < 65536):
if self.object_code[i] != -1:
address = i
values = list()
values.append(self.object_code[i])
localrun = 1
i = i + 1
if (i < 65536):
nextval = self.object_code[i]
while (nextval != -1) and (localrun < 16):
values.append(nextval)
i = i + 1
localrun = localrun + 1
if (i < 65536):
nextval = self.object_code[i]
else:
nextval = -1
length = len(values)
astring = ":%02X%04x" % (length, address)
astring += datarecord
for value in values:
astring += "%02X" % value
theoutput.append(astring)
else:
length = len(values)
astring = "addr=%04x len=%02x data=" % (address, length)
for value in values:
astring += "%02X" % value
theoutput.append(astring)
else:
i = i + 1
theoutput.append(eofrecord)
return theoutput
def print_intelhex(self):
lines = self.intelhex()
for line in lines:
print line
# returns entire 64K memory as hex in the form of 64 bytes per line.
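    # Example: with addresses enabled, each of the 1024 output lines is a
    # 4-digit address such as "0040:" followed by 128 hex digits (64 bytes),
    # so the second line covers $0040-$007F.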
def hex(self, noaddress=False):
# print "HEX FORMAT OUTPUT"
# print
theoutput = list()
for i in xrange(1024):
addr = 64 * i
# Prepend with an address field, or not if not desired
if noaddress:
line = ""
else:
line = "%04x:" % addr
# add the bytes as hex to the line
for j in xrange(64):
val = self.object_code[(i * 64) + j]
# Range check the bytes
if val < 0:
val = 0
if val > 255:
val = 255
line = line + ("%02x" % val)
theoutput.append(line)
return theoutput
def print_hex(self):
lines = self.hex()
for line in lines:
print line
| 40.995392
| 134
| 0.503934
|
794c74ac089a168156d1e881ec319a0af8b91534
| 7,086
|
py
|
Python
|
rebalance.py
|
hainingpan/inverse_volatility_caculation
|
b1684cc9bd2c399468c67841ce6360db88c45a88
|
[
"MIT"
] | 3
|
2020-10-08T06:31:14.000Z
|
2021-12-14T03:05:34.000Z
|
rebalance.py
|
hainingpan/inverse_volatility_caculation
|
b1684cc9bd2c399468c67841ce6360db88c45a88
|
[
"MIT"
] | null | null | null |
rebalance.py
|
hainingpan/inverse_volatility_caculation
|
b1684cc9bd2c399468c67841ce6360db88c45a88
|
[
"MIT"
] | 1
|
2020-10-28T10:27:53.000Z
|
2020-10-28T10:27:53.000Z
|
from datetime import datetime, date
import math
import numpy as np
import time
import sys
import requests
import re
from ortools.linear_solver import pywraplp
# if len(sys.argv) == 1:
# symbols = ['UPRO', 'TMF']
# else:
# symbols = sys.argv[1].split(',')
# for i in range(len(symbols)):
# symbols[i] = symbols[i].strip().upper()
symbols = ['TMF', 'UPRO']
num_trading_days_per_year = 252
window_size = 20
date_format = "%Y-%m-%d"
end_timestamp = int(time.time())
start_timestamp = int(end_timestamp - (1.4 * (window_size + 1) + 4) * 86400)
def get_volatility_and_performance(symbol,cookie,crumb):
download_url = "https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history".format(symbol, start_timestamp, end_timestamp)
lines = requests.get(
download_url,
headers={
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'
}).text.strip().split('\n')
assert lines[0].split(',')[0] == 'Date'
assert lines[0].split(',')[4] == 'Close'
prices = []
for line in lines[1:]:
prices.append(float(line.split(',')[4]))
prices.reverse()
volatilities_in_window = []
for i in range(window_size):
volatilities_in_window.append(math.log(prices[i] / prices[i+1]))
most_recent_date = datetime.strptime(lines[-1].split(',')[0], date_format).date()
assert (date.today() - most_recent_date).days <= 4, "today is {}, most recent trading day is {}".format(date.today(), most_recent_date)
return np.std(volatilities_in_window, ddof = 1) * np.sqrt(num_trading_days_per_year), prices[0] / prices[window_size] - 1.0, prices[0]
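# Illustrative note for get_volatility_and_performance(): the window holds 20
# daily log returns ln(p[i] / p[i+1]); their sample standard deviation is
# scaled by sqrt(252) to annualize it, so a daily std of 1% reports as roughly
# 0.01 * sqrt(252) ~= 15.9% annualized volatility.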
def get_cookie():
url = 'https://finance.yahoo.com/quote/VOO/history?p=VOO'
r = requests.get(url)
txt = r.text
cookie = r.cookies['B']
    pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
for line in txt.splitlines():
m = pattern.match(line)
if m is not None:
crumb = m.groupdict()['crumb']
return cookie,crumb
def get_data():
#cookie,crumb=get_cookie()
cookie='9mev4idf68vgk&b=3&s=g9'
crumb='Xpr8Z7BQn4W'
volatilities = []
performances = []
current_prices = []
sum_inverse_volatility = 0.0
for symbol in symbols:
volatility, performance, current_price = get_volatility_and_performance(symbol,cookie,crumb)
sum_inverse_volatility += 1 / volatility
volatilities.append(volatility)
performances.append(performance)
current_prices.append(current_price)
alpha=1/(np.array(volatilities) * sum_inverse_volatility)
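    # Worked example: annualized volatilities of 60% and 20% give inverse
    # weights 1/0.6 and 1/0.2 (sum ~6.67), so alpha comes out to 0.25 and 0.75;
    # the less volatile asset receives the larger allocation.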
print ("Portfolio: {}, as of {} (window size is {} days)".format(str(symbols), date.today().strftime('%Y-%m-%d'), window_size))
for i in range(len(symbols)):
        print ('{} allocation ratio: {:.2f}% (annualized volatility: {:.2f}%, performance: {:.2f}%)'.format(symbols[i], 100*(alpha[i]), float(volatilities[i] * 100), float(performances[i] * 100)))
return alpha,current_prices
def create_model(epsilon=0.01):
    # the target dollar ratio of TMF to UPRO used below is alpha[0] / alpha[1]
data={}
data['constraint_coeffs']=[
[current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1],current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1]],
[current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1],current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1]],
[current_prices[0],current_prices[1],current_prices[0],current_prices[1]],
[current_prices[0],current_prices[1],0,0],
[0,0,current_prices[0],current_prices[1]],
[1,0,0,0],
[0,1,0,0],
[1,1,1,1]
]
data['lb']=[-np.inf, 0,0,0,0,N_Tax_T,N_Tax_U,1]
data['ub']=[0, np.inf,S,S_Tax,S_IRA,np.inf,np.inf,np.inf]
data['obj_coeffs']=[current_prices[0],current_prices[1],current_prices[0],current_prices[1]]
data['xub']=[np.floor(S_Tax/current_prices[0]),np.floor(S_Tax/current_prices[1]),np.floor(S_IRA/current_prices[0]),np.floor(S_IRA/current_prices[1])]
data['num_vars']=len(data['obj_coeffs'])
data['num_constraints']=len(data['constraint_coeffs'])
return data
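# Sketch of the model built by create_model() above: the integer variables
# x0..x3 are share counts of TMF and UPRO in the taxable account (x0, x1) and
# in the IRA (x2, x3). The first two rows keep the dollar ratio of TMF to UPRO
# within +/- epsilon of the target alpha[0]/alpha[1]; the next three cap
# spending at the combined, taxable and IRA balances; the x0 >= N_Tax_T and
# x1 >= N_Tax_U rows prevent selling existing taxable lots; the last row
# requires at least one share overall; and the objective maximizes the total
# dollars invested.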
def findsol(epsilon=0.01):
data = create_model(epsilon)
solver = pywraplp.Solver.CreateSolver('CBC')
x={}
for j in range(data['num_vars']):
x[j] = solver.IntVar(0, data['xub'][j], 'x[%i]' % j)
for i in range(data['num_constraints']):
constraint = solver.RowConstraint(data['lb'][i], data['ub'][i], '')
for j in range(data['num_vars']):
constraint.SetCoefficient(x[j], data['constraint_coeffs'][i][j])
objective = solver.Objective()
for j in range(data['num_vars']):
objective.SetCoefficient(x[j], data['obj_coeffs'][j])
objective.SetMaximization()
status = solver.Solve()
if status==pywraplp.Solver.OPTIMAL:
sol=[x[i].solution_value() for i in range(4)]
else:
sol=[0,0,0,0]
return sol,status
alpha,current_prices=get_data()
N_Tax_T=float(input("Current shares of "+symbols[0]+" in taxable: "))
N_Tax_U=float(input("Current shares of "+symbols[1]+" in taxable: "))
Tax_C=float(input("Current cash in taxable: "))
N_IRA_T=float(input("Current shares of "+symbols[0]+" in IRA: "))
N_IRA_U=float(input("Current shares of "+symbols[1]+" in IRA: "))
IRA_C=float(input("Current cash in IRA: "))
Tax_T=N_Tax_T*current_prices[0]
Tax_U=N_Tax_U*current_prices[1]
IRA_T=N_IRA_T*current_prices[0]
IRA_U=N_IRA_U*current_prices[1]
S_Tax=Tax_T+Tax_U+Tax_C
S_IRA=IRA_T+IRA_U+IRA_C
S=S_Tax+S_IRA
epsilon=0.01
sol,status=findsol(epsilon)
while status != pywraplp.Solver.OPTIMAL:
epsilon=epsilon+0.01
sol,status=findsol(epsilon)
N_Tax_T2,N_Tax_U2,N_IRA_T2,N_IRA_U2=sol
print('-'*10+'result'+'-'*10)
Tax_C2=S_Tax-N_Tax_T2*current_prices[0]-N_Tax_U2*current_prices[1]
IRA_C2=S_IRA-N_IRA_T2*current_prices[0]-N_IRA_U2*current_prices[1]
S_T2=(N_Tax_T2+N_IRA_T2)*current_prices[0]
S_U2=(N_Tax_U2+N_IRA_U2)*current_prices[1]
print('Cash in Taxable %f' % Tax_C2)
print('Cash in IRA %f' % IRA_C2)
print('Achievable balance of TMF/UPRO: ({:.2f}%/{:.2f}%), target ({:.2f}%/{:.2f}%)'.format(100*S_T2/(S_T2+S_U2),100*S_U2/(S_T2+S_U2),100*alpha[0],100*alpha[1]))
print('-'*10+'action'+'-'*10)
print(('buy'*(N_Tax_T2-N_Tax_T>=0)+'sell'*(N_Tax_T2-N_Tax_T<0))+' TMF in Taxable: '+str(int(abs(N_Tax_T2-N_Tax_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_Tax_U2-N_Tax_U>=0)+'sell'*(N_Tax_U2-N_Tax_U<0))+' UPRO in Taxable: '+str(int(abs(N_Tax_U2-N_Tax_U)))+' at price '+str(current_prices[1]))
print(('buy'*(N_IRA_T2-N_IRA_T>=0)+'sell'*(N_IRA_T2-N_IRA_T<0))+' TMF in IRA: '+str(int(abs(N_IRA_T2-N_IRA_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_IRA_U2-N_IRA_U>=0)+'sell'*(N_IRA_U2-N_IRA_U<0))+' UPRO in IRA: '+str(int(abs(N_IRA_U2-N_IRA_U)))+' at price '+str(current_prices[1]))
| 42.945455
| 196
| 0.650155
|
794c750d8021f13f9731043a89ea4fc33bd13248
| 5,266
|
py
|
Python
|
docs/conf.py
|
ngachung/incubator-sdap-nexus
|
38e768694fcc142e2d88283cb1e44e05f88da847
|
[
"Apache-2.0"
] | 17
|
2017-11-16T07:36:33.000Z
|
2021-11-07T00:02:20.000Z
|
docs/conf.py
|
ngachung/incubator-sdap-nexus
|
38e768694fcc142e2d88283cb1e44e05f88da847
|
[
"Apache-2.0"
] | 35
|
2018-01-11T00:50:20.000Z
|
2022-03-17T23:08:07.000Z
|
docs/conf.py
|
ngachung/incubator-sdap-nexus
|
38e768694fcc142e2d88283cb1e44e05f88da847
|
[
"Apache-2.0"
] | 25
|
2017-11-16T07:36:38.000Z
|
2022-02-03T20:48:46.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'incubator-sdap-nexus'
copyright = '2018, Apache SDAP'
author = 'Apache SDAP'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = '1.0.0-SNAPSHOT'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'incubator-sdap-nexusdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'incubator-sdap-nexus.tex', 'incubator-sdap-nexus Documentation',
'Apache SDAP', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'incubator-sdap-nexus', 'incubator-sdap-nexus Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'incubator-sdap-nexus', 'incubator-sdap-nexus Documentation',
author, 'incubator-sdap-nexus', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 31.532934
| 82
| 0.650779
|
794c759ba4c940a06b7bcdae7acf51bfbd2a07d9
| 1,342
|
py
|
Python
|
src/lib/dataset/datasets/custom_dataset.py
|
xping-zhou/CenterTrack
|
38c89dd5dc9c339385d0386189245eb8c3a9ac22
|
[
"MIT"
] | 2,093
|
2020-04-02T21:19:01.000Z
|
2022-03-31T04:29:45.000Z
|
src/lib/dataset/datasets/custom_dataset.py
|
xping-zhou/CenterTrack
|
38c89dd5dc9c339385d0386189245eb8c3a9ac22
|
[
"MIT"
] | 251
|
2020-04-03T06:41:33.000Z
|
2022-03-20T12:43:34.000Z
|
src/lib/dataset/datasets/custom_dataset.py
|
xping-zhou/CenterTrack
|
38c89dd5dc9c339385d0386189245eb8c3a9ac22
|
[
"MIT"
] | 506
|
2020-04-03T02:58:17.000Z
|
2022-03-28T13:13:33.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..generic_dataset import GenericDataset
class CustomDataset(GenericDataset):
num_categories = 1
default_resolution = [-1, -1]
class_name = ['']
max_objs = 128
cat_ids = {1: 1}
def __init__(self, opt, split):
assert (opt.custom_dataset_img_path != '') and \
(opt.custom_dataset_ann_path != '') and \
(opt.num_classes != -1) and \
(opt.input_h != -1) and (opt.input_w != -1), \
'The following arguments must be specified for custom datasets: ' + \
'custom_dataset_img_path, custom_dataset_ann_path, num_classes, ' + \
'input_h, input_w.'
img_dir = opt.custom_dataset_img_path
ann_path = opt.custom_dataset_ann_path
self.num_categories = opt.num_classes
self.class_name = ['' for _ in range(self.num_categories)]
self.default_resolution = [opt.input_h, opt.input_w]
self.cat_ids = {i: i for i in range(1, self.num_categories + 1)}
self.images = None
# load image list and coco
super().__init__(opt, split, ann_path, img_dir)
self.num_samples = len(self.images)
print('Loaded Custom dataset {} samples'.format(self.num_samples))
def __len__(self):
return self.num_samples
def run_eval(self, results, save_dir):
pass
| 33.55
| 75
| 0.695976
|
794c7683b545a543ae42b9c3d18137a15b824634
| 2,620
|
py
|
Python
|
youtube_dl/views.py
|
Shovon588/api_collection
|
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
|
[
"MIT"
] | null | null | null |
youtube_dl/views.py
|
Shovon588/api_collection
|
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
|
[
"MIT"
] | null | null | null |
youtube_dl/views.py
|
Shovon588/api_collection
|
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
|
[
"MIT"
] | null | null | null |
from pytube import YouTube
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import YoutubeDLSerializer
from .utils import make_time, make_size
class YoutubeDL(APIView):
serializer_class = YoutubeDLSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
url = serializer.validated_data.get("url")
try:
file = YouTube(url)
except:
return Response({
"status": "failed",
"message": "Invalid url",
}, status=status.HTTP_404_NOT_FOUND)
videos = file.streams
thumbnail = file.thumbnail_url
title = file.title
duration = make_time(file.length)
video_res = {
"1080p": None,
"720p": None,
"480p": None,
"360p": None,
"240p": None,
"144p": None
}
aud_size = 0
audio = None
for video in videos:
if video.resolution in video_res and video_res[video.resolution] is None:
video_res[video.resolution] = {"resolution": video.resolution, "video_type": video.subtype,
"size": make_size(video.filesize),
"url": video.url}
if video.type == "audio":
if video.filesize > aud_size:
audio = video
aud_size = video.filesize
video_data = [value for key, value in video_res.items() if value is not None]
audio_data = None
if audio is not None:
audio_type = audio.subtype
size = make_size(audio.filesize)
url = audio.url
audio_data = {"audio_type": audio_type, "size": size, "url": url}
return Response({
"status": "success",
"message": "Got some data.",
"title": title,
"duration": duration,
"thumbnail": thumbnail,
"video_data": video_data,
}, status=status.HTTP_200_OK)
return Response({"status": "failed",
"message": "Something went wrong.",
"error": serializer.errors},
status=status.HTTP_400_BAD_REQUEST)
| 34.933333
| 111
| 0.500763
|
794c787f07e33c8991c289b0bfd88bafa3660e12
| 11,796
|
py
|
Python
|
openstackclient/network/v2/subnet.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
openstackclient/network/v2/subnet.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
openstackclient/network/v2/subnet.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Subnet action implementations"""
import copy
from json.encoder import JSONEncoder
from openstackclient.common import command
from openstackclient.common import parseractions
from openstackclient.common import utils
from openstackclient.identity import common as identity_common
def _format_allocation_pools(data):
pool_formatted = ['%s-%s' % (pool.get('start', ''), pool.get('end', ''))
for pool in data]
return ','.join(pool_formatted)
def _format_host_routes(data):
try:
return '\n'.join([JSONEncoder().encode(route) for route in data])
except (TypeError, KeyError):
return ''
_formatters = {
'allocation_pools': _format_allocation_pools,
'dns_nameservers': utils.format_list,
'host_routes': _format_host_routes,
}
def _get_columns(item):
columns = list(item.keys())
if 'tenant_id' in columns:
columns.remove('tenant_id')
columns.append('project_id')
return tuple(sorted(columns))
def convert_entries_to_nexthop(entries):
# Change 'gateway' entry to 'nexthop'
changed_entries = copy.deepcopy(entries)
for entry in changed_entries:
entry['nexthop'] = entry['gateway']
del entry['gateway']
return changed_entries
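# Example: convert_entries_to_nexthop([{'destination': '10.10.0.0/16',
# 'gateway': '192.168.71.254'}]) returns the same route with the 'gateway' key
# renamed to 'nexthop', the field name the Networking API expects; the function
# below performs the reverse mapping.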
def convert_entries_to_gateway(entries):
    # Change 'nexthop' entry to 'gateway'
changed_entries = copy.deepcopy(entries)
for entry in changed_entries:
entry['gateway'] = entry['nexthop']
del entry['nexthop']
return changed_entries
def _get_attrs(client_manager, parsed_args):
attrs = {}
if parsed_args.name is not None:
attrs['name'] = str(parsed_args.name)
if 'project' in parsed_args and parsed_args.project is not None:
identity_client = client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
client = client_manager.network
attrs['network_id'] = client.find_network(parsed_args.network,
ignore_missing=False).id
if parsed_args.subnet_pool is not None:
subnet_pool = client.find_subnet_pool(parsed_args.subnet_pool,
ignore_missing=False)
attrs['subnetpool_id'] = subnet_pool.id
if parsed_args.use_default_subnet_pool:
attrs['use_default_subnetpool'] = True
if parsed_args.gateway.lower() != 'auto':
if parsed_args.gateway.lower() == 'none':
attrs['gateway_ip'] = None
else:
attrs['gateway_ip'] = parsed_args.gateway
if parsed_args.prefix_length is not None:
attrs['prefixlen'] = parsed_args.prefix_length
if parsed_args.subnet_range is not None:
attrs['cidr'] = parsed_args.subnet_range
if parsed_args.ip_version is not None:
attrs['ip_version'] = parsed_args.ip_version
if parsed_args.ipv6_ra_mode is not None:
attrs['ipv6_ra_mode'] = parsed_args.ipv6_ra_mode
if parsed_args.ipv6_address_mode is not None:
attrs['ipv6_address_mode'] = parsed_args.ipv6_address_mode
if parsed_args.allocation_pools is not None:
attrs['allocation_pools'] = parsed_args.allocation_pools
if parsed_args.enable_dhcp is not None:
attrs['enable_dhcp'] = parsed_args.enable_dhcp
if parsed_args.dns_nameservers is not None:
attrs['dns_nameservers'] = parsed_args.dns_nameservers
if parsed_args.host_routes is not None:
# Change 'gateway' entry to 'nexthop' to match the API
attrs['host_routes'] = convert_entries_to_nexthop(
parsed_args.host_routes)
return attrs
class CreateSubnet(command.ShowOne):
"""Create a subnet"""
def get_parser(self, prog_name):
parser = super(CreateSubnet, self).get_parser(prog_name)
parser.add_argument(
'name',
help='New subnet name',
)
parser.add_argument(
'--project',
metavar='<project>',
help="Owner's project (name or ID)",
)
identity_common.add_project_domain_option_to_parser(parser)
subnet_pool_group = parser.add_mutually_exclusive_group()
subnet_pool_group.add_argument(
'--subnet-pool',
metavar='<subnet-pool>',
help='Subnet pool from which this subnet will obtain a CIDR '
'(Name or ID)',
)
subnet_pool_group.add_argument(
'--use-default-subnet-pool',
action='store_true',
help='Use default subnet pool for --ip-version',
)
parser.add_argument(
'--prefix-length',
metavar='<prefix-length>',
help='Prefix length for subnet allocation from subnetpool',
)
parser.add_argument(
'--subnet-range',
metavar='<subnet-range>',
help='Subnet range in CIDR notation '
'(required if --subnet-pool is not specified, '
'optional otherwise)',
)
parser.add_argument(
'--allocation-pool',
metavar='start=<ip-address>,end=<ip-address>',
dest='allocation_pools',
action=parseractions.MultiKeyValueAction,
required_keys=['start', 'end'],
help='Allocation pool IP addresses for this subnet '
'e.g.: start=192.168.199.2,end=192.168.199.254 '
'(This option can be repeated)',
)
dhcp_enable_group = parser.add_mutually_exclusive_group()
dhcp_enable_group.add_argument(
'--dhcp',
dest='enable_dhcp',
action='store_true',
default=True,
help='Enable DHCP (default)',
)
dhcp_enable_group.add_argument(
'--no-dhcp',
dest='enable_dhcp',
action='store_false',
help='Disable DHCP',
)
parser.add_argument(
'--dns-nameserver',
metavar='<dns-nameserver>',
action='append',
dest='dns_nameservers',
help='DNS name server for this subnet '
'(This option can be repeated)',
)
parser.add_argument(
'--gateway',
metavar='<gateway>',
default='auto',
help="Specify a gateway for the subnet. The three options are: "
" <ip-address>: Specific IP address to use as the gateway "
" 'auto': Gateway address should automatically be "
" chosen from within the subnet itself "
" 'none': This subnet will not use a gateway "
"e.g.: --gateway 192.168.9.1, --gateway auto, --gateway none"
"(default is 'auto')",
)
parser.add_argument(
'--host-route',
metavar='destination=<subnet>,gateway=<ip-address>',
dest='host_routes',
action=parseractions.MultiKeyValueAction,
required_keys=['destination', 'gateway'],
help='Additional route for this subnet '
'e.g.: destination=10.10.0.0/16,gateway=192.168.71.254 '
'destination: destination subnet (in CIDR notation) '
'gateway: nexthop IP address '
'(This option can be repeated)',
)
parser.add_argument(
'--ip-version',
type=int,
default=4,
choices=[4, 6],
help='IP version (default is 4). Note that when subnet pool is '
'specified, IP version is determined from the subnet pool '
'and this option is ignored.',
)
parser.add_argument(
'--ipv6-ra-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help='IPv6 RA (Router Advertisement) mode, '
'valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]',
)
parser.add_argument(
'--ipv6-address-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help='IPv6 address mode, '
'valid modes: [dhcpv6-stateful, dhcpv6-stateless, slaac]',
)
parser.add_argument(
'--network',
required=True,
metavar='<network>',
help='Network this subnet belongs to (name or ID)',
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
attrs = _get_attrs(self.app.client_manager, parsed_args)
obj = client.create_subnet(**attrs)
columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (columns, data)
class DeleteSubnet(command.Command):
"""Delete subnet"""
def get_parser(self, prog_name):
parser = super(DeleteSubnet, self).get_parser(prog_name)
parser.add_argument(
'subnet',
metavar="<subnet>",
help="Subnet to delete (name or ID)",
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
client.delete_subnet(
client.find_subnet(parsed_args.subnet))
class ListSubnet(command.Lister):
"""List subnets"""
def get_parser(self, prog_name):
parser = super(ListSubnet, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
return parser
def take_action(self, parsed_args):
data = self.app.client_manager.network.subnets()
headers = ('ID', 'Name', 'Network', 'Subnet')
columns = ('id', 'name', 'network_id', 'cidr')
if parsed_args.long:
headers += ('Project', 'DHCP', 'Name Servers',
'Allocation Pools', 'Host Routes', 'IP Version',
'Gateway')
columns += ('tenant_id', 'enable_dhcp', 'dns_nameservers',
'allocation_pools', 'host_routes', 'ip_version',
'gateway_ip')
return (headers,
(utils.get_item_properties(
s, columns,
formatters=_formatters,
) for s in data))
class ShowSubnet(command.ShowOne):
"""Show subnet details"""
def get_parser(self, prog_name):
parser = super(ShowSubnet, self).get_parser(prog_name)
parser.add_argument(
'subnet',
metavar="<subnet>",
help="Subnet to show (name or ID)",
)
return parser
def take_action(self, parsed_args):
obj = self.app.client_manager.network.find_subnet(parsed_args.subnet,
ignore_missing=False)
columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (columns, data)
| 35.745455
| 79
| 0.592659
|
794c7962eeb6f3fc0476aa93940e66ff4a5135e6
| 1,493
|
py
|
Python
|
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/MyFirstPlugin/__init__.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | 7
|
2017-02-16T15:25:47.000Z
|
2021-11-08T13:10:15.000Z
|
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/MyFirstPlugin/__init__.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | null | null | null |
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/MyFirstPlugin/__init__.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | 7
|
2017-03-06T08:47:27.000Z
|
2021-12-11T12:42:43.000Z
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
MyFirstPlugin
A QGIS plugin
This is my first plugin
-------------------
begin : 2016-01-23
copyright : (C) 2016 by Anita
email : foo@bar.com
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load MyFirstPlugin class from file MyFirstPlugin.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .my_first_plugin import MyFirstPlugin
return MyFirstPlugin(iface)
| 41.472222
| 77
| 0.389149
|
794c79758692170ff7672a88128cae3f84e920d7
| 936
|
py
|
Python
|
wk/extra/node/boostrap.py
|
Peiiii/wk
|
dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4
|
[
"MIT"
] | null | null | null |
wk/extra/node/boostrap.py
|
Peiiii/wk
|
dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4
|
[
"MIT"
] | null | null | null |
wk/extra/node/boostrap.py
|
Peiiii/wk
|
dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4
|
[
"MIT"
] | null | null | null |
from .node import *
class BoostrapBadgeBase(Span):
def __init__(self,text,type):
super().__init__(_class="badge badge-%s"%(type))
self.__call__(text)
class Badges:
@staticmethod
def primary(text):
return BoostrapBadgeBase(text,type="primary")
@staticmethod
def secondary(text):
return BoostrapBadgeBase(text,type="secondary")
@staticmethod
def success(text):
return BoostrapBadgeBase(text,type="success")
@staticmethod
def danger(text):
return BoostrapBadgeBase(text,type="danger")
@staticmethod
def warning(text):
return BoostrapBadgeBase(text,type="warning")
@staticmethod
def info(text):
return BoostrapBadgeBase(text,type="info")
@staticmethod
def light(text):
return BoostrapBadgeBase(text,type="light")
@staticmethod
def dark(text):
return BoostrapBadgeBase(text,type="dark")
| 25.297297
| 56
| 0.66453
|
794c7afdd68cc3613c41c9bcdb731a648639d6d7
| 1,774
|
py
|
Python
|
mac/google-cloud-sdk/lib/surface/container/binauthz/attestors/update.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
mac/google-cloud-sdk/lib/surface/container/binauthz/attestors/update.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
mac/google-cloud-sdk/lib/surface/container/binauthz/attestors/update.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Update Attestor command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.container.binauthz import apis
from googlecloudsdk.api_lib.container.binauthz import attestors
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container.binauthz import flags
class Update(base.UpdateCommand):
r"""Update an existing Attestor.
## EXAMPLES
To update an existing Attestor `my_attestor`:
$ {command} my_attestor \
--description="my new attestor description"
"""
@classmethod
def Args(cls, parser):
flags.AddConcepts(
parser,
flags.GetAttestorPresentationSpec(
positional=True, group_help='The attestor to update.'),
)
parser.add_argument(
'--description',
required=False,
help='The new description for the attestor')
def Run(self, args):
attestor_ref = args.CONCEPTS.attestor.Parse()
api_version = apis.GetApiVersion(self.ReleaseTrack())
return attestors.Client(api_version).Update(
attestor_ref, description=args.description)
| 32.254545
| 74
| 0.738444
|
794c7bb3a409ad7d7411aed94ba2bee120b724cd
| 18,469
|
py
|
Python
|
userbot/plugins/animazioni1.py
|
Kazutettoh/strafattinoh-bot
|
e8ab44b6e720c8133fd43695355fabf20d37fe1c
|
[
"MIT"
] | null | null | null |
userbot/plugins/animazioni1.py
|
Kazutettoh/strafattinoh-bot
|
e8ab44b6e720c8133fd43695355fabf20d37fe1c
|
[
"MIT"
] | null | null | null |
userbot/plugins/animazioni1.py
|
Kazutettoh/strafattinoh-bot
|
e8ab44b6e720c8133fd43695355fabf20d37fe1c
|
[
"MIT"
] | null | null | null |
"""
Commands:
.hypno
.plane
.pula
.sega
.snake
.solarsystem
.sorpresa
"""
import os
import sys
import asyncio
from telethon import events
from userbot import CMD_HELP
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
from platform import uname
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "I'M STUPID"
@borg.on(admin_cmd(pattern=f"hypno", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 15)
#input_str = event.pattern_match.group(1)
#if input_str == "hypno":
await event.edit("hypno")
animation_chars = [
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬛⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬛⬜⬛⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛",
"⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬛\n⬛⬜⬛⬜⬛\n⬛⬜⬜⬜⬛\n⬛⬛⬛⬛⬛",
"⬜⬜⬜\n⬜⬛⬜\n⬜⬜⬜",
"[👉🔴👈])"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 15])
@borg.on(admin_cmd(pattern=f"plane", outgoing=True))
async def _(event):
if event.fwd_from:
return
await event.edit("✈-------------")
await event.edit("-✈------------")
await event.edit("--✈-----------")
await event.edit("---✈----------")
await event.edit("----✈---------")
await event.edit("-----✈--------")
await event.edit("------✈-------")
await event.edit("-------✈------")
await event.edit("--------✈-----")
await event.edit("---------✈----")
await event.edit("----------✈---")
await event.edit("-----------✈--")
await event.edit("------------✈-")
await event.edit("-------------✈")
await asyncio.sleep(3)
await event.delete()
@borg.on(admin_cmd(pattern=r"pula"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 12)
await event.edit("🚨 👮")
animation_chars = [
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
"🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴\n🔵🔵🔵⬜⬜⬜🔴🔴🔴",
"🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵\n🔴🔴🔴⬜⬜⬜🔵🔵🔵",
f"{DEFAULTUSER} **🚨 👮♂» Oh no!C'è pula😞**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@borg.on(admin_cmd(pattern="sega", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 100)
#input_str = event.pattern_match.group(1)
#if input_str == "sega":
await event.edit("sega")
animation_chars = [
"8✊️===D",
"8=✊️==D",
"8==✊️=D",
"8===✊️D",
"8==✊️=D",
"8=✊️==D",
"8✊️===D",
"8===✊️D💦",
"8==✊️=D💦💦",
"8=✊️==D💦💦💦"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % 10])
@borg.on(admin_cmd(pattern=f"snake", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 27)
#input_str = event.pattern_match.group(1)
#if input_str == "snake":
await event.edit("snake")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◻️◼️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 27])
@borg.on(admin_cmd(pattern=f"solarsystem", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 549755813888)
#input_str = event.pattern_match.group(1)
#if input_str == "solarsystem":
await event.edit("solarsystem")
animation_chars = [
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
@borg.on(admin_cmd(pattern="sorpresa", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 17)
#input_str = event.pattern_match.group(1)
#if input_str == "sorpresa":
await event.edit("sorpresa")
animation_chars = [
"⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬜⬜⬜⬜\n👇⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬜⬜⬜\n⬜👇⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬜⬜\n⬜⬜👇⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜👇⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜👇⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜👇⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜👇⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬜\n⬜⬜⬜👇⬜\n⬜⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬛⬜⬜\n⬜⬜👇⬜⬜\n⬜⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬛⬜⬜⬜\n⬜👇⬜⬜⬜\n⬜[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬛⬜⬜⬜⬜\n👇⬜⬜⬜⬜\n[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜\n⬜⬜⬜⬜\n⬜⬜⬜⬜\n⬜⬜⬜⬜",
"⬜⬜⬜\n⬜⬜⬜\n⬜⬜⬜",
"⬜⬜\n⬜⬜",
"[🎁](https://wpml.org/wp-content/uploads/2018/12/3033640-FUCKYOU.jpg)"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 17])
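# The three handlers above all repeat the same frame-loop idea: edit one message per
# tick, cycling through a fixed frame list. A minimal commented sketch of that pattern
# follows; the helper name and its parameters ("frames", "ticks", "delay") are assumed
# here and nothing below is used by the plugin itself:
# async def _animate(event, frames, ticks, delay):
#     for i in range(ticks):
#         await asyncio.sleep(delay)
#         await event.edit(frames[i % len(frames)])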
| 47.114796
| 120
| 0.200119
|
794c7bb42251321268fbb5d9819fec86daa6cb20
| 28,949
|
py
|
Python
|
pytid/scint2ix.py
|
aldebaran1/pyTID
|
f4a2fc3a5398306573af924c74e2f12a23e60d51
|
[
"MIT"
] | null | null | null |
pytid/scint2ix.py
|
aldebaran1/pyTID
|
f4a2fc3a5398306573af924c74e2f12a23e60d51
|
[
"MIT"
] | null | null | null |
pytid/scint2ix.py
|
aldebaran1/pyTID
|
f4a2fc3a5398306573af924c74e2f12a23e60d51
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 09:55:19 2019
@author: smrak
"""
import os
import yaml
import h5py
import numpy as np
from datetime import datetime
from pyGnss import pyGnss
from pyGnss import gnssUtils as gu
from pyGnss import scintillation as scint
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.interpolate import CubicSpline
from pymap3d import aer2geodetic
from argparse import ArgumentParser
import platform
if platform.system() == 'Linux':
separator = '/'
else:
separator = '\\'
def _runningMedian(x, N):
n2 = int(N/2)
iterate = np.arange(n2, x.size-n2)
y = np.nan * np.copy(x)
for i in iterate:
y[i] = np.nanmedian(abs(x[i-n2:i+n2]))
return y
def _runningMax(x,N):
n2 = int(N/2)
iterate = np.arange(n2, x.size-n2)
y = np.nan * np.copy(x)
for i in iterate:
chunk = x[i-n2:i+n2]
if np.sum(np.isfinite(chunk)) > 1:
y[i] = np.nanmax(abs(chunk))
return y
def _removeRipple(y, E = 5, L = 300, eps=False):
std = np.nanstd(y[L:])
envelope = _runningMax(y, N=60)
std = np.nanmedian(envelope)
e = E * std
    above = np.where(abs(np.nan_to_num(y[:L])) >= e)[0]
    if above.size > 0:
        ex = above[-1] + 1
    else:
        ex = -999
if eps:
return ex, e
else:
return ex
def _mkrngs(y0, idf, gap_length=10, lim=0.05, min_length=None, max_length=None,
zero_mean=False, extend=0):
gap = np.diff(np.where(idf)[0])
i00 = np.where(idf)[0][0]
i99 = np.where(idf)[0][-1]
ixg = np.squeeze(np.argwhere(gap >= gap_length))
LL = np.sort(np.hstack((ixg, ixg+1)))
inner_limits = np.where(idf)[0][LL]
limits = np.sort(np.hstack((i00,inner_limits,i99)))
assert limits.size % 2 == 0
ranges = limits.reshape(int(limits.size/2), 2)
    # Check for ranges validity: approx. zero mean
if zero_mean:
mask = []
for i, r in enumerate(ranges):
m_hat = np.nanmean(y0[r[0]:r[1]])
if abs(m_hat) < lim: mask.append(i)
if len(mask) > 0:
mask = np.array(mask)
ranges = ranges[mask]
if min_length is not None:
mask = np.squeeze(np.diff(ranges) > min_length)
ranges = ranges[mask]
if max_length is not None:
mask = np.squeeze(np.diff(ranges) < max_length)
ranges = ranges[mask]
if len(ranges.shape) == 3 and ranges.shape[0] != 0: ranges = ranges[0]
try:
if extend > 0:
start = ranges[:,0]
ixstart = start > extend + 1
ranges[ixstart,0] -= extend
stop = ranges[:,1]
ixstop = stop < (y0.size - extend - 1)
ranges[ixstop, 1] += extend
except:
pass
return ranges
def _scintillationMask(X, X_hat, X_eps, N_median=60, min_length=60,
gap_close=60*5, extend=0,
diagnostic=False):
#
# Empty output arrays
events = np.array([])
Y = np.copy(X)
# Detect the events
# SIGMA_TEC
    # Reject suspicious data: np.nanmedian(X) / X_hat < 2, i.e. the median value of
    # an individual link has to be reasonably close to the median of the receiver
if np.nanmedian(X) / X_hat < 2:
X_med = _runningMedian(X, N_median)
idx = (np.nan_to_num(X_med) > np.nan_to_num(X_eps))
idquet = np.ones(X.size, dtype = bool)
if np.sum(idx) > 0:
events = _mkrngs(X_med, idx, gap_length = 10,
min_length = min_length,
zero_mean = False, extend=extend)
if events.size == 0:
Y[idquet] = np.nan
if diagnostic:
return Y, X_med
else:
return Y
if gap_close is not None:
if len(events.shape) == 3: events = events[0]
if events.shape[0] > 1:
gaps = np.empty(events.shape[0]-1, dtype=np.int32)
for i in np.arange(1, events.shape[0]):
gaps[i-1] = events[i, 0] - events[i-1, 1]
if events[i, 0] - events[i-1, 1] < gap_close:
events = np.vstack((events, [events[i-1, 1], events[i, 0]]))
if len(events.shape) == 3: events = events[0]
# Remove questionably low ranges. Median must be above mean
event_mask = np.zeros(events.shape[0], dtype=bool)
for sci, sct in enumerate(events):
event_mask[sci] = True
# Skip if there are no scintillation events at this place
if events.size > 0:
events = events[event_mask]
for r in events:
idquet[r[0]:r[1]] = False
Y[idquet] = np.nan
if diagnostic:
return Y, X_med
else:
return Y
def _partialProcess(dt,r, x, fs=1, fc=0.1, hpf_order=6,
plot_ripple = False,
plot_outlier = False):
idf = np.isfinite(x)
# If there are NaNs in the interval, do a cubic spline.
# Max gap is 10 seconds set by the "make ranges routine"
# 1. dTEC Split
if np.sum(np.isnan(x)) > 0:
x0 = np.where(idf)[0]
x1 = np.arange(x.size)
CSp = CubicSpline(x0, x[idf])
x_cont = CSp(x1)
else:
x_cont = np.copy(x)
# 2. Tec/snr scintillation (high-pass) filtering!
tec_hpf = gu.hpf(x_cont, fs=fs, order=hpf_order, fc=fc)
tec_hpf[~idf] = np.nan
# 3. Remove initial ripple on the scintillation time-series
sT_exit, eps = _removeRipple(tec_hpf, E=1.5, L=300, eps = True)
if plot_ripple:
plt.figure()
plt.plot(dt[r[0]:r[1]], tec_hpf, 'b')
plt.plot([dt[r[0]], dt[r[1]]], [eps, eps], '--r')
if sT_exit != -999: plt.plot(dt[r[0]:r[1]][:sT_exit], tec_hpf[:sT_exit], 'xr')
if sT_exit != -999:
tec_hpf[:sT_exit] = np.nan
tec_hpf_original = np.copy(tec_hpf)
# 4. Outlier detection and removal. Still on the scintillation time-series.
# 4.1 TEC Scintillation
envelope = _runningMax(abs(tec_hpf), N = 10)
median_envelope = _runningMedian(envelope, N = 120)
outlier_margin = median_envelope + 5 * np.nanstd(tec_hpf)
idoutlier = np.nan_to_num(abs(tec_hpf)) > outlier_margin
outlier_mask = np.zeros(tec_hpf.size, dtype = bool)
if np.nansum(idoutlier) > 0:
outlier_intervals = _mkrngs(tec_hpf, idoutlier,
max_length = 60, gap_length = 10,
zero_mean = False)
if outlier_intervals.size > 0:
if len(outlier_intervals.shape) == 3:
outlier_intervals = outlier_intervals[0]
for out_ran in outlier_intervals:
ratio = np.median(envelope[out_ran[0]:out_ran[1]+1]) / np.median(median_envelope[out_ran[0]:out_ran[1]+1])
if np.round(ratio,1) >= 3:
backward = 10 if out_ran[0] > 10 else out_ran[0]
forward = 10 if tec_hpf.size - out_ran[1] > 10 else -1
outlier_mask[out_ran[0]-backward : out_ran[1]+1+forward] = True
if plot_outlier:
plt.figure(figsize = [8,5])
# plt.title('2017-5-28 / Rxi: {}, svi: {}'.format(irx, isv))
plt.plot(dt[r[0] : r[1]], tec_hpf, 'b', label = '$\delta TEC_{0.1 Hz}$')
plt.plot(dt[r[0] : r[1]], median_envelope, 'g', label = 'env = <$\widehat{\delta TEC}>|_{10s}$')
        plt.plot(dt[r[0] : r[1]], outlier_margin, '--r', label = '$\epsilon$ = env + 5$\cdot \sigma(\delta TEC)|_{60s}$')
plt.plot(dt[r[0] : r[1]], -outlier_margin, '--r')
plt.plot(dt[r[0] : r[1]][outlier_mask], tec_hpf[outlier_mask], 'xr')
plt.ylabel('$\delta$ TEC [TECu]')
plt.xlabel('Time [UTC]')
plt.grid(axis='both')
plt.legend()
tec_hpf[outlier_mask] = np.nan
return tec_hpf, tec_hpf_original, outlier_mask
def ranges(x, idf, min_gap=10, gap_length=10, min_length=30*60, zero_mean=False):
gap = np.diff(np.where(idf)[0])
intervals = []
if np.argwhere(gap >= min_gap).size > 0:
intervals = _mkrngs(x, idf, gap_length=gap_length,
min_length=min_length, zero_mean=zero_mean)
else:
intervals = np.array([ [np.where(idf)[0][0],
np.where(idf)[0][-1]+1] ])
if len(intervals.shape) == 3:
try:
intervals = intervals[0]
except:
intervals = np.array([])
return intervals
def _toLLT(rxp=None, az=None, el=None, H=350):
"""
Default height of the IPP is 350 km.
"""
H *= 1e3
r = H / np.sin(np.radians(el))
lat, lon, alt = aer2geodetic(az=az, el=el, srange=r, lat0=rxp[0], lon0=rxp[1], h0=rxp[2])
return lat, lon
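# Example use of _toLLT (all values assumed): a satellite at 45 deg elevation seen from
# a receiver at (42 N, -71 E, 100 m) is mapped along a slant range of
# 350 km / sin(45 deg) ~ 495 km onto the 350 km ionospheric pierce point:
#   ipp_lat, ipp_lon = _toLLT(rxp=(42.0, -71.0, 100.0), az=180.0, el=45.0, H=350)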
def process(fn, odir=None, cfg=None, log=None, irxforce=None):
############################### Open data ##################################
if irxforce is not None:
irxforce = int(irxforce)
if odir is None:
odir = os.path.split(fn)[0] + separator
if cfg is None:
plot_ripple = 0
plot_outlier = 0
savefig = 1
figfolder = os.path.join(odir, 'scint_plots' + separator)
plot = 0
fs = 1
fc = 0.1
hpf_order = 6
H = 350
else:
assert os.path.splitext(cfg)[1] in ('.yml', '.yaml')
        stream = yaml.safe_load(open(cfg, 'r'))
plot_ripple = stream.get('plot_ripple')
plot_outlier = stream.get('plot_outlier')
plot = stream.get('plot')
savefig = stream.get('savefig')
figfolder = stream.get('figfolder')
if figfolder is None:
figfolder = os.path.join(odir, 'scint_plots' + separator)
fs = stream.get('fs')
fc = stream.get('fc')
hpf_order = stream.get('hpf_order')
H = stream.get('alt_km')
# Output file
if odir is None:
odir = os.path.split(fn)[0] + separator
ofn = odir + 'ix_' + '_'.join(os.path.split(fn)[1].split('.')[:2]) + '_{}km.h5'.format(H)
# Dealing with duplicate file names
if os.path.exists(ofn):
head = os.path.splitext(ofn)[0]
c = 0
while os.path.exists(ofn):
try:
c = int(os.path.splitext(ofn)[0].split('_')[-1])
c += 1
except:
c += 1
ofn = head + '_' + str(c) + '.h5'
if log:
logfn = os.path.splitext(ofn)[0] + '.log'
LOG = open(logfn, 'w')
LOG.close()
# Open data file
f = h5py.File(fn, 'r')
time = f['obstimes'][:]
dt = np.array([datetime.utcfromtimestamp(t) for t in time])
if irxforce is None:
rnx = f['el'].shape[2]
else:
rnx = 1
#rnx = 5
svx = f['el'].shape[1]
rxpall = f['rx_positions'][:]
# New arrays
ipp = np.nan * np.ones((dt.size, svx, rnx, 2)) # [time, SV, Rx, [lat, lon]]
sigma_tec = np.nan * np.ones((dt.size, svx, rnx))
snr4 = np.nan * np.ones((dt.size, svx, rnx))
s4 = np.nan * np.ones((dt.size, svx, rnx))
roti = np.nan * np.ones((dt.size, svx, rnx))
if plot:
rot = np.nan * np.ones((dt.size, svx, rnx))
# tec_hpf = np.nan * np.ones((dt.size, svx, rnx))
# Bookkeeping
scint_limits = np.nan * np.zeros((rnx,2))
receiver_std = np.nan * np.zeros((rnx,2))
receiver_std_median = np.nan * np.zeros((rnx,2))
for irx in range(rnx):
if log:
with open(logfn, 'a') as LOG:
LOG.write('Processing Rx/all #{}/{}\n'.format(irx+1, rnx))
LOG.close()
else:
print ('Processing Rx/all #{}/{}'.format(irx+1, rnx))
if plot:
tec_hpf_all = np.nan * np.ones((dt.size, svx))
snr_hpf_all = np.nan * np.ones((dt.size, svx))
sigma_tec_all = np.nan * np.ones((dt.size, svx))
snr4_all = np.nan * np.ones((dt.size, svx))
# Reset to zero for each iteration
tec_outliers = np.zeros((dt.size, svx), dtype=bool)
snr_outliers = np.zeros((dt.size, svx), dtype=bool)
try:
for isv in range(svx):
try:
if irxforce is not None:
el = f['el'][:,isv,irxforce]
az = f['az'][:,isv,irxforce]
res = f['res'][:,isv,irxforce]
snr = f['snr'][:,isv,irxforce]
rxp = rxpall[irxforce]
else:
el = f['el'][:,isv,irx]
az = f['az'][:,isv,irx]
res = f['res'][:,isv,irx]
snr = f['snr'][:,isv,irx]
rxp = rxpall[irx]
# Compute location of the IPP
lat, lon = _toLLT(rxp, az=az, el=el, H=H)
# Get Mapping Function
F = pyGnss.getMappingFunction(el, h = 350)
# Stack into the output array
ipp[:, isv, irx, 0] = lat
ipp[:, isv, irx, 1] = lon
except Exception as e:
if log:
with open(logfn, 'a') as LOG:
LOG.write('{}\n'.format(e))
LOG.close()
else:
print (e)
                # Check for a minimum length of valid observables == 30 min
if np.nansum(np.isfinite(res)) < 30 * 60:
continue
tec_hpf_copy = np.nan * np.copy(res)
rot_copy = np.nan * np.copy(res)
roti_copy = np.nan * np.copy(res)
sigma_tec_copy = np.nan * np.copy(res)
snr4_copy = np.nan * np.copy(snr)
s4_copy = np.nan * np.copy(snr)
tec_hpf_original = np.nan * np.copy(res)
snr_hpf_original = np.nan * np.copy(res)
                # 0.0 To ranges: Multiple visits of a satellite per day.
# New interval for a gap bigger than 10 samples.
# Minimum length of interval is 30 minutes
# Create empty arrays
idf_tec = np.isfinite(res)
idf_snr = np.isfinite(snr)
# 0.1 Do for TEC
try:
tec_ranges = ranges(res, idf_tec, min_gap=10, gap_length=10, min_length=30*60, zero_mean=True)
except:
tec_ranges = np.array([])
try:
snr_ranges = ranges(snr, idf_snr, min_gap=10, gap_length=10, min_length=30*60)
except:
snr_ranges = np.array([])
# Process TEC per intervals
if tec_ranges.size > 0:
for ith_range, r in enumerate(tec_ranges):
                        # Remove too-short ranges if they accidentally occur
if np.diff(r) < 10: continue
try:
chunk = res[r[0] : r[1]]
tec_hpf, tec_hpf_original[r[0]:r[1]], tec_mask = _partialProcess(dt, r, chunk, fs=fs, fc=fc, hpf_order=hpf_order,
plot_ripple=plot_ripple, plot_outlier=plot_outlier)
tec_outliers[r[0] : r[1], isv] = tec_mask
sigma_tec_copy[r[0] : r[1]] = scint.sigmaTEC(tec_hpf, N = 60)
tec_hpf_copy[r[0] : r[1]] = tec_hpf
tmp_diff = np.diff(chunk)
tmp_diff[tec_mask[1:]] = np.nan
rot_copy[r[0]+1 : r[1]] = tmp_diff
roti_copy[r[0]+1 : r[1]] = scint.sigmaTEC(np.diff(chunk), N=60)
except Exception as e:
if log:
with open(logfn, 'a') as LOG:
LOG.write('{}\n'.format(e))
LOG.close()
else:
print (e)
if snr_ranges.size > 0:
for ith_range, r in enumerate(snr_ranges):
                        # Remove too-short ranges if they accidentally occur
if np.diff(r) < 60: continue
try:
Schunk = snr[r[0] : r[1]].astype(np.float64)
snr_hpf, snr_hpf_original[r[0]:r[1]], snr_mask = _partialProcess(dt, r, Schunk, fs=fs, fc=fc, hpf_order=hpf_order,
plot_ripple=plot_ripple, plot_outlier=plot_outlier)
snr_outliers[r[0] : r[1], isv] = snr_mask
snr4_copy[r[0] : r[1]] = scint.sigmaTEC(snr_hpf, N = 60)
s4_copy[r[0] : r[1]] = scint.AmplitudeScintillationIndex(10**(Schunk/10), 60)
except Exception as e:
if log:
with open(logfn, 'a') as LOG:
LOG.write('{}\n'.format(e))
LOG.close()
else:
print (e)
# Save scintillation indices
sigma_tec[:, isv, irx] = sigma_tec_copy
snr4[:, isv, irx] = (snr4_copy * (F**0.9))
s4[:, isv, irx] = (s4_copy * (F**0.9))
roti[:, isv, irx] = roti_copy
if plot:
rot[:, isv, irx] = rot_copy
tec_hpf_all[:,isv] = tec_hpf_original
snr_hpf_all[:,isv] = snr_hpf_original
sigma_tec_all[:,isv] = sigma_tec_copy
snr4_all[:,isv] = snr4_copy
# 4. Define the scintillation event masks per receiver
# 4.1 Define limits
# sigma_tec: limit ------------------------------------------------------ #
st_std = np.nanstd(sigma_tec[:, :, irx])
st_std_tec = np.nanstd(sigma_tec[:, :, irx])
st_hat = np.nanmedian(sigma_tec[:, :, irx])
st_eps = 2.5 * st_hat # + st_std
# SNR4 limit
s4_std = np.nanstd(snr4[:, :, irx])
s4_hat = np.nanmedian(snr4[:, :, irx])
s4_eps = 2.5 * s4_hat # + st_std
# 4.2 Store the limits ----------------------------------------------- #
scint_limits[irx, 0] = st_eps
receiver_std[irx, 0] = st_std
receiver_std_median[irx, 0] = st_std_tec
# ----------------------------------------------------------------------- #
scint_limits[irx, 1] = s4_eps
receiver_std[irx, 1] = s4_std
receiver_std_median[irx, 1] = s4_std
# ----------------------------------------------------------------------- #
for isv in range(svx):
if log:
with open(logfn, 'a') as LOG:
LOG.write('Processing scintillation sv/all {}/{}\n'.format(isv+1, svx))
LOG.close()
else:
print ('Processing scintillation sv/all {}/{}'.format(isv+1, svx))
sigma_tec[:,isv,irx] = _scintillationMask(sigma_tec[:,isv,irx], X_hat=st_hat,
X_eps=st_eps, extend=0, N_median=60,
min_length=120, gap_close=5*60)
snr4[:,isv,irx] = _scintillationMask(snr4[:,isv,irx], X_hat=s4_hat, X_eps=s4_eps,
extend=0, min_length=120, gap_close=5*60)
#######################################################################
                # Plot for reference
if plot:
try:
if np.nansum(np.isfinite(sigma_tec_all[:,isv])) > 1:
print ("Plotting PRN:{}".format(isv+1))
fig = plt.figure(figsize=[15,12])
ax1 = fig.add_subplot(421)
ax12 = ax1.twinx()
if irxforce is None:
ax1.plot(dt, f['res'][:,isv,irx], 'b', label='RXi {}; PRN {}'.format(irx, isv+1))
ax12.plot(dt, f['el'][:,isv,irx], 'g')
else:
ax1.plot(dt, f['res'][:,isv,irxforce], 'b', label='RXi {}; PRN {}'.format(irx, isv+1))
ax12.plot(dt, f['el'][:,isv,irxforce], 'g', lw=0.5)
ax1.set_ylabel('$\Delta$ TEC')
ax1.grid(axis='both')
ax12.set_ylabel('Elevation', color='g')
ax12.tick_params(axis='y', colors='green')
ax1.legend()
ax1.set_xticklabels([])
# Second
ax2 = fig.add_subplot(423, sharex=ax1)
ax2.plot(dt, tec_hpf_all[:,isv], 'b')
ax2.plot(dt[tec_outliers[:,isv]], tec_hpf_all[:,isv][tec_outliers[:,isv]], 'xr')
ax2.set_ylabel('$\delta TEC_{0.1 Hz}$')
ax2.grid(axis='both')
# Third
ax3 = fig.add_subplot(427, sharex=ax1)
ax3.plot(dt, sigma_tec_all[:,isv], '.b')
i0 = np.argwhere(np.isfinite(sigma_tec_all[:,isv]))[0]
i1 = np.argwhere(np.isfinite(sigma_tec_all[:,isv]))[-1]
ax3.plot([dt[i0], dt[i1]], [st_eps, st_eps], '--r')
if sum(np.isfinite(sigma_tec[:,isv,irx])) > 0:
ax3.plot(dt, sigma_tec[:,isv,irx], '.g')
ax3.set_ylabel('$\sigma_{TEC}$ [TECu]')
# ax3.set_xlim([datetime(2017,9,8,0), datetime(2017,9,8,5)])
ax3.grid(axis='both')
ax3.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
######################### SNR
ax11 = fig.add_subplot(422, sharex=ax1)
if irxforce is None:
ax11.plot(dt, f['snr'][:,isv,irx], 'b', label='RXi {}; PRN {}'.format(irx, isv+1))
else:
ax11.plot(dt, f['snr'][:,isv,irxforce], 'b', label='RXi {}; PRN {}'.format(irx, isv+1))
ax11.set_ylabel('SNR')
ax11.grid(axis='both')
ax11.legend()
# Second
ax21 = fig.add_subplot(424, sharex=ax1)
ax21.plot(dt, snr_hpf_all[:,isv], 'b')
                        ax21.plot(dt[snr_outliers[:,isv]], snr_hpf_all[:,isv][snr_outliers[:,isv]], 'xr')
ax21.set_ylabel('$SNR4_{0.1 Hz}$')
ax21.grid(axis='both')
# Third
ax31 = fig.add_subplot(426, sharex=ax1)
ax31.plot(dt, snr4_all[:,isv], '.b')
i0 = np.argwhere(np.isfinite(snr4_all[:,isv]))[0]
i1 = np.argwhere(np.isfinite(snr4_all[:,isv]))[-1]
ax31.plot([dt[i0], dt[i1]], [s4_eps, s4_eps], '--r')
if sum(np.isfinite(snr4[:,isv,irx])) > 0:
ax31.plot(dt, snr4[:,isv,irx], '.g')
ax31.plot(dt, s4[:,isv,irx], 'k', lw=0.5)
ax31.set_ylabel('SNR$_4$ [dB]')
ax31.grid(axis='both')
ax31.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
prefix = dt[0].strftime("%Y%m%d")
svf = '{}_rxi{}_prni{}'.format(prefix, irx,isv)
ax1.set_title('E($\sigma_T$) = {}'.format(st_eps))
ax11.set_title('E(SNR$_4$) = {}'.format(s4_eps))
ax41 = fig.add_subplot(428, sharex=ax1)
ax41.plot(dt, roti[:,isv,irx], '.b')
ax41.set_ylabel('ROTI [TECu]')
ax41.grid(axis='both')
ax41.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
ax42 = fig.add_subplot(425, sharex=ax1)
ax42.plot(dt, rot[:,isv,irx], 'b')
ax42.set_ylabel('ROT [TECu]')
ax42.grid(axis='both')
ax42.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
if savefig:
if not os.path.exists(figfolder):
import subprocess
if platform.system() == 'Linux':
subprocess.call('mkdir -p {}'.format(figfolder), shell=True, timeout=5)
else:
subprocess.call('mkdir "{}"'.format(figfolder), shell=True, timeout=5)
plt.savefig(figfolder+'{}.png'.format(svf), dpi=100)
plt.close(fig)
else:
print ("Not enoughd data from PRN:{}".format(isv+1))
except Exception as e:
print (e)
except Exception as e:
print (e)
if irxforce is not None:
break
rxn = f['rx_name'][:]
rxm = f['rx_model'][:]
f.close()
# Save to new hdf5 file
if irxforce is None:
if log:
with open(logfn, 'a') as LOG:
LOG.write('Saving data to : \n {}'.format(ofn))
LOG.close()
else:
print ('Saving data to : \n {}'.format(ofn))
f = h5py.File(ofn, 'w')
gr = f.create_group('data')
gr.create_dataset('rx_name', data = rxn, dtype='S10')
gr.create_dataset('rx_model', data = rxm, dtype='S25')
gr.create_dataset('time', data = time, compression = 'gzip', compression_opts = 9)
gr.create_dataset('sigma_tec', data = sigma_tec, compression = 'gzip', compression_opts = 9)
gr.create_dataset('snr4', data = snr4, compression = 'gzip', compression_opts = 9)
gr.create_dataset('s4', data = s4, compression = 'gzip', compression_opts = 9)
gr.create_dataset('roti', data = roti, compression = 'gzip', compression_opts = 9)
gr.create_dataset('ipp', data = ipp, compression = 'gzip', compression_opts = 9)
gr.create_dataset('rxp', data = rxpall, compression = 'gzip', compression_opts = 9)
gr.create_dataset('scint_limits', data = scint_limits, compression = 'gzip', compression_opts = 9)
gr.create_dataset('rxstd', data = receiver_std, compression = 'gzip', compression_opts = 9)
gr.create_dataset('rxstdmedian', data = receiver_std_median, compression = 'gzip', compression_opts = 9)
gr.attrs[u'altitude_km'] = H
gr.attrs[u'hpf_fc'] = fc
gr.attrs[u'hpf_order'] = hpf_order
f.close()
if log:
with open(logfn, 'a') as LOG:
LOG.write('Successfully saved!')
LOG.close()
else:
print ('Successfully saved!')
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('infile')
p.add_argument('-o', '--odir', help = 'Output directory ', default=None)
p.add_argument('--cfg', help = 'Path to the config (yaml) file', default = None)
p.add_argument('--log', help = 'If you prefer to make a .log file?', action = 'store_true')
p.add_argument('--irx', help = 'Process one rx only', default=None)
P = p.parse_args()
process(fn=P.infile, odir=P.odir, cfg=P.cfg, log=P.log, irxforce=P.irx)
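# Example invocation (file names and paths here are hypothetical):
#   python scint2ix.py /data/gnss/gps170908.h5 -o /data/gnss/ix/ --cfg scint.yaml --log
# A config file, if supplied, is expected to provide the keys read above, e.g.:
#   plot_ripple: 0
#   plot_outlier: 0
#   plot: 0
#   savefig: 0
#   figfolder: null
#   fs: 1
#   fc: 0.1
#   hpf_order: 6
#   alt_km: 350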
| 45.80538
| 144
| 0.465785
|
794c7c8283e8db510fcb4a2cf4d9b63f5af6d56a
| 353
|
py
|
Python
|
pytoast/tests/coverage/all_success/steps/getting_started.py
|
daniloster/pytoast
|
b0ceb02134aa4249888faeb8b5617e562aba6002
|
[
"MIT"
] | null | null | null |
pytoast/tests/coverage/all_success/steps/getting_started.py
|
daniloster/pytoast
|
b0ceb02134aa4249888faeb8b5617e562aba6002
|
[
"MIT"
] | null | null | null |
pytoast/tests/coverage/all_success/steps/getting_started.py
|
daniloster/pytoast
|
b0ceb02134aa4249888faeb8b5617e562aba6002
|
[
"MIT"
] | null | null | null |
from pytoast.decorators import step
@step('^I have given passed$')
def i_have_given_passed_step():
pass
@step('^I pass by when$')
def i_pass_by_when_step():
pass
@step(r'^should be passed (?P<name>(\w+))$')
def should_be_passed_step(name=''):
assert name in ['Leticia',
'Danilo'], 'Incorrect name "{}"'.format(name)
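# A sketch (assumed, not taken from the repo) of feature lines these step patterns
# would match:
#   I have given passed
#   I pass by when
#   should be passed Danilo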
| 19.611111
| 65
| 0.631728
|