#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
try:
from urllib import urlretrieve # python2
except ImportError:
from urllib.request import urlretrieve
import shutil
import time
PYTHON_MAJOR_VER = sys.version_info.major
BASE_URL = 'https://home.apache.org/~mikemccand'
BASE_URL2 = 'https://home.apache.org/~sokolov'
DATA_FILES = [
('enwiki-20120502-lines-1k-fixed-utf8-with-random-label.txt.lzma', BASE_URL),
('enwiki-20120502-lines-1k-100d.vec', BASE_URL2),
('wikimedium500.tasks', BASE_URL),
('glove.6B.zip', 'https://downloads.cs.stanford.edu/nlp/data/')
]
USAGE= """
Usage: python setup.py [-download]
Options:
-download downloads a 5GB linedoc file
"""
DEFAULT_LOCAL_CONST = """
BASE_DIR = '%(base_dir)s'
BENCH_BASE_DIR = '%(base_dir)s/%(cwd)s'
"""
def runSetup(download):
cwd = os.getcwd()
parent, base = os.path.split(cwd)
data_dir = os.path.join(parent, 'data')
idx_dir = os.path.join(parent, 'indices')
if not os.path.exists(data_dir):
print('create data directory at %s' % (data_dir))
os.mkdir(data_dir)
else:
print('data directory already exists %s' % (data_dir))
if not os.path.exists(idx_dir):
os.mkdir(idx_dir)
print('create indices directory at %s' % (idx_dir))
else:
print('indices directory already exists %s' % (idx_dir))
pySrcDir = os.path.join(cwd, 'src', 'python')
local_const = os.path.join(pySrcDir, 'localconstants.py')
if not os.path.exists(local_const):
f = open(local_const, 'w')
try:
f.write(DEFAULT_LOCAL_CONST % ({'base_dir' : parent, 'cwd' : base}))
finally:
f.close()
else:
print('localconstants.py already exists - skipping')
local_run = os.path.join(pySrcDir, 'localrun.py')
example = os.path.join(pySrcDir, 'example.py')
if not os.path.exists(local_run):
shutil.copyfile(example, local_run)
else:
print('localrun.py already exists - skipping')
if download:
for filename, base_url in DATA_FILES:
url = base_url + '/' + filename
target_file = os.path.join(data_dir, filename)
if os.path.exists(target_file):
print('file %s already exists - skipping' % (target_file))
else:
print('download ', url, ' - might take a long time!')
Downloader(url, target_file).download()
print('')
print('downloading %s to %s done ' % (url, target_file))
if target_file.endswith('.bz2') or target_file.endswith('.lzma') or target_file.endswith('.zip'):
print('NOTE: make sure you decompress %s' % (target_file))
print('setup successful')
class Downloader:
HISTORY_SIZE = 100
def __init__(self, url, target_path):
self.__url = url
self.__target_path = target_path
Downloader.times = [time.time()] * Downloader.HISTORY_SIZE
Downloader.sizes = [0] * Downloader.HISTORY_SIZE
Downloader.index = 0
def download(self):
urlretrieve(self.__url, self.__target_path, Downloader.reporthook)
@staticmethod
def reporthook(count, block_size, total_size):
current_time = time.time()
current_size = long(count * block_size) if PYTHON_MAJOR_VER < 3 else int(count * block_size)
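        # Compare against the sample recorded HISTORY_SIZE callbacks ago,
        # yielding a moving-average speed over the last 100 blocks rather
        # than an instantaneous one.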
last_time = Downloader.times[Downloader.index]
last_size = Downloader.sizes[Downloader.index]
delta_size = current_size - last_size
delta_time = current_time - last_time
Downloader.times[Downloader.index] = current_time
Downloader.sizes[Downloader.index] = current_size
Downloader.index = (Downloader.index + 1) % Downloader.HISTORY_SIZE
        speed = float(delta_size) / (1024 * max(delta_time, 1e-6))  # guard against a zero time delta
percent = int(current_size * 100 / total_size)
sys.stdout.write('\r ')
# sys.stdout.write('(%d, %d), (%d, %d), (%d, %d) ' % (current_size, current_time, last_size, last_time, delta_size, delta_time))
sys.stdout.write('downloading ... %d%%, %.2f MB/%.2fMB, speed %.2f KB/s' % \
(percent, float(current_size) / (1024 * 1024), float(total_size) / (1024 * 1024), speed))
sys.stdout.flush()
if __name__ == '__main__':
if '-help' in sys.argv or '--help' in sys.argv:
print(USAGE)
else:
download = '-download' in sys.argv
runSetup(download)
|
"""
Russian-specific forms helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.ru.ru_regions import RU_COUNTY_CHOICES, RU_REGIONS_CHOICES
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r'^(?:[78]-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
class RUCountySelect(Select):
"""
A Select widget that uses a list of Russian Counties as its choices.
"""
def __init__(self, attrs=None):
super(RUCountySelect, self).__init__(attrs, choices=RU_COUNTY_CHOICES)
class RURegionSelect(Select):
"""
A Select widget that uses a list of Russian Regions as its choices.
"""
def __init__(self, attrs=None):
super(RURegionSelect, self).__init__(attrs, choices=RU_REGIONS_CHOICES)
class RUPostalCodeField(RegexField):
"""
Russian Postal code field.
Format: XXXXXX, where X is any digit, and first digit is not zero.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUPostalCodeField, self).__init__(r'^\d{6}$',
max_length, min_length, *args, **kwargs)
class RUPassportNumberField(RegexField):
"""
Russian internal passport number format:
    XXXX XXXXXX, where X is any digit.
"""
default_error_messages = {
'invalid': _('Enter a passport number in the format XXXX XXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUPassportNumberField, self).__init__(r'^\d{4} \d{6}$',
max_length, min_length, *args, **kwargs)
class RUAlienPassportNumberField(RegexField):
"""
Russian alien's passport number format:
    XX XXXXXXX, where X is any digit.
"""
default_error_messages = {
'invalid': _('Enter a passport number in the format XX XXXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(RUAlienPassportNumberField, self).__init__(r'^\d{2} \d{7}$',
max_length, min_length, *args, **kwargs)
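# A minimal usage sketch (hypothetical form; assumes a configured Django
# project):
#
#     from django import forms
#
#     class AddressForm(forms.Form):
#         postal_code = RUPostalCodeField()
#
#     AddressForm(data={"postal_code": "101000"}).is_valid()  # True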
|
# -*- coding: utf-8 -*-
"""
(beta) Channels Last Memory Format in PyTorch
*******************************************************
**Author**: `Vitaly Fedyunin <https://github.com/VitalyFedyunin>`_
What is Channels Last
---------------------
Channels last memory format is an alternative way of ordering NCHW tensors in memory while preserving the dimension ordering. Channels last tensors are ordered in such a way that channels become the densest dimension (aka storing images pixel-per-pixel).
For example, the classic (contiguous) storage of an NCHW tensor (in our case, two 4x4 images with 3 color channels) looks like this:
.. figure:: /_static/img/classic_memory_format.png
:alt: classic_memory_format
Channels last memory format orders data differently:
.. figure:: /_static/img/channels_last_memory_format.png
:alt: channels_last_memory_format
PyTorch supports memory formats (and provides backward compatibility with existing models, including eager, JIT, and TorchScript) by utilizing the existing strides structure.
For example, a 10x3x16x16 batch in channels last format will have strides equal to (768, 1, 48, 3).
"""
######################################################################
# Channels last memory format is implemented for 4D NCHW Tensors only.
#
######################################################################
# Memory Format API
# -----------------------
#
# Here is how to convert tensors between contiguous and channels
# last memory formats.
######################################################################
# Classic PyTorch contiguous tensor
import torch
N, C, H, W = 10, 3, 32, 32
x = torch.empty(N, C, H, W)
print(x.stride())  # Outputs: (3072, 1024, 32, 1)
######################################################################
# Conversion operator
x = x.to(memory_format=torch.channels_last)
print(x.shape)  # Outputs: (10, 3, 32, 32); dimension order is preserved
print(x.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# Back to contiguous
x = x.to(memory_format=torch.contiguous_format)
print(x.stride()) # Outputs: (3072, 1024, 32, 1)
######################################################################
# Alternative option
x = x.contiguous(memory_format=torch.channels_last)
print(x.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# Format checks
print(x.is_contiguous(memory_format=torch.channels_last))  # Outputs: True
######################################################################
# There are minor differences between the two APIs ``to`` and
# ``contiguous``. We suggest sticking with ``to`` when explicitly
# converting the memory format of a tensor.
#
# For general cases the two APIs behave the same. However, in the
# special cases of a 4D tensor with size ``NCHW`` where either ``C==1``
# or ``H==1 && W==1``, only ``to`` generates a proper stride to
# represent the channels last memory format.
#
# This is because in either of the two cases above, the memory format
# of a tensor is ambiguous, i.e. a contiguous tensor with size
# ``N1HW`` is both ``contiguous`` and channels last in memory storage.
# Therefore, such tensors are already considered ``is_contiguous``
# for the given memory format, and hence the ``contiguous`` call becomes
# a no-op and does not update the stride. On the contrary, ``to``
# restrides the tensor with meaningful strides on dimensions whose
# sizes are 1 in order to properly represent the intended memory
# format.
special_x = torch.empty(4, 1, 4, 4)
print(special_x.is_contiguous(memory_format=torch.channels_last))  # Outputs: True
print(special_x.is_contiguous(memory_format=torch.contiguous_format))  # Outputs: True
######################################################################
# The same thing applies to the explicit permutation API ``permute``. In
# the special cases where ambiguity could occur, ``permute`` does not
# guarantee to produce strides that properly carry the intended
# memory format. We suggest using ``to`` with an explicit memory format
# to avoid unintended behavior.
#
# As a side note, in the extreme case where the three non-batch
# dimensions are all equal to ``1`` (``C==1 && H==1 && W==1``), the
# current implementation cannot mark a tensor as channels last memory
# format.
######################################################################
# Create as channels last
x = torch.empty(N, C, H, W, memory_format=torch.channels_last)
print(x.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# ``clone`` preserves memory format
y = x.clone()
print(y.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# ``to``, ``cuda``, ``float`` ... preserve memory format
if torch.cuda.is_available():
y = x.cuda()
    print(y.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# ``empty_like``, ``*_like`` operators preserve memory format
y = torch.empty_like(x)
print(y.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# Pointwise operators preserve memory format
z = x + y
print(z.stride())  # Outputs: (3072, 1, 96, 3)
######################################################################
# Conv and Batchnorm modules using the cudnn backend support channels
# last (this only works for cuDNN >= 7.6). Convolution modules, unlike
# binary pointwise operators, have channels last as the dominating
# memory format: iff all inputs are in contiguous memory format, the
# operator produces output in contiguous memory format. Otherwise,
# output will be in channels last memory format.
if torch.backends.cudnn.version() >= 7603:
model = torch.nn.Conv2d(8, 4, 3).cuda().half()
model = model.to(memory_format=torch.channels_last) # Module parameters need to be channels last
input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, requires_grad=True)
input = input.to(device="cuda", memory_format=torch.channels_last, dtype=torch.float16)
out = model(input)
    print(out.is_contiguous(memory_format=torch.channels_last))  # Outputs: True
######################################################################
# When an input tensor reaches an operator without channels last
# support, a permutation is automatically applied in the kernel to
# restore contiguity of the input tensor. This introduces overhead and
# stops the channels last memory format propagation. Nevertheless, it
# guarantees correct output.
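######################################################################
# A small helper for debugging format propagation (a sketch): report
# which memory formats a 4D tensor satisfies before and after an
# operator.
def describe_format(t):
    return {
        "contiguous": t.is_contiguous(),
        "channels_last": t.is_contiguous(memory_format=torch.channels_last),
    }

x_dbg = torch.randn(2, 3, 8, 8).to(memory_format=torch.channels_last)
print(describe_format(x_dbg))  # {'contiguous': False, 'channels_last': True}
print(describe_format(torch.relu(x_dbg)))  # pointwise ops preserve channels last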
######################################################################
# Performance Gains
# --------------------------------------------------------------------
# The most significant performance gains are observed on NVIDIA
# hardware with Tensor Cores support, running at reduced precision
# (``torch.float16``).
# We were able to achieve over 22% perf gains with channels last
# compared to the contiguous format, both while utilizing
# 'AMP (Automated Mixed Precision)' training scripts.
# Our scripts use the AMP implementation supplied by NVIDIA:
# https://github.com/NVIDIA/apex.
#
# ``python main_amp.py -a resnet50 --b 200 --workers 16 --opt-level O2 ./data``
# opt_level = O2
# keep_batchnorm_fp32 = None <class 'NoneType'>
# loss_scale = None <class 'NoneType'>
# CUDNN VERSION: 7603
# => creating model 'resnet50'
# Selected optimization level O2: FP16 training with FP32 batchnorm and FP32 master weights.
# Defaults for this optimization level are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Processing user overrides (additional kwargs that are not None)...
# After processing overrides, optimization options are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Epoch: [0][10/125] Time 0.866 (0.866) Speed 230.949 (230.949) Loss 0.6735125184 (0.6735) Prec@1 61.000 (61.000) Prec@5 100.000 (100.000)
# Epoch: [0][20/125] Time 0.259 (0.562) Speed 773.481 (355.693) Loss 0.6968704462 (0.6852) Prec@1 55.000 (58.000) Prec@5 100.000 (100.000)
# Epoch: [0][30/125] Time 0.258 (0.461) Speed 775.089 (433.965) Loss 0.7877287269 (0.7194) Prec@1 51.500 (55.833) Prec@5 100.000 (100.000)
# Epoch: [0][40/125] Time 0.259 (0.410) Speed 771.710 (487.281) Loss 0.8285319805 (0.7467) Prec@1 48.500 (54.000) Prec@5 100.000 (100.000)
# Epoch: [0][50/125] Time 0.260 (0.380) Speed 770.090 (525.908) Loss 0.7370464802 (0.7447) Prec@1 56.500 (54.500) Prec@5 100.000 (100.000)
# Epoch: [0][60/125] Time 0.258 (0.360) Speed 775.623 (555.728) Loss 0.7592862844 (0.7472) Prec@1 51.000 (53.917) Prec@5 100.000 (100.000)
# Epoch: [0][70/125] Time 0.258 (0.345) Speed 774.746 (579.115) Loss 1.9698858261 (0.9218) Prec@1 49.500 (53.286) Prec@5 100.000 (100.000)
# Epoch: [0][80/125] Time 0.260 (0.335) Speed 770.324 (597.659) Loss 2.2505953312 (1.0879) Prec@1 50.500 (52.938) Prec@5 100.000 (100.000)
######################################################################
# Passing ``--channels-last true`` allows running a model in the channels last format, with an observed 22% perf gain.
#
# ``python main_amp.py -a resnet50 --b 200 --workers 16 --opt-level O2 --channels-last true ./data``
# opt_level = O2
# keep_batchnorm_fp32 = None <class 'NoneType'>
# loss_scale = None <class 'NoneType'>
#
# CUDNN VERSION: 7603
#
# => creating model 'resnet50'
# Selected optimization level O2: FP16 training with FP32 batchnorm and FP32 master weights.
#
# Defaults for this optimization level are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Processing user overrides (additional kwargs that are not None)...
# After processing overrides, optimization options are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
#
# Epoch: [0][10/125] Time 0.767 (0.767) Speed 260.785 (260.785) Loss 0.7579724789 (0.7580) Prec@1 53.500 (53.500) Prec@5 100.000 (100.000)
# Epoch: [0][20/125] Time 0.198 (0.482) Speed 1012.135 (414.716) Loss 0.7007197738 (0.7293) Prec@1 49.000 (51.250) Prec@5 100.000 (100.000)
# Epoch: [0][30/125] Time 0.198 (0.387) Speed 1010.977 (516.198) Loss 0.7113101482 (0.7233) Prec@1 55.500 (52.667) Prec@5 100.000 (100.000)
# Epoch: [0][40/125] Time 0.197 (0.340) Speed 1013.023 (588.333) Loss 0.8943189979 (0.7661) Prec@1 54.000 (53.000) Prec@5 100.000 (100.000)
# Epoch: [0][50/125] Time 0.198 (0.312) Speed 1010.541 (641.977) Loss 1.7113249302 (0.9551) Prec@1 51.000 (52.600) Prec@5 100.000 (100.000)
# Epoch: [0][60/125] Time 0.198 (0.293) Speed 1011.163 (683.574) Loss 5.8537774086 (1.7716) Prec@1 50.500 (52.250) Prec@5 100.000 (100.000)
# Epoch: [0][70/125] Time 0.198 (0.279) Speed 1011.453 (716.767) Loss 5.7595844269 (2.3413) Prec@1 46.500 (51.429) Prec@5 100.000 (100.000)
# Epoch: [0][80/125] Time 0.198 (0.269) Speed 1011.827 (743.883) Loss 2.8196096420 (2.4011) Prec@1 47.500 (50.938) Prec@5 100.000 (100.000)
######################################################################
# The following models fully support channels last and show 8%-35% perf gains on Volta devices:
# ``alexnet``, ``mnasnet0_5``, ``mnasnet0_75``, ``mnasnet1_0``, ``mnasnet1_3``, ``mobilenet_v2``, ``resnet101``, ``resnet152``, ``resnet18``, ``resnet34``, ``resnet50``, ``resnext50_32x4d``, ``shufflenet_v2_x0_5``, ``shufflenet_v2_x1_0``, ``shufflenet_v2_x1_5``, ``shufflenet_v2_x2_0``, ``squeezenet1_0``, ``squeezenet1_1``, ``vgg11``, ``vgg11_bn``, ``vgg13``, ``vgg13_bn``, ``vgg16``, ``vgg16_bn``, ``vgg19``, ``vgg19_bn``, ``wide_resnet101_2``, ``wide_resnet50_2``
#
######################################################################
# Converting existing models
# --------------------------
#
# Channels last support is not limited to existing models, as any
# model can be converted to channels last and propagate the format
# through the graph as soon as the input (or certain weights) is
# formatted correctly.
#
# Need to be done once, after model initialization (or load)
model = model.to(memory_format=torch.channels_last) # Replace with your model
# Need to be done for every input
input = input.to(memory_format=torch.channels_last) # Replace with your input
output = model(input)
#######################################################################
# However, not all operators are fully converted to support channels
# last (usually returning a contiguous output instead). In the example
# posted above, layers that do not support channels last will stop the
# memory format propagation. In spite of that, as we have converted the
# model to channels last format, each convolution layer,
# which has its 4-dimensional weight in channels last memory format,
# will restore channels last memory format and benefit from faster
# kernels.
#
# But operators that do not support channels last do introduce
# overhead via permutation. Optionally, you can investigate and identify
# operators in your model that do not support channels last, if you
# want to improve the performance of the converted model.
#
# That means you need to verify the list of used operators
# against the supported operators list https://github.com/pytorch/pytorch/wiki/Operators-with-Channels-Last-support,
# or introduce memory format checks into eager execution mode and run your model.
#
# After running the code below, operators will raise an exception if the output of the
# operator doesn't match the memory format of the input.
#
#
def contains_cl(args):
for t in args:
if isinstance(t, torch.Tensor):
if t.is_contiguous(memory_format=torch.channels_last) and not t.is_contiguous():
return True
elif isinstance(t, list) or isinstance(t, tuple):
if contains_cl(list(t)):
return True
return False
def print_inputs(args, indent=""):
for t in args:
if isinstance(t, torch.Tensor):
print(indent, t.stride(), t.shape, t.device, t.dtype)
elif isinstance(t, list) or isinstance(t, tuple):
print(indent, type(t))
print_inputs(list(t), indent=indent + " ")
else:
print(indent, t)
def check_wrapper(fn):
name = fn.__name__
def check_cl(*args, **kwargs):
was_cl = contains_cl(args)
try:
result = fn(*args, **kwargs)
except Exception as e:
print("`{}` inputs are:".format(name))
print_inputs(args)
print("-------------------")
raise e
failed = False
if was_cl:
if isinstance(result, torch.Tensor):
if result.dim() == 4 and not result.is_contiguous(memory_format=torch.channels_last):
print(
"`{}` got channels_last input, but output is not channels_last:".format(name),
result.shape,
result.stride(),
result.device,
result.dtype,
)
failed = True
        if failed:
print("`{}` inputs are:".format(name))
print_inputs(args)
raise Exception("Operator `{}` lost channels_last property".format(name))
return result
return check_cl
old_attrs = dict()
def attribute(m):
old_attrs[m] = dict()
for i in dir(m):
e = getattr(m, i)
exclude_functions = ["is_cuda", "has_names", "numel", "stride", "Tensor", "is_contiguous", "__class__"]
if i not in exclude_functions and not i.startswith("_") and "__call__" in dir(e):
try:
old_attrs[m][i] = e
setattr(m, i, check_wrapper(e))
except Exception as e:
print(i)
print(e)
attribute(torch.Tensor)
attribute(torch.nn.functional)
attribute(torch)
######################################################################
# If you find an operator that doesn't support channels last tensors
# and you want to contribute, feel free to use the following developer
# guide: https://github.com/pytorch/pytorch/wiki/Writing-memory-format-aware-operators.
#
######################################################################
# The code below restores the original attributes of torch.
for (m, attrs) in old_attrs.items():
for (k, v) in attrs.items():
setattr(m, k, v)
######################################################################
# Work to do
# ----------
# There are still many things to do, such as:
#
# - Resolving ambiguity of N1HW and NC11 Tensors;
# - Testing of Distributed Training support;
# - Improving operators coverage.
#
# If you have feedback and/or suggestions for improvement, please let us
# know by creating `an issue <https://github.com/pytorch/pytorch/issues>`_.
|
"""
Server's configuration variables
"""
import six
from conans import tools
from conans.util.env_reader import get_env
from datetime import timedelta
import os
import random
import string
from conans.errors import ConanException
from conans.util.files import save, mkdir
from six.moves.configparser import ConfigParser, NoSectionError
from conans.paths import SimplePaths, conan_expand_user
from conans.server.store.disk_adapter import ServerDiskAdapter
from conans.server.store.file_manager import FileManager
from conans.util.log import logger
from conans.server.conf.default_server_conf import default_server_conf
MIN_CLIENT_COMPATIBLE_VERSION = '0.25.0'
class ConanServerConfigParser(ConfigParser):
""" defines the configuration of the server. It can load
values from environment variables or from file.
Environment variables have PRECEDENCE over file values
"""
def __init__(self, base_folder, storage_folder=None, environment=os.environ):
ConfigParser.__init__(self)
        self.optionxform = str  # keep the case of keys, important for case-sensitive user names
self.conan_folder = os.path.join(base_folder, '.conan_server')
self.config_filename = os.path.join(self.conan_folder, 'server.conf')
self._loaded = False
self.env_config = {"updown_secret": get_env("CONAN_UPDOWN_SECRET", None, environment),
"store_adapter": get_env("CONAN_STORE_ADAPTER", None, environment),
"authorize_timeout": get_env("CONAN_AUTHORIZE_TIMEOUT", None, environment),
"disk_storage_path": get_env("CONAN_STORAGE_PATH", storage_folder, environment),
"jwt_secret": get_env("CONAN_JWT_SECRET", None, environment),
"jwt_expire_minutes": get_env("CONAN_JWT_EXPIRE_MINUTES", None, environment),
"write_permissions": [],
"read_permissions": [],
"ssl_enabled": get_env("CONAN_SSL_ENABLED", None, environment),
"port": get_env("CONAN_SERVER_PORT", None, environment),
"public_port": get_env("CONAN_SERVER_PUBLIC_PORT", None, environment),
"host_name": get_env("CONAN_HOST_NAME", None, environment),
"custom_authenticator": get_env("CONAN_CUSTOM_AUTHENTICATOR", None, environment),
# "user:pass,user2:pass2"
"users": get_env("CONAN_SERVER_USERS", None, environment)}
def _get_file_conf(self, section, varname=None):
"""Gets the section from config file or raises an exception"""
try:
if not os.path.exists(self.config_filename):
jwt_random_secret = ''.join(random.choice(string.ascii_letters) for _ in range(24))
updown_random_secret = ''.join(random.choice(string.ascii_letters) for _ in range(24))
server_conf = default_server_conf.format(jwt_secret=jwt_random_secret,
updown_secret=updown_random_secret)
save(self.config_filename, server_conf)
if not self._loaded:
self._loaded = True
# To avoid encoding problems we use our tools.load
if six.PY3:
self.read_string(tools.load(self.config_filename))
else:
self.read(self.config_filename)
if varname:
section = dict(self.items(section))
return section[varname]
else:
return self.items(section)
except NoSectionError as exc:
raise ConanException("No section '%s' found" % section)
except Exception as exc:
logger.debug(exc)
raise ConanException("Invalid configuration, "
"missing %s: %s" % (section, varname))
@property
def ssl_enabled(self):
if self.env_config["ssl_enabled"]:
return self.env_config["ssl_enabled"] == "true" or \
self.env_config["ssl_enabled"] == "1"
else:
return self._get_file_conf("server", "ssl_enabled").lower() == "true" or \
self._get_file_conf("server", "ssl_enabled").lower() == "1"
@property
def port(self):
if self.env_config["port"]:
return int(self.env_config["port"])
else:
return int(self._get_file_conf("server", "port"))
@property
def public_port(self):
if self.env_config["public_port"]:
return int(self.env_config["public_port"])
elif self._get_file_conf("server", "public_port"):
return int(self._get_file_conf("server", "public_port"))
else:
return self.port
@property
def host_name(self):
return self._get_conf_server_string("host_name")
@property
def public_url(self):
protocol = "https" if self.ssl_enabled else "http"
port = ":%s" % self.public_port if self.public_port != 80 else ""
return "%s://%s%s/v1" % (protocol, self.host_name, port)
@property
def disk_storage_path(self):
"""If adapter is disk, means the directory for storage"""
if self.env_config["disk_storage_path"]:
ret = self.env_config["disk_storage_path"]
else:
try:
ret = conan_expand_user(self._get_file_conf("server", "disk_storage_path"))
except ConanException:
# If storage_path is not defined in file, use the current dir
# So tests use test folder instead of user/.conan_server
ret = os.path.dirname(self.config_filename)
        ret = os.path.normpath(ret)  # Convert to OS-specific path separators
mkdir(ret)
return ret
@property
def read_permissions(self):
if self.env_config["read_permissions"]:
return self.env_config["read_permissions"]
else:
return self._get_file_conf("read_permissions")
@property
def write_permissions(self):
if self.env_config["write_permissions"]:
return self.env_config["write_permissions"]
else:
return self._get_file_conf("write_permissions")
@property
def custom_authenticator(self):
try:
return self._get_conf_server_string("custom_authenticator")
except ConanException:
return None
@property
def users(self):
def validate_pass_encoding(password):
try:
password.encode('ascii')
except (UnicodeDecodeError, UnicodeEncodeError):
raise ConanException("Password contains invalid characters. "
"Only ASCII encoding is supported")
return password
if self.env_config["users"]:
pairs = self.env_config["users"].split(",")
return {pair.split(":")[0]: validate_pass_encoding(pair.split(":")[1]) for pair in pairs}
else:
tmp = dict(self._get_file_conf("users"))
tmp = {key: validate_pass_encoding(value) for key, value in tmp.items()}
return tmp
@property
def jwt_secret(self):
tmp = self._get_conf_server_string("jwt_secret")
if not tmp:
raise ConanException("'jwt_secret' setting is needed. Please, write a value "
"in server.conf or set CONAN_JWT_SECRET env value.")
return tmp
@property
def updown_secret(self):
tmp = self._get_conf_server_string("updown_secret")
if not tmp:
raise ConanException("'updown_secret' setting is needed. Please, write a value "
"in server.conf or set CONAN_UPDOWN_SECRET env value.")
return self._get_conf_server_string("updown_secret")
@property
def store_adapter(self):
return self._get_conf_server_string("store_adapter")
def _get_conf_server_string(self, keyname):
if self.env_config[keyname]:
return self.env_config[keyname]
else:
return self._get_file_conf("server", keyname)
@property
def authorize_timeout(self):
if self.env_config["authorize_timeout"]:
return timedelta(seconds=int(self.env_config["authorize_timeout"]))
else:
tmp = self._get_file_conf("server", "authorize_timeout")
return timedelta(seconds=int(tmp))
@property
def jwt_expire_time(self):
if self.env_config["jwt_expire_minutes"]:
return timedelta(minutes=int(self.env_config["jwt_expire_minutes"]))
else:
tmp = float(self._get_file_conf("server", "jwt_expire_minutes"))
return timedelta(minutes=tmp)
def get_file_manager(config, public_url=None, updown_auth_manager=None):
store_adapter = config.store_adapter
if store_adapter == "disk":
public_url = public_url or config.public_url
disk_controller_url = "%s/%s" % (public_url, "files")
if not updown_auth_manager:
raise Exception("Updown auth manager needed for disk controller (not s3)")
adapter = ServerDiskAdapter(disk_controller_url, config.disk_storage_path, updown_auth_manager)
paths = SimplePaths(config.disk_storage_path)
else:
# Want to develop new adapter? create a subclass of
# conans.server.store.file_manager.ServerStorageAdapter and implement the abstract methods
raise Exception("Store adapter not implemented! Change 'store_adapter' "
"variable in server.conf file to one of the available options: 'disk' ")
return FileManager(paths, adapter)
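# A minimal usage sketch (hypothetical folder path; environment variables take
# precedence over server.conf, so CONAN_SERVER_USERS overrides the [users]
# section):
#
#     config = ConanServerConfigParser(
#         "/tmp/conan_server_home",
#         environment={"CONAN_SERVER_USERS": "alice:secret1,bob:secret2"},
#     )
#     config.users  # {'alice': 'secret1', 'bob': 'secret2'}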
|
import numpy as np
from transonic import boost, Array, Type
A = Array[Type(np.float64, np.complex128), "3d"]
Af = "float[:,:,:]"
A = Af  # workaround for an issue with fused types in Cython
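# Spectral projection: subtract from (vx, vy, vz) its component along k, making
# the field divergence-free in Fourier space (k . v = 0 afterwards).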
def proj(vx: A, vy: A, vz: A, kx: Af, ky: Af, kz: Af, inv_k_square_nozero: Af):
tmp = (kx * vx + ky * vy + kz * vz) * inv_k_square_nozero
vx -= kx * tmp
vy -= ky * tmp
vz -= kz * tmp
def proj_loop(
vx: A, vy: A, vz: A, kx: Af, ky: Af, kz: Af, inv_k_square_nozero: Af
):
# type annotations only useful for Cython
n0: int
n1: int
n2: int
i0: int
i1: int
i2: int
tmp: float
n0, n1, n2 = kx.shape[0], kx.shape[1], kx.shape[2]
for i0 in range(n0):
for i1 in range(n1):
for i2 in range(n2):
tmp = (
kx[i0, i1, i2] * vx[i0, i1, i2]
+ ky[i0, i1, i2] * vy[i0, i1, i2]
+ kz[i0, i1, i2] * vz[i0, i1, i2]
) * inv_k_square_nozero[i0, i1, i2]
vx[i0, i1, i2] -= kx[i0, i1, i2] * tmp
vy[i0, i1, i2] -= ky[i0, i1, i2] * tmp
vz[i0, i1, i2] -= kz[i0, i1, i2] * tmp
proj_pythran = boost(backend="pythran")(proj)
proj_numba = boost(backend="numba")(proj)
proj_cython = boost(backend="cython")(proj)
proj_loop_pythran = boost(backend="pythran")(proj_loop)
proj_loop_numba = boost(backend="numba")(proj_loop)
proj_loop_cython = boost(backend="cython", boundscheck=False, wraparound=False)(
proj_loop
)
if __name__ == "__main__":
from textwrap import dedent
from transonic.util import print_versions, timeit_verbose
loc = locals()
print_versions()
setup = dedent(
"""
shape = n0, n1, n2 = 64, 512, 512
k0 = np.linspace(0, 100, n0)
k1 = np.linspace(0, 100, n1)
k2 = np.linspace(0, 100, n2)
K1, K0, K2 = np.meshgrid(k1, k0, k2, copy=False)
kz = np.ascontiguousarray(K0)
ky = np.ascontiguousarray(K1)
kx = np.ascontiguousarray(K2)
k_square_nozero = K0 ** 2 + K1 ** 2 + K2 ** 2
k_square_nozero[0, 0, 0] = 1e-14
inv_k_square_nozero = 1.0 / k_square_nozero
vx = np.ones(shape)
vy = np.ones(shape)
vz = np.ones(shape)
"""
)
print()
norm = timeit_verbose(
"proj(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
)
for backend in ("cython", "numba", "pythran"):
timeit_verbose(
f"proj_{backend}(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
norm=norm,
)
timeit_verbose(
f"proj_loop_{backend}(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
norm=norm,
)
|
import setuptools
with open("README.md",'r') as f:
long_description = f.read()
setuptools.setup(
name="world-time-api",
version='2020.05.31',
author='Tyler Dula',
author_email='echo.dulatr@gmail.com',
description='A wrapper for the World Time API.',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Dulatr/WorldTimeAPI",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
from math import ceil
from os import listdir
from os.path import isfile, join
from PIL import Image, ImageOps
from shutil import copyfile
import os
import json
DIRFILE = open('directories.json')
DIRS = json.load(DIRFILE)
DIRFILE.close()
POST_CONTENT = '''---
title: {}
image: {}
thumbnail: {}
caption: {}
---
'''
ROOT = DIRS['root']
THUMB_ROOT = DIRS['thumbs']
FULL_ROOT = DIRS['fulls']
POST_ROOT = DIRS['post']
# TODO: Change ROOT to newest path of instagram data-download
dated_folders = os.walk(ROOT)
dry_run = False
def resize(image):
img = Image.open(image)
    # If it is not 4:3, make it 4:3 first by adding black borders,
    # then scale to the fixed output size below
w = img.width
h = img.height
new_h = h
new_w = w
if w > h:
new_h = 3/4 * w
if w <= h:
new_w = 4/3 * h
horizontal_border = new_w - w
vertical_border = new_h - h
up = ceil(vertical_border/2)
down = ceil(vertical_border/2)
left = ceil(horizontal_border/2)
right = ceil(horizontal_border/2)
new_img = ImageOps.expand(img, (left, up, right, down), fill = 'black')
    size = (1200, 800)
    new_img = new_img.resize(size)  # scale the padded 4:3 image to the fixed output size
    new_img.save(image)
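# Worked example (illustrative numbers): a 1080x1350 portrait image gets
# new_w = 4/3 * 1350 = 1800, i.e. a 720px horizontal border (360px per side),
# yielding an 1800x1350 (4:3) image that is then scaled to 1200x800.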
for folder in dated_folders:
path_to_folder = folder[0]
if path_to_folder == ROOT:
continue
    images = [f for f in listdir(path_to_folder) if isfile(join(path_to_folder, f)) and (f.endswith('.jpg') or f.endswith('.png') or f.endswith('.jpeg'))]
post_num = 1
for image_filename in images:
# time_of_posting = 'YYYYMM'
time_of_posting = path_to_folder.split('\\')[-1]
# 'YYYYMM Art#X'
output_image_title = time_of_posting + ' Art#' + str(post_num)
output_filename = time_of_posting + '_' + str(post_num)
destination_thumbnail = THUMB_ROOT + '\\' + output_filename + '.jpg'
destination_full = FULL_ROOT + '\\' + output_filename + '.jpg'
source_image = path_to_folder + '\\' + image_filename
resize(source_image)
print('Copying', source_image, 'to', destination_thumbnail)
print('Copying', source_image, 'to', destination_full)
if not dry_run:
copyfile(source_image, destination_thumbnail)
copyfile(source_image, destination_full)
new_post_filepath = POST_ROOT + '\\' + output_filename + '.md'
print('Creating file:', new_post_filepath)
        content_image_path = 'assets/images/fulls/' + output_filename + '.jpg'
        content_thumbnail_path = 'assets/images/thumbs/' + output_filename + '.jpg'
        content = POST_CONTENT.format(output_image_title, content_image_path, content_thumbnail_path, '')
        if not dry_run:
            with open(new_post_filepath, 'w') as new_post:
                new_post.write(content)
post_num += 1
print('Success!')
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_load_balancer_backend_set_facts
short_description: Fetches details about one or multiple BackendSet resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple BackendSet resources in Oracle Cloud Infrastructure
- Lists all backend sets associated with a given network load balancer.
- If I(backend_set_name) is specified, the details of a single BackendSet will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
network_load_balancer_id:
description:
            - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer.
type: str
aliases: ["id"]
required: true
backend_set_name:
description:
- The name of the backend set to retrieve.
- "Example: `example_backend_set`"
- Required to get a specific backend_set.
type: str
aliases: ["name"]
sort_order:
description:
            - The sort order to use, either 'ASC' (ascending) or 'DESC' (descending).
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List backend_sets
oci_network_load_balancer_backend_set_facts:
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
- name: Get a specific backend_set
oci_network_load_balancer_backend_set_facts:
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
backend_set_name: example_backend_set
"""
RETURN = """
backend_sets:
description:
- List of BackendSet resources
returned: on success
type: complex
contains:
name:
description:
- A user-friendly name for the backend set that must be unique and cannot be changed.
- Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot
contain spaces. Avoid entering confidential information.
- "Example: `example_backend_set`"
returned: on success
type: str
sample: example_backend_set
policy:
description:
- The network load balancer policy for the backend set.
- "Example: `FIVE_TUPLE`"
returned: on success
type: str
sample: FIVE_TUPLE
is_preserve_source:
description:
- If this parameter is enabled, then the network load balancer preserves the source IP of the packet when it is forwarded to backends.
Backends see the original source IP. If the isPreserveSourceDestination parameter is enabled for the network load balancer resource, then this
parameter cannot be disabled.
The value is true by default.
returned: on success
type: bool
sample: true
backends:
description:
- Array of backends.
returned: on success
type: complex
contains:
name:
description:
- A read-only field showing the IP address/IP OCID and port that uniquely identify this backend server in the backend set.
- "Example: `10.0.0.3:8080`, or `ocid1.privateip..oc1.<var><unique_ID></var>:443` or `10.0.0.3:0`"
returned: on success
type: str
sample: 10.0.0.3:8080
ip_address:
description:
- "The IP address of the backend server.
Example: `10.0.0.3`"
returned: on success
type: str
sample: 10.0.0.3
target_id:
description:
- "The IP OCID/Instance OCID associated with the backend server.
Example: `ocid1.privateip..oc1.<var><unique_ID></var>`"
returned: on success
type: str
sample: "ocid1.privateip..oc1.unique_ID"
port:
description:
- The communication port for the backend server.
- "Example: `8080`"
returned: on success
type: int
sample: 8080
weight:
description:
- The network load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger
proportion of incoming traffic. For example, a server weighted '3' receives three times the number of new connections
as a server weighted '1'.
For more information about load balancing policies, see
L(How Network Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm).
- "Example: `3`"
returned: on success
type: int
sample: 3
is_drain:
description:
- "Whether the network load balancer should drain this server. Servers marked \\"isDrain\\" receive no
incoming traffic."
- "Example: `false`"
returned: on success
type: bool
sample: false
is_backup:
description:
- "Whether the network load balancer should treat this server as a backup unit. If `true`, then the network load balancer forwards no
ingress
traffic to this backend server unless all other backend servers not marked as \\"isBackup\\" fail the health check policy."
- "Example: `false`"
returned: on success
type: bool
sample: false
is_offline:
description:
- Whether the network load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
- "Example: `false`"
returned: on success
type: bool
sample: false
health_checker:
description:
- ""
returned: on success
type: complex
contains:
protocol:
description:
- The protocol the health check must use; either HTTP or HTTPS, or UDP or TCP.
- "Example: `HTTP`"
returned: on success
type: str
sample: HTTP
port:
description:
- The backend server port against which to run the health check. If the port is not specified, then the network load balancer uses the
port information from the `Backend` object. The port must be specified if the backend port is 0.
- "Example: `8080`"
returned: on success
type: int
sample: 8080
retries:
description:
- "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies
when recovering a server to the \\"healthy\\" state. The default value is 3."
- "Example: `3`"
returned: on success
type: int
sample: 3
timeout_in_millis:
description:
- The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply
returns within this timeout period. The default value is 3000 (3 seconds).
- "Example: `3000`"
returned: on success
type: int
sample: 3000
interval_in_millis:
description:
- The interval between health checks, in milliseconds. The default value is 10000 (10 seconds).
- "Example: `10000`"
returned: on success
type: int
sample: 10000
url_path:
description:
- The path against which to run the health check.
- "Example: `/healthcheck`"
returned: on success
type: str
sample: /healthcheck
response_body_regex:
description:
- A regular expression for parsing the response body from the backend server.
- "Example: `^((?!false).|\\\\s)*$`"
returned: on success
type: str
sample: "^((?!false).|\\\\s)*$"
return_code:
description:
- "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol,
then you can use common HTTP status codes such as \\"200\\"."
- "Example: `200`"
returned: on success
type: int
sample: 0
request_data:
description:
- Base64 encoded pattern to be sent as UDP or TCP health check probe.
returned: on success
type: str
sample: "example_request_data"
response_data:
description:
- Base64 encoded pattern to be validated as UDP or TCP health check probe response.
returned: on success
type: str
sample: "example_response_data"
sample: [{
"name": "example_backend_set",
"policy": "FIVE_TUPLE",
"is_preserve_source": true,
"backends": [{
"name": "10.0.0.3:8080",
"ip_address": "10.0.0.3",
"target_id": "ocid1.privateip..oc1.unique_ID",
"port": 8080,
"weight": 3,
"is_drain": false,
"is_backup": false,
"is_offline": false
}],
"health_checker": {
"protocol": "HTTP",
"port": 8080,
"retries": 3,
"timeout_in_millis": 3000,
"interval_in_millis": 10000,
"url_path": "/healthcheck",
"response_body_regex": "^((?!false).|\\\\s)*$",
"return_code": 0,
"request_data": UNKNOWN TYPE - str,
"response_data": UNKNOWN TYPE - str
}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.network_load_balancer import NetworkLoadBalancerClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class NetworkLoadBalancerBackendSetFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"network_load_balancer_id",
"backend_set_name",
]
def get_required_params_for_list(self):
return [
"network_load_balancer_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_backend_set,
network_load_balancer_id=self.module.params.get("network_load_balancer_id"),
backend_set_name=self.module.params.get("backend_set_name"),
)
def list_resources(self):
optional_list_method_params = [
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_backend_sets,
network_load_balancer_id=self.module.params.get("network_load_balancer_id"),
**optional_kwargs
)
NetworkLoadBalancerBackendSetFactsHelperCustom = get_custom_class(
"NetworkLoadBalancerBackendSetFactsHelperCustom"
)
class ResourceFactsHelper(
NetworkLoadBalancerBackendSetFactsHelperCustom,
NetworkLoadBalancerBackendSetFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
network_load_balancer_id=dict(aliases=["id"], type="str", required=True),
backend_set_name=dict(aliases=["name"], type="str"),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="backend_set",
service_client_class=NetworkLoadBalancerClient,
namespace="network_load_balancer",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(backend_sets=result)
if __name__ == "__main__":
main()
|
broker_url = 'redis://127.0.0.1:6379/0'
# broker_pool_limit=100
#
# timezone = 'Asia/Shanghai'
#
# accept_content = ['pickle','json']
# task_serializer='pickle'
#
# result_backend='redis://127.0.0.1:6379/0'
# result_serializer='pickle'
# result_cache_max=10000
# result_expires=3600
#
# worker_redirect_stdouts_level='INFO'
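# A minimal usage sketch (hypothetical module name): if this file is saved as
# celeryconfig.py, a Celery app can load these settings with:
#
#   from celery import Celery
#   app = Celery('tasks')
#   app.config_from_object('celeryconfig')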
|
# coding=utf-8
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from decorators import stats_container
@stats_container
class Stats(models.Model):
content_type = models.ForeignKey(ContentType,
verbose_name=_('Content Type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.IntegerField(_('Object ID'))
item = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
class Meta(object):
verbose_name = _('Item Stats')
verbose_name_plural = _('Item Stats')
unique_together = ('content_type', 'object_pk')
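# A minimal usage sketch (hypothetical ``article`` instance): a Stats row can
# point at any model instance through the generic foreign key:
#
#   Stats.objects.create(content_type=ContentType.objects.get_for_model(article),
#                        object_pk=article.pk)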
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List
import jmespath
from cpo.lib.error import (
JmespathPathExpressionNotFoundException,
UnexpectedTypeException,
)
def get_jmespath_list_of_strings(expression: str, data: Any) -> List[str]:
"""Returns a list of strings extracted from the given object based on
the given JMESPath expression
Parameters
----------
expression
JMESPath expression
data
object to be searched
Returns
-------
List[str]
list of strings extracted from the given object based on the given
JMESPath expression
"""
search_result: Any = jmespath.search(expression, data)
if search_result is None:
raise JmespathPathExpressionNotFoundException(expression)
if not isinstance(search_result, List) or not all(isinstance(element, str) for element in search_result):
raise UnexpectedTypeException(search_result)
return search_result
def get_jmespath_string(expression: str, data: Any) -> str:
"""Returns a string extracted from the given object based on the given
JMESPath expression
Parameters
----------
expression
JMESPath expression
data
object to be searched
Returns
-------
    str
string extracted from the given object based on the given JMESPath
expression
"""
search_result = jmespath.search(expression, data)
if search_result is None:
raise JmespathPathExpressionNotFoundException(expression)
if not isinstance(search_result, str):
raise UnexpectedTypeException(search_result)
return search_result
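# A minimal usage sketch (hypothetical data):
#
#     data = {"items": [{"metadata": {"name": "pod-1"}}, {"metadata": {"name": "pod-2"}}]}
#     get_jmespath_list_of_strings("items[*].metadata.name", data)  # ['pod-1', 'pod-2']
#     get_jmespath_string("items[0].metadata.name", data)  # 'pod-1'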
|
from rad import *
from yolov3_base import YOLO
class YOLOv3(Model):
def __init__(self, model_attack, model_detect):
super().__init__(model_attack, model_detect, 'YOLOv3')
def preprocess_image(self, image_path):
image, self.val_image, self.resized = self.model_detect.preprocess_image(PIL.Image.open(image_path).convert('RGB'))
return image
def extract_valid_image(self, image):
return image[self.val_image].reshape(self.resized)
def de_preprocess_image(self, image):
return self.extract_valid_image((image[0] * 255).astype(np.uint8))
def detect(self, image):
detection, bbox_number = self.model_detect.detect_image(PIL.Image.fromarray(self.de_preprocess_image(image)), return_box_number=True)
return detection, bbox_number
def attack(self, adv_image, alpha, direction_value, ori_image, epsilon):
adv_image[0][self.val_image] = np.clip(adv_image - alpha / 255 * direction_value, 0, 1)[0][self.val_image]
adv_image = np.clip(adv_image, ori_image - epsilon / 255, ori_image + epsilon / 255)
return adv_image
if __name__ == "__main__":
# weights from https://pjreddie.com/media/files/yolov3.weights
# cfg from https://github.com/pjreddie/darknet/blob/master/cfg/yolov3.cfg
# convert to .h5 by https://github.com/qqwweee/keras-yolo3/blob/master/convert.py
# python convert.py yolov3.cfg yolov3.weights model_data/yolo.h5
assert os.path.exists('model_data/yolo.h5')
def load_model():
yolo = YOLO()
model = YOLOv3(yolo.yolo_model_reshape, yolo)
return model
def get_index(pred):
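        # Each detection spans 85 values (4 bbox coords + 1 objectness + 80 COCO
        # classes). x[4::85] picks the objectness scores, argsort()[::-1] ranks
        # them in descending order, and *85 + 4 maps back to flat indexes of the
        # objectness entries; keep the top 20.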
get_sorted_index = lambda x: np.argsort(x[4::85])[::-1] * 85 + 4
indexes = [x for x in get_sorted_index(pred)]
return indexes[:20]
rad_coco(load_model, get_index, group_dimension=85, attack_dimension=4, transfer_enhance=['SI'])
|
1. Set up a pyenv environment, understand the difference between the local, global, and shell modes, and install, deploy, and run Jupyter.
local sets the Python version for the current directory; subdirectories inherit this setting.
global changes the Python version globally.
shell sets the Python version only for the current shell session; the setting is lost when the session ends.
2. Print the Fibonacci numbers up to 100, implemented in two ways.
First way:
#fib
a = 0
b = 1
print(0, a)
print(1, b)
i = 2
while True:
    c = a + b
    if c > 100:
        break
    print(i, c)
    a = b
    b = c
    i += 1
Second way: don't know.
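A possible second approach (a sketch, using tuple unpacking):
a, b = 0, 1
i = 0
while a <= 100:
    print(i, a)
    a, b = b, a + b
    i += 1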
3. Use Python to randomly generate 200 unique activation codes (or coupons), each a string longer than 5 characters.
Don't know.
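A possible approach for exercise 3 (a sketch): keep drawing random 8-character
strings of letters and digits until 200 unique codes are collected.
import random
import string

codes = set()
while len(codes) < 200:
    codes.add(''.join(random.choices(string.ascii_uppercase + string.digits, k=8)))
for code in sorted(codes):
    print(code)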
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target data structure."""
import json
import os
import re
import warnings
import tvm._ffi
from tvm._ffi import register_func as _register_func
from tvm.runtime import Object, convert
from tvm.runtime.container import String
from tvm.ir.container import Map, Array
from . import _ffi_api
@tvm._ffi.register_object
class TargetKind(Object):
"""Kind of a compilation target"""
@property
def options(self):
"""Returns the dict of available option names and types"""
return dict(_ffi_api.ListTargetKindOptions(self))
@staticmethod
def options_from_name(kind_name: str):
"""Returns the dict of available option names and types from a name of TargetKind"""
return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name))
@tvm._ffi.register_object
class Target(Object):
"""Target device information, use through TVM API.
Note
----
You can create target using the constructor or the following functions
- :py:func:`tvm.target.arm_cpu` create arm_cpu target
- :py:func:`tvm.target.cuda` create CUDA target
- :py:func:`tvm.target.rocm` create ROCM target
- :py:func:`tvm.target.mali` create Mali target
- :py:func:`tvm.target.intel_graphics` create Intel Graphics target
"""
def __init__(self, target, host=None):
"""Construct a TVM target object from
1) Raw target string
2) Target config dict
3) Target tag
Parameters
----------
target : Union[str, Dict[str, Any]]
Can be one of a literal target string, a json string describing
a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the
possible values are:
kind : str (required)
Which codegen path to use, for example 'llvm' or 'cuda'.
keys : List of str (optional)
A set of strategies that can be dispatched to. When using
"kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
device : str (optional)
A single key that corresponds to the actual device being run on.
This will be effectively appended to the keys.
libs : List of str (optional)
The set of external libraries to use. For example ['cblas', 'mkl'].
system-lib : bool (optional)
If True, build a module that contains self registered functions.
Useful for environments where dynamic loading like dlopen is banned.
mcpu : str (optional)
The specific cpu being run on. Serves only as an annotation.
model : str (optional)
An annotation indicating what model a workload came from.
runtime : str (optional)
An annotation indicating which runtime to use with a workload.
mtriple : str (optional)
The llvm triplet describing the target, for example "arm64-linux-android".
mattr : List of str (optional)
The llvm features to compile with, for example ["+avx512f", "+mmx"].
mfloat-abi : str (optional)
An llvm setting that is one of 'hard' or 'soft' indicating whether to use
hardware or software floating-point operations.
mabi : str (optional)
An llvm setting. Generate code for the specified ABI, for example "lp64d".
host : Union[str, Dict[str, Any]] (optional)
Description for target host. Can be recursive. Similar to target.
host : Optional[Union[str, Dict[str, Any]]]
Similar to target but for target host. Can be one of a literal target host string,
a json string describing a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the possible values are
same as target.
"""
if isinstance(target, (dict, str)):
target = convert(target)
if isinstance(host, (dict, str)):
host = convert(host)
if target is None or not isinstance(target, (Map, String, Target)):
raise ValueError("target has to be a string or dictionary.")
if host is not None:
if not isinstance(host, (Map, String, Target)):
raise ValueError("target host has to be a string or dictionary.")
self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))
else:
self.__init_handle_by_constructor__(_ffi_api.Target, target)
def __enter__(self):
_ffi_api.TargetEnterScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.TargetExitScope(self)
def export(self):
return _ffi_api.TargetExport(self)
def with_host(self, host=None):
return _ffi_api.WithHost(self, Target(host))
@staticmethod
def current(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether allow the current target to be none
Raises
------
ValueError if current target is not set.
"""
return _ffi_api.TargetCurrent(allow_none)
@property
def arch(self):
"""Returns the cuda arch from the target if it exists."""
return str(self.attrs.get("arch", ""))
@property
def max_num_threads(self):
"""Returns the max_num_threads from the target if it exists."""
return int(self.attrs["max_num_threads"])
@property
def thread_warp_size(self):
"""Returns the thread_warp_size from the target if it exists."""
return int(self.attrs["thread_warp_size"])
@property
def max_function_args(self):
"""Returns max_function_args from the target if it exists, otherwise -1."""
return int(self.attrs.get("max_function_args", -1))
@property
def device_name(self):
"""Returns the device name from the target if it exists."""
return str(self.attrs.get("device", ""))
@property
def model(self):
"""Returns model from the target if it exists."""
return str(self.attrs.get("model", "unknown"))
@property
def mcpu(self):
"""Returns the mcpu from the target if it exists."""
return str(self.attrs.get("mcpu", ""))
@property
def mattr(self):
"""Returns the mattr from the target if it exists."""
return list(self.attrs.get("mattr", []))
@property
def supports_integer_dot_product(self):
"""Returns True if the target supports integer dot product instructions."""
if self.attrs.get("supports_integer_dot_product", False):
return bool(self.attrs["supports_integer_dot_product"])
return False
@property
def libs(self):
"""Returns the external libraries (libs) from the target if present."""
return list(self.attrs.get("libs", []))
def get_kind_attr(self, attr_name):
"""Get additional attribute about the target kind.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : object
The attribute value
"""
return _ffi_api.TargetKindGetAttr(self.kind, attr_name)
@staticmethod
def list_kinds():
"""Returns the list of available target names."""
return list(_ffi_api.ListTargetKinds())
@staticmethod
def canonicalize_target(target):
"""Given a single target-like object, returns the TVM Target object representing it.
Can convert from:
- None (to None).
- An existing TVM Target object.
- A string.
- A Python dictionary binding the target 'kind' and other attributes.
"""
if target is None:
return None
if isinstance(target, Target):
return target
return Target(target)
@staticmethod
def canonicalize_multi_targets(multi_targets):
"""Given a single or collection of target-like objects, returns a TVM Array of Target
objects representing them. Can convert from:
- None (to None).
- A single target-like object in a form recognized by canonicalize_target.
- A Python list or TVM Array of target-like objects in a form recognized by
canonicalize_target.
- A Python dict or TVM Map from TVM IntImm objects representing device types to
a target-like object in a form recognized by canonicalize_target.
"""
if multi_targets is None:
return None
if isinstance(multi_targets, (dict, Map)) and "kind" not in multi_targets:
# Convert legacy heterogeneous map representation to ordinary list of targets.
return Target.canonicalize_multi_targets([t for _, t in multi_targets.items()])
if isinstance(multi_targets, (list, Array)):
# Multiple Target results.
return convert([Target.canonicalize_target(t) for t in multi_targets])
# Single Target result.
return convert([Target.canonicalize_target(multi_targets)])
@staticmethod
def canonicalize_target_and_host(target, target_host=None):
"""Returns a TVM Array<Target> capturing target and target_host. The given target can be in
any form recognized by Target.canonicalize_target or Target.canonicalize_multi_targets. The
given target_host, if any, can be in any form recognized by Target.canonicalize_target. If
target_host is given, it will be set as the 'host' in each resulting Target object (and a
deprecation warning issued).
"""
# Convert target to Array<Target>, but not yet accounting for any host.
raw_targets = Target.canonicalize_multi_targets(target)
assert raw_targets is not None
# Convert host to Target, if given.
target_host = Target.canonicalize_target(target_host)
if target_host is None:
return raw_targets
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
# Make sure the (canonical) host is captured in all the (canonical) targets.
return convert([Target(t, target_host) for t in raw_targets])
@staticmethod
def check_and_update_host_consist(target, host=None, target_is_dict_key=True):
"""A helper function that merges a legacy "target, target_host" pair, then returns
the merged target and its host field. The function is for legacy target and target
host pair only, and should not be used in the new target system.
Parameters
----------
target : Union[str, Dict[str, Any], Target]
The target or heterogeneous target
host : Union[str, Dict[str, Any], Target, None]
The target host
target_is_dict_key : Bool
When the type of target is dict, whether Target is the key (Otherwise the value)
"""
if isinstance(target, (dict, str)):
target = convert(target)
if isinstance(host, (dict, str)):
host = convert(host)
if target is None:
assert host is None, "Target host is not empty when target is empty."
return target, host
if isinstance(target, Map) and "kind" not in target:
new_target = {}
for tgt, mod in target.items():
if not target_is_dict_key:
tgt, mod = mod, tgt
if isinstance(tgt, (Map, String, Target)):
tgt, host = Target.check_and_update_host_consist(tgt, host)
if not target_is_dict_key:
tgt, mod = mod, tgt
new_target[tgt] = mod
target = new_target
else:
target = Target(target, host)
host = target.host
return target, host
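# Hedged usage sketch (not part of the original module): shows how the
# canonicalization helpers above compose; assumes a working TVM build, and
# "llvm" is just a placeholder target spec.
def _example_canonicalize_target_and_host():
    targets = Target.canonicalize_target_and_host("llvm", "llvm")
    # Always an Array<Target>, with the legacy host folded into each entry.
    assert len(targets) == 1
    return targets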
# TODO(@tvm-team): Deprecate the helper functions below. Encourage the usage of config dict instead.
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
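# Hedged illustration (not part of the original module): _merge_opts accepts
# the new options as a string or a list and drops entries already present.
def _example_merge_opts():
    merged = _merge_opts(["-model=unknown"], "-arch=sm_61 -model=unknown")
    assert merged == ["-model=unknown", "-arch=sm_61"]
    return merged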
def cuda(model="unknown", arch=None, options=None):
"""Returns a cuda target.
Parameters
----------
model: str
The model of cuda device (e.g. 1080ti)
arch: str
The cuda architecture (e.g. sm_61)
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
if arch:
opts = _merge_opts(["-arch=%s" % arch], opts)
if not any(["-arch" in opt for opt in opts]):
warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.")
return Target(" ".join(["cuda"] + opts))
def rocm(model="unknown", options=None):
"""Returns a ROCM target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return Target(" ".join(["rocm"] + opts))
def mali(model="unknown", options=None):
"""Returns a ARM Mali GPU target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=mali", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def intel_graphics(model="unknown", options=None):
"""Returns an Intel Graphics target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
MICRO_SUPPORTED_MODELS = {
"host": [],
"atsamd51": ["-mcpu=cortex-m4"],
"cxd5602gg": ["-mcpu=cortex-m4"],
"esp32": [],
"imxrt10xx": ["-mcpu=cortex-m7"],
"mps2_an521": ["-mcpu=cortex-m33"],
"mps3_an547": ["-mcpu=cortex-m55"],
"nrf52840": ["-mcpu=cortex-m4"],
"nrf5340dk": ["-mcpu=cortex-m33"],
"sam3x8e": ["-mcpu=cortex-m3"],
"stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32l4r5zi": ["-mcpu=cortex-m4"],
"stm32u5xx": ["-mcpu=cortex-m33"],
"zynq_mp_r5": ["-mcpu=cortex-r5"],
}
def micro(model="unknown", options=None):
"""Returns a microTVM target.
Parameters
----------
model : str
Canonically identifies the target device. This is typically a device board level name.
The allowed values are MICRO_SUPPORTED_MODELS.keys().
options : str or list of str
Additional options
"""
if model not in MICRO_SUPPORTED_MODELS:
raise ValueError(f"Model {model} not supported by tvm.target.micro.")
opts = _merge_opts(
MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"],
options,
)
# NOTE: in the future, the default micro target will be LLVM except when
# external dependencies are present.
return Target(" ".join(["c"] + opts))
def arm_cpu(model="unknown", options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
Parameters
----------
model: str
SoC name or phone name of the arm board.
options : str or list of str
Additional options
"""
trans_table = {
"pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"],
"rasp4b": [
"-model=bcm2711",
"-mtriple=armv8l-linux-gnueabihf",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rasp4b64": [
"-model=bcm2711",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"],
"ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"beagleai": [
"-model=beagleai",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a15",
],
"stm32mp1": [
"-model=stm32mp1",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a7",
],
"thunderx": [
"-model=thunderx",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon,+crc,+lse",
"-mcpu=thunderxt88",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def rasp(options=None):
"""Return a Raspberry 3b target.
Parameters
----------
options : str or list of str
Additional options
"""
warnings.warn(
"tvm.target.rasp() is going to be deprecated. " 'Please use tvm.target.arm_cpu("rasp3b")'
)
return arm_cpu("rasp3b", options)
def vta(model="unknown", options=None):
"""Returns a VTA (Versatile Tensor Accelerator) target."""
opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["ext_dev"] + opts))
def bifrost(model="unknown", options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=bifrost", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def riscv_cpu(model="sifive-u54", options=None):
"""Returns a RISC-V CPU target.
Default: sifive-u54 rv64gc
Parameters
----------
model: str
CPU name.
options : str or list of str
Additional options
"""
trans_table = {
"sifive-e31": [
"-model=sifive-e31",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e31",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imac -mabi=ilp32 -mcpu=sifive-e31
],
"sifive-e76": [
"-model=sifive-e76",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e76",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imafc -mabi=ilp32 -mcpu=sifive-e76
],
"sifive-u54": [
"-model=sifive-u54",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u54",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u54
],
"sifive-u74": [
"-model=sifive-u74",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u74",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u74
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def hexagon(cpu_ver="v66", **kwargs):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str (default: "v66")
CPU version used for code generation. Not all allowed CPU strings
will be valid; LLVM will throw an error for invalid ones.
Recognized keyword parameters
-----------------------------
hvx : int (default: 128)
Size of HVX vector in bytes. Value of 0 disables HVX codegen.
sim_options : str or list of str (default: None)
User-defined simulator arguments. The CPU version defaults to cpu_ver;
otherwise, separate versions are used for codegen and simulation. Not
all allowed CPU strings will be valid; the simulator will throw an
error for invalid ones. Does not affect codegen.
llvm_options : str or list of str (default: None)
User defined compiler arguments.
use_qfloat : bool (default: True for cpu_ver >= v68, False otherwise)
Whether to use QFloat HVX instructions.
use_ieee_fp : bool (default: False)
Whether to use IEEE HVX instructions
link_params : bool (default: False)
Whether to link graph parameters into the LLVM module.
Note: Floating point support in HVX requires LLVM 14+.
"""
# Some of the target parameters correspond to target kind attributes
# listed in src/target/target_kind.cc. For those parameters, their
# names follow the attribute names with the exception of '_' being used
# in place of '-'.
# Example compiler arguments
# llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
def get_arch_version(cpu_ver):
m = re.match(r"v([0-9]+).*", cpu_ver)
assert m
return int(m.group(1))
# Check for valid codegen cpu
valid_hex = ["v65", "v66", "v67", "v67t", "v68", "v69"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
except (AssertionError, AttributeError, ValueError):
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
# Target configuration:
arch_version = get_arch_version(cpu_ver)
config = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
"use_qfloat": arch_version >= 68,
"use_ieee_fp": False,
"link_params": False,
}
config.update(kwargs)
# Warn about obsolete parameter names.
if config.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"sim_options": config["sim_args"]})
if config.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"llvm_options": config["llvm_args"]})
# LLVM target string
def create_llvm_target(cpu_ver, config):
"""Create LLVM target string."""
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
# Process the options that affect target features and return the
# target feature string.
def create_target_features(config):
features = {
"use_qfloat": "hvx-qfloat",
"use_ieee_fp": "hvx-ieee-fp",
}
tfs = []
if config["hvx"] > 0:
valid_hvx = [0, 64, 128]
if not config["hvx"] in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"]
else:
tfs += ["-hvx"]
# All the additional features happen to only apply to v68+.
# Don't bother applying them (even with '-') to lower versions.
if arch_version >= 68:
tfs += ["-+"[config[f]] + features[f] for f in features]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(config)
# Simulator options string
def create_sim_options(cpu_ver, config):
"""Create simulator option string."""
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
# Set the stacklevel to the tvm.target.hexagon() call.
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = config["hvx"]
sim_options = config["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
# Add user defined args
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
# Check for supplied sim cpu version
if "v6" in sim_options:
sim_cpu = ""
# Regex match for allowed cpus
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
# LLVM options string
def create_llvm_options(cpu_ver, config): # pylint: disable=unused-argument
"""Create LLVM options string."""
llvm_options = config["llvm_options"]
# TVM's option parser doesn't allow '=' in values, but '=' can
# appear in LLVM flags. Replace it with '@', since it's unlikely
# that '@' will be used in another context.
if not llvm_options:
return ""
if isinstance(llvm_options, list):
llvm_options = " ".join(llvm_options)
if len(llvm_options.strip()) == 0:
return ""
args = [s.replace("=", "@") for s in llvm_options.split()]
return "--llvm-options=" + ",".join(args)
# TVM target attributes string
def create_tvm_options(cpu_ver, config): # pylint: disable=unused-argument
"""Create TVM target features string."""
features = {
"link_params": "link-params",
}
opts = ""
for k in config:
if k in features:
opts += " --" + features[k] + "=" + str(config[k])
return opts
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, config)
target_str = create_llvm_target(cpu_ver, config)
llvm_str = create_llvm_options(cpu_ver, config)
tvm_str = create_tvm_options(cpu_ver, config)
args_list = target_str.split() + llvm_str.split() + tvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
STM32_SUPPORTED_SERIES = {
# High-Performance
"stm32H7xx": ["-device=arm_cpu", "-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32F7xx": ["-device=arm_cpu", "-mcpu=cortex-m7"],
"stm32F4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32F2xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
# Mainstream
"stm32G0xx": ["-device=arm_cpu", "-mcpu=cortex-m0+"],
"stm32F0xx": ["-device=arm_cpu", "-mcpu=cortex-m0"],
"stm32F1xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
"stm32G4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32F3xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
# Low-power
"stm32U5xx": ["-device=arm_cpu", "-mcpu=cortex-m33"],
"stm32L5xx": ["-device=arm_cpu", "-mcpu=cortex-m33"],
"stm32L4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32L1xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
"stm32L0xx": ["-device=arm_cpu", "-mcpu=cortex-m0+"],
}
def stm32(series="unknown", options=None):
"""Returns a STM32 target.
Parameters
----------
series: str
Series name of an STM32 board series, e.g. stm32H7xx or stm32F4xx
options : str or list of str
Additional options
"""
if series not in STM32_SUPPORTED_SERIES:
raise ValueError(f"Series {series} is not supported by tvm.target.stm32.")
opts = _merge_opts(STM32_SUPPORTED_SERIES[series], options)
return Target(" ".join(["c"] + opts))
def create(target):
"""Deprecated. Use the constructor of :py:mod:`tvm.target.Target` directly."""
warnings.warn("tvm.target.create() is being deprecated. Please use tvm.target.Target() instead")
return Target(target)
@_register_func("target._load_config_dict")
def _load_config_dict(config_dict_str):
try:
config = json.loads(config_dict_str)
except json.decoder.JSONDecodeError:
return None
if not isinstance(config, dict):
return None
for key in config.keys():
if not isinstance(key, str):
return None
return config
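# Hedged illustration (not part of the original module): only a JSON object
# whose keys are all strings is accepted; anything else yields None.
def _example_load_config_dict():
    assert _load_config_dict('{"kind": "llvm"}') == {"kind": "llvm"}
    assert _load_config_dict('[1, 2]') is None
    assert _load_config_dict('not json') is None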
|
from js9 import j
import os
import capnp
# import msgpack
import base64
ModelBaseCollection = j.data.capnp.getModelBaseClassCollection()
ModelBase = j.data.capnp.getModelBaseClass()
# from JumpScale9.clients.tarantool.KVSInterface import KVSTarantool
class $NameModel(ModelBase):
'''
'''
def __init__(self, **kwargs):
ModelBase.__init__(self, **kwargs)
def index(self):
# no need to put indexes here because this will be done by capnp
pass
def save(self):
self.reSerialize()
self._pre_save()
buff = self.dbobj.to_bytes()
key=self.key
# key=msgpack.dumps(self.key)
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_set",(key,buff))
def delete(self):
key=self.key
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_del",(key))
class $NameCollection(ModelBaseCollection):
'''
This class represents a collection of $Names.
It's used to list/find/create new instances of the $Name model object.
'''
def __init__(self):
category = '$name'
namespace = ""
# instantiate the KVS interface on top of tarantool
# cl = j.clients.tarantool.client_get() # will get the tarantool from the config file, the main connection
# db = KVSTarantool(cl, category)
# mpath = j.sal.fs.getDirName(os.path.abspath(__file__)) + "/model.capnp"
# SchemaCapnp = j.data.capnp.getSchemaFromPath(mpath, name='$Name')
self.client = j.clients.tarantool.client_get()  # will get the tarantool from the config file, the main connection
mpath = j.sal.fs.getDirName(os.path.abspath(__file__)) + "/model.capnp"
SchemaCapnp = j.data.capnp.getSchemaFromPath(mpath, name='$Name')
super().__init__(SchemaCapnp, category=category, namespace=namespace, modelBaseClass=$NameModel, db=self.client, indexDb=self.client)
self.client.db.encoding = None
def new(self):
return $NameModel(collection=self, new=True)
def get(self, key):
resp = self.client.call("model_$name_get", key)
if len(resp.data) <= 1 and len(resp.data[0]) > 2:
raise KeyError("value for %s not found" % key)
value = resp.data[0][1]
return $NameModel(key=key, collection=self, new=False, data=value)
# BELOW IS ALL EXAMPLE CODE WHICH NEEDS TO BE REPLACED
def list(self):
resp=self.client.call("model_$name_list")
return [item.decode() for item in resp[0]]
# def list(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999,tags=[]):
# raise NotImplementedError()
# return res
# def find(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999, tags=[]):
# raise NotImplementedError()
# res = []
# for key in self.list(actor, service, action, state, serviceKey, fromEpoch, toEpoch, tags):
# if self.get(key):
# res.append(self.get(key))
# return res
|
from typing import Optional, Tuple, Union, List
import torch
import numpy as np
from ....data.subject import Subject
from ....utils import to_tuple
from ....torchio import DATA, TypeTuple, TypeData, TypeTripletInt
from ... import IntensityTransform
from .. import RandomTransform
class RandomSwap(RandomTransform, IntensityTransform):
r"""Randomly swap patches within an image.
This is typically used in `context restoration for self-supervised learning
<https://www.sciencedirect.com/science/article/pii/S1361841518304699>`_.
Args:
patch_size: Tuple of integers :math:`(w, h, d)` to swap patches
of size :math:`w \times h \times d`.
If a single number :math:`n` is provided, :math:`w = h = d = n`.
num_iterations: Number of times that two patches will be swapped.
p: Probability that this transform will be applied.
keys: See :py:class:`~torchio.transforms.Transform`.
"""
def __init__(
self,
patch_size: TypeTuple = 15,
num_iterations: int = 100,
p: float = 1,
keys: Optional[List[str]] = None,
):
super().__init__(p=p, keys=keys)
self.patch_size = np.array(to_tuple(patch_size))
self.num_iterations = self.parse_num_iterations(num_iterations)
@staticmethod
def parse_num_iterations(num_iterations):
if not isinstance(num_iterations, int):
raise TypeError('num_iterations must be an int, '
f'not {num_iterations}')
if num_iterations < 0:
raise ValueError('num_iterations must be non-negative, '
f'not {num_iterations}')
return num_iterations
@staticmethod
def get_params(
tensor: torch.Tensor,
patch_size: np.ndarray,
num_iterations: int,
) -> List[Tuple[np.ndarray, np.ndarray]]:
spatial_shape = tensor.shape[-3:]
locations = []
for _ in range(num_iterations):
first_ini, first_fin = get_random_indices_from_shape(
spatial_shape,
patch_size,
)
while True:
second_ini, second_fin = get_random_indices_from_shape(
spatial_shape,
patch_size,
)
larger_than_initial = np.all(second_ini >= first_ini)
less_than_final = np.all(second_fin <= first_fin)
if larger_than_initial and less_than_final:
continue # patches overlap
else:
break # patches don't overlap
locations.append((first_ini, second_ini))
return locations
def apply_transform(self, subject: Subject) -> Subject:
for image in self.get_images(subject):
tensor = image[DATA]
locations = self.get_params(
tensor, self.patch_size, self.num_iterations)
image[DATA] = swap(tensor, self.patch_size, locations)
return subject
def swap(
tensor: torch.Tensor,
patch_size: TypeTuple,
locations: List[Tuple[np.ndarray, np.ndarray]],
) -> torch.Tensor:
tensor = tensor.clone()
patch_size = np.array(patch_size)
for first_ini, second_ini in locations:
first_fin = first_ini + patch_size
second_fin = second_ini + patch_size
first_patch = crop(tensor, first_ini, first_fin)
second_patch = crop(tensor, second_ini, second_fin).clone()
insert(tensor, first_patch, second_ini)
insert(tensor, second_patch, first_ini)
return tensor
def insert(tensor: TypeData, patch: TypeData, index_ini: np.ndarray) -> None:
index_fin = index_ini + np.array(patch.shape[-3:])
i_ini, j_ini, k_ini = index_ini
i_fin, j_fin, k_fin = index_fin
tensor[:, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch
def crop(
image: Union[np.ndarray, torch.Tensor],
index_ini: np.ndarray,
index_fin: np.ndarray,
) -> Union[np.ndarray, torch.Tensor]:
i_ini, j_ini, k_ini = index_ini
i_fin, j_fin, k_fin = index_fin
return image[:, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]
def get_random_indices_from_shape(
spatial_shape: TypeTripletInt,
patch_size: TypeTripletInt,
) -> Tuple[np.ndarray, np.ndarray]:
shape_array = np.array(spatial_shape)
patch_size_array = np.array(patch_size)
max_index_ini = shape_array - patch_size_array
if (max_index_ini < 0).any():
message = (
f'Patch size {patch_size} cannot be'
f' larger than image spatial shape {spatial_shape}'
)
raise ValueError(message)
max_index_ini = max_index_ini.astype(np.uint16)
coordinates = []
for max_coordinate in max_index_ini.tolist():
if max_coordinate == 0:
coordinate = 0
else:
coordinate = torch.randint(max_coordinate, size=(1,)).item()
coordinates.append(coordinate)
index_ini = np.array(coordinates, np.uint16)
index_fin = index_ini + patch_size_array
return index_ini, index_fin
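# Hedged usage sketch (not part of the original module): swaps two random
# 4x4x4 patches inside a single-channel volume; the shape is preserved.
def _example_swap_patches():
    tensor = torch.rand(1, 16, 16, 16)
    locations = RandomSwap.get_params(tensor, np.array((4, 4, 4)), 2)
    swapped = swap(tensor, (4, 4, 4), locations)
    assert swapped.shape == tensor.shape
    return swapped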
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on May, 2020
Nathan de Lara <ndelara@enst.fr>
"""
import numpy as np
from scipy import sparse
from sknetwork.utils.format import directed2undirected
def edgelist2adjacency(edgelist: list, undirected: bool = False) -> sparse.csr_matrix:
"""Build an adjacency matrix from a list of edges.
Parameters
----------
edgelist : list
List of edges as pairs (i, j) or triplets (i, j, w) for weighted edges.
undirected : bool
If ``True``, return a symmetric adjacency.
Returns
-------
adjacency : sparse.csr_matrix
Examples
--------
>>> edgelist = [(0, 1), (1, 2), (2, 0)]
>>> adjacency = edgelist2adjacency(edgelist)
>>> adjacency.shape, adjacency.nnz
((3, 3), 3)
>>> adjacency = edgelist2adjacency(edgelist, undirected=True)
>>> adjacency.shape, adjacency.nnz
((3, 3), 6)
>>> weighted_edgelist = [(0, 1, 0.2), (1, 2, 4), (2, 0, 1.3)]
>>> adjacency = edgelist2adjacency(weighted_edgelist)
>>> adjacency.dtype
dtype('float64')
"""
edges = np.array(edgelist)
row, col = edges[:, 0].astype(np.int32), edges[:, 1].astype(np.int32)
n = max(row.max(), col.max()) + 1
if edges.shape[1] > 2:
data = edges[:, 2]
else:
data = np.ones_like(row, dtype=bool)
adjacency = sparse.csr_matrix((data, (row, col)), shape=(n, n))
if undirected:
adjacency = directed2undirected(adjacency)
return adjacency
def edgelist2biadjacency(edgelist: list) -> sparse.csr_matrix:
"""Build a biadjacency matrix from a list of edges.
Parameters
----------
edgelist : list
List of edges as pairs (i, j) or triplets (i, j, w) for weighted edges.
Returns
-------
biadjacency : sparse.csr_matrix
Examples
--------
>>> edgelist = [(0, 0), (1, 0), (1, 1), (2, 1)]
>>> biadjacency = edgelist2biadjacency(edgelist)
>>> biadjacency.shape, biadjacency.nnz
((3, 2), 4)
>>> weighted_edgelist = [(0, 0, 0.5), (1, 0, 1), (1, 1, 1), (2, 1, 2)]
>>> biadjacency = edgelist2biadjacency(weighted_edgelist)
>>> biadjacency.dtype
dtype('float64')
"""
edges = np.array(edgelist)
row, col = edges[:, 0].astype(np.int32), edges[:, 1].astype(np.int32)
n_row, n_col = row.max() + 1, col.max() + 1
if edges.shape[1] > 2:
data = edges[:, 2]
else:
data = np.ones_like(row, dtype=bool)
biadjacency = sparse.csr_matrix((data, (row, col)), shape=(n_row, n_col))
return biadjacency
|
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary stratis distributions
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $STRATISDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([re.search(r'qt_(.*)\.qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm'))])
l2 = set([re.search(r'stratis_(.*)\.qm', f).group(1) for f in glob.glob(os.path.join(d2, 'stratis_*.qm'))])
print(",".join(sorted(l1.intersection(l2))))
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
avgArr = [684.0322959592726, 884.7363009861817, 888.8322884189091, 942.080300986182, 970.7522934458182, 991.2322959592727, 991.2323009861818, 1011.712300986182, 1036.288295959273, 1044.4803034996364, 1060.8642859054546, 1069.056306013091, 1077.2482984727276, 1122.304306013091, 1130.4963034996365, 1134.5923034996363, 1142.7843009861817, 1146.8802884189092, 1159.1682984727274, 1163.2643085265454, 1224.7042884189093, 1232.8963110400005, 1249.280295959273, 1261.5682934458184, 1265.6642833920002, 1282.0483060130912, 1290.2402984727278, 1298.4323110400003, 1331.2003034996367, 1351.680290932364, 1363.968290932364, 1363.9682959592733, 1409.0243009861824, 1409.024303499637, 1413.1202984727277, 1413.120306013091, 1429.5042884189097, 1429.5043060130913, 1470.464293445819, 1495.0402959592734, 1527.808285905455, 1556.4803034996369, 1593.3442934458185, 1613.8243009861826, 1634.3042859054556, 1638.400285905455, 1650.6882884189097, 1662.9763009861827, 1699.8403085265466, 1740.800303499637, 1757.1842959592736, 1757.1843009861827, 1761.2803160669096, 1781.7602909323643, 1794.0482959592732, 1794.0483034996373, 1802.2402909323644, 1810.432298472728, 1822.7202859054553, 1830.9122959592737, 1843.2003034996371, 1855.4882959592733, 1863.680295959274, 1867.7763009861828, 1892.3522909323647, 1908.736311040001, 1929.21628841891, 1982.4643135534557, 2002.944295959274, 2048.0002984727284, 2064.3842859054553, 2158.5922934458195, 2207.7442934458195, 2228.2242833920013, 2240.512300986183, 2269.1842884189105, 2285.568283392001, 2338.8162984727287, 2351.104295959274, 2416.6402984727292, 2424.832290932365, 2461.696298472729, 2478.080295959274, 2568.1922934458194, 2654.20829344582, 2715.648303499638, 2756.6083009861827, 2801.66429344582, 2936.8322909323656, 2990.0803160669107, 3043.3282984727293, 3117.0563210938194, 3125.2482959592744, 3170.304298472728, 3280.896278365093, 3379.200290932365, 3506.176295959273, 3850.240300986182, 3887.104285905455, 5201.920300986177]
maxArr = [1331.2002984727274, 1638.4003613090913, 1740.800235636364, 1740.8002984727277, 1740.8003613090914, 1740.8003613090914, 1843.2002356363641, 1843.2002984727278, 1843.2003613090915, 1843.2003613090915, 1945.600298472728, 1945.600298472728, 1945.600298472728, 1945.6003613090916, 2048.0002984727284, 2048.0002984727284, 2048.0002984727284, 2048.000361309092, 2048.000361309092, 2048.000361309092, 2150.400235636365, 2150.400235636365, 2150.4002984727285, 2150.400361309092, 2150.400361309092, 2150.400361309092, 2150.400361309092, 2252.8003613090923, 2252.8003613090923, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2457.600298472729, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2560.000298472729, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2662.4002356363653, 2662.400298472729, 2662.400298472729, 2662.4003613090927, 2662.4003613090927, 2764.800298472729, 2764.800298472729, 2764.8003613090928, 2764.8003613090928, 2867.200298472729, 2867.200361309093, 2867.200361309093, 2867.200361309093, 2969.600361309093, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3174.4002984727294, 3174.4002984727294, 3174.400361309093, 3174.400361309093, 3174.400361309093, 3276.800361309093, 3276.800361309093, 3379.200235636366, 3379.2003613090933, 3481.6002984727297, 3481.6002984727297, 3481.6003613090934, 3481.6003613090934, 3686.4002356363662, 3686.4003613090936, 3686.4003613090936, 3788.80029847273, 3788.8003613090937, 3993.60029847273, 3993.60029847273, 3993.600361309094, 3993.600361309094, 4096.000361309092, 4096.000361309092, 4198.400298472728, 4198.400361309092, 4198.400361309092, 4300.800361309091, 4608.000235636363, 4608.00036130909, 5017.6002356363615, 5017.600361309089, 5120.000235636361, 7168.000361309081]
a = np.array(avgArr)
m = np.array(maxArr)
x1 = pd.Series(a, name="Average time, ms")
x2 = pd.Series(m, name="Maximum time, ms")
sns.jointplot(x=x1, y=x2, kind="hex")
plt.tight_layout()
plt.show()
|
import pytest
import uuid
from couchdb import Server
from openprocurement.auction.design import sync_design_chronograph, sync_design
from openprocurement.auction.tests.utils import test_public_document, \
put_test_doc
SERVER = Server('http://admin:zaq1xsw2@127.0.0.1:9000')
@pytest.fixture(scope='function')
def db(request):
name = 'test_{}'.format(uuid.uuid4().hex)
db = SERVER.create(name)
sync_design_chronograph(db)
sync_design(db)
request.cls.db = db
request.addfinalizer(lambda : SERVER.delete(name))
return db
@pytest.mark.usefixtures('db')
class TemplateTestViews(object):
def test_chronograph_view(self):
with put_test_doc(self.db, test_public_document):
data = next(iter(self.db.view('chronograph/start_date').rows))
assert not set(data.get('value').keys()).difference(
set(['start', 'mode', 'api_version', 'auction_type', 'procurementMethodType']))
def test_start_date_view(self):
"""see: https://github.com/openprocurement/openprocurement.auction/blob/master/openprocurement/auction/design.py#L18"""
def test_end_date_view(self):
"""see: https://github.com/openprocurement/openprocurement.auction/blob/master/openprocurement/auction/design.py#L8"""
def test_pre_announce_view(self):
"""https://github.com/openprocurement/openprocurement.auction/blob/master/openprocurement/auction/design.py#L31"""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 11:36:22 2021
@author: arsii
"""
import pytest
from tscfat.Utils.plot_decorator import plot_decorator
from tscfat.Analysis.plot_timeseries import plot_timeseries
from tscfat.Utils.argument_loader import setup_pd
class TestPlotDecorator(object):
def test_plot_decorator(self):
df = setup_pd()
name = ['level']
_ = plot_timeseries(df,
name,
title = "test title",
roll = False,
xlab = "Time",
ylab = "Value",
ylim = False,
savename = False,
savepath = False,
highlight = False,
test=True
)
|
import unittest
from paddle.v2.framework.framework import Variable, g_main_program, Program
import paddle.v2.framework.core as core
import numpy as np
class TestVariable(unittest.TestCase):
def test_np_dtype_convert(self):
DT = core.DataType
convert = Variable._convert_np_dtype_to_dtype_
self.assertEqual(DT.FP32, convert(np.float32))
self.assertEqual(DT.FP16, convert("float16"))
self.assertEqual(DT.FP64, convert("float64"))
self.assertEqual(DT.INT32, convert("int32"))
self.assertEqual(DT.INT16, convert("int16"))
self.assertEqual(DT.INT64, convert("int64"))
self.assertEqual(DT.BOOL, convert("bool"))
self.assertRaises(ValueError, lambda: convert("int8"))
def test_var(self):
b = g_main_program.current_block()
w = b.create_var(
dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
self.assertNotEqual(str(w), "")
self.assertEqual(core.DataType.FP64, w.data_type)
self.assertEqual((784, 100), w.shape)
self.assertEqual("fc.w", w.name)
self.assertEqual(0, w.lod_level)
w = b.create_var(name='fc.w')
self.assertEqual(core.DataType.FP64, w.data_type)
self.assertEqual((784, 100), w.shape)
self.assertEqual("fc.w", w.name)
self.assertEqual(0, w.lod_level)
self.assertRaises(ValueError,
lambda: b.create_var(name="fc.w", shape=(24, 100)))
def test_step_scopes(self):
prog = Program()
b = prog.current_block()
var = b.create_var(
name='step_scopes', type=core.VarDesc.VarType.STEP_SCOPES)
self.assertEqual(core.VarDesc.VarType.STEP_SCOPES, var.type)
if __name__ == '__main__':
unittest.main()
|
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_file_contents(filename):
with codecs.open(filename, encoding='utf-8') as f:
contents = f.read()
return contents
package_name = "typecheck-decorator"
class install_with_test(stdinstall):
def run(self):
stdinstall.run(self) # normal install
##pip/setuptools makes this unbuffering unhelpful:
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) # make line-buffered
#sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1) # make line-buffered
#import typecheck.test_typecheck_decorator # execute post-install test (during beta only)
setup(
# setup customization:
cmdclass={'install': install_with_test},
# basic information:
name=package_name,
version=find_version('typecheck', '__init__.py'),
description="flexible explicit run-time type checking of function arguments (Python3-only)",
long_description=get_file_contents("README.rst"),
# The project URL:
url='http://github.com/prechelt/' + package_name,
# Author details:
author='Dmitry Dvoinikov, Lutz Prechelt',
author_email='prechelt@inf.fu-berlin.de',
# Classification:
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Documentation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='type-checking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['typing;python_version<"3.5"'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'typecheck': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
###data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
### entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
import numpy as np
import pandas as pd
import math
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, r2_score
df = pd.read_csv('CryptoCoinClose.csv')
#df.drop( ['BNB'], axis = 1)
features = ['BTC', 'XRP', 'XLM', 'LTC' ]
X = df[features]
y = df['ETH']
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
rf_model = RandomForestRegressor(random_state=1, n_estimators = 300, max_depth=15)
rf_model.fit(train_X, train_y)
rf_pred = rf_model.predict(val_X)
print(rf_model.predict(X.tail(10)))
print(df['ETH'].tail(10))
print(mean_absolute_error(val_y,rf_pred))
#print('Score=',r2_score(val_y,rf_pred))
|
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
from tensorflow import keras
import os
import numpy as np
import sys
sys.path.append("../src")
from dopamine import Dopamine, dopamine
assert tf.__version__.startswith('2.')
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
batch_size = 200
num_classes = 100
shuffle_size = 60000
epochs = 20
validation_freq = 1
dopamine_batch_size = batch_size
def preprocess(x, y):
"""
x is a simple image, not a batch
"""
x = tf.cast(x, dtype=tf.float32) / 255.
# x = tf.reshape(x, [28*28])
y = tf.cast(y, dtype=tf.int32)
# y = tf.one_hot(y, depth=num_classes)
return x, y
def load_data():
(x, y), (x_val, y_val) = datasets.cifar100.load_data()
db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(shuffle_size).batch(batch_size)
ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(preprocess).batch(batch_size)
return db, ds_val
def main():
db_train, db_test = load_data()
model = Sequential([
# unit 1
layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu, input_shape=(32, 32, 3)),
layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
# unit 2
layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
# unit 3
layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
# unit 4
layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
# unit 5
layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
layers.Flatten(),
layers.Dense(256, activation=tf.nn.relu),
Dopamine(input_shape=[256], batch_size=dopamine_batch_size),
layers.Dense(128, activation=tf.nn.relu),
Dopamine(input_shape=[128], batch_size=dopamine_batch_size, use_bias=True),
layers.Dense(100, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# optimizer = tf.keras.optimizers.SGD(learning_rate=0.0002, momentum=0.5)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # final Dense layer already applies softmax
model.compile(optimizer=optimizer,
loss=loss,
metrics=['accuracy']
)
model.summary()
model.fit(db_train, epochs=epochs, validation_data=db_test,
validation_freq=validation_freq)
model.evaluate(db_test)
if __name__ == '__main__':
main()
|
import neural_network_lyapunov.examples.quadrotor2d.quadrotor_2d as\
quadrotor_2d
import unittest
import numpy as np
import torch
import scipy.integrate
class TestQuadrotor2D(unittest.TestCase):
def test_dynamics_equilibrium(self):
plant = quadrotor_2d.Quadrotor2D(torch.float64)
u = plant.u_equilibrium
xdot = plant.dynamics(np.zeros((6, )), u)
np.testing.assert_allclose(xdot, np.zeros((6, )))
def test_dynamics(self):
plant = quadrotor_2d.Quadrotor2D(torch.float64)
def check_dynamics(x, u):
assert (isinstance(x, torch.Tensor))
xdot = plant.dynamics(x, u)
xdot_np = plant.dynamics(x.detach().numpy(), u.detach().numpy())
np.testing.assert_allclose(xdot_np, xdot.detach().numpy())
check_dynamics(torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.float64),
torch.tensor([7, 8], dtype=torch.float64))
check_dynamics(
torch.tensor([1, -2, 3, -4, 5, -6], dtype=torch.float64),
torch.tensor([7, -8], dtype=torch.float64))
def test_linearized_dynamics(self):
plant = quadrotor_2d.Quadrotor2D(torch.float64)
def check_linearized_dynamics(x, u):
assert (isinstance(x, torch.Tensor))
A, B = plant.linearized_dynamics(x, u)
xdot = plant.dynamics(x, u)
for i in range(6):
if x.grad is not None:
x.grad.zero_()
if u.grad is not None:
u.grad.zero_()
xdot[i].backward(retain_graph=True)
Ai_expected = x.grad.detach().numpy() if x.grad is not None\
else np.zeros((6,))
np.testing.assert_allclose(A[i, :].detach().numpy(),
Ai_expected)
Bi_expected = u.grad.detach().numpy() if u.grad is not None\
else np.zeros((2,))
np.testing.assert_allclose(B[i, :].detach().numpy(),
Bi_expected)
# Make sure numpy and torch input give same result.
A_np, B_np = plant.linearized_dynamics(x.detach().numpy(),
u.detach().numpy())
np.testing.assert_allclose(A_np, A.detach().numpy())
np.testing.assert_allclose(B_np, B.detach().numpy())
check_linearized_dynamics(
torch.tensor([1, 2, 3, 4, 5, 6],
dtype=torch.float64,
requires_grad=True),
torch.tensor([7, 8], dtype=torch.float64, requires_grad=True))
check_linearized_dynamics(
torch.tensor([-1, -2, 3, 4, 5, 6],
dtype=torch.float64,
requires_grad=True),
torch.tensor([7, -8], dtype=torch.float64, requires_grad=True))
def test_lqr_control(self):
plant = quadrotor_2d.Quadrotor2D(torch.float64)
x_star = np.zeros((6, ))
u_star = plant.u_equilibrium.detach().numpy()
Q = np.diag([10, 10, 10, 1, 1, plant.length / 2. / np.pi])
R = np.array([[0.1, 0.05], [0.05, 0.1]])
K, S = plant.lqr_control(Q, R, x_star, u_star)
result = scipy.integrate.solve_ivp(
lambda t, x: plant.dynamics(x, K @ (x - x_star) + u_star), (0, 10),
np.array([0.1, 0.1, -0.1, 0.2, 0.2, -0.3]))
np.testing.assert_allclose(result.y[:, -1], x_star, atol=1E-6)
if __name__ == "__main__":
unittest.main()
|
import contextlib
import json
import logging
import mimetypes
import os
import warnings
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus # type: ignore
from contextlib2 import ExitStack
from future.utils import raise_from
import requests
from requests.cookies import cookiejar_from_dict
from requests.utils import dict_from_cookiejar
from box import Box
from tavern.util import exceptions
from tavern.util.dict_util import format_keys, check_expected_keys
from tavern.schemas.extensions import get_wrapped_create_function
from tavern.request.base import BaseRequest
logger = logging.getLogger(__name__)
def get_request_args(rspec, test_block_config):
"""Format the test spec given values inthe global config
Todo:
Add similar functionality to validate/save $ext functions so input
can be generated from a function
Args:
rspec (dict): Test spec
test_block_config (dict): Test block config
Returns:
dict: Formatted test spec
Raises:
BadSchemaError: Tried to pass a body in a GET request
"""
# pylint: disable=too-many-locals
request_args = {}
# Ones that are required and are enforced to be present by the schema
required_in_file = ["method", "url"]
optional_in_file = [
"json",
"data",
"params",
"headers",
"files",
"timeout",
"cert",
# Ideally this would just be passed through but requests seems to error
# if we pass a list instead of a tuple, so we have to manually convert
# it further down
# "auth"
]
optional_with_default = {"verify": True, "stream": False}
if "method" not in rspec:
logger.debug("Using default GET method")
rspec["method"] = "GET"
content_keys = ["data", "json", "files"]
in_request = [c for c in content_keys if c in rspec]
if len(in_request) > 1:
# Explicitly raise an error here
# From requests docs:
# Note, the json parameter is ignored if either data or files is passed.
# However, we allow the data + files case, as requests handles it correctly
if set(in_request) != {"data", "files"}:
raise exceptions.BadSchemaError(
"Can only specify one type of request data in HTTP request (tried to "
"send {})".format(" and ".join(in_request))
)
headers = rspec.get("headers", {})
has_content_header = "content-type" in [h.lower() for h in headers.keys()]
if "files" in rspec:
if has_content_header:
logger.warning(
"Tried to specify a content-type header while sending a file - this will be ignored"
)
rspec["headers"] = {
i: j for i, j in headers.items() if i.lower() != "content-type"
}
fspec = format_keys(rspec, test_block_config["variables"])
def add_request_args(keys, optional):
for key in keys:
try:
request_args[key] = fspec[key]
except KeyError:
if optional or (key in request_args):
continue
# This should never happen
raise
add_request_args(required_in_file, False)
add_request_args(optional_in_file, True)
if "auth" in fspec:
request_args["auth"] = tuple(fspec["auth"])
if "cert" in fspec:
if isinstance(fspec["cert"], list):
request_args["cert"] = tuple(fspec["cert"])
if "timeout" in fspec:
# Needs to be a tuple, it being a list doesn't work
if isinstance(fspec["timeout"], list):
request_args["timeout"] = tuple(fspec["timeout"])
for key in optional_in_file:
try:
func = get_wrapped_create_function(request_args[key].pop("$ext"))
except (KeyError, TypeError, AttributeError):
pass
else:
request_args[key] = func()
# If there's any nested json in parameters, urlencode it
# if you pass nested json to 'params' then requests silently fails and just
# passes the 'top level' key, ignoring all the nested json. I don't think
# there's a standard way to do this, but urlencoding it seems sensible
# eg https://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
# > ...represented in an OAuth 2.0 request as UTF-8 encoded JSON (which ends
# > up being form-urlencoded when passed as an OAuth parameter)
for key, value in request_args.get("params", {}).items():
if isinstance(value, dict):
request_args["params"][key] = quote_plus(json.dumps(value))
for key, val in optional_with_default.items():
request_args[key] = fspec.get(key, val)
# TODO
# requests takes all of these - we need to parse the input to get them
# "cookies",
# These verbs _can_ send a body but the body _should_ be ignored according
# to the specs - some info here:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
if request_args["method"] in ["GET", "HEAD", "OPTIONS"]:
if any(i in request_args for i in ["json", "data"]):
warnings.warn(
"You are trying to send a body with a HTTP verb that has no semantic use for it",
RuntimeWarning,
)
return request_args
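# Hedged usage sketch (not part of the original module): a minimal spec
# formatted against an empty variable map; "method" defaults to GET and the
# optional keys pick up their defaults.
def _example_get_request_args():
    rspec = {"url": "http://example.com", "params": {"q": "tavern"}}
    args = get_request_args(rspec, {"variables": {}})
    assert args["method"] == "GET"
    assert args["verify"] is True and args["stream"] is False
    return args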
@contextlib.contextmanager
def _set_cookies_for_request(session, request_args):
"""
Possibly reset session cookies for a single request then set them back.
If no cookies were present in the request arguments, do nothing.
This does not use try/finally because if it fails then we don't care about
the cookies anyway
Args:
session (requests.Session): Current session
request_args (dict): current request arguments
"""
if "cookies" in request_args:
old_cookies = dict_from_cookiejar(session.cookies)
session.cookies = cookiejar_from_dict({})
yield
session.cookies = cookiejar_from_dict(old_cookies)
else:
yield
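# Hedged usage sketch (not part of the original module): inside the context
# the session cookies are cleared (only when the request carries its own
# "cookies" entry) and restored on exit.
def _example_set_cookies_for_request(session):
    with _set_cookies_for_request(session, {"cookies": {"k": "v"}}):
        pass  # session.cookies is empty here; restored afterwards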
class RestRequest(BaseRequest):
def __init__(self, session, rspec, test_block_config):
"""Prepare request
Args:
session (requests.Session): existing session
rspec (dict): test spec
test_block_config (dict): Any configuration for this the block of
tests
Raises:
UnexpectedKeysError: If some unexpected keys were used in the test
spec. Only valid keyword args to requests can be passed
"""
if "meta" in rspec:
meta = rspec.pop("meta")
if meta and "clear_session_cookies" in meta:
session.cookies.clear_session_cookies()
expected = {
"method",
"url",
"headers",
"data",
"params",
"auth",
"json",
"verify",
"files",
"stream",
"timeout",
"cookies",
"cert",
# "hooks",
}
check_expected_keys(expected, rspec)
request_args = get_request_args(rspec, test_block_config)
# Need to do this down here - it is separate from getting request args as
# it depends on the state of the session
if "cookies" in rspec:
existing_cookies = session.cookies.get_dict()
missing = set(rspec["cookies"]) - set(existing_cookies.keys())
if missing:
logger.error("Missing cookies")
raise exceptions.MissingCookieError(
"Tried to use cookies '{}' in request but only had '{}' available".format(
rspec["cookies"], existing_cookies
)
)
request_args["cookies"] = {
c: existing_cookies.get(c) for c in rspec["cookies"]
}
logger.debug("Request args: %s", request_args)
request_args.update(allow_redirects=False)
self._request_args = request_args
# There is no way using requests to make a prepared request that will
# not follow redirects, so instead we have to do this. This also means
# that we can't have the 'pre-request' hook any more because we don't
# create a prepared request.
def prepared_request():
# If there are open files, create a context manager around each so
# they will be closed at the end of the request.
with ExitStack() as stack:
stack.enter_context(_set_cookies_for_request(session, request_args))
self._request_args.update(self._get_file_arguments(stack))
return session.request(**self._request_args)
self._prepared = prepared_request
def _get_file_arguments(self, stack):
"""Get corect arguments for anything that should be passed as a file to
requests
Args:
stack (ExitStack): context stack to add file objects to so they're
closed correctly after use
Returns:
dict: mapping of {"files": ...} to pass directly to requests
"""
files_to_send = {}
for key, filepath in self._request_args.get("files", {}).items():
if not mimetypes.inited:
mimetypes.init()
filename = os.path.basename(filepath)
# a 2-tuple ('filename', fileobj)
file_spec = [filename, stack.enter_context(open(filepath, "rb"))]
# If it doesn't have a mimetype, or can't guess it, don't
# send the content type for the file
content_type, encoding = mimetypes.guess_type(filepath)
if content_type:
# a 3-tuple ('filename', fileobj, 'content_type')
logger.debug("content_type for '%s' = '%s'", filename, content_type)
file_spec.append(content_type)
if encoding:
# or a 4-tuple ('filename', fileobj, 'content_type', custom_headers)
logger.debug("encoding for '%s' = '%s'", filename, encoding)
# encoding is None for no encoding or the name of the
# program used to encode (e.g. compress or gzip). The
# encoding is suitable for use as a Content-Encoding header.
file_spec.append({"Content-Encoding": encoding})
files_to_send[key] = tuple(file_spec)
if files_to_send:
return {"files": files_to_send}
else:
return {}
def run(self):
""" Runs the prepared request and times it
Todo:
time it
Returns:
requests.Response: response object
"""
try:
return self._prepared()
except requests.exceptions.RequestException as e:
logger.exception("Error running prepared request")
raise_from(exceptions.RestRequestException, e)
@property
def request_vars(self):
return Box(self._request_args)
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 20 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
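# A hypothetical usage sketch (the error code and message are illustrative,
# not taken from this codebase):
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000000)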
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
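# A few illustrative round-trips for the conversion helpers above:
#   bytes_to_hex_str(b'\xff\x00') == 'ff00'
#   str_to_b64str('user:pass') == 'dXNlcjpwYXNz'
#   satoshi_round('0.123456789') == Decimal('0.12345678')  # always rounds down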
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
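# A hypothetical usage sketch: poll until a node reports at least one peer,
# giving up after 30 seconds (the predicate is re-checked every 0.5s):
#   wait_until(lambda: len(node.getpeerinfo()) > 0, timeout=30)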
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
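# Worked example of the port arithmetic, assuming PortSeed.n == 1:
#   p2p_port(0) == 11000 + 0 + (8 * 1) % (5000 - 1 - 8) == 11008
#   rpc_port(0) == 11000 + 5000 + 0 + (8 * 1) % (5000 - 1 - 8) == 16008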
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "opalcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("enablezeromint=0\n")
f.write("precompute=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "opalcoin.conf")):
with open(os.path.join(datadir, "opalcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at
    least one node already synced to the latest, stable tip, otherwise there's
    a chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
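    # "81" is the one-byte output-count varint (0x81 = 129: 128 OP_RETURN
    # outputs plus the change output); it replaces the original count byte
    # when spliced into a raw transaction in create_lots_of_big_transactions()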
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
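        # Splice the OP_RETURN outputs into the raw hex: characters 92:94 of
        # the serialized transaction hold the one-byte output count, which
        # txouts replaces (see gen_return_txouts() above).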
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # each generated transaction is roughly 66k,
    # and 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, List, Any
# from retriever.entitylinking.mentiondetector.ner_mention import NERMentionDetector
from retriever.entitylinking.mentiondetector.web_api_ner_mention import WebAPINERMentionDetector
from retriever.entitylinking.disambiguator.prior_only import PriorDisambiguator
from retriever.configs import config_utils
class EntityLinker(object):
def __init__(self, config: Dict[str, Any]):
if not isinstance(config, Dict):
config = config_utils.get_config(config)
self.config = config
# self.mention_detector = NERMentionDetector(config)
self.mention_detector = WebAPINERMentionDetector(config)
self.entity_disambiguator = PriorDisambiguator(config)
def predict(self, sentence, topk=3):
mentions = self.mention_detector.predict(sentence)
ed_output = self.entity_disambiguator.predict(mentions, topk=topk)
return ed_output
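# A hypothetical usage sketch (the config path is illustrative; anything
# accepted by config_utils.get_config should work):
#   linker = EntityLinker("configs/entity_linking.json")
#   print(linker.predict("Barack Obama was born in Hawaii", topk=3))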
|
import unittest
from unittest.mock import MagicMock
from networking.PositionSender import PositionSender
class PositionSenderTest(unittest.TestCase):
def test_send(self):
connection_handler_mock = MagicMock()
sender = PositionSender(connection_handler_mock)
x = 42
z = 1337
sender.send(x, z)
expected_data = "new position x: '{x}' z: '{z}'\n".format(x=x, z=z)
connection_handler_mock.enqueue_message.assert_called_once_with(expected_data)
|
try:
import uio
uio.TextIOBase
except:
print('SKIP')
raise SystemExit
s = uio.TextIOBase(uio.BytesIO(b"123привет"))
print(s.read(1))
print(s.read(3))
print(s.read(1))
print(s.read())
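# Expected output, assuming UTF-8 text decoding (each ASCII byte decodes to
# one character, each Cyrillic letter occupies two bytes but reads as one):
# 1
# 23п
# р
# ивет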
|
import sys
import datetime
from csv import writer
import os
from pathlib import Path
# import getpass
## not using getpass so we can see when the string has been fully read
os.system('cls')
full_date = datetime.datetime.now()
short_date = full_date.strftime('%Y%m%d')
print(short_date)
print("Program to save Gift Card numbers to spreadsheet.")
print("swipe cards through reader and press enter to confirm")
print("types exit to quit the program. your results will be saved to the desktop.")
print('------------------------------------------------------------------------------')
def append_list_as_row(file_name, list_of_elem):
# Open file in append mode
with open(file_name, 'a+', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(list_of_elem)
def check_values(card_string_1, card_string_2):
c1 = card_string_1
c2 = card_string_2
    if (c1 != c2):
        print("Sorry, please swipe again -> I didn't quite get that...")
    else:
        print(f"Successfully extracted card number: {c1}")
        card_list = []
        card_list.append(c1)
        append_list_as_row(todays_file, card_list)
def parse_gc(gift_card_string):
GC = gift_card_string
    card_split = GC.split('~')
    card_1_r = card_split[1].split('^')
    card_1 = card_1_r[0][1:]
    card_2 = card_split[2].split('=')[0]
    check_values(card_1, card_2)
return
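# Hypothetical track data for parse_gc (the format below is illustrative):
#   ";%~B6006491234567890^GIFT/CARD~6006491234567890=4912?"
# splits on '~' into three segments; segment 1 yields card_1 =
# "6006491234567890" (leading 'B' stripped) and segment 2 yields the same
# card_2, so the swipe is accepted.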
todays_file = f'{short_date}_gift_card_numbers.csv'
# todays_file = f'gift_card_numbers.csv'
if Path(todays_file).is_file():
    print("File exists")
else:
    print("File does not exist")
if __name__ == "__main__":
while True:
try:
# GC = getpass.getpass(prompt='Please swipe a Gift Card: ')
GC = input("Please swipe a Gift Card: ")
except Exception as error:
            print('Nuts, something went wrong:', error)
else:
if (GC == "exit"):
sys.exit(0)
else:
parse_gc(GC)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm is released under the terms of the Apache License 2.0
"""Explicitly listing show modules in this package"""
from wwdtm.show import core, details, info, utility
__all__ = ["core", "details", "info", "utility"]
|
# Author: Grand Roman Joldes
# E-mail: grand.joldes@uwa.edu.au
import sys
dir = sys.argv[1]
def terminate(errorcode):
print("\n")
exit(errorcode)
def error(msg):
    print(msg)
    terminate(1)
res = slicer.util.loadVolume(dir+"\\CT_cropped.nrrd")
if (res == False):
error("Failed to load "+dir+"\\CT_cropped.nrrd")
res = slicer.util.loadLabelVolume(dir+"\\CT_blood_label.nrrd")
if (res == False):
print("Failed to load "+dir+"\\CT_blood_label.nrrd - ILT surface will not be created!")
print "\nExtracting AAA surface ..."
res = slicer.util.loadLabelVolume(dir+"\\CT_AAA_label.nrrd")
if (res == False):
error("Failed to load "+dir+"\\CT_AAA_label.nrrd")
wallNode = getNode("CT_AAA_Label")
parameters = {}
parameters["InputVolume"] = wallNode.GetID()
parameters["Name"] = "CT_wall"
parameters["Smooth"] = 100
parameters["FilterType"] = "Laplacian"
parameters["Decimate"] = 0.1
outModels = slicer.vtkMRMLModelHierarchyNode()
slicer.mrmlScene.AddNode( outModels )
parameters["ModelSceneFile"] = outModels.GetID()
modelmaker = slicer.modules.modelmaker
cliNode = slicer.cli.run(modelmaker, None, parameters, wait_for_completion=True)
status = cliNode.GetStatusString()
if status != 'Completed':
    error("Failed to run modelmaker!")
print("\nSaving results ...")
surfNode = getNode("CT_wall_1_1")
res = slicer.util.saveNode(surfNode, dir+"\\CT_wall.vtp")
if (res == False):
error("Failed to save "+dir+"\\CT_wall.vtp")
terminate(0)
res = slicer.util.loadLabelVolume(dir+"\\CT_AAA_label.nrrd")
if (res == False):
error("Failed to load "+dir+"\\CT_AAA_label.nrrd")
print "Running ErodeEffect ..."
moduleSelector = slicer.util.mainWindow().moduleSelector()
moduleSelector.selectModule('Editor')
slicer.modules.EditorWidget.toolsBox.selectEffect('ErodeEffect')
slicer.modules.EditorWidget.toolsBox.currentOption.onApply()
slicer.modules.EditorWidget.toolsBox.currentOption.onApply()
slicer.modules.EditorWidget.toolsBox.currentOption.onApply()
slicer.modules.EditorWidget.toolsBox.currentOption.onApply()
print "\nRunning MaskScalarVolume ..."
bloodNode = getNode('CT_blood_label')
erodedAAANode = getNode('CT_AAA_label')
parameters = {}
parameters["InputVolume"] = bloodNode.GetID()
parameters["MaskVolume"] = erodedAAANode.GetID()
outModel = slicer.vtkMRMLScalarVolumeNode()
outModel.SetName("CT_blood_masked_label")
outModel.SetAttribute("LabelMap", "1")
slicer.mrmlScene.AddNode( outModel )
parameters["OutputVolume"] = outModel.GetID()
masker = slicer.modules.maskscalarvolume
cliNode = slicer.cli.run(masker, None, parameters, wait_for_completion=True)
status = cliNode.GetStatusString()
if status != 'Completed':
    error("Failed to run maskscalarvolume!")
print("\nRunning SubtractScalarVolumes ...")
res = slicer.util.loadLabelVolume(dir+"\\CT_AAA_label.nrrd")
if (res == False):
error("Failed to load "+dir+"\\CT_AAA_label.nrrd")
AAANode = getNode('CT_AAA_label_1')
bloodNode = getNode('CT_blood_masked_label')
parameters = {}
parameters["inputVolume1"] = AAANode.GetID()
parameters["inputVolume2"] = bloodNode.GetID()
outModel = slicer.vtkMRMLScalarVolumeNode()
outModel.SetName("CT_Wall_Label")
outModel.SetAttribute("LabelMap", "1")
slicer.mrmlScene.AddNode( outModel )
parameters["outputVolume"] = outModel.GetID()
parameters["order"] = 0
subtracter = slicer.modules.subtractscalarvolumes
cliNode = slicer.cli.run(subtracter, None, parameters, wait_for_completion=True)
status = cliNode.GetStatusString()
if status != 'Completed':
    error("Failed to run subtractscalarvolumes!")
print("\nExtracting AAA wall surface ...")
wallNode = getNode("CT_Wall_Label")
parameters = {}
parameters["InputVolume"] = wallNode.GetID()
parameters["Name"] = "CT_wall"
parameters["Smooth"] = 100
parameters["FilterType"] = "Laplacian"
parameters["Decimate"] = 0.1
outModels = slicer.vtkMRMLModelHierarchyNode()
slicer.mrmlScene.AddNode( outModels )
parameters["ModelSceneFile"] = outModels.GetID()
modelmaker = slicer.modules.modelmaker
cliNode = slicer.cli.run(modelmaker, None, parameters, wait_for_completion=True)
status = cliNode.GetStatusString()
if status != 'Completed':
    error("Failed to run modelmaker!")
print("\nSaving results ...")
surfNode = getNode("CT_wall_1_jake")
res = slicer.util.saveNode(surfNode, dir+"\\CT_wall.vtp")
if (res == False):
error("Failed to save "+dir+"\\CT_wall.vtp")
wallNode = getNode("CT_Wall_Label")
res = slicer.util.saveNode(wallNode, dir+"\\CT_Wall_label.nrrd")
if (res == False):
error("Failed to save "+dir+"\\CT_Wall_label.nrrd")
print "\nDone!"
terminate(0)
|
import time
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from imutils.video import FPS
flags.DEFINE_string('framework', 'tf', 'tf, tflite, or trt')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('video', './data/road.mp4', 'path to input video')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.25, 'score threshold')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'MJPG', 'codec used in VideoWriter when saving video to file, MJPG or XVID')
flags.DEFINE_boolean('dis_cv2_window', False, 'disable cv2 window during the process') # this is good for the .ipynb
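# A hypothetical invocation (script and file names are illustrative):
#   python detectvideo.py --weights ./checkpoints/yolov4-416 --size 416 \
#       --video ./data/road.mp4 --output ./results/demo.avi --dis_cv2_window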
def main(_argv):
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
video_path = FLAGS.video
print("Video from: ", video_path )
try:
vid = cv2.VideoCapture(int(video_path))
except:
vid = cv2.VideoCapture(video_path)
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
else:
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
frame_id = 0
fps = FPS().start()
while True:
return_value, frame = vid.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
if frame_id == vid.get(cv2.CAP_PROP_FRAME_COUNT):
print("Video processing complete")
break
raise ValueError("No image! Try with another video format")
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
prev_time = time.time()
if FLAGS.framework == 'tflite':
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
image = utils.draw_bbox(frame, pred_bbox)
curr_time = time.time()
exec_time = curr_time - prev_time
result = np.asarray(image)
info = "time: %.2f ms" %(1000*exec_time)
# print(info)
fps.update()
result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if not FLAGS.dis_cv2_window:
cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
cv2.imshow("result", result)
if cv2.waitKey(1) & 0xFF == ord('q'): break
if FLAGS.output:
out.write(result)
frame_id += 1
fps.stop()
print("Elasped time: {:.2f}".format(fps.elapsed()))
print("FPS: {:.2f}".format(fps.fps()))
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aerele Technologies Private Limited and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSitePurchaseConfirmation(unittest.TestCase):
pass
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author Simon Heybrock
import scipp as sc
import numpy as np
from .common import assert_export
class TestDatasetSlice:
def setup_method(self):
var = sc.Variable(dims=['x'], values=np.arange(5, dtype=np.int64))
self._d = sc.Dataset(data={'a': var, 'b': var}, coords={'x': var})
def test_slice_with_range_datasetview_then_dataarrayview(self):
sl = self._d['x', 1:-1]['a'].data
ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range end
sl = self._d['x', 1:]['a'].data
ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3, 4], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range begin
sl = self._d['x', :-1]['a'].data
ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range both begin and end
sl = self._d['x', :]['b'].data
ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3, 4], dtype=np.int64))
assert sc.identical(ref, sl)
def test_slice_with_range_dataarrayview_then_dataarrayview(self):
sl = self._d['a']['x', 1:-1].data
ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range end
sl = self._d['a']['x', 1:].data
ref = sc.Variable(dims=['x'], values=np.array([1, 2, 3, 4], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range begin
sl = self._d['a']['x', :-1].data
ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3], dtype=np.int64))
assert sc.identical(ref, sl)
# omitting range both begin and end
sl = self._d['b']['x', :].data
ref = sc.Variable(dims=['x'], values=np.array([0, 1, 2, 3, 4], dtype=np.int64))
assert sc.identical(ref, sl)
def test_slice_single_index(self):
assert sc.identical(self._d['x', -2]['a'], self._d['x', 3]['a'])
assert sc.identical(self._d['a']['x', -2], self._d['a']['x', 3])
def _test_copy_exports_on(self, x):
assert_export(x.copy)
assert_export(x.__copy__)
assert_export(x.__deepcopy__, {})
def test_copy_dataarrayview_exports(self):
view = self._d['a']
self._test_copy_exports_on(view)
def test_set_item_via_temporary_slice(self):
N = 6
M = 4
d1 = sc.Dataset()
d1['x'] = sc.Variable(dims=['x'], values=np.arange(N).astype(np.float64))
d1['y'] = sc.Variable(dims=['y'], values=np.arange(M).astype(np.float64))
arr1 = np.arange(N * M).reshape(N, M).astype(np.float64) + 1
d1['a'] = sc.Variable(dims=['x', 'y'], values=arr1)
d1 = d1['x', 1:2]
        assert d1['a'].data.values.tolist() == [[5.0, 6.0, 7.0, 8.0]]
def test_set_dataarrayview_slice_items(self):
d = self._d.copy()
d['a']['x', 0:2] += d['b']['x', 0:2]
assert d['a'].data.values.tolist() == [0, 2, 2, 3, 4]
d['a']['x', 4] += \
d['b']['x', 1]
assert d['a'].data.values.tolist() == [0, 2, 2, 3, 5]
def test_slice_and_dimensions_items_dataarray(self):
var = sc.Variable(dims=['x', 'y'], values=np.arange(50).reshape(5, 10))
da = sc.DataArray(var)
assert np.allclose(da['x', 0].values, da['x', 0:1].values)
assert np.allclose(da['x', 4].values, da['x', -1].values)
assert np.allclose(da['y', 1].values, da['y', -9].values)
assert ('y' in da['x', 0].dims)
assert ('x' not in da['x', 0].dims)
assert ('y' in da['x', 0:1].dims)
assert ('x' in da['x', 0:1].dims)
def test_slice_and_dimensions_items_dataset(self):
da = sc.DataArray(
sc.Variable(dims=['x', 'y'], values=np.arange(50).reshape(5, 10)))
ds = sc.Dataset(data={'a': da})
assert (np.allclose(ds['x', 0]['a'].values,
ds['x', 0:1]['a'].values[0],
atol=1e-9))
assert (np.allclose(ds['x', 4]['a'].values, ds['x', -1]['a'].values))
assert (np.allclose(ds['y', 1]['a'].values, ds['y', -9]['a'].values))
assert ('y' in da['x', 0].dims)
assert ('x' not in da['x', 0].dims)
assert ('y' in da['x', 0:1].dims)
assert ('x' in da['x', 0:1].dims)
def test_slice_dataset_with_data_only(self):
d = sc.Dataset()
d['data'] = sc.Variable(dims=['y'], values=np.arange(10))
sliced = d['y', :]
assert sc.identical(d, sliced)
sliced = d['y', 2:6]
assert sc.identical(sc.Variable(dims=['y'], values=np.arange(2, 6)),
sliced['data'].data)
def test_slice_dataset_with_coords_only(self):
d = sc.Dataset(
coords={'y-coord': sc.Variable(dims=['y'], values=np.arange(10))})
sliced = d['y', :]
assert sc.identical(d, sliced)
sliced = d['y', 2:6]
assert sc.identical(sc.Variable(dims=['y'], values=np.arange(2, 6)),
sliced.coords['y-coord'])
def test_slice_with_step_1(self):
var = sc.Variable(dims=['x'], values=np.arange(1, 4, dtype=np.int64))
expect = sc.Dataset(data={'a': var, 'b': var}, coords={'x': var})
assert sc.identical(self._d['x', 1:4:1], expect)
assert sc.identical(self._d['a']['x', 1:4:1], expect['a'])
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import layer_utils
import match_utils
class SentenceMatchModelGraph(object):
"""
Create Natural Language Sentence Matching Models.
-- sentence-sentence pairs
-- question-answer pairs
-- premise-hypothesis pairs
"""
def __init__(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, options=None, global_step=None):
self.options = options
self.create_placeholders()
self.create_embedding(num_classes, word_vocab, char_vocab, is_training, global_step)
match_representations = []
match_dims = 0
if 'feat' in options.using_algo:
with tf.variable_scope("feat"):
match_representation, match_dim = self.create_features(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using Features')
if 'bimpm' in options.using_algo:
with tf.variable_scope("bimpm"):
match_representation, match_dim = self.create_bimpm_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using BIMPM')
if 'bimpm_char' in options.using_algo:
with tf.variable_scope("bimpm_char"):
match_representation, match_dim = self.create_bimpm_char_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using BIMPM CHAR')
if 'mpcnn' in options.using_algo:
with tf.variable_scope("mpcnn"):
match_representation, match_dim = self.create_mpcnn_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using MPCNN')
if 'mpcnn_char' in options.using_algo:
with tf.variable_scope("mpcnn_char"):
match_representation, match_dim = self.create_mpcnn_char_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using MPCNN CHAR')
if 'siameseLSTM' in options.using_algo:
with tf.variable_scope("siameseLSTM"):
match_representation, match_dim = self.create_siameseLSTM_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using SiameseLSTM')
if 'siameseCNN' in options.using_algo:
with tf.variable_scope("siameseCNN"):
match_representation, match_dim = self.create_SiameseCNN_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using SiameseCNN')
if 'MatchPyramid' in options.using_algo:
with tf.variable_scope("MatchPyramid"):
match_representation, match_dim = self.create_MatchPyramid_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using MatchPyramid')
if 'esim' in options.using_algo:
with tf.variable_scope("esim"):
match_representation, match_dim = self.create_esim_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using ESIM')
if 'DecAtt' in options.using_algo:
with tf.variable_scope("DecAtt"):
match_representation, match_dim = self.create_DecAtt_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using DecAtt')
if 'imodel' in options.using_algo:
with tf.variable_scope("imodel"):
match_representation, match_dim = self.create_my_model_graph(num_classes, word_vocab, char_vocab, is_training, global_step=global_step)
match_representations.append(match_representation)
match_dims += match_dim
print('Using MyModel')
match_representations = tf.concat(axis=1, values=match_representations)
self.prediction_layer(num_classes, match_representations, match_dims, is_training, global_step)
def create_placeholders(self):
self.truth = tf.placeholder(tf.int32, [None]) # [batch_size]
self.question_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
self.passage_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
self.in_question_words = tf.placeholder(tf.int32, [None, None]) # [batch_size, question_len]
self.in_passage_words = tf.placeholder(tf.int32, [None, None]) # [batch_size, passage_len]
self.in_question_passage_features = tf.placeholder(tf.float32, [None, None]) # [batch_size, features_dim]
if self.options.with_char:
self.question_char_lengths = tf.placeholder(tf.int32, [None,None]) # [batch_size, question_len]
self.passage_char_lengths = tf.placeholder(tf.int32, [None,None]) # [batch_size, passage_len]
self.in_question_chars = tf.placeholder(tf.int32, [None, None, None]) # [batch_size, question_len, q_char_len]
self.in_passage_chars = tf.placeholder(tf.int32, [None, None, None]) # [batch_size, passage_len, p_char_len]
self.question_sent_char_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
self.passage_sent_char_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
self.in_question_sent_chars = tf.placeholder(tf.int32, [None, None]) # [batch_size, question_len]
self.in_passage_sent_chars = tf.placeholder(tf.int32, [None, None]) # [batch_size, passage_len]
def create_feed_dict(self, cur_batch, is_training=False):
feed_dict = {
self.truth : cur_batch.label_truth,
self.question_lengths: cur_batch.question_lengths,
self.passage_lengths: cur_batch.passage_lengths,
self.in_question_words: cur_batch.in_question_words,
self.in_passage_words: cur_batch.in_passage_words,
self.in_question_passage_features: cur_batch.in_question_passage_features,
}
if self.options.with_char:
feed_dict[self.question_char_lengths] = cur_batch.question_char_lengths
feed_dict[self.passage_char_lengths] = cur_batch.passage_char_lengths
feed_dict[self.in_question_chars] = cur_batch.in_question_chars
feed_dict[self.in_passage_chars] = cur_batch.in_passage_chars
feed_dict[self.question_sent_char_lengths] = cur_batch.question_sent_char_lengths
feed_dict[self.passage_sent_char_lengths] = cur_batch.passage_sent_char_lengths
feed_dict[self.in_question_sent_chars] = cur_batch.in_question_sent_chars
feed_dict[self.in_passage_sent_chars] = cur_batch.in_passage_sent_chars
return feed_dict
# ==================================================== Embedding =================================================
def create_embedding(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
options = self.options
if word_vocab is not None:
word_vec_trainable = True
cur_device = '/gpu:0'
if options.fix_word_vec:
word_vec_trainable = False
cur_device = '/cpu:0'
with tf.device(cur_device):
self.w_embedding = tf.placeholder(tf.float32, shape=word_vocab.word_vecs.shape)
self.word_embedding = tf.get_variable("word_embedding", trainable=word_vec_trainable,
initializer=self.w_embedding, dtype=tf.float32) # tf.constant(word_vocab.word_vecs)
# with tf.device('/gpu:0'):
# self.w_embedding_trainable = tf.placeholder(tf.float32, shape=word_vocab.word_vecs.shape)
# self.word_embedding_trainable = tf.get_variable("word_embedding_trainable", trainable=True,
# initializer=self.w_embedding_trainable, dtype=tf.float32) # tf.constant(word_vocab.word_vecs)
# tf.truncated_normal([tf.shape(self.w_embedding)[0], options.word_emb_dim])
if options.with_char and char_vocab is not None:
char_vec_trainable = True
cur_device = '/gpu:0'
if options.fix_char_vec:
char_vec_trainable = False
cur_device = '/cpu:0'
with tf.device(cur_device):
self.c_embedding = tf.placeholder(tf.float32, shape=char_vocab.word_vecs.shape)
self.char_embedding = tf.get_variable("char_embedding", trainable=char_vec_trainable,
initializer=self.c_embedding, dtype=tf.float32)
# ==================================================== BiMPM =====================================================
def create_bimpm_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
if options.with_char and char_vocab is not None:
input_shape = tf.shape(self.in_question_chars)
batch_size = input_shape[0]
question_len = input_shape[1]
q_char_len = input_shape[2]
input_shape = tf.shape(self.in_passage_chars)
passage_len = input_shape[1]
p_char_len = input_shape[2]
char_dim = char_vocab.word_dim
in_question_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_question_chars) # [batch_size, question_len, q_char_len, char_dim]
in_question_char_repres = tf.reshape(in_question_char_repres, shape=[-1, q_char_len, char_dim])
question_char_lengths = tf.reshape(self.question_char_lengths, [-1])
            question_char_mask = tf.sequence_mask(question_char_lengths, q_char_len, dtype=tf.float32) # [batch_size*question_len, q_char_len]
            in_question_char_repres = tf.multiply(in_question_char_repres, tf.expand_dims(question_char_mask, axis=-1))
in_passage_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_passage_chars) # [batch_size, passage_len, p_char_len, char_dim]
in_passage_char_repres = tf.reshape(in_passage_char_repres, shape=[-1, p_char_len, char_dim])
passage_char_lengths = tf.reshape(self.passage_char_lengths, [-1])
passage_char_mask = tf.sequence_mask(passage_char_lengths, p_char_len, dtype=tf.float32) # [batch_size*passage_len, p_char_len]
in_passage_char_repres = tf.multiply(in_passage_char_repres, tf.expand_dims(passage_char_mask, axis=-1))
(question_char_outputs_fw, question_char_outputs_bw, _) = layer_utils.my_lstm_layer(in_question_char_repres, options.char_lstm_dim,
input_lengths=question_char_lengths,scope_name="char_lstm", reuse=False,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
if options.lstm_out_type == 'mean':
question_char_outputs_fw = layer_utils.collect_mean_step_of_lstm(question_char_outputs_fw)
question_char_outputs_bw = layer_utils.collect_mean_step_of_lstm(question_char_outputs_bw)
elif options.lstm_out_type == 'end':
question_char_outputs_fw = layer_utils.collect_final_step_of_lstm(question_char_outputs_fw, question_char_lengths - 1)
question_char_outputs_bw = question_char_outputs_bw[:, 0, :]
question_char_outputs = tf.concat(axis=1, values=[question_char_outputs_fw, question_char_outputs_bw])
question_char_outputs = tf.reshape(question_char_outputs, [batch_size, question_len, 2*options.char_lstm_dim]) # [batch_size, question_len, 2*options.char_lstm_dim]
(passage_char_outputs_fw, passage_char_outputs_bw, _) = layer_utils.my_lstm_layer(in_passage_char_repres, options.char_lstm_dim,
input_lengths=passage_char_lengths, scope_name="char_lstm", reuse=True,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
if options.lstm_out_type == 'mean':
passage_char_outputs_fw = layer_utils.collect_mean_step_of_lstm(passage_char_outputs_fw)
passage_char_outputs_bw = layer_utils.collect_mean_step_of_lstm(passage_char_outputs_bw)
elif options.lstm_out_type == 'end':
passage_char_outputs_fw = layer_utils.collect_final_step_of_lstm(passage_char_outputs_fw, passage_char_lengths - 1)
passage_char_outputs_bw = passage_char_outputs_bw[:, 0, :]
passage_char_outputs = tf.concat(axis=1, values=[passage_char_outputs_fw, passage_char_outputs_bw])
passage_char_outputs = tf.reshape(passage_char_outputs, [batch_size, passage_len, 2*options.char_lstm_dim]) # [batch_size, passage_len, 2*options.char_lstm_dim]
in_question_repres.append(question_char_outputs)
in_passage_repres.append(passage_char_outputs)
input_dim += 2*options.char_lstm_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
# ======rcnn context layer======
if options.with_rcnn:
in_question_repres = layer_utils.my_rcnn_layer(
in_question_repres, options.word_emb_dim, options.word_context_dim, options.fc_dim,
input_lengths=self.question_lengths, scope_name="word_rcnn", reuse=False,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
in_passage_repres = layer_utils.my_rcnn_layer(
in_passage_repres, options.word_emb_dim, options.word_context_dim, options.fc_dim,
input_lengths=self.passage_lengths, scope_name="word_rcnn", reuse=True,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
input_dim += 2 * options.word_context_dim
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
# in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
# in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(mask, axis=-1))
# ========Bilateral Matching=====
(match_representation, match_dim) = match_utils.bilateral_match_func(in_question_repres, in_passage_repres,
self.question_lengths, self.passage_lengths, question_mask, mask, input_dim, is_training, options=options)
# ========Projection layer=====
# (output_representation, output_dim) = layer_utils.projection_layer2(match_representation, match_dim, num_classes, activation_func=tf.nn.relu)
return match_representation, match_dim
# ==================================================== BiMPM CHAR=================================================
def create_bimpm_char_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if char_vocab is not None:
in_question_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_question_sent_chars) # [batch_size, question_len, char_dim]
in_passage_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_passage_sent_chars) # [batch_size, passage_len, char_dim]
in_question_repres.append(in_question_char_repres)
in_passage_repres.append(in_passage_char_repres)
input_shape = tf.shape(self.in_question_sent_chars)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_sent_chars)
passage_len = input_shape[1]
input_dim += char_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
# ======rcnn context layer======
        if options.with_rcnn and False:  # rcnn context layer is intentionally disabled for the char graph
in_question_repres = layer_utils.my_rcnn_layer(
in_question_repres, options.char_emb_dim, options.char_context_dim, options.fc_dim,
input_lengths=self.question_sent_char_lengths, scope_name="char_rcnn", reuse=False,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
in_passage_repres = layer_utils.my_rcnn_layer(
in_passage_repres, options.char_emb_dim, options.char_context_dim, options.fc_dim,
input_lengths=self.passage_sent_char_lengths, scope_name="char_rcnn", reuse=True,
is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
input_dim += 2 * options.char_context_dim
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
mask = tf.sequence_mask(self.passage_sent_char_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_sent_char_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
# in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
# in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(mask, axis=-1))
# ========Bilateral Matching=====
(match_representation, match_dim) = match_utils.bilateral_match_func(in_question_repres, in_passage_repres,
self.question_sent_char_lengths, self.passage_sent_char_lengths, question_mask, mask, input_dim, is_training, options=options)
# ========Projection layer=====
# (output_representation, output_dim) = layer_utils.projection_layer2(match_representation, match_dim, num_classes, activation_func=tf.nn.relu)
return match_representation, match_dim
# ==================================================== MPCNN =====================================================
def create_mpcnn_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
# mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
# question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
# if options.with_highway:
# with tf.variable_scope("input_highway"):
# in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
# tf.get_variable_scope().reuse_variables()
# in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
in_question_repres = tf.expand_dims(in_question_repres, -1) # [batch_size, question_len, word_dim, 1]
in_passage_repres = tf.expand_dims(in_passage_repres, -1) # [batch_size, passage_len, word_dim, 1]
# ======Multi-perspective CNN Matching======
filter_sizes = options.filter_sizes_1d
num_filters = options.num_filters_1d
poolings = list([tf.reduce_max, tf.reduce_min, tf.reduce_mean])[:options.num_poolings]
W1 = [tf.get_variable("W1_%s" %i, initializer=tf.truncated_normal([filter_sizes[i], input_dim, 1, num_filters[0]], stddev=0.1), dtype=tf.float32) for i in range(len(filter_sizes))]
b1 = [tf.get_variable("b1_%s" %i, initializer=tf.constant(0.01, shape=[num_filters[0]]), dtype=tf.float32) for i in range(len(filter_sizes))]
W2 = [tf.get_variable("W2_%s" %i, initializer=tf.truncated_normal([filter_sizes[i], input_dim, 1, num_filters[1]], stddev=0.1), dtype=tf.float32) for i in range(len(filter_sizes)-1)]
b2 = [tf.get_variable("b2_%s" %i, initializer=tf.constant(0.01, shape=[num_filters[1], input_dim]), dtype=tf.float32) for i in range(len(filter_sizes)-1)]
sent1_blockA = layer_utils.build_block_A(in_question_repres, filter_sizes, poolings, W1, b1, is_training) # len(poolings) * len(filter_sizes) * [batch_size, 1, num_filters_A]
sent2_blockA = layer_utils.build_block_A(in_passage_repres, filter_sizes, poolings, W1, b1, is_training) # len(poolings) * len(filter_sizes) * [batch_size, 1, num_filters_A]
sent1_blockB = layer_utils.build_block_B(in_question_repres, filter_sizes, poolings, W2, b2, is_training) # (len(poolings))-1 * (len(filter_sizes)-1) * [batch_size, embed_size, num_filters_B]
sent2_blockB = layer_utils.build_block_B(in_passage_repres, filter_sizes, poolings, W2, b2, is_training) # (len(poolings))-1 * (len(filter_sizes)-1) * [batch_size, embed_size, num_filters_B]
(match_representation, match_dim) = match_utils.mpcnn_match_func(sent1_blockA, sent2_blockA,
sent1_blockB, sent2_blockB, poolings, filter_sizes, num_filters)
return match_representation, match_dim
# ==================================================== MPCNN CHAR=================================================
def create_mpcnn_char_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if char_vocab is not None:
in_question_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_question_sent_chars) # [batch_size, question_len, char_dim]
in_passage_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_passage_sent_chars) # [batch_size, passage_len, char_dim]
in_question_repres.append(in_question_char_repres)
in_passage_repres.append(in_passage_char_repres)
input_shape = tf.shape(self.in_question_sent_chars)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_sent_chars)
passage_len = input_shape[1]
input_dim += char_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
# mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
# question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
# if options.with_highway:
# with tf.variable_scope("input_highway"):
# in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
# tf.get_variable_scope().reuse_variables()
# in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
in_question_repres = tf.expand_dims(in_question_repres, -1) # [batch_size, question_len, word_dim, 1]
in_passage_repres = tf.expand_dims(in_passage_repres, -1) # [batch_size, passage_len, word_dim, 1]
# ======Multi-perspective CNN Matching======
filter_sizes = options.filter_sizes_1d
num_filters = options.num_filters_1d
        poolings = [tf.reduce_max, tf.reduce_min, tf.reduce_mean][:options.num_poolings]
W1 = [tf.get_variable("W1_%s" %i, initializer=tf.truncated_normal([filter_sizes[i], input_dim, 1, num_filters[0]], stddev=0.1), dtype=tf.float32) for i in range(len(filter_sizes))]
b1 = [tf.get_variable("b1_%s" %i, initializer=tf.constant(0.01, shape=[num_filters[0]]), dtype=tf.float32) for i in range(len(filter_sizes))]
W2 = [tf.get_variable("W2_%s" %i, initializer=tf.truncated_normal([filter_sizes[i], input_dim, 1, num_filters[1]], stddev=0.1), dtype=tf.float32) for i in range(len(filter_sizes)-1)]
b2 = [tf.get_variable("b2_%s" %i, initializer=tf.constant(0.01, shape=[num_filters[1], input_dim]), dtype=tf.float32) for i in range(len(filter_sizes)-1)]
sent1_blockA = layer_utils.build_block_A(in_question_repres, filter_sizes, poolings, W1, b1, is_training) # len(poolings) * len(filter_sizes) * [batch_size, 1, num_filters_A]
sent2_blockA = layer_utils.build_block_A(in_passage_repres, filter_sizes, poolings, W1, b1, is_training) # len(poolings) * len(filter_sizes) * [batch_size, 1, num_filters_A]
sent1_blockB = layer_utils.build_block_B(in_question_repres, filter_sizes, poolings, W2, b2, is_training) # (len(poolings))-1 * (len(filter_sizes)-1) * [batch_size, embed_size, num_filters_B]
sent2_blockB = layer_utils.build_block_B(in_passage_repres, filter_sizes, poolings, W2, b2, is_training) # (len(poolings))-1 * (len(filter_sizes)-1) * [batch_size, embed_size, num_filters_B]
(match_representation, match_dim) = match_utils.mpcnn_match_func(sent1_blockA, sent2_blockA,
sent1_blockB, sent2_blockB, poolings, filter_sizes, num_filters)
return match_representation, match_dim
# ==================================================== SiameseLSTM ===============================================
def create_siameseLSTM_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
Create a model based off of "Siamese Recurrent Architectures for Learning
Sentence Similarity" at AAAI '16. The model is super simple: just encode
both sentences with a LSTM, and then use the function
exp(-||sentence_one - sentence_two||) to get a probability that the
two sentences are semantically identical.
Create a model based off of the baseline (no inner-attention) in
"Learning Natural Language Inference using Bidirectional LSTM model
and Inner-Attention" (https://arxiv.org/abs/1605.09090).
The model is super simple: just encode
both sentences with a LSTM, and take the mean pool over the timesteps
as the sentence representation. Then, create a vector with the
by concatenating (||) the following:
sentence1|sentence1-sentence2|sentence1*sentence2|sentence2
Lastly, run this vector through a dense layer to (relu activation)
to get the logits, which are then softmaxed to get a probability
distribution [is_not_duplicate, is_duplicate].
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
passage_mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
# ======BiLSTM context layer======
        for i in range(options.context_layer_num): # support multiple context layers
with tf.variable_scope('bilstm-layer-{}'.format(i)):
# contextual lstm for both passage and question
in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
(question_context_representation_fw, question_context_representation_bw,
in_question_repres) = layer_utils.my_lstm_layer(
                    in_question_repres, options.context_lstm_dim, input_lengths=self.question_lengths, scope_name="context_represent",
reuse=False, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
# Encode the second sentence, using the same LSTM weights.
tf.get_variable_scope().reuse_variables()
in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(passage_mask, axis=-1))
(passage_context_representation_fw, passage_context_representation_bw,
in_passage_repres) = layer_utils.my_lstm_layer(
in_passage_repres, options.context_lstm_dim, input_lengths=self.passage_lengths, scope_name="context_represent",
reuse=True, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
if options.lstm_out_type == 'mean':
question_context_representation_fw = layer_utils.collect_mean_step_of_lstm(question_context_representation_fw)
question_context_representation_bw = layer_utils.collect_mean_step_of_lstm(question_context_representation_bw)
passage_context_representation_fw = layer_utils.collect_mean_step_of_lstm(passage_context_representation_fw)
passage_context_representation_bw = layer_utils.collect_mean_step_of_lstm(passage_context_representation_bw)
elif options.lstm_out_type == 'end':
question_context_representation_fw = layer_utils.collect_final_step_of_lstm(question_context_representation_fw, self.question_lengths - 1)
question_context_representation_bw = question_context_representation_bw[:, 0, :]
passage_context_representation_fw = layer_utils.collect_final_step_of_lstm(passage_context_representation_fw, self.passage_lengths - 1)
passage_context_representation_bw = passage_context_representation_bw[:, 0, :]
question_context_outputs = tf.concat(axis=1, values=[question_context_representation_fw, question_context_representation_bw])
passage_context_outputs = tf.concat(axis=1, values=[passage_context_representation_fw, passage_context_representation_bw])
(match_representation, match_dim) = match_utils.siameseLSTM_match_func(question_context_outputs, passage_context_outputs, options.context_lstm_dim)
return match_representation, match_dim
# ==================================================== SiameseCNN ================================================
def create_SiameseCNN_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
# if is_training:
# in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
# in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
passage_mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
in_question_repres = tf.expand_dims(in_question_repres, -1) # [batch_size, question_len, word_dim, 1]
in_passage_repres = tf.expand_dims(in_passage_repres, -1) # [batch_size, passage_len, word_dim, 1]
# ======CNN context layer======
        for i in range(options.context_layer_num): # support multiple context layers
with tf.variable_scope('cnn-layer-{}'.format(i)):
# contextual cnn for both passage and question
question_context_outputs = layer_utils.my_cnn_layer_1d(
                    in_question_repres, options.max_sent_length, input_dim, options.filter_sizes_1d, options.num_filters_1d, input_lengths=self.question_lengths, scope_name="context_represent",
reuse=False, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
                # Encode the second sentence, using the same CNN weights.
tf.get_variable_scope().reuse_variables()
passage_context_outputs = layer_utils.my_cnn_layer_1d(
                    in_passage_repres, options.max_sent_length, input_dim, options.filter_sizes_1d, options.num_filters_1d, input_lengths=self.passage_lengths, scope_name="context_represent",
reuse=True, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
(match_representation, match_dim) = match_utils.siameseCNN_match_func(question_context_outputs, passage_context_outputs, options.num_filters_1d[0]*len(options.filter_sizes_1d))
return match_representation, match_dim
# ==================================================== MatchPyramid ==============================================
def create_MatchPyramid_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
# if is_training:
# in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
# in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
passage_mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
match_representation = match_utils.matchpyramid_match_func(in_question_repres, in_passage_repres)
match_representation = tf.expand_dims(match_representation, -1)
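        # MatchPyramid treats the pairwise word-matching matrix as a 1-channel
        # image: expand_dims adds the channel axis so the stacked 2D
        # convolutions below can consume it (shape assumed to be
        # [batch_size, question_len, passage_len, 1]).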
# ======CNN context layer======
match_dim = options.max_sent_length
        for i in range(len(options.filter_sizes_2d)): # stack multiple 2D convolution layers
match_representation, match_dim = layer_utils.my_cnn_layer_2d(
match_representation, match_dim, options.filter_sizes_2d[i], options.num_filters_2d[i],
input_lengths=None, scope_name="context_represent",
reuse=False, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
match_dim = match_dim * match_dim * options.num_filters_2d[-1]
match_representation = tf.reshape(match_representation, shape=[-1, match_dim])
return match_representation, match_dim
# ==================================================== ESIM ======================================================
def create_esim_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
passage_mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Highway layer======
# if options.with_highway:
# with tf.variable_scope("input_highway"):
# in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
# tf.get_variable_scope().reuse_variables()
# in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
# ======Encoding BiLSTM context layer======
        for i in range(options.context_layer_num): # support multiple context layers
with tf.variable_scope('encoding-bilstm-layer-{}'.format(i)):
# contextual lstm for both passage and question
in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
(question_context_representation_fw, question_context_representation_bw,
in_question_repres) = layer_utils.my_lstm_layer(
in_question_repres, options.context_lstm_dim, input_lengths=self.question_lengths, scope_name="context_represent",
reuse=False, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
# Encode the second sentence, using the same LSTM weights.
tf.get_variable_scope().reuse_variables()
in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(passage_mask, axis=-1))
(passage_context_representation_fw, passage_context_representation_bw,
in_passage_repres) = layer_utils.my_lstm_layer(
in_passage_repres, options.context_lstm_dim, input_lengths=self.passage_lengths, scope_name="context_represent",
reuse=True, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
question_context_outputs = tf.concat(axis=2, values=[question_context_representation_fw, question_context_representation_bw]) # [batch_size, question_len, 2*context_lstm_dim]
passage_context_outputs = tf.concat(axis=2, values=[passage_context_representation_fw, passage_context_representation_bw]) # [batch_size, passage_len, 2*context_lstm_dim]
# ======Local Inference layer======
atten_value1, atten_value2, att_question_contexts, att_passage_contexts = layer_utils.inter_attention(question_context_outputs, passage_context_outputs, 2*options.context_lstm_dim, 2*options.context_lstm_dim,
scope_name="inter_attend", att_type=options.att_type, att_dim=options.inter_att_dim,
remove_diagnoal=False, mask1=question_mask, mask2=passage_mask, is_training=is_training, dropout_rate=options.dropout_rate)
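        # ESIM's local-inference "enhancement" (assuming match_utils follows
        # Chen et al., 2017): each token vector a is combined with its softly
        # aligned counterpart a~ as [a, a~, a - a~, a * a~], and the
        # inference-composition BiLSTM below re-encodes that sequence.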
(question_match_representation, question_match_dim) = match_utils.esim_match_func(question_context_outputs, att_question_contexts, options.context_lstm_dim)
(passage_match_representation, passage_match_dim) = match_utils.esim_match_func(passage_context_outputs, att_passage_contexts, options.context_lstm_dim)
# ======Inference BiLSTM context layer======
        for i in range(options.context_layer_num): # support multiple context layers
with tf.variable_scope('inference-bilstm-layer-{}'.format(i)):
# contextual lstm for both passage and question
question_match_representation = tf.multiply(question_match_representation, tf.expand_dims(question_mask, axis=-1))
(question_context_representation_fw, question_context_representation_bw,
question_match_representation) = layer_utils.my_lstm_layer(
question_match_representation, 4*options.context_lstm_dim, input_lengths=self.question_lengths, scope_name="context_represent",
reuse=False, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
# Encode the second sentence, using the same LSTM weights.
tf.get_variable_scope().reuse_variables()
passage_match_representation = tf.multiply(passage_match_representation, tf.expand_dims(passage_mask, axis=-1))
(passage_context_representation_fw, passage_context_representation_bw,
passage_match_representation) = layer_utils.my_lstm_layer(
passage_match_representation, 4*options.context_lstm_dim, input_lengths=self.passage_lengths, scope_name="context_represent",
reuse=True, is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
if options.lstm_out_type == 'mean':
question_context_representation_fw = layer_utils.collect_mean_step_of_lstm(question_context_representation_fw)
question_context_representation_bw = layer_utils.collect_mean_step_of_lstm(question_context_representation_bw)
passage_context_representation_fw = layer_utils.collect_mean_step_of_lstm(passage_context_representation_fw)
passage_context_representation_bw = layer_utils.collect_mean_step_of_lstm(passage_context_representation_bw)
elif options.lstm_out_type == 'end':
question_context_representation_fw = layer_utils.collect_final_step_of_lstm(question_context_representation_fw, self.question_lengths - 1)
question_context_representation_bw = question_context_representation_bw[:, 0, :]
passage_context_representation_fw = layer_utils.collect_final_step_of_lstm(passage_context_representation_fw, self.passage_lengths - 1)
passage_context_representation_bw = passage_context_representation_bw[:, 0, :]
question_outputs = tf.concat(axis=1, values=[question_context_representation_fw, question_context_representation_bw])
passage_outputs = tf.concat(axis=1, values=[passage_context_representation_fw, passage_context_representation_bw])
(match_representation, match_dim) = match_utils.siameseLSTM_match_func(question_outputs, passage_outputs, 4*options.context_lstm_dim)
return match_representation, match_dim
# ==================================================== DecomposableAttention =====================================
def create_DecAtt_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
# ======Intra-attention layer======
if options.with_intra_att:
with tf.variable_scope("intra_attend"):
atten_value, in_question_word_att_repres = layer_utils.intra_attention(in_question_word_repres, input_dim,
att_type=options.att_type, att_dim=options.intra_att_dim,
remove_diagnoal=False, mask1=question_mask, is_training=is_training, dropout_rate=options.dropout_rate) # [batch_size, question_len, intra_att_dim]
tf.get_variable_scope().reuse_variables()
atten_value, in_passage_word_att_repres = layer_utils.intra_attention(in_passage_word_repres, input_dim,
att_type=options.att_type, att_dim=options.intra_att_dim,
remove_diagnoal=False, mask1=mask, is_training=is_training, dropout_rate=options.dropout_rate) # [batch_size, passage_len, intra_att_dim]
in_question_repres.append(in_question_word_att_repres)
in_passage_repres.append(in_passage_word_att_repres)
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
# ======Highway layer======
if options.with_highway:
with tf.variable_scope("input_highway"):
in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
tf.get_variable_scope().reuse_variables()
in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
# ======Inter-attention layer======
atten_value1, atten_value2, beta, alpha = layer_utils.inter_attention(in_question_repres, in_passage_repres, input_dim, input_dim,
scope_name="inter_attend", att_type=options.att_type, att_dim=options.inter_att_dim,
remove_diagnoal=False, mask1=question_mask, mask2=mask, is_training=is_training, dropout_rate=options.dropout_rate)
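        # Following Parikh et al. (2016): beta is the passage content softly
        # aligned to each question token and alpha the question content aligned
        # to each passage token; "compare" below projects each [token, aligned]
        # pair, and "aggregate" sum-pools the comparisons over time.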
# ========Compare layer=====
with tf.variable_scope("compare"):
v1 = layer_utils.projection_layer(tf.concat(axis=2, values=[in_question_repres, beta]),
input_size=input_dim*2, output_size=options.compare_layer_dim)
tf.get_variable_scope().reuse_variables()
v2 = layer_utils.projection_layer(tf.concat(axis=2, values=[in_passage_repres, alpha]),
input_size=input_dim*2, output_size=options.compare_layer_dim)
# ========Aggregate layer=====
with tf.variable_scope("aggregate"):
v1_sum = tf.reduce_sum(v1, axis=1)
v2_sum = tf.reduce_sum(v2, axis=1)
match_representation = tf.concat(axis=1, values=[v1_sum, v2_sum])
match_dim = 2 * options.compare_layer_dim
return match_representation, match_dim
# ==================================================== MY MODEL ==================================================
def create_my_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
define your model here
"""
options = self.options
        raise NotImplementedError("define your model here and return "
                                  "(match_representation, match_dim)")
# ==================================================== Features ==================================================
def create_features(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
options = self.options
match_representation = self.in_question_passage_features
match_dim = options.feat_dim
return match_representation, match_dim
# ==================================================== Concat representation =====================================
def prediction_layer(self, num_classes, match_representation, match_dim, is_training, global_step):
options = self.options
#========Prediction Layer=========
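        # Two-layer MLP head: match_dim -> match_dim/2 (ReLU + dropout) -> num_classes.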
w_0 = tf.get_variable("w_0", [match_dim, int(match_dim/2)], dtype=tf.float32)
b_0 = tf.get_variable("b_0", [int(match_dim/2)], dtype=tf.float32)
w_1 = tf.get_variable("w_1", [int(match_dim/2), num_classes],dtype=tf.float32)
b_1 = tf.get_variable("b_1", [num_classes],dtype=tf.float32)
# if is_training: match_representation = tf.nn.dropout(match_representation, (1 - options.dropout_rate))
logits = tf.matmul(match_representation, w_0) + b_0
logits = tf.nn.relu(logits)
if is_training: logits = tf.nn.dropout(logits, (1 - options.dropout_rate))
logits = tf.matmul(logits, w_1) + b_1
# #========Prediction Layer=========
# w_0 = tf.get_variable("w_0", [match_dim, num_classes], dtype=tf.float32)
# b_0 = tf.get_variable("b_0", [num_classes], dtype=tf.float32)
# logits = tf.matmul(match_representation, w_0) + b_0
self.prob = tf.nn.softmax(logits)
self.predictions = tf.argmax(self.prob, 1)
gold_matrix = tf.one_hot(self.truth, num_classes, dtype=tf.float32)
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=gold_matrix))
if options.pos_weight > 0.0:
# self.loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=gold_matrix, pos_weight=options.pos_weight))
# class_weights = tf.constant([[options.pos_weight, 1-options.pos_weight]])
# weights = tf.reduce_sum(class_weights * gold_matrix, axis=1)
# unweighted_losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=gold_matrix)
# weighted_losses = unweighted_losses * weights
# self.loss = tf.reduce_mean(weighted_losses)
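            # NOTE: this variant scales the *logits* per class before the
            # softmax cross-entropy rather than weighting the per-example
            # losses (the commented-out approaches above); it biases the model
            # toward the positive class but is not standard weighted cross-entropy.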
class_weights = tf.constant([options.pos_weight, 1-options.pos_weight])
weighted_logits = tf.multiply(logits, class_weights)
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=weighted_logits, labels=gold_matrix))
correct = tf.nn.in_top_k(logits, self.truth, 1)
self.eval_correct = tf.reduce_sum(tf.cast(correct, tf.int32))
if not is_training: return
tvars = tf.trainable_variables()
        if self.options.lambda_l1 > 0.0:
            l1_loss = tf.add_n([tf.contrib.layers.l1_regularizer(self.options.lambda_l1)(v) for v in tvars if v.get_shape().ndims > 1])
            self.loss = self.loss + l1_loss
        if self.options.lambda_l2 > 0.0:
            # l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tvars if v.get_shape().ndims > 1])
            # l2_loss = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(1e-4), tf.trainable_variables())
            l2_loss = tf.add_n([tf.contrib.layers.l2_regularizer(self.options.lambda_l2)(v) for v in tvars if v.get_shape().ndims > 1])
            self.loss = self.loss + l2_loss
if self.options.optimize_type == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate=self.options.learning_rate)
elif self.options.optimize_type == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=self.options.learning_rate)
grads = layer_utils.compute_gradients(self.loss, tvars)
grads, _ = tf.clip_by_global_norm(grads, self.options.grad_clipper)
self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
# self.train_op = optimizer.apply_gradients(zip(grads, tvars))
if self.options.with_moving_average:
# Track the moving averages of all trainable variables.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
train_ops = [self.train_op, variables_averages_op]
self.train_op = tf.group(*train_ops)
|
import warnings
import numpy as np
from typing import Union, List
from sklearn.metrics import pairwise_distances
from gensim.models import KeyedVectors
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
from whatlies.language.common import SklearnTransformerMixin
class GensimLanguage(SklearnTransformerMixin):
"""
This object is used to lazily fetch [Embedding][whatlies.embedding.Embedding]s or
[EmbeddingSet][whatlies.embeddingset.EmbeddingSet]s from a keyed vector file.
These files are generated by [gensim](https://radimrehurek.com/gensim/models/word2vec.html).
    This object is meant for retrieval, not plotting.
    Important:
        The vectors are not shipped with this library; they must be downloaded
        or created upfront. A potential benefit of this is that you can train
        your own embeddings using gensim and visualise them using this library.
Here's a snippet that you can use to train your own (very limited) word2vec embeddings.
```
from gensim.test.utils import common_texts
from gensim.models import Word2Vec
model = Word2Vec(common_texts, size=10, window=5, min_count=1, workers=4)
model.wv.save("wordvectors.kv")
```
    Note that if a word is not available in the keyed vectors file we'll assume
    a zero vector. If you pass a sentence we'll add together the embedding
    vectors of the separate words.
    Arguments:
        keyedfile: name of the model to load; be sure it's downloaded or
            trained beforehand
**Usage**:
```python
    > from whatlies.language import GensimLanguage
    > lang = GensimLanguage("wordvectors.kv")
    > lang['computer']
    > lang[['computer', 'human', 'dog']]
```
"""
def __init__(self, keyedfile):
self.kv = KeyedVectors.load(keyedfile)
def __getitem__(self, query: Union[str, List[str]]):
"""
        Retrieve a single embedding or a set of embeddings.
Arguments:
query: single string or list of strings
**Usage**
```python
        > from whatlies.language import GensimLanguage
        > lang = GensimLanguage("wordvectors.kv")
        > lang['computer']
        > lang[['computer', 'human', 'dog']]
```
"""
if isinstance(query, str):
if " " in query:
return Embedding(
query, np.sum([self[q].vector for q in query.split(" ")], axis=0)
)
try:
vec = np.sum([self.kv[q] for q in query.split(" ")], axis=0)
except KeyError:
vec = np.zeros(self.kv.vector_size)
return Embedding(query, vec)
return EmbeddingSet(*[self[tok] for tok in query])
def _prepare_queries(self, lower):
queries = [w for w in self.kv.vocab.keys()]
if lower:
queries = [w for w in queries if w.lower() == w]
return queries
def _calculate_distances(self, emb, queries, metric):
vec = emb.vector
vector_matrix = np.array([self[w].vector for w in queries])
        # some vectors come back as NaN; zero them out for now (worth
        # investigating why that happens)
vector_matrix = np.array(
[np.zeros(v.shape) if np.any(np.isnan(v)) else v for v in vector_matrix]
)
return pairwise_distances(vector_matrix, vec.reshape(1, -1), metric=metric)
def score_similar(
self, emb: Union[str, Embedding], n: int = 10, metric="cosine", lower=False,
) -> List:
"""
        Retrieve a list of (Embedding, score) tuples that are the most similar to the passed query.
Arguments:
emb: query to use
n: the number of items you'd like to see returned
metric: metric to use to calculate distance, must be scipy or sklearn compatible
lower: only fetch lower case tokens
Returns:
            A list of ([Embedding][whatlies.embedding.Embedding], score) tuples.
"""
if isinstance(emb, str):
emb = self[emb]
queries = self._prepare_queries(lower=lower)
distances = self._calculate_distances(emb=emb, queries=queries, metric=metric)
by_similarity = sorted(zip(queries, distances), key=lambda z: z[1])
if len(queries) < n:
warnings.warn(
f"We could only find {len(queries)} feasible words. Consider changing `top_n` or `lower`",
UserWarning,
)
return [(self[q], float(d)) for q, d in by_similarity[:n]]
def embset_similar(
self, emb: Union[str, Embedding], n: int = 10, lower=False, metric="cosine",
) -> EmbeddingSet:
"""
        Retrieve an [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] with the embeddings most similar to the passed query.
Arguments:
emb: query to use
n: the number of items you'd like to see returned
metric: metric to use to calculate distance, must be scipy or sklearn compatible
lower: only fetch lower case tokens
Returns:
An [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] containing the similar embeddings.
"""
embs = [
w[0] for w in self.score_similar(emb=emb, n=n, lower=lower, metric=metric)
]
return EmbeddingSet({w.name: w for w in embs})
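# A minimal, self-contained demo of the class above (illustrative only; it
# assumes a gensim version where Word2Vec still takes `size`, matching the
# training snippet in the class docstring):
if __name__ == "__main__":
    from gensim.test.utils import common_texts
    from gensim.models import Word2Vec

    model = Word2Vec(common_texts, size=10, window=5, min_count=1, workers=4)
    model.wv.save("wordvectors.kv")

    lang = GensimLanguage("wordvectors.kv")
    print(lang["computer"])                        # a single Embedding
    print(lang[["computer", "human", "trees"]])    # an EmbeddingSet
    print(lang.score_similar("computer", n=3))     # [(Embedding, score), ...]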
|
import torch
from rich.progress import track
from rich import print
from .dataset import Dataset
from .samplers import BatchSampler, ParallelSampler
class FewShotDataset (Dataset):
def __init__(
self,
directory: str,
device: torch.device
):
super().__init__(directory, device)
def sample(self, images):
return self.collate([self.load(i) for i in images])
def get_iter(self, params):
"""
Returns an iterable over images in the dataset with the
specified parameters
"""
split = params.get('split', None)
k, n, m = params.get('k', 1), params.get('n', 1), params.get('m', 1)
self.tree.put_samplers({
self.class_level - 1: (
ParallelSampler,
self.collate,
{'batch_size': k}
),
self.class_level: (
BatchSampler,
self.sample,
{'batch_size': n + m}
)
})
if split:
self.split = self.tree.get(split)
return self
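# A hedged usage sketch (the directory path and split name are hypothetical,
# and the exact episode shapes depend on Dataset/tree internals not shown here;
# k/n/m are read off the samplers above as classes per episode and
# support/query images per class):
#
#     ds = FewShotDataset('data/omniglot', torch.device('cpu'))
#     it = ds.get_iter({'split': 'train', 'k': 5, 'n': 1, 'm': 1})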
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
from . import junit_output
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class ProgressIndicator(object):
def __init__(self):
self.runner = None
def Starting(self):
pass
def Done(self):
pass
def AboutToRun(self, test):
pass
def HasRun(self, test, has_unexpected_output):
pass
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
self.PrintFailureHeader(failed)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, test):
print 'Starting %s...' % test.GetLabel()
sys.stdout.flush()
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if test.output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif test.output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = test.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
if test.output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'remaining': (((self.runner.total - self.runner.remaining) * 100) //
self.runner.total),
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(remaining) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, junitout, junittestsuite):
self.progress_indicator = progress_indicator
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
[test.GetLabel()] + self.runner.context.mode_flags + test.flags,
test.duration,
fail_text)
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
|
from kapyo.service.auth_service import AuthService, KayoAuthService
from kapyo.dataclass.token import KayoAuthToken
from kapyo.dataclass.credentials import Credentials
from kapyo.helpers import retry
import logging
class AuthSession:
    def __init__(self, auth_service=None):
self.auth_token: KayoAuthToken = None
self.auth_service: AuthService
self._cached_credentials: Credentials = None
self._setup_service(auth_service)
    def _setup_service(self, service):
if service:
self.auth_service = service
else:
self.auth_service = KayoAuthService()
@property
def session_expired(self) -> bool:
if self.auth_token is not None:
return self.auth_token.is_token_expired
else:
return True
def set_login_details(self, username: str, password: str):
cred = Credentials.from_login_details(username, password)
self._cached_credentials = cred
def set_credentials_path(self, path):
cred = Credentials.from_credential_file(path)
self._cached_credentials = cred
def connect(self):
# Try to use current token if it exists
if self.auth_token is not None:
try:
self.auth_token = self._refresh()
logging.info("Refreshed Authentication Token")
except Exception as ex:
logging.error(ex)
raise ex
else:
# Try to use cached credentials if they exist
if self._cached_credentials:
try:
self.auth_token = self._login(self._cached_credentials)
logging.info("Logged in to Authentication Service with Cached Credentials")
except Exception as ex:
logging.error(ex)
raise ex
@retry
def _login(self, credentials: Credentials) -> KayoAuthToken:
# When session needs to get a new authentication token
return self.auth_service.login(credentials)
@retry
def _refresh(self) -> KayoAuthToken:
# when a session needs to refresh the current token
return self.auth_service.refresh_token(self.auth_token)
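# A hedged usage sketch (endpoints and credential formats are assumed to be
# handled by KayoAuthService; the username/password below are placeholders):
#
#     session = AuthSession()
#     session.set_login_details('user@example.com', 'secret')
#     session.connect()            # first call logs in with cached credentials
#     if session.session_expired:
#         session.connect()        # later calls refresh the existing token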
|
class Message(object):
RLGYM_HEADER_END_TOKEN = "RLGEHEADER"
RLGYM_BODY_END_TOKEN = "RLGEBODY"
RLGYM_NULL_MESSAGE_HEADER = "RLGNMH"
RLGYM_NULL_MESSAGE_BODY = "RLGNMB"
RLGYM_CONFIG_MESSAGE_HEADER = "RLGC"
RLGYM_STATE_MESSAGE_HEADER = "RLGSMH"
RLGYM_AGENT_ACTION_MESSAGE_HEADER = "RLGAAMH"
RLGYM_RESET_GAME_STATE_MESSAGE_HEADER = "RLGRGSMH"
RLGYM_AGENT_ACTION_IMMEDIATE_RESPONSE_MESSAGE_HEADER = "RLGAAIRMH"
RLGYM_REQUEST_LAST_BOT_INPUT_MESSAGE_HEADER = "RLGRLBIMH"
RLGYM_LAST_BOT_INPUT_MESSAGE_HEADER = "RLGLBIMH"
def __init__(self, header=None, body=None):
if header is None:
header = Message.RLGYM_NULL_MESSAGE_HEADER
if body is None:
body = Message.RLGYM_NULL_MESSAGE_BODY
self.body = body
self.header = header
def serialize(self):
return "{header}{header_token}{body}{body_token}\0".format(header=self.header,
header_token=Message.RLGYM_HEADER_END_TOKEN,
body=self.body,
body_token=Message.RLGYM_BODY_END_TOKEN)
def deserialize(self, serialized_str):
s = serialized_str
header = s[:s.find(Message.RLGYM_HEADER_END_TOKEN)]
start = s.find(Message.RLGYM_HEADER_END_TOKEN) + len(Message.RLGYM_HEADER_END_TOKEN)
end = s.find(Message.RLGYM_BODY_END_TOKEN)
body = s[start:end]
self.body = body
self.header = header
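# Minimal round-trip check of the framing above (illustrative only; the body
# string is a placeholder):
if __name__ == "__main__":
    msg = Message(header=Message.RLGYM_CONFIG_MESSAGE_HEADER, body="tick_skip=8")
    wire = msg.serialize()
    parsed = Message()
    parsed.deserialize(wire)
    assert (parsed.header, parsed.body) == (msg.header, msg.body)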
|
from typing import Dict
from boa3.model.builtin.interop.nativecontract import StdLibMethod
from boa3.model.variable import Variable
class JsonSerializeMethod(StdLibMethod):
def __init__(self):
from boa3.model.type.type import Type
identifier = 'json_serialize'
native_identifier = 'jsonSerialize'
args: Dict[str, Variable] = {'item': Variable(Type.any)}
super().__init__(identifier, native_identifier, args, return_type=Type.str)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='mnb')
crawl_bibleis(crawler, out, bible='MNBTBL')
|
import re
from typing import List
class TestIntroduction:
def test_a(self):
def solution(text: str):
            return bool(re.search("0xB0", text))
line1 = 'start address: 0xA0, func1 address: 0xC0'
line2 = 'end address: 0xFF, func2 address: 0xB0'
assert solution(line1) == False
assert solution(line2) == True
def test_b(self):
def solution(text: str):
return re.sub("5", "five", text)
ip = 'They ate 5 apples and 5 oranges'
assert solution(ip) == 'They ate five apples and five oranges'
def test_c(self):
def solution(text: str):
return re.sub("5{1}", "five", text)
ip = 'They ate 5 apples and 5 oranges'
assert solution(ip) == 'They ate five apples and five oranges'
def test_d(self):
def solution(items: List[str]):
return [w for w in items if not re.search("e", w)]
items = ['goal', 'new', 'user', 'sit', 'eat', 'dinner']
assert solution(items) == ['goal', 'sit']
def test_e(self):
def solution(text: str):
return re.sub("(?i)note", "X", text)
ip = 'This note should not be NoTeD'
assert solution(ip) == 'This X should not be XD'
def test_f(self):
        def solution(text: bytes):
return bool(re.search(b"at", text))
ip = b'tiger imp goat'
assert solution(ip) == True
def test_g(self):
def solution(text: str):
result = []
pat = re.compile("(?i)start")
            for line in text.split('\n'):
if not pat.search(line):
result.append(line)
return "\n".join(result)
para = """
good start
Start working on that
project you always wanted
stars are shining brightly
hi there
start and try to
finish the book
bye
"""
soln = """
project you always wanted
stars are shining brightly
hi there
finish the book
bye
"""
assert solution(para) == soln
def test_h(self):
def solution(items: List[str]):
return [w for w in items if re.search("(a|w)", w)]
items = ['goal', 'new', 'user', 'sit', 'eat', 'dinner']
assert solution(items) == ['goal', 'new', 'eat']
def test_i(self):
def solution(items: List[str]):
return [w for w in items if re.search("(?=.*e)(?=.*n)", w)]
items = ['goal', 'new', 'user', 'sit', 'eat', 'dinner']
assert solution(items) == ['new', 'dinner']
def test_j(self):
def solution(text: str):
return re.sub("0xC0", "0x1F", re.sub("0xA0", "0x7F", text))
ip = 'start address: 0xA0, func1 address: 0xC0'
assert solution(ip) == 'start address: 0x7F, func1 address: 0x1F'
class TestAnchors:
def test_a(self):
def solution(text: str):
pat = re.compile("^be")
return bool(pat.search(text))
line1 = 'be nice'
line2 = '"best!"'
line3 = 'better?'
line4 = 'oh no\nbear spotted'
assert solution(line1) == True
assert solution(line2) == False
assert solution(line3) == True
assert solution(line4) == False
def test_b(self):
def solution(text: str):
return re.sub(r"\bred\b", "brown", text)
words = 'bred red spread credible'
assert solution(words) == 'bred brown spread credible'
def test_c(self):
def solution(words: List[str]):
return [w for w in words if re.search(r"\B42\B", w)]
words = ['hi42bye', 'nice1423', 'bad42', 'cool_42a', 'fake4b']
assert solution(words) == ['hi42bye', 'nice1423', 'cool_42a']
def test_d(self):
def solution(items: List[str]):
return [e for e in items if re.search(r"(\Aden|ly\Z)", e)]
items = ['lovely', '1\ndentist', '2 lonely', 'eden', 'fly\n', 'dent']
assert solution(items) == ['lovely', '2 lonely', 'dent']
def test_e(self):
def solution(text: str):
return re.sub(r"(?m)^mall\b", "1234", text)
para = """\
\nball fall wall tall\
\nmall call ball pall\
\nwall mall ball fall\
\nmallet wallet malls"""
soln = """\
\nball fall wall tall\
\n1234 call ball pall\
\nwall mall ball fall\
\nmallet wallet malls"""
assert solution(para) == soln
def test_f(self):
def solution(items: List[str]):
return [e for e in items if re.search(r"(?m)(^den|ly$)", e)]
items = ['lovely', '1\ndentist',
'2 lonely', 'eden', 'fly\nfar', 'dent']
assert solution(items) == [
'lovely', '1\ndentist', '2 lonely', 'fly\nfar', 'dent']
def test_g(self):
def solution(items: List[str]):
return [e for e in items if re.search(r"(?i)\A12\nthree\Z", e)]
items = ['12\nthree\n', '12\nThree', '12\nthree\n4', '12\nthree']
assert solution(items) == ['12\nThree', '12\nthree']
def test_h(self):
def solution(items: List[str]):
return [re.sub(r"^hand\B", "X", e) for e in items]
items = ['handed', 'hand', 'handy', 'unhanded', 'handle', 'hand-2']
assert solution(items) == ['Xed', 'hand',
'Xy', 'unhanded', 'Xle', 'hand-2']
def test_i(self):
def solution(items: List[str]):
return [re.sub("e", "X", e) for e in items if re.search(r"\Ah", e)]
items = ['handed', 'hand', 'handy', 'unhanded', 'handle', 'hand-2']
assert solution(items) == ['handXd', 'hand',
'handy', 'handlX', 'hand-2']
class TestAlternationAndGrouping:
def test_a(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"(\Aden|ly\Z)", x)]
items = ['lovely', '1\ndentist', '2 lonely', 'eden', 'fly\n', 'dent']
assert solution(items) == ['lovely', '2 lonely', 'dent']
def test_b(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"(?m)(^den|ly$)", x)]
        items = ['lovely', '1\ndentist',
                 '2 lonely', 'eden', 'fly\nfar', 'dent']
assert solution(items) == [
'lovely', '1\ndentist', '2 lonely', 'fly\nfar', 'dent']
def test_c(self):
def solution(line: str):
pat = re.compile(r"re(mov||ceiv|fus)ed")
return pat.sub('X', line)
s1 = 'creed refuse removed read'
s2 = 'refused reed redo received'
assert solution(s1) == 'cX refuse X read'
assert solution(s2) == 'X X redo X'
def test_d(self):
def solution(line: str):
words = ['late', 'later', 'slated']
            # try longer alternatives first (reverse sort works here because
            # the longer words extend the shorter ones)
pat = re.compile("|".join(sorted(words, reverse=True)))
return pat.sub('A', line)
s1 = 'plate full of slate'
s2 = "slated for later, don't be late"
assert solution(s1) == 'pA full of sA'
assert solution(s2) == "A for A, don't be A"
def test_e(self):
def solution(items: List[str]):
words = ['late', 'later', 'slated']
pat = re.compile('|'.join(words))
return [x for x in items if pat.fullmatch(x)]
items = ['slate', 'later', 'plate', 'late', 'slates', 'slated ']
assert solution(items) == ['later', 'late']
class TestEscapingMetacharacters:
def test_a(self):
def solution(line: str):
return re.sub(r"\(9-2\)\*", "3", line)
str1 = '(9-2)*5+qty/3'
str2 = '(qty+4)/2-(9-2)*5+pq/4'
assert solution(str1) == '35+qty/3'
assert solution(str2) == '(qty+4)/2-35+pq/4'
def test_b(self):
def solution(line: str):
esc = re.escape(r"(4)\|")
pat = re.compile(r"\A" + esc + r"|" + esc + r"\Z")
return pat.sub("2", line)
s1 = r'2.3/(4)\|6 foo 5.3-(4)\|'
s2 = r'(4)\|42 - (4)\|3'
s3 = 'two - (4)\\|\n'
assert solution(s1) == '2.3/(4)\\|6 foo 5.3-2'
assert solution(s2) == '242 - (4)\\|3'
assert solution(s3) == 'two - (4)\\|\n'
def test_c(self):
def solution(line: str):
items = ['a.b', '3+n', r'x\y\z', 'qty||price', '{n}']
pat = re.compile('|'.join([re.escape(x) for x in items]))
return pat.sub('X', line)
s1 = '0a.bcd'
s2 = 'E{n}AMPLE'
s3 = r'43+n2 ax\y\ze'
assert solution(s1) == '0Xcd'
assert solution(s2) == 'EXAMPLE'
assert solution(s3) == '4X2 aXe'
def test_d(self):
def solution(line: str):
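            # note: '\b' in a normal (non-raw) string is the backspace
            # character \x08, not the regex word-boundary metacharacter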
return re.sub("\b", " ", line)
ip = '123\b456'
assert solution(ip) == '123 456'
def test_e(self):
def solution(line: str):
return re.sub(r"\\e", "e", line)
ip = r'th\er\e ar\e common asp\ects among th\e alt\ernations'
assert solution(
ip) == 'there are common aspects among the alternations'
def test_f(self):
def solution(line: str):
eqns = ['(a^b)', '(a/b)', '(a^b)+2']
pat = re.compile("|".join([re.escape(x)
for x in sorted(eqns, reverse=True)]))
return pat.sub("X", line)
ip = '3-(a^b)+2*(a^b)-(a/b)+3'
assert solution(ip) == '3-X*X-X+3'
class TestDotMetacharacterAndQuantifiers:
def test_a(self):
def solution(line: str):
return re.sub(r"42/+5", "8", line)
ip = 'a+42//5-c pressure*3+42/5-14256'
assert solution(ip) == 'a+8-c pressure*3+8-14256'
def test_b(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"^hand(.?|le)\Z", x)]
items = ['handed', 'hand', 'handled',
'handy', 'unhand', 'hands', 'handle']
assert solution(items) == ['hand', 'handy', 'hands', 'handle']
def test_c(self):
def solution(line: str):
return re.split(r"42/{1,2}5", line)
eqn1 = 'a+42//5-c'
eqn2 = 'pressure*3+42/5-14256'
eqn3 = 'r*42-5/3+42///5-42/53+a'
assert solution(eqn1) == ['a+', '-c']
assert solution(eqn2) == ['pressure*3+', '-14256']
assert solution(eqn3) == ['r*42-5/3+42///5-', '3+a']
def test_d(self):
def solution(line: str):
pat = re.compile(r"i.*")
return pat.sub("", line)
s1 = 'remove the special meaning of such constructs'
s2 = 'characters while constructing'
assert solution(s1) == 'remove the spec'
assert solution(s2) == 'characters wh'
def test_e(self):
def solution(line: str):
remove_parentheses = re.compile(r"\(.*?\)")
return remove_parentheses.sub('', line)
str1 = 'a+b(addition)'
str2 = 'a/b(division) + c%d(#modulo)'
str3 = 'Hi there(greeting). Nice day(a(b)'
assert solution(str1) == 'a+b'
assert solution(str2) == 'a/b + c%d'
assert solution(str3) == 'Hi there. Nice day'
def test_f(self):
def solution(line: str):
change = re.compile(r'inco|ink|ing|inter|int|ion|in')
            return change.sub('X', line)
words = 'plink incoming tint winter in caution sentient'
assert solution(words) == 'plX XmX tX wX X cautX sentient'
def test_g(self):
"'?' is same as {0,1}"
"'*' is same as {0,}"
"'+' is same as {1,}"
def test_h(self):
"Question: '(a*|b*)' is same as '(a | b)*'. True or False?"
answer = False
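        # illustration (added): '(a*|b*)' commits to a single letter's run, '(a|b)*' can mix letters
        assert re.fullmatch(r"(a*|b*)", "aabb") is None
        assert re.fullmatch(r"(a|b)*", "aabb") is not None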
def test_i(self):
def solution(line: str):
pat = re.compile(r"(?i)(test)(.+)\Z")
return pat.sub('', line)
s1 = 'this is a Test'
s2 = 'always test your RE for corner cases'
s3 = 'a TEST of skill tests?'
assert solution(s1) == "this is a Test"
assert solution(s2) == "always "
assert solution(s3) == "a "
def test_j(self):
def solution(words: List[str]):
pats = (r"^s", r"e", r"t")
return [w for w in words if all([re.search(p, w) for p in pats])]
words = ['sequoia', 'subtle', 'exhibit',
'asset', 'sets', 'tests', 'site']
assert solution(words) == ['subtle', 'sets', 'site']
def test_k(self):
def solution(words: List[str]):
return [w for w in words if re.search(".{6,}", w)]
words = ['sequoia', 'subtle', 'exhibit',
'asset', 'sets', 'tests', 'site']
assert solution(words) == ['sequoia', 'subtle', 'exhibit']
def test_l(self):
def solution(words: List[str]):
pat = re.compile(r"^(s|t)(.{,5})$")
return [w for w in words if pat.search(w)]
words = ['sequoia', 'subtle', 'exhibit',
'asset', 'sets', 'tests', 'site']
assert solution(words) == ['subtle', 'sets', 'tests', 'site']
def test_m(self):
def solution(line: str):
return re.sub(r'<.+?>', '', line)
ip = 'a<apple> 1<> b<bye> 2<> c<cat>'
# Not possible easily without character classes!
assert solution(ip) != 'a 1<> b 2<> c'
def test_n(self):
def solution(line: str):
pat = re.compile(r" +// +")
return pat.split(line, maxsplit=1)
s1 = 'go there // "this // that"'
s2 = 'a//b // c//d e//f // 4//5'
s3 = '42// hi//bye//see // carefully'
assert solution(s1) == ['go there', '"this // that"']
assert solution(s2) == ['a//b', 'c//d e//f // 4//5']
assert solution(s3) == ['42// hi//bye//see', 'carefully']
class TestWorkingWithMatchedPortions:
def test_a(self):
def solution(line: str):
pat = re.compile(r"is.*t")
return pat.search(line).group()
str1 = 'This the biggest fruit you have seen?'
str2 = 'Your mission is to read and practice consistently'
assert solution(str1) == 'is the biggest fruit'
assert solution(str2) == 'ission is to read and practice consistent'
def test_b(self):
def solution(line: str):
pat = re.compile(r"is|the|was|to")
return pat.search(line).span()[0]
s1 = 'match after the last newline character'
s2 = 'and then you want to test'
s3 = 'this is good bye then'
s4 = 'who was there to see?'
assert solution(s1) == 12
assert solution(s2) == 4
assert solution(s3) == 2
assert solution(s4) == 4
def test_c(self):
def solution(line: str):
pat = re.compile(r"is|the|was|to")
*_, match = pat.finditer(line)
return match.span()[0]
s1 = 'match after the last newline character'
s2 = 'and then you want to test'
s3 = 'this is good bye then'
s4 = 'who was there to see?'
assert solution(s1) == 12
assert solution(s2) == 18
assert solution(s3) == 17
assert solution(s4) == 14
def test_d(self):
def solution(line: str):
return re.search(r":(.*)", line).group(1)
ip = 'fruits:apple, mango, guava, blueberry'
assert solution(ip) == 'apple, mango, guava, blueberry'
def test_e(self):
def solution(line: str):
import math
pat = re.compile(r"(.*-)(.*)")
return pat.sub(lambda x: f"{x[1]}{math.log(float(x[2]))}", line)
s1 = 'first-3.14'
s2 = 'next-123'
assert solution(s1) == 'first-1.144222799920162'
assert solution(s2) == 'next-4.812184355372417'
def test_f(self):
def solution(line: str):
word_map = {"par": "spar", "spare": "extra", "park": "garden"}
pat = re.compile(r'spare|park|par')
return pat.sub(lambda m: word_map[m[0]], line)
str1 = 'apartment has a park'
str2 = 'do you have a spare cable'
str3 = 'write a parser'
assert solution(str1) == 'aspartment has a garden'
assert solution(str2) == 'do you have a extra cable'
assert solution(str3) == 'write a sparser'
def test_g(self):
def solution(line: str):
return re.findall(r"\((.*?)\)", line)
ip = 'another (way) to reuse (portion) matched (by) capture groups'
assert solution(ip) == ['way', 'portion', 'by']
def test_h(self):
def solution(line: str):
return re.findall(r"<.+?>", line)
ip = 'a<apple> 1<> b<bye> 2<> c<cat>'
assert solution(ip) == ['<apple>', '<> b<bye>', '<> c<cat>']
def test_i(self):
def solution(line: str):
pat = re.compile(r"(.+?),(.+?) ")
return pat.findall(line)
row1 = '-2,5 4,+3 +42,-53 4356246,-357532354 '
row2 = '1.32,-3.14 634,5.63 63.3e3,9907809345343.235 '
assert solution(row1) == [('-2', '5'), ('4', '+3'),
('+42', '-53'), ('4356246', '-357532354')]
assert solution(row2) == [('1.32', '-3.14'),
('634', '5.63'), ('63.3e3', '9907809345343.235')]
def test_j(self):
def solution1(line: str):
result = re.findall(r"(.+?),(.+?) ", line)
sums = [sum(map(int, pair)) for pair in result]
return sums
def solution2(line: str):
result = re.findall(r"(.+?),(.+?) ", line)
sums = [sum(map(float, pair)) for pair in result]
return sums
row1 = '-2,5 4,+3 +42,-53 4356246,-357532354 '
row2 = '1.32,-3.14 634,5.63 63.3e3,9907809345343.235 '
assert solution1(row1) == [3, 7, -11, -353176108]
assert solution2(row2) == [-1.82, 639.63, 9907809408643.234]
def test_k(self):
def solution(line: str):
return re.split(r":.+?-(.+?);", line)
ip = '42:no-output;1000:car-truck;SQEX49801'
assert solution(ip) == ['42', 'output',
'1000', 'truck', 'SQEX49801']
def test_l(self):
def solution(words: List[str]):
return [(w, w.count("t")) for w in words]
words = ['sequoia', 'attest', 'tattletale', 'asset']
assert solution(words) == [
('sequoia', 0), ('attest', 3), ('tattletale', 4), ('asset', 1)]
def test_m(self):
def solution(line: str):
return [m.groups(default="NA") for m in re.finditer(r'(.{4})(..)?:', line)]
ip = 'TWXA42:JWPA:NTED01:'
assert solution(ip) == [('TWXA', '42'), ('JWPA', 'NA'), ('NTED', '01')]
def test_n(self):
def solution(line: str):
pat = re.compile(r"(.*?):(.*?),")
return {m[1]: m[2] for m in pat.finditer(line)}
row1 = 'name:rohan,maths:75,phy:89,'
row2 = 'name:rose,maths:88,phy:92,'
assert solution(row1) == {'name': 'rohan', 'maths': '75', 'phy': '89'}
assert solution(row2) == {'name': 'rose', 'maths': '88', 'phy': '92'}
class TestCharacterClass:
def test_a(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"^hand.*([sy]|le)", x)]
items = ['-handy', 'hand', 'handy', 'unhand', 'hands', 'handle']
assert solution(items) == ['handy', 'hands', 'handle']
def test_b(self):
def solution(line: str):
return re.sub(r"\bre[ea]?d\b", "X", line)
ip = 'redo red credible :read: rod reed'
assert solution(ip) == 'redo X credible :X: rod X'
def test_c(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"[ei].*[ln]", x)]
words = ['surrender', 'unicorn', 'newer',
'door', 'empty', 'eel', 'pest']
assert solution(words) == ['surrender', 'unicorn', 'eel']
def test_d(self):
def solution(items: List[str]):
pats = (r"[ei]", r"[ln]")
return [x for x in items if all(re.search(p, x) for p in pats)]
words = ['surrender', 'unicorn', 'newer',
'door', 'empty', 'eel', 'pest']
assert solution(words) == ['surrender', 'unicorn', 'newer', 'eel']
def test_e(self):
def solution(line: str):
hex_seq = re.compile(r"\b(0x)?[\da-f]+\b", flags=re.I)
return [m[0] for m in hex_seq.finditer(line)]
str1 = '128A foo 0xfe32 34 0xbar'
str2 = '0XDEADBEEF place 0x0ff1ce bad'
assert solution(str1) == ['128A', '0xfe32', '34']
assert solution(str2) == ['0XDEADBEEF', '0x0ff1ce', 'bad']
def test_f(self):
def solution(line: str):
remove_parentheses = re.compile(r"\([^()]*\)")
return remove_parentheses.sub('', line)
str1 = 'def factorial()'
str2 = 'a/b(division) + c%d(#modulo) - (e+(j/k-3)*4)'
str3 = 'Hi there(greeting). Nice day(a(b)'
assert solution(str1) == 'def factorial'
assert solution(str2) == 'a/b + c%d - (e+*4)'
assert solution(str3) == 'Hi there. Nice day(a'
def test_g(self):
def solution(words: List[str]):
            return [w for w in words if re.search(r"^[^epu]", w)]
words = ['surrender', 'unicorn', 'newer',
'door', 'empty', 'eel', 'pest']
assert solution(words) == ['surrender', 'newer', 'door']
def test_h(self):
def solution(words: List[str]):
            return [w for w in words if not re.search(r"([uw]|ee|-)", w)]
words = ['p-t', 'you', 'tea', 'heel', 'owe', 'new', 'reed', 'ear']
assert solution(words) == ['tea', 'ear']
def test_i(self):
def solution(line: str):
pat = re.compile(r"(,[^,]*){3}\Z")
return pat.sub(",WHTSZ323", line)
row1 = '(2),kite,12,,D,C,,'
row2 = 'hi,bye,sun,moon'
assert solution(row1) == '(2),kite,12,,D,WHTSZ323'
assert solution(row2) == 'hi,WHTSZ323'
def test_j(self):
def solution(line: str):
pat = re.compile(r"[\d\s]+")
return pat.split(line)
str1 = 'lion \t Ink32onion Nice'
str2 = '**1\f2\n3star\t7 77\r**'
assert solution(str1) == ['lion', 'Ink', 'onion', 'Nice']
assert solution(str2) == ['**', 'star', '**']
def test_k(self):
def solution(line: str):
return re.sub(r"<[a-zA-Z]+>", "", line)
ip = 'a<apple> 1<> b<bye> 2<> c<cat>'
assert solution(ip) == 'a 1<> b 2<> c'
def test_l(self):
"""
'\b[a-z](on | no)[a-z]\b' is same as '\b[a-z][on]{2}[a-z]\b'.
True or False?
"""
def solution():
return False
assert solution() == False
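        # counterexample (added for illustration): '[on]{2}' also matches 'oo' and 'nn'
        assert re.search(r"\b[a-z][on]{2}[a-z]\b", 'moon')
        assert not re.search(r"\b[a-z](on|no)[a-z]\b", 'moon')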
def test_m(self):
def solution(items: List[str]):
            def filter_gt_624(item: str):
return any(int(m[0]) > 624 for m in re.finditer(r'\d+', item))
return [e for e in items if filter_gt_624(e)]
items = ['hi0000432abcd', 'car00625',
'42_624 0512', '3.14 96 2 foo1234baz']
assert solution(items) == ['car00625', '3.14 96 2 foo1234baz']
def test_n(self):
def solution(line: str):
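            # repeatedly strip innermost '{...}' pairs; the number of passes equals the
            # nesting depth, and any leftover brace means the braces were unbalanced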
count = 0
while True:
(line, n_brace) = re.subn(r"\{[^{}]*\}", "", line)
if n_brace == 0:
break
count += 1
if re.search(r"[{}]", line):
return -1
return count
str1 = 'a*b'
str2 = '}a+b{'
str3 = 'a*b+{}'
str4 = '{{a+2}*{b+c}+e}'
str5 = '{{a+2}*{b+{c*d}}+e}'
str6 = '{{a+2}*{\n{b+{c*d}}+e*d}}'
str7 = 'a*{b+c*{e*3.14}}}'
assert solution(str1) == 0
assert solution(str2) == -1
assert solution(str3) == 1
assert solution(str4) == 2
assert solution(str5) == 3
assert solution(str6) == 4
assert solution(str7) == -1
def test_o(self):
def solution(line: str):
return re.split(r"\s+", line.strip())
ip = ' \t\r so pole\t\t\t\n\nlit in to \r\n\v\f '
assert solution(ip) == ['so', 'pole', 'lit', 'in', 'to']
def test_p(self):
def solution1(line: str):
return re.split(r"\W+", line)
def solution2(line: str):
return re.split(r"(\W+)", line)
ip = 'price_42 roast^\t\n^-ice==cat\neast'
assert solution1(ip) == ['price_42', 'roast', 'ice', 'cat', 'east']
assert solution2(ip) == ['price_42', ' ', 'roast',
'^\t\n^-', 'ice', '==', 'cat', '\n', 'east']
def test_q(self):
def solution(items: List[str]):
return [x for x in items if re.search(r"^\s*[^#\s]", x)]
items = [' #comment', '\t\napple #42',
'#oops', 'sure', 'no#1', '\t\r\f']
assert solution(items) == ['\t\napple #42', 'sure', 'no#1']
if __name__ == "__main__":
import pytest
pytest.main(["-v", f"{__file__}"])
|
import argparse
import cv2
import imutils
import operator
import time
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the image file")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the colors in the HSV color space
lower = {'red': (166, 84, 141), 'green': (66, 122, 129), 'blue': (97, 100, 117), 'yellow': (23, 59, 119),
'orange': (0, 50, 80), 'black': (0, 0, 0), 'white': (0, 0, 215), 'gray': (0, 0, 88),
'silver': (0, 0, 152)}
upper = {'red': (186, 255, 255), 'green': (86, 255, 255), 'blue': (117, 255, 255), 'yellow': (54, 255, 255),
'orange': (20, 255, 255), 'black': (10, 40, 40), 'white': (10, 40, 295), 'gray': (10, 40, 168),
'silver': (10, 40, 232)}
# define standard colors for circle around the object
colors = {'red': (0, 0, 255), 'green': (0, 255, 0), 'blue': (255, 0, 0), 'yellow': (0, 255, 217),
'orange': (0, 140, 255), 'black': (0, 0, 0), 'white': (255, 255, 255), 'gray': (128, 128, 128),
'silver': (192, 192, 192)}
result = {}
img = cv2.imread(args["image"])
img = imutils.resize(img, width=300)
blurred = cv2.GaussianBlur(img, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
start = time.time()
for key, val in colors.items():
mask = cv2.inRange(hsv, lower[key], upper[key])
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
if radius > 0.5:
result[key] = radius
stop = time.time()
if result:
    print(max(result.items(), key=operator.itemgetter(1))[0])
else:
    print("no color detected")
print("processing time:", stop - start)
|
from abc import ABC, abstractmethod
import numpy as np
class CollisionObject(ABC):
@abstractmethod
def isCollision(self, target):
pass
class CollisionBox(CollisionObject):
def __init__(self, location, lengths):
self.location = np.asarray(location)
self.lengths = np.asarray(lengths)
self.ndim = self.location.shape[0]
    def isCollision(self, target):
        llimit = self.location
        ulimit = self.location + self.lengths
        return bool(np.all(target >= llimit) and np.all(target <= ulimit))
class CollisionSphere(CollisionObject):
def __init__(self, location, radius):
self.location = np.asarray(location)
self.radius = radius
    def isCollision(self, target):
        sphere_eqn = sum((target - self.location)**2)
        return bool(sphere_eqn <= self.radius**2)
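# minimal usage sketch (added; not part of the original module):
if __name__ == '__main__':
    box = CollisionBox([0, 0, 0], [2, 2, 2])
    sphere = CollisionSphere([5, 5, 5], 1.5)
    print(box.isCollision(np.array([1, 1, 1])))            # True: point inside the box
    print(sphere.isCollision(np.array([5.0, 5.0, 6.0])))   # True: point on the sphere surface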
|
# -*- coding: utf-8 -*-
import datetime
import os
import sys
sys.path.append(os.path.abspath("../concurrency_safe_shelve"))
sys.path.append(os.path.abspath("../utils"))
from . import auth_server_blueprint
from .constant import _access_token
from .constant import _auth_code_db_name
from concurrency_safe_shelve import open_thread_safe_shelf
from flask import current_app
from flask import jsonify
from flask import request
from flask import Response
from flask_api import status as StatusCode
from utils import gen_auth_code
from utils import gen_auth_code_hash
from utils import gen_datetime_range
from utils import gen_salt
from utils import validate_date_format
from utils import validate_email_format
# endpoint for generating activation (auth) codes
@auth_server_blueprint.route("/api/v1/authcode", methods=["POST"])
def new_auth_code():
access_token = request.headers.get("x-auth-token", "")
if len(access_token) != 10 or access_token != _access_token:
return Response(
"Invalid x-auth-token",
status=StatusCode.HTTP_401_UNAUTHORIZED,
)
payload = request.get_json()
user_email = payload.get("user_email", "")
if not validate_email_format(user_email):
current_app.logger.error("invalid user_email: {}".format(user_email))
return Response(
"Invalid user_email",
status=StatusCode.HTTP_400_BAD_REQUEST,
)
expired_date = payload.get("expired_date", "")
if not validate_date_format(expired_date):
current_app.logger.error("invalid expired_date: {}".format(expired_date))
return Response(
"Invalid expired_date",
status=StatusCode.HTTP_400_BAD_REQUEST,
)
auth_code = ""
with open_thread_safe_shelf("./{}".format(_auth_code_db_name), flag="c") as db:
        while True:
            auth_code = gen_auth_code()
            if auth_code not in db:
                break
salt = gen_salt()
today = datetime.datetime.now().strftime("%Y-%m-%d")
datetime_range = gen_datetime_range(today, expired_date)
auth_code_hash_list = [gen_auth_code_hash(auth_code, salt, user_email, date) for date in datetime_range]
db[auth_code] = {
"user_email": user_email,
"expired_date": expired_date,
"salt": salt,
"hash_list": auth_code_hash_list,
}
response_object = {
"auth_code": auth_code,
"user_email": user_email,
"expired_date": expired_date,
}
current_app.logger.info("generate new auth code: {} for {}, which will be expired at {}".format(
auth_code, user_email, expired_date))
return jsonify(response_object)
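# example request (illustrative; host, port and token value are assumptions):
#   curl -X POST http://localhost:5000/api/v1/authcode \
#     -H 'x-auth-token: <10-char token>' \
#     -H 'Content-Type: application/json' \
#     -d '{"user_email": "user@example.com", "expired_date": "2030-01-01"}'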
|
"""
2523 : 별 찍기 - 13
URL : https://www.acmicpc.net/problem/2523
Input :
3
Output :
*
**
***
**
*
"""
n = int(input())
for i in range(1, n + 1):
    print('*' * i)
for i in range(n - 1, 0, -1):
    print('*' * i)
|
# Github Service Test
import unittest
import GitHubService as gs
import os
import requests
class TestGithubService(unittest.TestCase):
def test_get_repo_url(self):
headers={'Authorization':'token 5c4d81ec05cb82053d9fd0c0519120fe3eed17be',
'User-Agent':'https://api.github.com/meta',
'Content-Type':'application/json'}
pull_req = requests.get('https://api.github.com/search/issues?q=type:pr+language:java+state:closed+is:merged+status:success+repo:dlew/joda-time-android',headers=headers)
url = gs.get_repo_url(pull_req)
self.assertEqual(url,'https://github.com/dlew/joda-time-android')
"""def test_process(self):
sha_list = ['3b45f2cad666726f06f14347026cc25a05051246', '65e1cae2decfc1b86481eb40607eac0e23b64b0c', '0b7343dd345f314d02b2c21ea69b84a02ac2c94f', '8b06ea52ab2915725635a5fb34c1468f9787d422', '86ee59c23202692a3d4814e1a7df119c8a1ce430', 'ecad8647035c39dfc068c79c3d026b5fd8c7a50c', 'ac57d67f45c1017b24117ec0dc491787df936a42', '960d2ea6f70d1f38e269e711aff3877c4bb5430c', '87cb33f69b0d612e029ee454253f3a7cbce05fb6', '915a1f3eb0d251cfa9c432cd0a582278f698cd84', '696ed86affe8e04a681d05077877f3f5b8191f09', 'e0b8f6f85059779f80f166305574759cb28488ab', '991ba320a6b2cde1b892e87ebca77be249884560', '522fbc035fe319fbf6fad58579fdf36491babe18', '40e5cc628a1a3f5c53ed006f093fb7cddffd5d54', '7a13012855bcdebb4962e102ed03f08b44a3d095', 'c3b102e547535a5410250b264f35020c176d6e1b', '3f9a5a254ec40b1073f48711f798df5eae3ec45c', '05a9c030c0b8b515ae9ddc71b0a1f0387e49ee3c', '143213483c6e1ba13fa8c1bb2cc5008b7414960e', '678fa93838dd149eac61a71f1a5dee58ddce0584', 'b2607010b4adacb47496fe1981e875fef24d044d', 'b2dd36f607fa0faa69acf6351067077506553a68', 'af6973884f643712a445bcafd0aaee5c8bf2bb2f', '3894a0bb6cdf397ef46fea64fa5516f562bbb8b9', '49f84349117f1370be4a63800f5ca4f3dab07ebc', '7a7ba871dc1250765b1e1804d9abf642497ee6b6', '36f5947cd78a66eeec0b1de51c92100c94b345ad', 'f62ae456b03eea52addb95d8e3bbdf0277632d65', '080571651550af8798e0b36c41043a52121bc763', '9f797b3009e107835717f1afb6cce054ae457dc9', 'e9a541259b8d24726ad26f13df350c5467d63b3b', '9667878b5f3dacb8a9bd1a7c7d95fa13a153b2d5', '4c4cbd41cf8a1310304bd16a2b40acb18ee7a05d', 'f7825f6c9942b0d7e07f500bc8f5b4ebf227a8af', '94fdefee3a65f8a67335c919440c1a8962681c51', 'b61b64073a01edd5fd1154a5993ea028732d4229', '90570f809be327fae1d047b2c621ebaca689693f', 'ed37d5cdc76581a6db2c8dbd00e4b779e8b84eaa', 'e046007db760fcd2c071ba66369a27c1813f59f5', '03061613803ff475ca22d40627f2ea754f6652fb', '6694d742a370e0f181530734481284de8d5dd8ef', '3defc1b36a0f73b4a0be6944846ee02d58e8c86f', '9677cc4eea8186da314cdd2eeb259c513880e1fd', 'fc690869db2ff03e474e2d5112842f40497345fb', '37c9b922a76fcf84b68909a063558dd72c2b96c1', '93c54a2ee563ac1fc6337dd120b667f05b6f72a9', '5a930f6a268f968746662b8e2a91348c4880f063', '235fe6a7ca904be70ebc7e01876b6af456e80d44', '5373214b70db93181b14128ea65b317718940fbf', '2a4edad23ae5ce9af394826ad9354bd36a9e03ab', 'c1ea04e00296273fdc98429913d9c2389b3cde17', '10c0879d3b8fc70fea763ec83a69ecef79c3c009', '620f408688169abea35490263edd0085946e4c9a', '2456b9f4b0a107916aaba97e2e8abab9954fb418', '144db43ad4af09a708ddf53516a5f982d93ceecd', 'f7c396b0fdcb222cb055a36c38d77b6f07691f68', '04f2be64c668762bd505828c730e56d7f48ff9a9', '8312e09f6eab6ca37f11e7f0c534cbc67542d967', '176bb85f404b7140bdec5fda285505f341a079a0', '7e7a0c83f55c0243191d3b5453a3b3e73a3f2ef5', '4dbf646b37ddaec5da26a3892c1374e55b8e4aaa', '506d316529bba49c4ca88897844b0f4b578a832b', '7cf897a051373919da850bf9cb2d87357e61573c', '8a9162f54202a1d086623724dcc9c4c56146f2d0', 'fb64d69347d145a8b1cd683d0aa8c3d728f4408c', '5c953d7d005551352d5c7b1d76d77f18037a5def', '14279278cd2c57942af6effd2e0126badfc74133', '933c84ff1c82ee4c0e3faa147f6160380e2bb817', '3634b3338cc2fab6cfa147c9475b986165471b73', 'd37922bf823900f9ce319736927dfb2203808ded', '87ee97a1126b170b764eca90787ca221161706f8', 'c45e9a1faf24dd58929b957b7b3e3f4c8ca8de04', '6e0bf59e5a7e2a768f0ddc923178e8153ba3c0ce', 'a2a13758e0abfaa23f74f473314214f3f1bec7cf', 'eeaf7e4b7c1b60b4ab251a5eb3ce2847c59b36cd', '89bfaf876eb97aaefd4f632d0fe130983c33b638']
gs.clone_repo('https://github.com/iluwatar/java-design-patterns.git','java-design-patterns',1)
gs.clone_repo('https://github.com/iluwatar/java-design-patterns.git','java-design-patterns',2)
num = gs.process(sha_list,'java-design-patterns','D:\\code\\java-design-patterns\\1','D:\\code\\java-design-patterns\\2',3)
if num < 1:
self.fail('Number of projects cannot be less than 1')"""
def test_clone_repo(self):
dir = gs.clone_repo('https://github.com/dlew/joda-time-android.git','joda-time-android',1)
if not os.path.isdir(dir):
self.fail("Repo Clone test failed")
def test_get_pull_req(self):
headers={'Authorization':'token 5c4d81ec05cb82053d9fd0c0519120fe3eed17be',
'User-Agent':'https://api.github.com/meta',
'Content-Type':'application/json'}
pull_req = requests.get('https://api.github.com/search/issues?q=type:pr+language:java+state:closed+is:merged+status:success+repo:dlew/joda-time-android',headers=headers)
commit_list = gs.get_pull_req(pull_req)
if len(commit_list) < 1:
self.fail('Commit list for pull request is empty')
if __name__ == '__main__':
unittest.main()
|
'''
Good point:
1. Clones the graph in a single pass (note: q.pop() makes the traversal
   depth-first; use collections.deque and popleft() for a true BFS)
Bad point:
1. the if-else branching actually costs time; avoid it
2. not clear enough
'''
# Definition for a Node.
class Node:
    def __init__(self, val=0, neighbors=None):
        self.val = val
        # avoid the shared mutable-default list pitfall: each node gets its own list
        self.neighbors = neighbors if neighbors is not None else []
class Solution:
def cloneGraph(self, node: 'Node') -> 'Node':
if node is None:
return None
root = node
mapping = {}
q = [node]
while q:
node = q.pop()
if node not in mapping:
mapping[node] = Node(node.val)
new_node = mapping[node]
for neighbor in node.neighbors:
to_add = False
if neighbor not in mapping:
mapping[neighbor] = Node(neighbor.val)
to_add = True
new_node.neighbors.append(mapping[neighbor])
if to_add:
q.append(neighbor)
return mapping[root]
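# quick self-check (added for illustration): clone a two-node cycle
if __name__ == '__main__':
    a, b = Node(1), Node(2)
    a.neighbors.append(b)
    b.neighbors.append(a)
    clone = Solution().cloneGraph(a)
    assert clone is not a and clone.val == 1
    assert clone.neighbors[0].val == 2 and clone.neighbors[0].neighbors[0] is clone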
|
# Compass Student Export to Separate CSV for Barcode creation. Library Use
# Justin Clarke <justin@dev.justinclarke.me>
'''import csv
f=open('file.csv')
datareader = csv.reader(f, delimiter=',', quotechar='"')
headers = datareader.next()
datalist=[]
for row in datareader:
data={}
for i in range(4):
data[headers[i]] = row[i]
datalist.append(data)
for data in datalist:
print(data)
'''
inFile = 'students.csv'
#inFile = $1
import os
import csv
import labels
f = open(inFile)
datareader = csv.reader(f, delimiter=',', quotechar='"')
headers = next(datareader)
datalist=[]
forms = {}
classForms = {}
for row in datareader:
data={}
for i in range(len(headers)):
data[headers[i]] = row[i]
    # add the student to their FormGroup, creating the FormGroup on first sight
    classForms.setdefault(data['FormGroup'], []).append(data)
    # record the FormGroup for use later
    forms[data['FormGroup']] = data['FormGroup']
# for each FormGroup, create a separate CSV
try:
os.mkdir('out')
except FileExistsError as e:
pass
for form, students in classForms.items():
outFile = "out{}{}.csv".format(os.path.sep,form)
print(outFile)
#create file for FormGroup
with open(outFile, 'w', newline='\n') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=headers, quoting=csv.QUOTE_ALL)
writer.writeheader()
for student in students:
writer.writerow(student)
#create barcodes
labels.gen_barcode(student["SussiId"], student["SussiId"])
#create html
html_file = labels.gen_html(form, students)
labels.gen_pdf(html_file,"{}.pdf".format(form) )
#create PDFs
#print(classForms['Prep J'])
#print(classForms)
|
from __future__ import print_function
import functools
import json
import re
import traceback
from pymel.core import *
from ... import core
from ... import nodeApi
def parse( names ):
'''
Given a string, divides it into the naming chunks. One name can be marked
with a '*' to denote it repeats.
'''
head = []
repeat = ''
tail = []
names = names.split()
invalid = []
# Find a repeater, if any, verifying there is only one.
for name in names:
if name.endswith( '*' ):
if repeat:
                raise Exception('Multiple names were marked as repeating with a "*"; only one can repeat.')
repeat = name
else:
if not re.search( '^[a-zA-Z_][a-zA-Z0-9_]*$', name ):
invalid.append(name)
if invalid:
raise Exception( ' '.join(invalid) + ' contain invalid characters' )
if not repeat:
# If there is no repeating section, the whole thing is the head
head = names
else:
if repeat == names[-1]:
head = names[:-1]
elif repeat == names[0]:
tail = names[1:]
else:
i = names.index(repeat)
head = names[:i]
tail = names[i + 1:]
if repeat:
repeat = repeat[:-1]
return head, repeat, tail
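# illustrative example (added): a trailing '*' marks the repeating chunk, e.g.
# parse('Spine* Neck Head') -> ([], 'Spine', ['Neck', 'Head'])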
def isMirrored(jnt):
'''
If any parent joint or card is mirrored, it is returned else False.
.. todo::
Completely deprecate mirroring on joints and only check cards.
'''
if jnt.postCommand.count('mirror'):
return jnt
elif jnt.cardCon.node().mirror not in [None, False]:
return jnt.cardCon.node()
elif jnt.parent:
return isMirrored(jnt.parent)
else:
return False
def isAsymmetric(jnt):
'''
If the given joint is mirrored but lacks a suffix.
.. todo::
        Mirror/asymmetry is messy right now; there needs to be a check if a joint
is part of a mirror chain, which is different from if the joint should
actually be mirrored or not.
'''
if jnt.cardCon.node().mirror is False:
return True
if not jnt.cardCon.node().suffix.get() and jnt.cardCon.node().mirror is None:
return True
return False
def canMirror(jnt):
'''
Returns True if the joint is in a mirrored hierarchy and not marked as asymmetric.
"Consumer" version of isMirrored and isAsymmetric
'''
return isMirrored(jnt) and not isAsymmetric(jnt)
def preserveSelection(func):
'''
Decorator to keep selection after the function is ran.
'''
def newFunc(*args, **kwargs):
sel = selected()
output = func(*args, **kwargs)
select( sel )
return output
functools.update_wrapper( newFunc, func )
return newFunc
def strToPairs(s):
'''
Given a comma separated list of pairs, return as list of pairs.
Ex: "a b, x y" -> [ ['a','b'], ['x', 'y'] ]
'''
return [ pair.strip().split() for pair in s.strip().split(',') ]
def findTempJoint(name):
'''
Given an output name, searches for a temp joint that will output that joint.
Returns the BPJoint and True *I THINK* if the joint is a single, and False if it could be mirrored
'''
for card in core.findNode.allCards():
for data in card.output():
if name in data:
if data.index(name) == 1:
return data[0], True
else:
return data[0], False
def listTempJoints(includeHelpers=False):
    # Gross hack, this function needs to be moved elsewhere
temps = [j for j in ls(type='joint') if isinstance(j, nodeApi.BPJoint)]
if includeHelpers:
return temps
else:
return [j for j in temps if not j.isHelper]
def annotateSelectionHandle(obj, text, pos=None):
'''
Make an annotation of the `obj`'s selection handle, optionally specifying
the position of the handle as well.
'''
obj.displayHandle.set( True )
if pos:
obj.selectHandle.set( pos )
loc = spaceLocator()
ann = annotate(loc, text=text).getParent()
ann.setParent( obj )
ann.t.set(obj.selectHandle.get())
ann.r.lock()
ann.s.lock()
loc.setParent(ann)
loc.t.set(0, 0, 0)
hide(loc)
add = createNode( 'plusMinusAverage' )
ann.t >> add.input3D[0]
add.input3D[1].set( 0, 1, 0 )
add.output3D >> obj.selectHandle
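    # the plusMinusAverage node keeps the selection handle one unit above the annotation as it moves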
# Just in case there are different standard substitutions, have this be a table.
# NOTE: All keys are assumed to be a single character.
# &&& HOW IS THIS USED? Ugg, past me is lame.
_suffixSubstTable = {
'L': ('_L', '_R'),
'R': ('_R', '_L'),
}
def identifySubst(name, subst):
'''
Given a name and a list of (old, new) pairs, figure out which one applies, else None.
'''
for old, new in subst:
if name.count(old):
return (old, new)
return None
def fromCardPath(s):
if s.startswith('FIND('):
return eval(s)
class BLANK:
pass
def FIND(name, cardId=BLANK):
'''
A fancier wrapper for PyNode to make it easier to find objects by other
    criteria.
    Currently its only use is looking up cards by their ids, but in case it needs
to be more flexible, it can be.
.. todo::
Use the matching library to find closest matches
This is AT ODDS with weapon attachments! Due to the gluing, attachments
could come up instead. Maybe all cards prioritizes non-attachments stuff?
'''
if cardId is not BLANK:
cards = []
names = []
for c in core.findNode.allCards():
data = c.rigData
if 'id' in data and data['id'] == cardId:
return c
else:
cards.append(c)
names.append(c.name())
for c in cards:
if c.name() == name:
return c
else:
for c in core.findNode.allCards():
if c.name() == name:
return c
class GetNextSelected(object):
'''
    Needs a function that takes a single selected item and returns True if
    processing was successful, signifying that the previous selection should
    be restored.
'''
def __init__(self, setFunction, clearFunction, extraMenus=None, **kwargs):
self.field = textFieldButtonGrp(bc=core.alt.Callback(self.setup), bl='Get', **kwargs)
self.menu = []
cmds.popupMenu()
def clear():
self.field.setText('')
clearFunction()
cmds.menuItem(l='Clear', c=core.alt.Callback(clear) )
if extraMenus:
for label, action in extraMenus:
self.menu.append(cmds.menuItem(l=label, c=core.alt.Callback(action, self.field) ))
self.set = setFunction
self.clear = clearFunction
def setMenu(self, extraMenus):
for mi, (label, action) in zip(self.menu, extraMenus):
cmds.menuItem(mi, e=True, l=label, c=core.alt.Callback(action, self.field) )
def setup(self):
scriptJob( ro=True, e=('SelectionChanged', core.alt.Callback(self.getNextSelection)) )
self.current = selected()
def getNextSelection(self):
#sel = selectedJoints()
sel = selected()
if sel:
print( 'Passing', sel[0] )
if self.set( sel[0] ):
print( 'GOODS' )
evalDeferred( core.alt.Callback(select, self.current) )
def getSelectedCards():
return [c for c in selected() if c.__class__.__name__ == 'Card']
def saveCardStates():
'''
Helper to transfer state from one card to another
'''
cardStateInfo = {}
cards = getSelectedCards()
if not cards:
cards = core.findNode.allCards()
for c in cards:
name = c.name()
cardStateInfo[name] = {}
cardStateInfo[name]['fossilRigState'] = c.fossilRigState.get()
for side in ('Left', 'Center', 'Right'):
for kinematic in ('ik', 'fk'):
shapeAttr = 'outputShape' + side + kinematic
if c.hasAttr( shapeAttr ):
cardStateInfo[name][ shapeAttr ] = c.attr( shapeAttr ).get()
core.text.clipboard.set( json.dumps(cardStateInfo) )
def loadCardStates():
try:
cardStateInfo = json.loads( core.text.clipboard.get() )
except Exception:
print( 'Valid json was not found in the clipboard' )
return
selectedCards = getSelectedCards()
# If there is a single card, just apply the data
if len(cardStateInfo) == 1 and len(selectedCards) == 1:
        info = next(iter(cardStateInfo.values()))
cardAndInfo = [(selectedCards[0], info)]
# Otherwise apply data to as many cards with the same names
else:
cardAndInfo = [(PyNode(card), info) for card, info in cardStateInfo.items() if objExists(card)]
for card, info in cardAndInfo:
if 'fossilRigState' in info:
card.fossilRigState.set( info['fossilRigState'] )
for side in ('Left', 'Center', 'Right'):
for kinematic in ('ik', 'fk'):
shapeAttr = 'outputShape' + side + kinematic
if shapeAttr in info:
card.attr( shapeAttr ).set( info[ shapeAttr ] )
def selectedCardsSoft(single=False):
'''
Returns selected cards as well as the cards of the selected joints.
If `single` is True, the first card is returned, else None
'''
if single:
cards = selectedCards()
if cards:
return cards[0]
for jnt in selectedJoints():
return jnt.card
return None
else:
cards = selectedCards()
temp = set(cards)
for jnt in selectedJoints():
if jnt.card not in temp:
temp.add(jnt.card)
cards.append(jnt.card)
return cards
def selectedCards():
cards = [ o for o in selected(type='transform') if type(o) == nodeApi.Card ]
if not cards:
bpjs = [ o for o in selected(type='transform') if type(o) == nodeApi.BPJoint ]
cards = list(set(bpj.card for bpj in bpjs))
return cards
def selectedJoints():
sel = selected(type='transform')
if not sel:
return []
try:
# Component don't have .hasAttr but this is easy.
return [ j for j in sel if j.hasAttr( 'realJoint' ) ]
except Exception:
return []
def runOnEach(func, completedMessage=''):
sel = selectedCards()
if not sel:
confirmDialog( m='No cards selected' )
return
with core.ui.progressWin(title='Working on ' + completedMessage, max=len(sel) ) as prog:
errors = {}
for i, card in enumerate(sel):
try:
func( card )
except Exception:
#print( traceback.format_exc() )
errors[card] = traceback.format_exc()
prog.update()
if not errors:
print( completedMessage )
else:
for card, text in errors.items():
print(card, '-' * 80)
print( text )
        warning( 'Errors occurred on {} cards, see above for details'.format(len(errors)) )
def makeFakeBone():
''' Used by polySkeleton '''
bone = polyCylinder()[0]
bone.ty.set(1)
makeIdentity(bone, t=True, apply=True)
xform(bone, ws=True, piv=(0, 0, 0))
#scale -r -p -1.19209e-07cm 2cm -1.78814e-07cm 0.0229933 0.0229933 0.0229933 ;
scale( bone.vtx[20:39], (0, 0, 0), r=True, p=(0, 2, 0) )
return bone
def polySkeleton(cards=None):
''' Make cylinders to represent a skeleton (for use in zbrush).
'''
if not cards:
cards = core.findNode.allCards()
jointGroup = group(em=True, n='jointGroup')
# Make cylinders to represent joints
made = {}
for card in cards:
for j in card.joints:
p = j.parent
if p:
bone = makeFakeBone()
core.dagObj.moveTo(bone, p)
s = core.dagObj.distanceBetween(j, p) * 0.5
bone.sy.set(s)
delete(aimConstraint(j, bone, aim=(0, 1, 0), u=(0, 0, 1)))
makeIdentity(bone, apply=True, s=True)
made[j] = bone
# Setup fake joints parentage since they all exist
for j, bone in made.items():
p = j.parent
if p in made:
bone.setParent( made[p] )
else:
bone.setParent( jointGroup )
# Make polygon cards
cardGroup = group(em=True, n='jointGroup')
for card in cards:
points = [ xform( x, q=True, ws=True, t=True) for x in card.cv ]
poly = polyPlane(sh=True, sw=True)[0]
poly.setParent(cardGroup)
core.dagObj.matchTo(poly, card)
for p, v in zip(points, poly.vtx):
xform(v, ws=True, t=p)
|
print ("hello")
print ("bye bye bye")
|
import unittest
from inspect4py.utils import *
class Test(unittest.TestCase):
def test_extract_software_type(self):
json_test = [
{"type": "service", "run": "python /GitHub/test_repos/Chowlk/app.py"},
{"type": "script with main", "run": "python /GitHub/test_repos/Chowlk/converter.py"}
]
type_script = rank_software_invocation(json_test)
assert (type_script[0]["type"] == "service")
def test_extract_software_type_empty(self):
json = []
type_script = rank_software_invocation(json)
assert (type_script == [])
if __name__ == '__main__':
unittest.main()
|
foods = ['spam', 'eggs', 'ham']
things = foods
things[1] = 'chips'
print(foods[1]) # 'chips'
print(foods) # ['spam', 'chips', 'ham']
print(things) # ['spam', 'chips', 'ham']
print(40 * '-')
a = ['spam', 'eggs', 'ham']
b = a
c = a[:]
print(a is b, a is c)  # True False
print(a == b, a == c)  # True True
b[1] = 'milk'
print(a is b, a is c)  # True False
print(a == b, a == c)  # True False
print(40 * '-')
my_string = '1234567890'
print(my_string[9])
print(40 * '-')
my_data = set(range(15))
print(my_data)
for x in my_data:
print(x)
break
print(40 * '-')
keys = [1, 2, 3]
keys.append([4, 5, 6])
# {key: "Panda" for key in keys} # TypeError: unhashable type: 'list'
# TypeError: object of type 'generator' has no len()
# len(x ** 2 for x in range(5))
print(len([x ** 2 for x in range(5)]))
bum = {1, 2, 3, 4, 5}
print(len(bum))
# AttributeError: 'set' object has no attribute 'count'
# {1, 2, 3, 3, 3, 4, 5}.count(3)
|
# Copyright 2018 The DNNC Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
############################
# Description:
# DNNC AOT Compiler script
#############################
import os, sys
if __name__ == "__main__":
DNNC_PATH=os.path.abspath(os.path.dirname(__file__)+os.path.sep+'..'+os.path.sep+'swig')
sys.path.append(DNNC_PATH)
import dnnc
import read_onnx
import onnx_cpp
class compilerWrapper:
    """Compiler class for models in ONNX binary/protobuf format."""
    def main(self, cppFile):
        pass  # placeholder (assumed): would invoke the host C++ toolchain on cppFile
if __name__ == "__main__":
if len(sys.argv) >= 2:
parser = pbReader()
dcGraph = parser.main(sys.argv[1])
        cppCodeGen = dnncCpp()
        cppFile = cppCodeGen.main(dcGraph)
        onnxCC = compilerWrapper()
        onnxCC.main(cppFile)
else:
print("\nUsage: "+sys.argv[0]+ " <onnx_model_file>.onnx \n")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
import keras
# prevent Keras from using up all gpu memory
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
def verify_keras_frontend(keras_model, need_transpose=True):
# Keras frontend currently supports tensorflow backend only.
assert(keras.backend.backend() == 'tensorflow')
in_shapes = []
for layer in keras_model._input_layers:
in_shapes.append(tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape))
def get_keras_output(xs, dtype='float32'):
return keras_model.predict(xs)
def get_tvm_output(xs, target, ctx, dtype='float32'):
shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)}
mod, params = relay.frontend.from_keras(keras_model, shape_dict)
with relay.transform.build_config(opt_level=2):
graph, lib, params = relay.build(mod,
target,
params=params)
m = graph_runtime.create(graph, lib, ctx)
for name, x in zip(keras_model.input_names, xs):
m.set_input(name, tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
return [m.get_output(i).asnumpy() for i in range(m.get_num_outputs())]
def to_channels_first(arr):
return arr.transpose([0, -1] + list(range(1, arr.ndim - 1)))
def to_channels_last(arr):
return arr.transpose([0] + list(range(2, arr.ndim)) + [1])
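    # e.g. NHWC (1, 32, 32, 3) <-> NCHW (1, 3, 32, 32); the Relay graph here expects channels-first input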
xs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
keras_out = get_keras_output(xs)
keras_out = keras_out if isinstance(keras_out, list) else [keras_out]
for target, ctx in ctx_list():
inputs = [to_channels_first(x) for x in xs] if need_transpose else xs
tvm_out = get_tvm_output(inputs, target, ctx)
for kout, tout in zip(keras_out, tvm_out):
if need_transpose:
tout = to_channels_last(tout)
tvm.testing.assert_allclose(kout, tout, rtol=1e-5, atol=1e-5)
def test_forward_merge():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Conv2D(8, (3, 3), padding="same")(data)
y = keras.layers.Conv2D(8, (3, 3), padding="same")(x)
z = keras.layers.Conv2D(8, (3, 3), padding="same")(y)
merge_funcs = [keras.layers.Add(),
keras.layers.Subtract(),
keras.layers.Multiply(),
keras.layers.Maximum(),
keras.layers.Average(),
keras.layers.Concatenate()]
for merge_func in merge_funcs:
if isinstance(merge_func, keras.layers.merge.Subtract):
out = merge_func([x, y])
else:
out = merge_func([x, y, z])
keras_model = keras.models.Model(data, out)
verify_keras_frontend(keras_model)
def test_forward_activations():
data = keras.layers.Input(shape=(32,32,3))
act_funcs = [keras.layers.Activation('softmax'),
keras.layers.Activation('softplus'),
keras.layers.Activation('relu'),
keras.layers.Activation('softsign'),
keras.layers.Activation('hard_sigmoid'),
keras.layers.Activation('sigmoid'),
keras.layers.Activation('tanh'),
keras.layers.Activation('linear'),
keras.layers.Activation('selu'),
keras.layers.ReLU(),
keras.layers.ReLU(max_value=6.),
keras.layers.LeakyReLU(alpha=0.3),
keras.layers.PReLU(weights=np.random.rand(1, 32, 32, 3)),
keras.layers.ELU(alpha=0.5),
keras.layers.ThresholdedReLU(theta=0.5)]
for act_func in act_funcs:
x = act_func(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_dense():
data = keras.layers.Input(shape=(32,32,1))
x = keras.layers.Flatten()(data)
x = keras.layers.Dropout(0.5)(x)
x = keras.layers.Dense(10, activation='relu', kernel_initializer='uniform')(x)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_sequential():
keras_model = keras.models.Sequential([
keras.layers.Dense(16, input_dim=32, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(8, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(1, activation='sigmoid')
])
verify_keras_frontend(keras_model)
def test_forward_pool():
data = keras.layers.Input(shape=(32,32,1))
# maxpool
x = keras.layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
# avgpool
y = keras.layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(data)
keras_model = keras.models.Model(data, y)
verify_keras_frontend(keras_model)
def test_forward_conv():
data = keras.layers.Input(shape=(32,32,3))
conv_funcs = [keras.layers.Conv2D(filters=10, kernel_size=(3,3),
strides=(2,2), padding='same'),
keras.layers.Conv2D(filters=10, kernel_size=(3,3),
dilation_rate=(2,2), padding='same'),
keras.layers.DepthwiseConv2D(kernel_size=(3,3), padding='same'),
keras.layers.Conv2DTranspose(filters=10, kernel_size=(3,3), padding='valid'),
keras.layers.SeparableConv2D(filters=10, kernel_size=(3,3), padding='same')]
for conv_func in conv_funcs:
x = conv_func(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_upsample(interpolation='nearest'):
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.UpSampling2D(size=(3,3), interpolation=interpolation)(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_reshape():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Reshape(target_shape=(32,32,3))(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_crop():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Cropping2D(cropping=((1, 1), (1, 1)))(data)
x = keras.layers.Cropping2D(cropping=(1, 1))(x)
x = keras.layers.Cropping2D(cropping=1)(x)
x = keras.layers.Cropping2D(cropping=((0, 1), (1, 0)))(x)
x = keras.layers.Cropping2D(cropping=(1, 0))(x)
x = keras.layers.Cropping2D(cropping=0)(x)
x = keras.layers.Add()([x, x])
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model)
def test_forward_multi_inputs():
data1 = keras.layers.Input(shape=(32,32,3))
data2 = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Conv2D(8, (3, 3), padding="same")(data1)
y = keras.layers.Conv2D(8, (3, 3), padding="same")(data2)
z = keras.layers.Average()([x, y])
z = keras.layers.GlobalAveragePooling2D()(z)
keras_model = keras.models.Model([data1, data2], z)
verify_keras_frontend(keras_model)
def test_forward_multi_outputs():
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Conv2D(8, (3, 3), padding="same")(data)
x = keras.layers.GlobalAveragePooling2D()(x)
y = keras.layers.Conv2D(8, (3, 3), padding="same")(data)
y = keras.layers.GlobalAveragePooling2D()(y)
keras_model = keras.models.Model(data, [x, y])
verify_keras_frontend(keras_model)
def test_forward_reuse_layers():
# reuse conv2d
data = keras.layers.Input(shape=(32,32,3))
conv2d = keras.layers.Conv2D(8, (3, 3), padding="same")
x = conv2d(data)
y = conv2d(data)
z = keras.layers.Add()([x, y])
z = keras.layers.GlobalAveragePooling2D()(z)
keras_model = keras.models.Model(data, z)
verify_keras_frontend(keras_model)
# reuse add
data = keras.layers.Input(shape=(32,32,3))
x = keras.layers.Conv2D(8, (3, 3), padding="same")(data)
add = keras.layers.Add()
x = add([x, x])
x = add([x, x])
z = keras.layers.GlobalAveragePooling2D()(x)
keras_model = keras.models.Model(data, z)
verify_keras_frontend(keras_model)
def test_forward_rnn():
data = keras.layers.Input(shape=(1,32))
rnn_funcs = [keras.layers.LSTM(units=16, return_state=False,
recurrent_activation='sigmoid', activation='tanh'),
keras.layers.SimpleRNN(units=16, return_state=False,
activation='tanh'),
keras.layers.GRU(units=16, return_state=False,
recurrent_activation='sigmoid', activation='tanh')]
for rnn_func in rnn_funcs:
x = rnn_func(data)
keras_model = keras.models.Model(data, x)
verify_keras_frontend(keras_model, need_transpose=False)
def test_forward_vgg16():
keras_model = keras.applications.VGG16(include_top=True, weights='imagenet',
input_shape=(224,224,3), classes=1000)
verify_keras_frontend(keras_model)
def test_forward_xception():
keras_model = keras.applications.Xception(include_top=True, weights='imagenet',
input_shape=(299,299,3), classes=1000)
verify_keras_frontend(keras_model)
def test_forward_resnet50():
keras_model = keras.applications.ResNet50(include_top=True, weights='imagenet',
input_shape=(224,224,3), classes=1000)
verify_keras_frontend(keras_model)
def test_forward_mobilenet():
keras_model = keras.applications.MobileNet(include_top=True, weights='imagenet',
input_shape=(224,224,3), classes=1000)
verify_keras_frontend(keras_model)
if __name__ == '__main__':
test_forward_merge()
test_forward_activations()
test_forward_dense()
test_forward_sequential()
test_forward_pool()
test_forward_conv()
test_forward_upsample(interpolation='nearest')
test_forward_upsample(interpolation='bilinear')
test_forward_reshape()
test_forward_crop()
test_forward_multi_inputs()
test_forward_multi_outputs()
test_forward_reuse_layers()
test_forward_rnn()
test_forward_vgg16()
test_forward_xception()
test_forward_resnet50()
test_forward_mobilenet()
|
"""
MAP Client Plugin Step
"""
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
class FieldworkModelTransformationStep(WorkflowStepMountPoint):
"""
Step for applying a rigid-body or scaling transform to
a fieldwork model.
"""
def __init__(self, location):
super(FieldworkModelTransformationStep, self).__init__('Fieldwork Model Transformation', location)
self._configured = True # A step cannot be executed until it has been configured.
self._category = 'Fieldwork'
# Add any other initialisation code here:
# Ports:
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'ju#fieldworkmodel'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'ju#geometrictransform'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'ju#fieldworkmodel'))
self.GF = None
self.T = None
def execute(self):
"""
Add your code here that will kick off the execution of the step.
Make sure you call the _doneExecution() method when finished. This method
may be connected up to a button in a widget for example.
"""
# Put your execute step code here before calling the '_doneExecution' method.
GFTransforms = {'affine': self.GF.transformAffine,
'rigid_about_point': self.GF.transformRigidRotateAboutP,
'rigidscale_about_point': self.GF.transformRigidScaleRotateAboutP,
}
try:
transformFunction = GFTransforms[self.T.transformType]
except KeyError:
raise RuntimeError('unknown transform type: ' + self.T.transformType)
if self.T.transformType == 'affine':
transformFunction(self.T.T)
else:
transformFunction(self.T.getT(), self.T.getP())
self._doneExecution()
def setPortData(self, index, dataIn):
"""
Add your code here that will set the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
uses port for this step then the index can be ignored.
"""
if index == 0:
self.GF = dataIn # ju#fieldworkmodel
else:
self.T = dataIn # ju#geometrictransform
def getPortData(self, index):
"""
Add your code here that will return the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
provides port for this step then the index can be ignored.
"""
return self.GF # ju#fieldworkmodel
def configure(self):
"""
This function will be called when the configure icon on the step is
clicked. It is appropriate to display a configuration dialog at this
time. If the conditions for the configuration of this step are complete
then set:
self._configured = True
"""
pass
def getIdentifier(self):
"""
The identifier is a string that must be unique within a workflow.
"""
return 'fieldworkmodeltransformation' # TODO: The string must be replaced with the step's unique identifier
def setIdentifier(self, identifier):
"""
The framework will set the identifier for this step when it is loaded.
"""
pass # TODO: Must actually set the step's identifier here
def serialize(self):
"""
Add code to serialize this step to disk. Returns a json string for
mapclient to serialise.
"""
return ''
def deserialize(self, string):
"""
Add code to deserialize this step from disk. Parses a json string
given by mapclient
"""
pass
|
"""
Haystack-API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.api_client import ApiClient, Endpoint as _Endpoint
from openapi_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from openapi_client.model.http_validation_error import HTTPValidationError
from openapi_client.model.query_request import QueryRequest
from openapi_client.model.query_response import QueryResponse
class SearchApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.check_status_endpoint = _Endpoint(
settings={
'response_type': (bool, date, datetime, dict, float, int, list, str, none_type,),
'auth': [],
'endpoint_path': '/initialized',
'operation_id': 'check_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.query_endpoint = _Endpoint(
settings={
'response_type': (QueryResponse,),
'auth': [],
'endpoint_path': '/query',
'operation_id': 'query',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'query_request',
],
'required': [
'query_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'query_request':
(QueryRequest,),
},
'attribute_map': {
},
'location_map': {
'query_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def check_status(
self,
**kwargs
):
"""Initialized # noqa: E501
This endpoint can be used during startup to understand if the server is ready to take any requests, or is still loading. The recommended approach is to call this endpoint with a short timeout, like 500ms, and in case of no reply, consider the server busy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_status(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
bool, date, datetime, dict, float, int, list, str, none_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.check_status_endpoint.call_with_http_info(**kwargs)
def query(
self,
query_request,
**kwargs
):
"""Query # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query(query_request, async_req=True)
>>> result = thread.get()
Args:
query_request (QueryRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
QueryResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['query_request'] = \
query_request
return self.query_endpoint.call_with_http_info(**kwargs)
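# A minimal usage sketch for the generated client above (module and class
# names here are hypothetical -- the real package layout depends on the
# generator configuration). It shows the synchronous call and the
# async_req=True thread pattern described in the docstrings:
#
#     from client import ApiClient, DefaultApi, QueryRequest  # hypothetical
#
#     with ApiClient() as api_client:
#         api = DefaultApi(api_client)
#         # Poll readiness with a short timeout, as check_status recommends.
#         ready = api.check_status(_request_timeout=0.5)
#         # Synchronous query.
#         response = api.query(QueryRequest())
#         # Asynchronous query: returns a thread whose .get() yields the result.
#         thread = api.query(QueryRequest(), async_req=True)
#         response = thread.get()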
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import load_checkpoint
from mmseg.utils import get_root_logger
from functools import partial
from ..builder import BACKBONES
__all__ = [
'resnet103D', 'resnet183D', 'resnet343D', 'resnet503D', 'resnet1013D',
'resnet1523D', 'resnet2003D']
def conv3x3x3(in_planes, out_planes, stride=1, dilation=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
dilation=dilation,
stride=stride,
padding=dilation,
bias=False)
def downsample_basic_block(x, planes, stride, no_cuda=False):
    # Type-A shortcut: spatially downsample with a stride-`stride` average
    # pool, then zero-pad the channel dimension up to `planes` channels.
    # Unlike the old Variable/.data version, this keeps the shortcut in the
    # autograd graph so gradients flow through the skip connection.
    out = F.avg_pool3d(x, kernel_size=1, stride=stride)
    zero_pads = torch.zeros(
        out.size(0), planes - out.size(1), out.size(2), out.size(3),
        out.size(4), dtype=out.dtype)
    if not no_cuda and out.is_cuda:
        zero_pads = zero_pads.cuda()
    return torch.cat([out, zero_pads], dim=1)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride=stride, dilation=dilation)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes, dilation=dilation)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
shortcut_type='B',
no_cuda=False,
pretrained=None):
self.inplanes = 64
self.no_cuda = no_cuda
super(ResNet, self).__init__()
self.conv1 = nn.Conv3d(
1,
64,
kernel_size=7,
stride=(2, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
self.layer2 = self._make_layer(
block, 128, layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(
block, 256, layers[2], shortcut_type, stride=1, dilation=2)
self.layer4 = self._make_layer(
block, 512, layers[3], shortcut_type, stride=1, dilation=4)
self.init_weights(pretrained)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
else:
raise TypeError('pretrained must be a str or None')
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride,
no_cuda=self.no_cuda)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
@BACKBONES.register_module()
class resnet103D(ResNet):
    """Constructs a ResNet-10 model.
    """
    def __init__(self, **kwargs):
        super(resnet103D, self).__init__(BasicBlock, [1, 1, 1, 1],
                                         **kwargs)
@BACKBONES.register_module()
class resnet183D(ResNet):
    """Constructs a ResNet-18 model.
    """
    def __init__(self, **kwargs):
        super(resnet183D, self).__init__(BasicBlock, [2, 2, 2, 2],
                                         **kwargs)
@BACKBONES.register_module()
class resnet343D(ResNet):
    """Constructs a ResNet-34 model.
    """
    def __init__(self, **kwargs):
        super(resnet343D, self).__init__(BasicBlock, [3, 4, 6, 3],
                                         **kwargs)
@BACKBONES.register_module()
class resnet503D(ResNet):
    """Constructs a ResNet-50 model.
    """
    def __init__(self, **kwargs):
        super(resnet503D, self).__init__(Bottleneck, [3, 4, 6, 3],
                                         **kwargs)
@BACKBONES.register_module()
class resnet1013D(ResNet):
    """Constructs a ResNet-101 model.
    """
    def __init__(self, **kwargs):
        super(resnet1013D, self).__init__(Bottleneck, [3, 4, 23, 3],
                                          **kwargs)
@BACKBONES.register_module()
class resnet1523D(ResNet):
    """Constructs a ResNet-152 model.
    """
    def __init__(self, **kwargs):
        super(resnet1523D, self).__init__(Bottleneck, [3, 8, 36, 3],
                                          **kwargs)
@BACKBONES.register_module()
class resnet2003D(ResNet):
    """Constructs a ResNet-200 model.
    """
    def __init__(self, **kwargs):
        super(resnet2003D, self).__init__(Bottleneck, [3, 24, 36, 3],
                                          **kwargs)
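# A minimal smoke-test sketch for the backbone above (not part of the original
# module; assumes mmcv/mmseg are importable since this file imports them).
# Input is a single-channel 3D volume, matching conv1's in_channels=1; because
# layer3/layer4 trade striding for dilation, stages 2-4 share a spatial size.
if __name__ == '__main__':
    model = ResNet(Bottleneck, [3, 4, 6, 3])  # ResNet-50 configuration
    x = torch.randn(1, 1, 32, 64, 64)  # (batch, channels, D, H, W)
    for i, feat in enumerate(model(x)):
        print('stage %d output shape: %s' % (i + 1, tuple(feat.shape)))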
|
class List:
def __init__(self):
self.data = {}
self.length = 0
def append(self, element):
self.data[self.length] = element
self.length += 1
    def pop(self):
        if self.length == 0:
            raise IndexError('pop from empty list')
        self.length -= 1
        last_element = self.data[self.length]
        del self.data[self.length]
        return last_element
    def insert(self, index, element):
        if index >= self.length:
            self.data[self.length] = element
        else:
            # shift elements right to open a slot at `index`
            for i in range(self.length, index, -1):
                self.data[i] = self.data[i - 1]
            self.data[index] = element
        self.length += 1
def clear(self):
self.data = {}
self.length = 0
    def copy(self):
        # return a shallow copy rather than exposing the internal dict
        new_list = List()
        new_list.data = self.data.copy()
        new_list.length = self.length
        return new_list
def count(self, element):
count = 0
for value in self.data.values():
if value == element:
count += 1
return count
def index(self, element):
for key, value in self.data.items():
if value == element:
return key
raise ValueError
    def remove(self, element):
        # index() raises ValueError if the element is absent
        index = self.index(element)
        # shift subsequent elements left to close the gap
        for i in range(index, self.length - 1):
            self.data[i] = self.data[i + 1]
        del self.data[self.length - 1]
        self.length -= 1
def reverse(self):
reversed_data = {}
original_length = self.length
for key, value in self.data.items():
original_length -= 1
reversed_data[original_length] = value
self.data = reversed_data
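# A short usage sketch exercising the dict-backed List above against the
# built-in list semantics it reimplements.
if __name__ == '__main__':
    xs = List()
    for value in (1, 2, 2, 3):
        xs.append(value)
    assert xs.count(2) == 2
    assert xs.index(3) == 3
    xs.insert(0, 0)            # shifts existing elements right
    assert xs.data[0] == 0 and xs.length == 5
    xs.remove(2)               # drops the first 2 and closes the gap
    assert xs.count(2) == 1 and xs.length == 4
    xs.reverse()
    assert xs.pop() == 0       # reversed, so the inserted head pops last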
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019 The EncoCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup_linux():
global args, workdir
if os.path.isfile('/usr/bin/apt-get'):
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', os.environ['USER']])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
setup_repos()
elif args.is_fedora:
pkgmgr = 'dnf'
repourl = 'https://download.docker.com/linux/fedora/docker-ce.repo'
elif args.is_centos:
pkgmgr = 'yum'
repourl = 'https://download.docker.com/linux/centos/docker-ce.repo'
if args.is_fedora or args.is_centos:
programs = ['ruby', 'make', 'wget', 'curl']
if args.kvm:
print('KVM not supported with Fedora/CentOS yet.')
sys.exit(1)
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
user = os.environ['USER']
dockers = ['docker-ce', 'docker-ce-cli', 'containerd.io']
if args.is_fedora:
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'dnf-plugins-core'])
subprocess.check_call(['sudo', pkgmgr, 'config-manager', '--add-repo', repourl])
elif args.is_centos:
reqs = ['yum-utils', 'device-mapper-persistent-data', 'lvm2']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + reqs)
subprocess.check_call(['sudo', 'yum-config-manager', '--add-repo', repourl])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + dockers)
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', user])
subprocess.check_call(['sudo', 'systemctl', 'enable', 'docker'])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
subprocess.check_call(['sudo', 'systemctl', 'start', 'docker'])
else:
print('LXC not supported with Fedora/CentOS yet.')
sys.exit(1)
if args.is_fedora:
programs += ['git']
if args.is_centos:
# CentOS ships with an insanely outdated version of git that is no longer compatible with gitian builds
# Check current version and update if necessary
oldgit = b'2.' not in subprocess.check_output(['git', '--version'])
if oldgit:
subprocess.check_call(['sudo', pkgmgr, 'remove', '-y', 'git*'])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'https://centos7.iuscommunity.org/ius-release.rpm'])
programs += ['git2u-all']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + programs)
setup_repos()
else:
print('Unsupported system/OS type.')
sys.exit(1)
def setup_darwin():
global args, workdir
programs = []
if not os.path.isfile('/usr/local/bin/wget'):
programs += ['wget']
if not os.path.isfile('/usr/local/bin/git'):
programs += ['git']
if not os.path.isfile('/usr/local/bin/gsha256sum'):
programs += ['coreutils']
if args.docker:
print('Experimental setup for macOS host')
if len(programs) > 0:
subprocess.check_call(['brew', 'install'] + programs)
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
elif args.kvm or not args.docker:
print('KVM and LXC are not supported under macOS at this time.')
sys.exit(0)
setup_repos()
def setup_repos():
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/gitian.sigs.git'])
if not os.path.isdir('encocoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/encocoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('encocoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/encocoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
if args.host_os == 'darwin':
subprocess.check_call(['sed', '-i.old', '/50cacher/d', 'bin/make-base-vm'])
if args.host_os == 'linux':
if args.is_fedora or args.is_centos:
subprocess.check_call(['sed', '-i', '/50cacher/d', 'bin/make-base-vm'])
subprocess.check_call(make_image_prog)
subprocess.check_call(['git', 'checkout', 'bin/make-base-vm'])
os.chdir(workdir)
if args.host_os == 'linux':
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
print('Setup complete!')
sys.exit(0)
def build():
global args, workdir
os.makedirs('encocoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../encocoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/encocoin-*.tar.gz build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/encocoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/encocoin-*.zip build/out/encocoin-*.exe build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/encocoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/encocoin-*.tar.gz build/out/encocoin-*.dmg build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
# TODO: Skip making signed windows sigs until we actually start producing signed windows binaries
#print('\nSigning ' + args.version + ' Windows')
#subprocess.check_call('cp inputs/encocoin-' + args.version + '-win-unsigned.tar.gz inputs/encocoin-win-unsigned.tar.gz', shell=True)
#subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call('mv build/out/encocoin-*win64-setup.exe ../encocoin-binaries/'+args.version, shell=True)
#subprocess.check_call('mv build/out/encocoin-*win32-setup.exe ../encocoin-binaries/'+args.version, shell=True)
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/encocoin-' + args.version + '-osx-unsigned.tar.gz inputs/encocoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/encocoin-osx-signed.dmg ../encocoin-binaries/'+args.version+'/encocoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
os.chdir('gitian.sigs')
commit = False
if os.path.isfile(args.version+'-win-signed/'+args.signer+'/encocoin-win-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
commit = True
if os.path.isfile(args.version+'-osx-signed/'+args.signer+'/encocoin-dmg-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
commit = True
if commit:
print('\nCommitting '+args.version+' Signed Sigs\n')
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
else:
print('\nNothing to commit\n')
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../encocoin/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../encocoin/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../encocoin/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
# TODO: Skip checking signed windows sigs until we actually start producing signed windows binaries
#print('\nVerifying v'+args.version+' Signed Windows\n')
#if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml']):
# print('Verifying v'+args.version+' Signed Windows FAILED\n')
# rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/encocoin-Project/encocoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.host_os = sys.platform
if args.host_os == 'win32' or args.host_os == 'cygwin':
raise Exception('Error: Native Windows is not supported by this script, use WSL')
if args.host_os == 'linux':
if os.environ['USER'] == 'root':
raise Exception('Error: Do not run this script as the root user')
args.is_bionic = False
args.is_fedora = False
args.is_centos = False
if os.path.isfile('/usr/bin/lsb_release'):
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if os.path.isfile('/etc/fedora-release'):
args.is_fedora = True
if os.path.isfile('/etc/centos-release'):
args.is_centos = True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
if args.host_os == 'linux':
setup_linux()
elif args.host_os == 'darwin':
setup_darwin()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
if args.host_os == 'darwin':
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
if args.detach_sign:
args.commit_files = False
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('encocoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
if not os.path.isdir('../gitian-builder/inputs/encocoin'):
os.makedirs('../gitian-builder/inputs/encocoin')
os.chdir('../gitian-builder/inputs/encocoin')
if not os.path.isdir('.git'):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
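# Example invocations (hypothetical signer name and version tag; the actual
# script filename depends on where this file is saved):
#
#   ./gitian-build.py --setup satoshi 1.0.0            # one-time environment setup
#   ./gitian-build.py --docker --build satoshi 1.0.0   # unsigned builds for l, w, m
#   ./gitian-build.py --verify satoshi 1.0.0           # verify against gitian.sigs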
|
from __future__ import print_function, division
import numpy
from galpy.util import coords
import pytest
import astropy
_APY3= astropy.__version__ > '3'
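# NOTE: the _turn_off_apy/_turn_on_apy helpers used throughout these tests
# toggle whether galpy.util.coords routes transformations through astropy.
# A minimal sketch of what they are assumed to do (the actual definitions
# live elsewhere in this test module):
#
#     def _turn_off_apy(keep_loaded=False):
#         coords._APY_COORDS_ORIG = coords._APY_COORDS
#         coords._APY_COORDS = False
#         if not keep_loaded:
#             coords._APY_LOADED = False
#
#     def _turn_on_apy():
#         coords._APY_COORDS = coords._APY_COORDS_ORIG
#         coords._APY_LOADED = True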
def test_radec_to_lb_ngp():
_turn_off_apy()
# Test that the NGP is at b=90
ra, dec= 192.25, 27.4
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=1950.)
assert not coords._APY_LOADED, "_APY_LOADED should be False, but isn't"
assert numpy.fabs(lb[1]-90.) < 10.**-6., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=1950.)
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-7., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
_turn_on_apy()
assert coords._APY_LOADED, "_APY_LOADED should be True, but isn't"
return None
def test_radec_to_lb_ngp_apyangles():
# Test, but using transformation angles derived from astropy
_turn_off_apy(keep_loaded=True)
# Test that the NGP is at b=90
ra, dec= 192.25, 27.4
assert coords._APY_LOADED, "_APY_LOADED should be True, but isn't"
lb= coords.radec_to_lb(ra,dec,degree=True,epoch='B1950')
assert numpy.fabs(lb[1]-90.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch='B1950')
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
_turn_on_apy()
return None
def test_radec_to_lb_ngp_apy():
# Test that the NGP is at b=90, using astropy's coordinate transformations
ra, dec= 192.25, 27.4
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=1950.)
assert numpy.fabs(lb[1]-90.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=1950.)
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
return None
def test_radec_to_lb_ngp_j2000():
_turn_off_apy()
# Test that the NGP is at b=90
ra, dec= 192.8594812065348, 27.12825118085622
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.)
assert numpy.fabs(lb[1]-90.) < 10.**-8., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=2000.)
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-8., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
_turn_on_apy()
return None
def test_radec_to_lb_ngp_j2000_apy():
# Test that the NGP is at b=90
ra, dec= 192.8594812065348, 27.12825118085622
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.)
assert numpy.fabs(lb[1]-90.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=2000.)
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
return None
def test_radec_to_lb_ngp_j2000_apyangles():
# Same test, but using transformation angles derived from astropy
_turn_off_apy(keep_loaded=True)
# Test that the NGP is at b=90
ra, dec= 192.8594812065348, 27.12825118085622
lb= coords.radec_to_lb(ra,dec,degree=True,epoch='J2000')
assert numpy.fabs(lb[1]-90.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch='J2000')
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
_turn_on_apy()
return None
def test_radec_to_lb_ngp_j2000_apyangles_icrs():
# Test, but using transformation angles derived from astropy, for ICRS
_turn_off_apy(keep_loaded=True)
# Test that the NGP is at b=90
ra, dec= 192.8594812065348, 27.12825118085622
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=None)
assert numpy.fabs(lb[1]-90.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not 90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=None)
assert numpy.fabs(lb[1]-numpy.pi/2.) < 10.**-4., 'Galactic latitude of the NGP given in ra,dec is not pi/2'
_turn_on_apy()
return None
def test_radec_to_lb_sgp():
_turn_off_apy()
    # Test that the SGP is at b=-90
ra, dec= 12.25, -27.4
assert not coords._APY_LOADED, "_APY_LOADED should be False, but isn't"
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=1950.)
    assert numpy.fabs(lb[1]+90.) < 10.**-6., 'Galactic latitude of the SGP given in ra,dec is not -90'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=1950.)
    assert numpy.fabs(lb[1]+numpy.pi/2.) < 10.**-7., 'Galactic latitude of the SGP given in ra,dec is not -pi/2'
_turn_on_apy()
return None
# Test the longitude of the north celestial pole
def test_radec_to_lb_ncp():
_turn_off_apy()
ra, dec= 180., 90.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=1950.)
assert numpy.fabs(lb[0]-123.) < 10.**-8., 'Galactic longitude of the NCP given in ra,dec is not 123'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=1950.)
assert numpy.fabs(lb[0]-123./180.*numpy.pi) < 10.**-8., 'Galactic longitude of the NCP given in ra,dec is not 123'
# Also test the latter for vector inputs
os= numpy.ones(2)
lb= coords.radec_to_lb(os*ra/180.*numpy.pi,os*dec/180.*numpy.pi,
degree=False,epoch=1950.)
assert numpy.all(numpy.fabs(lb[:,0]-123./180.*numpy.pi) < 10.**-8.), 'Galactic longitude of the NCP given in ra,dec is not 123'
_turn_on_apy()
return None
def test_radec_to_lb_ncp_apyangles():
_turn_off_apy(keep_loaded=True)
ra, dec= 180., 90.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch='B1950')
assert numpy.fabs(lb[0]-123.) < 10.**-4., 'Galactic longitude of the NCP given in ra,dec is not 123'
_turn_on_apy()
return None
# Test the longitude of the north celestial pole
def test_radec_to_lb_ncp_j2000():
_turn_off_apy()
ra, dec= 180., 90.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.)
assert numpy.fabs(lb[0]-122.9319185680026) < 10.**-8., 'Galactic longitude of the NCP given in ra,dec is not 122.9319185680026'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=2000.)
assert numpy.fabs(lb[0]-122.9319185680026/180.*numpy.pi) < 10.**-8., 'Galactic longitude of the NCP given in ra,dec is not 122.9319185680026'
# Also test the latter for vector inputs
os= numpy.ones(2)
lb= coords.radec_to_lb(os*ra/180.*numpy.pi,os*dec/180.*numpy.pi,
degree=False,epoch=2000.)
assert numpy.all(numpy.fabs(lb[:,0]-122.9319185680026/180.*numpy.pi) < 10.**-8.), 'Galactic longitude of the NCP given in ra,dec is not 122.9319185680026'
_turn_on_apy()
return None
def test_radec_to_lb_ncp_j2000_apyangles():
_turn_off_apy(keep_loaded=True)
ra, dec= 180., 90.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch='J2000')
assert numpy.fabs(lb[0]-122.9319185680026) < 10.**-4., 'Galactic longitude of the NCP given in ra,dec is not 122.9319185680026'
_turn_on_apy()
return None
# Test that other epochs do not work when not using astropy
def test_radec_to_lb_otherepochs():
_turn_off_apy()
ra, dec= 180., 90.
try:
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=1975.)
except IOError:
pass
else:
raise AssertionError('radec functions with epoch not equal to 1950 or 2000 did not raise IOError')
_turn_on_apy()
return None
# Test that other epochs do work when using astropy
def test_radec_to_lb_otherepochs_apy():
_turn_off_apy(keep_loaded=True)
ra, dec= 180., 90.
try:
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch='J2015')
except IOError:
raise AssertionError('radec functions with epoch not equal to 1950 or 2000 did not raise IOError')
else:
pass
_turn_on_apy()
return None
# Test that radec_to_lb and lb_to_radec are each other's inverse
def test_lb_to_radec():
_turn_off_apy()
ra, dec= 120, 60.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=True,epoch=2000.)
assert numpy.fabs(ra-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=2000.)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=False,epoch=2000.)
assert numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# And also test this for arrays
os= numpy.ones(2)
lb= coords.radec_to_lb(os*ra/180.*numpy.pi,os*dec/180.*numpy.pi,
degree=False,epoch=2000.)
ratdect= coords.lb_to_radec(lb[:,0],lb[:,1],degree=False,epoch=2000.)
rat= ratdect[:,0]
dect= ratdect[:,1]
assert numpy.all(numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.all(numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
#Also test for a negative l
l,b= 240., 60.
ra,dec= coords.lb_to_radec(l,b,degree=True)
lt,bt= coords.radec_to_lb(ra,dec,degree=True)
assert numpy.fabs(lt-l) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(bt-b) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
_turn_on_apy()
return None
# Test that radec_to_lb and lb_to_radec are each other's inverse, using astropy
def test_lb_to_radec_apy():
ra, dec= 120, 60.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=True,epoch=2000.)
assert numpy.fabs(ra-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=2000.)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=False,epoch=2000.)
assert numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# And also test this for arrays
os= numpy.ones(2)
lb= coords.radec_to_lb(os*ra/180.*numpy.pi,os*dec/180.*numpy.pi,
degree=False,epoch=2000.)
ratdect= coords.lb_to_radec(lb[:,0],lb[:,1],degree=False,epoch=2000.)
rat= ratdect[:,0]
dect= ratdect[:,1]
assert numpy.all(numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.all(numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
#Also test for a negative l
l,b= 240., 60.
ra,dec= coords.lb_to_radec(l,b,degree=True)
lt,bt= coords.radec_to_lb(ra,dec,degree=True)
assert numpy.fabs(lt-l) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(bt-b) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
return None
# Test that radec_to_lb and lb_to_radec are each other's inverse, using astropy
def test_lb_to_radec_apy_icrs():
ra, dec= 120, 60.
lb= coords.radec_to_lb(ra,dec,degree=True,epoch=None)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=True,epoch=None)
assert numpy.fabs(ra-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# Also test this for degree=False
lb= coords.radec_to_lb(ra/180.*numpy.pi,dec/180.*numpy.pi,
degree=False,epoch=None)
rat, dect= coords.lb_to_radec(lb[0],lb[1],degree=False,epoch=None)
assert numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
# And also test this for arrays
os= numpy.ones(2)
lb= coords.radec_to_lb(os*ra/180.*numpy.pi,os*dec/180.*numpy.pi,
degree=False,epoch=None)
ratdect= coords.lb_to_radec(lb[:,0],lb[:,1],degree=False,epoch=None)
rat= ratdect[:,0]
dect= ratdect[:,1]
assert numpy.all(numpy.fabs(ra/180.*numpy.pi-rat) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.all(numpy.fabs(dec/180.*numpy.pi-dect) < 10.**-10.), 'lb_to_radec is not the inverse of radec_to_lb'
#Also test for a negative l
l,b= 240., 60.
ra,dec= coords.lb_to_radec(l,b,degree=True)
lt,bt= coords.radec_to_lb(ra,dec,degree=True)
assert numpy.fabs(lt-l) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
assert numpy.fabs(bt-b) < 10.**-10., 'lb_to_radec is not the inverse of radec_to_lb'
return None
def test_radec_to_lb_galpyvsastropy():
# Test that galpy's radec_to_lb agrees with astropy's
from astropy.coordinates import SkyCoord
import astropy.units as u
_turn_off_apy(keep_loaded=True)
ra, dec= 33., -20.
# using galpy
lg,bg= coords.radec_to_lb(ra,dec,degree=True,epoch=2000.0)
# using astropy
c= SkyCoord(ra=ra*u.deg,dec=dec*u.deg,frame='fk5',equinox='J2000')
c= c.transform_to('galactic')
la,ba= c.l.to(u.deg).value,c.b.to(u.deg).value
assert numpy.fabs(lg-la) < 1e-12, "radec_to_lb using galpy's own transformations does not agree with astropy's"
assert numpy.fabs(bg-ba) < 1e-12, "radec_to_lb using galpy's own transformations does not agree with astropy's"
_turn_on_apy()
return None
def test_radec_to_lb__1950_galpyvsastropy():
# Test that galpy's radec_to_lb agrees with astropy's
from astropy.coordinates import SkyCoord
import astropy.units as u
_turn_off_apy(keep_loaded=True)
ra, dec= 33., -20.
# using galpy
lg,bg= coords.radec_to_lb(ra,dec,degree=True,epoch=1950.0)
# using astropy
c= SkyCoord(ra=ra*u.deg,dec=dec*u.deg,frame='fk4noeterms',equinox='B1950')
c= c.transform_to('galactic')
la,ba= c.l.to(u.deg).value,c.b.to(u.deg).value
assert numpy.fabs(lg-la) < 1e-12, "radec_to_lb using galpy's own transformations does not agree with astropy's"
assert numpy.fabs(bg-ba) < 1e-12, "radec_to_lb using galpy's own transformations does not agree with astropy's"
_turn_on_apy()
return None
# Test lb_to_XYZ
def test_lbd_to_XYZ():
l,b,d= 90., 30.,1.
XYZ= coords.lbd_to_XYZ(l,b,d,degree=True)
assert numpy.fabs(XYZ[0]) <10.**-10., 'lbd_to_XYZ conversion does not work as expected'
assert numpy.fabs(XYZ[1]-numpy.sqrt(3.)/2.) < 10.**-10., 'lbd_to_XYZ conversion does not work as expected'
assert numpy.fabs(XYZ[2]-0.5) < 10.**-10., 'lbd_to_XYZ conversion does not work as expected'
# Also test for degree=False
XYZ= coords.lbd_to_XYZ(l/180.*numpy.pi,b/180.*numpy.pi,d,degree=False)
assert numpy.fabs(XYZ[0]) <10.**-10., 'lbd_to_XYZ conversion does not work as expected'
assert numpy.fabs(XYZ[1]-numpy.sqrt(3.)/2.) < 10.**-10., 'lbd_to_XYZ conversion does not work as expected'
assert numpy.fabs(XYZ[2]-0.5) < 10.**-10., 'lbd_to_XYZ conversion does not work as expected'
# Also test for arrays
os= numpy.ones(2)
XYZ= coords.lbd_to_XYZ(os*l/180.*numpy.pi,os*b/180.*numpy.pi,
os*d,degree=False)
assert numpy.all(numpy.fabs(XYZ[:,0]) <10.**-10.), 'lbd_to_XYZ conversion does not work as expected'
assert numpy.all(numpy.fabs(XYZ[:,1]-numpy.sqrt(3.)/2.) < 10.**-10.), 'lbd_to_XYZ conversion does not work as expected'
assert numpy.all(numpy.fabs(XYZ[:,2]-0.5) < 10.**-10.), 'lbd_to_XYZ conversion does not work as expected'
return None
# Test that XYZ_to_lbd is the inverse of lbd_to_XYZ
def test_XYZ_to_lbd():
l,b,d= 90., 30.,1.
XYZ= coords.lbd_to_XYZ(l,b,d,degree=True)
lt,bt,dt= coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],degree=True)
assert numpy.fabs(lt-l) <10.**-10., 'XYZ_to_lbd conversion does not work as expected'
assert numpy.fabs(bt-b) < 10.**-10., 'XYZ_to_lbd conversion does not work as expected'
assert numpy.fabs(dt-d) < 10.**-10., 'XYZ_to_lbd conversion does not work as expected'
# Also test for degree=False
XYZ= coords.lbd_to_XYZ(l/180.*numpy.pi,b/180.*numpy.pi,d,degree=False)
lt,bt,dt= coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],degree=False)
assert numpy.fabs(lt-l/180.*numpy.pi) <10.**-10., 'XYZ_to_lbd conversion does not work as expected'
assert numpy.fabs(bt-b/180.*numpy.pi) < 10.**-10., 'XYZ_to_lbd conversion does not work as expected'
assert numpy.fabs(dt-d) < 10.**-10., 'XYZ_to_lbd conversion does not work as expected'
# Also test for arrays
os= numpy.ones(2)
XYZ= coords.lbd_to_XYZ(os*l/180.*numpy.pi,os*b/180.*numpy.pi,
os*d,degree=False)
lbdt= coords.XYZ_to_lbd(XYZ[:,0],XYZ[:,1],XYZ[:,2],degree=False)
assert numpy.all(numpy.fabs(lbdt[:,0]-l/180.*numpy.pi) <10.**-10.), 'XYZ_to_lbd conversion does not work as expected'
assert numpy.all(numpy.fabs(lbdt[:,1]-b/180.*numpy.pi) < 10.**-10.), 'XYZ_to_lbd conversion does not work as expected'
assert numpy.all(numpy.fabs(lbdt[:,2]-d) < 10.**-10.), 'XYZ_to_lbd conversion does not work as expected'
return None
def test_vrpmllpmbb_to_vxvyvz():
l,b,d= 90., 0.,1.
vr,pmll,pmbb= 10.,20./4.740470463496208,-10./4.740470463496208
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,
degree=True,XYZ=False)
assert numpy.fabs(vxvyvz[0]+20.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[1]-10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[2]+10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l/180.*numpy.pi,
b/180.*numpy.pi,d,
degree=False,XYZ=False)
assert numpy.fabs(vxvyvz[0]+20.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[1]-10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[2]+10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,0.,1,0.,
XYZ=True)
assert numpy.fabs(vxvyvz[0]+20.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[1]-10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[2]+10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,0.,1,0.,
XYZ=True,degree=True)
assert numpy.fabs(vxvyvz[0]+20.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[1]-10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxvyvz[2]+10.) < 10.**-10., 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(os*vr,os*pmll,os*pmbb,os*l,os*b,
os*d,degree=True,XYZ=False)
assert numpy.all(numpy.fabs(vxvyvz[:,0]+20.) < 10.**-10.), 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.all(numpy.fabs(vxvyvz[:,1]-10.) < 10.**-10.), 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
assert numpy.all(numpy.fabs(vxvyvz[:,2]+10.) < 10.**-10.), 'vrpmllpmbb_to_vxvyvz conversion did not work as expected'
return None
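# The factor 4.740470463496208 in the proper motions above is the standard
# astrometric constant k = 4.74047... km/s per (mas/yr * kpc) -- i.e. 1 AU/yr
# expressed in km/s -- so e.g. pmll = 20./k mas/yr at d = 1 kpc corresponds to
# a 20 km/s tangential velocity:
#
#     k = 4.740470463496208
#     v_tan_kms = k * pm_masyr * d_kpc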
def test_vxvyvz_to_vrpmllpmbb():
vx,vy,vz= -20.*4.740470463496208,10.,-10.*4.740470463496208
X,Y,Z= 0.,1.,0.
vrpmllpmbb= coords.vxvyvz_to_vrpmllpmbb(vx,vy,vz,X,Y,Z,
XYZ=True)
assert numpy.fabs(vrpmllpmbb[0]-10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[1]-20.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[2]+10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
# also try with degree=True (that shouldn't fail!)
vrpmllpmbb= coords.vxvyvz_to_vrpmllpmbb(vx,vy,vz,X,Y,Z,
XYZ=True,
degree=True)
assert numpy.fabs(vrpmllpmbb[0]-10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[1]-20.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[2]+10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
# also for lbd
vrpmllpmbb= coords.vxvyvz_to_vrpmllpmbb(vx,vy,vz,90.,0.,1.,
XYZ=False,degree=True)
assert numpy.fabs(vrpmllpmbb[0]-10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[1]-20.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[2]+10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
# also for lbd, not in degree
vrpmllpmbb= coords.vxvyvz_to_vrpmllpmbb(vx,vy,vz,numpy.pi/2.,0.,1.,
XYZ=False,degree=False)
assert numpy.fabs(vrpmllpmbb[0]-10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[1]-20.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.fabs(vrpmllpmbb[2]+10.) < 10.**-10., 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
# and for arrays
os= numpy.ones(2)
vrpmllpmbb= coords.vxvyvz_to_vrpmllpmbb(os*vx,os*vy,os*vz,
os*numpy.pi/2.,os*0.,os,
XYZ=False,degree=False)
assert numpy.all(numpy.fabs(vrpmllpmbb[:,0]-10.) < 10.**-10.), 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.all(numpy.fabs(vrpmllpmbb[:,1]-20.) < 10.**-10.), 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
assert numpy.all(numpy.fabs(vrpmllpmbb[:,2]+10.) < 10.**-10.), 'vxvyvz_to_vrpmllpmbb conversion did not work as expected'
return None
def test_XYZ_to_galcenrect():
X,Y,Z= 1.,3.,-2.
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.)
assert numpy.fabs(gcXYZ[0]) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
assert numpy.fabs(gcXYZ[1]-3.) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
assert numpy.fabs(gcXYZ[2]+2.) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
#Another test
X,Y,Z= -1.,3.,-2.
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.)
assert numpy.fabs(gcXYZ[0]-2.) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
assert numpy.fabs(gcXYZ[1]-3.) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
assert numpy.fabs(gcXYZ[2]+2.) < 10.**-5., 'XYZ_to_galcenrect conversion did not work as expected'
return None
def test_XYZ_to_galcenrect_negXsun():
# Check that XYZ_to_galcenrect works for negative Xsun
X,Y,Z= 0.3,2.1,-1.2
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=1.2,Zsun=0.2)
gcXYZn= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=-1.2,Zsun=0.2)
assert numpy.fabs(gcXYZ[0]+gcXYZn[0]) < 10.**-10., 'XYZ_to_galcenrect conversion did not work as expected for negative Xsun'
assert numpy.fabs(gcXYZ[1]-gcXYZn[1]) < 10.**-10., 'XYZ_to_galcenrect conversion did not work as expected for negative Xsun'
assert numpy.fabs(gcXYZ[2]-gcXYZn[2]) < 10.**-10., 'XYZ_to_galcenrect conversion did not work as expected for negative Xsun'
def test_lbd_to_galcenrect_galpyvsastropy():
# Test that galpy's transformations agree with astropy's
from astropy.coordinates import SkyCoord, Galactocentric
import astropy.units as u
_turn_off_apy()
l,b,d= 32., -12., 3.
Zsun= 0.025
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=8.,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,frame='galactic')
gc_frame= Galactocentric(galcen_distance=numpy.sqrt(8.**2.+Zsun**2.)*u.kpc,
z_sun=Zsun*u.kpc)
c= c.transform_to(gc_frame)
# galpy is left-handed, astropy right-handed
assert numpy.fabs(gcXYZ[0]+c.x.to(u.kpc).value) < 10.**-10., "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcXYZ[1]-c.y.to(u.kpc).value) < 10.**-10., "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcXYZ[2]-c.z.to(u.kpc).value) < 10.**-9.5, "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
# Also with negative Xsun
l,b,d= 32., -12., 3.
Zsun= 0.025
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=-8.,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,frame='galactic')
gc_frame= Galactocentric(galcen_distance=numpy.sqrt(8.**2.+Zsun**2.)*u.kpc,
z_sun=Zsun*u.kpc)
c= c.transform_to(gc_frame)
# galpy is now right-handed, astropy right-handed
assert numpy.fabs(gcXYZ[0]-c.x.to(u.kpc).value) < 10.**-10., "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcXYZ[1]-c.y.to(u.kpc).value) < 10.**-10., "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcXYZ[2]-c.z.to(u.kpc).value) < 10.**-9.5, "lbd to galcenrect conversion using galpy's methods does not agree with astropy"
_turn_on_apy()
return None
def test_lbd_to_galcencyl_galpyvsastropy():
# Test that galpy's transformations agree with astropy's
from astropy.coordinates import SkyCoord, Galactocentric
import astropy.units as u
_turn_off_apy()
l,b,d= 32., -12., 3.
Zsun= 0.025
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcRpZ= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=8.,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,frame='galactic')
gc_frame= Galactocentric(galcen_distance=numpy.sqrt(8.**2.+Zsun**2.)*u.kpc,
z_sun=Zsun*u.kpc)
c= c.transform_to(gc_frame)
c.representation= 'cylindrical'
# galpy is left-handed, astropy right-handed
assert numpy.fabs(gcRpZ[0]-c.rho.to(u.kpc).value) < 10.**-10., "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcRpZ[1]-numpy.pi+c.phi.to(u.rad).value) < 10.**-10., "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcRpZ[2]-c.z.to(u.kpc).value) < 10.**-9.5, "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
# Also with negative Xsun
l,b,d= 32., -12., 3.
Zsun= 0.025
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcRpZ= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=-8.,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,frame='galactic')
gc_frame= Galactocentric(galcen_distance=numpy.sqrt(8.**2.+Zsun**2.)*u.kpc,
z_sun=Zsun*u.kpc)
c= c.transform_to(gc_frame)
c.representation= 'cylindrical'
# galpy is now right-handed, astropy right-handed
assert numpy.fabs(gcRpZ[0]-c.rho.to(u.kpc).value) < 10.**-10., "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcRpZ[1]-c.phi.to(u.rad).value) < 10.**-10., "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(gcRpZ[2]-c.z.to(u.kpc).value) < 10.**-9.5, "lbd to galcencyl conversion using galpy's methods does not agree with astropy"
_turn_on_apy()
return None
def test_galcenrect_to_XYZ_negXsun():
gcX, gcY, gcZ= -1.,4.,2.
XYZ= numpy.array(coords.galcenrect_to_XYZ(gcX,gcY,gcZ,Xsun=1.,Zsun=0.2))
XYZn= numpy.array(coords.galcenrect_to_XYZ(-gcX,gcY,gcZ,Xsun=-1.,Zsun=0.2))
assert numpy.all(numpy.fabs(XYZ-XYZn) < 10.**-10.), 'galcenrect_to_XYZ conversion did not work as expected for negative Xsun'
return None
def test_galcenrect_to_XYZ():
gcX, gcY, gcZ= -1.,4.,2.
XYZ= coords.galcenrect_to_XYZ(gcX,gcY,gcZ,Xsun=1.,Zsun=0.)
assert numpy.fabs(XYZ[0]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1]-4.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[2]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
# Also for arrays
s= numpy.arange(2)+1
XYZ= coords.galcenrect_to_XYZ(gcX*s,gcY*s,gcZ*s,Xsun=1.,Zsun=0.)
assert numpy.fabs(XYZ[0,0]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,1]-4.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,2]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
# Check 2nd one
assert numpy.fabs(XYZ[1,0]-3.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,1]-8.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,2]-4.) < 10.**-4.7, 'galcenrect_to_XYZ conversion did not work as expected'
# Also for arrays with Xsun/Zsun also arrays
s= numpy.arange(2)+1
XYZ= coords.galcenrect_to_XYZ(gcX*s,gcY*s,gcZ*s,Xsun=1.*s,Zsun=0.*s)
assert numpy.fabs(XYZ[0,0]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,1]-4.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,2]-2.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
# Check 2nd one
assert numpy.fabs(XYZ[1,0]-4.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,1]-8.) < 10.**-5., 'galcenrect_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,2]-4.) < 10.**-4.7, 'galcenrect_to_XYZ conversion did not work as expected'
return None
def test_galcenrect_to_XYZ_asInverse():
# Test that galcenrect_to_XYZ is the inverse of XYZ_to_galcenrect
X,Y,Z= 1.,3.,-2.
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.1)
Xt,Yt,Zt= coords.galcenrect_to_XYZ(gcXYZ[0],gcXYZ[1],gcXYZ[2],Xsun=1.,Zsun=0.1)
assert numpy.fabs(X-Xt) < 1e-14, 'galcenrect_to_XYZ is not the exact inverse of XYZ_to_galcenrect'
assert numpy.fabs(Y-Yt) < 1e-14, 'galcenrect_to_XYZ is not the exact inverse of XYZ_to_galcenrect'
assert numpy.fabs(Z-Zt) < 1e-14, 'galcenrect_to_XYZ is not the exact inverse of XYZ_to_galcenrect'
return None
def test_XYZ_to_galcencyl():
X,Y,Z= 5.,4.,-2.
gcRpZ= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=8.,Zsun=0.)
assert numpy.fabs(gcRpZ[0]-5.) < 10.**-5., 'XYZ_to_galcencyl conversion did not work as expected'
assert numpy.fabs(gcRpZ[1]-numpy.arctan(4./3.)) < 10.**-5., 'XYZ_to_galcencyl conversion did not work as expected'
assert numpy.fabs(gcRpZ[2]+2.) < 10.**-4.8, 'XYZ_to_galcencyl conversion did not work as expected'
#Another X
X,Y,Z= 11.,4.,-2.
gcRpZ= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=8.,Zsun=0.)
assert numpy.fabs(gcRpZ[0]-5.) < 10.**-5., 'XYZ_to_galcencyl conversion did not work as expected'
assert numpy.fabs(gcRpZ[1]-numpy.pi+numpy.arctan(4./3.)) < 10.**-5., 'XYZ_to_galcencyl conversion did not work as expected'
assert numpy.fabs(gcRpZ[2]+2.) < 10.**-4.6, 'XYZ_to_galcencyl conversion did not work as expected'
return None
def test_galcencyl_to_XYZ():
gcR, gcp, gcZ= 5.,numpy.arctan(4./3.),2.
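    # cos(arctan(4./3.))= 3./5. and sin(arctan(4./3.))= 4./5., so the expected
    # heliocentric position is X= 8.-5.*3./5.= 5., Y= 5.*4./5.= 4., Z= 2.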
XYZ= coords.galcencyl_to_XYZ(gcR,gcp,gcZ,Xsun=8.,Zsun=0.)
assert numpy.fabs(XYZ[0]-5.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1]-4.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[2]-2.) < 10.**-4.7, 'galcencyl_to_XYZ conversion did not work as expected'
# Also for arrays
s= numpy.arange(2)+1
XYZ= coords.galcencyl_to_XYZ(gcR*s,gcp*s,gcZ*s,Xsun=8.,Zsun=0.)
assert numpy.fabs(XYZ[0,0]-5.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,1]-4.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,2]-2.) < 10.**-4.7, 'galcencyl_to_XYZ conversion did not work as expected'
# Also test the second one
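    # (for the doubled point cos(2.*arctan(4./3.))= -0.28 and
    #  sin(2.*arctan(4./3.))= 0.96, hence X= 8.+2.8= 10.8 and Y= 9.6)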
assert numpy.fabs(XYZ[1,0]-10.8) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,1]-9.6) < 10.**-4.7, 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,2]-4.0) < 10.**-4.5, 'galcencyl_to_XYZ conversion did not work as expected'
# Also for arrays where Xsun/Zsun are also arrays
s= numpy.arange(2)+1
XYZ= coords.galcencyl_to_XYZ(gcR*s,gcp*s,gcZ*s,Xsun=8.*s,Zsun=0.*s)
assert numpy.fabs(XYZ[0,0]-5.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,1]-4.) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[0,2]-2.) < 10.**-4.7, 'galcencyl_to_XYZ conversion did not work as expected'
# Also test the second one
assert numpy.fabs(XYZ[1,0]-18.8) < 10.**-5., 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,1]-9.6) < 10.**-4.5, 'galcencyl_to_XYZ conversion did not work as expected'
assert numpy.fabs(XYZ[1,2]-4.0) < 10.**-4., 'galcencyl_to_XYZ conversion did not work as expected'
return None
def test_galcencyl_to_XYZ_asInverse():
# Test that galcencyl_to_XYZ is the inverse of XYZ_to_galcencyl
X,Y,Z= 1.,3.,-2.
gcRpZ= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=1.,Zsun=0.1)
Xt,Yt,Zt= coords.galcencyl_to_XYZ(gcRpZ[0],gcRpZ[1],gcRpZ[2],Xsun=1.,Zsun=0.1)
assert numpy.fabs(X-Xt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(Y-Yt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(Z-Zt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
# Also for arrays where Xsun/Zsun are also arrays
s= numpy.arange(2)+1
gcRpZ1= coords.XYZ_to_galcencyl(X*s[0],Y*s[0],Z*s[0],Xsun=1.*s[0],
Zsun=0.1*s[0])
gcRpZ2= coords.XYZ_to_galcencyl(X*s[1],Y*s[1],Z*s[1],Xsun=1.*s[1],
Zsun=0.1*s[1])
XYZt= coords.galcencyl_to_XYZ(numpy.hstack((gcRpZ1[0],gcRpZ2[0])),
numpy.hstack((gcRpZ1[1],gcRpZ2[1])),
numpy.hstack((gcRpZ1[2],gcRpZ2[2])),
Xsun=1.*s,Zsun=0.1*s)
# first one
assert numpy.fabs(XYZt[0,0]-Xt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(XYZt[0,1]-Yt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(XYZt[0,2]-Zt) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
# second one
assert numpy.fabs(XYZt[1,0]-Xt*s[1]) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(XYZt[1,1]-Yt*s[1]) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
assert numpy.fabs(XYZt[1,2]-Zt*s[1]) < 1e-14, 'galcencyl_to_XYZ is not the exact inverse of XYZ_to_galcencyl'
return None
def test_vxvyvz_to_galcenrect():
    vx,vy,vz= 10.,-20.,30.
vgc= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=[-5.,10.,5.])
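    # for Xsun>0 galpy's Galactocentric frame is left-handed, so the expected
    # result is vgc= (-vx,vy,vz)+vsun= (-15.,-10.,35.)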
assert numpy.fabs(vgc[0]+15.) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected'
assert numpy.fabs(vgc[1]+10.) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected'
assert numpy.fabs(vgc[2]-35.) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected'
return None
def test_vxvyvz_to_galcenrect_negXsun():
    vx,vy,vz= 10.,-20.,30.
vgc= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=[-5.,10.,5.],
Xsun=1.1,Zsun=0.2)
vgcn= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=[5.,10.,5.],
Xsun=-1.1,Zsun=0.2)
assert numpy.fabs(vgc[0]+vgcn[0]) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected for negative Xsun'
assert numpy.fabs(vgc[1]-vgcn[1]) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected for negative Xsun'
assert numpy.fabs(vgc[2]-vgcn[2]) < 10.**-4., 'vxvyvz_to_galcenrect conversion did not work as expected for negative Xsun'
return None
def test_vrpmllpmbb_to_galcenrect_galpyvsastropy():
# Only run this for astropy>3
if not _APY3: return None
# Test that galpy's transformations agree with astropy's
from astropy.coordinates import SkyCoord, Galactocentric, \
CartesianDifferential
import astropy.units as u
_turn_off_apy()
l,b,d= 32., -12., 3.
vr,pmll,pmbb= -112., -13.,5.
Zsun= 0.025
Rsun= 8.
vsun= [-10.,230.,7.]
# Using galpy
vx,vy,vz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,degree=True)
vXYZg= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=vsun,Xsun=Rsun,
Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,
radial_velocity=vr*u.km/u.s,pm_l_cosb=pmll*u.mas/u.yr,
pm_b=pmbb*u.mas/u.yr,frame='galactic')
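    # astropy's galcen_distance is the full Sun-GC distance, while galpy's
    # Xsun is its projection onto the plane, hence the sqrt(Rsun^2+Zsun^2);
    # the sign flip on vsun[0] accounts for the handedness difference below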
gc_frame= Galactocentric(\
galcen_distance=numpy.sqrt(Rsun**2.+Zsun**2.)*u.kpc,z_sun=Zsun*u.kpc,
galcen_v_sun=CartesianDifferential(numpy.array([-vsun[0],vsun[1],vsun[2]])*u.km/u.s))
c= c.transform_to(gc_frame)
c.representation= 'cartesian'
# galpy is left-handed, astropy right-handed
    assert numpy.fabs(vXYZg[0]+c.v_x.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vXYZg[1]-c.v_y.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vXYZg[2]-c.v_z.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
# Also with negative Xsun
l,b,d= 32., -12., 3.
Zsun= 0.025
Rsun= -8.
vsun= numpy.array([-10.,230.,7.])
# Using galpy
vx,vy,vz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,degree=True)
vXYZg= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=vsun,Xsun=Rsun,
Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,
radial_velocity=vr*u.km/u.s,pm_l_cosb=pmll*u.mas/u.yr,
pm_b=pmbb*u.mas/u.yr,frame='galactic')
gc_frame= Galactocentric(\
galcen_distance=numpy.sqrt(Rsun**2.+Zsun**2.)*u.kpc,z_sun=Zsun*u.kpc,
galcen_v_sun=CartesianDifferential(numpy.array([vsun[0],vsun[1],vsun[2]])*u.km/u.s))
c= c.transform_to(gc_frame)
c.representation= 'cartesian'
# galpy is now right-handed, astropy right-handed
    assert numpy.fabs(vXYZg[0]-c.v_x.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vXYZg[1]-c.v_y.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vXYZg[2]-c.v_z.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcenrect conversion using galpy's methods does not agree with astropy"
_turn_on_apy()
return None
def test_vxvyvz_to_galcencyl():
X,Y,Z= 3.,4.,2.
    vx,vy,vz= 10.,-20.,30.
vgc= coords.vxvyvz_to_galcencyl(vx,vy,vz,X,Y,Z,vsun=[-5.,10.,5.])
    assert numpy.fabs(vgc[0]+17.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
    assert numpy.fabs(vgc[1]-6.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
    assert numpy.fabs(vgc[2]-35.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
#with galcen=True
vgc= coords.vxvyvz_to_galcencyl(vx,vy,vz,5.,numpy.arctan(4./3.),Z,
vsun=[-5.,10.,5.],galcen=True)
    assert numpy.fabs(vgc[0]+17.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
    assert numpy.fabs(vgc[1]-6.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
    assert numpy.fabs(vgc[2]-35.) < 10.**-4., 'vxvyvz_to_galcencyl conversion did not work as expected'
return None
def test_vrpmllpmbb_to_galcencyl_galpyvsastropy():
# Only run this for astropy>3
if not _APY3: return None
# Test that galpy's transformations agree with astropy's
from astropy.coordinates import SkyCoord, Galactocentric, \
CartesianDifferential
import astropy.units as u
_turn_off_apy()
l,b,d= 32., -12., 3.
vr,pmll,pmbb= -112., -13.,5.
Zsun= 0.025
Rsun= 8.
vsun= [-10.,230.,7.]
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=Rsun,Zsun=Zsun)
vx,vy,vz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,degree=True)
vRTZg= coords.vxvyvz_to_galcencyl(vx,vy,vz,gcXYZ[0],gcXYZ[1],gcXYZ[2],
vsun=vsun,Xsun=Rsun,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,
radial_velocity=vr*u.km/u.s,pm_l_cosb=pmll*u.mas/u.yr,
pm_b=pmbb*u.mas/u.yr,frame='galactic')
gc_frame= Galactocentric(\
galcen_distance=numpy.sqrt(Rsun**2.+Zsun**2.)*u.kpc,z_sun=Zsun*u.kpc,
galcen_v_sun=CartesianDifferential(numpy.array([-vsun[0],vsun[1],vsun[2]])*u.km/u.s))
c= c.transform_to(gc_frame)
c.representation= 'cylindrical'
# galpy is left-handed, astropy right-handed
    assert numpy.fabs(vRTZg[0]-c.d_rho.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vRTZg[1]+(c.d_phi*c.rho).to(u.km/u.s,
equivalencies=u.dimensionless_angles()).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vRTZg[2]-c.d_z.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
# Also with negative Xsun
l,b,d= 32., -12., 3.
Zsun= 0.025
Rsun= -8.
vsun= numpy.array([-10.,230.,7.])
# Using galpy
X,Y,Z= coords.lbd_to_XYZ(l,b,d,degree=True)
gcXYZ= coords.XYZ_to_galcenrect(X,Y,Z,Xsun=Rsun,Zsun=Zsun)
vx,vy,vz= coords.vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,degree=True)
vRTZg= coords.vxvyvz_to_galcencyl(vx,vy,vz,gcXYZ[0],gcXYZ[1],gcXYZ[2],
vsun=vsun,Xsun=Rsun,Zsun=Zsun)
# Using astropy
c= SkyCoord(l=l*u.deg,b=b*u.deg,distance=d*u.kpc,
radial_velocity=vr*u.km/u.s,pm_l_cosb=pmll*u.mas/u.yr,
pm_b=pmbb*u.mas/u.yr,frame='galactic')
gc_frame= Galactocentric(\
galcen_distance=numpy.sqrt(Rsun**2.+Zsun**2.)*u.kpc,z_sun=Zsun*u.kpc,
galcen_v_sun=CartesianDifferential(numpy.array([vsun[0],vsun[1],vsun[2]])*u.km/u.s))
c= c.transform_to(gc_frame)
c.representation= 'cylindrical'
    # galpy is now right-handed, astropy right-handed
    assert numpy.fabs(vRTZg[0]-c.d_rho.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vRTZg[1]-(c.d_phi*c.rho).to(u.km/u.s,
equivalencies=u.dimensionless_angles()).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
assert numpy.fabs(vRTZg[2]-c.d_z.to(u.km/u.s).value) < 10.**-8., "vrpmllpmbb to galcencyl conversion using galpy's methods does not agree with astropy"
_turn_on_apy()
return None
def test_galcenrect_to_vxvyvz():
vxg,vyg,vzg= -15.,-10.,35.
vxyz= coords.galcenrect_to_vxvyvz(vxg,vyg,vzg,vsun=[-5.,10.,5.])
assert numpy.fabs(vxyz[0]-10.) < 10.**-4., 'galcenrect_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxyz[1]+20.) < 10.**-4., 'galcenrect_to_vxvyvz conversion did not work as expected'
assert numpy.fabs(vxyz[2]-30.) < 10.**-4., 'galcenrect_to_vxvyvz conversion did not work as expected'
#Also for arrays
os= numpy.ones(2)
vxyz= coords.galcenrect_to_vxvyvz(os*vxg,os*vyg,os*vzg,
vsun=[-5.,10.,5.])
assert numpy.all(numpy.fabs(vxyz[:,0]-10.) < 10.**-4.), 'galcenrect_to_vxvyvz conversion did not work as expected'
assert numpy.all(numpy.fabs(vxyz[:,1]+20.) < 10.**-4.), 'galcenrect_to_vxvyvz conversion did not work as expected'
assert numpy.all(numpy.fabs(vxyz[:,2]-30.) < 10.**-4.), 'galcenrect_to_vxvyvz conversion did not work as expected'
return None
def test_galcenrect_to_vxvyvz_negXsun():
vxg,vyg,vzg= -15.,-10.,35.
vxyz= coords.galcenrect_to_vxvyvz(vxg,vyg,vzg,vsun=[-5.,10.,5.],
Xsun=1.1,Zsun=0.2)
vxyzn= coords.galcenrect_to_vxvyvz(-vxg,vyg,vzg,vsun=[5.,10.,5.],
Xsun=-1.1,Zsun=0.2)
assert numpy.all(numpy.fabs(numpy.array(vxyz)-numpy.array(vxyzn)) < 10.**-4.), 'galcenrect_to_vxvyvz conversion did not work as expected'
return None
def test_galcenrect_to_vxvyvz_asInverse():
# Test that galcenrect_to_vxvyvz is the inverse of vxvyvz_to_galcenrect
vx,vy,vz= -15.,-10.,35.
vxg,vyg,vzg= coords.vxvyvz_to_galcenrect(vx,vy,vz,vsun=[-5.,10.,5.])
vxt,vyt,vzt= coords.galcenrect_to_vxvyvz(vxg,vyg,vzg,vsun=[-5.,10.,5.])
assert numpy.fabs(vx-vxt) < 10.**-14., 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
assert numpy.fabs(vy-vyt) < 10.**-14., 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
assert numpy.fabs(vz-vzt) < 10.**-14., 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
#Also for arrays
os= numpy.ones(2)
vxyzg= coords.vxvyvz_to_galcenrect(vx*os,vy*os,vz*os,
vsun=[-5.,10.,5.])
vxyzt= coords.galcenrect_to_vxvyvz(vxyzg[:,0],vxyzg[:,1],vxyzg[:,2],
vsun=[-5.,10.,5.])
assert numpy.all(numpy.fabs(vxyzt[:,0]-vx*os) < 10.**-10.), 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
assert numpy.all(numpy.fabs(vxyzt[:,1]-vy*os) < 10.**-10.), 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
assert numpy.all(numpy.fabs(vxyzt[:,2]-vz*os) < 10.**-10.), 'galcenrect_to_vxvyvz is not the inverse of vxvyvz_to_galcenrect'
return None
def test_galcencyl_to_vxvyvz():
vr,vp,vz= -17.,6.,35.
phi= numpy.arctan(4./3.)
vxyz= coords.galcencyl_to_vxvyvz(vr,vp,vz,phi,vsun=[-5.,10.,5.])
    assert numpy.fabs(vxyz[0]-10.) < 10.**-4., 'galcencyl_to_vxvyvz conversion did not work as expected'
    assert numpy.fabs(vxyz[1]+20.) < 10.**-4., 'galcencyl_to_vxvyvz conversion did not work as expected'
    assert numpy.fabs(vxyz[2]-30.) < 10.**-4., 'galcencyl_to_vxvyvz conversion did not work as expected'
return None
def test_galcencyl_to_vxvyvz_asInverse():
# Test that galcencyl_to_vxvyvz is the inverse of vxvyvz_to_galcencyl
vx,vy,vz= -15.,-10.,35.
phi= numpy.arctan(4./3.)
vrg,vtg,vzg= coords.vxvyvz_to_galcencyl(vx,vy,vz,0.,phi,0.,
vsun=[-5.,10.,5.],galcen=True)
vxt,vyt,vzt= coords.galcencyl_to_vxvyvz(vrg,vtg,vzg,phi,vsun=[-5.,10.,5.])
assert numpy.fabs(vx-vxt) < 10.**-14., 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
assert numpy.fabs(vy-vyt) < 10.**-14., 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
assert numpy.fabs(vz-vzt) < 10.**-14., 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
#Also for arrays
os= numpy.ones(2)
vx,vy,vz= -15.,-10.,35.
phi= numpy.arctan(4./3.)
vrtzg= coords.vxvyvz_to_galcencyl(vx*os,vy*os,vz*os,0.,phi*os,0.,
vsun=[-5.,10.,5.],galcen=True)
vxyzt= coords.galcencyl_to_vxvyvz(vrtzg[:,0],vrtzg[:,1],vrtzg[:,2],
phi*os,vsun=[-5.,10.,5.])
assert numpy.all(numpy.fabs(vxyzt[:,0]-vx*os) < 10.**-10.), 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
assert numpy.all(numpy.fabs(vxyzt[:,1]-vy*os) < 10.**-10.), 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
assert numpy.all(numpy.fabs(vxyzt[:,2]-vz*os) < 10.**-10.), 'galcencyl_to_vxvyvz is not the inverse of vxvyvz_to_galcencyl'
return None
def test_sphergal_to_rectgal():
l,b,d= 90.,0.,1.
vr,pmll,pmbb= 10.,-20./4.740470463496208,30./4.740470463496208
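    # 4.740470463496208 is the km/s equivalent of 1 mas/yr at 1 kpc, so at
    # d= 1. these proper motions correspond to transverse velocities of
    # exactly -20. and 30. km/s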
X,Y,Z,vx,vy,vz= coords.sphergal_to_rectgal(l,b,d,vr,pmll,pmbb,
degree=True)
assert numpy.fabs(X-0.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(Y-1.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(Z-0.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vx-20.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vy-10.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vz-30.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
#Also test for degree=False
X,Y,Z,vx,vy,vz= coords.sphergal_to_rectgal(l/180.*numpy.pi,
b/180.*numpy.pi,
d,vr,pmll,pmbb,
degree=False)
assert numpy.fabs(X-0.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(Y-1.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(Z-0.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vx-20.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vy-10.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.fabs(vz-30.) < 10.**-10., 'sphergal_to_rectgal conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
XYZvxvyvz= coords.sphergal_to_rectgal(os*l,os*b,os*d,
os*vr,os*pmll,os*pmbb,
degree=True)
X= XYZvxvyvz[:,0]
Y= XYZvxvyvz[:,1]
Z= XYZvxvyvz[:,2]
vx= XYZvxvyvz[:,3]
vy= XYZvxvyvz[:,4]
vz= XYZvxvyvz[:,5]
assert numpy.all(numpy.fabs(X-0.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.all(numpy.fabs(Y-1.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.all(numpy.fabs(Z-0.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.all(numpy.fabs(vx-20.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.all(numpy.fabs(vy-10.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
assert numpy.all(numpy.fabs(vz-30.) < 10.**-10.), 'sphergal_to_rectgal conversion did not work as expected'
return None
def test_rectgal_to_sphergal():
#Test that this is the inverse of sphergal_to_rectgal
l,b,d= 90.,30.,1.
vr,pmll,pmbb= 10.,-20.,30.
X,Y,Z,vx,vy,vz= coords.sphergal_to_rectgal(l,b,d,vr,pmll,pmbb,
degree=True)
lt,bt,dt,vrt,pmllt,pmbbt= coords.rectgal_to_sphergal(X,Y,Z,
vx,vy,vz,
degree=True)
assert numpy.fabs(lt-l) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(bt-b) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(dt-d) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(vrt-vr) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(pmllt-pmll) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(pmbbt-pmbb) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
#Also test for degree=False
lt,bt,dt,vrt,pmllt,pmbbt= coords.rectgal_to_sphergal(X,Y,Z,
vx,vy,vz,
degree=False)
assert numpy.fabs(lt-l/180.*numpy.pi) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(bt-b/180.*numpy.pi) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(dt-d) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(vrt-vr) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(pmllt-pmll) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.fabs(pmbbt-pmbb) < 10.**-10., 'rectgal_to_sphergal conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
lbdvrpmllpmbbt= coords.rectgal_to_sphergal(os*X,os*Y,os*Z,
os*vx,os*vy,
os*vz,
degree=True)
lt= lbdvrpmllpmbbt[:,0]
bt= lbdvrpmllpmbbt[:,1]
dt= lbdvrpmllpmbbt[:,2]
vrt= lbdvrpmllpmbbt[:,3]
pmllt= lbdvrpmllpmbbt[:,4]
pmbbt= lbdvrpmllpmbbt[:,5]
assert numpy.all(numpy.fabs(lt-l) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.all(numpy.fabs(bt-b) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.all(numpy.fabs(dt-d) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.all(numpy.fabs(vrt-vr) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.all(numpy.fabs(pmllt-pmll) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
assert numpy.all(numpy.fabs(pmbbt-pmbb) < 10.**-10.), 'rectgal_to_sphergal conversion did not work as expected'
return None
def test_pmrapmdec_to_pmllpmbb():
#This is a random ra,dec
ra, dec= 132., -20.4
pmra, pmdec= 10., 20.
pmll, pmbb= coords.pmrapmdec_to_pmllpmbb(pmra,pmdec,
ra,dec,degree=True,epoch=1950.)
assert numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10., 'pmrapmdec_to_pmllpmbb conversion did not work as expected'
# This is close to the NGP at 1950.
ra, dec= 192.24, 27.39
pmra, pmdec= 10., 20.
os= numpy.ones(2)
pmllpmbb= coords.pmrapmdec_to_pmllpmbb(os*pmra,os*pmdec,
os*ra,os*dec,
degree=True,epoch=1950.)
pmll= pmllpmbb[:,0]
pmbb= pmllpmbb[:,1]
assert numpy.all(numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10.), 'pmrapmdec_to_pmllpmbb conversion did not work as expected close to the NGP'
# This is the NGP at 1950.
ra, dec= 192.25, 27.4
pmra, pmdec= 10., 20.
os= numpy.ones(2)
pmllpmbb= coords.pmrapmdec_to_pmllpmbb(os*pmra,os*pmdec,
os*ra,os*dec,
degree=True,epoch=1950.)
pmll= pmllpmbb[:,0]
pmbb= pmllpmbb[:,1]
assert numpy.all(numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10.), 'pmrapmdec_to_pmllpmbb conversion did not work as expected for the NGP'
# This is the NCP
ra, dec= numpy.pi, numpy.pi/2.
pmra, pmdec= 10., 20.
pmll, pmbb= coords.pmrapmdec_to_pmllpmbb(pmra,pmdec,
ra,dec,degree=False,
epoch=1950.)
assert numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10., 'pmrapmdec_to_pmllpmbb conversion did not work as expected for the NCP'
return None
def test_pmllpmbb_to_pmrapmdec():
#This is a random l,b
ll, bb= 132., -20.4
pmll, pmbb= 10., 20.
pmra, pmdec= coords.pmllpmbb_to_pmrapmdec(pmll,pmbb,
ll,bb,
degree=True,epoch=1950.)
assert numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10., 'pmllpmbb_to_pmrapmdec conversion did not work as expected for a random l,b'
# This is close to the NGP
ll,bb= numpy.pi-0.001, numpy.pi/2.-0.001
pmll, pmbb= 10., 20.
os= numpy.ones(2)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(os*pmll,os*pmbb,
os*ll,os*bb,
degree=False,epoch=1950.)
pmra= pmrapmdec[:,0]
pmdec= pmrapmdec[:,1]
assert numpy.all(numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10.), 'pmllpmbb_to_pmrapmdec conversion did not work as expected close to the NGP'
# This is the NGP
ll,bb= numpy.pi, numpy.pi/2.
pmll, pmbb= 10., 20.
os= numpy.ones(2)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(os*pmll,os*pmbb,
os*ll,os*bb,
degree=False,epoch=1950.)
pmra= pmrapmdec[:,0]
pmdec= pmrapmdec[:,1]
assert numpy.all(numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10.), 'pmllpmbb_to_pmrapmdec conversion did not work as expected at the NGP'
# This is the NCP
ra, dec= numpy.pi, numpy.pi/2.
ll, bb= coords.radec_to_lb(ra,dec,degree=False,epoch=1950.)
pmll, pmbb= 10., 20.
pmra, pmdec= coords.pmllpmbb_to_pmrapmdec(pmll,pmbb,
ll,bb,
degree=False,epoch=1950.)
assert numpy.fabs(numpy.sqrt(pmll**2.+pmbb**2.)-numpy.sqrt(pmra**2.+pmdec**2.)) < 10.**-10., 'pmllpmbb_to_pmrapmdec conversion did not work as expected at the NCP'
return None
def test_cov_pmradec_to_pmllbb():
    # This is the NGP at 1950., for which the parallactic angle is 180 degrees
ra, dec= 192.25, 27.4
cov_pmrapmdec= numpy.array([[100.,100.],[100.,400.]])
cov_pmllpmbb= coords.cov_pmrapmdec_to_pmllpmbb(cov_pmrapmdec,
ra,dec,
degree=True,
epoch=1950.)
assert numpy.fabs(cov_pmllpmbb[0,0]-100.) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
assert numpy.fabs(cov_pmllpmbb[0,1]-100.) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
assert numpy.fabs(cov_pmllpmbb[1,0]-100.) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
assert numpy.fabs(cov_pmllpmbb[1,1]-400.) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
# This is a random position, check that the conversion makes sense
ra, dec= 132.25, -23.4
cov_pmrapmdec= numpy.array([[100.,100.],[100.,400.]])
cov_pmllpmbb= coords.cov_pmrapmdec_to_pmllpmbb(cov_pmrapmdec,
ra/180.*numpy.pi,
dec/180.*numpy.pi,
degree=False,
epoch=1950.)
assert numpy.fabs(numpy.linalg.det(cov_pmllpmbb)-numpy.linalg.det(cov_pmrapmdec)) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
assert numpy.fabs(numpy.trace(cov_pmllpmbb)-numpy.trace(cov_pmrapmdec)) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
# This is a random position, check that the conversion makes sense, arrays
ra, dec= 132.25, -23.4
icov_pmrapmdec= numpy.array([[100.,100.],[100.,400.]])
cov_pmrapmdec= numpy.empty((3,2,2))
for ii in range(3): cov_pmrapmdec[ii,:,:]= icov_pmrapmdec
os= numpy.ones(3)
cov_pmllpmbb= coords.cov_pmrapmdec_to_pmllpmbb(cov_pmrapmdec,
os*ra,
os*dec,
degree=True,
epoch=1950.)
for ii in range(3):
assert numpy.fabs(numpy.linalg.det(cov_pmllpmbb[ii,:,:])-numpy.linalg.det(cov_pmrapmdec[ii,:,:])) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
assert numpy.fabs(numpy.trace(cov_pmllpmbb[ii,:,:])-numpy.trace(cov_pmrapmdec[ii,:,:])) < 10.**-10., 'cov_pmradec_to_pmllbb conversion did not work as expected'
return None
def test_cov_dvrpmllbb_to_vxyz():
l,b,d= 90., 0., 2.
e_d, e_vr= 0.2, 2.
cov_pmllpmbb= numpy.array([[100.,0.],[0.,400.]])
pmll,pmbb= 20.,30.
cov_vxvyvz= coords.cov_dvrpmllbb_to_vxyz(d,e_d,e_vr,
pmll,pmbb,
cov_pmllpmbb,
l,b,
degree=True,
plx=False)
assert numpy.fabs(numpy.sqrt(cov_vxvyvz[0,0])
                      -d*4.740470463496208*pmll*numpy.sqrt((e_d/d)**2.+(10./pmll)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
    assert numpy.fabs(numpy.sqrt(cov_vxvyvz[1,1])-e_vr) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
    assert numpy.fabs(numpy.sqrt(cov_vxvyvz[2,2])
                      -d*4.740470463496208*pmbb*numpy.sqrt((e_d/d)**2.+(20./pmbb)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
#Another one
l,b,d= 180., 0., 1./2.
e_d, e_vr= 0.05, 2.
cov_pmllpmbb= numpy.array([[100.,0.],[0.,400.]])
pmll,pmbb= 20.,30.
cov_vxvyvz= coords.cov_dvrpmllbb_to_vxyz(d,e_d,e_vr,
pmll,pmbb,
cov_pmllpmbb,
l/180.*numpy.pi,
b/180.*numpy.pi,
degree=False,
plx=True)
    assert numpy.fabs(numpy.sqrt(cov_vxvyvz[0,0])-e_vr) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
    assert numpy.fabs(numpy.sqrt(cov_vxvyvz[1,1])
                      -1./d*4.740470463496208*pmll*numpy.sqrt((e_d/d)**2.+(10./pmll)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
    assert numpy.fabs(numpy.sqrt(cov_vxvyvz[2,2])
                      -1./d*4.740470463496208*pmbb*numpy.sqrt((e_d/d)**2.+(20./pmbb)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
#Another one, w/ arrays
l,b,d= 90., 90., 2.
e_d, e_vr= 0.2, 2.
tcov_pmllpmbb= numpy.array([[100.,0.],[0.,400.]])
cov_pmllpmbb= numpy.empty((3,2,2))
for ii in range(3): cov_pmllpmbb[ii,:,:]= tcov_pmllpmbb
pmll,pmbb= 20.,30.
os= numpy.ones(3)
cov_vxvyvz= coords.cov_dvrpmllbb_to_vxyz(os*d,os*e_d,os*e_vr,
os*pmll,os*pmbb,
cov_pmllpmbb,
os*l,os*b,
degree=True,
plx=False)
for ii in range(3):
        assert numpy.fabs(numpy.sqrt(cov_vxvyvz[ii,0,0])
                          -d*4.740470463496208*pmll*numpy.sqrt((e_d/d)**2.+(10./pmll)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
        assert numpy.fabs(numpy.sqrt(cov_vxvyvz[ii,1,1])
                          -d*4.740470463496208*pmbb*numpy.sqrt((e_d/d)**2.+(20./pmbb)**2.)) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
        assert numpy.fabs(numpy.sqrt(cov_vxvyvz[ii,2,2])-e_vr) < 10.**-10., 'cov_dvrpmllbb_to_vxyz conversion did not work as expected'
return None
def test_dl_to_rphi_2d():
#This is a tangent point
l= numpy.arcsin(0.75)
d= 6./numpy.tan(l)
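    # at the tangent point the line of sight is perpendicular to the radius
    # vector, so r= ro*sin(l)= 6. and d= r/tan(l)= ro*cos(l)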
r,phi= coords.dl_to_rphi_2d(d,l,degree=False,ro=8.,phio=0.)
assert numpy.fabs(r-6.) < 10.**-10., 'dl_to_rphi_2d conversion did not work as expected'
assert numpy.fabs(phi-numpy.arccos(0.75)) < 10.**-10., 'dl_to_rphi_2d conversion did not work as expected'
#This is a different point
d,l= 2., 45.
r,phi= coords.dl_to_rphi_2d(d,l,degree=True,ro=2.*numpy.sqrt(2.),
phio=10.)
assert numpy.fabs(r-2.) < 10.**-10., 'dl_to_rphi_2d conversion did not work as expected'
assert numpy.fabs(phi-55.) < 10.**-10., 'dl_to_rphi_2d conversion did not work as expected'
#This is a different point, for array
d,l= 2., 45.
os= numpy.ones(2)
r,phi= coords.dl_to_rphi_2d(os*d,os*l,degree=True,
ro=2.*numpy.sqrt(2.),
phio=0.)
assert numpy.all(numpy.fabs(r-2.) < 10.**-10.), 'dl_to_rphi_2d conversion did not work as expected'
assert numpy.all(numpy.fabs(phi-45.) < 10.**-10.), 'dl_to_rphi_2d conversion did not work as expected'
#This is a different point, for list (which I support for some reason)
d,l= 2., 45.
r,phi= coords.dl_to_rphi_2d([d,d],[l,l],degree=True,
ro=2.*numpy.sqrt(2.),
phio=0.)
r= numpy.array(r)
phi= numpy.array(phi)
assert numpy.all(numpy.fabs(r-2.) < 10.**-10.), 'dl_to_rphi_2d conversion did not work as expected'
assert numpy.all(numpy.fabs(phi-45.) < 10.**-10.), 'dl_to_rphi_2d conversion did not work as expected'
return None
def test_rphi_to_dl_2d():
#This is a tangent point
r,phi= 6., numpy.arccos(0.75)
d,l= coords.rphi_to_dl_2d(r,phi,degree=False,ro=8.,phio=0.)
    assert numpy.fabs(d-6./numpy.tan(numpy.arcsin(0.75))) < 10.**-10., 'rphi_to_dl_2d conversion did not work as expected'
assert numpy.fabs(l-numpy.arcsin(0.75)) < 10.**-10., 'rphi_to_dl_2d conversion did not work as expected'
#This is another point
r,phi= 2., 55.
d,l= coords.rphi_to_dl_2d(r,phi,degree=True,ro=2.*numpy.sqrt(2.),
phio=10.)
assert numpy.fabs(d-2.) < 10.**-10., 'rphi_to_dl_2d conversion did not work as expected'
assert numpy.fabs(l-45.) < 10.**-10., 'rphi_to_dl_2d conversion did not work as expected'
#This is another point, for arrays
r,phi= 2., 45.
os= numpy.ones(2)
d,l= coords.rphi_to_dl_2d(os*r,os*phi,
degree=True,ro=2.*numpy.sqrt(2.),
phio=0.)
assert numpy.all(numpy.fabs(d-2.) < 10.**-10.), 'rphi_to_dl_2d conversion did not work as expected'
assert numpy.all(numpy.fabs(l-45.) < 10.**-10.), 'rphi_to_dl_2d conversion did not work as expected'
#This is another point, for lists, which for some reason I support
r,phi= 2., 45.
d,l= coords.rphi_to_dl_2d([r,r],[phi,phi],
degree=True,ro=2.*numpy.sqrt(2.),
phio=0.)
d= numpy.array(d)
l= numpy.array(l)
assert numpy.all(numpy.fabs(d-2.) < 10.**-10.), 'rphi_to_dl_2d conversion did not work as expected'
assert numpy.all(numpy.fabs(l-45.) < 10.**-10.), 'rphi_to_dl_2d conversion did not work as expected'
return None
def test_uv_to_Rz():
u, v= numpy.arccosh(5./3.), numpy.pi/6.
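    # prolate spheroidal coordinates: R= delta*sinh(u)*sin(v) and
    # z= delta*cosh(u)*cos(v); cosh(u)= 5./3. implies sinh(u)= 4./3., so
    # R= 3.*(4./3.)*0.5= 2. and z= 3.*(5./3.)*numpy.sqrt(3.)/2.= 2.5*numpy.sqrt(3.)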
R,z= coords.uv_to_Rz(u,v,delta=3.)
assert numpy.fabs(R-2.) < 10.**-10., 'uv_to_Rz conversion did not work as expected'
assert numpy.fabs(z-2.5*numpy.sqrt(3.)) < 10.**-10., 'uv_to_Rz conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
R,z= coords.uv_to_Rz(os*u,os*v,delta=3.)
assert numpy.all(numpy.fabs(R-2.) < 10.**-10.), 'uv_to_Rz conversion did not work as expected'
assert numpy.all(numpy.fabs(z-2.5*numpy.sqrt(3.)) < 10.**-10.), 'uv_to_Rz conversion did not work as expected'
return None
def test_Rz_to_uv():
u, v= numpy.arccosh(5./3.), numpy.pi/6.
ut,vt= coords.Rz_to_uv(*coords.uv_to_Rz(u,v,delta=3.),delta=3.)
    assert numpy.fabs(ut-u) < 10.**-10., 'Rz_to_uv conversion did not work as expected'
assert numpy.fabs(vt-v) < 10.**-10., 'Rz_to_uv conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
ut,vt= coords.Rz_to_uv(*coords.uv_to_Rz(u*os,v*os,delta=3.),delta=3.)
    assert numpy.all(numpy.fabs(ut-u) < 10.**-10.), 'Rz_to_uv conversion did not work as expected'
assert numpy.all(numpy.fabs(vt-v) < 10.**-10.), 'Rz_to_uv conversion did not work as expected'
return None
def test_Rz_to_coshucosv():
u, v= numpy.arccosh(5./3.), numpy.pi/3.
R,z= coords.uv_to_Rz(u,v,delta=3.)
coshu,cosv= coords.Rz_to_coshucosv(R,z,delta=3.)
    assert numpy.fabs(coshu-5./3.) < 10.**-10., 'Rz_to_coshucosv conversion did not work as expected'
    assert numpy.fabs(cosv-0.5) < 10.**-10., 'Rz_to_coshucosv conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
coshu,cosv= coords.Rz_to_coshucosv(R*os,z*os,delta=3.)
    assert numpy.all(numpy.fabs(coshu-5./3.) < 10.**-10.), 'Rz_to_coshucosv conversion did not work as expected'
    assert numpy.all(numpy.fabs(cosv-0.5) < 10.**-10.), 'Rz_to_coshucosv conversion did not work as expected'
return None
def test_uv_to_Rz_oblate():
u, v= numpy.arccosh(5./3.), numpy.pi/6.
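    # in the oblate convention sinh and cosh swap roles:
    # R= delta*cosh(u)*sin(v)= 3.*(5./3.)*0.5= 2.5 and
    # z= delta*sinh(u)*cos(v)= 3.*(4./3.)*numpy.sqrt(3.)/2.= 2.*numpy.sqrt(3.)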
R,z= coords.uv_to_Rz(u,v,delta=3.,oblate=True)
assert numpy.fabs(R-2.5) < 10.**-10., 'uv_to_Rz conversion did not work as expected'
assert numpy.fabs(z-2.*numpy.sqrt(3.)) < 10.**-10., 'uv_to_Rz conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
R,z= coords.uv_to_Rz(os*u,os*v,delta=3.,oblate=True)
assert numpy.all(numpy.fabs(R-2.5) < 10.**-10.), 'uv_to_Rz conversion did not work as expected'
assert numpy.all(numpy.fabs(z-2.*numpy.sqrt(3.)) < 10.**-10.), 'uv_to_Rz conversion did not work as expected'
return None
def test_Rz_to_uv_oblate():
u, v= numpy.arccosh(5./3.), numpy.pi/6.
ut,vt= coords.Rz_to_uv(*coords.uv_to_Rz(u,v,
delta=3.,oblate=True),
delta=3.,oblate=True)
    assert numpy.fabs(ut-u) < 10.**-10., 'Rz_to_uv conversion did not work as expected'
assert numpy.fabs(vt-v) < 10.**-10., 'Rz_to_uv conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
ut,vt= coords.Rz_to_uv(*coords.uv_to_Rz(u*os,v*os,
delta=3.,oblate=True),
delta=3.,oblate=True)
    assert numpy.all(numpy.fabs(ut-u) < 10.**-10.), 'Rz_to_uv conversion did not work as expected'
assert numpy.all(numpy.fabs(vt-v) < 10.**-10.), 'Rz_to_uv conversion did not work as expected'
return None
def test_Rz_to_coshucosv_oblate():
u, v= numpy.arccosh(5./3.), numpy.pi/3.
R,z= coords.uv_to_Rz(u,v,delta=3.,oblate=True)
coshu,cosv= coords.Rz_to_coshucosv(R,z,delta=3.,oblate=True)
    assert numpy.fabs(coshu-5./3.) < 10.**-10., 'Rz_to_coshucosv conversion did not work as expected'
    assert numpy.fabs(cosv-0.5) < 10.**-10., 'Rz_to_coshucosv conversion did not work as expected'
#Also test for arrays
os= numpy.ones(2)
coshu,cosv= coords.Rz_to_coshucosv(R*os,z*os,delta=3.,oblate=True)
    assert numpy.all(numpy.fabs(coshu-5./3.) < 10.**-10.), 'Rz_to_coshucosv conversion did not work as expected'
    assert numpy.all(numpy.fabs(cosv-0.5) < 10.**-10.), 'Rz_to_coshucosv conversion did not work as expected'
return None
def test_vRvz_to_pupv():
# Some sanity checks
# At R,z << Delta --> p_u ~ delta vR, p_v ~ -delta vz
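    # (these limits follow from the prolate momenta, assuming the convention
    #  p_u= delta*(vR*cosh(u)*sin(v)+vz*sinh(u)*cos(v)) and
    #  p_v= delta*(vR*sinh(u)*cos(v)-vz*cosh(u)*sin(v)):
    #  at R,z<<delta u->0, v->pi/2; at R,z>>delta cosh(u)~sinh(u)~r/delta)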
delta= 0.5
R,z= delta/100., delta/300.
vR, vz= 0.2,-0.5
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[0]-delta*vR) < 10.**-3., 'vRvz_to_pupv at small R,z does not behave as expected'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[1]+delta*vz) < 10.**-3., 'vRvz_to_pupv at small R,z does not behave as expected'
# At R,z >> Delta --> p_u ~ r v_r, p_v ~ r v_theta, spherical velocities
delta= 0.5
R,z= delta*100., delta*300.
vR, vz= 0.2,-0.5
# Compute spherical velocities
r= numpy.sqrt(R**2.+z**2.)
costheta= z/r
sintheta= R/r
vr= vR*sintheta+vz*costheta
vt= -vz*sintheta+vR*costheta
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[0]-r*vr) < 10.**-3., 'vRvz_to_pupv at large R,z does not behave as expected'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[1]-r*vt) < 10.**-3., 'vRvz_to_pupv at large R,z does not behave as expected'
# Also check that it does not matter whether we give R,z or u,v
delta= 0.5
R,z= delta*2., delta/3.
vR, vz= 0.2,-0.5
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[0]-coords.vRvz_to_pupv(vR,vz,*coords.Rz_to_uv(R,z,delta=delta),delta=delta,uv=True)[0]) < 10.**-3., 'vRvz_to_pupv with and without pre-computed u,v do not agree'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)[1]-coords.vRvz_to_pupv(vR,vz,*coords.Rz_to_uv(R,z,delta=delta),delta=delta,uv=True)[1]) < 10.**-3., 'vRvz_to_pupv with and without pre-computed u,v do not agree'
return None
def test_vRvz_to_pupv_oblate():
# Some sanity checks
# At R,z << Delta --> p_u ~ delta vz, p_v ~ delta vR
delta= 0.5
R,z= delta/100., delta/300.
vR, vz= 0.2,-0.5
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[0]-delta*vz) < 10.**-3., 'vRvz_to_pupv at small R,z does not behave as expected for oblate spheroidal coordinates'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[1]-delta*vR) < 10.**-3., 'vRvz_to_pupv at small R,z does not behave as expected for oblate spheroidal coordinates'
# At R,z >> Delta --> p_u ~ r v_r, p_v ~ r v_theta, spherical velocities
delta= 0.5
R,z= delta*100., delta*300.
vR, vz= 0.2,-0.5
# Compute spherical velocities
r= numpy.sqrt(R**2.+z**2.)
costheta= z/r
sintheta= R/r
vr= vR*sintheta+vz*costheta
vt= -vz*sintheta+vR*costheta
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[0]-r*vr) < 10.**-3., 'vRvz_to_pupv at large R,z does not behave as expected for oblate spheroidal coordinates'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[1]-r*vt) < 10.**-3., 'vRvz_to_pupv at large R,z does not behave as expected for oblate spheroidal coordinates'
# Also check that it does not matter whether we give R,z or u,v
delta= 0.5
R,z= delta*2., delta/3.
vR, vz= 0.2,-0.5
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[0]-coords.vRvz_to_pupv(vR,vz,*coords.Rz_to_uv(R,z,delta=delta,oblate=True),delta=delta,oblate=True,uv=True)[0]) < 10.**-3., 'vRvz_to_pupv with and without pre-computed u,v do not agree for oblate spheroidal coordinates'
assert numpy.fabs(coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)[1]-coords.vRvz_to_pupv(vR,vz,*coords.Rz_to_uv(R,z,delta=delta,oblate=True),delta=delta,oblate=True,uv=True)[1]) < 10.**-3., 'vRvz_to_pupv with and without pre-computed u,v do not agree for oblate spheroidal coordinates'
return None
def test_pupv_to_vRvz():
# Test that this is the inverse of vRvz_to_pupv
delta= 0.5
R,z= delta/2., delta*3.
vR, vz= 0.2,-0.5
u,v= coords.Rz_to_uv(R,z,delta=delta)
pu,pv= coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta)[0]-vR) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta)[1]-vz) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
# Another one
delta= 1.5
R,z= delta*2., -delta/3.
vR, vz= -0.2,0.5
u,v= coords.Rz_to_uv(R,z,delta=delta)
pu,pv= coords.vRvz_to_pupv(vR,vz,R,z,delta=delta)
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta)[0]-vR) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta)[1]-vz) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
return None
def test_pupv_to_vRvz_oblate():
# Test that this is the inverse of vRvz_to_pupv
delta= 0.5
R,z= delta/2., delta*3.
vR, vz= 0.2,-0.5
u,v= coords.Rz_to_uv(R,z,delta=delta,oblate=True)
pu,pv= coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta,oblate=True)[0]-vR) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta,oblate=True)[1]-vz) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
# Another one
delta= 1.5
R,z= delta*2., -delta/3.
vR, vz= -0.2,0.5
u,v= coords.Rz_to_uv(R,z,delta=delta,oblate=True)
pu,pv= coords.vRvz_to_pupv(vR,vz,R,z,delta=delta,oblate=True)
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta,oblate=True)[0]-vR) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
assert numpy.fabs(coords.pupv_to_vRvz(pu,pv,u,v,delta=delta,oblate=True)[1]-vz) < 1e-8, 'pupv_to_vRvz is not the inverse of vRvz_to_pupv'
return None
def test_lbd_to_XYZ_jac():
#Just position
l,b,d= 180.,30.,2.
jac= coords.lbd_to_XYZ_jac(l,b,d,degree=True)
assert numpy.fabs(jac[0,0]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[0,1]-numpy.pi/180.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[0,2]+numpy.sqrt(3.)/2.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,0]+numpy.sqrt(3.)*numpy.pi/180.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,1]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,2]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,0]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,1]-numpy.sqrt(3.)*numpy.pi/180.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,2]-0.5) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
#6D
l,b,d= 3.*numpy.pi/2.,numpy.pi/6.,2.
vr,pmll,pmbb= 10.,20.,-30.
jac= coords.lbd_to_XYZ_jac(l,b,d,vr,pmll,pmbb,degree=False)
assert numpy.fabs(jac[0,0]-numpy.sqrt(3.)) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[0,1]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[0,2]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,0]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,1]-1.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[1,2]+numpy.sqrt(3.)/2.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,0]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,1]-numpy.sqrt(3.)) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[2,2]-0.5) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.all(numpy.fabs(jac[:3,3:]) < 10.**-10.), 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,0]-numpy.sqrt(3.)/2.*vr+0.5*pmbb*d*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,1]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,2]-pmll*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,3]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,4]-d*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[3,5]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,0]-pmll*d*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,1]-vr/2.-numpy.sqrt(3.)/2.*d*pmbb*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,2]-0.5*4.740470463496208*pmbb) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,3]+numpy.sqrt(3.)/2.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,4]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[4,5]-4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,0]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,1]+0.5*d*4.740470463496208*pmbb-numpy.sqrt(3.)/2.*vr) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,2]-numpy.sqrt(3.)/2.*4.740470463496208*pmbb) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,3]-0.5) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,4]-0.) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
assert numpy.fabs(jac[5,5]-numpy.sqrt(3.)/2.*d*4.740470463496208) < 10.**-10., 'lbd_to_XYZ_jac calculation did not work as expected'
return None
def test_cyl_to_spher_vec():
# Test 45 degrees, disk plane, & polar location
vr,vT,vtheta = coords.cyl_to_spher_vec(0.6,1.3,0.6,1.,1.)
assert numpy.fabs(vr-0.6*2**0.5) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vtheta-0) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vT-1.3) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
vr,vT,vtheta = coords.cyl_to_spher_vec(-1.2,-0.7,-0.8,1.,0.)
assert numpy.fabs(vr+1.2) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vtheta-0.8) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vT+0.7) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
vr,vT,vtheta = coords.cyl_to_spher_vec(-1.2,-0.7,-0.8,0.,1.)
assert numpy.fabs(vr+0.8) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vtheta+1.2) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
assert numpy.fabs(vT+0.7) < 10.**-8, 'cyl_to_spher_vec does not work as expected'
return None
def test_spher_to_cyl_vec():
# Test 45 degrees, disk plane, & polar location
vR,vT,vz = coords.spher_to_cyl_vec(0.7,1.4,0.7,numpy.pi/4.)
assert numpy.fabs(vR-0.7*2**0.5) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vT-1.4) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vz-0.) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
vR,vT,vz = coords.spher_to_cyl_vec(0.5,-1.3,0.7,0.)
assert numpy.fabs(vR-0.7) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vT+1.3) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vz-0.5) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
vR,vT,vz = coords.spher_to_cyl_vec(0.5,-1.3,0.7,numpy.pi/2.)
assert numpy.fabs(vR-0.5) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vT+1.3) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
assert numpy.fabs(vz+0.7) < 10.**-8, 'spher_to_cyl_vec does not work as expected'
return None
def test_cyl_to_spher():
# Just a few quick tests
r,t,p= coords.cyl_to_spher(1.2,3.2,1.)
assert numpy.fabs(r**2.-1.2**2.-3.2**2.) < 10.**-8., 'cyl_to_spher does not work as expected'
assert numpy.fabs(r*numpy.cos(t)-3.2) < 10.**-8., 'cyl_to_spher does not work as expected'
assert numpy.fabs(p-1.) < 10.**-8., 'cyl_to_spher does not work as expected'
r,t,p= coords.cyl_to_spher(1.2,-3.2,4.)
assert numpy.fabs(r**2.-1.2**2.-3.2**2.) < 10.**-8., 'cyl_to_spher does not work as expected'
assert numpy.fabs(r*numpy.cos(t)+3.2) < 10.**-8., 'cyl_to_spher does not work as expected'
assert numpy.fabs(p-4.) < 10.**-8., 'cyl_to_spher does not work as expected'
return None
def test_spher_to_cyl():
# Just a few quick tests
R,z,p= coords.spher_to_cyl(5.,numpy.arccos(3./5.),1.)
assert numpy.fabs(R-4.) < 10.**-8., 'spher_to_cyl does not work as expected'
assert numpy.fabs(z-3.) < 10.**-8., 'spher_to_cyl does not work as expected'
assert numpy.fabs(p-1.) < 10.**-8., 'spher_to_cyl does not work as expected'
R,z,p= coords.spher_to_cyl(5.,numpy.arccos(-3./5.),4.)
assert numpy.fabs(R-4.) < 10.**-8., 'spher_to_cyl does not work as expected'
assert numpy.fabs(z+3.) < 10.**-8., 'spher_to_cyl does not work as expected'
assert numpy.fabs(p-4.) < 10.**-8., 'spher_to_cyl does not work as expected'
return None
def test_cyl_to_rect_jac():
#Just position
R,phi,Z= 2., numpy.pi, 1.
jac= coords.cyl_to_rect_jac(R,phi,Z)
assert numpy.fabs(numpy.linalg.det(jac)-R) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,0]+1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,1]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,2]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,1]+2.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,2]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,1]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,2]-1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
#6D
R,phi,Z= 2., numpy.pi, 1.
vR,vT,vZ= 1.,2.,3.
jac= coords.cyl_to_rect_jac(R,vR,vT,Z,vZ,phi)
vindx= numpy.array([False,True,True,False,True,False],dtype='bool')
assert numpy.fabs(numpy.linalg.det(jac)-R) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,0]+1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,5]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[0,3]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.all(numpy.fabs(jac[0,vindx]) < 10.**-10.), 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,5]+2.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[1,3]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.all(numpy.fabs(jac[1,vindx]) < 10.**-10.), 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,5]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[2,3]-1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.all(numpy.fabs(jac[2,vindx]) < 10.**-10.), 'cyl_to_rect_jac calculation did not work as expected'
#Velocities
assert numpy.fabs(jac[3,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[3,1]+1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[3,2]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[3,3]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[3,4]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[3,5]-2.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,0]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,1]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,2]+1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,3]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,4]-0.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[4,5]+1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.all(numpy.fabs(jac[5,numpy.array([True,True,True,True,False,True],dtype='bool')]-0.) < 10.**-10.), 'cyl_to_rect_jac calculation did not work as expected'
assert numpy.fabs(jac[5,4]-1.) < 10.**-10., 'cyl_to_rect_jac calculation did not work as expected'
return None
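# The Jacobian tests above check hard-coded matrix entries; comparing against
# central finite differences of the forward transformation is a quick guard
# against sign errors. This is an illustrative sketch, not used by the tests,
# assuming coords.cyl_to_rect takes (R,phi,Z) and returns the rectangular
# position:
def _numeric_cyl_to_rect_jac(R,phi,Z,eps=1e-6):
    jac= numpy.empty((3,3))
    x0= numpy.array([R,phi,Z])
    for jj in range(3):
        xp, xm= x0.copy(), x0.copy()
        xp[jj]+= eps
        xm[jj]-= eps
        # central difference w.r.t. coordinate jj (columns ordered R,phi,Z)
        jac[:,jj]= (numpy.array(coords.cyl_to_rect(*xp))
                    -numpy.array(coords.cyl_to_rect(*xm)))/(2.*eps)
    return jac
# e.g., numpy.allclose(_numeric_cyl_to_rect_jac(2.,numpy.pi,1.),
#                      coords.cyl_to_rect_jac(2.,numpy.pi,1.),atol=1e-5)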
def test_radec_to_custom_valueerror():
# Test the radec_to_custom without T raises a ValueError
with pytest.raises(ValueError):
xieta= coords.radec_to_custom(20.,30.)
return None
def test_radec_to_custom_againstlb():
_turn_off_apy()
ra, dec= 20., 30.
theta,dec_ngp,ra_ngp= coords.get_epoch_angles(2000.)
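    # Build the rotation matrix relating equatorial and Galactic coordinates
    # from the epoch angles (NGP position ra_ngp,dec_ngp and position angle
    # theta); radec_to_custom with its transpose should then match radec_to_lb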
T= numpy.dot(numpy.array([[numpy.cos(ra_ngp),-numpy.sin(ra_ngp),0.],
[numpy.sin(ra_ngp),numpy.cos(ra_ngp),0.],
[0.,0.,1.]]),
numpy.dot(numpy.array([[-numpy.sin(dec_ngp),0.,
numpy.cos(dec_ngp)],
[0.,1.,0.],
[numpy.cos(dec_ngp),0.,
numpy.sin(dec_ngp)]]),
numpy.array([[numpy.cos(theta),numpy.sin(theta),0.],
[numpy.sin(theta),-numpy.cos(theta),0.],
[0.,0.,1.]])))
lb_direct= coords.radec_to_lb(ra,dec,degree=True)
lb_custom= coords.radec_to_custom(ra,dec,T=T.T,degree=True)
assert numpy.fabs(lb_direct[0]-lb_custom[0]) < 10.**-8., 'radec_to_custom for transformation to l,b does not work properly'
assert numpy.fabs(lb_direct[1]-lb_custom[1]) < 10.**-8., 'radec_to_custom for transformation to l,b does not work properly'
# Array
s= numpy.arange(2)
lb_direct= coords.radec_to_lb(ra*s,dec*s,degree=True)
lb_custom= coords.radec_to_custom(ra*s,dec*s,T=T.T,degree=True)
assert numpy.all(numpy.fabs(lb_direct-lb_custom) < 10.**-8.), 'radec_to_custom for transformation to l,b does not work properly'
_turn_on_apy()
return None
def test_radec_to_custom_pal5():
# Test the custom ra,dec transformation for Pal 5
_RAPAL5= 229.018/180.*numpy.pi
_DECPAL5= -0.124/180.*numpy.pi
_TPAL5= numpy.dot(numpy.array([[numpy.cos(_DECPAL5),0.,numpy.sin(_DECPAL5)],
[0.,1.,0.],
[-numpy.sin(_DECPAL5),0.,numpy.cos(_DECPAL5)]]),
numpy.array([[numpy.cos(_RAPAL5),numpy.sin(_RAPAL5),0.],
[-numpy.sin(_RAPAL5),numpy.cos(_RAPAL5),0.],
[0.,0.,1.]]))
xieta= coords.radec_to_custom(_RAPAL5,_DECPAL5,T=_TPAL5,degree=False)
def checkrng(x, xpct, dom, shft):
return numpy.fabs(((numpy.fabs(x - xpct) + shft) % dom) - shft)
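    # checkrng folds |x - xpct| back towards the nearest multiple of the
    # period dom (shft sets where the folding happens), so values that
    # differ by a full period still count as a match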
# 0 < xieta[0] < 2 * pi
assert checkrng(xieta[0], 0, 2*numpy.pi, 0) < 1e-8, 'radec_to_custom does not work properly for Pal 5 transformation'
assert checkrng(xieta[1], 0, numpy.pi, numpy.pi/2) < 1e-8, 'radec_to_custom does not work properly for Pal 5 transformation'
# One more, rough estimate based on visual inspection of plot
xieta= coords.radec_to_custom(240.,6.,T=_TPAL5,degree=True)
assert checkrng(xieta[0], 11., 2*numpy.pi, 0) < .2, 'radec_to_custom does not work properly for Pal 5 transformation'
assert checkrng(xieta[1], 6., numpy.pi, numpy.pi/2) < .2, 'radec_to_custom does not work properly for Pal 5 transformation'
return None
def test_pmrapmdec_to_custom_valueerror():
# Test the pmrapmdec_to_custom without T raises a ValueError
with pytest.raises(ValueError):
xieta= coords.pmrapmdec_to_custom(1.,1.,20.,30.)
return None
def test_pmrapmdec_to_custom_againstlb():
_turn_off_apy()
ra, dec= 20., 30.
pmra, pmdec= -3.,4.
theta,dec_ngp,ra_ngp= coords.get_epoch_angles(2000.)
T= numpy.dot(numpy.array([[numpy.cos(ra_ngp),-numpy.sin(ra_ngp),0.],
[numpy.sin(ra_ngp),numpy.cos(ra_ngp),0.],
[0.,0.,1.]]),
numpy.dot(numpy.array([[-numpy.sin(dec_ngp),0.,
numpy.cos(dec_ngp)],
[0.,1.,0.],
[numpy.cos(dec_ngp),0.,
numpy.sin(dec_ngp)]]),
numpy.array([[numpy.cos(theta),numpy.sin(theta),0.],
[numpy.sin(theta),-numpy.cos(theta),0.],
[0.,0.,1.]])))
pmlb_direct= coords.pmrapmdec_to_pmllpmbb(pmra,pmdec,ra,dec,
degree=True)
pmlb_custom= coords.pmrapmdec_to_custom(pmra,pmdec,ra,dec,
T=T.T,degree=True)
assert numpy.fabs(pmlb_direct[0]-pmlb_custom[0]) < 10.**-8., 'pmrapmdec_to_custom for transformation to pml,pmb does not work properly'
assert numpy.fabs(pmlb_direct[1]-pmlb_custom[1]) < 10.**-8., 'pmrapmdec_to_custom for transformation to pml,pmb does not work properly'
# Array
s= numpy.arange(2)
pmlb_direct= coords.pmrapmdec_to_pmllpmbb(pmra*s,pmdec*s,
ra*s,dec*s,degree=True)
pmlb_custom= coords.pmrapmdec_to_custom(pmra*s,pmdec*s,
ra*s,dec*s,T=T.T,degree=True)
assert numpy.all(numpy.fabs(pmlb_direct-pmlb_custom) < 10.**-8.), 'pmrapmdec_to_custom for transformation to pml,pmb does not work properly'
_turn_on_apy()
return None
def test_custom_to_radec_valueerror():
# Test the custom_to_radec without T raises a ValueError
with pytest.raises(ValueError):
xieta = coords.custom_to_radec(20., 30.)
return None
def test_custom_to_radec_againstlb():
_turn_off_apy()
ra, dec= 20., 30.
theta,dec_ngp,ra_ngp= coords.get_epoch_angles(2000.)
T= numpy.dot(numpy.array([[numpy.cos(ra_ngp),-numpy.sin(ra_ngp),0.],
[numpy.sin(ra_ngp),numpy.cos(ra_ngp),0.],
[0.,0.,1.]]),
numpy.dot(numpy.array([[-numpy.sin(dec_ngp),0.,
numpy.cos(dec_ngp)],
[0.,1.,0.],
[numpy.cos(dec_ngp),0.,
numpy.sin(dec_ngp)]]),
numpy.array([[numpy.cos(theta),numpy.sin(theta),0.],
[numpy.sin(theta),-numpy.cos(theta),0.],
[0.,0.,1.]])))
lb_direct= coords.radec_to_lb(ra,dec,degree=True)
lb_custom= coords.custom_to_radec(ra,dec,T=T,degree=True)
assert numpy.fabs(lb_direct[0]-lb_custom[0]) < 10.**-8., 'custom_to_radec for transformation to l,b does not work properly'
assert numpy.fabs(lb_direct[1]-lb_custom[1]) < 10.**-8., 'custom_to_radec for transformation to l,b does not work properly'
# Array
s= numpy.arange(2)
lb_direct= coords.radec_to_lb(ra*s,dec*s,degree=True)
lb_custom= coords.custom_to_radec(ra*s,dec*s,T=T,degree=True)
    assert numpy.all(numpy.fabs(lb_direct-lb_custom) < 10.**-8.), 'custom_to_radec for transformation to l,b does not work properly'
_turn_on_apy()
return None
def test_custom_to_radec_pal5():
# Test the custom ra,dec transformation for Pal 5
_RAPAL5= 229.018/180.*numpy.pi
_DECPAL5= -0.124/180.*numpy.pi
_TPAL5= numpy.dot(numpy.array([[numpy.cos(_DECPAL5),0.,numpy.sin(_DECPAL5)],
[0.,1.,0.],
[-numpy.sin(_DECPAL5),0.,numpy.cos(_DECPAL5)]]),
numpy.array([[numpy.cos(_RAPAL5),numpy.sin(_RAPAL5),0.],
[-numpy.sin(_RAPAL5),numpy.cos(_RAPAL5),0.],
[0.,0.,1.]]))
xieta= coords.custom_to_radec(_RAPAL5,_DECPAL5,T=_TPAL5.T,degree=False)
def checkrng(x, xpct, dom, shft):
return numpy.fabs(((numpy.fabs(x - xpct) + shft) % dom) - shft)
# 0 < xieta[0] < 2 * pi
assert checkrng(xieta[0], 0, 2*numpy.pi, 0) < 1e-8, 'custom_to_radec does not work properly for Pal 5 transformation'
assert checkrng(xieta[1], 0, numpy.pi, numpy.pi/2) < 1e-8, 'custom_to_radec does not work properly for Pal 5 transformation'
# One more, rough estimate based on visual inspection of plot
xieta= coords.custom_to_radec(240.,6.,T=_TPAL5.T,degree=True)
assert checkrng(xieta[0], 11., 2*numpy.pi, 0) < .2, 'custom_to_radec does not work properly for Pal 5 transformation'
assert checkrng(xieta[1], 6., numpy.pi, numpy.pi/2) < .2, 'custom_to_radec does not work properly for Pal 5 transformation'
return None
def test_custom_to_pmrapmdec_valueerror():
    # Test that custom_to_pmrapmdec without T raises a ValueError
with pytest.raises(ValueError):
xieta= coords.custom_to_pmrapmdec(1.,1.,20.,30.)
return None
def test_custom_to_pmrapmdec_againstlb():
_turn_off_apy()
ra, dec= 20., 30.
pmra, pmdec= -3.,4.
theta,dec_ngp,ra_ngp= coords.get_epoch_angles(2000.)
T= numpy.dot(numpy.array([[numpy.cos(ra_ngp),-numpy.sin(ra_ngp),0.],
[numpy.sin(ra_ngp),numpy.cos(ra_ngp),0.],
[0.,0.,1.]]),
numpy.dot(numpy.array([[-numpy.sin(dec_ngp),0.,
numpy.cos(dec_ngp)],
[0.,1.,0.],
[numpy.cos(dec_ngp),0.,
numpy.sin(dec_ngp)]]),
numpy.array([[numpy.cos(theta),numpy.sin(theta),0.],
[numpy.sin(theta),-numpy.cos(theta),0.],
[0.,0.,1.]])))
pmlb_direct= coords.pmrapmdec_to_pmllpmbb(pmra,pmdec,ra,dec, degree=True)
pmlb_custom= coords.custom_to_pmrapmdec(pmra,pmdec,ra,dec, T=T,degree=True)
assert numpy.fabs(pmlb_direct[0]-pmlb_custom[0]) < 10.**-8., 'custom_to_pmrapmdec for transformation to pml,pmb does not work properly'
assert numpy.fabs(pmlb_direct[1]-pmlb_custom[1]) < 10.**-8., 'custom_to_pmrapmdec for transformation to pml,pmb does not work properly'
# Array
s= numpy.arange(2)
pmlb_direct= coords.pmrapmdec_to_pmllpmbb(pmra*s,pmdec*s,
ra*s,dec*s,degree=True)
pmlb_custom= coords.custom_to_pmrapmdec(pmra*s,pmdec*s, ra*s,dec*s,T=T,degree=True)
assert numpy.all(numpy.fabs(pmlb_direct-pmlb_custom) < 10.**-8.), 'custom_to_pmrapmdec for transformation to pml,pmb does not work properly'
_turn_on_apy()
return None
# 02/06/2018 (JB): Edited for cases where astropy coords are always turned off
# [case at hand: einsum bug in numpy 1.14 / python2.7 astropy]
def _turn_off_apy(keep_loaded=False):
coords._APY_COORDS_ORIG= coords._APY_COORDS
coords._APY_COORDS= False
if not keep_loaded:
coords._APY_LOADED= False
def _turn_on_apy():
coords._APY_COORDS= coords._APY_COORDS_ORIG
coords._APY_LOADED= True
|
# DEP MONI - Personal departure monitor
#
# Based on VasttraPi (https://github.com/platisd/vasttraPi/blob/master/departures.py) by Dimitris Platis (published under MIT license).
# Structure and functionality are largely borrowed from VasttraPi.
from tkinter import Tk, Label, Button, N, S, E, W, Frame
import tkinter.font as tkFont
import tkinter.ttk as ttkSep
from datetime import datetime
import time
import PyTrafik.pytrafik.client
from subprocess import call
import os
import sys
import configparser
from socket import AF_INET, SOCK_DGRAM
import socket
import struct
import threading
from collections import defaultdict
# Change working directory to the one that the file is residing in
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
mainThread = threading.current_thread()
# set variables and times
vasttrafik = None
# Bus stop ID
# get ID via request: vasttrafik.location_name('BUSSTOPNAME')[0]['id']
busstop_id = None
departure_track_B = []
departure_track_A = []
VT_secret = None
VT_key = None
# name shortening because of limited display size
direction_31busA = 'Hj. Brantingspl.'
direction_31busB = 'Wieselg.pl. (Eketräg.)'
timeoutNTP = 1.5 # How long to wait for the NTP server's response, in seconds
guiRefreshRate = 45
tokenTimeout = 3600 # How long the token is valid (default is 3600 seconds, i.e. 1 hour)
# tkinter stuff - sizes and colors
widths = [4, 18, 10, 6]
colorsDep1 = ['LightSkyBlue1', 'LightSkyBlue2']
colorsDep2 = ['SkyBlue1', 'SkyBlue2']
# import secret, key and busstop ID from login.ini
def get_key_secret_busstopid():
login = configparser.ConfigParser()
login.read('login.ini')
global VT_key, VT_secret, busstop_id
VT_secret = login.get('login','secret')
    VT_key = login.get('login','key')
busstop_id = login.get('busstop','id')
# Fetches the time from NTP server. Source: http://blog.mattcrampton.com/post/88291892461/query-an-ntp-server-from-python
# copied from platisd's VasttraPi
def getNTPTime(host="pool.ntp.org"):
port = 123
buf = 1024
address = (host, port)
msg = '\x1b' + 47 * '\0'
# Reference time (in seconds since 1900-01-01 00:00:00)
TIME1970 = 2208988800 # 1970-01-01 00:00:00
# connect to server
client = socket.socket(AF_INET, SOCK_DGRAM)
client.settimeout(timeoutNTP) # Do not wait too much to receive a response from the NTP server
try:
client.sendto(bytes(msg, "UTF-8"), address)
msg, address = client.recvfrom(buf)
t = struct.unpack("!12I", msg)[10]
t -= TIME1970
    except Exception:
        print("WARNING: Could not fetch time from NTP server! Using system time instead.")
        t = time.time() # Fall back to the system time when there is no response from the NTP server
d = time.strptime(time.ctime(t), "%a %b %d %H:%M:%S %Y")
return (time.strftime("%Y-%m-%d", d), time.strftime("%H:%M", d))
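# Note (derived from the code above): the 48-byte SNTP response is unpacked
# as twelve 32-bit big-endian words; word index 10 is the integer part of
# the Transmit Timestamp (seconds since 1900-01-01), and subtracting
# TIME1970 converts it to a Unix timestamp. Example use:
#   date_str, time_str = getNTPTime()  # e.g. ('2021-01-31', '14:05')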
# initialize connection to Västtrafik - get token
def initializeConnection():
try:
global vasttrafik
vasttrafik = PyTrafik.pytrafik.client.Client("json", VT_key, VT_secret)
time.sleep(15)
except Exception as e:
print (e)
print ("Authentication failure!")
sys.exit(1)
# Necessary to reinitialize the token after it's timed out
if mainThread.is_alive():
threading.Timer(tokenTimeout, initializeConnection).start()
# ----
# Getting data from Västtrafik and process them
# ----
def prepareData():
# Get json from Västtrafik
getDepartures()
# Extract from json relevant data for monitor
global departure_track_B
global departure_track_A
departure_track_B = sort_after_dep(extractDepartures('B'))
departure_track_A = sort_after_dep(extractDepartures('A'))
def getDepartures():
# Get the current time and date from an NTP server as the host might not have an RTC
global now
(currentDate, currentTime) = getNTPTime()
now = currentTime
try:
global saterigatan_db
saterigatan_db = vasttrafik.get_departures(busstop_id, date=currentDate, time=currentTime)
except Exception as e:
print (e)
print ("Connection failure on departure request")
def extractDepartures(track_side):
function_departure = []
for x in range(10):
if(saterigatan_db[x]['track']==track_side):
track = saterigatan_db[x]['track']
            rtTimeExist = 'rtTime' in saterigatan_db[x]
            # TODO: marking real-time vs scheduled times (not used at the moment)
            rtOrPt = 'RT' if rtTimeExist else 'PT'
            departureTime = saterigatan_db[x]['rtTime'] if rtTimeExist else saterigatan_db[x]['time']
# TODO: parsing should have try-except
            minutesToLeave = int((datetime.strptime(departureTime, "%H:%M") - datetime.strptime(now, "%H:%M")).total_seconds() / 60)
# meaning that the next departure time is on the next day
if minutesToLeave < 0:
MINUTES_IN_DAY = 1440
minutesToLeave += MINUTES_IN_DAY
busNumber = saterigatan_db[x]['sname']
if saterigatan_db[x]['sname'] == '31' and saterigatan_db[x]['track'] == 'A':
direction = direction_31busA
elif saterigatan_db[x]['sname'] == '31' and saterigatan_db[x]['track'] == 'B':
direction = direction_31busB
else:
direction = saterigatan_db[x]['direction']
journeyTupel = (busNumber, direction, departureTime, minutesToLeave, rtTimeExist, track)
            # Departures in 0 minutes are not useful and are skipped
if(minutesToLeave>0):
function_departure.append(journeyTupel)
# only 3 (useful) departures will be stored
            if len(function_departure) == 3:
                break
return function_departure
# Data from Västtrafik is not always in the correct order (i.e. not sorted by next departure). This fixes it.
def sort_after_dep(tupel_array):
tupel_array.sort(key = lambda x: x[3]) #sorts by "Minutes until next departure" = minutes to leave
return tupel_array
# ----
# GUI
# ----
class departureGUI:
def __init__(self, master):
self.master = master
# A list that will hold the temporary departure frames so to destroy them upon refreshing
self.departureRowFrames = []
self.currentlyDisplayedDepartures = [0]*2
master.title("GUI")
self.master.bind("<Escape>", self.end_fullscreen)
departuresFrame = Frame(master)
departuresFrame.grid()
self.departuresFrame = departuresFrame
def populate_with_departures(self, departure_B, departure_A):
depFrame = Frame(self.departuresFrame)
#specifies "self.font" for time/clock
self.font = tkFont.Font(family="helvetica", size=18)
#specifies for all belonging to TextFont (other types: TkDefaultFont, TkTextFont, TkFixedFont)
self.default_font = tkFont.nametofont("TkTextFont")
self.default_font.configure(size=14)
# bold for "in min"
self.in_min_font = tkFont.Font(size=14, weight='bold')
depFrame.grid_columnconfigure(1, weight=2)
self.time_label = Label(self.master, font=self.font, text="Current time: "+now) # .strftime('%H:%M'))
self.time_label.grid(row=0, column=0, columnspan=2, sticky=W)
self.update_button = Button(self.master, text="Shutdown", command=self.update)
self.update_button.grid(row=0, column = 2, columnspan=2, sticky=E)
        # label columns
        self.label_columns("<- Direction <-", 1)
        # departures on track B
        self.departure_rows(departure_B,0)
        # spacer between the two directions
self.spacer = Label(self.master, width=sum(widths)+2)
self.spacer.grid(row=2+len(departure_B), column=0, columnspan=7)
        # label columns
        self.label_columns("-> Direction ->", 3+len(departure_B))
        # departures on track A
        self.departure_rows(departure_A,4+len(departure_B))
# Add the newly created frame to a list so we can destroy it later when we refresh the departures
self.departureRowFrames.append(depFrame)
def label_columns(self, direction, row_count):
# BUSnr | Direction | Departure Time | in Min
self.bus_label = Label(self.master, font=self.default_font, text="Bus", width=widths[0], bg='grey60')
self.bus_label.grid(row=row_count, column=0)
self.direction_label = Label(self.master, font=self.default_font, text=direction, width=widths[1], bg='grey70')
self.direction_label.grid(row=row_count, column=1, sticky=E+W)
self.dep_label = Label(self.master, font=self.default_font, text="Departure", width=widths[2], bg='grey60')
self.dep_label.grid(row=row_count, column=2)
self.min_label = Label(self.master, font=self.default_font, text="in Min", width=widths[3], bg='grey70')
self.min_label.grid(row=row_count, column=3, sticky=E+W)
# parameters the departure array + shift of rows
def departure_rows(self, dep_info_array, row_shift):
for y in range (0,len(dep_info_array)):
            ## conditional formatting
            # first row in a different color
if(y==0):
bgColors=colorsDep2
else:
bgColors=colorsDep1
            # only a few minutes until departure, make "in Min" red
if(dep_info_array[y][3]<5):
fore='firebrick3'
else:
fore='black'
# more than 60 minutes until departure -> 60+
minutesToLeaveStr = ' ' +str(dep_info_array[y][3]).zfill(2) if dep_info_array[y][3]<=60 else '60+'
# bus
Label(self.master, font=self.default_font, text=dep_info_array[y][0], width=widths[0], fg='black', bg=bgColors[0]).grid(row=(2+y+row_shift), column=0, sticky=E+W+N+S)
# direction
Label(self.master, font=self.default_font, text=dep_info_array[y][1], width=widths[1], fg='black', bg=bgColors[1]).grid(row=(2+y+row_shift), column=1, sticky=E+W+N+S)
# departure time
Label(self.master, font=self.default_font, text=dep_info_array[y][2], width=widths[2], fg='black', bg=bgColors[0]).grid(row=(2+y+row_shift), column=2, sticky=E+W+N+S)
# in min departure
Label(self.master, font=self.in_min_font, text=minutesToLeaveStr, width=widths[3], fg=fore, bg=bgColors[1]).grid(row=(2+y+row_shift), column=3, sticky=E+W+N+S)
# Destroy any existing frames containing departures that already exist
def resetDepartures(self):
for frame in self.departureRowFrames:
frame.destroy()
# Empty the list as we have destroyed everything that was included in it
self.departureRowFrames = []
def update(self):
print("Shutdown")
shutdown_raspi()
def end_fullscreen(self, event=None):
self.state = False
self.master.attributes("-fullscreen", False)
#return "break"
def updateGui(my_gui):
# Get & process the next trips from Västtrafik's API
prepareData()
# Update the displayed departures if they are different to the ones currently displayed
if departure_track_B != my_gui.currentlyDisplayedDepartures[0] or departure_track_A != my_gui.currentlyDisplayedDepartures[1]:
my_gui.resetDepartures() # Remove any already existing departures
my_gui.populate_with_departures(departure_track_B, departure_track_A)
my_gui.currentlyDisplayedDepartures[0] = departure_track_B
my_gui.currentlyDisplayedDepartures[1] = departure_track_A
if mainThread.is_alive():
threading.Timer(guiRefreshRate, updateGui, [my_gui]).start()
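# Note (illustrative): threading.Timer re-arms itself above, so departures
# are re-fetched every guiRefreshRate seconds for as long as the main
# thread (and hence the Tk mainloop) is alive.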
# ----
# Get it started & shutdown
# ----
def start():
global root
root = Tk()
#root.overrideredirect(True)
#root.overrideredirect(False)
root.attributes('-fullscreen',True)
global my_gui
my_gui = departureGUI(root)
updateGui(my_gui)
#updateScreen(my_gui)
root.mainloop()
#root.update()
def shutdown_raspi():
root.destroy()
#os.system("sudo shutdown -h now")
os.system("sudo shutdown -h now")
def main():
# Read Key and Secret from login.ini
get_key_secret_busstopid()
    # Initialize the connection to the Vasttrafik public API. If not successful, the script exits here
initializeConnection()
# prepareData()
start()
if __name__ == "__main__":
main()
|
import sqlite3
connection = sqlite3.connect('demo_data.sqlite3')
# Helper Functions
def execute_fetchall_sqlite_query(connection, query):
cursor = connection.cursor()
return cursor.execute(query).fetchall()
# Part 1B: Queries
## Count how many rows you have - it should be 3!
query = "SELECT COUNT(*) FROM demo"
print(query, execute_fetchall_sqlite_query(connection, query))
## How many rows are there where both x and y are at least 5?
query = "SELECT COUNT(*) FROM demo WHERE x>=5 AND y>=5"
print(query, execute_fetchall_sqlite_query(connection, query))
## How many unique values of y are there (hint - COUNT() can accept a keyword DISTINCT)?
query = "SELECT COUNT (DISTINCT y) FROM demo"
print(query, execute_fetchall_sqlite_query(connection, query))
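# A hedged variation on the helper above (not part of the original
# exercise): sqlite3 also supports parameterized queries, which avoids
# interpolating user-supplied values into SQL strings.
def execute_fetchall_sqlite_query_params(connection, query, params=()):
    cursor = connection.cursor()
    return cursor.execute(query, params).fetchall()
## e.g. rows where both x and y are at least 5, as above:
# execute_fetchall_sqlite_query_params(
#     connection, "SELECT COUNT(*) FROM demo WHERE x >= ? AND y >= ?", (5, 5))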
|
import argparse
import os
import sys
import itertools
import SoftLayer
import Crypto.Cipher.AES
def encrypt_password(hostname, password):
"""IPMIView stores its passwords encrypted with AES-128-CBC using
an all-zeros IV and the hostname as the key.
SO SECURE!"""
iv = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if len(hostname) < 16:
key = hostname + ('\x00' * (16 - len(hostname)))
else:
key = hostname[:16]
cipher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
if len(password) % 16 != 0:
password += ('\x00' * (16 - (len(password) % 16)))
return cipher.encrypt(password).encode('hex')
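def decrypt_password(hostname, encrypted_hex):
    """Hedged sketch (not part of the original script): inverts
    encrypt_password() by rebuilding the same zero IV and hostname-derived
    key, then stripping the zero-byte padding. Assumes the same Python 2
    string semantics as the function above."""
    iv = b'\x00' * 16
    if len(hostname) < 16:
        key = hostname + ('\x00' * (16 - len(hostname)))
    else:
        key = hostname[:16]
    cipher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
    return cipher.decrypt(encrypted_hex.decode('hex')).rstrip('\x00')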
def hostname_frags(s):
return tuple(
(int(''.join(chrs)) if is_digits else ''.join(chrs))
for (is_digits, chrs) in
itertools.groupby(s, str.isdigit)
)
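# e.g. hostname_frags('web12a') == ('web', 12, 'a'), so hostnames sort
# numerically ('web2' before 'web10') rather than lexicographically.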
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--username', default=os.environ.get('SL_USERNAME', None),
required='SL_USERNAME' not in os.environ, help='SoftLayer username (default $SL_USERNAME)')
parser.add_argument('--api-key', default=os.environ.get('SL_API_KEY', None),
required='SL_API_KEY' not in os.environ, help='SoftLayer API key (default $SL_API_KEY)')
parser.add_argument('-A', '--account-file', default='account.properties', type=argparse.FileType('w'),
help='Path to write account.properties file to')
parser.add_argument('-I', '--ipmiview-file', default='IPMIView.properties', type=argparse.FileType('w'),
help='Path to write IPMIView.properties file to')
args = parser.parse_args()
client = SoftLayer.create_client_from_env(args.username, args.api_key)
hardware = SoftLayer.managers.hardware.HardwareManager(client)
    for host in sorted(hardware.list_hardware(), key=lambda d: hostname_frags(d.get('hostname', ''))):
if 'globalIdentifier' not in host:
continue
hwinfo = hardware.get_hardware(host['globalIdentifier'])
args.ipmiview_file.write('{hostname}={mgmt_ip}:{hostname}.{domain}\n'.format(
hostname=hwinfo['hostname'],
mgmt_ip=hwinfo['networkManagementIpAddress'],
domain=hwinfo['domain']
))
if len(hwinfo['remoteManagementAccounts']) > 0:
acct = hwinfo['remoteManagementAccounts'][0]
args.account_file.write('{hostname}={username},{password}\n'.format(
hostname=hwinfo['hostname'],
username=acct['username'],
password=encrypt_password(hwinfo['hostname'], acct['password'])
))
sys.exit(main())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-04 20:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('movies', '0022_userprofile'),
]
operations = [
migrations.AlterModelTable(
name='userprofile',
table='user_profile',
),
]
|
from collections import Counter, OrderedDict
import torch
from transforms import (
    PretrainedSPTokenizer,
    PretrainedSPVocab,
    PyTextVocabTransform,
    PyTextScriptVocabTransform,
    iterate_batch,
    tokenizer_func,
    totensor,
    vector_func,  # needed by build_legacy_fasttext_vector_pipeline below
    vocab_func,
)
from torchtext.experimental.transforms import (
basic_english_normalize,
TextSequentialTransforms,
)
from torchtext.data.utils import get_tokenizer
from torchtext.experimental.functional import (
sequential_transforms,
)
from torchtext.experimental.vectors import FastText as FastTextExperimental
from torchtext.experimental.vocab import vocab_from_file
from torchtext.vocab import FastText
import argparse
from torchtext.experimental.datasets.raw import text_classification as raw
import time
from dataset import BatchTextClassificationData
from torchtext.data.functional import load_sp_model
def build_sp_pipeline(spm_file):
tokenizer = PretrainedSPTokenizer(load_sp_model(spm_file))
vocab = PretrainedSPVocab(load_sp_model(spm_file))
# Insert token in vocab to match a pretrained vocab
vocab.insert_token('<pad>', 1)
pipeline = TextSequentialTransforms(tokenizer, vocab)
jit_pipeline = torch.jit.script(pipeline.to_ivalue())
print('jit sentencepiece pipeline success!')
return pipeline, pipeline.to_ivalue(), jit_pipeline
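# Usage sketch (the model path is the documented default, the sentence a
# placeholder): all three returned variants map raw text to token ids,
# assuming the transforms accept a single string, e.g.
#   pipeline, tb_pipeline, jit_pipeline = build_sp_pipeline('m_user.model')
#   ids = jit_pipeline('pytorch is a machine learning framework')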
def build_legacy_torchtext_vocab_pipeline(vocab_file):
tokenizer = get_tokenizer("basic_english")
from torchtext.vocab import build_vocab_from_iterator
def token_iterator(vocab_file):
f = open(vocab_file, 'r')
for line in f:
for token in line:
yield token
vocab = build_vocab_from_iterator(token_iterator(vocab_file))
pipeline = sequential_transforms(tokenizer_func(tokenizer), vocab_func(vocab))
return iterate_batch(pipeline), None, None
def build_experimental_torchtext_pipeline(hf_vocab_file):
tokenizer = basic_english_normalize()
f = open(hf_vocab_file, 'r')
vocab = vocab_from_file(f)
pipeline = TextSequentialTransforms(tokenizer, vocab)
jit_pipeline = torch.jit.script(pipeline.to_ivalue())
print('jit experimental torchtext pipeline success!')
return pipeline, pipeline.to_ivalue(), jit_pipeline
def build_legacy_batch_torchtext_vocab_pipeline(vocab_file):
tokenizer = get_tokenizer("basic_english")
from torchtext.vocab import build_vocab_from_iterator
from transforms import TextClassificationPipeline
def token_iterator(vocab_file):
f = open(vocab_file, 'r')
for line in f:
for token in line:
yield token
vocab = build_vocab_from_iterator(token_iterator(vocab_file))
text_pipeline = sequential_transforms(tokenizer, vocab_func(vocab))
label_pipeline = totensor(dtype=torch.long)
return TextClassificationPipeline(label_pipeline, text_pipeline), None, None
def build_legacy_pytext_vocab_pipeline(vocab_file):
from pytext.data.utils import Vocabulary
tokenizer = get_tokenizer("basic_english")
f = open(vocab_file, 'r')
vocab_counter = Counter([token for line in f for token in line.rstrip()])
sorted_by_freq_tuples = sorted(vocab_counter.items(), key=lambda x: x[1], reverse=True)
vocab_list = [pair[0] for pair in sorted_by_freq_tuples]
vocab_list.insert(0, "<unk>")
pipeline = sequential_transforms(tokenizer_func(tokenizer),
PyTextVocabTransform(Vocabulary(vocab_list, unk_token="<unk>")))
return pipeline, None, None
def build_legacy_pytext_script_vocab_pipeline(vocab_file):
from pytext.torchscript.vocab import ScriptVocabulary
tokenizer = basic_english_normalize()
f = open(vocab_file, 'r')
vocab_counter = Counter([token for line in f for token in line.rstrip()])
sorted_by_freq_tuples = sorted(vocab_counter.items(), key=lambda x: x[1], reverse=True)
vocab_list = [pair[0] for pair in sorted_by_freq_tuples]
vocab_list.insert(0, "<unk>")
pipeline = TextSequentialTransforms(tokenizer_func(tokenizer),
PyTextScriptVocabTransform(ScriptVocabulary(vocab_list)))
jit_pipeline = torch.jit.script(pipeline.to_ivalue())
print('jit legacy PyText pipeline success!')
return pipeline, pipeline.to_ivalue(), jit_pipeline
def build_experimental_pytext_script_vocab_pipeline(vocab_file):
import os
import sys
# this is needed because we want to add 'torchtext/examples/vocab' directory to the
# `sys.path` variable in order to import the pytext_vocab (since its not a module)
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "vocab"))
from pytext_vocab import script_vocab
tokenizer = basic_english_normalize()
f = open(vocab_file, 'r')
vocab_counter = Counter([token for line in f for token in line.rstrip()])
ordered_dict = OrderedDict(sorted(vocab_counter.items(), key=lambda x: x[1], reverse=True))
# Insert token in vocab to match a pretrained vocab
pipeline = TextSequentialTransforms(tokenizer,
PyTextScriptVocabTransform(script_vocab(ordered_dict)))
jit_pipeline = torch.jit.script(pipeline.to_ivalue())
    print('jit experimental PyText pipeline success!')
return pipeline, pipeline.to_ivalue(), jit_pipeline
def build_legacy_fasttext_vector_pipeline():
tokenizer = get_tokenizer("basic_english")
vector = FastText()
pipeline = sequential_transforms(tokenizer_func(tokenizer), vector_func(vector))
return pipeline, None, None
def build_experimental_fasttext_vector_pipeline():
tokenizer = basic_english_normalize()
vector = FastTextExperimental()
pipeline = TextSequentialTransforms(tokenizer, vector)
jit_pipeline = torch.jit.script(pipeline.to_ivalue())
    print('jit experimental fasttext pipeline success!')
return pipeline, pipeline.to_ivalue(), jit_pipeline
def run_benchmark_lookup(text_classification_dataset, pipeline):
t0 = time.monotonic()
lines = [text for (label, text) in text_classification_dataset]
lines = pipeline(lines)
print("Lookup time:", time.monotonic() - t0)
def run_batch_benchmark_lookup(text_classification_dataset, pipeline):
t0 = time.monotonic()
for items in text_classification_dataset:
items = list(map(pipeline, items))
print("Lookup time:", time.monotonic() - t0)
def generate_dataset(args):
if args.pipeline == 'legacy_batch_torchtext':
train = BatchTextClassificationData(args.dataset)
test = None
else:
train, test = raw.DATASETS[args.dataset]()
return train, test
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Data processing pipelines')
parser.add_argument('--pipeline', type=str, default='sentencepiece',
help='The name of pipeline')
parser.add_argument('--dataset', type=str, default='AG_NEWS',
help='Dataset for performance benchmark')
parser.add_argument('--spm-filename', type=str, default='m_user.model',
help='The filename of sentencepiece model')
parser.add_argument('--vocab-filename', type=str, default='vocab.txt',
help='The name of vocab filename')
args = parser.parse_args()
if args.pipeline == 'sentencepiece':
pipeline, torchbind_pipeline, jit_pipeline = build_sp_pipeline(args.spm_filename)
elif args.pipeline == 'experimental_torchtext':
pipeline, torchbind_pipeline, jit_pipeline = build_experimental_torchtext_pipeline(args.vocab_filename)
elif args.pipeline == 'experimental_pytext_script_vocab':
pipeline, torchbind_pipeline, jit_pipeline = build_experimental_pytext_script_vocab_pipeline(args.vocab_filename)
elif args.pipeline == 'experimental_fasttext':
pipeline, torchbind_pipeline, jit_pipeline = build_experimental_fasttext_vector_pipeline()
elif args.pipeline == 'legacy_torchtext':
pipeline, torchbind_pipeline, jit_pipeline = build_legacy_torchtext_vocab_pipeline(args.vocab_filename)
elif args.pipeline == 'legacy_pytext_vocab':
pipeline, torchbind_pipeline, jit_pipeline = build_legacy_pytext_vocab_pipeline(args.vocab_filename)
elif args.pipeline == 'legacy_pytext_script_vocab':
pipeline, torchbind_pipeline, jit_pipeline = build_legacy_pytext_script_vocab_pipeline(args.vocab_filename)
elif args.pipeline == 'legacy_fasttext':
pipeline, torchbind_pipeline, jit_pipeline = build_legacy_fasttext_vector_pipeline()
elif args.pipeline == 'legacy_batch_torchtext':
pipeline, torchbind_pipeline, jit_pipeline = build_legacy_batch_torchtext_vocab_pipeline(args.vocab_filename)
    else:
        pipeline = torchbind_pipeline = jit_pipeline = None
        print("pipeline is not supported. Available pipelines: sentencepiece, experimental_torchtext, " +
              "experimental_pytext_script_vocab, experimental_fasttext, legacy_torchtext, legacy_pytext_vocab, " +
              "legacy_pytext_script_vocab, legacy_fasttext, legacy_batch_torchtext")
if pipeline is not None:
print("Test eager mode for pipeline with pybind", args.pipeline)
train, test = generate_dataset(args)
if args.pipeline == 'legacy_batch_torchtext':
run_batch_benchmark_lookup(train, pipeline)
else:
run_benchmark_lookup(train, pipeline)
if torchbind_pipeline is not None:
print("Test eager mode for pipeline with torchbind", args.pipeline)
train, test = generate_dataset(args)
if args.pipeline == 'legacy_batch_torchtext':
run_batch_benchmark_lookup(train, torchbind_pipeline)
else:
run_benchmark_lookup(train, torchbind_pipeline)
if jit_pipeline is not None:
print("Test jit mode for pipeline", args.pipeline)
train, test = generate_dataset(args)
run_benchmark_lookup(train, jit_pipeline)
|
"""Cancel an existing iSCSI account."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command()
@click.argument('volume-id')
@click.option('--reason', help="An optional reason for cancellation")
@click.option('--immediate',
is_flag=True,
help="Cancels the block storage volume immediately instead "
"of on the billing anniversary")
@environment.pass_env
def cli(env, volume_id, reason, immediate):
"""Cancel an existing block storage volume."""
block_storage_manager = SoftLayer.BlockStorageManager(env.client)
if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
raise exceptions.CLIAbort('Aborted')
cancelled = block_storage_manager.cancel_block_volume(volume_id,
reason, immediate)
if cancelled:
if immediate:
click.echo('Block volume with id %s has been marked'
' for immediate cancellation' % volume_id)
else:
click.echo('Block volume with id %s has been marked'
' for cancellation' % volume_id)
else:
click.echo('Unable to cancel block volume %s' % volume_id)
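# Usage sketch (assuming this command is registered as `block volume-cancel`
# and 12345678 is a placeholder volume id):
#   slcli block volume-cancel 12345678 --reason "no longer needed" --immediate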
|
from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, InstanceNorm2d
import torch.nn.functional as F
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
""" A named tuple describing a ResNet block. """
def get_block(in_channel, depth, num_units, stride=2):
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=128, num_units=9),
get_block(in_channel=128, depth=256, num_units=5),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
return blocks
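# Illustration (derived from the code above): each stage downsamples once and
# then stacks stride-1 units, e.g.
#   get_block(in_channel=64, depth=128, num_units=4)
#   == [Bottleneck(64, 128, 2), Bottleneck(128, 128, 1),
#       Bottleneck(128, 128, 1), Bottleneck(128, 128, 1)]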
class SEModule(Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_ORIG(Module):
expansion = 4
def __init__(self, in_channel, depth, stride):
super(bottleneck_ORIG, self).__init__()
in_planes = in_channel
planes = depth
self.conv1 = Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = InstanceNorm2d(planes)
self.conv2 = Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = InstanceNorm2d(planes)
self.conv3 = Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = InstanceNorm2d(self.expansion*planes)
self.shortcut = Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = Sequential(
Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
InstanceNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.leaky_relu_(self.bn1(self.conv1(x)))
out = F.leaky_relu_(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.leaky_relu_(out)
return out
class FPN(Module):
def __init__(self, block, input_nc, num_blocks):
super(FPN, self).__init__()
self.in_planes = 64
self.conv1 = Conv2d(input_nc, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = InstanceNorm2d(64)
# Bottom-up layers
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# Top layer
self.toplayer = Conv2d(2048, 512, kernel_size=1, stride=1, padding=0) # Reduce channels
# Smooth layers
self.smooth1 = Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.smooth2 = Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
# Lateral layers
self.latlayer1 = Conv2d(1024, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = Conv2d( 512, 512, kernel_size=1, stride=1, padding=0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return Sequential(*layers)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
def forward(self, x):
# Bottom-up
c1 = F.leaky_relu_(self.bn1(self.conv1(x)))
c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
# Top-down
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
# Smooth
p4 = self.smooth1(p4)
p3 = self.smooth2(p3)
return p3, p4, p5
def FPN101(input_nc):
return FPN(bottleneck_ORIG, input_nc, [2, 2, 2, 2])
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth)
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
)
self.res_layer = Sequential(
Conv2d(in_channel, depth, kernel_size=1, stride=1, padding=0, bias=False),
Conv2d(depth, depth, kernel_size=3, stride=1, padding=1, bias=False, groups=depth),
PReLU(depth),
InstanceNorm2d(depth),
Conv2d(depth, depth, kernel_size=1, stride=1, padding=0, bias=False),
Conv2d(depth, depth, kernel_size=3, stride=stride, padding=1, bias=False, groups=depth),
SEModule(depth, 16),
PReLU(depth),
InstanceNorm2d(depth)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
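# Shape sketch (illustrative): FPN101(input_nc=3) maps an [N, 3, H, W] image
# to three 512-channel pyramid maps p3, p4, p5 at strides 8, 16 and 32.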
|
from collections import OrderedDict
from django import forms
from django.utils.translation import ugettext_lazy as _
from . import app_settings
from .helpers import get_foreign_keys
class BaseReportForm(object):
'''
    Holds basic functionality shared by the report forms
'''
def get_filters(self):
"""
        Get the foreign key filters for the report queryset.
        :return: a dictionary of filters to be used with QuerySet.filter(**returned_value)
"""
# todo: implement cross tab support
_values = {}
if self.is_valid():
for key, field in self.foreign_keys.items():
if key in self.cleaned_data:
val = self.cleaned_data[key]
if val:
val = [x for x in val.values_list('pk', flat=True)]
_values['%s__in' % key] = val
return None, _values
def get_crispy_helper(self, foreign_keys_map=None, crosstab_model=None, **kwargs):
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Column, Layout, Div, Row, Field
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2 col-md-2 col-lg-2'
helper.field_class = 'col-sm-10 col-md-10 col-lg-10'
helper.form_tag = False
helper.disable_csrf = True
foreign_keys_map = foreign_keys_map or self.foreign_keys
helper.layout = Layout(
Row(
Column(
Field('start_date'), css_class='col-sm-6'),
Column(
Field('end_date'), css_class='col-sm-6'),
css_class='raReportDateRange'),
Div(css_class="mt-20", style='margin-top:20px')
)
# if crosstab_model:
# entry_point.append(Row(
# Div('matrix_entities', css_class='col-sm-9'),
# Div('matrix_show_other', css_class='col-sm-3')
# , css_class='matrixField')
# )
for k in foreign_keys_map:
if k[:-3] != crosstab_model:
helper.layout.fields[1].append(Field(k))
return helper
def _default_foreign_key_widget(f_field):
return {'form_class': forms.ModelMultipleChoiceField,
'required': False, }
def report_form_factory(model, fkeys_filter_func=None, foreign_key_widget_func=None, **kwargs):
foreign_key_widget_func = foreign_key_widget_func or _default_foreign_key_widget
fkeys_filter_func = fkeys_filter_func or (lambda x: x)
# gather foreign keys
fkeys_map = get_foreign_keys(model)
fkeys_map = fkeys_filter_func(fkeys_map)
fkeys_list = []
fields = OrderedDict()
fields['start_date'] = forms.SplitDateTimeField(required=False, label=_('From date'),
initial=app_settings.SLICK_REPORTING_DEFAULT_START_DATE,
# widget=RaBootstrapDateTime(),
input_date_formats=['%Y-%m-%d', '%Y-%m-%d'],
input_time_formats=['%H:%M', '%H:%M:%S'],
)
fields['end_date'] = forms.SplitDateTimeField(required=False, label=_('To date'),
initial=app_settings.SLICK_REPORTING_DEFAULT_END_DATE,
# widget=RaBootstrapDateTime(),
)
for name, f_field in fkeys_map.items():
fkeys_list.append(name)
fields[name] = f_field.formfield(
**foreign_key_widget_func(f_field))
new_form = type('ReportForm', (BaseReportForm, forms.BaseForm,),
{"base_fields": fields,
'_fkeys': fkeys_list,
'foreign_keys': fkeys_map,
})
return new_form
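# Usage sketch (MyModel is a placeholder model with foreign keys):
#   ReportForm = report_form_factory(MyModel)
#   form = ReportForm(data=request.GET)
#   if form.is_valid():
#       _, fk_filters = form.get_filters()
#       queryset = MyModel.objects.filter(**fk_filters)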
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
from copy import deepcopy
import json
import os
try:
from urllib.parse import urlparse
except ImportError: # pragma nocover
from urlparse import urlparse
import six
import http_ece
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from py_vapid import Vapid
class WebPushException(Exception):
pass
class CaseInsensitiveDict(dict):
"""A dictionary that has case-insensitive keys"""
def __init__(self, data={}, **kwargs):
for key in data:
dict.__setitem__(self, key.lower(), data[key])
self.update(kwargs)
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def __setitem__(self, key, value):
dict.__setitem__(self, key.lower(), value)
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())
def __delitem__(self, key):
dict.__delitem__(self, key.lower())
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def update(self, data):
for key in data:
self.__setitem__(key, data[key])
class WebPusher:
"""WebPusher encrypts a data block using HTTP Encrypted Content Encoding
for WebPush.
See https://tools.ietf.org/html/draft-ietf-webpush-protocol-04
for the current specification, and
https://developer.mozilla.org/en-US/docs/Web/API/Push_API for an
overview of Web Push.
Example of use:
The javascript promise handler for PushManager.subscribe()
receives a subscription_info object. subscription_info.getJSON()
will return a JSON representation.
(e.g.
.. code-block:: javascript
subscription_info.getJSON() ==
{"endpoint": "https://push.server.com/...",
"keys":{"auth": "...", "p256dh": "..."}
}
)
This subscription_info block can be stored.
To send a subscription update:
.. code-block:: python
# Optional
# headers = py_vapid.sign({"aud": "https://push.server.com/",
"sub": "mailto:your_admin@your.site.com"})
data = "Mary had a little lamb, with a nice mint jelly"
WebPusher(subscription_info).send(data, headers)
"""
subscription_info = {}
valid_encodings = [
# "aesgcm128", # this is draft-0, but DO NOT USE.
"aesgcm", # draft-httpbis-encryption-encoding-01
"aes128gcm" # draft-httpbis-encryption-encoding-04
]
def __init__(self, subscription_info, requests_session=None):
"""Initialize using the info provided by the client PushSubscription
object (See
https://developer.mozilla.org/en-US/docs/Web/API/PushManager/subscribe)
:param subscription_info: a dict containing the subscription_info from
the client.
:type subscription_info: dict
:param requests_session: a requests.Session object to optimize requests
to the same client.
:type requests_session: requests.Session
"""
if requests_session is None:
self.requests_method = requests
else:
self.requests_method = requests_session
if 'endpoint' not in subscription_info:
raise WebPushException("subscription_info missing endpoint URL")
self.subscription_info = deepcopy(subscription_info)
self.auth_key = self.receiver_key = None
if 'keys' in subscription_info:
keys = self.subscription_info['keys']
for k in ['p256dh', 'auth']:
if keys.get(k) is None:
raise WebPushException("Missing keys value: {}".format(k))
if isinstance(keys[k], six.string_types):
keys[k] = bytes(keys[k].encode('utf8'))
receiver_raw = base64.urlsafe_b64decode(
self._repad(keys['p256dh']))
            if len(receiver_raw) != 65 or receiver_raw[0:1] != b"\x04":
raise WebPushException("Invalid p256dh key specified")
self.receiver_key = receiver_raw
self.auth_key = base64.urlsafe_b64decode(
self._repad(keys['auth']))
def _repad(self, data):
"""Add base64 padding to the end of a string, if required"""
return data + b"===="[:len(data) % 4]
def encode(self, data, content_encoding="aesgcm"):
"""Encrypt the data.
:param data: A serialized block of byte data (String, JSON, bit array,
etc.) Make sure that whatever you send, your client knows how
to understand it.
:type data: str
:param content_encoding: The content_encoding type to use to encrypt
the data. Defaults to draft-01 "aesgcm". Latest draft-04 is
"aes128gcm", however not all clients may be able to use this
format.
:type content_encoding: enum("aesgcm", "aes128gcm")
"""
# Salt is a random 16 byte array.
if not data:
return
if not self.auth_key or not self.receiver_key:
raise WebPushException("No keys specified in subscription info")
salt = None
if content_encoding not in self.valid_encodings:
raise WebPushException("Invalid content encoding specified. "
"Select from " +
json.dumps(self.valid_encodings))
if (content_encoding == "aesgcm"):
salt = os.urandom(16)
# The server key is an ephemeral ECDH key used only for this
# transaction
server_key = ec.generate_private_key(ec.SECP256R1, default_backend())
crypto_key = base64.urlsafe_b64encode(
server_key.public_key().public_numbers().encode_point()
).strip(b'=')
if isinstance(data, six.string_types):
data = bytes(data.encode('utf8'))
encrypted = http_ece.encrypt(
data,
salt=salt,
keyid=crypto_key.decode(),
private_key=server_key,
dh=self.receiver_key,
auth_secret=self.auth_key,
version=content_encoding)
reply = CaseInsensitiveDict({
'crypto_key': crypto_key,
'body': encrypted,
})
if salt:
reply['salt'] = base64.urlsafe_b64encode(salt).strip(b'=')
return reply
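    # Usage sketch (subscription_info comes from a stored client
    # subscription): with the default "aesgcm" encoding the reply carries
    # the ephemeral public key, the salt and the ciphertext, e.g.
    #   encoded = WebPusher(subscription_info).encode("hello")
    #   encoded['crypto_key'], encoded['salt'], encoded['body']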
def as_curl(self, endpoint, encoded_data, headers):
"""Return the send as a curl command.
Useful for debugging. This will write out the encoded data to a local
file named `encrypted.data`
:param endpoint: Push service endpoint URL
:type endpoint: basestring
:param encoded_data: byte array of encoded data
:type encoded_data: bytearray
:param headers: Additional headers for the send
:type headers: dict
:returns string
"""
header_list = [
'-H "{}: {}" \\ \n'.format(
key.lower(), val) for key, val in headers.items()
]
data = ""
if encoded_data:
with open("encrypted.data", "wb") as f:
f.write(encoded_data)
data = "--data-binary @encrypted.data"
if 'content-length' not in headers:
header_list.append(
'-H "content-length: {}" \\ \n'.format(len(data)))
return ("""curl -vX POST {url} \\\n{headers}{data}""".format(
url=endpoint, headers="".join(header_list), data=data))
def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None,
content_encoding="aesgcm", curl=False, timeout=None):
"""Encode and send the data to the Push Service.
:param data: A serialized block of data (see encode() ).
:type data: str
:param headers: A dictionary containing any additional HTTP headers.
:type headers: dict
:param ttl: The Time To Live in seconds for this message if the
recipient is not online. (Defaults to "0", which discards the
message immediately if the recipient is unavailable.)
:type ttl: int
:param gcm_key: API key obtained from the Google Developer Console.
Needed if endpoint is https://android.googleapis.com/gcm/send
:type gcm_key: string
:param reg_id: registration id of the recipient. If not provided,
it will be extracted from the endpoint.
:type reg_id: str
:param content_encoding: ECE content encoding (defaults to "aesgcm")
:type content_encoding: str
:param curl: Display output as `curl` command instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple
"""
# Encode the data.
if headers is None:
headers = dict()
encoded = {}
headers = CaseInsensitiveDict(headers)
if data:
encoded = self.encode(data)
# Append the p256dh to the end of any existing crypto-key
crypto_key = headers.get("crypto-key", "")
if crypto_key:
# due to some confusion by a push service provider, we should
# use ';' instead of ',' to append the headers.
# see https://github.com/webpush-wg/webpush-encryption/issues/6
crypto_key += ';'
crypto_key += ("dh=" + encoded["crypto_key"].decode('utf8'))
headers.update({
'crypto-key': crypto_key,
'content-encoding': content_encoding,
'encryption': "salt=" + encoded['salt'].decode('utf8'),
})
if gcm_key:
endpoint = 'https://android.googleapis.com/gcm/send'
reg_ids = []
if not reg_id:
reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1]
reg_ids.append(reg_id)
gcm_data = dict()
gcm_data['registration_ids'] = reg_ids
if data:
gcm_data['raw_data'] = base64.b64encode(
encoded.get('body')).decode('utf8')
gcm_data['time_to_live'] = int(
headers['ttl'] if 'ttl' in headers else ttl)
encoded_data = json.dumps(gcm_data)
headers.update({
'Authorization': 'key='+gcm_key,
'Content-Type': 'application/json',
})
else:
encoded_data = encoded.get('body')
endpoint = self.subscription_info['endpoint']
if 'ttl' not in headers or ttl:
headers['ttl'] = str(ttl or 0)
# Additionally useful headers:
# Authorization / Crypto-Key (VAPID headers)
if curl:
return self.as_curl(endpoint, encoded_data, headers)
return self.requests_method.post(endpoint,
data=encoded_data,
headers=headers,
timeout=timeout)
def webpush(subscription_info,
data=None,
vapid_private_key=None,
vapid_claims=None,
content_encoding="aesgcm",
curl=False,
timeout=None,
ttl=0):
"""
    One call solution to encode and send `data` to the endpoint
    contained in `subscription_info` using optional VAPID auth headers.
    For example:
    .. code-block:: python
    from pywebpush import webpush
webpush(
subscription_info={
"endpoint": "https://push.example.com/v1/abcd",
"keys": {"p256dh": "0123abcd...",
"auth": "001122..."}
},
data="Mary had a little lamb, with a nice mint jelly",
vapid_private_key="path/to/key.pem",
vapid_claims={"sub": "YourNameHere@example.com"}
)
No additional method call is required. Any non-success will throw a
`WebPushException`.
:param subscription_info: Provided by the client call
:type subscription_info: dict
:param data: Serialized data to send
:type data: str
:param vapid_private_key: Vapid instance or path to vapid private key PEM \
or encoded str
:type vapid_private_key: Union[Vapid, str]
:param vapid_claims: Dictionary of claims ('sub' required)
:type vapid_claims: dict
:param content_encoding: Optional content type string
:type content_encoding: str
:param curl: Return as "curl" string instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple
:param ttl: Time To Live
:type ttl: int
:return requests.Response or string
"""
vapid_headers = None
if vapid_claims:
if not vapid_claims.get('aud'):
url = urlparse(subscription_info.get('endpoint'))
aud = "{}://{}".format(url.scheme, url.netloc)
vapid_claims['aud'] = aud
if not vapid_private_key:
raise WebPushException("VAPID dict missing 'private_key'")
if isinstance(vapid_private_key, Vapid):
vv = vapid_private_key
elif os.path.isfile(vapid_private_key):
# Presume that key from file is handled correctly by
# py_vapid.
vv = Vapid.from_file(
private_key_file=vapid_private_key) # pragma no cover
else:
vv = Vapid.from_string(private_key=vapid_private_key)
vapid_headers = vv.sign(vapid_claims)
result = WebPusher(subscription_info).send(
data,
vapid_headers,
ttl=ttl,
content_encoding=content_encoding,
curl=curl,
timeout=timeout,
)
if not curl and result.status_code > 202:
raise WebPushException("Push failed: {}: {}".format(
result, result.text))
return result
|
import torch
import numpy as np
# import bagread
# import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import time
from ..util import get_data_maybe
NUM_HISTORY = 2
NUM_INPUTS = (NUM_HISTORY + 1) * 13
NUM_OUTPUTS = 18
NUM_HIDDEN_UNITS = 200
NUM_HIDDEN_LAYERS = 2
# NUM_ENSEMBLE = 1
#
# PATH = ['flight_model_net1_ctrl_256_1000_2layers_2his_noval.pth',
# 'flight_model_net1_ctrl_256_1000_2layers_2his_noval.pth',
# 'flight_model_net3_ctrl_256_1000_2layers_2his_noval.pth',
# 'flight_model_net4_ctrl_256_1000_2layers_2his_noval.pth',
# 'flight_model_net5_ctrl_256_1000_2layers_2his_noval.pth']
class Net(nn.Module):
def __init__(self, n_feature, n_hidden, n_output, n_layers):
super(Net, self).__init__()
self.bn_input = nn.BatchNorm1d(n_hidden, momentum=0.1)
self.input = nn.Linear(n_feature, n_hidden)
self.hiddens = nn.ModuleList()
self.batchnorms = nn.ModuleList()
for _ in range(n_layers-1):
self.hiddens.append(nn.Linear(n_hidden, n_hidden))
self.batchnorms.append(nn.BatchNorm1d(n_hidden, momentum=0.1))
# self.hiddens.append(nn.Dropout(p=0.5))
# self.hidden_1 = nn.Linear(n_feature, n_hidden)
# self.hidden_2 = nn.Linear(n_hidden, n_hidden)
self.predict = nn.Linear(n_hidden, n_output)
def forward(self, x):
x= self.input(x)
x= self.bn_input(x)
self.before_input_act = x
x= F.relu(x)
self.before_act = []
for i in range(len(self.hiddens)):
x = self.hiddens[i](x)
x = self.batchnorms[i](x)
self.before_act.append(x)
x = F.relu(x)
# x = F.dropout(x, p=0.5)
# x= self.hidden_2(x)
# x= self.bn_hidden(x)
# x= F.relu(x)
# x= F.dropout(x, p=0.5)
x = self.predict(x)
return x
class flight_dynamics(nn.Module):
def __init__(self, T, n_batch, num_ensemble, path):
super(flight_dynamics, self).__init__()
self.NUM_HISTORY = NUM_HISTORY
self.NUM_ENSEMBLE = num_ensemble
self.NUM_INPUTS = NUM_INPUTS
self.NUM_HIDDEN_UNITS = NUM_HIDDEN_UNITS
self.NUM_OUTPUTS = NUM_OUTPUTS
self.NUM_HIDDEN_LAYERS = NUM_HIDDEN_LAYERS
self.PATH = path
self.goal_weights = np.concatenate((np.array([10., 10., 10., 15., 10., 3., 2., 2., 2.], dtype='single'),
# torch.tensor([10., 10., 10., 10., 10., 3., 2., 2., 2.]),
# torch.tensor([10., 10., 10., 10., 10., 3., 2., 2., 2.]),
np.zeros(self.NUM_HISTORY * 13, dtype='single')))
self.goal_state = np.concatenate((np.array([0., 0., 0., -0.075, 0., 3.14, 0., 0., 0.], dtype='single'),
# torch.tensor([0., 0., 0., -0.075, 0., 3.14, 0., 0., 0.]),
# torch.tensor([0., 0., 0., -0.075, 0., 3.14, 0., 0., 0.]),
np.zeros(self.NUM_HISTORY * 13, dtype='single')))
self.ctrl_penalty = 20.
self.goal_ctrl = np.array([0.075, 0., 0., 0.6], dtype='single')
# self.slew_rate_penalty = torch.tensor([2., 2., 2., 2.])
self.slew_rate_penalty = None
self.n_ctrl = 4
self.n_state = self.NUM_INPUTS - self.n_ctrl
self.n_present_state = 9
self.lower = np.tile(np.array([-0.05, -0.05, -0.05, 0.65], dtype='single'), (T, n_batch, 1))
self.upper = np.tile(np.array([0.15, 0.05, 0.05, 0.75], dtype='single'), (T, n_batch, 1))
# self.delta_u = torch.tensor(0.01)
self.delta_u = None
# self.lower = None
# self.upper = None
self.n_batch = n_batch
self.linesearch_decay = 0.1
self.max_linesearch_iter = 1
self.net = {}
for i in range(self.NUM_ENSEMBLE):
self.net['obj'+str(i)] = Net(self.NUM_INPUTS, self.NUM_HIDDEN_UNITS, self.NUM_OUTPUTS, self.NUM_HIDDEN_LAYERS)
self.net['obj'+str(i)].load_state_dict(torch.load(self.PATH[i]))
self.net['obj' + str(i)] = self.net['obj'+str(i)].eval()
def forward(self, x, u):
# time1 = time.time()
# assert x.ndimension() == u.ndimension()
if x.ndim == 1 and u.ndim == 1:
x = np.expand_dims(x, 0)
u = np.expand_dims(u, 0)
exp_sum = torch.zeros(x.shape[0], int(self.NUM_OUTPUTS/2))
xu = np.concatenate((x[:, :(self.NUM_HISTORY + 1) * self.n_present_state], u, x[:, (self.NUM_HISTORY + 1) * self.n_present_state:]), axis=1)
xu_torch = torch.from_numpy(xu)
for i in range(self.NUM_ENSEMBLE):
# net = Net(self.NUM_INPUTS, self.NUM_HIDDEN_UNITS, self.NUM_OUTPUTS, self.NUM_HIDDEN_LAYERS)
# net.load_state_dict(torch.load(self.PATH[i]))
# net = net.eval()
prediction = self.net['obj' + str(i)](xu_torch)
# print('Batch norm weight:', self.net['obj' + str(i)].predict.weight.size())
exp, _ = torch.chunk(prediction, 2, dim=1)
exp_sum += exp
z = 1/self.NUM_ENSEMBLE * exp_sum
z = np.concatenate((z.detach().numpy(), x[:,:self.NUM_HISTORY*self.n_present_state], xu[:,(self.NUM_HISTORY + 1)*self.n_present_state:(self.NUM_HISTORY + 1)*self.n_present_state + self.NUM_HISTORY * self.n_ctrl]), axis=1)
# time2 = time.time()
# print('model forward time:', time2 - time1)
return z
def get_true_obj(self):
q = np.concatenate((
self.goal_weights,
self.ctrl_penalty * np.ones(self.n_ctrl, dtype='single')
))
assert not hasattr(self, 'mpc_lin')
# px = -torch.sqrt(self.goal_weights) * self.goal_state # + self.mpc_lin
px = -self.goal_weights * self.goal_state
pu = -self.ctrl_penalty * np.ones(self.n_ctrl, dtype='single') * self.goal_ctrl
p = np.concatenate((px, pu))
return q, p
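    # Worked note (derived from the code above): with diagonal weights w and
    # goal g, the stage cost 0.5 * w * z**2 + p * z with p = -w * g has its
    # minimum at z = g, so q and p encode a quadratic penalty centred on the
    # goal state and control.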
def grad_input(self, x, u):
n_batch_horizon = x.shape[0]
for j in range(self.NUM_ENSEMBLE):
grad = self.net['obj' + str(j)].predict.weight.repeat(n_batch_horizon,1,1).detach().numpy()
for i in range(self.NUM_HIDDEN_LAYERS-2, -1, -1):
I = get_data_maybe(self.net['obj' + str(j)].before_act[i] <= 0.).unsqueeze(2).repeat(1, 1, self.NUM_HIDDEN_UNITS)
batchnorm_p = torch.div(self.net['obj' + str(j)].batchnorms[i].weight,
torch.sqrt(self.net['obj' + str(j)].batchnorms[i].running_var) + 1e-5)
Wi_grad = torch.mul(self.net['obj' + str(j)].hiddens[i].weight, batchnorm_p.reshape(-1,1)).repeat(n_batch_horizon,1,1)
Wi_grad[I] = 0.
# grad = grad.bmm(Wi_grad)
grad = np.matmul(grad, Wi_grad.detach().numpy())
I = get_data_maybe(self.net['obj' + str(j)].before_input_act <= 0.).unsqueeze(2).repeat(1, 1, self.NUM_INPUTS)
batchnorm_p = torch.div(self.net['obj' + str(j)].bn_input.weight,
torch.sqrt(self.net['obj' + str(j)].bn_input.running_var) + 1e-5)
Wi_grad = torch.mul(self.net['obj' + str(j)].input.weight, batchnorm_p.reshape(-1,1)).repeat(n_batch_horizon, 1, 1)
Wi_grad[I] = 0.
# grad = grad.bmm(Wi_grad)
grad = np.matmul(grad, Wi_grad.detach().numpy())
            if j == 0:
                grad_total = 1/self.NUM_ENSEMBLE * grad
            else:
                grad_total += 1/self.NUM_ENSEMBLE * grad
R = np.concatenate((grad_total[:, :self.n_present_state, :(self.NUM_HISTORY + 1) * self.n_present_state],
grad_total[:, :self.n_present_state, self.NUM_HISTORY * self.n_present_state + 13:]), axis=2)
S = grad_total[:, :self.n_present_state, (self.NUM_HISTORY + 1) * self.n_present_state:self.NUM_HISTORY * self.n_present_state + 13]
if self.NUM_HISTORY >= 1:
RHS = np.expand_dims(np.eye(self.NUM_HISTORY * self.n_present_state, self.n_state, dtype='single'), 0).repeat(n_batch_horizon, 0)
# print('RHS: {}'. format(RHS[0,:,:]))
SHS = np.expand_dims(np.zeros((self.NUM_HISTORY * self.n_present_state, self.n_ctrl), dtype='single'), 0).repeat(n_batch_horizon, 0)
# print('SHS: {}'.format(SHS[0,:,:]))
RU = np.expand_dims(np.zeros((self.n_ctrl, self.n_state), dtype='single'), 0).repeat(n_batch_horizon, 0)
# print('RU: {}'.format(RU[0,:,:]))
SU = np.expand_dims(np.eye(self.n_ctrl, dtype='single'), 0).repeat(n_batch_horizon, 0)
# print('SU: {}'.format(SU[0,:,:]))
RHU = np.expand_dims(np.concatenate((np.zeros(((self.NUM_HISTORY - 1) * self.n_ctrl, (self.NUM_HISTORY + 1) * self.n_present_state), dtype='single'),
np.eye((self.NUM_HISTORY - 1) * self.n_ctrl, self.NUM_HISTORY * self.n_ctrl, dtype='single')), axis=1), 0).repeat(n_batch_horizon, 0)
# print('RHu: {}'.format(RHU[0,:,:]))
SHU = np.expand_dims(np.zeros(((self.NUM_HISTORY - 1) * self.n_ctrl, self.n_ctrl), dtype='single'), 0).repeat(n_batch_horizon, 0)
# print('SHU: {}'.format(SHU[0,:,:]))
R = np.concatenate((R, RHS, RU, RHU), axis=1)
# print('R: {}'.format(R[0,:,:]))
S = np.concatenate((S, SHS, SU, SHU), axis=1)
# print('S: {}'.format(S[0,:,:]))
else:
pass
return R, S
|
from fractions import Fraction
import operator
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from probability.concept.random_variable import RandomVariable, SetOfRandomVariable
def joint_distribution(series_or_dataframe):
from probability.probability_distribution import ProbabilityDistribution
return ProbabilityDistribution.from_joint_distribution(series_or_dataframe)
class AbstractProbabilityDistribution(metaclass=ABCMeta):
@property
def variables(self) -> SetOfRandomVariable:
index = self.series.index
names = [index.name] if type(index) == pd.Index else index.names
return SetOfRandomVariable(tuple(RandomVariable(column) for column in names))
@property
@abstractmethod
def series(self) -> pd.Series:
return None
def argmax(self, *variables):
"""
Arguments of the maxima (argmax): return the assignments that produce the
maximum value of the probability distribution.
:return: tuple of (variable == value) assignments
"""
if not variables:
variables = self.variables
method = lambda assignment: assignment.random_variable in variables
maximum = self.series.idxmax()
if not isinstance(maximum, tuple):
maximum = (maximum,)
argmax = (variable == value for variable, value in zip(self.variables, maximum))
return tuple(filter(method, argmax))
def sum(self):
return self.series.sum()
def __eq__(self, other):
# Align the other distribution's index levels and ordering with ours,
# then compare probabilities elementwise.
series = other.series.reorder_levels(self.variables.names)
series = series.sort_index()
other = joint_distribution(series)
return np.isclose(self.series, other.series).all()
def __mul__(self, other):
name = self.series.name + ' ' + other.series.name
operation = operator.mul
return self._calcule(other, operation, name)
def __truediv__(self, other):
name = self.series.name + ' / ' + other.series.name
operation = operator.truediv
return self._calcule(other, operation, name)
def _calcule(self, other, operation, new_name):
"""
Based on: https://github.com/pandas-dev/pandas/issues/9368
"""
X = self.series
Y = other.series
on = tuple(set(X.index.names) & set(Y.index.names))
result = pd.merge(X.reset_index(), Y.reset_index(), on=on, how='outer')
result[new_name] = operation(result[X.name], result[Y.name])
result = result.drop([X.name, Y.name], axis=1)
variables = set(self.variables.names) | set(other.variables.names)
result = result.set_index(list(variables))
return joint_distribution(result)
def __repr__(self):
return self.series.map(lambda x: Fraction(x).limit_denominator()).__repr__()
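# A minimal standalone sketch of the merge trick used in _calcule above, with
# hypothetical variables A and B (dividing P(A,B) by P(B) gives P(A|B)):
#
# idx = pd.MultiIndex.from_product([['a0', 'a1'], ['b0', 'b1']], names=['A', 'B'])
# X = pd.Series([0.1, 0.2, 0.3, 0.4], index=idx, name='P(A,B)')
# Y = pd.Series([0.4, 0.6], index=pd.Index(['b0', 'b1'], name='B'), name='P(B)')
# m = pd.merge(X.reset_index(), Y.reset_index(), on=['B'], how='outer')
# m['P(A|B)'] = m['P(A,B)'] / m['P(B)']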
|
"""
byceps.services.user.transfer.log
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict
from uuid import UUID
from ....typing import UserID
UserLogEntryData = Dict[str, Any]
@dataclass(frozen=True)
class UserLogEntry:
id: UUID
occurred_at: datetime
event_type: str
user_id: UserID
data: UserLogEntryData
|
#Message App
#copyright (c) 2015 Tyler Spadgenske
#MIT License
################################
#To be packaged with stock TYOS#
################################
import pygame, time, os
from pygame.locals import *
class Run():
def __init__(self, fona):
self.fona = fona
self.next_app = None
#Colors
self.WHITE = (255,255,255)
self.BLACK = (0,0,0)
self.send = False
self.valid = False
#Variables
self.mode = 3
self.number = ''
self.message = ''
self.first = False
self.sms_messages = {'messages':[], 'senders':[]}
self.page = 1
#Load images
self.keyboard_image = pygame.image.load('/home/pi/zero-phone/keyboard.png')
self.num_keyboard_image = pygame.image.load('/home/pi/zero-phone/numbered_keyboard.png')
self.bubble = pygame.image.load('/home/pi/zero-phone/bubble.png')
self.keyboard_rect = self.keyboard_image.get_rect()
self.keyboard_rect.x = 4
self.keyboard_rect.y = 190
#Main conversation image
self.conversation_image = pygame.image.load('/home/pi/zero-phone/conversation.png')
self.conversation_rect = self.conversation_image.get_rect()
self.conversation_rect.centerx = 104
self.conversation_rect.centery = 169
#Setup text
#Setup fonts
self.font = pygame.font.Font('/home/pi/zero-phone/arial.ttf', 20)
self.message_font = pygame.font.Font('/home/pi/zero-phone/arial.ttf', 12)
#please wait Text
self.wait = self.font.render('Please wait...', True, self.BLACK, self.WHITE)
self.wait_rect = self.wait.get_rect()
self.wait_rect.centerx = 104
self.wait_rect.centery = 156
#Says... text
self.says_text1 = self.font.render('Tyler says...', True, self.BLACK, self.WHITE)
self.says_rect1 = self.says_text1.get_rect()
self.says_rect1.x = 23
self.says_rect1.y = 78
self.says_text2 = self.font.render('Billy says...', True, self.BLACK, self.WHITE)
self.says_rect2 = self.says_text2.get_rect()
self.says_rect2.x = 23
self.says_rect2.y = 151
#Message lines
self.message_line1 = self.font.render('', True, self.BLACK, self.WHITE)
self.message_line1_rect = self.message_line1.get_rect()
self.message_line1_rect.x = 27
self.message_line1_rect.y = 101
self.message_line2 = self.font.render('', True, self.BLACK, self.WHITE)
self.message_line2_rect = self.message_line2.get_rect()
self.message_line2_rect.x = 27
self.message_line2_rect.y = 115
self.message_line3 = self.font.render('', True, self.BLACK, self.WHITE)
self.message_line3_rect = self.message_line3.get_rect()
self.message_line3_rect.x = 27
self.message_line3_rect.y = 129
self.message2_line1 = self.font.render('', True, self.BLACK, self.WHITE)
self.message2_line1_rect = self.message2_line1.get_rect()
self.message2_line1_rect.x = 27
self.message2_line1_rect.y = 176
self.message2_line2 = self.font.render('', True, self.BLACK, self.WHITE)
self.message2_line2_rect = self.message2_line2.get_rect()
self.message2_line2_rect.x = 27
self.message2_line2_rect.y = 190
self.message2_line3 = self.font.render('', True, self.BLACK, self.WHITE)
self.message2_line3_rect = self.message2_line3.get_rect()
self.message2_line3_rect.x = 27
self.message2_line3_rect.y = 205
#Number to send
#Setup numbers Text
self.number_text = self.font.render(self.number, True, self.BLACK, self.WHITE)
self.number_rect = self.number_text.get_rect()
self.number_rect.x = 10
self.number_rect.y = 37
#Setup numbers Text
self.line1 = self.font.render(self.message, True, self.BLACK, self.WHITE)
self.line1_rect = self.line1.get_rect()
self.line1_rect.x = 10
self.line1_rect.y = 85
#Setup numbers Text
self.line2 = self.font.render(self.message, True, self.BLACK, self.WHITE)
self.line2_rect = self.line2.get_rect()
self.line2_rect.x = 10
self.line2_rect.y = 98
#Setup numbers Text
self.line3 = self.font.render(self.message, True, self.BLACK, self.WHITE)
self.line3_rect = self.line3.get_rect()
self.line3_rect.x = 10
self.line3_rect.y = 111
self.bubble_rect = self.bubble.get_rect()
self.bubble_rect.x = 3
self.bubble_rect.y = 33
#Stuff to follow app protocol
self.exit = False
self.blit_one_surface = {'surface':[], 'rects':[]}
self.blit_mode1 = {'surfaces':[self.keyboard_image, self.bubble, self.line1, self.line2,
self.line3, self.number_text], 'rects':[self.keyboard_rect, self.bubble_rect, self.line1_rect, self.line2_rect,
self.line3_rect, self.number_rect]}
self.blit_mode2 = {'surfaces':[self.conversation_image, self.says_text1, self.says_text2, self.message_line1,
self.message_line2, self.message_line3,
self.message2_line1, self.message2_line2,
self.message2_line3], 'rects':[self.conversation_rect,
self.says_rect1, self.says_rect2,
self.message_line1_rect, self.message_line2_rect,
self.message_line3_rect, self.message2_line1_rect,
self.message2_line2_rect,self.message2_line3_rect]}
self.blit_mode3 = {'surfaces':[self.wait], 'rects':[self.wait_rect]}
self.blit = self.blit_mode2
self.load_contacts()
def load_contacts(self):
self.contacts = {'names':[], 'numbers':[]}
try:
contact_file = open('/home/pi/zero-phone/configure/contacts.conf', 'r')
except IOError:
print('***********************************************************')
print('NO CONTACTS FOUND')
print('PLEASE EDIT /home/pi/zero-phone/configure/contacts.conf FILE')
print('***********************************************************')
if not os.path.exists('/home/pi/zero-phone/configure'):
os.mkdir('/home/pi/zero-phone/configure')
if not os.path.exists('/home/pi/zero-phone/logs'):
os.mkdir('/home/pi/zero-phone/logs') #May be in wrong spot, but it works
contact_file = open('/home/pi/zero-phone/configure/contacts.conf', 'w+')
contact_file.write('#Contacts\n')
contact_file.write('#Use format name=number i.e. Joe=1555666777 # are comments\n')
contact_file.close()
contact_file = open('/home/pi/zero-phone/configure/contacts.conf', 'r')
self.contact_list = contact_file.readlines()
contact_file.close()
#Drop comment and blank lines, then split each entry into [name, number]
self.contact_list = [line.rstrip().split('=')
for line in self.contact_list
if line.strip() and not line.startswith('#')]
def on_first_run(self):
self.first = False
self.mode = 3
def get_sms(self):
#Set to text mode
self.fona.transmit('AT+CMGF=1')
self.fona.transmit('AT+CSDH=1')
#Get number of sms messages
num_sms = self.fona.transmit('AT+CPMS?')
num_sms = num_sms[1]
num_sms = num_sms.split(',')
num_sms = num_sms[1]
print('SMS FOUND IN MEMORY: ' + num_sms)
print('LOADING SMS MESSAGES...')
#Retrieve sms messages (read each message once and reuse the response)
for i in range(1, int(num_sms) + 1):
response = self.fona.transmit('AT+CMGR=' + str(i))
self.sms_messages['senders'].append(response[1].split('"')[3].replace('+', ''))
self.sms_messages['messages'].append(response[2])
#If in contacts, replace number with name
for i in self.contact_list:
index = 0
for senders in self.sms_messages['senders']:
if i[1] == senders:
self.sms_messages['senders'][index] = i[0]
index += 1
#If there are less than two messages, do some configuring
if int(num_sms) < 2:
self.sms_messages['senders'].append('')
self.sms_messages['messages'].append('')
if int(num_sms) == 0:
self.sms_messages['senders'].append('')
self.sms_messages['messages'].append('')
def config_sms(self):
self.blit['surfaces'][1] = self.font.render(self.sms_messages['senders'][(self.page + 1) * -1] + ' says...', True, self.BLACK, self.WHITE)
self.blit['surfaces'][2] = self.font.render(self.sms_messages['senders'][self.page * -1] + ' says...', True, self.BLACK, self.WHITE)
#Box 1
self.blit['surfaces'][3] = self.font.render(self.sms_messages['messages'][(self.page + 1) * -1][:25], True, self.BLACK, self.WHITE)
if len(self.sms_messages['messages'][(self.page + 1) * -1]) > 25:
self.blit['surfaces'][4] = self.font.render(self.sms_messages['messages'][(self.page + 1)* -1][25:50], True, self.BLACK, self.WHITE)
if len(self.sms_messages['messages'][(self.page + 1) * -1]) > 50:
self.blit['surfaces'][5] = self.font.render(self.sms_messages['messages'][(self.page + 1)* -1][50:75], True, self.BLACK, self.WHITE)
else:
self.blit['surfaces'][5] = self.font.render('', True, self.BLACK, self.WHITE)
else:
self.blit['surfaces'][4] = self.font.render('', True, self.BLACK, self.WHITE)
self.blit['surfaces'][5] = self.font.render('', True, self.BLACK, self.WHITE)
#Box 2
self.blit['surfaces'][6] = self.font.render(self.sms_messages['messages'][self.page * -1][:25], True, self.BLACK, self.WHITE)
if len(self.sms_messages['messages'][self.page * -1]) > 25:
self.blit['surfaces'][7] = self.font.render(self.sms_messages['messages'][self.page * -1][25:50], True, self.BLACK, self.WHITE)
if len(self.sms_messages['messages'][self.page * -1]) > 50:
self.blit['surfaces'][8] = self.font.render(self.sms_messages['messages'][self.page * -1][50:75], True, self.BLACK, self.WHITE)
else:
self.blit['surfaces'][8] = self.font.render('', True, self.BLACK, self.WHITE)
else:
self.blit['surfaces'][7] = self.font.render('', True, self.BLACK, self.WHITE)
self.blit['surfaces'][8] = self.font.render('', True, self.BLACK, self.WHITE)
def run_app(self):
if self.mode == 3:
self.blit = self.blit_mode3
if self.first:
time.sleep(5)
self.mode = 2
self.blit = self.blit_mode2
self.sms_messages = {'messages':[], 'senders':[]}
self.get_sms()
self.config_sms()
self.first = True
if self.exit:
self.mode = 2
if len(self.number) > 0:
self.valid = True
else:
self.valid = False
self.send = False
if self.send and self.valid:
self.send = False
self.valid = False
#Standard GSM AT sequence: CMGF=1 selects text mode, CMGS opens the
#message prompt for the destination number, the body follows, and
#Ctrl-Z (chr(26)) terminates and sends the message
self.fona.transmit('AT+CMGF=1')
time.sleep(.25)
self.fona.transmit('AT+CMGS="' + self.number + '"')
time.sleep(.25)
self.fona.transmit(self.message)
time.sleep(.25)
self.fona.transmit(chr(26))
self.mode = 2
self.blit = self.blit_mode2
def get_events(self, event):
if self.mode != 2:
self.get_keyboard_events(event)
else:
self.get_read_events(event)
def get_read_events(self, event):
if event.pos[0] > 23 and event.pos[0] < 185:
if event.pos[1] > 40 and event.pos[1] < 68:
self.page += 1
if self.page == len(self.sms_messages['senders']):
self.page = len(self.sms_messages['senders']) - 1
self.config_sms()
if event.pos[1] > 229 and event.pos[1] < 262:
self.page -= 1
if self.page == 0:
self.page = 1
self.config_sms()
if event.pos[1] > 267 and event.pos[1] < 296:
self.mode = 0
self.blit = self.blit_mode1
def get_keyboard_events(self, event):
#Get key pressed
#Row 1
if event.pos[1] > 189 and event.pos[1] < 215:
if event.pos[0] > 4 and event.pos[0] < 20:
if self.mode == 1:
self.message = self.message + 'q'
else:
self.number = self.number + '1'
if event.pos[0] > 24 and event.pos[0] < 40:
if self.mode == 1:
self.message = self.message + 'w'
else:
self.number = self.number + '2'
if event.pos[0] > 44 and event.pos[0] < 60:
if self.mode == 1:
self.message = self.message + 'e'
else:
self.number = self.number + '3'
if event.pos[0] > 64 and event.pos[0] < 81:
if self.mode == 1:
self.message = self.message + 'r'
else:
self.number = self.number + '4'
if event.pos[0] > 85 and event.pos[0] < 101:
if self.mode == 1:
self.message = self.message + 't'
else:
self.number = self.number + '5'
if event.pos[0] > 105 and event.pos[0] < 121:
if self.mode == 1:
self.message = self.message + 'y'
else:
self.number = self.number + '6'
if event.pos[0] > 125 and event.pos[0] < 141:
if self.mode == 1:
self.message = self.message + 'u'
else:
self.number = self.number + '7'
if event.pos[0] > 145 and event.pos[0] < 161:
if self.mode == 1:
self.message = self.message + 'i'
else:
self.number = self.number + '8'
if event.pos[0] > 165 and event.pos[0] < 181:
if self.mode == 1:
self.message = self.message + 'o'
else:
self.number = self.number + '9'
if event.pos[0] > 185 and event.pos[0] < 202:
if self.mode == 1:
self.message = self.message + 'p'
else:
self.number = self.number + '0'
#Row 2
if event.pos[1] > 219 and event.pos[1] < 244:
if event.pos[0] > 12 and event.pos[0] < 28:
if self.mode == 1:
self.message = self.message + 'a'
if event.pos[0] > 32 and event.pos[0] < 48:
if self.mode == 1:
self.message = self.message + 's'
if event.pos[0] > 52 and event.pos[0] < 75:
if self.mode == 1:
self.message = self.message + 'd'
if event.pos[0] > 72 and event.pos[0] < 88:
if self.mode == 1:
self.message = self.message + 'f'
if event.pos[0] > 92 and event.pos[0] < 109:
if self.mode == 1:
self.message = self.message + 'g'
if event.pos[0] > 112 and event.pos[0] < 129:
if self.mode == 1:
self.message = self.message + 'h'
if event.pos[0] > 133 and event.pos[0] < 149:
if self.mode == 1:
self.message = self.message + 'j'
if event.pos[0] > 153 and event.pos[0] < 169:
if self.mode == 1:
self.message = self.message + 'k'
if event.pos[0] > 173 and event.pos[0] < 189:
if self.mode == 1:
self.message = self.message + 'l'
#Row 3
if event.pos[1] > 248 and event.pos[1] < 274:
if event.pos[0] > 32 and event.pos[0] < 48:
if self.mode == 1:
self.message = self.message + 'z'
if event.pos[0] > 52 and event.pos[0] < 75:
if self.mode == 1:
self.message = self.message + 'x'
if event.pos[0] > 72 and event.pos[0] < 88:
if self.mode == 1:
self.message = self.message + 'c'
if event.pos[0] > 92 and event.pos[0] < 109:
if self.mode == 1:
self.message = self.message + 'v'
if event.pos[0] > 112 and event.pos[0] < 129:
if self.mode == 1:
self.message = self.message + 'b'
if event.pos[0] > 133 and event.pos[0] < 149:
if self.mode == 1:
self.message = self.message + 'n'
if event.pos[0] > 153 and event.pos[0] < 169:
if self.mode == 1:
self.message = self.message + 'm'
#Row 4
if event.pos[1] > 278 and event.pos[1] < 304:
if event.pos[0] > 32 and event.pos[0] < 75:
if self.mode == 1:
self.message = self.message[:-1]
else:
self.number = self.number[:-1]
if event.pos[0] > 72 and event.pos[0] < 129:
if self.mode == 1:
self.message = self.message + ' '
if event.pos[0] > 133 and event.pos[0] < 169:
self.send = True
#Keyboard mode
if event.pos[0] > 3 and event.pos[0] < 207 and event.pos[1] > 33 and event.pos[1] < 51:
self.mode = 0
if event.pos[0] > 3 and event.pos[0] < 207 and event.pos[1] > 57 and event.pos[1] < 142:
self.mode = 1
if self.mode == 0:
self.blit['surfaces'][0] = self.num_keyboard_image
else:
self.blit['surfaces'][0] = self.keyboard_image
self.blit['surfaces'][2] = self.font.render(self.message[:32], True, self.BLACK, self.WHITE)
self.blit['surfaces'][3] = self.font.render(self.message[32:64], True, self.BLACK, self.WHITE)
self.blit['surfaces'][4] = self.font.render(self.message[64:96], True, self.BLACK, self.WHITE)
self.blit['surfaces'][5] = self.font.render(self.number, True, self.BLACK, self.WHITE)
|
"""
Functions for validating the schema of repo-tools-data.
"""
import collections
import csv
import datetime
import difflib
import pathlib
import re
import yaml
from schema import And, Optional, Or, Schema, SchemaError
from yaml.constructor import ConstructorError
def valid_agreement(s):
"""Is this a valid "agreement" value?"""
return s in ['institution', 'individual', 'none']
def valid_email(s):
"""Is this a valid email?"""
return bool(
isinstance(s, str) and
re.search(r"^[^@ ]+@[^@ ]+\.[^@ ]+$", s) and
not re.search(r"[,;?\\%]", s)
)
def valid_org(s):
"""Is this a valid GitHub org?"""
return isinstance(s, str) and re.match(r"^[^/]+$", s)
def valid_repo(s):
"""Is this a valid repo?"""
return isinstance(s, str) and re.match(r"^[^/]+/[^/]+$", s)
def existing_person(s):
"""Is this an existing person in people.yaml?"""
return isinstance(s, str) and s in ALL_PEOPLE
def not_empty_string(s):
"""A string that can't be empty."""
return isinstance(s, str) and len(s) > 0
def check_institution(d):
"""If the agreement is institution, then we have to have an institution."""
if "agreement" in d:
if d['agreement'] == 'institution':
if 'institution' in d:
if d['institution'] not in ALL_ORGS:
raise SchemaError("Institution {!r} isn't in orgs.yaml: {}".format(d['institution'], d))
if d['agreement'] == 'none':
if 'institution' in d:
raise SchemaError("No-agreement should have no institution")
return True
def github_username(s):
"""Is this a valid GitHub username?"""
# Usernames can have "[bot]" at the end for bots.
suffixes = ["[bot]", "%5Bbot%5D"]
for suffix in suffixes:
if s.endswith(suffix):
s = s[:-len(suffix)]
break
# For Anant, we added a star just to be sure we wouldn't find some other
# account, so allow a star at the end.
return re.match(r"^[a-zA-Z0-9_-]+\*?$", s)
def not_data_key(s):
"""Make sure the GitHub name is not a data line at the wrong indent."""
return s not in [
'name', 'email', 'agreement', 'institution', 'jira',
'comments', 'other_emails', 'before', 'beta', 'committer', 'email_ok',
]
def one_of_keys(*keys):
"""Checks that at least one key is present (not exclusive OR)"""
def _check(d):
if sum(k in d for k in keys) > 0:
return True
raise SchemaError("Must have at least one of {}".format(keys))
return _check
COMMITTER_SCHEMA = Schema(
Or(
# "committer: false" means this person is not a committer.
False,
# or explain where they are a committer:
And(
{
Optional('orgs'): [valid_org],
Optional('repos'): [valid_repo],
Optional('champions'): [existing_person],
Optional('branches'): [not_empty_string],
},
# You have to specify at least one of orgs, repos, or branches:
one_of_keys("orgs", "repos", "branches"),
),
),
)
PEOPLE_SCHEMA = Schema(
{
And(github_username, not_data_key): And(
{
'name': not_empty_string,
'email': valid_email,
'agreement': valid_agreement,
Optional('institution'): not_empty_string,
Optional('is_robot'): True,
Optional('jira'): not_empty_string,
Optional('comments'): [str],
Optional('other_emails'): [valid_email],
Optional('before'): {
datetime.date: And(
{
Optional('agreement'): valid_agreement,
Optional('institution'): not_empty_string,
Optional('comments'): [str],
Optional('committer'): COMMITTER_SCHEMA,
},
check_institution,
),
},
Optional('beta'): bool,
Optional('contractor'): bool,
Optional('committer'): COMMITTER_SCHEMA,
Optional('email_ok'): bool,
},
check_institution,
),
}
)
ORGS_SCHEMA = Schema(
{
str: {
Optional("name"): not_empty_string,
"agreement": Or("institution", "none"),
Optional("contractor"): bool,
Optional("committer"): bool,
Optional("internal"): bool,
Optional(Or("contact", "contact1", "contact2")): {
"name": not_empty_string,
"email": valid_email,
},
},
}
)
def color(s):
return re.match(r"^[a-fA-F0-9]{6}$", s)
LABELS_SCHEMA = Schema(
{
str: Or(
# A label we don't want:
{
"delete": True,
},
# A label we want:
{
"color": color,
Optional("description"): str,
},
),
},
)
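# For reference, a labels.yaml fragment this schema accepts might look like
# (hypothetical label names):
#
#   bug:
#     color: "d73a4a"
#     description: Something isn't working
#   obsolete-label:
#     delete: true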
# Prevent duplicate keys in YAML.
# Adapted from https://gist.github.com/pypt/94d747fe5180851196eb
# from https://bitbucket.org/xi/pyyaml/issues/9/ignore-duplicate-keys-and-send-warning-or
def mapping_constructor(loader, node, deep=False):
"""Prevent duplicate keys and return an OrderedDict."""
mapping = collections.OrderedDict()
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
mapping[key] = value
return mapping
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, mapping_constructor)
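# With this constructor installed, a duplicate key fails loudly instead of
# silently keeping the last value:
#
#   yaml.safe_load("a: 1\na: 2")   # raises ConstructorError: found duplicate key (a)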
# The public functions.
def validate_labels(filename):
"""
Validate that `filename` conforms to our labels.yaml schema.
"""
with open(filename) as f:
labels = yaml.safe_load(f)
LABELS_SCHEMA.validate(labels)
def validate_orgs(filename):
"""
Validate that `filename` conforms to our orgs.yaml schema.
"""
with open(filename) as f:
orgs = yaml.safe_load(f)
ORGS_SCHEMA.validate(orgs)
# keys should be sorted.
assert_sorted(orgs, "Keys in {}".format(filename))
def validate_people(filename):
"""
Validate that `filename` conforms to our people.yaml schema.
Supporting files are found in the same directory as `filename`.
"""
with open(filename) as f:
people = yaml.safe_load(f)
global ALL_ORGS, ALL_PEOPLE
with open(pathlib.Path(filename).parent / "orgs.yaml") as orgsf:
org_data = yaml.safe_load(orgsf)
ALL_ORGS = set(org_data)
for orgd in org_data.values():
name = orgd.get("name")
if name:
ALL_ORGS.add(name)
ALL_PEOPLE = set(people)
PEOPLE_SCHEMA.validate(people)
# keys should be sorted.
assert_sorted(people, "Keys in {}".format(filename))
def validate_salesforce_export(filename):
"""
Validate that `filename` is a Salesforce export we expect.
"""
with open(filename, encoding="cp1252") as fcsv:
reader = csv.DictReader(fcsv)
# fields are:
# "First Name","Last Name","Number of Active Ind. CLA Contracts","Title","Account Name","Number of Active Entity CLA Contracts","GitHub Username"
for row in reader:
acct = row["Account Name"]
if acct == "Opfocus Test":
# A bogus entry made by the vendor. skip it.
continue
# "Individual Contributors" and "Household Accounts" are both Salesforce artifacts.
# In both cases, these records represent unaffiliated individual contributors.
acct_valid = (acct in ALL_ORGS or acct == "Individual Contributors" or acct.endswith(" Household"))
assert acct_valid, f"Account Name is not a valid org: {acct}"
username = row["GitHub Username"]
assert github_username(username), f"GitHub Username is not valid: {username}"
def assert_sorted(strs, what):
"""
Assert that a sequence of strings is sorted.
Args:
strs (iterable of strings): the strings that must be sorted.
what (str): a description of what these are, for the failure message.
"""
strs = list(strs)
sstrs = sorted(strs)
if strs == sstrs:
return
lines = difflib.Differ().compare(strs, sstrs)
out_of_place = set(ln[2:] for ln in lines if ln.startswith(("-", "+")))
msg = "{} must be sorted. These are out of place: {}".format(
what, ", ".join(out_of_place)
)
assert False, msg
|
"""Three earth-gravity models for use with SGP4."""
from collections import namedtuple
from propagation import getgravconst
EarthGravity = namedtuple(
'EarthGravity',
'tumin mu radiusearthkm xke j2 j3 j4 j3oj2',
)
wgs72old = EarthGravity(*getgravconst('wgs72old'))
wgs72 = EarthGravity(*getgravconst('wgs72'))
wgs84 = EarthGravity(*getgravconst('wgs84'))
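# For example, wgs72.radiusearthkm should be the standard WGS-72 Earth radius
# (6378.135 km) and wgs72.mu the geocentric gravitational constant
# (~398600.8 km^3/s^2), assuming getgravconst returns the usual SGP4 values.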
|
import sqlite3, random
names = ['ruan', 'stefan', 'philip', 'norman', 'frank', 'pete', 'johnny', 'peter', 'adam']
cities = ['cape town', 'johannesburg', 'pretoria', 'dublin', 'kroonstad', 'bloemfontein', 'port elizabeth', 'auckland', 'sydney']
lastnames = ['smith', 'bekker', 'admams', 'phillips', 'james', 'adamson']
words = ['some really random text', 'this is more random text with stupid words', 'this is a hello world string', 'this is going to be fun - bhla blah blah blah blah bhla blah blah blah blahbhla blah blah blah blahbhla blah blah blah blahbhla blah blah blah blahbhla blah blah blah blahbhla blah blah blah blah']
conn = sqlite3.connect('database-large.db')
conn.execute('CREATE TABLE IF NOT EXISTS people (name STRING, age INTEGER, surname STRING, city STRING, favorite_words STRING)')
# Parameterized queries avoid the quoting problems of string-formatted SQL;
# executemany batches the five random rows generated per loop iteration.
for x in range(1, 1000000):
rows = [(random.choice(names), random.randint(18, 40), random.choice(lastnames),
random.choice(cities), random.choice(words)) for _ in range(5)]
conn.executemany('INSERT INTO people VALUES (?, ?, ?, ?, ?)', rows)
conn.commit()
conn.close()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: comment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from idcmanager_sdk.model.topboard import issue_basic_pb2 as idcmanager__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
from idcmanager_sdk.model.cmdb import user_pb2 as idcmanager__sdk_dot_model_dot_cmdb_dot_user__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='comment.proto',
package='topboard',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboard'),
serialized_pb=_b('\n\rcomment.proto\x12\x08topboard\x1a/idcmanager_sdk/model/topboard/issue_basic.proto\x1a$idcmanager_sdk/model/cmdb/user.proto\"\x9b\x01\n\x07\x43omment\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninstanceId\x18\x02 \x01(\t\x12#\n\x05issue\x18\x03 \x03(\x0b\x32\x14.topboard.IssueBasic\x12\r\n\x05\x63time\x18\x04 \x01(\t\x12\x0c\n\x04\x62ody\x18\x05 \x01(\t\x12\x10\n\x08parentId\x18\x06 \x01(\t\x12\x1a\n\x06\x61uthor\x18\x07 \x03(\x0b\x32\n.cmdb.UserBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboardb\x06proto3')
,
dependencies=[idcmanager__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,idcmanager__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,])
_COMMENT = _descriptor.Descriptor(
name='Comment',
full_name='topboard.Comment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Comment.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topboard.Comment.instanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='issue', full_name='topboard.Comment.issue', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='topboard.Comment.ctime', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='body', full_name='topboard.Comment.body', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parentId', full_name='topboard.Comment.parentId', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='author', full_name='topboard.Comment.author', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=270,
)
_COMMENT.fields_by_name['issue'].message_type = idcmanager__sdk_dot_model_dot_topboard_dot_issue__basic__pb2._ISSUEBASIC
_COMMENT.fields_by_name['author'].message_type = idcmanager__sdk_dot_model_dot_cmdb_dot_user__pb2._USER
DESCRIPTOR.message_types_by_name['Comment'] = _COMMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Comment = _reflection.GeneratedProtocolMessageType('Comment', (_message.Message,), {
'DESCRIPTOR' : _COMMENT,
'__module__' : 'comment_pb2'
# @@protoc_insertion_point(class_scope:topboard.Comment)
})
_sym_db.RegisterMessage(Comment)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from server import startServer
startServer()
|
# -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.14.254'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
version = None
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
session = None # Session () by default
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
fees = {
'trading': {
'fee_loaded': False,
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'fee_loaded': False,
'withdraw': {},
'deposit': {},
},
}
ids = None
tickers = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
apiKey = ''
secret = ''
password = ''
uid = ''
twofa = False
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
limits = None
exceptions = None
headers = None
balance = None
orderbooks = None
orders = None
trades = None
currencies = None
options = None # Python does not allow defining properties at run time with setattr
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
}
# API method metainfo
has = {
'publicAPI': True,
'privateAPI': True,
'CORS': False,
'cancelOrder': True,
'cancelOrders': False,
'createDepositAddress': False,
'createOrder': True,
'createMarketOrder': True,
'createLimitOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchFundingFees': False,
'fetchL2OrderBook': True,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFees': False,
'fetchTradingLimits': False,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
last_http_response = None
last_json_response = None
last_response_headers = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
}
def __init__(self, config={}):
self.precision = {} if self.precision is None else self.precision
self.limits = {} if self.limits is None else self.limits
self.exceptions = {} if self.exceptions is None else self.exceptions
self.headers = {} if self.headers is None else self.headers
self.balance = {} if self.balance is None else self.balance
self.orderbooks = {} if self.orderbooks is None else self.orderbooks
self.orders = {} if self.orders is None else self.orders
self.trades = {} if self.trades is None else self.trades
self.currencies = {} if self.currencies is None else self.currencies
self.options = {} if self.options is None else self.options # Python does not allow defining properties at run time with setattr
self.decimalToPrecision = self.decimal_to_precision = decimal_to_precision
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# format camel case
for attr in dir(self):
if attr[0] != '_' and attr[-1] != '_' and '_' in attr:
conv = attr.split('_')
camel_case = conv[0] + ''.join(i[0].upper() + i[1:] for i in conv[1:])
setattr(self, camel_case, getattr(self, attr))
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 1.0,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
self.session = self.session if self.session else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
def __del__(self):
if self.session:
self.session.close()
def describe(self):
return {}
def define_rest_api(self, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
partial = functools.partial(getattr(self, method_name), url, api_type, uppercase_method)
setattr(self, camelcase, partial)
setattr(self, underscore, partial)
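# A hypothetical api map illustrates what define_rest_api generates. Given
# api = {'public': {'get': ['ticker/{symbol}']}}, both of these partials are
# attached to the instance and route through request():
#
# exchange.publicGetTickerSymbol(params)       # camelCase form
# exchange.public_get_ticker_symbol(params)    # underscore form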
def raise_error(self, exception_type, url=None, method=None, error=None, details=None):
if error:
error = str(error)
output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None])
raise exception_type(output)
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
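# For example, with enableRateLimit = True and the default rateLimit of 2000,
# two back-to-back REST calls end up at least two seconds apart: throttle()
# sleeps for whatever remains of the 2000 ms window.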
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def handle_errors(self, code, reason, url, method, headers, body):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
if body:
body = body.encode()
self.session.cookies.clear()
response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies
)
self.last_http_response = response.text
self.last_response_headers = response.headers
if self.verbose:
print("\nResponse:", method, url, str(response.status_code), str(response.headers), self.last_http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, response.headers, self.last_http_response)
response.raise_for_status()
except Timeout as e:
self.raise_error(RequestTimeout, url, method, e)
except TooManyRedirects as e:
self.raise_error(ExchangeError, url, method, e)
except SSLError as e:
self.raise_error(ExchangeError, url, method, e)
except HTTPError as e:
self.handle_errors(response.status_code, response.reason, url, method, self.last_response_headers, self.last_http_response)
self.handle_rest_errors(e, response.status_code, self.last_http_response, url, method)
self.raise_error(ExchangeError, url, method, e, self.last_http_response)
except RequestException as e: # base exception class
self.raise_error(ExchangeError, url, method, e, self.last_http_response)
self.handle_errors(response.status_code, response.reason, url, method, None, self.last_http_response)
return self.handle_rest_response(self.last_http_response, url, method, headers, body)
def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'):
error = None
if http_status_code in [418, 429]:
error = DDoSProtection
elif http_status_code in [404, 409, 500, 501, 502, 520, 521, 522, 525]:
error = ExchangeNotAvailable
elif http_status_code in [422]:
error = ExchangeError
elif http_status_code in [400, 403, 405, 503, 530]:
# special case to detect ddos protection
error = ExchangeNotAvailable
if response:
ddos_protection = re.search('(cloudflare|incapsula)', response, flags=re.IGNORECASE)
if ddos_protection:
error = DDoSProtection
elif http_status_code in [408, 504]:
error = RequestTimeout
elif http_status_code in [401, 511]:
error = AuthenticationError
if error:
self.raise_error(error, url, method, exception if exception else http_status_code, response)
def handle_rest_response(self, response, url, method='GET', headers=None, body=None):
try:
if self.parseJsonResponse:
last_json_response = json.loads(response) if len(response) > 1 else None
self.last_json_response = last_json_response
return last_json_response
else:
return response
except ValueError as e: # ValueError == JsonDecodeError
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
self.raise_error(DDoSProtection, url, method, None, response)
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
self.raise_error(ExchangeNotAvailable, url, method, None, message)
self.raise_error(ExchangeError, url, method, e, response)
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key:
value = float(dictionary[key])
else:
value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value
except ValueError as e:
value = default_value
return value
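# For example, safe_float({'price': '0.1'}, 'price') returns 0.1, while
# safe_float({'price': 'n/a'}, 'price', 0.0) swallows the ValueError and
# falls back to the default.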
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if key is None or (key not in dictionary):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def truncate(num, precision=0):
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
if precision > 0:
parts = ('%f' % Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
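# For example, deep_extend merges nested dicts recursively, with later
# arguments winning on conflicts:
#
# deep_extend({'a': {'x': 1}}, {'a': {'y': 2}})   # -> {'a': {'x': 1, 'y': 2}}
# deep_extend({'a': {'x': 1}}, {'a': 3})          # -> {'a': 3}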
@staticmethod
def filter_by(array, key, value=None):
if value:
grouped = Exchange.group_by(array, key)
if value in grouped:
return grouped[value]
return []
return array
@staticmethod
def filterBy(array, key, value=None):
return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
for element in array:
if (key in element) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
for key in params:
string = string.replace('{' + key + '}', str(params[key]))
return string
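# For example:
#
# extract_params('orders/{id}/trades')               # -> ['id']
# implode_params('orders/{id}/trades', {'id': 42})   # -> 'orders/42/trades'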
@staticmethod
def url(path, params={}):
result = Exchange.implode_params(path, params)
query = Exchange.omit(params, Exchange.extract_params(path))
if query:
result += '?' + _urlencode.urlencode(query)
return result
@staticmethod
def urlencode(params={}):
if (type(params) is dict) or isinstance(params, collections.OrderedDict):
return _urlencode.urlencode(params)
return params
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
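# For example, aggregate sums the volumes at equal price levels and drops
# zero-volume entries:
#
# aggregate([[100.0, 1.0], [100.0, 2.0], [99.5, 0.0]])   # -> [[100.0, 3.0]]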
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, int):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + '{:03d}'.format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1')
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
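# For example, assuming the standard Unix epoch:
#
# parse8601('2018-06-12T09:30:00.123Z')   # -> 1528795800123 (UTC milliseconds)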
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_to_string(s):
return s.decode('ascii')
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'):
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encodedHeader = Exchange.base64urlencode(header)
encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encodedHeader + '.' + encodedData
hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary')
signature = Exchange.base64urlencode(hmac)
return token + '.' + signature
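# The result is the usual three base64url segments joined by dots; with a
# hypothetical payload and secret:
#
# Exchange.jwt({'sub': 'user'}, 'secret')   # -> '<header>.<payload>.<signature>'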
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
self.raise_error(AuthenticationError, details='requires `' + key + '`')
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': 0.0,
'used': 0.0,
'total': 0.0,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(cost))
def price_to_precision(self, symbol, price):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(price))
def amount_to_precision(self, symbol, amount):
return self.truncate(amount, self.markets[symbol]['precision']['amount'])
def amount_to_string(self, symbol, amount):
return self.truncate_to_string(amount, self.markets[symbol]['precision']['amount'])
def amount_to_lots(self, symbol, amount):
lot = self.markets[symbol]['lot']
return self.amount_to_precision(symbol, math.floor(amount / lot) * lot)
def fee_to_precision(self, symbol, fee):
return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(fee))
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if 'baseId' in market else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if 'quoteId' in market else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
markets = self.fetch_markets()
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
return self.set_markets(markets, currencies)
def populate_fees(self):
if not (hasattr(self, 'markets') or hasattr(self, 'currencies')):
return
for currency, data in self.currencies.items(): # try load withdrawal fees from currencies
if 'fee' in data and data['fee'] is not None:
self.fees['funding']['withdraw'][currency] = data['fee']
self.fees['funding']['fee_loaded'] = True
# find a way to populate trading fees from markets
def load_fees(self):
self.load_markets()
self.populate_fees()
if not (self.has['fetchTradingFees'] or self.has['fetchFundingFees']):
return self.fees
fetched_fees = self.fetch_fees()
if fetched_fees['funding']:
self.fees['funding']['fee_loaded'] = True
if fetched_fees['trading']:
self.fees['trading']['fee_loaded'] = True
self.fees = self.deep_extend(self.fees, fetched_fees)
return self.fees
def fetch_markets(self):
return self.to_array(self.markets)
def fetch_fees(self):
trading = {}
funding = {}
try:
trading = self.fetch_trading_fees()
except AuthenticationError:
pass
except AttributeError:
pass
try:
funding = self.fetch_funding_fees()
except AuthenticationError:
pass
except AttributeError:
pass
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.raise_error(NotSupported, details='create_order() not implemented yet')
def cancel_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='cancel_order() not implemented yet')
def fetch_bids_asks(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all prices at once with a single call to fetch_bids_asks() for now')
def fetch_tickers(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, market=None):
order = self.fetch_order(id)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order() is not implemented yet')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_orders() is not implemented yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_open_orders() is not implemented yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_closed_orders() is not implemented yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_my_trades() is not implemented yet')
def fetch_order_trades(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order_trades() is not implemented yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return result
    def parse_bid_ask(self, bidask, price_key=0, amount_key=1):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, 'info').keys()
for account in ['free', 'used', 'total']:
balance[account] = {}
for currency in currencies:
balance[account][currency] = balance[currency][account]
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
limits = response['limits']
keys = list(limits.keys())
for i in range(0, len(keys)):
symbol = keys[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], {
'limits': limits[symbol],
})
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
                # still within the same timeframe -> update the current candle
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
def parse_timeframe(self, timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' in unit:
scale = 60 * 60 * 24 * 365
elif 'M' in unit:
scale = 60 * 60 * 24 * 30
elif 'w' in unit:
scale = 60 * 60 * 24 * 7
elif 'd' in unit:
scale = 60 * 60 * 24
elif 'h' in unit:
scale = 60 * 60
else:
scale = 60 # 1m by default
return amount * scale
def parse_trades(self, trades, market=None, since=None, limit=None):
array = self.to_array(trades)
array = [self.parse_trade(trade, market) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None):
array = self.to_array(orders)
array = [self.parse_order(order, market) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
array = self.to_array(array)
if symbol:
array = [entry for entry in array if entry['symbol'] == symbol]
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
self.raise_error(ExchangeError, details='Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
self.raise_error(ExchangeError, details='Does not have currency code ' + str(code))
def find_market(self, string):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(string, basestring):
if string in self.markets_by_id:
return self.markets_by_id[string]
if string in self.markets:
return self.markets[string]
return string
def find_symbol(self, string, market=None):
if market is None:
market = self.find_market(string)
if isinstance(market, dict):
return market['symbol']
return string
def market(self, symbol):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
self.raise_error(ExchangeError, details='No market symbol ' + str(symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
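# --- Illustrative sketch (not part of the ccxt source above) ---
# build_ohlcv() buckets raw trades into fixed-width candles by flooring each
# trade timestamp to the start of its timeframe (whose width in seconds comes
# from parse_timeframe(), e.g. '5m' -> 300). The standalone function below
# re-implements that bucketing on plain dicts so the technique can be tested
# in isolation; the sample trades in the trailing comment are made up.
import math as _math

def _bucket_trades_into_candles(trades, timeframe_ms):
    candles = []  # each candle: [open_time, open, high, low, close, volume]
    for trade in sorted(trades, key=lambda t: t['timestamp']):
        opening_time = int(_math.floor(trade['timestamp'] / timeframe_ms) * timeframe_ms)
        if not candles or opening_time >= candles[-1][0] + timeframe_ms:
            # crossed into a new timeframe -> open a fresh candle
            price = trade['price']
            candles.append([opening_time, price, price, price, price, trade['amount']])
        else:
            # still inside the current timeframe -> fold the trade in
            candle = candles[-1]
            candle[2] = max(candle[2], trade['price'])  # high
            candle[3] = min(candle[3], trade['price'])  # low
            candle[4] = trade['price']                  # close
            candle[5] += trade['amount']                # volume
    return candles

# _bucket_trades_into_candles(
#     [{'timestamp': 60000, 'price': 10.0, 'amount': 1.0},
#      {'timestamp': 90000, 'price': 12.0, 'amount': 0.5},
#      {'timestamp': 125000, 'price': 11.0, 'amount': 2.0}], 60 * 1000)
# -> [[60000, 10.0, 12.0, 10.0, 12.0, 1.5], [120000, 11.0, 11.0, 11.0, 11.0, 2.0]]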
|
# _*_ coding:utf-8 _*_
import logging
import re
from pyquery import PyQuery as pq
from app.spider_store.utils.content_cleaner import cleaner
from app.spider_store.common import (get_content, get_location)
"""***猎奇网爬虫***"""
def lieqi_news_download(url):
i = 1
content_list = []
title = None
source = None
thumbnail_urls = None
while True:
if i == 1:
detail_url = url
else:
detail_url = url.replace(".html", '-{}.html'.format(i))
try:
            html = get_content(detail_url)
        except Exception:
            raise Exception("Timed out while fetching the article content")
        if re.search(r"很抱歉!您访问页面被外星人劫持了", html):  # the site's "page hijacked by aliens" error page marks the last page
break
doc = pq(html)
if i == 1:
            # title
title = doc('title').text()
if not title:
title = doc("div.contentLtopCnt.clearfix h1.title").text()
            # source
source = doc('div.contentLtopCnt.clearfix div.sourceShare div.source').children()
            # thumbnail
            try:
                thumbnail_urls = re.search(
                    r'var\s*detail_poster_src\s*=\s*[\'"](.*?)[\'"]',
                    html
                ).group(1)
if not re.match(r"http[s]?:", thumbnail_urls):
thumbnail_urls = "http:" + thumbnail_urls
thumbnail_urls = [thumbnail_urls]
except AttributeError:
pass
try:
source = re.search(r"</span>\s*<span>(.*?)</span>", str(source)).group(1)
except AttributeError:
raise AttributeError("获取来源失败")
            # preprocess the body content
div = doc('div.contentLtopCnt.clearfix div.contentTextCnt').html()
content_list.append(str(div))
i += 1
else:
            # preprocess the body content
div = doc('div.contentLtopCnt.clearfix div.contentTextCnt').html()
content_list.append(str(div))
i += 1
            # stop after a sane number of pages
if i >= 30:
break
try:
content = ''.join(content_list)
content = cleaner(content)
        logging.debug('content cleaning finished')
    except Exception:
        raise AssertionError("Failed to extract the article content")
    # collect image URLs from the article body
    image_urls = re.findall(r'src=[\'"](.*?)[\'"]', content, re.S)
    # findall returns an empty list when nothing matches
    assert image_urls, "No images found in the article"
    image_urls_final = []
    for image_url in image_urls:
        if re.match(r'https?:', image_url):
            image_urls_final.append(image_url)
        else:
            # protocol-relative URL: prefix a scheme (see the sketch after this function)
            image_urls_final.append('http:' + image_url)
    # fall back to the first article image as the thumbnail
    if not thumbnail_urls:
        thumbnail_urls = [image_urls_final[0]]
if (title and source):
data = {
"type": 'news',
"title": title,
"source": source,
"content": content,
"thumbnail_urls": thumbnail_urls,
"image_urls": image_urls_final,
}
else:
raise Exception("获取标题和来源失败")
return data
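# --- Illustrative sketch (not part of the original spider) ---
# the image handling above fixes protocol-relative URLs ("//cdn/...") by
# prefixing "http:". The same normalization as a compact helper; the sample
# URLs in the comment are made up:
def _normalize_scheme(urls):
    return [u if re.match(r'https?:', u) else 'http:' + u for u in urls]

# _normalize_scheme(['//img.example.com/a.jpg', 'https://example.com/b.png'])
# -> ['http://img.example.com/a.jpg', 'https://example.com/b.png']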
def lieqi_video_download(url):
assert url
pass
def lieqi_spider(url):
if news_type(url) == "video":
return lieqi_video_download(url)
else:
return lieqi_news_download(url)
def news_type(url):
    # determine the content type from the URL
    # e.g. http://www.lieqinews.com/a/n190414105400138.html
if not url:
return "video"
else:
return "news"
download = lieqi_spider
if __name__ == '__main__':
url = 'http://www.lieqinews.com/a/n190320202600183.html'
data = lieqi_spider(url)
for key, value in data.items():
print(key+':'+'{}'.format(value))
|
# coding=utf-8
"""
Contains an abstract base class that supports data transformations.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
import scipy
import scipy.misc
import scipy.ndimage
import time
import deepchem as dc
import tensorflow as tf
from deepchem.data import NumpyDataset
def undo_transforms(y, transformers):
"""Undoes all transformations applied."""
# Note that transformers have to be undone in reversed order
for transformer in reversed(transformers):
if transformer.transform_y:
y = transformer.untransform(y)
return y
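# --- Illustrative sketch (hypothetical transformers, not part of deepchem) ---
# undo_transforms() must walk the list in *reverse*: if y was produced as
# g(f(y0)), recovery is f_inv(g_inv(y)), so the transform applied last is
# undone first. Two toy invertible transforms make that concrete:
import math as _math

class _ToyLog(object):
  transform_y = True

  def untransform(self, y):
    return _math.exp(y) - 1  # inverse of y -> log(y + 1)

class _ToyScale(object):
  transform_y = True

  def untransform(self, y):
    return y / 2.0  # inverse of y -> 2 * y

# forward order [_ToyLog(), _ToyScale()] maps y0 -> 2 * log(y0 + 1), so
# undo_transforms(2 * _math.log(6.0), [_ToyLog(), _ToyScale()]) ~= 5.0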
def undo_grad_transforms(grad, tasks, transformers):
for transformer in reversed(transformers):
if transformer.transform_y:
grad = transformer.untransform_grad(grad, tasks)
return grad
def get_grad_statistics(dataset):
"""Computes and returns statistics of a dataset
This function assumes that the first task of a dataset holds the energy for
an input system, and that the remaining tasks holds the gradient for the
system.
"""
if len(dataset) == 0:
return None, None, None, None
y = dataset.y
energy = y[:, 0]
grad = y[:, 1:]
for i in range(energy.size):
grad[i] *= energy[i]
ydely_means = np.sum(grad, axis=0) / len(energy)
return grad, ydely_means
class Transformer(object):
"""
  Abstract base class for data transformations.
"""
# Hack to allow for easy unpickling:
# http://stefaanlippens.net/pickleproblem
__module__ = os.path.splitext(os.path.basename(__file__))[0]
def __init__(self,
transform_X=False,
transform_y=False,
transform_w=False,
dataset=None):
"""Initializes transformation based on dataset statistics."""
self.dataset = dataset
self.transform_X = transform_X
self.transform_y = transform_y
self.transform_w = transform_w
    # Exactly one of transform_X, transform_y, transform_w must be set
assert transform_X or transform_y or transform_w
# Use fact that bools add as ints in python
assert (transform_X + transform_y + transform_w) == 1
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays."""
raise NotImplementedError(
"Each Transformer is responsible for its own transform_array method.")
def untransform(self, z):
"""Reverses stored transformation on provided data."""
raise NotImplementedError(
"Each Transformer is responsible for its own untransfomr method.")
def transform(self, dataset, parallel=False):
"""
Transforms all internally stored data.
Adds X-transform, y-transform columns to metadata.
"""
_, y_shape, w_shape, _ = dataset.get_shape()
if y_shape == tuple() and self.transform_y:
raise ValueError("Cannot transform y when y_values are not present")
if w_shape == tuple() and self.transform_w:
raise ValueError("Cannot transform w when w_values are not present")
return dataset.transform(lambda X, y, w: self.transform_array(X, y, w))
def transform_on_array(self, X, y, w):
"""
Transforms numpy arrays X, y, and w
"""
X, y, w = self.transform_array(X, y, w)
return X, y, w
class NormalizationTransformer(Transformer):
def __init__(self,
transform_X=False,
transform_y=False,
transform_w=False,
dataset=None,
transform_gradients=False):
"""Initialize normalization transformation."""
if transform_X:
X_means, X_stds = dataset.get_statistics(X_stats=True, y_stats=False)
self.X_means = X_means
self.X_stds = X_stds
elif transform_y:
y_means, y_stds = dataset.get_statistics(X_stats=False, y_stats=True)
self.y_means = y_means
# Control for pathological case with no variance.
y_stds = np.array(y_stds)
y_stds[y_stds == 0] = 1.
self.y_stds = y_stds
self.transform_gradients = transform_gradients
if self.transform_gradients:
true_grad, ydely_means = get_grad_statistics(dataset)
self.grad = np.reshape(true_grad, (true_grad.shape[0], -1, 3))
self.ydely_means = ydely_means
super(NormalizationTransformer, self).__init__(
transform_X=transform_X,
transform_y=transform_y,
transform_w=transform_w,
dataset=dataset)
def transform(self, dataset, parallel=False):
return super(NormalizationTransformer, self).transform(
dataset, parallel=parallel)
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays."""
if self.transform_X:
X = np.nan_to_num((X - self.X_means) / self.X_stds)
if self.transform_y:
y = np.nan_to_num((y - self.y_means) / self.y_stds)
return (X, y, w)
def untransform(self, z):
"""
Undo transformation on provided data.
"""
if self.transform_X:
return z * self.X_stds + self.X_means
elif self.transform_y:
y_stds = self.y_stds
y_means = self.y_means
n_tasks = self.y_stds.shape[0]
z_shape = list(z.shape)
# Get the reversed shape of z: (..., n_tasks, batch_size)
z_shape.reverse()
# Find the task dimension of z
for dim in z_shape:
if dim != n_tasks and dim == 1:
# Prevent broadcasting on wrong dimension
y_stds = np.expand_dims(y_stds, -1)
y_means = np.expand_dims(y_means, -1)
return z * y_stds + y_means
def untransform_grad(self, grad, tasks):
"""
Undo transformation on gradient.
"""
if self.transform_y:
grad_means = self.y_means[1:]
energy_var = self.y_stds[0]
grad_var = 1 / energy_var * (
self.ydely_means - self.y_means[0] * self.y_means[1:])
energy = tasks[:, 0]
transformed_grad = []
for i in range(energy.size):
Etf = energy[i]
grad_Etf = grad[i].flatten()
grad_E = Etf * grad_var + energy_var * grad_Etf + grad_means
grad_E = np.reshape(grad_E, (-1, 3))
transformed_grad.append(grad_E)
transformed_grad = np.asarray(transformed_grad)
return transformed_grad
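# --- Illustrative sketch (not part of deepchem) ---
# NormalizationTransformer is plain per-task z-scoring: transform_array
# subtracts the stored mean and divides by the stored std, and untransform
# reverses it. The same arithmetic in bare numpy, on made-up data:
def _zscore_roundtrip():
  y = np.array([[1.0], [2.0], [3.0]])
  mean, std = y.mean(axis=0), y.std(axis=0)
  z = (y - mean) / std  # what transform_array does to y
  assert np.allclose(z * std + mean, y)  # what untransform does
  return z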
class ClippingTransformer(Transformer):
"""Clip large values in datasets.
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> transformer = dc.trans.ClippingTransformer(transform_X=True)
>>> dataset = transformer.transform(dataset)
"""
def __init__(self,
transform_X=False,
transform_y=False,
transform_w=False,
dataset=None,
x_max=5.,
y_max=500.):
"""Initialize clipping transformation.
Parameters:
----------
transform_X: bool, optional (default False)
Whether to transform X
transform_y: bool, optional (default False)
Whether to transform y
transform_w: bool, optional (default False)
Whether to transform w
dataset: dc.data.Dataset object, optional
Dataset to be transformed
x_max: float, optional
Maximum absolute value for X
y_max: float, optional
Maximum absolute value for y
"""
super(ClippingTransformer, self).__init__(
transform_X=transform_X,
transform_y=transform_y,
transform_w=transform_w,
dataset=dataset)
assert not transform_w
self.x_max = x_max
self.y_max = y_max
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays.
Parameters:
----------
X: np.ndarray
Features
y: np.ndarray
Tasks
w: np.ndarray
Weights
Returns:
-------
X: np.ndarray
Transformed features
y: np.ndarray
Transformed tasks
w: np.ndarray
Transformed weights
"""
if self.transform_X:
X[X > self.x_max] = self.x_max
X[X < (-1.0 * self.x_max)] = -1.0 * self.x_max
if self.transform_y:
y[y > self.y_max] = self.y_max
y[y < (-1.0 * self.y_max)] = -1.0 * self.y_max
return (X, y, w)
def untransform(self, z):
raise NotImplementedError(
"Cannot untransform datasets with ClippingTransformer.")
class LogTransformer(Transformer):
def __init__(self,
transform_X=False,
transform_y=False,
features=None,
tasks=None,
dataset=None):
    """Initialize log transformation."""
    self.features = features
    self.tasks = tasks
super(LogTransformer, self).__init__(
transform_X=transform_X, transform_y=transform_y, dataset=dataset)
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays."""
if self.transform_X:
num_features = len(X[0])
if self.features is None:
X = np.log(X + 1)
else:
for j in range(num_features):
if j in self.features:
X[:, j] = np.log(X[:, j] + 1)
else:
X[:, j] = X[:, j]
if self.transform_y:
num_tasks = len(y[0])
if self.tasks is None:
y = np.log(y + 1)
else:
for j in range(num_tasks):
if j in self.tasks:
y[:, j] = np.log(y[:, j] + 1)
else:
y[:, j] = y[:, j]
return (X, y, w)
def untransform(self, z):
"""
Undo transformation on provided data.
"""
if self.transform_X:
num_features = len(z[0])
if self.features is None:
return np.exp(z) - 1
else:
for j in range(num_features):
if j in self.features:
z[:, j] = np.exp(z[:, j]) - 1
else:
z[:, j] = z[:, j]
return z
elif self.transform_y:
num_tasks = len(z[0])
if self.tasks is None:
return np.exp(z) - 1
else:
for j in range(num_tasks):
if j in self.tasks:
z[:, j] = np.exp(z[:, j]) - 1
else:
z[:, j] = z[:, j]
return z
class BalancingTransformer(Transformer):
"""Balance positive and negative examples for weights."""
def __init__(self,
transform_X=False,
transform_y=False,
transform_w=False,
dataset=None,
seed=None):
super(BalancingTransformer, self).__init__(
transform_X=transform_X,
transform_y=transform_y,
transform_w=transform_w,
dataset=dataset)
# BalancingTransformer can only transform weights.
assert not transform_X
assert not transform_y
assert transform_w
# Compute weighting factors from dataset.
y = self.dataset.y
w = self.dataset.w
# Ensure dataset is binary
np.testing.assert_allclose(sorted(np.unique(y)), np.array([0., 1.]))
weights = []
for ind, task in enumerate(self.dataset.get_task_names()):
task_w = w[:, ind]
task_y = y[:, ind]
# Remove labels with zero weights
task_y = task_y[task_w != 0]
num_positives = np.count_nonzero(task_y)
num_negatives = len(task_y) - num_positives
if num_positives > 0:
pos_weight = float(num_negatives) / num_positives
else:
pos_weight = 1
neg_weight = 1
weights.append((neg_weight, pos_weight))
self.weights = weights
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays."""
w_balanced = np.zeros_like(w)
for ind, task in enumerate(self.dataset.get_task_names()):
task_y = y[:, ind]
task_w = w[:, ind]
zero_indices = np.logical_and(task_y == 0, task_w != 0)
one_indices = np.logical_and(task_y == 1, task_w != 0)
w_balanced[zero_indices, ind] = self.weights[ind][0]
w_balanced[one_indices, ind] = self.weights[ind][1]
return (X, y, w_balanced)
class CDFTransformer(Transformer):
"""Histograms the data and assigns values based on sorted list."""
"""Acts like a Cumulative Distribution Function (CDF)."""
def __init__(self, transform_X=False, transform_y=False, dataset=None,
bins=2):
self.transform_X = transform_X
self.transform_y = transform_y
self.bins = bins
self.y = dataset.y
# self.w = dataset.w
# TODO (flee2): for transform_y, figure out weights
def transform(self, dataset, bins):
"""Performs CDF transform on data."""
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
w_t = w
ids_t = ids
if self.transform_X:
X_t = get_cdf_values(X, self.bins)
y_t = y
if self.transform_y:
X_t = X
y_t = get_cdf_values(y, self.bins)
# print("y will not be transformed by CDFTransformer, for now.")
return NumpyDataset(X_t, y_t, w_t, ids_t)
def untransform(self, z):
# print("Cannot undo CDF Transformer, for now.")
# Need this for transform_y
if self.transform_y:
return self.y
def get_cdf_values(array, bins):
# array = np.transpose(array)
n_rows = array.shape[0]
n_cols = array.shape[1]
array_t = np.zeros((n_rows, n_cols))
parts = n_rows / bins
hist_values = np.zeros(n_rows)
sorted_hist_values = np.zeros(n_rows)
for row in range(n_rows):
if np.remainder(bins, 2) == 1:
hist_values[row] = np.floor(np.divide(row, parts)) / (bins - 1)
else:
hist_values[row] = np.floor(np.divide(row, parts)) / bins
for col in range(n_cols):
order = np.argsort(array[:, col], axis=0)
sorted_hist_values = hist_values[order]
array_t[:, col] = sorted_hist_values
return array_t
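# --- Illustrative sketch (not part of deepchem) ---
# get_cdf_values() ranks each column with argsort and maps the rows onto
# `bins` evenly spaced empirical-CDF levels. On an already-sorted column the
# mapping is easy to read off:
def _cdf_demo():
  # the smaller half of the rows gets level 0.0, the larger half gets 0.5
  # -> array([[0.], [0.], [0.5], [0.5]])
  return get_cdf_values(np.array([[1.0], [2.0], [3.0], [4.0]]), 2)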
class PowerTransformer(Transformer):
"""Takes power n transforms of the data based on an input vector."""
def __init__(self, transform_X=False, transform_y=False, powers=[1]):
self.transform_X = transform_X
self.transform_y = transform_y
self.powers = powers
def transform(self, dataset):
"""Performs power transform on data."""
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
w_t = w
ids_t = ids
n_powers = len(self.powers)
if self.transform_X:
X_t = np.power(X, self.powers[0])
for i in range(1, n_powers):
X_t = np.hstack((X_t, np.power(X, self.powers[i])))
y_t = y
if self.transform_y:
# print("y will not be transformed by PowerTransformer, for now.")
y_t = np.power(y, self.powers[0])
for i in range(1, n_powers):
y_t = np.hstack((y_t, np.power(y, self.powers[i])))
X_t = X
"""
shutil.rmtree(dataset.data_dir)
os.makedirs(dataset.data_dir)
DiskDataset.from_numpy(dataset.data_dir, X_t, y_t, w_t, ids_t)
return dataset
"""
return NumpyDataset(X_t, y_t, w_t, ids_t)
def untransform(self, z):
# print("Cannot undo Power Transformer, for now.")
n_powers = len(self.powers)
    orig_len = z.shape[1] // n_powers
    z = z[:, :orig_len]
z = np.power(z, 1 / self.powers[0])
return z
class CoulombFitTransformer(Transformer):
"""Performs randomization and binarization operations on batches of Coulomb Matrix features during fit.
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.TensorflowMultiTaskFitTransformRegressor(n_tasks,
... [n_features, n_features], batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
n_features after fit_transform: 12
"""
def __init__(self, dataset):
"""Initializes CoulombFitTransformer.
Parameters:
----------
dataset: dc.data.Dataset object
"""
X = dataset.X
num_atoms = X.shape[1]
self.step = 1.0
self.noise = 1.0
self.triuind = (np.arange(num_atoms)[:, np.newaxis] <=
np.arange(num_atoms)[np.newaxis, :]).flatten()
self.max = 0
for _ in range(10):
self.max = np.maximum(self.max, self.realize(X).max(axis=0))
X = self.expand(self.realize(X))
self.nbout = X.shape[1]
self.mean = X.mean(axis=0)
self.std = (X - self.mean).std()
def realize(self, X):
"""Randomize features.
Parameters:
----------
X: np.ndarray
Features
Returns:
-------
X: np.ndarray
Randomized features
"""
def _realize_(x):
assert (len(x.shape) == 2)
inds = np.argsort(-(x**2).sum(axis=0)**.5 + np.random.normal(
0, self.noise, x[0].shape))
x = x[inds, :][:, inds] * 1
x = x.flatten()[self.triuind]
return x
return np.array([_realize_(z) for z in X])
def normalize(self, X):
"""Normalize features.
Parameters:
----------
X: np.ndarray
Features
Returns:
-------
X: np.ndarray
Normalized features
"""
return (X - self.mean) / self.std
def expand(self, X):
"""Binarize features.
Parameters:
----------
X: np.ndarray
Features
Returns:
-------
X: np.ndarray
Binarized features
"""
Xexp = []
for i in range(X.shape[1]):
for k in np.arange(0, self.max[i] + self.step, self.step):
Xexp += [np.tanh((X[:, i] - k) / self.step)]
return np.array(Xexp).T
def X_transform(self, X):
"""Perform Coulomb Fit transform on features.
Parameters:
----------
X: np.ndarray
Features
Returns:
-------
X: np.ndarray
Transformed features
"""
X = self.normalize(self.expand(self.realize(X)))
return X
def transform_array(self, X, y, w):
X = self.X_transform(X)
return (X, y, w)
def untransform(self, z):
raise NotImplementedError(
"Cannot untransform datasets with FitTransformer.")
class IRVTransformer():
"""Performs transform from ECFP to IRV features(K nearest neibours)."""
def __init__(self, K, n_tasks, dataset, transform_y=False, transform_x=False):
"""Initializes IRVTransformer.
Parameters:
----------
dataset: dc.data.Dataset object
train_dataset
    K: int
      number of nearest neighbours to count
n_tasks: int
number of tasks
"""
self.X = dataset.X
self.n_tasks = n_tasks
self.K = K
self.y = dataset.y
self.w = dataset.w
self.transform_x = transform_x
self.transform_y = transform_y
def realize(self, similarity, y, w):
"""find samples with top ten similarity values in the reference dataset
Parameters:
-----------
similarity: np.ndarray
similarity value between target dataset and reference dataset
should have size of (n_samples_in_target, n_samples_in_reference)
y: np.array
labels for a single task
w: np.array
weights for a single task
Return:
----------
features: list
n_samples * np.array of size (2*K,)
each array includes K similarity values and corresponding labels
"""
features = []
similarity_xs = similarity * np.sign(w)
[target_len, reference_len] = similarity_xs.shape
g_temp = tf.Graph()
values = []
top_labels = []
with g_temp.as_default():
with tf.device('/cpu:0'):
labels_tf = tf.constant(y)
similarity_placeholder = tf.placeholder(
dtype=tf.float64, shape=(None, reference_len))
value, indice = tf.nn.top_k(
similarity_placeholder, k=self.K + 1, sorted=True)
        # the tf graph here picks up the (K+1) highest similarity values
        # and their indices
top_label = tf.gather(labels_tf, indice)
# map the indices to labels
feed_dict = {}
with tf.Session() as sess:
for count in range(target_len // 100 + 1):
feed_dict[similarity_placeholder] = similarity_xs[count * 100:min((
count + 1) * 100, target_len), :]
# generating batch of data by slicing similarity matrix
# into 100*reference_dataset_length
fetched_values = sess.run([value, top_label], feed_dict=feed_dict)
values.append(fetched_values[0])
top_labels.append(fetched_values[1])
values = np.concatenate(values, axis=0)
top_labels = np.concatenate(top_labels, axis=0)
# concatenate batches of data together
for count in range(values.shape[0]):
if values[count, 0] == 1:
features.append(
np.concatenate([
values[count, 1:(self.K + 1)], top_labels[count, 1:(self.K + 1)]
]))
# highest similarity is 1: target is in the reference
# use the following K points
else:
features.append(
np.concatenate(
[values[count, 0:self.K], top_labels[count, 0:self.K]]))
# highest less than 1: target not in the reference, use top K points
return features
def X_transform(self, X_target):
""" Calculate similarity between target dataset(X_target) and
reference dataset(X): #(1 in intersection)/#(1 in union)
similarity = (X_target intersect X)/(X_target union X)
Parameters:
-----------
X_target: np.ndarray
fingerprints of target dataset
        should have the same length as X along the second axis
Returns:
----------
X_target: np.ndarray
features of size(batch_size, 2*K*n_tasks)
"""
X_target2 = []
n_features = X_target.shape[1]
print('start similarity calculation')
time1 = time.time()
similarity = IRVTransformer.matrix_mul(X_target, np.transpose(self.X)) / (
n_features - IRVTransformer.matrix_mul(1 - X_target,
np.transpose(1 - self.X)))
time2 = time.time()
print('similarity calculation takes %i s' % (time2 - time1))
for i in range(self.n_tasks):
X_target2.append(self.realize(similarity, self.y[:, i], self.w[:, i]))
return np.concatenate([z for z in np.array(X_target2)], axis=1)
@staticmethod
def matrix_mul(X1, X2, shard_size=5000):
""" Calculate matrix multiplication for big matrix,
X1 and X2 are sliced into pieces with shard_size rows(columns)
then multiplied together and concatenated to the proper size
"""
X1 = np.float_(X1)
X2 = np.float_(X2)
X1_shape = X1.shape
X2_shape = X2.shape
assert X1_shape[1] == X2_shape[0]
X1_iter = X1_shape[0] // shard_size + 1
X2_iter = X2_shape[1] // shard_size + 1
all_result = np.zeros((1,))
for X1_id in range(X1_iter):
result = np.zeros((1,))
for X2_id in range(X2_iter):
partial_result = np.matmul(X1[X1_id * shard_size:min((
X1_id + 1) * shard_size, X1_shape[0]), :],
X2[:, X2_id * shard_size:min((
X2_id + 1) * shard_size, X2_shape[1])])
        # calculate matrix multiplication on slices
if result.size == 1:
result = partial_result
else:
result = np.concatenate((result, partial_result), axis=1)
# concatenate the slices together
del partial_result
if all_result.size == 1:
all_result = result
else:
all_result = np.concatenate((all_result, result), axis=0)
del result
return all_result
def transform(self, dataset):
X_length = dataset.X.shape[0]
X_trans = []
for count in range(X_length // 5000 + 1):
X_trans.append(
self.X_transform(dataset.X[count * 5000:min((count + 1) * 5000,
X_length), :]))
X_trans = np.concatenate(X_trans, axis=0)
return NumpyDataset(X_trans, dataset.y, dataset.w, ids=None)
def untransform(self, z):
raise NotImplementedError(
"Cannot untransform datasets with IRVTransformer.")
class DAGTransformer(Transformer):
"""Performs transform from ConvMol adjacency lists to
DAG calculation orders
"""
def __init__(self,
max_atoms=50,
transform_X=True,
transform_y=False,
transform_w=False):
"""Initializes DAGTransformer.
Only X can be transformed
"""
self.max_atoms = max_atoms
self.transform_X = transform_X
self.transform_y = transform_y
self.transform_w = transform_w
assert self.transform_X
assert not self.transform_y
assert not self.transform_w
def transform_array(self, X, y, w):
"""Add calculation orders to ConvMol objects"""
if self.transform_X:
for idm, mol in enumerate(X):
X[idm].parents = self.UG_to_DAG(mol)
return (X, y, w)
def untransform(self, z):
raise NotImplementedError(
"Cannot untransform datasets with DAGTransformer.")
def UG_to_DAG(self, sample):
"""This function generates the DAGs for a molecule
"""
# list of calculation orders for DAGs
# stemming from one specific atom in the molecule
parents = []
# starting from the adjacency list derived by graphconv featurizer
UG = sample.get_adjacency_list()
# number of atoms, also number of DAGs
n_atoms = sample.get_num_atoms()
# DAG on a molecule with k atoms includes k steps of calculation,
# each step calculating graph features for one atom.
# `max_atoms` is the maximum number of steps
max_atoms = self.max_atoms
for count in range(n_atoms):
# each iteration generates the DAG starting from atom with index `count`
DAG = []
# list of lists, elements represent the calculation orders
# for atoms in the current graph
parent = [[] for i in range(n_atoms)]
# starting from the target atom with index `count`
current_atoms = [count]
# flags of whether the atom is already included in the DAG
atoms_indicator = np.zeros((n_atoms,))
# atom `count` is in the DAG
radial = 1
atoms_indicator[count] = radial
# recording number of radial propagation steps
while not np.all(atoms_indicator):
        # in the first loop, atoms directly connected to `count` are added to
        # the DAG, then atoms two bonds away are added in the second loop;
        # in general, atoms i bonds away are added in the i-th loop
if radial > n_atoms:
# when molecules have separate parts, starting from one part,
# it is not possible to include all atoms.
          # this break quits the loop in that case
break
# reinitialize targets for next iteration
next_atoms = []
radial = radial + 1
for current_atom in current_atoms:
for atom_adj in UG[current_atom]:
# atoms connected to current_atom
if atoms_indicator[atom_adj] == 0:
# generate the dependency map of current DAG
# atoms connected to `current_atoms`(and not included in the DAG)
# are added, and will be the `current_atoms` for next iteration.
DAG.append((current_atom, atom_adj))
atoms_indicator[atom_adj] = radial
next_atoms.append(atom_adj)
current_atoms = next_atoms
# DAG starts from the target atom, calculation should go in reverse
for edge in reversed(DAG):
# `edge[1]` is the parent of `edge[0]`
parent[edge[0]].append(edge[1] % max_atoms)
parent[edge[0]].extend(parent[edge[1]])
for i, order in enumerate(parent):
parent[i] = sorted(order, key=lambda x: atoms_indicator[x])
# after this loop, `parents[i]` includes all parents of atom i
for ids, atom in enumerate(parent):
# manually adding the atom index into its parents list
parent[ids].insert(0, ids % max_atoms)
# after this loop, `parents[i][0]` is i, `parents[i][1:]` are all parents of atom i
      # atoms with fewer parents (farther from the target atom) come first.
# graph features of atoms without parents will be first calculated,
# then atoms with more parents can be calculated in order
# based on previously calculated graph features.
# target atom of this DAG will be calculated in the last step
parent = sorted(parent, key=len)
for ids, atom in enumerate(parent):
n_par = len(atom)
# padding with `max_atoms`
if n_par < max_atoms:
parent[ids].extend([max_atoms for i in range(max_atoms - n_par)])
if n_par > max_atoms:
parent[ids] = parent[ids][:max_atoms]
if len(parent) > max_atoms:
parent = parent[-max_atoms:]
while len(parent) < max_atoms:
# padding
parent.insert(0, [max_atoms] * max_atoms)
# `parents[i]` is the calculation order for the DAG stemming from atom i,
# which is a max_atoms * max_atoms numpy array after padding
parents.append(np.array(parent))
return parents
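# --- Illustrative sketch (not part of deepchem) ---
# UG_to_DAG() is a breadth-first traversal: starting from each atom it labels
# neighbours with their bond distance ("radial") and records edges in
# discovery order, from which the per-atom calculation order is built. A stub
# molecule shaped like the 3-atom chain 0-1-2 makes the traversal visible:
class _StubMol(object):
  def get_adjacency_list(self):
    return [[1], [0, 2], [1]]  # chain: 0-1 and 1-2

  def get_num_atoms(self):
    return 3

# DAGTransformer(max_atoms=3).UG_to_DAG(_StubMol()) builds, for the DAG rooted
# at atom 0, the edge list [(0, 1), (1, 2)] before converting it to orders.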
class ImageTransformer(Transformer):
"""
Convert an image into width, height, channel
"""
def __init__(self,
size,
transform_X=True,
transform_y=False,
transform_w=False):
"""Initializes transformation based on dataset statistics."""
self.size = size
self.transform_X = True
self.transform_y = False
self.transform_w = False
def transform_array(self, X, y, w):
"""Transform the data in a set of (X, y, w) arrays."""
images = [scipy.ndimage.imread(x, mode='RGB') for x in X]
images = [scipy.misc.imresize(x, size=self.size) for x in images]
return np.array(images), y, w
class ANITransformer(Transformer):
"""Performs transform from 3D coordinates to ANI symmetry functions
"""
def __init__(self,
max_atoms=23,
radial_cutoff=4.6,
angular_cutoff=3.1,
radial_length=32,
angular_length=8,
atom_cases=[1, 6, 7, 8, 16],
atomic_number_differentiated=True,
coordinates_in_bohr=True,
transform_X=True,
transform_y=False,
transform_w=False):
"""
Only X can be transformed
"""
self.max_atoms = max_atoms
self.radial_cutoff = radial_cutoff
self.angular_cutoff = angular_cutoff
self.radial_length = radial_length
self.angular_length = angular_length
self.atom_cases = atom_cases
self.atomic_number_differentiated = atomic_number_differentiated
self.coordinates_in_bohr = coordinates_in_bohr
self.transform_X = transform_X
self.transform_y = transform_y
self.transform_w = transform_w
self.compute_graph = self.build()
self.sess = tf.Session(graph=self.compute_graph)
self.transform_batch_size = 32
assert self.transform_X
assert not self.transform_y
assert not self.transform_w
def transform_array(self, X, y, w):
if self.transform_X:
n_samples = X.shape[0]
X_out = []
num_transformed = 0
start = 0
batch_size = self.transform_batch_size
while True:
end = min((start + 1) * batch_size, X.shape[0])
X_batch = X[(start * batch_size):end]
output = self.sess.run(
[self.outputs], feed_dict={self.inputs: X_batch})[0]
X_out.append(output)
num_transformed = num_transformed + X_batch.shape[0]
print('%i samples transformed' % num_transformed)
start += 1
if end >= len(X):
break
X_new = np.concatenate(X_out, axis=0)
assert X_new.shape[0] == X.shape[0]
return (X_new, y, w)
def untransform(self, z):
raise NotImplementedError(
"Cannot untransform datasets with ANITransformer.")
def build(self):
""" tensorflow computation graph for transform """
graph = tf.Graph()
with graph.as_default():
self.inputs = tf.placeholder(tf.float32, shape=(None, self.max_atoms, 4))
atom_numbers = tf.cast(self.inputs[:, :, 0], tf.int32)
flags = tf.sign(atom_numbers)
flags = tf.to_float(tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2))
coordinates = self.inputs[:, :, 1:]
if self.coordinates_in_bohr:
coordinates = coordinates * 0.52917721092
d = self.distance_matrix(coordinates, flags)
d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)
radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
coordinates)
self.outputs = tf.concat(
[
tf.to_float(tf.expand_dims(atom_numbers, 2)), radial_sym,
angular_sym
],
axis=2)
return graph
def distance_matrix(self, coordinates, flags):
""" Generate distance matrix """
max_atoms = self.max_atoms
tensor1 = tf.stack([coordinates] * max_atoms, axis=1)
tensor2 = tf.stack([coordinates] * max_atoms, axis=2)
# Calculate pairwise distance
d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))
# Masking for valid atom index
d = d * flags
return d
def distance_cutoff(self, d, cutoff, flags):
""" Generate distance matrix with trainable cutoff """
# Cutoff with threshold Rc
d_flag = flags * tf.sign(cutoff - d)
d_flag = tf.nn.relu(d_flag)
d_flag = d_flag * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)
d = 0.5 * (tf.cos(np.pi * d / cutoff) + 1)
return d * d_flag
def radial_symmetry(self, d_cutoff, d, atom_numbers):
""" Radial Symmetry Function """
embedding = tf.eye(np.max(self.atom_cases) + 1)
atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
Rs = tf.to_float(np.reshape(Rs, (1, 1, 1, -1)))
ita = tf.to_float(np.reshape(ita, (1, 1, 1, -1)))
length = ita.get_shape().as_list()[-1]
d_cutoff = tf.stack([d_cutoff] * length, axis=3)
d = tf.stack([d] * length, axis=3)
out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
if self.atomic_number_differentiated:
out_tensors = []
for atom_type in self.atom_cases:
selected_atoms = tf.expand_dims(
tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
axis=3)
out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
return tf.concat(out_tensors, axis=2)
else:
return tf.reduce_sum(out, axis=2)
def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
""" Angular Symmetry Function """
max_atoms = self.max_atoms
embedding = tf.eye(np.max(self.atom_cases) + 1)
atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)
Rs = np.linspace(0., self.angular_cutoff, self.angular_length)
ita = 3 / (Rs[1] - Rs[0])**2
thetas = np.linspace(0., np.pi, self.angular_length)
zeta = float(self.angular_length**2)
ita, zeta, Rs, thetas = np.meshgrid(ita, zeta, Rs, thetas)
zeta = tf.to_float(np.reshape(zeta, (1, 1, 1, 1, -1)))
ita = tf.to_float(np.reshape(ita, (1, 1, 1, 1, -1)))
Rs = tf.to_float(np.reshape(Rs, (1, 1, 1, 1, -1)))
thetas = tf.to_float(np.reshape(thetas, (1, 1, 1, 1, -1)))
length = zeta.get_shape().as_list()[-1]
vector_distances = tf.stack([coordinates] * max_atoms, 1) - tf.stack(
[coordinates] * max_atoms, 2)
R_ij = tf.stack([d] * max_atoms, axis=3)
R_ik = tf.stack([d] * max_atoms, axis=2)
f_R_ij = tf.stack([d_cutoff] * max_atoms, axis=3)
f_R_ik = tf.stack([d_cutoff] * max_atoms, axis=2)
# Define angle theta = arccos(R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance))
vector_mul = tf.reduce_sum(tf.stack([vector_distances]*max_atoms, axis=3) * \
tf.stack([vector_distances]*max_atoms, axis=2), axis=4)
vector_mul = vector_mul * tf.sign(f_R_ij) * tf.sign(f_R_ik)
theta = tf.acos(tf.div(vector_mul, R_ij * R_ik + 1e-5))
R_ij = tf.stack([R_ij] * length, axis=4)
R_ik = tf.stack([R_ik] * length, axis=4)
f_R_ij = tf.stack([f_R_ij] * length, axis=4)
f_R_ik = tf.stack([f_R_ik] * length, axis=4)
theta = tf.stack([theta] * length, axis=4)
out_tensor = tf.pow((1. + tf.cos(theta - thetas))/2., zeta) * \
tf.exp(-ita * tf.square((R_ij + R_ik)/2. - Rs)) * f_R_ij * f_R_ik * 2
if self.atomic_number_differentiated:
out_tensors = []
for id_j, atom_type_j in enumerate(self.atom_cases):
for atom_type_k in self.atom_cases[id_j:]:
selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
selected_atoms = tf.expand_dims(
tf.expand_dims(selected_atoms, axis=1), axis=4)
out_tensors.append(
tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
return tf.concat(out_tensors, axis=2)
else:
return tf.reduce_sum(out_tensor, axis=(2, 3))
def get_num_feats(self):
n_feat = self.outputs.get_shape().as_list()[-1]
return n_feat
|
import gym
from .main import MarloEnvBuilder
def _register():
##########################################
# Version 0 of env
##########################################
gym.envs.registration.register(
id='MarLo-BuildbattleTrain1-v0',
entry_point=MarloEnvBuilder
)
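# --- Illustrative usage (an added sketch, not part of the original module) ---
# once _register() has run, the environment resolves through the standard gym
# factory; a hypothetical session (it needs a running Malmo/Minecraft backend
# behind MarloEnvBuilder):
#
#   _register()
#   env = gym.make('MarLo-BuildbattleTrain1-v0')
#   observation = env.reset()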
|
import math
from ..problem import Problem
from ..algorithm_common import AlgorithmCommon as AC
class function_XinSheYang(Problem):
def __init__(self, size):
        super().__init__(size)
self.MIN_VAL = -2*math.pi
self.MAX_VAL = math.pi
self.SCORE_MIN = -float('inf')
self.SCORE_MAX = 0
def init(self):
pass
def eval(self, np_arr):
sum1 = sum([abs(x) for x in np_arr])
sum2 = sum([math.sin(x**2) for x in np_arr])
n = sum1 * math.exp(-sum2)
return -n
def view(self, np_arr):
print("score: {}".format(self.eval(np_arr)))
|
# find a peak (any peak) if it exists.
import matplotlib.pyplot as plt
import numpy as np
NUM_POINTS = 51
# make random data the same for all instances
np.random.seed(0)
def build_peak_in_middle_data():
peak = np.floor(NUM_POINTS/2)
right = 2*peak - NUM_POINTS
uphill = np.arange(0, peak)
downhill = np.arange(peak, right, -1)
return np.concatenate((uphill, downhill))
plot_data = [
('Peak on left', np.arange(NUM_POINTS, 0, -1)),
('Peak on right', np.arange(NUM_POINTS)),
('Peak in middle', build_peak_in_middle_data()),
('Random peaks', np.random.randint(0, 6, NUM_POINTS))
]
class PeakFinder1D:
def __init__(self, name, func):
# name of algorithm
self.name = name
# func takes array of data, returns tuple of (peak_index, num_steps)
# returns -1 as peak_index if no peak was found
self.func = func
self.has_run = False
def run(self, plotting=False):
if not self.has_run:
self.results = {}
if plotting:
self.setup_plotting()
for i, (title, data) in enumerate(plot_data):
peak_index, num_steps = self.func(data)
self.results[title] = (peak_index, num_steps)
if plotting:
self.plot_subplot(i, title, data, peak_index)
if plotting:
self.fig.tight_layout()
self.has_run = True
def setup_plotting(self):
fig, axes = plt.subplots(2, 2)
plt.figtext(.5, .96, '1D Peak Finding', fontweight='bold', fontsize=16, ha='center')
plt.figtext(.5, .93, self.name, fontsize=10, ha='center')
self.fig = fig
self.flat_axes = np.ndarray.flatten(axes)
def plot_subplot(self, subplot_index, title, data, peak_index):
ax = self.flat_axes[subplot_index]
alignment = {0: 'left', 1: 'right'}.get(subplot_index, 'center')
ax.set_title(title, loc=alignment)
ax.plot(np.arange(NUM_POINTS), data)
if peak_index != -1:
ax.plot(peak_index, data[peak_index], 'ro')
def describe(self):
if not self.has_run:
self.run()
print('## ' + self.name + ':\n')
print(' | '.join(['Dataset', 'peak_index', 'num_steps']))
print('--|--|--')
for key, (peak_index, num_steps) in self.results.items():
print(' | '.join([key, str(peak_index), str(num_steps)]))
# straightforward solution
# start on the left, walk to the end, stop when you find a peak
def straightforward(data):
step = 0
prev_val = -np.inf
curr_val = data[0]
while step < NUM_POINTS:
next_val = -np.inf if step == NUM_POINTS-1 else data[step + 1]
if prev_val <= curr_val and curr_val >= next_val:
return (step, step+1)
prev_val = curr_val
curr_val = next_val
step += 1
return (-1, step+1)
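# divide-and-conquer solution (an added sketch, not in the original file)
# look at the middle element; if a neighbour is larger, a peak must exist in
# that half, so search there -- O(log n) steps instead of O(n). It follows the
# same (peak_index, num_steps) contract, so it could be appended to the
# `algorithms` list in main() below.
def divide_and_conquer(data):
    lo, hi, steps = 0, len(data) - 1, 0
    while lo <= hi:
        steps += 1
        mid = (lo + hi) // 2
        left = data[mid - 1] if mid > 0 else -np.inf
        right = data[mid + 1] if mid < len(data) - 1 else -np.inf
        if left <= data[mid] >= right:
            return (mid, steps)
        if left > data[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return (-1, steps)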
def main():
algorithms = [
        PeakFinder1D('Straightforward', straightforward)
]
for algorithm in algorithms:
algorithm.run(plotting=True)
algorithm.describe()
plt.show()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# File generated according to DDataPlotter.ui
# WARNING! All changes made in this file will be lost!
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from SciDataTool.GUI.WPlotManager.WPlotManager import WPlotManager
class Ui_DDataPlotter(object):
def setupUi(self, DDataPlotter):
if not DDataPlotter.objectName():
DDataPlotter.setObjectName("DDataPlotter")
DDataPlotter.setEnabled(True)
DDataPlotter.resize(1246, 884)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(DDataPlotter.sizePolicy().hasHeightForWidth())
DDataPlotter.setSizePolicy(sizePolicy)
DDataPlotter.setCursor(QCursor(Qt.ArrowCursor))
self.gridLayout = QGridLayout(DDataPlotter)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.is_auto_refresh = QCheckBox(DDataPlotter)
self.is_auto_refresh.setObjectName("is_auto_refresh")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(
self.is_auto_refresh.sizePolicy().hasHeightForWidth()
)
self.is_auto_refresh.setSizePolicy(sizePolicy1)
self.is_auto_refresh.setMinimumSize(QSize(0, 24))
self.is_auto_refresh.setMaximumSize(QSize(16777215, 24))
self.is_auto_refresh.setChecked(False)
self.horizontalLayout_2.addWidget(self.is_auto_refresh)
self.b_refresh = QPushButton(DDataPlotter)
self.b_refresh.setObjectName("b_refresh")
self.b_refresh.setEnabled(True)
sizePolicy1.setHeightForWidth(self.b_refresh.sizePolicy().hasHeightForWidth())
self.b_refresh.setSizePolicy(sizePolicy1)
self.b_refresh.setMinimumSize(QSize(0, 0))
self.b_refresh.setMaximumSize(QSize(16777215, 16777215))
self.b_refresh.setLayoutDirection(Qt.LeftToRight)
self.horizontalLayout_2.addWidget(self.b_refresh)
self.gridLayout.addLayout(self.horizontalLayout_2, 1, 1, 1, 1)
self.w_scroll = QScrollArea(DDataPlotter)
self.w_scroll.setObjectName("w_scroll")
sizePolicy2 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.w_scroll.sizePolicy().hasHeightForWidth())
self.w_scroll.setSizePolicy(sizePolicy2)
self.w_scroll.setMinimumSize(QSize(200, 0))
self.w_scroll.setMaximumSize(QSize(400, 16777215))
self.w_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.w_scroll.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 398, 823))
self.lay_scroll = QVBoxLayout(self.scrollAreaWidgetContents)
self.lay_scroll.setObjectName("lay_scroll")
self.lay_scroll.setContentsMargins(0, 0, 0, 0)
self.w_plot_manager = WPlotManager(self.scrollAreaWidgetContents)
self.w_plot_manager.setObjectName("w_plot_manager")
sizePolicy1.setHeightForWidth(
self.w_plot_manager.sizePolicy().hasHeightForWidth()
)
self.w_plot_manager.setSizePolicy(sizePolicy1)
self.w_plot_manager.setMinimumSize(QSize(0, 0))
self.lay_scroll.addWidget(self.w_plot_manager)
self.w_scroll.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.w_scroll, 0, 1, 1, 1)
self.plot_layout = QVBoxLayout()
self.plot_layout.setObjectName("plot_layout")
self.gridLayout.addLayout(self.plot_layout, 0, 0, 2, 1)
self.retranslateUi(DDataPlotter)
QMetaObject.connectSlotsByName(DDataPlotter)
# setupUi
def retranslateUi(self, DDataPlotter):
DDataPlotter.setWindowTitle(
QCoreApplication.translate("DDataPlotter", "Data Plot", None)
)
self.is_auto_refresh.setText(
QCoreApplication.translate("DDataPlotter", "Auto Refresh", None)
)
self.b_refresh.setText(
QCoreApplication.translate("DDataPlotter", "Refresh", None)
)
# retranslateUi
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
"""
Bulk walk Agent MIB (SNMPv2c)
+++++++++++++++++++++++++++++
Perform SNMP GETBULK operation with the following options:
* with SNMPv2c, community 'public'
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* for OID in tuple form
* with non-repeaters=0 and max-repeaters=25
This script performs similarly to the following Net-SNMP command:
| $ snmpbulkwalk -v2c -c public -ObentU -Cn0 -Cr25 demo.snmplabs.com 1.3.6
"""#
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto.api import v2c
from time import time
# SNMP table header
headVars = [v2c.ObjectIdentifier((1, 3, 6))]
# Build PDU
reqPDU = v2c.GetBulkRequestPDU()
v2c.apiBulkPDU.setDefaults(reqPDU)
v2c.apiBulkPDU.setNonRepeaters(reqPDU, 0)
v2c.apiBulkPDU.setMaxRepetitions(reqPDU, 25)
v2c.apiBulkPDU.setVarBinds(reqPDU, [(x, v2c.null) for x in headVars])
# Build message
reqMsg = v2c.Message()
v2c.apiMessage.setDefaults(reqMsg)
v2c.apiMessage.setCommunity(reqMsg, 'public')
v2c.apiMessage.setPDU(reqMsg, reqPDU)
startedAt = time()
def cbTimerFun(timeNow):
if timeNow - startedAt > 3:
raise Exception("Request timed out")
# noinspection PyUnusedLocal
def cbRecvFun(transportDispatcher, transportDomain, transportAddress,
wholeMsg, reqPDU=reqPDU, headVars=headVars):
while wholeMsg:
rspMsg, wholeMsg = decoder.decode(wholeMsg, asn1Spec=v2c.Message())
rspPDU = v2c.apiMessage.getPDU(rspMsg)
# Match response to request
if v2c.apiBulkPDU.getRequestID(reqPDU) == v2c.apiBulkPDU.getRequestID(rspPDU):
# Format var-binds table
varBindTable = v2c.apiBulkPDU.getVarBindTable(reqPDU, rspPDU)
# Check for SNMP errors reported (status 2, noSuchName, is tolerated for interop with older agents)
errorStatus = v2c.apiBulkPDU.getErrorStatus(rspPDU)
if errorStatus and errorStatus != 2:
errorIndex = v2c.apiBulkPDU.getErrorIndex(rspPDU)
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBindTable[int(errorIndex) - 1] or '?'))
transportDispatcher.jobFinished(1)
break
# Report SNMP table
for tableRow in varBindTable:
for name, val in tableRow:
print('from: %s, %s = %s' % (transportAddress,
name.prettyPrint(),
val.prettyPrint()))
# Stop on EOM
for oid, val in varBindTable[-1]:
if not isinstance(val, v2c.Null):
break
else:
transportDispatcher.jobFinished(1)
continue
# Generate request for next row
v2c.apiBulkPDU.setVarBinds(
reqPDU, [(x, v2c.null) for x, y in varBindTable[-1]]
)
v2c.apiBulkPDU.setRequestID(reqPDU, v2c.getNextRequestID())
transportDispatcher.sendMessage(
encoder.encode(reqMsg), transportDomain, transportAddress
)
global startedAt
if time() - startedAt > 3:
raise Exception('Request timed out')
startedAt = time()
return wholeMsg
transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbRecvFun)
transportDispatcher.registerTimerCbFun(cbTimerFun)
transportDispatcher.registerTransport(
udp.DOMAIN_NAME, udp.UdpSocketTransport().openClientMode()
)
transportDispatcher.sendMessage(
encoder.encode(reqMsg), udp.DOMAIN_NAME, ('demo.snmplabs.com', 161)
)
transportDispatcher.jobStarted(1)
# Dispatcher will finish as job#1 counter reaches zero
transportDispatcher.runDispatcher()
transportDispatcher.closeDispatcher()
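# For contrast, a minimal sketch of the same walk using pysnmp's high-level
# hlapi API instead of the message-level API above; the endpoint and
# community string mirror this script, and error handling is abbreviated:
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, bulkCmd)
#
# for errorIndication, errorStatus, errorIndex, varBinds in bulkCmd(
#         SnmpEngine(), CommunityData('public'),
#         UdpTransportTarget(('demo.snmplabs.com', 161)), ContextData(),
#         0, 25,  # non-repeaters, max-repetitions
#         ObjectType(ObjectIdentity('1.3.6')),
#         lexicographicMode=False):
#     if errorIndication or errorStatus:
#         print(errorIndication or errorStatus.prettyPrint())
#         break
#     for varBind in varBinds:
#         print(' = '.join(x.prettyPrint() for x in varBind))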
|
"""Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.polys.euclidtools import (
dup_gcdex, dup_half_gcdex, dup_invert,
dup_euclidean_prs, dmp_euclidean_prs,
dup_primitive_prs, dmp_primitive_prs,
dup_subresultants, dmp_subresultants,
dup_prs_resultant, dmp_prs_resultant,
dmp_zz_collins_resultant,
dmp_qq_collins_resultant,
dup_resultant, dmp_resultant,
dup_discriminant, dmp_discriminant,
dup_zz_heu_gcd, dmp_zz_heu_gcd,
dup_qq_heu_gcd, dmp_qq_heu_gcd,
dup_rr_prs_gcd, dmp_rr_prs_gcd,
dup_ff_prs_gcd, dmp_ff_prs_gcd,
dup_inner_gcd, dmp_inner_gcd,
dup_lcm, dmp_lcm,
dmp_content, dmp_primitive,
dup_cancel, dmp_cancel)
from sympy.polys.densebasic import (
dmp_one_p,
dup_LC, dmp_LC,
dup_normal, dmp_normal)
from sympy.polys.densearith import (
dup_add,
dup_mul, dmp_mul,
dup_exquo)
from sympy.polys.densetools import (
dup_diff)
from sympy.polys.specialpolys import (
f_4, f_5, f_6,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3)
from sympy.polys.domains import ZZ, QQ, RR
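# Representation note: the dup_* functions operate on dense univariate
# polynomials stored as coefficient lists, highest-degree term first, so
# [1, -2, -6, 12, 15] stands for x**4 - 2*x**3 - 6*x**2 + 12*x + 15 and []
# is the zero polynomial. The dmp_* variants use recursively nested lists,
# one nesting level per variable, with the extra integer argument giving the
# number of variables minus one.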
def test_dup_gcdex():
f = dup_normal([1,-2,-6,12,15], QQ)
g = dup_normal([1,1,-4,-4], QQ)
s = [QQ(-1,5),QQ(3,5)]
t = [QQ(1,5),QQ(-6,5),QQ(2)]
h = [QQ(1),QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
f = dup_normal([1,4,0,-1,1], QQ)
g = dup_normal([1,0,-1,1], QQ)
s, t, h = dup_gcdex(f, g, QQ)
S, T, H = dup_gcdex(g, f, QQ)
assert dup_add(dup_mul(s, f, QQ),
dup_mul(t, g, QQ), QQ) == h
assert dup_add(dup_mul(S, g, QQ),
dup_mul(T, f, QQ), QQ) == H
f = dup_normal([2,0], QQ)
g = dup_normal([1,0,-16], QQ)
s = [QQ(1,32),QQ(0)]
t = [QQ(-1,16)]
h = [QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
def test_dup_invert():
assert dup_invert([QQ(2),QQ(0)], [QQ(1),QQ(0),QQ(-16)], QQ) == [QQ(1,32),QQ(0)]
def test_dup_euclidean_prs():
f = QQ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = QQ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_euclidean_prs(f, g, QQ) == [f, g,
[-QQ(5,9), QQ(0,1), QQ(1,9), QQ(0,1), -QQ(1,3)],
[-QQ(117,25), -QQ(9,1), QQ(441,25)],
[QQ(233150,19773), -QQ(102500,6591)],
[-QQ(1288744821,543589225)]]
def test_dup_primitive_prs():
f = ZZ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = ZZ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_primitive_prs(f, g, ZZ) == [f, g,
[-ZZ(5), ZZ(0), ZZ(1), ZZ(0), -ZZ(3)],
[ZZ(13), ZZ(25), -ZZ(49)],
[ZZ(4663), -ZZ(6150)],
[ZZ(1)]]
def test_dup_subresultants():
assert dup_resultant([], [], ZZ) == ZZ(0)
assert dup_resultant([ZZ(1)], [], ZZ) == ZZ(0)
assert dup_resultant([], [ZZ(1)], ZZ) == ZZ(0)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], ZZ)
g = dup_normal([3,0,5,0,-4,-9,21], ZZ)
a = dup_normal([15,0,-3,0,9], ZZ)
b = dup_normal([65,125,-245], ZZ)
c = dup_normal([9326,-12300], ZZ)
d = dup_normal([260708], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a, b, c, d]
assert dup_resultant(f, g, ZZ) == dup_LC(d, ZZ)
f = dup_normal([1,-2,1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([2,-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0, 1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 4
f = dup_normal([1,0,-1], ZZ)
g = dup_normal([1,-1,0,2], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([3,0,-1,0], ZZ)
g = dup_normal([5,0,1], ZZ)
assert dup_resultant(f, g, ZZ) == 64
f = dup_normal([1,-2,7], ZZ)
g = dup_normal([1,0,-1,5], ZZ)
assert dup_resultant(f, g, ZZ) == 265
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-15,74,-120], ZZ)
assert dup_resultant(f, g, ZZ) == -8640
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-10,29,-20], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0,0,-1], ZZ)
g = dup_normal([1,2,2,-1], ZZ)
assert dup_resultant(f, g, ZZ) == 16
f = dup_normal([1,0,0,0,0,0,0,0,-2], ZZ)
g = dup_normal([1,-1], ZZ)
assert dup_resultant(f, g, ZZ) == -1
def test_dmp_subresultants():
assert dmp_resultant([[]], [[]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[ZZ(1)]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
f = dmp_normal([[3,0],[],[-1,0,0,-4]], 1, ZZ)
g = dmp_normal([[1],[1,0,0,0],[-9]], 1, ZZ)
a = dmp_normal([[3,0,0,0,0],[1,0,-27,4]], 1, ZZ)
b = dmp_normal([[-3,0,0,-12,1,0,-54,8,729,-216,16]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a, b]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = dmp_normal([[-1],[],[],[5]], 1, ZZ)
g = dmp_normal([[3,1],[],[]], 1, ZZ)
a = dmp_normal([[45,30,5]], 1, ZZ)
b = dmp_normal([[675,675,225,25]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = [[[[[6]]]], [[[[-3]]], [[[-2]], [[]]]], [[[[1]], [[]]], [[[]]]]]
g = [[[[[1]]]], [[[[-1], [-1, 0]]]], [[[[1, 0], []]]]]
r = [[[[1]], [[-3], [-3, 0]], [[9, 0], []]], [[[-2], [-2, 0]], [[6],
[12, 0], [6, 0, 0]], [[-18, 0], [-18, 0, 0], []]], [[[4, 0],
[]], [[-12, 0], [-12, 0, 0], []], [[36, 0, 0], [], []]]]
assert dmp_zz_collins_resultant(f, g, 4, ZZ) == r
f = [[[[[QQ(1,1)]]]], [[[[QQ(-1,2)]]], [[[QQ(-1,3)]], [[]]]], [[[[QQ(1,6)]], [[]]], [[[]]]]]
g = [[[[[QQ(1,1)]]]], [[[[QQ(-1,1)], [QQ(-1,1), QQ(0, 1)]]]], [[[[QQ(1,1), QQ(0,1)], []]]]]
r = [[[[QQ(1,36)]], [[QQ(-1,12)], [QQ(-1,12), QQ(0,1)]], [[QQ(1,4), QQ(0,1)], []]],
[[[QQ(-1,18)], [QQ(-1,18), QQ(0,1)]], [[QQ(1,6)], [QQ(1,3), QQ(0,1)], [QQ(1,6),
QQ(0,1), QQ(0,1)]], [[QQ(-1,2), QQ(0,1)], [QQ(-1,2), QQ(0,1), QQ(0,1)], []]],
[[[QQ(1,9), QQ(0,1)], []], [[QQ(-1,3), QQ(0,1)], [QQ(-1,3), QQ(0,1), QQ(0,1)], []],
[[QQ(1,1), QQ(0,1), QQ(0,1)], [], []]]]
assert dmp_qq_collins_resultant(f, g, 4, QQ) == r
def test_dup_discriminant():
assert dup_discriminant([], ZZ) == 0
assert dup_discriminant([1,0], ZZ) == 1
assert dup_discriminant([1,3,9,-13], ZZ) == -11664
assert dup_discriminant([5,0,1,0,0,2], ZZ) == 31252160
assert dup_discriminant([1,2,6,-22,13], ZZ) == 0
assert dup_discriminant([12,0,0,15,30,1,0,1], ZZ) == -220289699947514112
def test_dmp_discriminant():
assert dmp_discriminant([], 0, ZZ) == 0
assert dmp_discriminant([[]], 1, ZZ) == []
assert dmp_discriminant([[1,0]], 1, ZZ) == []
assert dmp_discriminant([1,3,9,-13], 0, ZZ) == -11664
assert dmp_discriminant([5,0,1,0,0,2], 0, ZZ) == 31252160
assert dmp_discriminant([1,2,6,-22,13], 0, ZZ) == 0
assert dmp_discriminant([12,0,0,15,30,1,0,1], 0, ZZ) == -220289699947514112
assert dmp_discriminant([[1,0],[],[2,0]], 1, ZZ) == [-8,0,0]
assert dmp_discriminant([[1,0,2],[]], 1, ZZ) == [1]
assert dmp_discriminant([[[1],[]],[[1,0]]], 2, ZZ) == [[1]]
assert dmp_discriminant([[[[1]],[[]]],[[[1],[]]],[[[1,0]]]], 3, ZZ) == \
[[[-4, 0]], [[1], [], []]]
assert dmp_discriminant([[[[[1]]],[[[]]]],[[[[1]],[[]]]],[[[[1],[]]]],[[[[1,0]]]]], 4, ZZ) == \
[[[[-27,0,0]]],[[[18,0],[]],[[-4],[],[],[]]],[[[-4,0]],[[1],[],[]],[[]],[[]]]]
def test_dup_gcd():
assert dup_zz_heu_gcd([], [], ZZ) == ([], [], [])
assert dup_rr_prs_gcd([], [], ZZ) == ([], [], [])
assert dup_zz_heu_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_rr_prs_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_zz_heu_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_rr_prs_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_zz_heu_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_rr_prs_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_zz_heu_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_rr_prs_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_zz_heu_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_rr_prs_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_zz_heu_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_rr_prs_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_zz_heu_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_rr_prs_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_zz_heu_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_rr_prs_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_zz_heu_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_rr_prs_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_zz_heu_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_rr_prs_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_zz_heu_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_rr_prs_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_zz_heu_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_rr_prs_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_zz_heu_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_rr_prs_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_zz_heu_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_rr_prs_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_zz_heu_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
assert dup_rr_prs_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
f, g = [1, -31], [1, 0]
assert dup_zz_heu_gcd(f, g, ZZ) == ([1], f, g)
assert dup_rr_prs_gcd(f, g, ZZ) == ([1], f, g)
f = [1,8,21,22,8]
g = [1,6,11,6]
h = [1,3,2]
cff = [1,5,4]
cfg = [1,3]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,0,0,-4]
g = [1,0,4,0, 4]
h = [1,0,2]
cff = [1,0,-2]
cfg = [1,0, 2]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,1,0,-3,-3,8,2,-5]
g = [3,0,5,-0,-4,-9,21]
h = [1]
cff = f
cfg = g
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], QQ)
g = dup_normal([3,0,5,-0,-4,-9,21], QQ)
h = dup_normal([1], QQ)
assert dup_qq_heu_gcd(f, g, QQ) == (h, cff, cfg)
assert dup_ff_prs_gcd(f, g, QQ) == (h, cff, cfg)
f = [-352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272,
0, 0, 0, 0, 0, 0,
46818041807522713962450042363465092040687472354933295397472942006618953623327997952,
0, 0, 0, 0, 0, 0,
378182690892293941192071663536490788434899030680411695933646320291525827756032,
0, 0, 0, 0, 0, 0,
112806468807371824947796775491032386836656074179286744191026149539708928,
0, 0, 0, 0, 0, 0,
-12278371209708240950316872681744825481125965781519138077173235712,
0, 0, 0, 0, 0, 0,
289127344604779611146960547954288113529690984687482920704,
0, 0, 0, 0, 0, 0,
19007977035740498977629742919480623972236450681,
0, 0, 0, 0, 0, 0,
311973482284542371301330321821976049]
g = [365431878023781158602430064717380211405897160759702125019136,
0, 0, 0, 0, 0, 0,
197599133478719444145775798221171663643171734081650688,
0, 0, 0, 0, 0, 0,
-9504116979659010018253915765478924103928886144,
0, 0, 0, 0, 0, 0,
-311973482284542371301330321821976049]
f = dup_normal(f, ZZ)
g = dup_normal(g, ZZ)
assert dup_zz_heu_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
assert dup_rr_prs_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
f = [QQ(1,2),QQ(1),QQ(1,2)]
g = [QQ(1,2),QQ(1,2)]
h = [QQ(1), QQ(1)]
assert dup_qq_heu_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
assert dup_ff_prs_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
def test_dmp_gcd():
assert dmp_zz_heu_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_rr_prs_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_zz_heu_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_zz_heu_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_rr_prs_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_zz_heu_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_rr_prs_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_zz_heu_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_rr_prs_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_zz_heu_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_zz_heu_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_rr_prs_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_zz_heu_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_rr_prs_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_rr_prs_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_zz_heu_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_rr_prs_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_rr_prs_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_zz_heu_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_rr_prs_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_zz_heu_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
assert dmp_rr_prs_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
f, g = [[[[1,2,1],[1,1],[]]]], [[[[1,2,1]]]]
h, cff, cfg = [[[[1,1]]]], [[[[1,1],[1],[]]]], [[[[1,1]]]]
assert dmp_zz_heu_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_rr_prs_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_zz_heu_gcd(g, f, 3, ZZ) == (h, cfg, cff)
assert dmp_rr_prs_gcd(g, f, 3, ZZ) == (h, cfg, cff)
f, g, h = dmp_fateman_poly_F_1(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(4, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(6, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 6, ZZ)
assert H == h and dmp_mul(H, cff, 6, ZZ) == f \
and dmp_mul(H, cfg, 6, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(8, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 8, ZZ)
assert H == h and dmp_mul(H, cff, 8, ZZ) == f \
and dmp_mul(H, cfg, 8, ZZ) == g
f, g, h = dmp_fateman_poly_F_2(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(4, ZZ)
H, cff, cfg = dmp_inner_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f = [[QQ(1,2)],[QQ(1)],[QQ(1,2)]]
g = [[QQ(1,2)],[QQ(1,2)]]
h = [[QQ(1)],[QQ(1)]]
assert dmp_qq_heu_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
assert dmp_ff_prs_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
f = [[RR(2.1), RR(-2.2), RR(2.1)], []]
g = [[RR(1.0)], [], [], []]
assert dmp_ff_prs_gcd(f, g, 1, RR) == \
([[RR(1.0)], []], [[RR(2.1), RR(-2.2), RR(2.1)]], [[RR(1.0)], [], []])
def test_dup_lcm():
assert dup_lcm([2], [6], ZZ) == [6]
assert dup_lcm([2,0,0,0], [6,0], ZZ) == [6,0,0,0]
assert dup_lcm([2,0,0,0], [3,0], ZZ) == [6,0,0,0]
assert dup_lcm([1,1,0], [1,0], ZZ) == [1,1,0]
assert dup_lcm([1,1,0], [2,0], ZZ) == [2,2,0]
assert dup_lcm([1,2,0], [1,0], ZZ) == [1,2,0]
assert dup_lcm([2,1,0], [1,0], ZZ) == [2,1,0]
assert dup_lcm([2,1,0], [2,0], ZZ) == [4,2,0]
def test_dmp_lcm():
assert dmp_lcm([[2]], [[6]], 1, ZZ) == [[6]]
assert dmp_lcm([[1],[]], [[1,0]], 1, ZZ) == [[1,0],[]]
assert dmp_lcm([[2],[],[],[]], [[6,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[2],[],[],[]], [[3,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[1,0],[],[]], [[1,0,0],[]], 1, ZZ) == [[1,0,0],[],[]]
f = [[2,-3,-2,3,0,0],[]]
g = [[1,0,-2,0,1,0]]
h = [[2,-3,-4,6,2,-3,0,0],[]]
assert dmp_lcm(f, g, 1, ZZ) == h
f = [[1],[-3,0],[-9,0,0],[-5,0,0,0]]
g = [[1],[6,0],[12,0,0],[10,0,0,0],[3,0,0,0,0]]
h = [[1],[1,0],[-18,0,0],[-50,0,0,0],[-47,0,0,0,0],[-15,0,0,0,0,0]]
assert dmp_lcm(f, g, 1, ZZ) == h
def test_dmp_content():
assert dmp_content([[-2]], 1, ZZ) == [2]
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in range(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_content(F, 1, ZZ) == f
assert dmp_one_p(dmp_content(f_4, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_5, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_6, 3, ZZ), 2, ZZ)
def test_dmp_primitive():
assert dmp_primitive([[]], 1, ZZ) == ([], [[]])
assert dmp_primitive([[1]], 1, ZZ) == ([1], [[1]])
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in range(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_primitive(F, 1, ZZ) == (f,
[ dup_exquo(c, f, ZZ) for c in F ])
cont, f = dmp_primitive(f_4, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_4
cont, f = dmp_primitive(f_5, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_5
cont, f = dmp_primitive(f_6, 3, ZZ)
assert dmp_one_p(cont, 2, ZZ) and f == f_6
def test_dup_cancel():
f = ZZ.map([2, 0, -2])
g = ZZ.map([1, -2, 1])
p = [ZZ(2), ZZ(2)]
q = [ZZ(1), -ZZ(1)]
assert dup_cancel(f, g, ZZ) == (p, q)
assert dup_cancel(f, g, ZZ, include=False) == (ZZ(1), ZZ(1), p, q)
f = [-ZZ(1),-ZZ(2)]
g = [ ZZ(3),-ZZ(4)]
F = [ ZZ(1), ZZ(2)]
G = [-ZZ(3), ZZ(4)]
assert dup_cancel(f, g, ZZ) == (f, g)
assert dup_cancel(F, G, ZZ) == (f, g)
def test_dmp_cancel():
f = ZZ.map([[2], [0], [-2]])
g = ZZ.map([[1], [-2], [1]])
p = [[ZZ(2)], [ZZ(2)]]
q = [[ZZ(1)], [-ZZ(1)]]
assert dmp_cancel(f, g, 1, ZZ) == (p, q)
assert dmp_cancel(f, g, 1, ZZ, include=False) == (ZZ(1), ZZ(1), p, q)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 19:45:18 2019
@author: CHaithcock
"""
import sys
sys.path.insert(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')
import itertools
import numpy as np
import RHGeneratorConstants as gen
import RHState
c = 5
t = 2
c1 = np.arange(c//1 + 1) # num strips with 1 car
c2 = np.arange(c//2 + 1) # num strips with 2 cars
c3 = np.arange(c//3 + 1) # num strips with 3 cars
t1 = np.arange(t//1 + 1) # num strips with 1 truck
t2 = np.arange(t//2 + 1) # num strips with 2 trucks
'''
General Array Structure
c1 - num instances of TOPO_ONE_CAR
c2 - num instances of TOPO_TWO_CARS
c3 - num instances of TOPO_THREE_CARS
ct - num instances of TOPO_CAR_TRUCK
tc - num instances of TOPO_TRUCK_CAR
t1 - num instances of TOPO_ONE_TRUCK
t2 - num instances of TOPO_TWO_TRUCKS
'''
# Start with constructing TOPO Counts without CT or TC
C = [x for x in itertools.product(c1,c2,c3) if np.dot(x,[1,2,3]) == c]
T = [x for x in itertools.product(t1,t2) if np.dot(x,[1,2]) == t]
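# For c=5, t=2 these evaluate (in itertools.product order) to
#   C = [(0, 1, 1), (1, 2, 0), (2, 0, 1), (3, 1, 0), (5, 0, 0)]
#   T = [(0, 1), (2, 0)]
# i.e. every way to split 5 cars into 1/2/3-car strips and 2 trucks into
# 1/2-truck strips.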
set_no_ct = [ x + (0,) + (0,) + y for x in C for y in T]
# Construct the TOPO Counts with CT and TC
# Each count in C with c1 >= 1 is eligible to have a car replaced with CT or TC
# These are the only instances in which TC or CT can appear in the counts
set_with_ct = []
if c >= 1 and t >= 1:
C_CT = []
C_CT_eligible = [x for x in C if x[0] >= 1]
for (c1,c2,c3) in C_CT_eligible:
r = np.arange(min(c1,t)) + 1
C_CT = C_CT + [(c1-i,c2,c3,i,0) for i in r]
C_CT = C_CT + [(c1-i,c2,c3,0,i) for i in r]
T_CT = [x for x in itertools.product(t1,t2) if np.dot(x,[1,2]) == (t-1)]
set_with_ct = [x + y for x in C_CT for y in T_CT]
topo_set = set_no_ct + set_with_ct
STRIPS = ['C','CC','CCC','CT','TC','T','TT']
SLOTS = range(12)
EXIT_SLOT = 2
ROW_SLOTS = SLOTS[:6]
COL_SLOTS = SLOTS[6:]
# prototyping loop through topo_set
# for each strip_list s in topo_set -- need better name
# strip_list is a 7-elt list of strip_counts
s = topo_set[0]
s_tot_strips = sum(s)
# s = [c1,c2,c3,ct,tc,t1,t2]
# this strip count gives rise to a set of slot arrangements
s_topo_type_nested = [x for x in [ [STRIPS[i]]*s[i] for i in range(len(s))] if x]
# before the `if x` filter this looks like [['C','C'], ['CC'], [], [], ['CT'], [], ['TT']]
# marrying the counts in s with the list STRIPS
# flatten the above result into a single list of strips to be distributed
# across a combinatorial subset of rows/cols
s_topo_type = list(itertools.chain(*s_topo_type_nested))
# result looks something like ['C', 'C', 'CC', 'CT', 'TT']
s_topo_type_perms = list(itertools.permutations(s_topo_type))
# build out all possible permutations of s_topo_type
# this list of permutations will be combined with a particular subset of row/cols
# to build out the topo classes
# now find sets of rows/cols to pair with elts of s_topo_type_perms
s_all_slot_sets = [x for x in itertools.combinations(SLOTS,s_tot_strips) if EXIT_SLOT in x]
# Now loop through each slot set as each slot set gives rise to a set of
# topo classes
# for each slot in s_slot_set
s_slot_set = s_all_slot_sets[0]
# s_slot_set = (1, 2, 5, 9, 11) - a tuple of rows/cols selected to be filled
# with the strips given by s_topo_type
# one such distribution of elts of s_topo_type across one s_slot is a topo_class
# We now marry the permutation of topo strips
# with this subset of row/col slots
# some of the matches are incompatible with game rules
# must have a red car in exit row
# (index 2 matches EXIT_SLOT's position within this particular slot set)
s_slot_admissible_topo_types = [x for x in s_topo_type_perms if x[2] not in ['T','TT']]
topo_classes = []
for topo_type in s_slot_admissible_topo_types:
topo_class = [0]*12
for i in range(len(topo_type)):
topo_class[s_slot_set[i]] = topo_type[i]
topo_classes.append(topo_class)
# Each topo class defines a set of states. This set of states contains
# one or more components of the RH graph. This is the smallest such set
# I have figured out how to construct to feed into the graph algorithms.
topo_states = {}
def record_topo_class_state(topo_class,state,red_car_end_a):
print("\n\nRecording State")
state = RHState.RHState(state,red_car_end_a)
topo_class = tuple(topo_class)
topo_states.setdefault(topo_class, []).append(state)
# topo_class: 12-elt list indexed by slot - slots 0-5 are rows, 6-11 are columns, each holding a strip type or 0
def topo_class_states(topo_class,slot_set):
slots = list(slot_set)
slots.remove(EXIT_SLOT)
board_strips = gen.HORZ_STRIPS[topo_class[EXIT_SLOT]]
for strip in board_strips:
red_cars = np.where(np.array(strip)==6)
for i in range(0,len(red_cars[0]),2):
state = np.array([0]*36).reshape(6,6)
state[EXIT_SLOT] = strip
red_car_end_a = i + 12
print("\n\nStarting wtih red car end a: %d"%(red_car_end_a))
print(red_cars)
topo_class_states_recursion(state,topo_class,slots,red_car_end_a)
def topo_class_states_recursion(state,topo_class,slots,red_car_end_a):
#print ("\n\nentering recursion:")
#print(topo_class)
#print(slots)
#print(state)
if not slots:
record_topo_class_state(topo_class,state,red_car_end_a)
return
loc_slots = slots[:]
slot = loc_slots.pop()
if slot in ROW_SLOTS:
board_strips = gen.HORZ_STRIPS[topo_class[slot]]
for strip in board_strips:
state[slot] = strip
topo_class_states_recursion(state,topo_class,loc_slots,red_car_end_a)
elif slot in COL_SLOTS:
board_strips = gen.VERT_STRIPS[topo_class[slot]]
for strip in board_strips:
col = slot - 6 # COL_SLOTS are offset by the 6 row slots
x = state[:, col].copy()
# place the strip only where it does not collide with already-occupied
# cells (presumed intent; the original `strip == (x + strip) - x` check
# was vacuously true and indexed past the 6 columns)
if np.all(x * strip == 0):
state[:, col] = x + strip
topo_class_states_recursion(state, topo_class, loc_slots, red_car_end_a)
state[:, col] = x # backtrack
else:
raise ValueError("slot is not in ROW_SLOTS or COL_SLOTS",slot)
|
from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.db.base_class import Base
class Messages(Base):
id = Column(Integer, primary_key=True, index=True)
author_user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
target_user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
message = Column(String(256), nullable=False)
is_active = Column(Boolean, nullable=False, default=True)
created_at = Column(DateTime, server_default=func.now())
modified_at = Column(DateTime, server_default=func.now(), onupdate=func.now())
rule = relationship("MessageRules", back_populates="message", uselist=False)
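# Minimal usage sketch (SessionLocal and the seeded user ids are assumptions
# for illustration, not defined in this module):
#
# from app.db.session import SessionLocal
#
# db = SessionLocal()
# msg = Messages(author_user_id=1, target_user_id=2, message="hello")
# db.add(msg)
# db.commit()
# db.refresh(msg)  # pulls back server-side defaults such as created_at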
|
from .compress import make_new_fasttext_model, CompressedFastTextKeyedVectors # noqa - kept for backward compatibility
|
"""
@author: Cem Akpolat
@created: 2021-12-29
"""
|
# Scratch script: count jumps along list c (0 = clear cell). Take a double
# step whenever the cell two ahead is clear, otherwise a single step.
c = [0, 0, 1, 0, 0, 1, 0] # len = 7
#c = [0, 0, 0, 1, 0, 0] # len = 6
i = len(c)
j = 0
jumps = 0
while j < i:
print("jumps: " + str(jumps))
if (j + 2 < i) and c[j + 2] == 0: #safe to skip ahead
j += 1
print("bonus j: " + str(j))
j += 1
print("reg j: " + str(j))
if (j < i):
jumps += 1
print("total jumps:" + str(jumps))
|
#!/usr/bin/env python
from __future__ import print_function
import os
import random
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD # Communicator for the processes.
rank = comm.Get_rank() # What number process is this one?
size = comm.Get_size() # How many processes in total are there?
def check_Nproc(N):
"""
Ensures that an array with `N` elements can be equally divided among the
processes.
If not, abort the program.
Parameters
----------
N: Integer. Required.
Number of elements in array.
Returns
----------
None.
comm.Abort() is called if the elements cannot be equally divided among
processes.
"""
if N % size != 0:
print("Choose a number of processes that equally divides the number "
"of elements in the array. Current number of elements is {0} "
"and number of processes is {1}".format(N, size))
comm.Abort()
def hello_world():
"""
Prints Hello World on each rank.
Parameters
----------
None.
Returns
----------
None.
"""
print("I am Rank {0} of {1}: Hello World!".format(rank, size))
def send_recv():
"""
Manually sends data from rank 0 process to other processes.
If program is executed with only 1 process, raises a RuntimeError.
Parameters
----------
None.
Returns
----------
None.
"""
if size == 1:
print("This function sends data to other ranks. It can't on with one "
"process!")
raise RuntimeError
if rank == 0: # Only rank 0 will have the data.
data = np.arange(10)
comm.send(data, dest=1, tag=11)
else: # All the other ranks will receive their data from rank 0.
data = comm.recv(source=0, tag=11)
print("I am rank {0} and my data is {1}".format(rank, data))
def send_recv_mean(N=1e6):
"""
Each process takes an equal slice of numbers from 0 to N and calculates the
local mean. These are then passed back to rank 0 to determine the global
mean.
If N numbers does not divide equally among the specified number of
processes, a RuntimeError is raised.
Parameters
----------
N: Integer. Default: 1e6.
Calculate the mean of integers from 0 to N.
Returns
----------
None.
"""
check_Nproc(N) # Ensure that the array can be spread across processes.
# First determine the range of numbers this process will handle.
lower_range = int(N/size * rank)
upper_range = int(N/size * (rank+1))
data = np.arange(lower_range, upper_range)
local_mean = np.mean(data)
print("I am rank {0} and my local mean is {1}".format(rank, local_mean))
# Then pass all the values to rank 0 to find the global mean.
if rank == 0:
mean_array = np.empty(size)
mean_array[0] = local_mean
for i in range(1, size):
mean_array[i] = comm.recv(source=i, tag=11)
print("I am rank {0} and the mean from 0 to {1} is {2}".format(rank,
N, np.mean(mean_array)))
else:
comm.send(local_mean, dest=0, tag=11)
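# Worked example: with N=1e6 and 4 ranks, N/size = 250000, so rank 1 averages
# np.arange(250000, 500000) and sends that local mean to rank 0, which then
# averages the four local means (valid because every slice has equal length).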
def reduce_example(N=1e6):
"""
Each process takes an equal slice of numbers from 0 to N and the global
mean is reduced onto rank 0.
If N numbers does not divide equally among the specified number of
processes, a RuntimeError is raised.
Parameters
----------
N: Integer. Default: 1e6.
Calculate the mean of integers from 0 to N.
Returns
----------
None.
"""
check_Nproc(N)
# First determine the range of numbers this process will handle.
lower_range = int(N/size * rank)
upper_range = int(N/size * (rank+1))
data = np.arange(lower_range, upper_range)
local_mean = np.mean(data)
print("I am rank {0} and my local mean is {1}".format(rank, local_mean))
global_sum = comm.reduce(local_mean, op=MPI.SUM)
if rank == 0:
print("I am rank {0} and the mean from 0 to {1} is {2}".format(rank,
N, global_sum / size))
def reduce_array_example(N=100, num_bins=10):
"""
Calculates the mean of numbers across bins. Used to showcase that mpi4py
can communicate arrays across processes.
Parameters
----------
N: Integer. Default: 100.
Largest data point to be summed.
num_bins: Integer. Default: 10.
Number of bins that we are summing within.
Returns
----------
None.
"""
data = np.array(random.sample(range(0, N), num_bins), dtype=np.float64) # float64 so the buffer matches MPI.DOUBLE below
print("I am rank {0} and my data is {1}".format(rank, data))
mean_data = np.zeros_like(data)
comm.Reduce([data, MPI.DOUBLE], [mean_data, MPI.DOUBLE], op=MPI.SUM,
root=0)
if rank == 0:
print("I am rank {0} and the mean is {1}"
.format(rank, mean_data / size))
def my_example(datadir="./data", firstfile=0, lastfile=11):
"""
Calculates the mean for numbers across different data files. Used to
showcase the strength of mpi4py being able to open different files and
communicating the results back to root.
If the data files do not exist, creates files containing random numbers.
Parameters
----------
datadir: String. Default: './data'.
The directory the data files are located in.
firstfile, lastfile: Integers. Default: 0, 11.
The range of file numbers that are being read.
Returns
----------
None.
"""
print("Running my example in parallel.")
# Check to see if the data directory exists.
if not os.path.exists(datadir) and rank == 0:
os.makedirs(datadir)
# If there aren't any data files, create some data.
fname = "{0}/data_{1}.txt".format(datadir, firstfile + rank)
if not os.path.isfile(fname):
create_data(datadir=datadir, firstfile=firstfile, lastfile=lastfile)
comm.Barrier() # Rank 0 may still be creating data so wait here.
sum_thistask = 0 # Initialize.
N_thistask = 0
# Now each Task will get its own set of files to read in.
# This loop ensures each file is only read once.
for filenr in range(firstfile + rank, lastfile + 1, size):
fname = "{0}/data_{1}.txt".format(datadir, filenr)
data_thisfile = np.loadtxt(fname)
# Sum up the data from this file.
sum_thistask += sum(data_thisfile)
N_thistask += len(data_thisfile)
# Then after all files have been read, reduce everything onto rank 0.
global_sum = comm.reduce(sum_thistask, op=MPI.SUM)
global_N = comm.reduce(N_thistask, op=MPI.SUM)
print("I am rank {0} and I processed a total of {1} values.".format(rank,
N_thistask))
if rank == 0:
print("I am rank {0} and {1} total values were processed with a sum "
"of {2} and a mean of {3}".format(rank, global_N, global_sum,
global_sum / global_N))
def my_example_serial(datadir="./data", firstfile=0, lastfile=11):
"""
Calculates the mean for numbers across different data files. Is only run
on one process to validate the results of `my_example()`.
If the data files do not exist, creates files containing random numbers.
Parameters
----------
datadir: String. Default: './data'.
The directory the data files are located in.
firstfile, lastfile: Integers. Default: 0, 11.
The range of file numbers that are being read.
Returns
----------
None.
"""
print("Running my example in serial.")
# Check to see if the data directory exists.
if not os.path.exists(datadir) and rank == 0:
os.makedirs(datadir)
# If there aren't any data files, create some data.
fname = "{0}/data_{1}.txt".format(datadir, firstfile)
if not os.path.isfile(fname):
create_data(datadir=datadir, firstfile=firstfile, lastfile=lastfile)
sum_local = 0 # Initialize.
N_local = 0
# Now each Task will get its own set of files to read in.
# This loop ensures each file is only read once.
for filenr in range(firstfile, lastfile + 1):
fname = "{0}/data_{1}.txt".format(datadir, filenr)
data_thisfile = np.loadtxt(fname)
# Sum up the data from this file.
sum_local += sum(data_thisfile)
N_local += len(data_thisfile)
print("There were {0} values processed with a sum of {1} and mean of {2}"
.format(N_local, sum_local, sum_local / N_local))
def create_data(datadir="./data", firstfile=0, lastfile=11, N_lower=5e5,
N_upper=6e5):
"""
Creates .txt files with a random number of random numbers. The number of
files created is lastfile - firstfile + 1. Used for `my_example()`.
Parameters
----------
datadir: String. Default: './data'.
The directory the data files are located in.
firstfile, lastfile: Integers. Default: 0, 11.
The range of file numbers that are being read.
N_lower, N_upper: Integers. Default: 5e5, 6e5.
Generates N random numbers where N_lower < N < N_upper + 1.
Each random number generated, x_i, is -int(N_lower*3) < x_i < int(N_upper * 3).
Returns
----------
None. Data files are created in the `datadir` directory.
"""
print("Creating data.")
for filenr in range(firstfile + rank, lastfile + 1, size):
N = random.randint(int(N_lower), int(N_upper) + 1) # cast: randint requires ints
data = np.array(random.sample(range(-int(N_lower*3),
int(N_upper*3)), N))
fname = "{0}/data_{1}.txt".format(datadir, filenr)
np.savetxt(fname, data)
print("Done!")
if __name__ == "__main__":
#hello_world()
#send_recv()
#send_recv_mean()
#reduce_example()
reduce_array_example()
#my_example()
#my_example_serial()
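# To run these examples in parallel, launch this script under an MPI runtime,
# e.g. (the filename mpi_examples.py is illustrative):
#
#   mpiexec -n 4 python mpi_examples.py
#
# Every rank executes the whole script; the rank/size values queried at the
# top are what differentiate each process's behaviour.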
|
# Generated by Django 2.1.8 on 2019-06-15 14:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='skucomments',
name='goods',
),
]
|