| column | type | notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
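Each row pairs these metadata columns with the full file `content`; below, the metadata cells are shown on one line before each file and the three line statistics on one line after it. As a minimal sketch of working with such a table, assuming it has been exported as JSON Lines (the file name is hypothetical, nothing else here is from the original):

```python
import pandas as pd

# Hypothetical export of the table below; the path is an assumption.
df = pd.read_json("stack_python_sample.jsonl", lines=True)

# Nullable columns arrive as NaN/None.
starred = df[df["max_stars_count"].fillna(0) > 0]

# Inspect the most-starred files first.
cols = ["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]
print(starred.sort_values("max_stars_count", ascending=False)[cols].head())
```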
| 4a16a16890b44f8d9db7270239aa01481187f4cc | 3,863 | py | Python | tests/unit/test_v3bw.py | pastly/flashflow | b7f53f71683fc2e9a6456c04aab110c843baf182 | ["CC0-1.0"] | null | null | null | tests/unit/test_v3bw.py | pastly/flashflow | b7f53f71683fc2e9a6456c04aab110c843baf182 | ["CC0-1.0"] | null | null | null | tests/unit/test_v3bw.py | pastly/flashflow | b7f53f71683fc2e9a6456c04aab110c843baf182 | ["CC0-1.0"] | null | null | null |
import unittest
from flashflow import v3bw
import os
import time
from typing import Optional, Tuple
from tempfile import TemporaryDirectory
def touch(fname: str, times: Optional[Tuple[float, float]] = None):
    ''' Update ``fname``'s last access and modified times to now. If it does
not exist, create it first. If ``times`` are specified, use them instead of
the current time.
:param fname: Name of file to update access and modified time
:param times: tuple with access time and modified time, respectively
'''
if not times:
now = time.time()
times = (now, now)
with open(fname, 'a') as fd:
os.utime(fd.fileno(), times=times)
class TestFindFiles(unittest.TestCase):
def test_nonexist(self):
''' Result file not existing, nor anything to glob for, returns empty
list '''
path = '/path/that/does/not/exist'
assert v3bw._find_files(path) == []
def test_justbase(self):
''' Result file exists, but nothing to glob for, still get list
containing result file '''
with TemporaryDirectory() as tempdir:
base = os.path.join(tempdir, 'results.log')
touch(base)
assert v3bw._find_files(base) == [base]
def test_justbase_unrelated(self):
''' Result file exists, nothing to glob for, and unrelated file in same
dir. Get just result file. '''
with TemporaryDirectory() as tempdir:
base = os.path.join(tempdir, 'results.log')
touch(base)
touch(os.path.join(tempdir, 'unrelated_file'))
assert v3bw._find_files(base) == [base]
def test_justbase_unrelated_dir(self):
''' Result file exists, nothing to glob for, and similarly-named
directory. Get just result file. '''
with TemporaryDirectory() as tempdir:
base = os.path.join(tempdir, 'results.log')
touch(base)
os.mkdir(os.path.join(tempdir, 'results.log.thisisadir'))
assert v3bw._find_files(base) == [base]
def test_multi(self):
''' Multiple files to match, and they are returned in the correct order
'''
with TemporaryDirectory() as tempdir:
# f1 is older, so returned first
base = os.path.join(tempdir, 'results.log')
f1 = base + 'f1'
f2 = base + 'f2'
touch(f1)
touch(f2)
assert v3bw._find_files(base) == [f1, f2]
# f2 is older, so returned first
touch(f2)
touch(f1)
assert v3bw._find_files(base) == [f2, f1]
def test_base_not_first(self):
        ''' The base file isn't treated specially and returned first. It's sorted
based on modtime '''
with TemporaryDirectory() as tempdir:
base = os.path.join(tempdir, 'results.log')
other = base + '.foo'
touch(base)
touch(other)
# base is older, so returned first
assert v3bw._find_files(base) == [base, other]
# other is older, so returned first
touch(other)
touch(base)
assert v3bw._find_files(base) == [other, base]
def test_too_old(self):
''' The only file that exists is too old to be considered '''
with TemporaryDirectory() as tempdir:
fname = os.path.join(tempdir, 'foo')
now = 1000000
touch(fname, times=(now, now))
assert v3bw._find_files(fname, min_ts=now+1) == []
def test_recent_enough(self):
''' The only file that exists is new enough to be considered '''
with TemporaryDirectory() as tempdir:
fname = os.path.join(tempdir, 'foo')
now = 1000000
touch(fname, times=(now, now))
assert v3bw._find_files(fname, min_ts=now-1) == [fname]
| 37.504854 | 79 | 0.592286 |
| 4a16a1c69888efa81b8c70c771541fbaa972f03d | 1,510 | py | Python | s5_scatter.py | grmagicdog/ChIP-seq-Correlation | c869692ac281c49bd71920490bbbc94341014e67 | ["MIT"] | 2 | 2019-05-29T06:56:38.000Z | 2020-09-18T12:44:26.000Z | s6_scatter.py | gramgicdog/ChIP-seq-Correlation | c869692ac281c49bd71920490bbbc94341014e67 | ["MIT"] | null | null | null | s6_scatter.py | gramgicdog/ChIP-seq-Correlation | c869692ac281c49bd71920490bbbc94341014e67 | ["MIT"] | 1 | 2019-05-29T06:56:40.000Z | 2019-05-29T06:56:40.000Z |
#!/usr/bin/python3
# encoding=utf-8
from config import *
import matplotlib.pyplot as plt
from scipy.stats import linregress
from table import *
import numpy as np
def draw(x, y):
x, y = np.array(x), np.array(y)
slope, intercept, rvalue, pvalue, stderr = linregress(x, y)
fig, axs = plt.subplots(1, 2)
axs[1].axis((1e-3, 1e3, 1e-3, 1e3))
axs[1].set_xscale('log')
axs[1].set_yscale('log')
for ax in axs:
ax.set_xlabel('RCM-1 1000bp Mean Signal')
ax.set_ylabel('CBF-1 1000bp Mean Signal')
ax.scatter(x, y, 1, marker='.')
axs[0].set_title('Correlation between RCM-1 & CBF-1 (linear scale)')
axs[0].text(100, 300, 'n = {}\nr = {}\n'.format(len(x), '%.4f' %float(rvalue)))
axs[1].set_title('Correlation between RCM-1 & CBF-1 (log scale)')
axs[1].text(0.1, 100, 'n = {}\nr = {}\n'.format(len(x), '%.4f' %float(rvalue)))
plt.show()
def readvalue(samples, path=''):
values = []
for sam in samples:
filename = path + sam + '_mean.txt'
values.append(list(map(float, readcols(filename)[3])))
return tuple(values)
def delzero(*inputs):
i = 0
n = len(inputs[0])
while i < n:
findzero = False
for data in inputs:
if data[i] == 0:
findzero = True
if findzero:
for data in inputs:
data.pop(i)
i -= 1
n -= 1
i += 1
return tuple(inputs)
x, y = readvalue(samples, dataPath)
draw(x, y)
| 27.454545 | 83 | 0.562914 |
| 4a16a2b8ef002b629ac348ec880b026c50c0b853 | 7,874 | py | Python | VocCode/Model/Deeplabv3_plus/Backbones/resnet.py | yyliu01/PS-MT | 91268eaca383d7956f5f4cdf7135256e9bbfa04c | ["MIT"] | 7 | 2022-03-28T04:07:17.000Z | 2022-03-31T13:49:04.000Z | VocCode/Model/Deeplabv3_plus/Backbones/resnet.py | yyliu01/PS-MT | 91268eaca383d7956f5f4cdf7135256e9bbfa04c | ["MIT"] | null | null | null | VocCode/Model/Deeplabv3_plus/Backbones/resnet.py | yyliu01/PS-MT | 91268eaca383d7956f5f4cdf7135256e9bbfa04c | ["MIT"] | null | null | null |
import functools
import torch.nn as nn
from Utils.pyt_utils import load_model
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, norm_layer=None,
bn_eps=1e-5, bn_momentum=0.1, downsample=None, inplace=True):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes, eps=bn_eps, momentum=bn_momentum)
self.relu = nn.ReLU(inplace=inplace)
self.relu_inplace = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes, eps=bn_eps, momentum=bn_momentum)
self.downsample = downsample
self.stride = stride
self.inplace = inplace
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.inplace:
out += residual
else:
out = out + residual
out = self.relu_inplace(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1,
norm_layer=None, bn_eps=1e-5, bn_momentum=0.1,
downsample=None, inplace=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(planes, eps=bn_eps, momentum=bn_momentum)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = norm_layer(planes, eps=bn_eps, momentum=bn_momentum)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = norm_layer(planes * self.expansion, eps=bn_eps,
momentum=bn_momentum)
self.relu = nn.ReLU(inplace=inplace)
self.relu_inplace = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.inplace = inplace
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.inplace:
out += residual
else:
out = out + residual
out = self.relu_inplace(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
bn_momentum=0.1, deep_stem=False, stem_width=32, inplace=True):
self.inplanes = stem_width * 2 if deep_stem else 64
super(ResNet, self).__init__()
if deep_stem:
self.conv1 = nn.Sequential(
nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=1,
bias=False),
norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
nn.ReLU(inplace=inplace),
nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1,
padding=1,
bias=False),
norm_layer(stem_width, eps=bn_eps, momentum=bn_momentum),
nn.ReLU(inplace=inplace),
nn.Conv2d(stem_width, stem_width * 2, kernel_size=3, stride=1,
padding=1,
bias=False),
)
else:
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(stem_width * 2 if deep_stem else 64, eps=bn_eps,
momentum=bn_momentum)
self.relu = nn.ReLU(inplace=inplace)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, norm_layer, 64, layers[0],
inplace,
bn_eps=bn_eps, bn_momentum=bn_momentum)
self.layer2 = self._make_layer(block, norm_layer, 128, layers[1],
inplace, stride=2,
bn_eps=bn_eps, bn_momentum=bn_momentum)
self.layer3 = self._make_layer(block, norm_layer, 256, layers[2],
inplace, stride=2,
bn_eps=bn_eps, bn_momentum=bn_momentum)
self.layer4 = self._make_layer(block, norm_layer, 512, layers[3],
inplace, stride=2,
bn_eps=bn_eps, bn_momentum=bn_momentum)
def _make_layer(self, block, norm_layer, planes, blocks, inplace=True,
stride=1, bn_eps=1e-5, bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
norm_layer(planes * block.expansion, eps=bn_eps,
momentum=bn_momentum),
)
layers = []
layers.append(block(self.inplanes, planes, stride, norm_layer, bn_eps,
bn_momentum, downsample, inplace))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
norm_layer=norm_layer, bn_eps=bn_eps,
bn_momentum=bn_momentum, inplace=inplace))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
blocks = []
        x = self.layer1(x)
        blocks.append(x)
        x = self.layer2(x)
        blocks.append(x)
        x = self.layer3(x)
        blocks.append(x)
        x = self.layer4(x)
        blocks.append(x)
return blocks
def resnet18(pretrained_model=None, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained_model is not None:
model = load_model(model, pretrained_model)
return model
def resnet34(pretrained_model=None, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained_model is not None:
model = load_model(model, pretrained_model)
return model
def resnet50(pretrained_model=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained_model is True:
pretrained_model = 'Model/Deeplabv3_plus/Backbones/pretrained/resnet50.pth'
if pretrained_model is not None:
model = load_model(model, pretrained_model)
return model
def resnet101(pretrained_model=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained_model is not None:
model = load_model(model, pretrained_model)
return model
def resnet152(pretrained_model=None, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained_model is not None:
model = load_model(model, pretrained_model)
return model
| 34.995556 | 83 | 0.567818 |
| 4a16a2c4aa678336a518c175b2e638a333138ae0 | 187 | py | Python | apptheme_mezzanine/__init__.py | cedadev/apptheme-mezzanine | fab3e373687f01de3b1edcc7c752297a302b8986 | ["MIT"] | null | null | null | apptheme_mezzanine/__init__.py | cedadev/apptheme-mezzanine | fab3e373687f01de3b1edcc7c752297a302b8986 | ["MIT"] | 5 | 2018-04-10T16:00:56.000Z | 2019-11-25T16:52:42.000Z | apptheme_mezzanine/__init__.py | cedadev/apptheme-mezzanine | fab3e373687f01de3b1edcc7c752297a302b8986 | ["MIT"] | null | null | null |
"""
Main module for the Django framework theme app.
"""
__author__ = "Matt Pritchard"
__copyright__ = "Copyright 2018 UK Science and Technology Facilities Council"
__version__ = "0.4a"
| 20.777778 | 77 | 0.754011 |
| 4a16a2eac0a4ada33073fdd28e08b9378f2a86b7 | 471 | py | Python | list_remove_duplicates.py | Johne-DuChene/python_practice | 108582743b2e37e4e47fcea7611837f6ef2997e4 | ["MIT"] | null | null | null | list_remove_duplicates.py | Johne-DuChene/python_practice | 108582743b2e37e4e47fcea7611837f6ef2997e4 | ["MIT"] | null | null | null | list_remove_duplicates.py | Johne-DuChene/python_practice | 108582743b2e37e4e47fcea7611837f6ef2997e4 | ["MIT"] | null | null | null |
'''Write a program that takes a list
and returns a new list that contains
all the same elements but with
duplicates removed.
Write two functions to do this, one
with a loop, and one with a set.'''
rand = [0, 1, 1, 2, 3, 3, 4, 4, 5]
def rem_dupes(items):
    return list(set(items))
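# Note: the set-based version does not preserve the original element order;
# the loop-based version below does.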
print(rem_dupes(rand))
def loop_rem_dupes(items):
newlist = []
for i in items:
if i not in newlist:
newlist.append(i)
return newlist
print(loop_rem_dupes(rand))
| 23.55 | 36 | 0.673036 |
| 4a16a34cd3cf2e1d3f18257ec4ffa8e75bd93ef5 | 2,141 | py | Python | src/oic/extension/heart.py | layoaster/pyoidc | 6b03b8a285c3f4652dea474df4429d8ee6e5298b | ["Apache-2.0"] | null | null | null | src/oic/extension/heart.py | layoaster/pyoidc | 6b03b8a285c3f4652dea474df4429d8ee6e5298b | ["Apache-2.0"] | 1 | 2019-02-08T09:11:49.000Z | 2019-02-08T09:11:49.000Z | src/oic/extension/heart.py | layoaster/pyoidc | 6b03b8a285c3f4652dea474df4429d8ee6e5298b | ["Apache-2.0"] | 1 | 2019-02-25T10:08:48.000Z | 2019-02-25T10:08:48.000Z |
from urllib.parse import urlparse
from oic.oauth2.message import REQUIRED_LIST_OF_STRINGS
from oic.oauth2.message import SINGLE_REQUIRED_STRING
from oic.oic.message import SINGLE_REQUIRED_INT
from oic.oic.message import JasonWebToken
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
class PrivateKeyJWT(JasonWebToken):
c_param = JasonWebToken.c_param.copy()
    c_param.update({
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_REQUIRED_STRING,
        "aud": SINGLE_REQUIRED_STRING,
        "exp": SINGLE_REQUIRED_INT,
        "iat": SINGLE_REQUIRED_INT,
        "jti": SINGLE_REQUIRED_STRING,
    })
def verify_url(url):
"""
Verify security of URL.
Hosted on a website with Transport Layer Security (TLS) protection
(a Hypertext Transfer Protocol – Secure (HTTPS) URI)
Hosted on the local domain of the client (e.g., http://localhost/)
Hosted on a client-specific non-remote-protocol URI scheme (e.g., myapp://)
:param url:
:return:
"""
if url.startswith('http://localhost'):
return True
else:
p = urlparse(url)
if p.scheme == 'http':
return False
return True
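# Illustrative checks of the rules above (a hedged sketch, not from the
# original file):
#   verify_url('http://localhost/cb')  # True  - local domain
#   verify_url('https://example.com')  # True  - TLS-protected
#   verify_url('myapp://callback')     # True  - non-remote scheme
#   verify_url('http://example.com')   # False - plain HTTP on a remote host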
class HeartSoftwareStatement(JasonWebToken):
c_param = JasonWebToken.c_param.copy()
c_param.update({
'redirect_uris': REQUIRED_LIST_OF_STRINGS,
'grant_types': SINGLE_REQUIRED_STRING,
'jwks_uri': SINGLE_REQUIRED_STRING,
'jwks': SINGLE_REQUIRED_STRING,
'client_name': SINGLE_REQUIRED_STRING,
'client_uri': SINGLE_REQUIRED_STRING
})
c_allowed_values = {"grant_types": ["authorization_code", "implicit"]}
def verify(self, **kwargs):
if "jwks" in self:
try:
_keys = self["jwks"]["keys"]
except KeyError:
raise SyntaxError('"keys" parameter missing')
else:
# will raise an exception if syntax error
KeyBundle(_keys)
for param in ['jwks_uri', 'client_uri']:
verify_url(self[param])
JasonWebToken.verify(self, **kwargs)
| 29.736111 | 79 | 0.648762 |
| 4a16a44f46d16c2d46259661b176cd15b045261c | 1,221 | py | Python | settings/common.py | egemsoft/esef-yawd-translation | 7a104d02be8dc6794f9bc48e7db14078449a0a11 | ["BSD-3-Clause"] | null | null | null | settings/common.py | egemsoft/esef-yawd-translation | 7a104d02be8dc6794f9bc48e7db14078449a0a11 | ["BSD-3-Clause"] | null | null | null | settings/common.py | egemsoft/esef-yawd-translation | 7a104d02be8dc6794f9bc48e7db14078449a0a11 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import
import os
__author__ = 'cenk'
BASE_DIR = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3.db'),
}
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.postgres',
'translations',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'translations.urls'
STATIC_URL = '/static/'
SECRET_KEY = 'abcde12345'
| 23.037736 | 69 | 0.72154 |
| 4a16a4c6472e001dc1d1d919c395cded91d59b55 | 2,901 | py | Python | dev/Editor/Scripts/viewmodes.py | BadDevCode/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | ["AML"] | 2 | 2020-06-27T12:13:44.000Z | 2020-06-27T12:13:46.000Z | dev/Editor/Scripts/viewmodes.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | ["AML"] | null | null | null | dev/Editor/Scripts/viewmodes.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | ["AML"] | null | null | null |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import sys
import itertools
from tools_shelf_actions import *
import azlmbr.legacy.general as general
if len(sys.argv) > 1:
mode = sys.argv[1]
if mode == 'Fullshading':
updateCvars('r_DebugGBuffer', 0)
elif mode == 'Normals':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 1, 0)
elif mode == 'Smoothness':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 2, 0)
elif mode == 'Reflectance':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 3, 0)
elif mode == 'Albedo':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 4, 0)
elif mode == 'Lighting_Model':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 5, 0)
elif mode == 'Translucency':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 6, 0)
elif mode == 'Sun_self_shadowing':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 7, 0)
elif mode == 'Subsurface_scattering':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 8, 0)
elif mode == 'Specular_validation_overlay':
toggleCvarsValue('mode_%s' % mode, 'r_DebugGBuffer', 9, 0)
elif mode == 'default_material':
toggleCvarsValue('mode_%s' % mode, 'e_defaultmaterial', 1, 0)
elif mode == 'default_material_normals':
toggleCvarsValue('mode_%s' % mode, 'r_TexBindMode', 6, 0)
elif mode == 'collisionshapes':
toggleCvarsValue('mode_%s' % mode, 'p_draw_helpers', '1', '0')
elif mode == 'shaded_wireframe':
toggleCvarsValue('mode_%s' % mode, 'r_showlines', 2, 0)
elif mode == 'wireframe':
toggleCvarsValue('mode_%s' % mode, 'r_wireframe', 1, 0)
elif mode == 'Tangents':
toggleCvarsValue('mode_%s' % mode, 'r_ShowTangents', 1, 0)
elif mode == 'texelspermeter360':
toggleCvarsValue('mode_%s' % mode, 'r_TexelsPerMeter', float(256), float(0))
elif mode == 'texelspermeterpc':
toggleCvarsValue('mode_%s' % mode, 'r_TexelsPerMeter', float(512), float(0))
elif mode == 'texelspermeterpc2':
toggleCvarsValue('mode_%s' % mode, 'r_TexelsPerMeter', float(1024), float(0))
elif mode == 'lods':
cycleCvarsIntValue("e_DebugDraw", [0, 3, -3])
elif mode == 'lods_level':
cycleCvarsIntValue("e_LodMin", [0, 1, 2, 3, 4, 5])
elif mode == 'default_view':
for cVars in ['r_DebugGBuffer', 'e_defaultmaterial', 'r_TexBindMode',
'p_draw_helpers', 'r_showlines', 'r_wireframe', 'r_shownormals',
'r_ShowTangents', 'r_TexelsPerMeter', 'e_DebugDraw', 'e_LodMin']:
restoreDefaultValue(cVars)
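# Hedged usage note: judging from the sys.argv handling above, the editor's
# tools shelf presumably invokes this script with a single mode argument,
# e.g. `viewmodes.py wireframe` to toggle the r_wireframe cvar.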
| 44.630769 | 85 | 0.705274 |
| 4a16a657d30ff31425d313e75e50dc53f8d48982 | 587 | py | Python | stanford/sms-tools/lectures/04-STFT/plots-code/hanning.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | ["MIT"] | 1 | 2021-03-12T18:32:06.000Z | 2021-03-12T18:32:06.000Z | stanford/sms-tools/lectures/04-STFT/plots-code/hanning.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | ["MIT"] | null | null | null | stanford/sms-tools/lectures/04-STFT/plots-code/hanning.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft
M = 64
N = 512
hN = N//2
hM = M//2
fftbuffer = np.zeros(N)
mX1 = np.zeros(N)
plt.figure(1, figsize=(7.5, 3.5))
fftbuffer[hN-hM:hN+hM]=np.hanning(M)
plt.subplot(2,1,1)
plt.plot(np.arange(-hN, hN), fftbuffer, 'b', lw=1.5)
plt.axis([-hN, hN, 0, 1.1])
X = fft(fftbuffer)
mX = 20*np.log10(abs(X))
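# Swap the two spectrum halves so DC ends up centered in the plot
# (a manual equivalent of np.fft.fftshift).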
mX1[:hN] = mX[hN:]
mX1[N-hN:] = mX[:hN]
plt.subplot(2,1,2)
plt.plot(np.arange(-hN, hN), mX1-max(mX), 'r', lw=1.5)
plt.axis([-hN,hN,-80,0])
plt.tight_layout()
plt.savefig('hanning.png')
plt.show()
| 18.935484 | 54 | 0.623509 |
| 4a16a7a1544a120c8896407e46f69e66f1a57596 | 765 | py | Python | proto/rpc/service/rpcservice_stub.py | ptsurko/coursera_cloud | ed34a409034e2b7a85c6a3d5700c621fcabe8bde | ["MIT"] | null | null | null | proto/rpc/service/rpcservice_stub.py | ptsurko/coursera_cloud | ed34a409034e2b7a85c6a3d5700c621fcabe8bde | ["MIT"] | null | null | null | proto/rpc/service/rpcservice_stub.py | ptsurko/coursera_cloud | ed34a409034e2b7a85c6a3d5700c621fcabe8bde | ["MIT"] | null | null | null |
from proto.rpc._method_descriptor import _MethodDescriptor
from proto.rpc._service_descriptor import _ServiceDescriptor
class RpcServiceStub(object):
_descriptor = None
@classmethod
def get_descriptor(cls):
return cls._descriptor
def handler(request_class, response_class=None):
def func(method_obj):
method_descriptor = _MethodDescriptor(method_obj.__name__, method_obj, request_class, response_class)
method_obj._descriptor = method_descriptor
return method_obj
return func
def service(name=None):
def func(class_obj):
service_name = name if name else class_obj.__name__
class_obj._descriptor = _ServiceDescriptor(service_name)
return class_obj
return func
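# Hedged usage sketch -- the service and message class names below are
# illustrative, not from the original repo:
#
#   @service(name='EchoService')
#   class EchoStub(RpcServiceStub):
#       @handler(EchoRequest, EchoResponse)
#       def echo(self, request):
#           ...
#
# `service` attaches a _ServiceDescriptor to the class, and `handler`
# attaches a _MethodDescriptor to each decorated method.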
| 29.423077 | 109 | 0.734641 |
| 4a16a8011c18bbe3e224396f296ebbc079c749e1 | 7,525 | py | Python | lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 305 | 2019-09-14T17:16:05.000Z | 2022-03-31T15:05:20.000Z | lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 11 | 2019-10-17T21:11:52.000Z | 2022-02-17T20:10:00.000Z | lldb/test/API/lang/objc/hidden-ivars/TestHiddenIvars.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | ["Apache-2.0"] | 24 | 2019-10-03T11:22:11.000Z | 2022-01-25T09:59:30.000Z |
"""Test that hidden ivars in a shared library are visible from the main executable."""
import unittest2
import subprocess
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class HiddenIvarsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.m'
self.line = line_number(self.source, '// breakpoint1')
# The makefile names of the shared libraries as they appear in DYLIB_NAME.
# The names should have no loading "lib" or extension as they will be
# localized
self.shlib_names = ["InternalDefiner"]
@skipUnlessDarwin
@skipIf(
debug_info=no_match("dsym"),
bugnumber="This test requires a stripped binary and a dSYM")
@skipIfReproducer # FIXME: Unexpected packet during (passive) replay
def test_expr_stripped(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.expr(True)
@skipUnlessDarwin
@skipIfReproducer # FIXME: Unexpected packet during (passive) replay
def test_expr(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.expr(False)
@skipUnlessDarwin
@skipIf(
debug_info=no_match("dsym"),
bugnumber="This test requires a stripped binary and a dSYM")
def test_frame_variable_stripped(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.frame_var(True)
@skipUnlessDarwin
def test_frame_variable(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.frame_var(False)
@expectedFailure("rdar://18683637")
@skipUnlessDarwin
def test_frame_variable_across_modules(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.common_setup(False)
self.expect(
"frame variable k->bar",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 3"])
def common_setup(self, strip):
if strip:
exe = self.getBuildArtifact("stripped/a.out")
else:
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Register our shared libraries for remote targets so they get
# automatically uploaded
environment = self.registerSharedLibrariesWithTarget(
target, self.shlib_names)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
lldbutil.run_break_set_by_file_and_line(
self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
def expr(self, strip):
self.common_setup(strip)
# This should display correctly.
self.expect(
"expression (j->_definer->foo)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
self.expect(
"expression (j->_definer->bar)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 5"])
if strip:
self.expect(
"expression *(j->_definer)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 4"])
else:
self.expect(
"expression *(j->_definer)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 4",
"bar = 5"])
self.expect("expression (k->foo)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
self.expect("expression (k->bar)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 3"])
self.expect(
"expression k.filteredDataSource",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
' = 0x',
'"2 elements"'])
if strip:
self.expect("expression *(k)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 2", ' = 0x', '"2 elements"'])
else:
self.expect(
"expression *(k)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
"bar = 3",
'_filteredDataSource = 0x',
'"2 elements"'])
def frame_var(self, strip):
self.common_setup(strip)
# This should display correctly.
self.expect(
"frame variable j->_definer->foo",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
if not strip:
self.expect(
"frame variable j->_definer->bar",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 5"])
if strip:
self.expect(
"frame variable *j->_definer",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 4"])
else:
self.expect(
"frame variable *j->_definer",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 4",
"bar = 5"])
self.expect("frame variable k->foo", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
self.expect(
"frame variable k->_filteredDataSource",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
' = 0x',
'"2 elements"'])
if strip:
self.expect(
"frame variable *k",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
'_filteredDataSource = 0x',
'"2 elements"'])
else:
self.expect(
"frame variable *k",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
"bar = 3",
'_filteredDataSource = 0x',
'"2 elements"'])
| 32.575758 | 86 | 0.546179 |
| 4a16a838416b993ec70c8b656fb3be854ce7ef51 | 637 | py | Python | hsapp/migrations/0037_auto_20171105_1901.py | jkbm/playesports | 1e01c909f183499906b26858fc54735dba5409d9 | ["MIT"] | null | null | null | hsapp/migrations/0037_auto_20171105_1901.py | jkbm/playesports | 1e01c909f183499906b26858fc54735dba5409d9 | ["MIT"] | 12 | 2019-11-04T13:36:37.000Z | 2022-03-11T23:32:50.000Z | hsapp/migrations/0037_auto_20171105_1901.py | jkbm/playesports | 1e01c909f183499906b26858fc54735dba5409d9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-05 17:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hsapp', '0036_post_image'),
]
operations = [
migrations.AlterField(
model_name='post',
name='tags',
field=models.CharField(blank=True, max_length=500, null=True),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=300),
),
]
| 24.5 | 75 | 0.562009 |
| 4a16a9306780447674f4703cb900b6410a722e1f | 7,560 | py | Python | Scripts/GoogleChrome-postinstall.py | apizz/autopkg-mac-recipes | 509591779e3766b2daf35da5a650481f48388052 | ["Apache-2.0"] | 3 | 2021-04-10T05:43:54.000Z | 2021-12-21T00:56:56.000Z | Scripts/GoogleChrome-postinstall.py | apizz/autopkg-mac-recipes | 509591779e3766b2daf35da5a650481f48388052 | ["Apache-2.0"] | 6 | 2021-01-14T09:10:17.000Z | 2021-03-02T14:15:55.000Z | Scripts/GoogleChrome-postinstall.py | apizz/autopkg-mac-recipes | 509591779e3766b2daf35da5a650481f48388052 | ["Apache-2.0"] | 3 | 2020-12-08T12:21:28.000Z | 2021-03-02T05:13:24.000Z |
#!/usr/bin/env python
# encoding: utf-8
"""
chrome-enable-autoupdates.py
This script enables system wide automatic updates for Google Chrome.
It should work for Chrome versions 18 and later. No configuration needed
as this is originally intended as a munki postinstall script.
Created by Hannes Juutilainen, hjuutilainen@mac.com
History:
--------
2020-11-13, Graham Pugh
- Make py2/3 compatible
2019-08-05, Andy Duss
- Fix keystone_registration_framework_path to point to correct directory
2017-09-01, Hannes Juutilainen
- Ignore errors when installing keystone
2015-09-25, Niklas Blomdalen
- Modifications to include old KeystoneRegistration installation (python version)
2014-11-20, Hannes Juutilainen
- Modifications for Chrome 39
2012-08-31, Hannes Juutilainen
- Added --force flag to keystone install as suggested by Riley Shott
2012-05-29, Hannes Juutilainen
- Added more error checking
2012-05-25, Hannes Juutilainen
- Added some error checking in main
2012-05-24, Hannes Juutilainen
- First version
"""
import sys
import os
import subprocess
import plistlib
from distutils.version import LooseVersion
chrome_path = "/Applications/Google Chrome.app"
info_plist_path = os.path.realpath(os.path.join(chrome_path, "Contents/Info.plist"))
brand_path = "/Library/Google/Google Chrome Brand.plist"
brand_key = "KSBrandID"
tag_path = info_plist_path
tag_key = "KSChannelID"
version_path = info_plist_path
version_key = "KSVersion"
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def chrome_installed():
"""Check if Chrome is installed"""
if os.path.exists(chrome_path):
return True
else:
return False
def chrome_version():
"""Returns Chrome version"""
info_plist = plistlib.readPlist(info_plist_path)
bundle_short_version = info_plist["CFBundleShortVersionString"]
return bundle_short_version
def chrome_update_url():
"""Returns KSUpdateURL from Chrome Info.plist"""
info_plist = plistlib.readPlist(info_plist_path)
update_url = info_plist["KSUpdateURL"]
return update_url
def chrome_product_id():
"""Returns KSProductID from Chrome Info.plist"""
info_plist = plistlib.readPlist(info_plist_path)
product_id = info_plist["KSProductID"]
return product_id
def keystone_registration_framework_path():
"""Returns KeystoneRegistration.framework path"""
if LooseVersion(chrome_version()) >= LooseVersion("76"):
keystone_registration = os.path.join(chrome_path, "Contents", "Frameworks")
keystone_registration = os.path.join(
keystone_registration, "Google Chrome Framework.framework"
)
keystone_registration = os.path.join(
keystone_registration, "Frameworks", "KeystoneRegistration.framework"
)
keystone_registration = os.path.join(
keystone_registration, "Versions", "Current"
)
elif LooseVersion(chrome_version()) >= LooseVersion("75") and LooseVersion(
chrome_version()
) < LooseVersion("76"):
keystone_registration = os.path.join(chrome_path, "Contents/Frameworks/")
keystone_registration = os.path.join(
keystone_registration, "Google Chrome Framework.framework/Versions"
)
keystone_registration = os.path.join(keystone_registration, chrome_version())
keystone_registration = os.path.join(
keystone_registration, "Frameworks/KeystoneRegistration.framework"
)
else:
keystone_registration = os.path.join(chrome_path, "Contents/Versions")
keystone_registration = os.path.join(keystone_registration, chrome_version())
keystone_registration = os.path.join(
keystone_registration, "Google Chrome Framework.framework"
)
keystone_registration = os.path.join(
keystone_registration, "Frameworks/KeystoneRegistration.framework"
)
return keystone_registration
def keystone_install():
"""Install the current Keystone"""
install_script = os.path.join(
keystone_registration_framework_path(), "Resources/ksinstall"
)
if LooseVersion(chrome_version()) >= LooseVersion("80"):
install_script = os.path.join(
keystone_registration_framework_path(), "Helpers/ksinstall"
)
if not os.path.exists(install_script):
install_script = os.path.join(
keystone_registration_framework_path(), "Resources/install.py"
)
keystone_payload = os.path.join(
keystone_registration_framework_path(), "Resources/Keystone.tbz"
)
if os.path.exists(install_script) and os.path.exists(keystone_payload):
ksinstall_process = [install_script, "--install", keystone_payload, "--force"]
p = subprocess.Popen(
ksinstall_process, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(results, error) = p.communicate()
if results:
print(results)
if p.returncode != 0:
if error:
                sys.stderr.write("%s\n" % error)
            sys.stderr.write("Keystone install exited with code %i\n" % p.returncode)
# Since we used --force argument, succeed no matter what the exit code was.
return True
else:
        sys.stderr.write("Error: KeystoneRegistration.framework not found\n")
return False
def register_chrome_with_keystone():
"""Registers Chrome with Keystone"""
ksadmin = "/Library/Google/GoogleSoftwareUpdate/GoogleSoftwareUpdate.bundle/Contents/MacOS/ksadmin"
if os.path.exists(ksadmin):
ksadmin_process = [
ksadmin,
"--register",
"--productid",
chrome_product_id(),
"--version",
chrome_version(),
"--xcpath",
chrome_path,
"--url",
chrome_update_url(),
"--tag-path",
tag_path,
"--tag-key",
tag_key,
"--brand-path",
brand_path,
"--brand-key",
brand_key,
"--version-path",
version_path,
"--version-key",
version_key,
]
p = subprocess.Popen(
ksadmin_process, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(results, error) = p.communicate()
if error:
sys.stderr.write(error)
if results:
print(results)
if p.returncode == 0:
return True
else:
return False
else:
sys.stderr.write("Error: %s doesn't exist" % ksadmin)
return False
def main(argv=None):
if argv is None:
argv = sys.argv
try:
# Check for root
if os.geteuid() != 0:
sys.stderr.write("This script must be run as root")
return 1
if not chrome_installed():
sys.stderr.write("Error: Chrome is not installed on this computer")
return 1
if keystone_install():
print("Keystone installed")
else:
sys.stderr.write("Error: Keystone install failed")
return 1
if register_chrome_with_keystone():
print("Registered Chrome with Keystone")
return 0
else:
sys.stderr.write("Error: Failed to register Chrome with Keystone")
return 1
except Usage as err:
sys.stderr.write(err.msg)
sys.stderr.write("for help use --help")
return 2
if __name__ == "__main__":
sys.exit(main())
| 31.239669 | 103 | 0.65119 |
| 4a16aa2a7f1baf50b2e51d614dbf11b7e73bbae3 | 4,076 | py | Python | dashboards/image_label_grid/ui.py | lichili233/dashboard_templates | 2b8c0383cb117edeb26525e7d722f59c051531b2 | ["Apache-2.0"] | null | null | null | dashboards/image_label_grid/ui.py | lichili233/dashboard_templates | 2b8c0383cb117edeb26525e7d722f59c051531b2 | ["Apache-2.0"] | null | null | null | dashboards/image_label_grid/ui.py | lichili233/dashboard_templates | 2b8c0383cb117edeb26525e7d722f59c051531b2 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 dashboard_templates Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main UI program."""
import streamlit as st
import numpy as np
import pandas as pd
from utils_data import load_imagenet
@st.cache(suppress_st_warning=True)
def paginator_by_label(title: str,
df_items_labels: pd.DataFrame,
df_labels: pd.DataFrame,
on_sidebar: bool = True,
item_col: str = 'img_fpath',
item_id_col: str = 'img_id',
label_id_col: str = 'label_id',
label_name_col: str = 'label_name'):
"""Paginates a set of images by their labels.
Derived from:
'paginator' function by Adrien Treuille (https://gist.github.com/treuille)
https://gist.github.com/treuille/2ce0acb6697f205e44e3e0f576e810b7
"""
# Figure out where to display the paginator
if on_sidebar:
location = st.sidebar
else:
location = st
# Display a pagination selectbox in the specified location.
n_pages = df_labels.shape[0]
page_format_func = lambda i: df_labels.iloc[i,:][label_name_col]
page_number = location.selectbox(label=title,
options=range(n_pages),
format_func=page_format_func)
# Iterate over the items in the page to let the user display them.
label_id = df_labels.iloc[page_number,:][label_id_col]
items_page = df_items_labels[df_items_labels[label_id_col]==label_id]
item_paths = items_page[item_col].tolist()
item_labels = items_page[label_name_col].tolist()
item_ids = items_page[item_id_col].tolist()
# Preview data as interactive dataframe
st.markdown("## data preview:")
st.dataframe(data=items_page, width=None, height=None)
return item_paths, item_ids
def main():
"""Main application function."""
st.markdown(
body="<h1 style='text-align: center; color: red;'>Computer Vision Image-Label Data Previewer</h1>",
unsafe_allow_html=True)
sample_per_label = st.sidebar.slider(
label='Sample size per label',
min_value=10,
max_value=200,
step=10,
help='Fixes the amount of sample showing for each ImageNet label.')
width_per_image = st.sidebar.slider(
label='Width per image',
min_value=64,
max_value=512,
step=16,
help='Resizes the images to this width for display.')
version_choice = st.sidebar.radio(
label='Select a dataset',
options=['full','tiny'],
help='The version of ImageNet to be chosen for display.')
shuffle_choice = st.sidebar.radio(
label='Shuffle images per label',
options=['True','False'],
help='Randomly shuffle the images per label for display.')
df, df_label_id_name = load_imagenet(
version=version_choice,
sample_per_label=sample_per_label,
        shuffle=(shuffle_choice == 'True'))
images_on_page, ids_on_page = paginator_by_label(
title='Select a label',
df_items_labels=df,
df_labels=df_label_id_name,
on_sidebar=True,
item_col='img_fpath',
item_id_col='img_id',
label_id_col='label_id',
label_name_col='label_name')
st.image(
image=images_on_page,
width=width_per_image,
caption=ids_on_page)
if __name__ == '__main__':
main()
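# Hedged usage note: as a Streamlit app, this module is presumably launched
# with `streamlit run ui.py` rather than executed directly.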
| 36.720721 | 108 | 0.64107 |
| 4a16ab5d56f81ac3526d4709b48782f32530968e | 1,531 | py | Python | dribdat/user/forms.py | open-network-infrastructure/dribdat | dae13e630908a6ddaacbeba84c35f4b9d820eecb | ["MIT"] | 5 | 2017-10-16T14:17:20.000Z | 2018-10-22T06:56:38.000Z | dribdat/user/forms.py | open-network-infrastructure/dribdat | dae13e630908a6ddaacbeba84c35f4b9d820eecb | ["MIT"] | 18 | 2019-02-19T12:50:52.000Z | 2019-02-20T13:08:46.000Z | dribdat/user/forms.py | hackathons-ftw/dribdat2 | 379568b540bea2f01a9bdd37f9e8b37844100579 | ["MIT"] | 1 | 2018-09-13T11:06:50.000Z | 2018-09-13T11:06:50.000Z |
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(FlaskForm):
"""Register form."""
username = StringField('Username',
validators=[DataRequired(), Length(min=3, max=25)])
email = StringField('Email',
validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password',
validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Verify password',
[DataRequired(), EqualTo('password', message='Passwords must match')])
webpage_url = StringField(u'Online profile')
def __init__(self, *args, **kwargs):
"""Create instance."""
super(RegisterForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(username=self.username.data).first()
if user:
self.username.errors.append('Username already registered')
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append('Email already registered')
return False
return True
| 37.341463 | 98 | 0.615284 |
| 4a16ab7f70edce1142aa9244e84de210e4a1b1f4 | 2,101 | py | Python | download_signatures.py | ScatteredInk/dbpedia-datset-creator | fc5265d0209b8f6355446e0fa9c97bf8fc65988f | ["MIT"] | null | null | null | download_signatures.py | ScatteredInk/dbpedia-datset-creator | fc5265d0209b8f6355446e0fa9c97bf8fc65988f | ["MIT"] | null | null | null | download_signatures.py | ScatteredInk/dbpedia-datset-creator | fc5265d0209b8f6355446e0fa9c97bf8fc65988f | ["MIT"] | null | null | null |
import os
import mwclient
import logging
import requests
from csv import DictWriter
from utf8_csv import UnicodeReader, UnicodeWriter
from collections import OrderedDict
logging.basicConfig(filename='downloads.log',level=logging.DEBUG)
def main():
#login using mwclient and env variables
WIKI_MEDIA_SITE = 'commons.wikimedia.org'
try:
user = os.environ['MEDIA_WIKI_USER']
password = os.environ['MEDIA_WIKI_PASS']
user_agent = os.environ['MEDIA_WIKI_USER_AGENT']
except KeyError, e:
logging.debug('Credentials not found in environment')
raise e
site = mwclient.Site(WIKI_MEDIA_SITE, clients_useragent=user_agent)
site.login(user, password)
#no race conditions, just check if images dir exists
if not os.path.exists('images'):
os.makedirs('images')
bad_urls = []
bad_downloads = []
#read in the mediawiki URIs from csv
with open('signatures.csv', 'rb') as f:
reader = UnicodeReader(f)
headers = reader.next()
for row in reader:
image_name = row[1]
image_obj = site.Images[image_name]
url = image_obj.imageinfo.get('url')
if url is None:
bad_urls.append(row)
else:
                try:
                    r = requests.get(url)
                    with open(os.path.join('images',image_name), 'wb') as f:
                        for chunk in r.iter_content():
                            f.write(chunk)
                    print "Downloaded {0}".format(url)
                except requests.exceptions.RequestException, e:
                    print e
                    logging.warning('Error downloading {0}, {1}'.format(
                        url, e))
                    bad_downloads.append(row)
with open('bad_urls.csv', 'wb') as csvfile:
csv_headers = OrderedDict([('Person_URI',None),('Signature',None)])
dw = DictWriter(csvfile, delimiter=',', fieldnames=csv_headers)
dw.writeheader()
for row in bad_urls:
writer = UnicodeWriter(csvfile)
writer.writerow(row)
with open('bad_downloads.csv', 'wb') as csvfile:
csv_headers = OrderedDict([('Person_URI',None),('Signature',None)])
dw = DictWriter(csvfile, delimiter=',', fieldnames=csv_headers)
dw.writeheader()
for row in bad_downloads:
writer = UnicodeWriter(csvfile)
writer.writerow(row)
if __name__ == '__main__':
main()
| 26.2625 | 69 | 0.710614 |
| 4a16abd119570b7e91879a09d0c65b352c45ee9a | 1,659 | py | Python | sdk/python/pulumi_azure_native/devices/v20170701/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/devices/v20170701/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/devices/v20170701/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .certificate import *
from .get_certificate import *
from .get_iot_hub_resource import *
from .get_iot_hub_resource_event_hub_consumer_group import *
from .iot_hub_resource import *
from .iot_hub_resource_event_hub_consumer_group import *
from .list_iot_hub_resource_keys import *
from .list_iot_hub_resource_keys_for_key_name import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:devices/v20170701:Certificate":
return Certificate(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:devices/v20170701:IotHubResource":
return IotHubResource(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:devices/v20170701:IotHubResourceEventHubConsumerGroup":
return IotHubResourceEventHubConsumerGroup(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "devices/v20170701", _module_instance)
_register_module()
| 37.704545 | 98 | 0.7173 |
| 4a16ac8c1233d38c3ba789fcb5f57aa92691ed0d | 5,779 | py | Python | cpmpy/nontransitive_dice.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | ["MIT"] | null | null | null | cpmpy/nontransitive_dice.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | ["MIT"] | null | null | null | cpmpy/nontransitive_dice.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | ["MIT"] | null | null | null |
"""
Nontransitive dice in cpmpy.
From
http://en.wikipedia.org/wiki/Nontransitive_dice
""
A set of nontransitive dice is a set of dice for which the relation
"is more likely to roll a higher number" is not transitive. See also
intransitivity.
This situation is similar to that in the game Rock, Paper, Scissors,
in which each element has an advantage over one choice and a
disadvantage to the other.
""
I start with the 3 dice version, e.g.
""
* die A has sides {2,2,4,4,9,9},
* die B has sides {1,1,6,6,8,8}, and
* die C has sides {3,3,5,5,7,7}.
""
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def nontransitive_dice(m=3,n=6,given_dice=""):
# m = 3 # number of dice
# n = 6 # number of sides of each die
min_val = 1
max_val = 6
if given_dice != "":
min_val = min(flatten_lists(given_dice))
max_val = max(flatten_lists(given_dice))
# the dice
# start value might be 0 since Efron's dice requires it.
dice = intvar(min_val,max_val*2,shape=(m, n), name='dice')
# the competitions
# comp[0,0]: die 0 vs die 1
# comp[0,1]: die 1 vs die 0
# comp[1,0]: die 1 vs die 2
# comp[1,1]: die 2 vs die 1
# ...
# comp[m-1,0]: die (m-1) vs die 0
# comp[m-1,1]: die 0 vs die (m-1)
comp = intvar(0,n*n,shape=(m, 2), name='comp')
# probability gap
gap = intvar(0, n*n,shape=m,name='gap')
gap_sum = intvar(0,m*n*n, name='gap_sum')
max_val = intvar(0,n*2,name='max_val')
max_win = intvar(0,n*n,name='max_win')
model = Model (
max_win == max(flatten_lists(comp)),
max_val == max(flatten_lists(dice)),
# increasing order of a die
[ increasing(dice[i]) for i in range(m) ],
# nontransitivity:
[ comp[i,0] > comp[i,1] for i in range(m)],
# probability gap
[gap[i] == comp[i,0] - comp[i,1] for i in range(m)],
gap_sum == sum(gap),
)
if given_dice != "":
for i in range(m):
for j in range(n):
model += [dice[i,j] == given_dice[i][j]]
#
# Extra constraints to play with
#
    # all wins have the same value
# [ comp[i,0] == comp[i+1,0] for i in range(m-1)],
# all values of the dice are different
# AllDifferent(dice),
# calculate the number of winnings of each round
# (0 vs 1 and 1 vs 0, 1 vs 2 and 2 vs 1, ... m-1 vs 0 and 0 vs m-1)
for d in range(m):
model += [comp[d % m,0] == sum([dice[d % m, r1] > dice[(d+1) % m, r2]
for r1 in range(n) for r2 in range(n)])]
model += [ comp[d % m,1] == sum([dice[(d+1) % m, r1] > dice[(d) % m, r2]
for r1 in range(n) for r2 in range(n)])]
# Symmetry breaking: lex_less
    # Note: this doesn't work for some of the hardcoded examples.
if given_dice == "":
model += [lex_less(dice[i],dice[i+1]) for i in range(m-1)]
num_solutions = 0
ss = CPM_ortools(model)
if ss.solve():
num_solutions += 1
print("dice:\n", dice.value())
print("comp:\n", comp.value())
print("probabilities:\n", [(comp[i,0].value()/(n*n*1.0),comp[i,1].value()/(n*n*1.0)) for i in range(m)])
print("gap:", gap.value())
print("gap_sum:", gap_sum.value())
print("max_val:", max_val.value())
print("max_win:", max_win.value())
print()
# get_different_solution(ss,flatten_lists(dice))
print("num_solutions:", num_solutions)
print("status:", ss.status())
#
# Examples of nontransitive dice.
#
# Note:
# When running these, make sure that other constraints don't
# conflict with them, e.g. lex_less
#
dice_examples = {
# Testing the dice from the Wikipedia page
# 3 dice
# dice[0] == [2,2,4,4,9,9], # die A
# dice[1] == [1,1,6,6,8,8], # die B
# dice[2] == [3,3,5,5,7,7], # die C
"wikipedia": [
[2,2,4,4,9,9], # die A
[1,1,6,6,8,8], # die B
[3,3,5,5,7,7], # die C
],
# Example from Tutorial, page 32 (slide 67/175)
# dice[0] == [1,2,3,4,5,5], # die A
# dice[1] == [3,3,3,3,3,3], # die B
# dice[2] == [2,2,2,3,6,6], # die C
"tutorial" : [
[1,2,3,4,5,5], # die A
[3,3,3,3,3,3], # die B
[2,2,2,3,6,6], # die C
],
    # Efron's 4 dice; the numbers on each die are re-ordered
# (from the Wikipedia page)
# dice[0] == [0, 0, 4, 4, 4, 4], # A
# dice[1] == [3, 3, 3, 3, 3, 3], # B
# dice[2] == [2, 2, 2, 2, 6, 6], # C
# dice[3] == [1, 1, 1, 5, 5, 5], # D
"efron" : [
[0, 0, 4, 4, 4, 4], # A
[3, 3, 3, 3, 3, 3], # B
[2, 2, 2, 2, 6, 6], # C
[1, 1, 1, 5, 5, 5], # D
],
# Miwin's dice (3 dice)
# Miwin's Dice were invented in 1975 by the physicist Michael Winkelmann.
# (from the Wikipedia page)
# dice[0] == [1, 2, 5, 6, 7, 9], # III
# dice[1] == [1, 3, 4, 5, 8, 9], # IV
# dice[2] == [2, 3, 4, 6, 7, 8], # V
"mitwin" : [
[1, 2, 5, 6, 7, 9], # III
[1, 3, 4, 5, 8, 9], # IV
[2, 3, 4, 6, 7, 8], # V
]
}
num_dice = 3 # number of dice
num_sides = 6 # number of sides of each die
nontransitive_dice(num_dice,num_sides)
#
# Check all instances
#
for p in dice_examples:
print("\nproblem:", p)
t = dice_examples[p]
num_dice = len(t)
num_sides = len(t[0])
nontransitive_dice(num_dice,num_sides,t)
| 28.895 | 113 | 0.521024 |
| 4a16ac8f684328007bd4700e62cf311c55fda8e5 | 13,819 | py | Python | src/pyxer/routing.py | tml/pyxer | 4e3677b3f2c7f23ebf039a9ba9733f68a8460189 | ["MIT"] | 2 | 2016-01-25T06:01:14.000Z | 2016-02-07T20:30:25.000Z | src/pyxer/routing.py | tml/pyxer | 4e3677b3f2c7f23ebf039a9ba9733f68a8460189 | ["MIT"] | 2 | 2018-03-21T06:27:50.000Z | 2018-03-22T12:57:58.000Z | src/pyxer/routing.py | tml/pyxer | 4e3677b3f2c7f23ebf039a9ba9733f68a8460189 | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
from webob import Request
from webob import exc
from pyxer.controller import \
Controller, isController, c, g, h, config, \
session, response, request, resp, req
import re
# import urllib
import copy
import sys
import types
import os.path
import paste.fileapp
import logging
log = logging.getLogger(__name__)
# Static file handling
def static():
tail = req.urlvars["static"]
path = os.path.join(req.urlvars["pyxer.path"], tail)
# Is it a folder? Or a link?
if os.path.isdir(path) or os.path.islink(path):
if (not tail) or tail.endswith("/"):
path = os.path.join(path, "index.html")
elif tail:
location = (req.environ["PATH_INFO"] + "/")
# XXX not tested!
if request.environ.has_key("HTTP_X_FORWARDED_HOST"):
# log.debug("URL (x) %r %r", obj, request.environ["HTTP_X_FORWARDED_HOST"])
location = "http://" + request.environ["HTTP_X_FORWARDED_HOST"]
raise exc.HTTPMovedPermanently(location = location).exception
if not os.path.isfile(path):
raise exc.HTTPNotFound().exception
return paste.fileapp.FileApp(path)(request.environ, request.start_response)
static.iscontroller = True
class ModuleHook:
" Constructor and destructor for modules "
def __init__(self, module):
self.module = module
try:
module.__init__()
except:
pass
def __del__(self):
try:
self.module.__del__()
except:
pass
del self.module
var_regex = re.compile(r'''
\{ # The exact character "{"
(\w+) # The variable name (restricted to a-z, 0-9, _)
(?::([^}]+))? # The optional :regex part
\} # The exact character "}"
''', re.VERBOSE)
def template_to_regex(template, ismodule=False):
regex = ''
last_pos = 0
for match in var_regex.finditer(template):
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1)
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
if ismodule:
if not regex.endswith("\/"):
regex += "\/"
regex = '^%s' % regex
else:
regex = '^%s$' % regex
return regex
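# Hedged illustration (the template below is made up):
#   template_to_regex('{year:\d+}/{slug}')
# returns '^(?P<year>\d+)\/(?P<slug>[^/]+)$'; the named groups end up in
# req.urlvars, and with ismodule=True the pattern matches a '\/' prefix
# instead of anchoring with '$'.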
'''
def url(*segments, **vars):
base_url = get_request().application_url
path = '/'.join(str(s) for s in segments)
if not path.startswith('/'):
path = '/' + path
if vars:
path += '?' + urllib.urlencode(vars)
return base_url + path
'''
class RouteObject(object):
def __init__(self,
template,
module = None,
controller = None,
name = None,
vars = {}):
if module and controller:
raise Exception("Route to module and controller the same time is not allowed")
# log.debug("Template for routing %r", template)
self.template = re.compile(template) #template_to_regex
self.module = module
self.controller = controller
self.name = name
self.vars = copy.copy(vars)
self.vars["controller"] = self.controller
self.vars["module"] = self.module
def __repr__(self):
return "<RouteObject '%s'; pattern '%s'>" % (
self.name, self.template.pattern)
__str__ = __repr__
class Router(object):
def __init__(self, module = None, prefix = "", use_default = True, do_reload = False):
self.module = None
self.module_name = None
self.prefix = prefix
self.routes = []
self.routes_default = []
self.do_reload = do_reload
# Set first module
self.set_module(module)
# This should only apply to the first router ever
if self.module and hasattr(self.module, "router"):
self.routes = self.module.router.routes
# Default routings
if use_default:
# /
self.add_default("^$",
controller = "index",
name = "_action_index")
# /demo, /demo.html, /demo.htm, /demo.xml
self.add_default("^(?P<controller>[^\/\.]+?)(\.html?|\.xml)?$",
name = "_action")
# /demo/
self.add_default("^(?P<module>[^\/\.]+?)\/",
name = "_module")
# demo.py
self.add_default("^[^\/\.]+?\.(py[co]?)$",
controller = None,
module = None,
name = "_ignore_py")
# demo.xyz
#self.add_default("^(?P<static>[^\/\.]+?\.[^\/\.]+?)$",
# controller = "static", # "static"
# name = "_static")
# demo-xyz-abc
self.add_default("^[^\/\.]*?$",
controller = "default",
name = "_action_default")
# catch-all: first try a controller named 'static' on the module, then fall back to the file-serving static() above
self.add_default("^(?P<static>.*?)$",
controller = "static",
name = "_static_all")
self.add_default("^(?P<static>.*?)$",
controller = static,
name = "_static_all")
def init_module(self, module, hook = False):
" If needed reload a module and apply module hook if needed or forced to "
if self.do_reload:
module = reload(module)
module.__module_hook__ = ModuleHook(module)
elif hook:
module.__module_hook__ = ModuleHook(module)
return module
def load_module(self, *names):
" Load module "
name = ".".join(names)
if sys.modules.has_key(name):
return self.init_module(sys.modules[name])
try:
__import__(name)
return self.init_module(sys.modules[name], True)
except ImportError, msg:
# Try to filter import errors that are within the loaded module
if name and (not (str(msg).endswith("." + names[-1]) or str(msg).endswith(" " + names[-1]))):
log.exception("Error while importing module")
raise
return None
def set_module(self, module = None):
" Set module and its name "
if module is not None:
if isinstance(module, basestring):
self.module = self.load_module(module)
else:
self.module = module
self.module_name = self.module.__name__
return self
def add(self, template, **kw):
self.routes.append(RouteObject(template_to_regex(template, kw.get("module", None)), **kw))
return self
def add_re(self, template, **kw):
self.routes.append(RouteObject(template, **kw))
return self
def add_default(self, template, **kw):
self.routes_default.append(RouteObject(template, **kw))
return self
def match(self, path):
if path.startswith("/"):
path = path[1:]
obj, vars = self._match(path)
return obj, vars
def _match(self, path, module = None, urlvars = {}):
# Normalize module infos
self.set_module(module)
# Search
for route in self.routes + self.routes_default:
match = route.template.match(path)
# log.debug("Try to match %r %r", route, match)
if match:
urlvars = {}
urlvars.update(route.vars)
urlvars.update(match.groupdict())
tail = path[match.end():].lstrip("/")
urlvars["pyxer.tail"] = tail
urlvars["pyxer.match"] = path[match.start():match.end()]
urlvars["pyxer.path"] = os.path.dirname(os.path.abspath(self.module.__file__))
log.debug("Matched %r %r %r %r", path, route, urlvars, route.vars)
# Abort matching
if urlvars["module"] is None and urlvars["controller"] is None:
return (None, None)
# Handle module
if urlvars["module"] is not None:
obj = urlvars["module"]
# If it is a module go ahead
if isinstance(obj, types.ModuleType):
module = obj
# If it is a string it could be the name of a module to load
elif isinstance(obj, basestring):
# Load module relatively or absolute
module = (
self.load_module(self.module_name, obj)
or self.load_module(obj))
if module is None:
log.debug("Module %r not found", obj)
continue
# If it is anything else, let the caller decide what to do
else:
raise Exception("No module")
# Let's see if they need a Router()
if not hasattr(module, "router"):
module.router = Router(module)
# The router goes to the next round
return module.router._match(tail, module) #, urlvars)
# Handle controller
if urlvars["controller"] is not None:
obj = urlvars["controller"]
if isinstance(obj, basestring):
if hasattr(self.module, obj):
obj = getattr(self.module, obj)
if hasattr(obj, "iscontroller") or isController(obj):
return obj, urlvars
else:
log.debug("Object %r is not a controller", obj)
continue
return (None, None)
"""
- urlvars nicht bei modulen möglich, oder doch z.B. für sprachen?
- subdomain ermöglichen, z.b. für sprachwechsel?
- genaues macthing nicht test -> test/
- '' oder '*' einführen, steht nur alleine und heisst: der gnaze rest
- umleitung zu default static oder als parameter? ('', static)
- url_for equivalent
- benannte url schemata
- module, controller, action heissen alle object und können auch strings sein
- explizite actions in den urlvars {action:*}
- redirects, auch zu großen domains: ('google', redirect('google.com')
- auf für fehler error(404)
"""
def testing():
from pyxer.controller import getObjectsFullName, isController
static = "pyxer.routing:static"
if __name__=="__main__":
module = "__main__"
else:
module = "pyxer.routing"
data = [
("", "public:index"),
("/", "public:index"),
("index", "public:index"),
("/index", "public:index"), # slash is ignored
("index.htm", "public:index"),
("index.html", "public:index"),
("index.gif", "pyxer.routing:static", dict(static="index.gif")),
# Without slash a module is not recognized (could be handled by 'static' though)
("sub1", 'pyxer.routing:static', {'static': 'sub1'}),
# sub1
("sub1/", "public.sub1:index"),
("sub1/dummy", "public.sub1:dummy"),
("sub1/dummy2", "public.sub1:default"),
("sub1/content1", "public.sub1:content1"),
("sub1/content1/some", "public.sub1:content1", dict(name="some")),
("sub1/content2/some", "public.sub1:content2", dict(name="some")),
("sub1/content1/some/more", "public.sub1:content1", dict(name="some/more")),
("sub1/content2/some/more", 'pyxer.routing:static', {'static': 'content2/some/more'}),
# Doesn't match at all and is therefore passed to 'static'
("/some/path/index.gif", "pyxer.routing:static", dict(static="some/path/index.gif")),
# Referencing an external module
("sub1/pub2/", "public2:index", dict()),
("sub1/pub2/path/index.gif", "pyxer.routing:static", dict(static="path/index.gif")),
]
router = Router("public")
for sample in data:
if len(sample)==3:
path, object_name, object_vars = sample
else:
path, object_name = sample
object_vars = dict()
obj, vars = router.match(path)
if vars is None:
vars = dict()
else:
vars.pop("controller")
vars.pop("module")
for k in vars.keys():
if k.startswith("pyxer."):
del vars[k]
name = getObjectsFullName(obj)
ct = isController(obj)
# print "%-35r %r, %r" % (path, name, vars)
assert object_name == name
assert object_vars == vars
if __name__ == "__main__":
import sys
import os.path
sys.path.insert(0, os.path.join(__file__, "..", "..", "..", "tests"))
testing()
'''
print template_to_regex('/a/static/path')
print template_to_regex('/{year:\d\d\d\d}/{month:\d\d}/{slug}')
route('/', controller='controllers:index')
route('/{year:\d\d\d\d}/',
controller='controllers:archive')
route('/{year:\d\d\d\d}/{month:\d\d}/',
controller='controllers:archive')
route('/{year:\d\d\d\d}/{month:\d\d}/{slug}',
controller='controllers:view')
route('/post', controller='controllers:post')
'''
| 34.634085
| 105
| 0.522541
|
4a16aca81fe762cb9d203c8a36be4f65121fa05e
| 23,995
|
py
|
Python
|
geo/bms/tax.py
|
Tamlyn78/geo
|
dd63372acdd1fe8b744c05eca5ad23836e6a1604
|
[
"MIT"
] | null | null | null |
geo/bms/tax.py
|
Tamlyn78/geo
|
dd63372acdd1fe8b744c05eca5ad23836e6a1604
|
[
"MIT"
] | null | null | null |
geo/bms/tax.py
|
Tamlyn78/geo
|
dd63372acdd1fe8b744c05eca5ad23836e6a1604
|
[
"MIT"
] | null | null | null |
"""A script to first delete the contents of the new jobs module and then import data from the old jobs module"""
from os import getcwd, listdir
from os.path import isfile, join
from importlib import import_module
import psycopg2
from datetime import datetime
import pytz
import numpy as np
import pandas as pd
old_job_ids = pd.read_csv('job_order.csv')['old_id']
con = psycopg2.connect("dbname=geo4 host=10.78.81.3 user=geo password=Hk&x:dZ=wt3bq/}#")
def receipts():
years = [int(i) for i in listdir('media')]
years.sort()
for i in years:
previous_year = str(i - 1) + '-06-01'
current_year = str(i) + '-05-30'
print(previous_year + ' to ' + current_year)
cur = con.cursor()
sql = "SELECT value FROM old_receipt WHERE date BETWEEN '%s' AND '%s'"
cur.execute(sql % (previous_year, current_year))
r = [i[0] for i in cur.fetchall()]
s = sum(r)
print(s)
def upload_receipts(directory):
"""Upload a batch of receipts in a directory. Must be run in django environment"""
d = directory
lst = [join(d, i) for i in listdir(directory) if isfile(join(d, i))]
print(lst)
from old.models import Receipt
for i in lst:
r = Receipt(upload=i, date='1900-01-01', value=0.00)
#r.save()
receipts()
d = '/media/geo/admin/receipts/'
#upload_receipts(d)
exit()
class Model:
""""""
def __init__(self, app):
self.app = app
def get(self, model):
package = import_module(self.app + '.models')
m = getattr(package, model)
return(m)
def delete_content(self, model):
for i in model.objects.all():
i.delete()
def reset_sequence(self, table):
sql = """ALTER SEQUENCE %s_id_seq RESTART WITH 1"""
cur = con.cursor()
try:
cur.execute(sql % table)
con.commit()
except Exception as e:
print(e)
cur.close()
class Old(Model):
""""""
def __init__(self):
self.app = 'old'
self.model = Model(self.app)
def get_jobs(self):
model = self.model.get('Job')
jobs = [model.objects.get(id=i) for i in old_job_ids]
return(jobs)
def get_datetime(self, job):
date = job.open
y, m, d = date.year, date.month, date.day
dt = datetime(y, m, d, 9, 0, 0, 0, tzinfo=pytz.UTC)
return(dt)
def old_to_new_id(self, old_id):
o = old_job_ids
new_id = o[o==old_id].index.values[0] + 1
return(new_id)
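# Sketch of the id remapping performed by Old.old_to_new_id above: the
# position of an old id inside job_order.csv (plus one) becomes the new
# primary key. With a hypothetical ordering [7, 3, 9], old id 3 maps to 2.
_toy_order = pd.Series([7, 3, 9])
assert _toy_order[_toy_order == 3].index.values[0] + 1 == 2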
class Contact(Model):
""""""
def __init__(self):
self.app = 'contact'
self.model_list = [
'Organisation',
'Contact',
'Name',
]
self.model = Model(self.app)
self.old = Old()
def delete(self):
for i in self.model_list:
m = self.model
model = m.get(i)
m.delete_content(model)
self.reset(i)
def reset(self, model_name):
table = self.app + '_' + model_name.lower()
self.model.reset_sequence(table)
def populate(self):
for i in self.old.get_jobs():
self.new_organisation(i)
self.new_contact(i)
def new_organisation(self, old_job):
o = old_job.client.organisation
m = self.model.get('Organisation')
abbr = o.abbreviation
try:
m.objects.get(abbr=abbr)
except:
name = o.name if o.name else abbr
org = m(name=name, abbr=abbr, note=o.notes)
org.save()
dt = self.old.get_datetime(old_job)
m.objects.filter(abbr=abbr).update(timestamp=dt)
def new_contact(self, old_job):
client = old_job.client
first = client.firstname
last = client.lastname
note = client.notes
cm = self.model.get('Contact')
nm = self.model.get('Name')
try:
nm.objects.get(first=first, last=last)
except:
dt = self.old.get_datetime(old_job)
c = cm(note=note)
c.save()
cm.objects.filter(id=c.id).update(timestamp=dt)
# handle a change of name case
if last == 'Barry':
laressa = nm.objects.get(last='Berehowyj')
c = cm.objects.get(id=laressa.id)
n = nm(contact=c, first=first, last=last)
n.save()
nm.objects.filter(first=first, last=last).update(timestamp=dt)
class Element(Model):
""""""
def __init__(self):
self.app = 'element'
self.model_list = [
'Element',
'Factor'
]
self.model = Model(self.app)
self.old = Old()
def delete(self):
for i in self.model_list:
m = self.model
model = m.get(i)
m.delete_content(model)
self.reset(i)
def reset(self, model_name):
table = self.app + '_' + model_name.lower()
self.model.reset_sequence(table)
def get_df(self):
df = self.merge_elements()
df['order'] = None
for i in df.index:
self.order_element(df, i)
new_job_id = [self.old.old_to_new_id(i) for i in df['job']]
df['new_job_id'] = new_job_id
df.sort_values(['new_job_id', 'order'], inplace=True)
df.reset_index(inplace=True, drop=True)
df['idx'] = df.index.values + 1
cols = df.columns.values
corder = [10,0,9,3,4,1,5,6,2,7,8]
df = df[cols[corder]]
jobs = df['job']
for i in jobs:
try:
if i != j:
group += 1
except:
j = jobs[0]
group = 1
lst = []
j = i
lst += [group]
df['group'] = lst
df.to_csv('elements.csv', index=False)
return(df)
def merge_elements(self):
f = self.factors_to_df()
e = self.elements_to_df()
r = self.ranks_to_df()
a = pd.merge(f, e, how='outer', left_on='id', right_on='factor')
b = pd.merge(a, r, how='outer', left_on='id_y', right_on='child')
cols = b.columns.values
cols[[3,4,7]] = ['note_factor', 'element', 'note_element']
b.columns = cols
b.drop(['id_x', 'id', 'child'], axis=1, inplace=True)
return(b)
def factors_to_df(self):
m = self.old.model.get('Factor')
f = m.objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.job_id, i.label, i.notes) for i in f])
df.columns = ['id', 'job', 'label', 'note']
return(df)
def elements_to_df(self):
m = self.old.model.get('Element')
e = m.objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.factor_id, i.value, i.notes) for i in e])
df.columns = ['id', 'factor', 'value', 'note']
return(df)
def ranks_to_df(self):
m = self.old.model.get('Rank')
r = m.objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.parent_id, i.child_id) for i in r])
df.columns = ['id', 'parent', 'child']
return(df)
def order_element(self, df, i):
p = df.loc[i, 'parent']
if pd.isnull(p):
df.loc[i, 'order'] = 1
else:
order = df.loc[df['element']==int(p), 'order'] + 1
order = order.values[0]
# handle a single case where the child comes before the parent in the list
df.loc[i, 'order'] = 2 if pd.isnull(order) else order
def populate(self):
df = self.get_df()
for n, i in df.iterrows():
parent = df.loc[df['element']==i.parent]
self.create_element(n, i, parent)
def create_element(self, idx, old_dat, parent_dat):
o = old_dat
m = self.model.get('Factor')
factor = o.label
try:
f = m.objects.get(group=o.group, factor=o.label, note=o.note_factor)
except:
f = m(group=o.group, factor=o.label, note=o.note_factor)
f.save()
if len(parent_dat.index) == 1:
m = self.model.get('Element')
p_id = parent_dat.iloc[0].idx
parent = m.objects.get(id=p_id)
else:
parent = None
m = self.model.get('Element')
e = m(factor=f, value=o.value, parent=parent, note=o.note_element)
e.save()
class Sample(Model):
""""""
def __init__(self):
self.app = 'sample'
self.model_list = [
'Element',
'Factor'
]
self.model = Model(self.app)
self.old = Old()
def delete(self):
for i in self.model_list:
m = self.model
model = m.get(i)
m.delete_content(model)
self.reset(i)
def reset(self, model_name):
table = self.app + '_' + model_name.lower()
self.model.reset_sequence(table)
def populate(self):
df = self.get_factors()
for n, i in df.iterrows():
parent = df.loc[df['element']==i.parent]
self.element(n, i, parent)
class Job(Model):
""""""
def __init__(self):
self.app = 'job'
self.model_list = [
'Job',
'Title',
'Status',
'Location',
'Contact',
'Element',
]
self.model = Model(self.app)
self.old = Old()
self.elements = Element().get_df()
def delete(self):
for i in self.model_list:
m = self.model
model = m.get(i)
m.delete_content(model)
self.reset(i)
def reset(self, model_name):
table = self.app + '_' + model_name.lower()
self.model.reset_sequence(table)
def populate(self):
for i in self.old.get_jobs():
j = self.job(i)
self.title(i, j)
self.status(i, j)
self.location(i, j)
self.contact(i, j)
self.element(j)
def job(self, old_job):
""""""
o = old_job
m = self.model.get('Job')
j = m(note=o.notes)
j.save()
dt = self.old.get_datetime(o)
m.objects.filter(id=j.id).update(timestamp=dt)
return(j)
def title(self, old_job, new_job):
""""""
o, j = old_job, new_job
m = self.model.get('Title')
t = m(job=j, title=o.description)
t.save()
m.objects.filter(job_id=j.id).update(timestamp=j.timestamp)
def status(self, old_job, new_job):
o, j = old_job, new_job
m = self.model.get('Status')
s = m(job=j, status=True)
s.save()
m.objects.filter(job_id=j.id).update(timestamp=j.timestamp)
def location(self, old_job, new_job):
o, j = old_job, new_job
location = o.location
if location:
m = self.model.get('Location')
description = location.description
note = location.notes
l = m(job=j, location=description, note=note)
l.save()
m.objects.filter(job_id=j).update(timestamp=j.timestamp)
def contact(self, old_job, new_job):
o, j = old_job, new_job
client = o.client
first = client.firstname
last = client.lastname
abbr = client.organisation.abbreviation
m = Contact().model.get('Name')
name = m.objects.get(first=first, last=last)
m = Contact().model.get('Organisation')
org = m.objects.get(abbr=abbr)
m = self.model.get('Contact')
c = m(job=j, contact=name.contact, organisation=org)
c.save()
def element(self, new_job):
df = self.elements
r = df.loc[df['new_job_id']==new_job.id]
m = self.model.get('Element')
for n, i in r.iterrows():
e = m(job=new_job, element_id=i.idx)
e.save()
def receipts():
df = pd.read_csv('receipt.csv')
print(df)
from old.models import Receipt
#r = Receipt
for n, i in df.iterrows():
#print(i)
r = Receipt(upload=i.upload, date=i.date, value=i.value, description=i.description, note=i.note, category=i.category, currency=i.currency)
#r = Receipt(upload=i.upload)
#print(dir(r))
r.save()
#c = Contact()
#c.delete()
#c.populate()
#e = Element()
#e.delete()
#e.populate()
#s = Sample()
#s.delete()
#s.populate()
#j = Job()
#j.delete()
#j.populate()
receipts()
exit()
from contact.models import Organisation
from old.models import Job as OldJob, JobStatus as OldJobStatus, Location as OldLocation, Client as OldClient, Organisation as OldOrganisation
class OldModels:
models = ['Organisation', 'Client', 'Location', 'Job', 'JobStatus', 'Closure', 'Invoice', 'Quote', 'Receipt', 'Factor', 'Element', 'Rank', 'ASC', 'Sample', 'PSA']
d = dict(zip(models, [get_module(i, 'old.models') for i in models]))
job_order = pd.read_csv('job_order.csv')
old_id = job_order['old_id']
def get_id(self, i):
""""""
m = self.d['Job']
o = m.objects.get(id=i)
o.datetime = self.get_datetime(o)
return(o)
def get_datetime(self, job):
date = job.open
y, m, d = date.year, date.month, date.day
dt = datetime(y, m, d, 9, 0, 0, 0, tzinfo=pytz.UTC)
return(dt)
def factors_to_df(self):
f = self.d['Factor'].objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.job_id, i.label, i.notes) for i in f])
df.columns = ['id', 'job', 'label', 'note']
#df.to_csv('factors.csv', index=False)
return(df)
def elements_to_df(self):
e = self.d['Element'].objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.factor_id, i.value, i.notes) for i in e])
df.columns = ['id', 'factor', 'value', 'note']
#df.to_csv('elements.csv', index=False)
return(df)
def ranks_to_df(self):
r = self.d['Rank'].objects.all().order_by('id')
df = pd.DataFrame([(i.id, i.parent_id, i.child_id) for i in r])
df.columns = ['id', 'parent', 'child']
#rdf.to_csv('ranks.csv', index=False)
return(df)
def merge_elements(self):
f = self.factors_to_df()
e = self.elements_to_df()
r = self.ranks_to_df()
a = pd.merge(f, e, how='outer', left_on='id', right_on='factor')
b = pd.merge(a, r, how='outer', left_on='id_y', right_on='child')
#b.drop(['id_x'], axis=1, inplace=True)
cols = b.columns.values
cols[[3,4,7]] = ['note_factor', 'element', 'note_element']
b.columns = cols
b.drop(['id_x', 'id', 'child'], axis=1, inplace=True)
return(b)
def order_element(self, df, i):
p = df.loc[i, 'parent']
if pd.isnull(p):
df.loc[i, 'order'] = 1
else:
order = df.loc[df['element']==int(p), 'order'] + 1
order = order.values[0]
# handle a single case where the child comes before the parent in the list
df.loc[i, 'order'] = 2 if pd.isnull(order) else order
def get_factors(self):
df = self.merge_elements()
df['order'] = None
for i in df.index:
self.order_element(df, i)
new_job_id = [get_new_id(i) for i in df['job']]
df['new_job_id'] = new_job_id
df.sort_values(['new_job_id', 'order'], inplace=True)
df.reset_index(inplace=True)
df['index'] = df.index + 1
cols = df.columns.values
corder = [0,1,10,4,5,2,6,7,3,8,9]
df = df[cols[corder]]
df.loc[df['parent'].isnull(), 'parent'] = 0
lst = []
for n, i in df.iterrows():
lst += [np.nan] if i.parent == 0 else [df.loc[df['element']==i.parent, 'index'].item()]
df['new_parent'] = lst
df.to_csv('merged_elements.csv', index=False)
return(df)
class ContactTrans:
models = ['Organisation', 'Contact', 'Name']
d = dict(zip(models, [get_module(i, 'contact.models') for i in models]))
def delete(self):
for i in self.d.keys():
delete_content(self.d[i])
def reset(self):
for i in self.models:
table = 'contact_' + i.lower()
reset_sequence(table)
def contact(self, cdict):
mcont = self.d['Contact']
mname = self.d['Name']
firstname, lastname = cdict['firstname'], cdict['lastname']
try:
idx = mname.objects.get(first=firstname, last=lastname).contact
c = mcont.objects.get(id=idx)
except:
if lastname != 'Barry':
c = mcont()
c.save()
mcont.objects.filter(id=c.id).update(timestamp=cdict['datetime'])
else:
c = mname.objects.get(last='Berehowyj').contact
n = mname(contact=c, first=firstname, last=lastname)
n.save()
mname.objects.filter(first=firstname, last=lastname).update(timestamp=cdict['datetime'])
return(c)
def organisation(self, odict):
"""Add organisation if it doesn't exist."""
m = self.d['Organisation']
name = odict['name']
abbr = odict['abbr']
note = odict['note']
try:
o = m.objects.get(abbr=abbr)
except:
name = name if name else abbr
o = m(name=name, abbr=abbr, note=note)
o.save()
m.objects.filter(abbr=abbr).update(timestamp=odict['datetime'])
return(o)
class JobTrans(GetModel):
models = ['Job', 'Title', 'Status', 'Location', 'Contact']
#d = dict(zip(models, [get_module(i, 'job.models') for i in models]))
#job_order = pd.read_csv('job_order.csv')
#old_id = job_order['old_id']
#ctrans = ContactTrans()
app = 'job'
def __init__(self):
self.model = GetModel('job')
wee = self.model.get('Job')
print('weewee')
exit()
self.delete()
self.reset()
self.ctrans.delete()
self.ctrans.reset()
for i in self.old_id:
print(i)
o = OldModels().get_id(i)
j = self.job(o)
self.title(j, o)
self.status(j, o)
self.location(j, o)
self.contact(j, o)
def delete(self):
for i in self.d.keys():
delete_content(self.d[i])
def reset(self):
for i in self.models:
table = 'job_' + i.lower()
reset_sequence(table)
def job(self, old_job):
""""""
m = self.d['Job']
j = m(note=old_job.notes, directory=old_job.id)
j.save()
m.objects.filter(id=j.id).update(timestamp=old_job.datetime)
return(j)
def title(self, j, old_job):
""""""
m = self.d['Title']
t = m(job=j, title=old_job.description)
t.save()
m.objects.filter(job_id=j.id).update(timestamp=old_job.datetime)
def status(self, j, old_job):
m = self.d['Status']
s = m(job=j, status=True)
s.save()
m.objects.filter(job_id=j.id).update(timestamp=old_job.datetime)
def location(self, j, old_job):
if old_job.location:
m = self.d['Location']
l = m(job=j, location=old_job.location)
l.save()
m.objects.filter(job_id=j.id).update(timestamp=old_job.datetime)
def contact(self, j, old_job):
c = old_job.client
o = c.organisation
cdict = {
'firstname': c.firstname,
'lastname': c.lastname,
'status': c.status,
'note': c.notes,
'datetime': old_job.datetime,
'name_change': [('Barry', 'Berehowyj')],
}
cnct = self.ctrans.contact(cdict)
odict = {
'name': o.name,
'abbr': o.abbreviation,
'note': o.notes,
'datetime': old_job.datetime,
}
org = self.ctrans.organisation(odict)
m = self.d['Contact']
job_contact = m(job=j, contact=cnct, organisation=org)
job_contact.save()
m.objects.filter(job=j).update(timestamp=old_job.datetime)
class ElementTrans:
models = ['Element', 'Factor']
d = dict(zip(models, [get_module(i, 'element.models') for i in models]))
job_order = pd.read_csv('job_order.csv')
old_id = job_order['old_id']
old_models = OldModels()
def __init__(self):
self.delete()
self.reset()
df = self.subset_factors()
for n, i in df.iterrows():
p = df.loc[(df['new_job_id']==i.new_job_id) & (df['order']==i.order-1)]
parent = None if p.empty else p.id.item()
factor = self.d['Factor'](job_id=i.new_job_id, factor=i.label, parent_id=parent, note=i.note_factor)
factor.save()
df = self.subset_elements()
for n, i in df.iterrows():
f = self.d['Factor'].objects.get(job_id=i.new_job_id, factor=i.label)
e = self.d['Element'](factor=f, value=i.value, note=i.note_element)
e.save()
def get_elements(self):
df = self.old_models.get_factors()
df = df.loc[df['note_factor']!='Dont know what this job is for']
return(df)
def subset_factors(self):
df = self.get_elements()
f = df[['new_job_id', 'label', 'order', 'note_factor']].drop_duplicates()
f.reset_index(drop=True, inplace=True)
f['id'] = f.index.values + 1
return(f)
def subset_elements(self):
df = self.get_elements()
df = df[['new_job_id', 'label', 'value', 'note_element']]
return(df)
def delete(self):
for i in self.d.keys():
delete_content(self.d[i])
def reset(self):
for i in self.models:
table = 'element_' + i.lower()
reset_sequence(table)
class ElementTrans2:
models = ['Element', 'Factor']
d = dict(zip(models, [get_module(i, 'element.models') for i in models]))
job_order = pd.read_csv('job_order.csv')
old_id = job_order['old_id']
old_models = OldModels()
def __init__(self):
self.delete()
self.reset()
f = self.d['Factor'](group='wee')
exit()
df = self.subset_factors()
print(df)
exit()
for n, i in df.iterrows():
pass
#p = df.loc[(df['new_job_id']==i.new_job_id) & (df['order']==i.order-1)]
#parent = None if p.empty else p.id.item()
f = self.d['Factor']
#factor = self.d['Factor2'](group=str(i.new_job_id), factor=i.label, note=i.note_factor)
factor = f(group='poo')
#factor.save()
exit()
#df = self.subset_elements()
#for n, i in df.iterrows():
# f = self.d['Factor'].objects.get(job_id=i.new_job_id, factor=i.label)
# e = self.d['Element'](factor=f, value=i.value, note=i.note_element)
# e.save()
def get_elements(self):
df = self.old_models.get_factors()
df = df.loc[df['note_factor']!='Dont know what this job is for']
return(df)
def subset_factors(self):
df = self.get_elements()
f = df[['new_job_id', 'label', 'order', 'note_factor']].drop_duplicates()
f.reset_index(drop=True, inplace=True)
f['id'] = f.index.values + 1
return(f)
def subset_elements(self):
df = self.get_elements()
df = df[['new_job_id', 'label', 'value', 'note_element']]
return(df)
def delete(self):
for i in self.d.keys():
delete_content(self.d[i])
def reset(self):
for i in self.models:
table = 'element_' + i.lower()
reset_sequence(table)
JobTrans()
#ElementTrans()
#ElementTrans2()
con.close()
| 29.770471
| 166
| 0.537612
|
4a16ad0aa82c52f8be640b25a18f464209687b13
| 5,605
|
py
|
Python
|
codes/inertial_conversion.py
|
preetham-ganesh/multi-sensor-human-activity-recognition
|
42b491fa39fee36870e48960b96af01b836e2e9f
|
[
"MIT"
] | 1
|
2022-01-12T05:08:57.000Z
|
2022-01-12T05:08:57.000Z
|
codes/inertial_conversion.py
|
preetham-ganesh/multi-sensor-human-activity-recognition
|
42b491fa39fee36870e48960b96af01b836e2e9f
|
[
"MIT"
] | null | null | null |
codes/inertial_conversion.py
|
preetham-ganesh/multi-sensor-human-activity-recognition
|
42b491fa39fee36870e48960b96af01b836e2e9f
|
[
"MIT"
] | null | null | null |
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = 'preetham.ganesh2015@gmail.com'
import pandas as pd
import numpy as np
from scipy.io import loadmat
from skeleton_points_extraction import choose_caffe_model_files
from skeleton_points_extraction import exports_processed_data
def per_video_inertial_converter(inertial_file: np.ndarray,
skeleton_pose_model: str,
data_version: str,
modality: str,
data_name: str,
skeleton_point_information: pd.DataFrame):
"""Converts inertial information (based on model name) from the MATLAB file given as input. Adds the converted
information to the existing skeleton point information and exports the dataframe into a CSV file.
Args:
inertial_file: MATLAB inertial file for the current video.
skeleton_pose_model: Model name which will be used to import model details.
data_version: Current version of the dataframe.
modality: Current modality of the dataframe.
data_name: Name with which the dataframe should be saved.
skeleton_point_information: Current version of skeleton point information for the current video.
Returns:
None.
"""
# Imports the number of skeleton points for the skeleton_pose_model given as input.
_, _, n_skeleton_points = choose_caffe_model_files(skeleton_pose_model)
# Column names for the converted inertial information dictionary.
inertial_columns = ['acc_x', 'acc_y', 'acc_z', 'gyr_x', 'gyr_y', 'gyr_z']
inertial_information = {inertial_columns[i]: [] for i in range(len(inertial_columns))}
inertial_information['frame'] = [i for i in range(len(skeleton_point_information))]
# Iterates across the column names for inertial sensors information.
for i in range(len(inertial_columns)):
# Filters information of current inertial sensor.
current_inertial = inertial_file[:, i]
current_inertial_moving_average = []
# Iterates across the frames of current inertial sensor information and computes moving average.
for j in range(0, inertial_file.shape[0], 3):
current_inertial_moving_average.append(round(float(np.mean(current_inertial[j: j + 3])), 5))
# Truncates the moving average for the current inertial sensor to the number of skeleton frames and stores
# it in the converted-information dictionary.
inertial_information[inertial_columns[i]] = current_inertial_moving_average[:len(skeleton_point_information)]
# Converts inertial information dictionary into a pandas dataframe.
inertial_information_df = pd.DataFrame(inertial_information, columns=['frame'] + inertial_columns)
# Exports the converted inertial information into a CSV file.
exports_processed_data(inertial_information_df, data_version, modality,
                       '{}_{}'.format(data_name, skeleton_pose_model))
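# Standalone illustration (a sketch) of the block average used above: the
# inertial stream is downsampled by averaging non-overlapping windows of
# three readings, e.g. [1, 2, 3, 10, 11, 12] -> [2.0, 11.0].
_demo_signal = np.asarray([1, 2, 3, 10, 11, 12], dtype='float32')
_demo_downsampled = [round(float(np.mean(_demo_signal[j: j + 3])), 5)
                     for j in range(0, _demo_signal.shape[0], 3)]
assert _demo_downsampled == [2.0, 11.0]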
def inertial_converter(n_actions: int,
n_subjects: int,
n_takes: int,
skeleton_pose_models: list):
"""Converts MATLAB inertial information and adds them to the skeleton point information for all actions, subjects,
and takes.
Args:
n_actions: Total number of actions in the original dataset.
n_subjects: Total number of subjects in the original dataset.
n_takes: Total number of takes in the original dataset.
skeleton_pose_models: Model names which will be used to import model details.
Returns:
None.
Raises:
FileNotFoundError: If a particular video file is not found.
"""
modality = 'inertial'
data_version = 'processed_data'
# Iterates across all actions, subjects and takes in the dataset.
for i in range(1, n_actions + 1):
for j in range(1, n_subjects + 1):
for k in range(1, n_takes + 1):
for m in range(len(skeleton_pose_models)):
data_name = 'a{}_s{}_t{}'.format(i, j, k)
# Imports MATLAB inertial file and the skeleton point information for the current action, subject &
# take.
try:
inertial_file = loadmat('../data/original_data/{}/{}_{}.mat'.format(modality.title(), data_name,
modality))
skeleton_file = pd.read_csv('../data/{}/{}/{}_{}.csv'.format(data_version, 'depth', data_name,
skeleton_pose_models[m]))
per_video_inertial_converter(inertial_file['d_iner'], skeleton_pose_models[m], data_version,
modality, data_name, skeleton_file)
except FileNotFoundError:
print('Video file for {}_{} does not exist.'.format(data_name, skeleton_pose_models[m]))
print()
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
inertial_converter(n_actions, n_subjects, n_takes, skeleton_pose_models)
if __name__ == '__main__':
main()
| 47.5
| 120
| 0.62355
|
4a16ae1c0277205d1d71010d18149d292fe2ab71
| 10,115
|
py
|
Python
|
contrib/status_testing/piped_status_testing/statustest.py
|
alexbrasetvik/Piped
|
0312c14d6c4c293df378c915cc9787bcc7faed36
|
[
"MIT"
] | 3
|
2015-02-12T20:34:30.000Z
|
2016-08-06T06:54:48.000Z
|
contrib/status_testing/piped_status_testing/statustest.py
|
alexbrasetvik/Piped
|
0312c14d6c4c293df378c915cc9787bcc7faed36
|
[
"MIT"
] | null | null | null |
contrib/status_testing/piped_status_testing/statustest.py
|
alexbrasetvik/Piped
|
0312c14d6c4c293df378c915cc9787bcc7faed36
|
[
"MIT"
] | 2
|
2015-12-16T14:18:14.000Z
|
2019-04-12T01:43:10.000Z
|
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# See LICENSE for details.
""" Status tests are tests that run inside a live process to do
system-/health-checks, etc --- possibly being re-run at arbitrary
intervals.
This module is heavily based on twisted.trial, but adapted to run
inside an already running reactor.
"""
import sys
import warnings
from twisted.internet import defer, reactor, utils
from twisted.python import failure, log as twisted_log
from twisted.trial import unittest, reporter, runner, itrial, util
from piped_status_testing import util as status_util
class StatusTestSuite(unittest.TestSuite):
""" A TestSuite that runs its tests asynchronously. """
def __init__(self, namespace=None, *a, **kw):
super(StatusTestSuite, self).__init__(*a, **kw)
self.namespace = namespace or dict()
def __call__(self, namespace, result):
return self.run(result)
@defer.inlineCallbacks
def run(self, result):
for test in self._tests:
if result.shouldStop:
break
yield test(self.namespace, result)
defer.returnValue(result)
class StatusTestLoader(runner.TestLoader):
""" A test loader that looks for functions/methods with the prefix
``statustest`` and creates :class:`StatusTestSuite` instances.
"""
methodPrefix = 'statustest'
modulePrefix = '' # what modules to look for statustests in. TODO: set to statustests_ ?
test_suite_class = StatusTestSuite
def __init__(self, namespace, *a, **kw):
super(StatusTestLoader, self).__init__(*a, **kw)
self.namespace = namespace
self.suiteFactory = self.create_suite
def create_suite(self, *a, **kw):
return self.test_suite_class(self.namespace, *a, **kw)
class StreamAdapter(object):
""" An adapter for streams that ensures that the twisted logging
mechanism has a given context during each write.
If the stream does not have an ``isatty`` attribute, this adapter provides
one that returns ``False`` by default.
"""
def __init__(self, stream=sys.stdout, system='status_test', isatty=False):
"""
:param stream: A stream-like object.
:param system: The system string to tell twisted.python.log to use.
:param isatty: Used as the return value if the provided stream does not
have an ``isatty`` attribute.
"""
self._stream = stream
self._system = system
self._isatty = isatty
def __getattr__(self, item):
return getattr(self._stream, item)
def write(self, data):
twisted_log.callWithContext(dict(system=self._system), self._stream.write, data)
def isatty(self):
if hasattr(self._stream, 'isatty'):
return self._stream.isatty()
return self._isatty
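# Usage sketch (illustrative, not part of the original module): attribute
# access falls through to the wrapped stream, while every write runs inside
# the configured twisted log context.
from io import StringIO

_demo_adapter = StreamAdapter(StringIO())
_demo_adapter.write(u'hello')                  # goes through the log context
assert _demo_adapter.getvalue() == u'hello'    # __getattr__ reaches StringIO
assert _demo_adapter.isatty() is False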
class StatusReporter(reporter.TreeReporter):
""" A reporter used for StatusTests.
This reporter injects an adapted stream in order to work around
http://twistedmatrix.com/trac/ticket/3067 since we run tests inside an
already running process where logging always is configured.
"""
def __init__(self, stream=sys.stdout, **kw):
super(StatusReporter, self).__init__(stream=StreamAdapter(stream), **kw)
class ProcessorReporter(StatusReporter):
""" Reporter that can pass test results using a processor. """
def __init__(self, processor_dependency=None, *a, **kw):
super(ProcessorReporter, self).__init__(*a, **kw)
self.processor_dependency = processor_dependency
self._in_processing = []
def wait_for_result_processing(self):
self._in_processing, ds = [], self._in_processing
return defer.DeferredList(ds)
@defer.inlineCallbacks
def process_baton(self, baton):
if not self.processor_dependency:
return
processor = yield self.processor_dependency.wait_for_resource()
baton['reporter'] = self
yield processor(baton)
def addSuccess(self, test):
super(ProcessorReporter, self).addSuccess(test)
d = self.process_baton(dict(test=test, status='success'))
self._in_processing.append(d)
def addFailure(self, test, err):
super(ProcessorReporter, self).addFailure(test, err)
d = self.process_baton(dict(test=test, failure=err, status='failure'))
self._in_processing.append(d)
def addError(self, test, err):
super(ProcessorReporter, self).addError(test, err)
d = self.process_baton(dict(test=test, failure=err, status='error'))
self._in_processing.append(d)
def addSkip(self, test, err):
super(ProcessorReporter, self).addSkip(test, err)
d = self.process_baton(dict(test=test, failure=err, status='skipped'))
self._in_processing.append(d)
def addExpectedFailure(self, test, err, todo):
super(ProcessorReporter, self).addExpectedFailure(test, err, todo)
d = self.process_baton(dict(test=test, failure=err, status='expected_failure'))
self._in_processing.append(d)
def addUnexpectedSuccess(self, test, todo):
super(ProcessorReporter, self).addUnexpectedSuccess(test, todo)
d = self.process_baton(dict(test=test, todo=todo, status='todone'))
self._in_processing.append(d)
class _MethodWrapper(object):
""" A wrapper used to call a method with some injected keyword arguments. """
def __init__(self, method, namespace):
"""
:param method: The method to call
:param namespace: A dict of injected keyword arguments.
"""
self.namespace = namespace
self.method = method
def __call__(self, *a, **kw):
kw.update(self.namespace)
return self.method(*a, **kw)
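# Quick illustration (a sketch): the wrapper injects the suite's namespace
# as extra keyword arguments on every call.
def _demo_setup(runtime=None, environment=None):
    return runtime, environment

_demo_wrapped = _MethodWrapper(_demo_setup, dict(runtime='piped', environment='test'))
assert _demo_wrapped() == ('piped', 'test')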
class StatusTestCase(unittest.TestCase):
""" An asynchronous TestCase that can be run inside a running reactor. """
def _cleanUp(self, result):
# Difference from unittest.TestCase: we don't call util._Janitor(self, result).postCaseCleanup(), which would mess up the reactor
# XXX: This has the side-effect that we cannot fail test cases due to "dirty reactor state" etc.
for error in self._observer.getErrors():
result.addError(self, error)
self._passed = False
self.flushLoggedErrors()
self._removeObserver()
if self._passed:
result.addSuccess(self)
def _classCleanUp(self, result):
# Difference from unittest.TestCase: skip calling util._Janitor(self, result).postClassCleanup(), which would mess up the reactor
pass
@defer.inlineCallbacks
def run(self, namespace, result):
"""
Run the test case, storing the results in C{result}.
First runs C{setUp} on self, then runs the test method (defined in the
constructor), then runs C{tearDown}. Any of these may return
L{Deferred}s. After they complete, does some reactor cleanup.
@param result: A L{TestResult} object.
"""
# Difference from unittest.TestCase: we run in an asynchronous environment, so we yield instead of _wait
# We also inject the namespace as keyword arguments to the setUp method,
# and don't collect warnings.
setattr(self, 'setUp', _MethodWrapper(getattr(self, 'setUp'), namespace))
new_result = itrial.IReporter(result, None)
if new_result is None:
result = unittest.PyUnitResultAdapter(result)
else:
result = new_result
self._timedOut = False
result.startTest(self)
if self.getSkip(): # don't run test methods that are marked as .skip
result.addSkip(self, self.getSkip())
result.stopTest(self)
return
self._observer = unittest._logObserver
@defer.inlineCallbacks
def runThunk():
self._passed = False
self._deprecateReactor(reactor)
try:
d = self.deferSetUp(None, result)
try:
yield d
finally:
self._cleanUp(result)
self._classCleanUp(result)
finally:
self._undeprecateReactor(reactor)
yield runThunk()
result.stopTest(self)
def _run(self, methodName, result):
# Difference from unittest.TestCase: we use maybe_deferred_with_noncleaning_failure in order to avoid having
# t.i.defer mangle our locals and globals
timeout = self.getTimeout()
def onTimeout(d):
e = defer.TimeoutError("%r (%s) still running at %s secs"
% (self, methodName, timeout))
f = failure.Failure(e)
# try to errback the deferred that the test returns (for no gorram
# reason) (see issue1005 and test_errorPropagation in
# test_deferred)
try:
d.errback(f)
except defer.AlreadyCalledError:
# if the deferred has been called already but the *back chain
# is still unfinished, crash the reactor and report timeout
# error ourself.
# reactor.crash() # TODO: decide what to do wrt timeouts -- Njal
self._timedOut = True # see self._wait
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
else:
result.addError(self, f)
onTimeout = utils.suppressWarnings(
onTimeout, util.suppress(category=DeprecationWarning))
method = getattr(self, methodName)
d = status_util.maybe_deferred_with_noncleaning_failure(utils.runWithWarningsSuppressed,
self.getSuppress(), method)
call = reactor.callLater(timeout, onTimeout, d)
d.addBoth(lambda x : call.active() and call.cancel() or x)
return d
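# Minimal sketch (hypothetical, not part of the original module) of what a
# status test looks like: StatusTestLoader collects methods carrying the
# 'statustest' prefix, and StatusTestSuite injects its namespace into setUp.
import os

class ExampleStatusTest(StatusTestCase):
    def setUp(self, **namespace):
        # receives the namespace dict handed to StatusTestSuite
        self.namespace = namespace

    def statustest_tmp_is_writable(self):
        self.assertTrue(os.access('/tmp', os.W_OK))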
| 37.742537
| 137
| 0.644093
|
4a16af0d9af19f3ead6082b972d434aaef66203a
| 11,517
|
py
|
Python
|
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 199
|
2021-06-07T15:07:28.000Z
|
2022-03-31T11:53:28.000Z
|
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 98
|
2021-06-06T06:01:02.000Z
|
2022-03-31T15:48:28.000Z
|
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
froth-synthesio/PyABSA
|
61406e7a49f93f6c986dfd7e583d730b69c2861c
|
[
"MIT"
] | 55
|
2021-06-10T08:52:17.000Z
|
2022-03-31T11:08:58.000Z
|
# -*- coding: utf-8 -*-
# file: data_utils.py
# author: songyouwei <youwei0314@gmail.com>
# Copyright (C) 2018. All Rights Reserved.
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from termcolor import colored
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph, configure_spacy_model
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels, validate_example
def prepare_glove840_embedding(glove_path):
glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'
if not os.path.exists(glove_path):
os.mkdir(glove_path)
elif os.path.isfile(glove_path):
return glove_path
elif os.path.isdir(glove_path):
embedding_file = None
dir_path = os.path.dirname(glove_path)
if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
if embedding_file:
print('Find potential embedding files: {}'.format(embedding_file))
return embedding_file
zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
print('No GloVe embedding found at {},'
' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
gdd.download_file_from_google_drive(file_id=glove840_id,
dest_path=zip_glove_path,
unzip=True
)
glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_name, dat_fname)):
print('Loading tokenizer on {}'.format(os.path.join(opt.dataset_name, dat_fname)))
tokenizer = pickle.load(open(os.path.join(opt.dataset_name, dat_fname), 'rb'))
else:
text = ''
for dataset_type in dataset_list:
for file in dataset_list[dataset_type]:
fin = open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(0, len(lines), 3):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + " " + aspect + " " + text_right
text += text_raw + " "
tokenizer = Tokenizer(max_seq_len)
tokenizer.fit_on_text(text)
if not os.path.exists(os.path.join(opt.dataset_name)):
os.makedirs(os.path.join(opt.dataset_name))
pickle.dump(tokenizer, open(os.path.join(opt.dataset_name, dat_fname), 'wb'))
return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
fin = open(path, 'r', encoding='utf-8', newline='\n', errors='ignore')
word_vec = {}
for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
tokens = line.rstrip().split()
word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
if word in word2idx.keys():
word_vec[word] = np.asarray(vec, dtype='float32')
return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_name, dat_fname)):
print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_name, dat_fname)))
embedding_matrix = pickle.load(open(os.path.join(opt.dataset_name, dat_fname), 'rb'))
else:
print('Extracting embedding_matrix for {}'.format(dat_fname))
glove_path = prepare_glove840_embedding(opt.dataset_name)
opt.glove = glove_path
embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim)) # idx 0 and len(word2idx)+1 are all-zeros
word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
vec = word_vec.get(word)
if vec is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = vec
if not os.path.exists(os.path.join(opt.dataset_name)):
os.makedirs(os.path.join(opt.dataset_name))
pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_name, dat_fname), 'wb'))
return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
x = (np.ones(maxlen) * value).astype(dtype)
if truncating == 'pre':
trunc = sequence[-maxlen:]
else:
trunc = sequence[:maxlen]
trunc = np.asarray(trunc, dtype=dtype)
if padding == 'post':
x[:len(trunc)] = trunc
else:
x[-len(trunc):] = trunc
return x
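# Behaviour sketch for pad_and_truncate: shorter sequences are padded with
# `value`, longer ones are cut; 'pre'/'post' choose which side is affected.
assert list(pad_and_truncate([1, 2, 3], maxlen=5)) == [1, 2, 3, 0, 0]
assert list(pad_and_truncate([1, 2, 3], maxlen=5, padding='pre')) == [0, 0, 1, 2, 3]
assert list(pad_and_truncate([1, 2, 3, 4], maxlen=2, truncating='pre')) == [3, 4]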
class Tokenizer(object):
def __init__(self, max_seq_len, lower=True):
self.lower = lower
self.max_seq_len = max_seq_len
self.word2idx = {}
self.idx2word = {}
self.idx = 1
def fit_on_text(self, text):
if self.lower:
text = text.lower()
words = text.split()
for word in words:
if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
if self.lower:
text = text.lower()
words = text.split()
unknownidx = len(self.word2idx) + 1
sequence = [self.word2idx[w] if w in self.word2idx else unknownidx for w in words]
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
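# Usage sketch for the whitespace-level Tokenizer above: indices follow order
# of first appearance starting at 1 (0 is padding, len(word2idx) + 1 marks
# out-of-vocabulary words).
_demo_tokenizer = Tokenizer(max_seq_len=5)
_demo_tokenizer.fit_on_text('the food was great')
assert _demo_tokenizer.word2idx == {'the': 1, 'food': 2, 'was': 3, 'great': 4}
assert list(_demo_tokenizer.text_to_sequence('the food was awful')) == [1, 2, 3, 5, 0]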
class Tokenizer4Pretraining:
def __init__(self, max_seq_len, pretrained_bert_name):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
self.max_seq_len = max_seq_len
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class BERTBaselineABSADataset(Dataset):
def __init__(self, dataset_list, tokenizer, opt):
configure_spacy_model(opt)
lines = load_apc_datasets(dataset_list)
all_data = []
label_set = set()
dep_cache_path = os.path.join(os.getcwd(), '{}_dependency_cache'.format(opt.dataset_name))
if not os.path.exists(dep_cache_path):
os.mkdir(dep_cache_path)
graph_path = prepare_dependency_graph(dataset_list, dep_cache_path, opt.max_seq_len)
fin = open(graph_path, 'rb')
idx2graph = pickle.load(fin)
ex_id = 0
if len(lines) % 3 != 0:
print(colored('ERROR: one or more datasets are corrupted; the number of lines in a dataset must be a multiple of 3.', 'red'))
for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):
if lines[i].count("$T$") > 1:
continue
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + ' ' + aspect + ' ' + text_right
polarity = lines[i + 2].strip()
# polarity = int(polarity)
text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + " [SEP]")
context_indices = tokenizer.text_to_sequence(text_left + text_right)
left_indices = tokenizer.text_to_sequence(text_left)
left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + " " + aspect + " [SEP]")
right_indices = tokenizer.text_to_sequence(text_right, reverse=False)
right_with_aspect_indices = tokenizer.text_to_sequence(aspect + " " + text_right, reverse=False)
aspect_indices = tokenizer.text_to_sequence(aspect)
aspect_len = np.sum(aspect_indices != 0)
left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))
left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))
aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)
dependency_graph = np.pad(idx2graph[text_raw],
((0, max(0, opt.max_seq_len - idx2graph[text_raw].shape[0])),
(0, max(0, opt.max_seq_len - idx2graph[text_raw].shape[0]))),
'constant')
dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]
dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]
validate_example(text_raw, aspect, polarity)
data = {
'ex_id': ex_id,
'text_indices': text_indices
if 'text_indices' in opt.inputs_cols else 0,
'context_indices': context_indices
if 'context_indices' in opt.inputs_cols else 0,
'left_indices': left_indices
if 'left_indices' in opt.inputs_cols else 0,
'left_with_aspect_indices': left_with_aspect_indices
if 'left_with_aspect_indices' in opt.inputs_cols else 0,
'right_indices': right_indices
if 'right_indices' in opt.inputs_cols else 0,
'right_with_aspect_indices': right_with_aspect_indices
if 'right_with_aspect_indices' in opt.inputs_cols else 0,
'aspect_indices': aspect_indices
if 'aspect_indices' in opt.inputs_cols else 0,
'aspect_boundary': aspect_boundary
if 'aspect_boundary' in opt.inputs_cols else 0,
'dependency_graph': dependency_graph
if 'dependency_graph' in opt.inputs_cols else 0,
'polarity': polarity,
}
ex_id += 1
label_set.add(polarity)
all_data.append(data)
check_and_fix_labels(label_set, 'polarity', all_data, opt)
opt.polarities_dim = len(label_set)
self.data = all_data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
| 42.655556
| 148
| 0.623339
|
4a16b0f32a9f7a00b33ac69168b9a736901bb1cf
| 7,678
|
py
|
Python
|
basenji/dna_io.py
|
lisabang/basenji
|
f91bb195b4062c55e487a4091e13a0e813ef07d6
|
[
"Apache-2.0"
] | null | null | null |
basenji/dna_io.py
|
lisabang/basenji
|
f91bb195b4062c55e487a4091e13a0e813ef07d6
|
[
"Apache-2.0"
] | null | null | null |
basenji/dna_io.py
|
lisabang/basenji
|
f91bb195b4062c55e487a4091e13a0e813ef07d6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import random
import sys
import numpy as np
################################################################################
# io.py
#
# Methods to load the training data.
################################################################################
def dna_1hot(seq, seq_len=None, n_random=False):
""" dna_1hot
Args:
seq: nucleotide sequence.
seq_len: length to extend sequences to.
n_random: if True, give unrecognized nucleotides (e.g. N) a random one-hot position.
Returns:
seq_code: length by nucleotides array representation.
"""
if seq_len is None:
seq_len = len(seq)
seq_start = 0
else:
if seq_len <= len(seq):
# trim the sequence
seq_trim = (len(seq) - seq_len) // 2
seq = seq[seq_trim : seq_trim + seq_len]
seq_start = 0
else:
seq_start = (seq_len - len(seq)) // 2
seq = seq.upper()
# map nt's to a matrix len(seq)x4 of 0's and 1's.
seq_code = np.zeros((seq_len, 4), dtype="bool")
for i in range(seq_len):
if i >= seq_start and i - seq_start < len(seq):
nt = seq[i - seq_start]
if nt == "A":
seq_code[i, 0] = 1
elif nt == "C":
seq_code[i, 1] = 1
elif nt == "G":
seq_code[i, 2] = 1
elif nt == "T":
seq_code[i, 3] = 1
elif n_random:
ni = random.randint(0, 3)
seq_code[i, ni] = 1
return seq_code
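# Example (illustrative): 'ACGT' one-hot encodes to the 4x4 identity matrix,
# and an unrecognized nucleotide such as 'N' leaves its row all zeros unless
# n_random is set.
_demo_code = dna_1hot('ACGT')
assert (_demo_code.astype(int) == np.eye(4, dtype=int)).all()
assert dna_1hot('ACNT')[2].sum() == 0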
def dna_1hot_float(seq, seq_len=None):
""" dna_1hot
Args:
seq: nucleotide sequence.
seq_len: length to extend sequences to.
Returns:
seq_code: length by nucleotides array representation.
"""
if seq_len is None:
seq_len = len(seq)
seq_start = 0
else:
if seq_len <= len(seq):
# trim the sequence
seq_trim = (len(seq) - seq_len) // 2
seq = seq[seq_trim : seq_trim + seq_len]
seq_start = 0
else:
seq_start = (seq_len - len(seq)) // 2
seq = seq.upper()
seq = seq.replace("A", "0")
seq = seq.replace("C", "1")
seq = seq.replace("G", "2")
seq = seq.replace("T", "3")
# map nt's to a matrix len(seq)x4 of 0's and 1's.
# dtype='int8' fails for N's
seq_code = np.zeros((seq_len, 4), dtype="float16")
for i in range(seq_len):
if i < seq_start:
seq_code[i, :] = 0.25
else:
try:
seq_code[i, int(seq[i - seq_start])] = 1
except:
seq_code[i, :] = 0.25
return seq_code
def hot1_augment(Xb, fwdrc, shift):
""" Transform a batch of one hot coded sequences to augment training.
Args:
Xb: Batch x Length x 4 array
fwdrc: Boolean representing forward versus reverse complement strand.
shift: Integer shift
Returns:
Xbt: Transformed version of Xb
"""
if Xb.dtype == bool:
nval = 0
else:
nval = 1.0 / Xb.shape[2]
if shift == 0:
Xbt = Xb
elif shift > 0:
Xbt = np.zeros(Xb.shape)
# fill in left unknowns
Xbt[:, :shift, :] = nval
# fill in sequence
Xbt[:, shift:, :] = Xb[:, :-shift, :]
# e.g.
# Xbt[:,1:,] = Xb[:,:-1,:]
elif shift < 0:
Xbt = np.zeros(Xb.shape)
# fill in right unknowns
Xbt[:, shift:, :] = nval
# fill in sequence
Xbt[:, :shift, :] = Xb[:, -shift:, :]
# e.g.
# Xb_shift[:,:-1,:] = Xb[:,1:,:]
if not fwdrc:
Xbt = hot1_rc(Xbt)
return Xbt
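# Shift illustration (a sketch): a positive shift slides the batch right and
# marks the vacated left edge as unknown (all zeros for boolean codings).
_demo_batch = dna_1hot('ACGT')[np.newaxis]
_demo_shifted = hot1_augment(_demo_batch, fwdrc=True, shift=1)
assert (_demo_shifted[0, 0] == 0).all()              # left edge is unknown
assert (_demo_shifted[0, 1:] == _demo_batch[0, :-1]).all()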
def hot1_delete(seq_1hot, pos, delete_len):
""" hot1_delete
Delete "delete_len" nucleotides starting at
position "pos" in the Lx4 array "seq_1hot".
"""
# shift left
seq_1hot[pos:-delete_len, :] = seq_1hot[pos + delete_len :, :]
# e.g.
# seq_1hot[100:-3,:] = seq_1hot[100+3:,:]
# change right end to N's
if seq_1hot.dtype == bool:
nval = 0
else:
nval = 0.25
seq_1hot[-delete_len:, :] = nval
def hot1_dna(seqs_1hot):
""" Convert 1-hot coded sequences to ACGTN. """
singleton = False
if seqs_1hot.ndim == 2:
singleton = True
seqs_1hot = np.expand_dims(seqs_1hot, 0)
seqs = []
for si in range(seqs_1hot.shape[0]):
seq_list = ["A"] * seqs_1hot.shape[1]
for li in range(seqs_1hot.shape[1]):
if seqs_1hot[si, li, 0] == 1:
seq_list[li] = "A"
elif seqs_1hot[si, li, 1] == 1:
seq_list[li] = "C"
elif seqs_1hot[si, li, 2] == 1:
seq_list[li] = "G"
elif seqs_1hot[si, li, 3] == 1:
seq_list[li] = "T"
else:
seq_list[li] = "N"
seqs.append("".join(seq_list))
if singleton:
seqs = seqs[0]
return seqs
def hot1_get(seqs_1hot, pos):
""" hot1_get
Return the nucleotide corresponding to the one hot coding
of position "pos" in the Lx4 array seqs_1hot.
"""
if seqs_1hot[pos, 0] == 1:
nt = "A"
elif seqs_1hot[pos, 1] == 1:
nt = "C"
elif seqs_1hot[pos, 2] == 1:
nt = "G"
elif seqs_1hot[pos, 3] == 1:
nt = "T"
else:
nt = "N"
return nt
def hot1_insert(seq_1hot, pos, insert_seq):
""" hot1_insert
Insert "insert_seq" at position "pos" in the Lx4 array "seq_1hot".
"""
# shift right
seq_1hot[pos + len(insert_seq) :, :] = seq_1hot[pos : -len(insert_seq), :]
# e.g.
# seq_1hot[100+3:,:] = seq_1hot[100:-3,:]
# reset
seq_1hot[pos : pos + len(insert_seq), :] = 0
for i in range(len(insert_seq)):
nt = insert_seq[i]
# set
if nt == "A":
seq_1hot[pos + i, 0] = 1
elif nt == "C":
seq_1hot[pos + i, 1] = 1
elif nt == "G":
seq_1hot[pos + i, 2] = 1
elif nt == "T":
seq_1hot[pos + i, 3] = 1
else:
print("Invalid nucleotide set %s" % nt, file=sys.stderr)
def hot1_rc(seqs_1hot):
""" Reverse complement a batch of one hot coded sequences """
seqs_1hot_rc = seqs_1hot.copy()
# reverse
seqs_1hot_rc = seqs_1hot_rc[:, ::-1, :]
# seqs_1hot_rc[:,::-1,:]
# swap A and T
seqs_1hot_rc[:, :, [0, 3]] = seqs_1hot_rc[:, :, [3, 0]]
# swap C and G
seqs_1hot_rc[:, :, [1, 2]] = seqs_1hot_rc[:, :, [2, 1]]
return seqs_1hot_rc
def hot1_set(seq_1hot, pos, nt):
""" hot1_set
Set position "pos" in the Lx4 array "seqs_1hot"
to nucleotide "nt".
"""
# reset
seq_1hot[pos, :] = 0
# set
if nt == "A":
seq_1hot[pos, 0] = 1
elif nt == "C":
seq_1hot[pos, 1] = 1
elif nt == "G":
seq_1hot[pos, 2] = 1
elif nt == "T":
seq_1hot[pos, 3] = 1
else:
print("Invalid nucleotide set %s" % nt, file=sys.stderr)
def dna_rc(seq):
return seq.translate(str.maketrans("ATCGatcg", "TAGCtagc"))[::-1]
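# Added worked example (illustration only): dna_rc("AACG") -> "CGTT";
# case is preserved, e.g. dna_rc("AAcg") -> "cgTT".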
| 24.767742
| 80
| 0.514327
|
4a16b1186f2e79e8075d6a6092e5cf9132f104d8
| 703
|
py
|
Python
|
build/android/pylib/instrumentation/test_options.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5
|
2015-04-30T00:13:21.000Z
|
2019-07-10T02:17:24.000Z
|
build/android/pylib/instrumentation/test_options.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
build/android/pylib/instrumentation/test_options.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-03-27T11:15:39.000Z
|
2016-08-17T14:19:56.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the InstrumentationOptions named tuple."""
import collections
InstrumentationOptions = collections.namedtuple('InstrumentationOptions', [
'tool',
'cleanup_test_files',
'annotations',
'exclude_annotations',
'test_filter',
'test_data',
'save_perf_json',
'screenshot_failures',
'wait_for_debugger',
'coverage_dir',
'test_apk',
'test_apk_path',
'test_apk_jar_path',
'test_runner',
'test_support_apk_path',
'device_flags',
'isolate_file_path',
'set_asserts'])
| 25.107143
| 75
| 0.70128
|
4a16b1cf96bae1d59ead4e12a91c74461571e7d8
| 16,134
|
py
|
Python
|
nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py
|
fedorovgv/NeMo
|
48ff3dc75b21b09ac55d114abde2bc880c4104da
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py
|
fedorovgv/NeMo
|
48ff3dc75b21b09ac55d114abde2bc880c4104da
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py
|
fedorovgv/NeMo
|
48ff3dc75b21b09ac55d114abde2bc880c4104da
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common.megatron.fused_bias_dropout_add import bias_dropout_add_fused_inference
from nemo.collections.nlp.modules.common.megatron.language_model import Embedding
from nemo.collections.nlp.modules.common.megatron.megatron_decoders import get_decoder_model
from nemo.collections.nlp.modules.common.megatron.megatron_encoder_decoder import (
MegatronTransformerEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.megatron_encoders import get_encoder_model
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import (
ApexGuardDefaults,
build_position_ids,
init_method_normal,
parallel_lm_logits,
scaled_init_method_normal,
)
try:
from apex.transformer import tensor_parallel
from apex.transformer.enums import AttnMaskType, ModelType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
AttnMaskType = ApexGuardDefaults()
ModelType = ApexGuardDefaults()
__all__ = ["MegatronTokenLevelHead", "MegatronTokenLevelEncoderDecoderModule"]
class MegatronTokenLevelHead(MegatronModule):
"""Masked LM head for token-based encoder-decoder models (e.g., T5)
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
        parallel_output: whether the output logits stay distributed across model-parallel ranks or are gathered.
"""
def __init__(self, mpu_vocab_size, parallel_output):
super(MegatronTokenLevelHead, self).__init__()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = 1
self.parallel_output = parallel_output
def forward(self, hidden_states, word_embeddings_weight):
output = parallel_lm_logits(hidden_states, word_embeddings_weight, self.parallel_output, bias=self.bias)
return output
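def _tied_logits_sketch():
    # Hedged sketch (added for illustration; not part of NeMo): ignoring the
    # tensor-parallel plumbing inside parallel_lm_logits, the head computes
    # tied-embedding logits, i.e. hidden_states @ word_embeddings.T + bias.
    # All shapes below are made-up example values.
    s, b, h, v = 3, 2, 8, 16                    # seq len, batch, hidden, vocab
    hidden_states = torch.randn(s, b, h)
    word_embeddings_weight = torch.randn(v, h)  # shared embedding matrix
    bias = torch.zeros(v)
    return hidden_states @ word_embeddings_weight.t() + bias  # (s, b, v)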
# TODO: add soft prompts as an Embedding sub-class
class MegatronTokenLevelEncoderDecoderModule(MegatronModule):
"""Token-based (input/output is tokens) encoder-decoder model (e.g. T5 Language model.)"""
def __init__(
self,
encoder_arch,
decoder_arch,
vocab_size,
hidden_size,
max_position_embeddings,
num_layers,
num_attention_heads,
ffn_hidden_size,
apply_query_key_layer_scaling=True,
kv_channels=None,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
init_method_std=0.02,
fp16_cross_entropy=False,
use_cpu_initialization=False,
hidden_dropout=0.1,
attention_dropout=0.1,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=1,
layernorm_epsilon=1e-5,
persist_layer_norm=False,
bias_gelu_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
openai_gelu=False,
activation='gelu',
onnx_safe=False,
bias=True,
hidden_steps=-1,
hidden_blocks=1,
add_encoder=True,
add_decoder=True,
):
super(MegatronTokenLevelEncoderDecoderModule, self).__init__()
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_cross_entropy = fp16_cross_entropy
self.precision = precision
self.add_encoder = add_encoder
self.add_decoder = add_decoder
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
encoder, decoder = None, None
if add_encoder:
if pre_process:
self.encoder_embedding = Embedding(
hidden_size=hidden_size,
vocab_size=vocab_size,
max_sequence_length=max_position_embeddings,
init_method=init_method_normal(init_method_std),
num_tokentypes=num_tokentypes,
use_cpu_initialization=use_cpu_initialization,
embedding_dropout_prob=hidden_dropout,
)
self._encoder_embedding_key = "encoder_embedding"
encoder = get_encoder_model(
arch=encoder_arch,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
init_method=init_method_normal(init_method_std),
scaled_init_method=scaled_init_method_normal(init_method_std, num_layers),
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=pre_process,
post_process=post_process,
init_method_std=init_method_std,
use_cpu_initialization=use_cpu_initialization,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
layernorm_epsilon=layernorm_epsilon,
bias_gelu_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
hidden_steps=hidden_steps,
hidden_blocks=hidden_blocks,
activation=activation,
bias=bias,
parent_model_type=ModelType.encoder_and_decoder,
)
if add_decoder:
# If this is the decoder first stage
if pre_process:
# If the encoder also lies on this rank (PP = 1), then just assign embeddings directly.
if hasattr(self, 'encoder_embedding'):
self.decoder_embedding = self.encoder_embedding
else:
                    # This is the case where PP > 1 and this rank holds the first decoder stage.
                    # We initialize decoder embeddings, but zero them out since they're tied to the encoder embeddings.
# A later initialize_embedding call will synchronize the embeddings.
self.decoder_embedding = Embedding(
hidden_size=hidden_size,
vocab_size=vocab_size,
max_sequence_length=max_position_embeddings,
init_method=init_method_normal(init_method_std),
num_tokentypes=num_tokentypes,
use_cpu_initialization=use_cpu_initialization,
embedding_dropout_prob=hidden_dropout,
)
self.decoder_embedding.zero_parameters()
self._decoder_embedding_key = "decoder_embedding"
decoder = get_decoder_model(
arch=decoder_arch,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
init_method=init_method_normal(init_method_std),
scaled_init_method=scaled_init_method_normal(init_method_std, num_layers),
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=pre_process,
post_process=post_process,
init_method_std=init_method_std,
use_cpu_initialization=use_cpu_initialization,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
activations_checkpoint_method=activations_checkpoint_method,
activations_checkpoint_num_layers=activations_checkpoint_num_layers,
layernorm_epsilon=layernorm_epsilon,
bias_gelu_fusion=bias_gelu_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
hidden_steps=hidden_steps,
hidden_blocks=hidden_blocks,
activation=activation,
bias=bias,
parent_model_type=ModelType.encoder_and_decoder,
)
self.enc_dec_model = MegatronTransformerEncoderDecoderModule(encoder=encoder, decoder=decoder)
self._enc_dec_model_key = "enc_dec_model"
self.initialize_word_embeddings(
init_method=init_method_normal(init_method_std), vocab_size=vocab_size, hidden_size=hidden_size
)
if add_decoder and post_process:
self.tokens_head = MegatronTokenLevelHead(self.word_embeddings_weight().size(0), parallel_output)
self._tokens_head_key = 'tokens_head'
def set_input_tensor(self, input_tensor):
""" See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
if self.add_encoder and self.add_decoder:
assert (
len(input_tensor) == 1
), 'input_tensor should only be length 1 for stage with both encoder and decoder'
self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])
elif self.add_encoder:
assert len(input_tensor) == 1, 'input_tensor should only be length 1 for stage with only encoder'
self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])
elif self.add_decoder:
if len(input_tensor) == 2:
self.enc_dec_model.decoder.set_input_tensor(input_tensor[0])
self.enc_dec_model.encoder_hidden_state = input_tensor[1]
elif len(input_tensor) == 1:
self.enc_dec_model.decoder.set_input_tensor(None)
self.enc_dec_model.encoder_hidden_state = input_tensor[0]
else:
raise Exception('input_tensor must have either length 1 or 2')
else:
raise Exception('Stage must have at least either encoder or decoder')
def forward(
self,
enc_input_ids,
enc_attn_mask,
dec_input_ids,
dec_attn_mask,
token_type_ids=None,
labels=None,
enc_hidden_states=None,
enc_output_mask=None,
output_enc_hidden_only=False,
enc_input=None,
):
"""
Return value is per token / per dimension (i.e., non collapsed loss value)
"""
if self.pre_process and self.add_encoder:
# encoder embeddings
enc_position_ids = build_position_ids(enc_input_ids)
enc_input = self.encoder_embedding(enc_input_ids, enc_position_ids, token_type_ids=token_type_ids)
else:
enc_input = None
if output_enc_hidden_only:
enc_output = self.enc_dec_model.encode(
enc_input=enc_input, enc_attn_mask=enc_attn_mask, enc_layer_past=None, enc_get_key_value=False,
)
return enc_output
else:
if self.pre_process and self.add_decoder:
dec_position_ids = build_position_ids(dec_input_ids)
dec_input = self.decoder_embedding(dec_input_ids, dec_position_ids, token_type_ids=token_type_ids)
else:
# Note: This is when the decoder itself is split across PP ranks.
dec_input = None
output = self.enc_dec_model(
enc_input=enc_input,
enc_attn_mask=enc_attn_mask,
dec_input=dec_input,
dec_attn_mask=dec_attn_mask,
enc_layer_past=None,
enc_get_key_value=False,
enc_output=None,
dec_layer_past=None,
dec_get_key_value=False,
)
if self.post_process and self.add_decoder:
dec_output, enc_output = output
# project decoder output to vocabulary-size dimensions
token_logits = self.tokens_head(dec_output, self.word_embeddings_weight())
if labels is not None:
                # tensor_parallel.vocab_parallel_cross_entropy performs log_softmax and returns the per-token cross-entropy loss
if self.fp16_cross_entropy:
assert token_logits.dtype == torch.half
tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits, labels)
else:
tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits.float(), labels)
return tokens_loss
else:
return token_logits
elif self.add_decoder and not self.add_encoder:
decoder_output, _ = output
return decoder_output
else:
encoder_output = output
return encoder_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._encoder_embedding_key] = self.encoder_embedding.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._decoder_embedding_key] = self.decoder_embedding.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._enc_dec_model_key] = self.enc_dec_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
state_dict_[self._tokens_head_key] = self.tokens_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars
)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
        self.encoder_embedding.load_state_dict(state_dict[self._encoder_embedding_key], strict=strict)
self.decoder_embedding.load_state_dict(state_dict[self._decoder_embedding_key], strict=strict)
self.enc_dec_model.load_state_dict(state_dict[self._enc_dec_model_key], strict=strict)
self.tokens_head.load_state_dict(state_dict[self._tokens_head_key], strict=strict)
| 43.139037
| 127
| 0.653217
|
4a16b256216f86f940637176366668d99c000ed1
| 292
|
py
|
Python
|
script1.py
|
jackomo007/PortfolioFlask
|
e3b755ac6ffc98816404331270a861e5832170f7
|
[
"MIT"
] | 1
|
2020-07-07T17:19:59.000Z
|
2020-07-07T17:19:59.000Z
|
script1.py
|
jackomo007/PortfolioFlask
|
e3b755ac6ffc98816404331270a861e5832170f7
|
[
"MIT"
] | null | null | null |
script1.py
|
jackomo007/PortfolioFlask
|
e3b755ac6ffc98816404331270a861e5832170f7
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/flask")
def home():
return render_template("home.html")
@app.route("/flask/about")
def about():
return render_template("about.html")
if __name__ == "__main__":
app.run(debug=True)
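# Added usage note (illustration only): running "python script1.py" serves
# http://127.0.0.1:5000/flask and http://127.0.0.1:5000/flask/about,
# assuming home.html and about.html exist in a templates/ folder.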
| 16.222222
| 41
| 0.64726
|
4a16b28e99883740e2ff5f9f4425d18caded6e43
| 1,174
|
py
|
Python
|
src/package_controller/library/utils/git/gitignore.py
|
alexseitsinger/package_controller
|
0ee896986cfa17a96bf9fb6afff35dd97f0b1211
|
[
"BSD-2-Clause"
] | 2
|
2020-11-24T14:16:38.000Z
|
2021-03-16T19:29:45.000Z
|
src/package_controller/library/utils/git/gitignore.py
|
alexseitsinger/package_controller
|
0ee896986cfa17a96bf9fb6afff35dd97f0b1211
|
[
"BSD-2-Clause"
] | 2
|
2020-11-25T01:00:45.000Z
|
2020-11-25T01:59:58.000Z
|
src/package_controller/library/utils/git/gitignore.py
|
alexseitsinger/package_controller
|
0ee896986cfa17a96bf9fb6afff35dd97f0b1211
|
[
"BSD-2-Clause"
] | null | null | null |
import requests
import os
GITHUB_URL = "https://raw.githubusercontent.com/github/gitignore/main/{}.gitignore"
def get_gitignore_for_language(language, timeout=5.0):
try:
language = language.capitalize()
url = GITHUB_URL.format(language)
response = requests.get(url, timeout=timeout)
if not str(response.status_code).startswith("2"):
raise requests.HTTPError(
"Failed to get gitignore for {}.\n\n{}\n\n{}: {}".format(
language, url, response.status_code, response.text
)
)
return response.text
except requests.Timeout:
raise requests.Timeout(
"Failed to connect to {} within {} seconds.".format(url, timeout)
)
def set_gitignore_for_language(language, replace=False):
path = os.path.join(os.getcwd(), ".gitignore")
if os.path.isfile(path):
if replace is False:
raise FileExistsError("There is already a .gitignore file.")
print("Removing existing .gitignore.")
os.remove(path)
with open(path, "w") as f:
f.write(get_gitignore_for_language(language))
return path
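if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): fetch GitHub's Python
    # template and write it to ./.gitignore, overwriting any existing file.
    print(set_gitignore_for_language("python", replace=True))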
| 32.611111
| 78
| 0.623509
|
4a16b43c52af654be4aa50d3d44dbfd167b20896
| 9,683
|
py
|
Python
|
docs/source/conf.py
|
KnowEnG-Research/Feature_Prioritization_Pipeline
|
6185dfb70c8941e0526026a063a2caf4f0a071d4
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
KnowEnG-Research/Feature_Prioritization_Pipeline
|
6185dfb70c8941e0526026a063a2caf4f0a071d4
|
[
"MIT"
] | 1
|
2019-01-24T14:47:59.000Z
|
2019-01-24T18:00:24.000Z
|
docs/source/conf.py
|
KnowEnG-Research/Feature_Prioritization_Pipeline
|
6185dfb70c8941e0526026a063a2caf4f0a071d4
|
[
"MIT"
] | 3
|
2017-12-13T17:06:38.000Z
|
2019-01-23T18:30:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# feature_prioritization_pipeline documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 17 13:35:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'feature_prioritization_pipeline'
copyright = '2016, knoweng_team'
author = 'knoweng_team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'feature_prioritization_pipeline v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'feature_prioritization_pipelinedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'feature_prioritization_pipeline.tex', 'feature\\_prioritization\\_pipeline Documentation',
'knoweng\\_team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'feature_prioritization_pipeline', 'feature_prioritization_pipeline Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'feature_prioritization_pipeline', 'feature_prioritization_pipeline Documentation',
author, 'feature_prioritization_pipeline', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 33.274914
| 108
| 0.724259
|
4a16b59798e5030a2a2f4f0b6c83e7526f2f199c
| 1,485
|
py
|
Python
|
2021/day09/python/part2.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
2021/day09/python/part2.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
2021/day09/python/part2.py
|
jmkacz/practice-advent-of-code
|
c06f474576e91ed0778c8a30a51bad848a602eb6
|
[
"MIT"
] | null | null | null |
import math
from typing import List, Tuple
def find_low_points(lines: List[str]) -> List[Tuple[int, int]]:
result = []
rows = len(lines)
cols = len(lines[0])
for r in range(rows):
for c in range(cols):
# up
if r > 0 and lines[r - 1][c] <= lines[r][c]:
continue
# down
if r < rows - 1 and lines[r + 1][c] <= lines[r][c]:
continue
# left
if c > 0 and lines[r][c - 1] <= lines[r][c]:
continue
# right
if c < cols - 1 and lines[r][c + 1] <= lines[r][c]:
continue
result.append((r, c))
return result
def compute_answer(lines: List[str]) -> int:
result = 0
basins: List[int] = []
rows = len(lines)
cols = len(lines[0])
visited = [[False] * cols for _ in range(rows)]
low_points = find_low_points(lines)
for low_point in low_points:
q = [low_point]
basin = 0
while q:
(r, c) = q.pop()
if r < 0 or r >= rows or c < 0 or c >= cols:
continue
if visited[r][c]:
continue
if lines[r][c] == "9":
continue
visited[r][c] = True
basin += 1
q.extend([(r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)])
basins = sorted(basins + [basin], reverse=True)[0:3]
result = math.prod(basins)
return result
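if __name__ == "__main__":
    # Hedged sketch (added for illustration): the AoC 2021 day 9 sample
    # grid; the three largest basins have sizes 14, 9 and 9, so the
    # expected product is 1134.
    sample = [
        "2199943210",
        "3987894921",
        "9856789892",
        "8767896789",
        "9899965678",
    ]
    assert compute_answer(sample) == 1134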
| 26.517857
| 70
| 0.453199
|
4a16b5b91287a989b08f209fcba004257d253c02
| 1,694
|
py
|
Python
|
python/caffe/test/test_io.py
|
Jiawei-Gu/caffe_gu
|
4b14878abf6cc9c73bdcc88245d546d2512429df
|
[
"BSD-2-Clause"
] | 36,275
|
2015-01-01T01:59:21.000Z
|
2022-03-31T22:23:56.000Z
|
python/caffe/test/test_io.py
|
wangrui1996/caffeface
|
feeb1d7c40e4a065c947933d6fab6fb218449551
|
[
"Intel",
"BSD-2-Clause"
] | 5,493
|
2015-01-01T09:07:53.000Z
|
2022-03-31T10:19:53.000Z
|
python/caffe/test/test_io.py
|
wangrui1996/caffeface
|
feeb1d7c40e4a065c947933d6fab6fb218449551
|
[
"Intel",
"BSD-2-Clause"
] | 18,620
|
2015-01-01T01:40:01.000Z
|
2022-03-31T11:17:59.000Z
|
import numpy as np
import unittest
import caffe
class TestBlobProtoToArray(unittest.TestCase):
def test_old_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
shape = (1,1,10,10)
blob.num, blob.channels, blob.height, blob.width = shape
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, shape)
def test_new_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
blob.shape.dim.extend(list(data.shape))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, data.shape)
def test_no_shape(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
with self.assertRaises(ValueError):
caffe.io.blobproto_to_array(blob)
def test_scalar(self):
data = np.ones((1)) * 123
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr, 123)
class TestArrayToDatum(unittest.TestCase):
def test_label_none_size(self):
# Set label
d1 = caffe.io.array_to_datum(
np.ones((10,10,3)), label=1)
# Don't set label
d2 = caffe.io.array_to_datum(
np.ones((10,10,3)))
# Not setting the label should result in a smaller object
self.assertGreater(
len(d1.SerializeToString()),
len(d2.SerializeToString()))
| 29.719298
| 65
| 0.621015
|
4a16b67dbc05077a122bacdd72ef56f78bf961bc
| 8,108
|
py
|
Python
|
create_template.py
|
DJClean/vmware_ipxe
|
1c5f4c7abfe7b968460ebdf82d5640ac62aea193
|
[
"MIT"
] | null | null | null |
create_template.py
|
DJClean/vmware_ipxe
|
1c5f4c7abfe7b968460ebdf82d5640ac62aea193
|
[
"MIT"
] | null | null | null |
create_template.py
|
DJClean/vmware_ipxe
|
1c5f4c7abfe7b968460ebdf82d5640ac62aea193
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import sys
import sqlite3
import re
from string import Template
import ipaddress
inputfile = None
templatefolder = None
outputfolder = None
def main(arguments):
global inputfile
global templatefolder
global outputfolder
inputfile = arguments.inputfile
templatefolder = arguments.templatefolder
outputfolder = arguments.outputfolder
    if inputfile is None or not os.path.exists(inputfile):
        print("Database file (%s) does not exist" % (inputfile))
        sys.exit(1)
    if not os.path.isdir(templatefolder):
        print("Template folder (%s) does not exist" % (templatefolder))
        sys.exit(1)
    if not os.path.isdir(outputfolder):
        print("Output folder (%s) does not exist, creating!" % (outputfolder))
        os.mkdir(outputfolder)
#inputfile = '/home/dennis/git/vmware_pxe_tools/db/test.db'
conn, c = connect(inputfile)
filepath = '%s/boot' % outputfolder
if not os.path.isdir(filepath):
print("Boot folder (%s) does not exist, creating!" % (filepath))
os.mkdir(filepath)
menu_file = open('%s/menu.ipxe' % (filepath), 'w')
for items in build_menu(c):
menu_file.write(items)
menu_file.close()
close(conn)
def build_menu(cursor):
query = cursor.execute("SELECT DISTINCT(vcenter) FROM hosts")
template_menu_start = Template(open("%s/ipxe/00-main-start.menu"
% templatefolder).read())
template_menu_end = Template(open("%s/ipxe/01-main-end.menu"
% templatefolder).read())
d = {
'VCENTER': 'unused'
}
items = []
items.append(template_menu_start.template)
result = query.fetchall()
menu_items = []
for row in result:
vcentername = row[0]
for menuitem in build_menu_vcenter(cursor, vcentername):
menu_items.append(menuitem)
items.append('item menu-%s %s\n' % (parse_name(vcentername), vcentername))
items.append(template_menu_end.template)
for item in menu_items:
items.append(item)
return items
def build_menu_vcenter(cursor, vcenter):
#print("vCenter Menu: %s" % (vcenter))
query = cursor.execute("SELECT DISTINCT(cluster) FROM hosts \
WHERE vcenter = ? \
ORDER BY cluster ASC",
(vcenter, ))
template_vcenter_start = Template(open("%s/ipxe/02-menu-vcenter-start.menu"
% templatefolder).read())
template_vcenter_end = Template(open("%s/ipxe/03-menu-vcenter-end.menu"
% templatefolder).read())
d = {
'VCENTER': parse_name(vcenter),
'VCENTERNAME': vcenter
}
items = []
items.append(template_vcenter_start.substitute(d))
result = query.fetchall()
cluster_items = []
for row in result:
clustername = row[0]
for cluster in build_menu_cluster(cursor, clustername):
cluster_items.append(cluster)
items.append('item menu-%s %s\n' % (parse_name(clustername), clustername))
items.append(template_vcenter_end.substitute(d))
for item in cluster_items:
items.append(item)
return items
def build_menu_cluster(cursor, cluster):
#print("Cluster Menu: %s" % (cluster))
query = cursor.execute("SELECT DISTINCT(vcenter) FROM hosts \
WHERE cluster = ? \
ORDER BY host ASC",
(cluster, ))
result = query.fetchone()
vcenter = result[0]
template_cluster_start = Template(open("%s/ipxe/04-menu-cluster-start.menu"
% templatefolder).read())
template_cluster_end = Template(open("%s/ipxe/05-menu-cluster-end.menu"
% templatefolder).read())
d = {
'CLUSTER': cluster,
'PARSEABLE': parse_name(cluster),
'VCENTER': parse_name(vcenter),
'VCENTERNAME': vcenter
}
items = []
items.append(template_cluster_start.substitute(d))
query = cursor.execute("SELECT host FROM hosts \
WHERE cluster = ? \
ORDER BY host ASC",
(cluster, ))
result = query.fetchall()
host_items = []
for row in result:
host = row[0]
items.append('item esx-%s Install %s\n' % (parse_name(strip_name(host)), host))
host_items.append(build_menu_host(cursor, host))
items.append(template_cluster_end.substitute(d))
for item in host_items:
items.append(item)
return items
def build_menu_host(cursor, host):
#print("Processing: %s" % (host))
template_host = Template(open("%s/ipxe/06-menu-host.menu" % templatefolder).read())
query = cursor.execute("SELECT version, vlan, cluster, vcenter FROM hosts WHERE host = ?", (host, ))
result = query.fetchone()
version = result[0]
vlan = result[1]
cluster = result[2]
vcenter = result[3]
query = cursor.execute("SELECT bootnetwork FROM vcenters WHERE vcenter = ?", (vcenter, ))
result = query.fetchone()
bootnetwork = result[0]
d = {
'HOST': host,
'VERSION': version,
'VLAN': vlan,
'CLUSTER': parse_name(cluster),
'PARSEABLE': parse_name(strip_name(host)),
'BOOTNETWORK': bootnetwork
}
s = template_host.substitute(d)
query = cursor.execute("SELECT host, ip, gateway, dns, vlan, vmnic FROM hosts \
WHERE host = ?", (host, ))
result = query.fetchone()
host = result[0]
ip = ipaddress.ip_interface(result[1])
ipaddr = ip.ip
netmask = ip.netmask
gateway = result[2]
dns = result[3]
vlan = result[4]
vmnic1 = result[5].split(',')[0]
vmnic2 = result[5].split(',')[1]
template_kickstart = Template(open("%s/kickstart/default.ks" % templatefolder).read())
d = {
'VMHOST': host,
'IPADDRESS': ipaddr,
'NETMASK': netmask,
'GATEWAY': gateway,
'VLAN': vlan,
'DNS': dns,
'UPLINK0': vmnic1,
'UPLINK1': vmnic2
}
ks = template_kickstart.substitute(d)
filepath = '%s/kickstart' % outputfolder
if not os.path.isdir(filepath):
print("Kickstart folder (%s) does not exist, creating!" % (filepath))
os.mkdir(filepath)
    kickstart_file = open('%s/%s.ks' % (filepath, parse_name(strip_name(host))), 'w')
kickstart_file.write(ks)
kickstart_file.close()
return s
def parse_name(name):
parsed = re.sub('[^a-zA-Z0-9 \n]', '', name)
return parsed
def strip_name(name):
stripped = name.split('.', 1)[0]
return stripped
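# Added worked examples (illustration only; hostname is hypothetical):
#   parse_name("esx-01.example.com") -> "esx01examplecom"  (punctuation stripped)
#   strip_name("esx-01.example.com") -> "esx-01"           (domain dropped)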
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file",
action="store",
dest="inputfile",
metavar="DATABASEFILE",
default='db/ipxe.db',
required=True,
help="Database file")
parser.add_argument("-t", "--templates",
action="store",
dest="templatefolder",
default="templates/",
required=True,
help="Folder containing all template files")
parser.add_argument("-o", "--output",
action="store",
dest="outputfolder",
default="output/",
help="Folder where output files will be written")
args = parser.parse_args()
main(args)
# ipaddress.ip_interface('ip/mask')
| 27.958621
| 104
| 0.575234
|
4a16b834be526461d1aa961bbd85cbd58928e855
| 905
|
py
|
Python
|
src/lightmlboard/metrics/__init__.py
|
sdpython/lightmlboard
|
15e3f9522e2b5f5ef9d358d2d42b9c1f271fc143
|
[
"MIT"
] | null | null | null |
src/lightmlboard/metrics/__init__.py
|
sdpython/lightmlboard
|
15e3f9522e2b5f5ef9d358d2d42b9c1f271fc143
|
[
"MIT"
] | 1
|
2018-04-19T19:58:08.000Z
|
2021-12-29T10:58:07.000Z
|
src/lightmlboard/metrics/__init__.py
|
sdpython/lightmlboard
|
15e3f9522e2b5f5ef9d358d2d42b9c1f271fc143
|
[
"MIT"
] | null | null | null |
"""
@file
@brief Implements metrics.
"""
import sklearn.metrics as skmetrics
from .classification import roc_auc_score_micro, roc_auc_score_macro, reshape, multi_label_jaccard
from .regression import mse
from .regression_custom import l1_reg_max
def sklearn_metric(met, exp, val):
"""
Looks into metrics available in
:epkg:`scikit-learn:metrics`.
@param met function name
@param exp expected values
@param val values
@return number
"""
if isinstance(val, str):
raise TypeError("val must be a container of floats")
if isinstance(exp, str):
raise TypeError("exp must be a container of floats")
if hasattr(skmetrics, met):
f = getattr(skmetrics, met)
exp, val = reshape(exp, val)
return f(exp, val)
else:
raise AttributeError("Unable to find metric '{0}'.".format(met))
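# Hedged usage sketch (added for illustration; assumes reshape() passes
# plain 1-D label sequences through unchanged):
#
#     sklearn_metric("accuracy_score", [0, 1, 1, 0], [0, 1, 0, 0])   # -> 0.75
#     sklearn_metric("no_such_metric", [0], [0])   # raises AttributeError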
| 29.193548
| 98
| 0.651934
|
4a16b9101a16587e7a3cb0f1054b536c88f74db2
| 14,611
|
py
|
Python
|
src/utils/fixtures_utils.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
src/utils/fixtures_utils.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
src/utils/fixtures_utils.py
|
maticardenas/football_api_notif
|
81f9e265d4effb7545e3d9ad80ee1109cd9b8edf
|
[
"MIT"
] | null | null | null |
import re
import urllib
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from urllib.error import HTTPError
from deep_translator import GoogleTranslator
from sqlmodel import select
from src.api.fixtures_client import FixturesClient
from src.api.images_search_client import ImagesSearchClient
from src.api.videos_search_client import VideosSearchClient
from src.api.youtube_search_client import YoutubeSearchClient
from src.db.db_manager import NotifierDBManager
from src.db.notif_sql_models import Fixture as DBFixture
from src.db.notif_sql_models import League as DBLeague
from src.db.notif_sql_models import Team as DBTeam
from src.entities import (
Championship,
Fixture,
FixtureForDB,
LineUp,
MatchHighlights,
MatchScore,
Player,
Team,
TeamStanding,
)
from src.utils.date_utils import TimeZones, get_time_in_time_zone
from src.utils.message_utils import TEAMS_ALIASES
def get_team_aliases(team_id: str) -> list:
return TEAMS_ALIASES.get(team_id, [])
def get_champions_league_fixtures(
all_team_fixtures: Dict[str, Any]
) -> List[Dict[str, str]]:
return [
fixture
for fixture in all_team_fixtures["response"]
if fixture["league"]["id"] == 2
]
def date_diff(date: str) -> datetime:
return datetime.strptime(date[:-6], "%Y-%m-%dT%H:%M:%S") - datetime.utcnow()
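# Added note (illustration only): date_diff expects an ISO timestamp with a
# trailing UTC offset, e.g. "2030-01-01T00:00:00+00:00"; the [:-6] slice
# drops the offset before parsing. Future kickoffs yield a positive
# timedelta and past ones a negative timedelta, which is what the
# next/last fixture helpers below rely on.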
def get_next_fixture(
team_fixtures: List[Dict[str, Any]], team_id: str
) -> Optional[Fixture]:
min_fixture = None
min_diff = 999999999
for fixture in team_fixtures:
fixture_date_diff = int(date_diff(fixture["fixture"]["date"]).total_seconds())
if not min_fixture and fixture_date_diff >= 0:
min_fixture = fixture
min_diff = fixture_date_diff
if fixture_date_diff >= 0 and (fixture_date_diff < min_diff):
min_fixture = fixture
min_diff = fixture_date_diff
return (
convert_fixture_response(min_fixture, min_diff, team_id)
if min_fixture
else None
)
def get_next_fixture_db(team_fixtures: List[DBFixture]) -> Optional[DBFixture]:
min_fixture = None
min_diff = 999999999
for fixture in team_fixtures:
fixture_date_diff = int(date_diff(fixture.utc_date).total_seconds())
if not min_fixture and fixture_date_diff >= 0:
min_fixture = fixture
min_diff = fixture_date_diff
if fixture_date_diff >= 0 and (fixture_date_diff < min_diff):
min_fixture = fixture
min_diff = fixture_date_diff
return convert_db_fixture(min_fixture) if min_fixture else None
def get_last_fixture_db(team_fixtures: List[DBFixture]) -> Optional[Fixture]:
min_fixture = None
min_diff = -999999999
for fixture in team_fixtures:
fixture_date_diff = int(date_diff(fixture.utc_date).total_seconds())
if not min_fixture and fixture_date_diff < 0:
min_fixture = fixture
min_diff = fixture_date_diff
if fixture_date_diff < 0 and (fixture_date_diff > min_diff):
min_fixture = fixture
min_diff = fixture_date_diff
return convert_db_fixture(min_fixture) if min_fixture else None
def get_last_fixture(
team_fixtures: List[Dict[str, Any]], team_id: str
) -> Optional[Fixture]:
min_fixture = None
min_diff = -999999999
for fixture in team_fixtures:
fixture_date_diff = int(date_diff(fixture["fixture"]["date"]).total_seconds())
if not min_fixture and fixture_date_diff < 0:
min_fixture = fixture
min_diff = fixture_date_diff
if fixture_date_diff < 0 and (fixture_date_diff > min_diff):
min_fixture = fixture
min_diff = fixture_date_diff
return (
convert_fixture_response(min_fixture, min_diff, team_id)
if min_fixture
else None
)
def get_team_standings_for_league(team_standings: dict, league_id: int) -> TeamStanding:
for team_standing in team_standings:
if team_standing["league"]["id"] == league_id:
return __convert_standing_response(team_standing)
def __convert_standing_response(team_standing: dict) -> TeamStanding:
standing_desc = team_standing["league"]["standings"][0][0]
return TeamStanding(
Championship(
team_standing["league"]["id"],
team_standing["league"]["name"],
team_standing["league"]["country"],
team_standing["league"]["logo"],
),
standing_desc["rank"],
standing_desc["points"],
standing_desc["goalsDiff"],
standing_desc["description"],
)
def convert_db_fixture(fixture: DBFixture) -> Fixture:
utc_date = datetime.strptime(fixture.utc_date[:-6], "%Y-%m-%dT%H:%M:%S")
ams_date = get_time_in_time_zone(utc_date, TimeZones.AMSTERDAM)
bsas_date = get_time_in_time_zone(utc_date, TimeZones.BSAS)
# league_name, round_name = __get_translated_league_name_and_round(fixture)
notifier_db_manager = NotifierDBManager()
league: DBLeague = notifier_db_manager.select_records(
select(DBLeague).where(DBLeague.id == fixture.league)
)[0]
home_team: DBTeam = notifier_db_manager.select_records(
select(DBTeam).where(DBTeam.id == fixture.home_team)
)[0]
away_team: DBTeam = notifier_db_manager.select_records(
select(DBTeam).where(DBTeam.id == fixture.away_team)
)[0]
return Fixture(
fixture.id,
utc_date,
ams_date,
bsas_date,
int(date_diff(fixture.utc_date).total_seconds()),
"",
"",
Championship(
league.id,
league.name,
league.country,
league.logo,
),
fixture.round,
Team(
home_team.id,
home_team.name,
home_team.picture,
get_team_aliases(str(home_team.id)),
),
Team(
away_team.id,
away_team.name,
away_team.picture,
get_team_aliases(str(away_team.id)),
),
MatchScore(fixture.home_score, fixture.away_score),
# get_line_up(fixture_response["fixture"]["id"], team_id),
)
def convert_fixture_response(
fixture_response: Dict[str, Any], date_diff: int, team_id: str = 1
) -> Fixture:
utc_date = datetime.strptime(
fixture_response["fixture"]["date"][:-6], "%Y-%m-%dT%H:%M:%S"
)
ams_date = get_time_in_time_zone(utc_date, TimeZones.AMSTERDAM)
bsas_date = get_time_in_time_zone(utc_date, TimeZones.BSAS)
league_name, round_name = __get_translated_league_name_and_round(fixture_response)
home_team_id = fixture_response["teams"]["home"]["id"]
away_team_id = fixture_response["teams"]["away"]["id"]
return Fixture(
fixture_response["fixture"]["id"],
utc_date,
ams_date,
bsas_date,
date_diff,
fixture_response["fixture"]["referee"],
fixture_response["fixture"]["status"]["long"],
Championship(
fixture_response["league"]["id"],
league_name,
fixture_response["league"]["country"],
fixture_response["league"]["logo"],
),
round_name,
Team(
home_team_id,
fixture_response["teams"]["home"]["name"],
fixture_response["teams"]["home"]["logo"],
get_team_aliases(str(home_team_id)),
),
Team(
away_team_id,
fixture_response["teams"]["away"]["name"],
fixture_response["teams"]["away"]["logo"],
get_team_aliases(str(away_team_id)),
),
MatchScore(
fixture_response["goals"]["home"], fixture_response["goals"]["away"]
),
# get_line_up(fixture_response["fixture"]["id"], team_id),
)
def convert_fixture_response_to_db(fixture_response: Dict[str, Any]) -> Fixture:
league_name, round_name = __get_translated_league_name_and_round(fixture_response)
home_team_id = fixture_response["teams"]["home"]["id"]
away_team_id = fixture_response["teams"]["away"]["id"]
return FixtureForDB(
fixture_response["fixture"]["id"],
fixture_response["fixture"]["date"],
        int(date_diff(fixture_response["fixture"]["date"]).total_seconds()),
fixture_response["fixture"]["referee"],
fixture_response["fixture"]["status"]["long"],
Championship(
fixture_response["league"]["id"],
league_name,
fixture_response["league"]["country"],
fixture_response["league"]["logo"],
),
round_name,
Team(
home_team_id,
fixture_response["teams"]["home"]["name"],
fixture_response["teams"]["home"]["logo"],
get_team_aliases(str(home_team_id)),
),
Team(
away_team_id,
fixture_response["teams"]["away"]["name"],
fixture_response["teams"]["away"]["logo"],
get_team_aliases(str(away_team_id)),
),
MatchScore(
fixture_response["goals"]["home"], fixture_response["goals"]["away"]
),
)
def __get_translated_league_name_and_round(
fixture_response: Dict[str, Any]
) -> Tuple[str, str]:
if __is_team_or_league_for_spanish_translation(fixture_response):
google_translator = GoogleTranslator(source="en", target="es")
league_name = google_translator.translate(fixture_response["league"]["name"])
round_name = google_translator.translate(fixture_response["league"]["round"])
else:
league_name = fixture_response["league"]["name"]
round_name = fixture_response["league"]["round"]
return (league_name, round_name)
def __is_team_or_league_for_spanish_translation(
fixture_response: Dict[str, Any]
) -> bool:
return fixture_response["league"][
"country"
].lower() == "argentina" or __teams_contain(fixture_response, "argentina")
def __teams_contain(fixture_response: Dict[str, Any], text: str) -> bool:
return any(
[
team_name
for team_name in [
fixture_response["teams"]["home"]["name"],
fixture_response["teams"]["away"]["name"],
]
if text in team_name.lower()
]
)
def get_image_search(query: str) -> str:
image_searcher = ImagesSearchClient()
images = image_searcher.get_images(query)
json_response = images.as_dict
for image in json_response["value"]:
url = image["contentUrl"]
if is_url_reachable(url):
return url
return ""
def is_url_reachable(url: str) -> bool:
try:
response_code = urllib.request.urlopen(url).getcode()
except HTTPError:
print(f"The image url {url} is NOT reachable.")
return False
return response_code == 200
def get_match_highlights(fixture: Fixture) -> List[MatchHighlights]:
videos_search_client = VideosSearchClient()
latest_videos = videos_search_client.search_football_videos()
match_highlights = []
for match in latest_videos.as_dict:
if is_corresponding_match_highlights(
fixture.home_team, fixture.away_team, match["title"]
):
if -3 <= date_diff(match["date"]).days <= 0:
match_highlights = search_highlights_videos(match)
break
return [convert_match_highlights(highlights) for highlights in match_highlights]
def is_corresponding_match_highlights(
home_team: Team, away_team: Team, match_title: str
) -> bool:
return (
home_team.name.lower() in match_title.lower()
or away_team.name.lower() in match_title.lower()
or any(
[
team_alias.lower() == match_title.lower()
for team_alias in home_team.aliases + away_team.aliases
]
)
)
def convert_match_highlights(highlights: dict) -> MatchHighlights:
url_match = re.search("http.*?'", highlights["embed"])
highlights_url = highlights["embed"][url_match.span()[0] : url_match.span()[1] - 1]
return MatchHighlights(highlights_url, highlights["embed"])
def search_highlights_videos(match_response):
return [
video for video in match_response["videos"] if video["title"] == "Highlights"
]
def get_youtube_highlights_videos(
home_team: Team, away_team: Team, number_of_options=3
) -> List[str]:
youtube_client = YoutubeSearchClient()
response = youtube_client.search_videos_by_keywords(
[home_team.name, away_team.name, "resumen", "jugadas"], "es", "ar"
)
json_response = response.as_dict
video_highlights = []
options_selected = 0
try:
for item in json_response["items"]:
title = item["snippet"]["title"]
home_team_words = home_team.name.lower().split(" ")
away_team_words = away_team.name.lower().split(" ")
if (
any(ht_word in title.lower() for ht_word in home_team_words)
or any(alias.lower() in title.lower() for alias in home_team.aliases)
) and (
any(at_word in title.lower() for at_word in away_team_words)
or any(alias.lower() in title.lower() for alias in away_team.aliases)
):
video_highlights.append(item["url"])
options_selected += 1
if options_selected >= number_of_options:
break
except Exception as e:
print(f"There was an issue retrieving video highlights. Error: {e}")
return video_highlights
def get_line_up(fixture_id: str, team_id: str) -> Optional[LineUp]:
fixture_client = FixturesClient()
response = fixture_client.get_line_up(fixture_id, team_id)
json_response = response.as_dict["response"]
line_up = None
if json_response:
if "startXI" in json_response[0]:
start_xi = json_response[0]["startXI"]
line_up = LineUp(
formation=json_response[0]["formation"],
goalkeeper=get_players(start_xi, "G")[0],
defenders=get_players(start_xi, "D"),
midfielders=get_players(start_xi, "M"),
forward_strikers=get_players(start_xi, "F"),
)
return line_up
def get_players(start_xi: dict, position: str) -> List[Player]:
return [
Player(
player["player"]["id"], player["player"]["name"], player["player"]["pos"]
)
for player in start_xi
if player["player"]["pos"] == position
]
| 31.763043
| 88
| 0.63856
|
4a16b9cf893176c6475514978ce377419db8fe72
| 1,677
|
py
|
Python
|
apps/webapp/__init__.py
|
mbuciora/DevOps
|
9f5010ae3ed0c29d1fe2ef0fbbca9d55a6406374
|
[
"MIT"
] | 1
|
2018-02-22T21:22:32.000Z
|
2018-02-22T21:22:32.000Z
|
apps/webapp/__init__.py
|
mbuciora/DevOps
|
9f5010ae3ed0c29d1fe2ef0fbbca9d55a6406374
|
[
"MIT"
] | null | null | null |
apps/webapp/__init__.py
|
mbuciora/DevOps
|
9f5010ae3ed0c29d1fe2ef0fbbca9d55a6406374
|
[
"MIT"
] | 3
|
2017-05-08T10:52:02.000Z
|
2020-02-05T17:01:00.000Z
|
import os
import sys
from os import path
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_recaptcha import ReCaptcha
app = Flask(__name__)
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
#config for Forms, Register and FCM tokens
app.config.update(dict(
    DEBUG=True,
    SECRET_KEY='you-will-never-guess',
    SECURITY_PASSWORD_SALT='my_precious_two',
    FCM_APP_TOKEN='AAAAXUWoieY:APA91bGcVQ67M5mAEl7e2OSb5yKko8J17NH7GZtOspoq9NKjnHMyD9RiCePjLKUHfyBzn4II0aVJx_JnyyBHQijdbT6sYwxAoDrI15bZX_0FdBpHKgAVqMBpKMQAxIggXxakcZ3It54f',
    RECAPTCHA_ENABLED=True,
    RECAPTCHA_SITE_KEY='6LetACUUAAAAAPckPB-tmBZdLo9eZDp5tacC1XA9',
    RECAPTCHA_SECRET_KEY='6LetACUUAAAAAMUPZ3N1gjDO1AHxq8AVAXau9Fg-',
    RECAPTCHA_THEME='light',
))
#recaptcha init
recaptcha = ReCaptcha()
recaptcha.init_app(app)
#conection to database
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
#Configure Flask-Login
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
### TODO: move to another module and resolve the duplicate models import (Table 'user' is already defined for this MetaData instance)
import config_celery
#Configure Celery
app.config.update(CELERY_BROKER_URL=os.environ['REDIS_URL'], CELERY_RESULT_BACKEND=os.environ['REDIS_URL'])
celery = config_celery.make_celery(app)
###
#User types
#adm - admin, usr - regular user, oth - for later use
def enum(**enums):
return type('Enum', (), enums)
UserType = enum(adm=1, usr=2, oth=3)
ServiceState = enum(up=1, down=2, unspecified=3)
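# Added worked example (illustration only): enum() just builds a class with
# the given attributes, so UserType.adm == 1 and ServiceState.down == 2.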
from webapp import tasks
from webapp import views
| 34.9375
| 476
| 0.80322
|
4a16b9dd83944034ce337dd4ce4315329240ee85
| 2,213
|
py
|
Python
|
restplus/api/v1/auth/login.py
|
davenmathews/Restplus
|
6a77c3eeccdec51d6109c7015bebe888a477343a
|
[
"MIT"
] | null | null | null |
restplus/api/v1/auth/login.py
|
davenmathews/Restplus
|
6a77c3eeccdec51d6109c7015bebe888a477343a
|
[
"MIT"
] | null | null | null |
restplus/api/v1/auth/login.py
|
davenmathews/Restplus
|
6a77c3eeccdec51d6109c7015bebe888a477343a
|
[
"MIT"
] | null | null | null |
from flask_login import login_user, LoginManager, current_user
from flask_restplus import Resource, fields
from flask_restplus.namespace import Namespace
from restplus.api.v1.auth.helpers import extract_auth_data, generate_auth_output
from restplus.models import users_list
auth_ns = Namespace('auth')
login_manager = LoginManager()
user_login_model = auth_ns.model('user_login', {
'email': fields.String(title='Your email address', required=True,
example='myemail@company.com'),
    'password': fields.String(title='Your password', required=True,
                              example='password.Pa55word')
})
@login_manager.user_loader
def load_user(user_id):
for a_user in users_list:
# In the session, user_id is stored as a unicode character
# The chr() converts the int id of the user found to unicode for comparing equality
if chr(a_user.id) == user_id:
return a_user
class Login(Resource):
@auth_ns.expect(user_login_model)
@auth_ns.response(200, 'user logged in successfully')
@auth_ns.response(415, 'request data not in json format')
@auth_ns.response(401, 'invalid password')
@auth_ns.response(400, 'bad request')
def post(self):
"""
User Login
Makes use of Flask-Login
Use the correct user information to login. Guidelines as stipulated in the register route should be followed
Note: Only one user can be logged in per client
"""
try:
return {'message': current_user.email + ' is currently logged in'}, 400
except AttributeError:
pass
email, password = extract_auth_data(self)
for a_user in users_list:
if email == a_user.email:
if a_user.authenticate(password):
login_user(a_user)
output = generate_auth_output(self, a_user)
response = self.api.make_response(output, 200)
return response
else:
auth_ns.abort(401, 'invalid password')
else:
continue
else:
auth_ns.abort(400, 'user not found!')
| avg_line_length: 34.046154 | max_line_length: 116 | alphanum_fraction: 0.633529 |
| hexsha: 4a16ba1fef960cdb08c40046752f55ace9a5b9df | size: 2,646 | ext: py | lang: Python |
| path: 20211127/e/union-find.py | repo: seigot/atcoder | head_hexsha: 6c2da684c75b7c5de162de3713a13507aeecce1d | licenses: ["MIT"] |
| stars: 2 (2021-12-28T11:43:47.000Z to 2022-02-20T14:41:27.000Z) | issues: null | forks: null |
#!/bin/python
# Leftover reads from an earlier attempt, commented out: the solution below
# reads N, M and the edges itself, so reading here would consume input twice.
#n, m = map(int, input().split())
#l = [list(map(int, input().split())) for l in range(m)]
# https://at274.hatenablog.com/entry/2018/02/02/173000
#class UnionFind:
# def __init__(self, n):
# self.par = [i for i in range(n+1)]
# self.rank = [0] * (n+1)
#
#    # find (follow parents, with path compression)
# def find(self, x):
# if self.par[x] == x:
# return x
# else:
# self.par[x] = self.find(self.par[x])
# return self.par[x]
#
#    # union (merge two sets by rank)
# def union(self, x, y):
# x = self.find(x)
# y = self.find(y)
# if self.rank[x] < self.rank[y]:
# self.par[x] = y
# else:
# self.par[y] = x
# if self.rank[x] == self.rank[y]:
# self.rank[x] += 1
#
#    # check whether two elements belong to the same set
# def same_check(self, x, y):
# return self.find(x) == self.find(y)
class UnionFind:
def __init__(self, n):
self.par = [i for i in range(n)]
self.rank = [0]*n
self.size = [1]*n
def find(self, x):
if self.par[x] == x:
return x
else:
self.par[x] = self.find(self.par[x])
return self.par[x]
def same_check(self, x, y):
return self.find(x) == self.find(y)
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if self.rank[x] < self.rank[y]:
if not self.same_check(x, y):
self.size[y] += self.size[x]
self.size[x] = 0
self.par[x] = y
else:
if not self.same_check(x, y):
self.size[x] += self.size[y]
self.size[y] = 0
self.par[y] = x
if self.rank[x] == self.rank[y]:
self.rank[x] += 1
    # renamed from "size": the instance attribute self.size (a list) shadows a
    # method of the same name, so the original definition was never callable
    def get_size(self, x):
        x = self.find(x)
        return self.size[x]
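# Minimal usage sketch (illustrative only, not part of the contest solution):
#   uf = UnionFind(5)
#   uf.union(0, 1); uf.union(1, 2)
#   assert uf.same_check(0, 2) and not uf.same_check(0, 3)
#   assert uf.get_size(0) == 3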
N, M = map(int, input().split())
Edge = []
for _ in range(M):
a, b = map(int, input().split())
a -= 1
b -= 1
Edge.append((a, b))
Edge.sort(reverse=True)
UF = UnionFind(N)
Ans = [-1]*N
ans = 0
edge_cnt = 0
for now in range(N)[::-1]:
ans += 1
while edge_cnt < M and now <= Edge[edge_cnt][0]:
if not UF.same_check(Edge[edge_cnt][0], Edge[edge_cnt][1]):
ans -= 1
UF.union(Edge[edge_cnt][0], Edge[edge_cnt][1])
edge_cnt += 1
Ans[now] = ans
for ans in Ans[1:]:
print(ans)
print(0)
#
#UF=UnionFind(10)
#
#---
#print(UF.get_size(1))
# N is the number of nodes, M the number of conditions (friendships)
#N = 6
#M = 4
#
#UF = UnionFind(n) # initialize with the number of nodes
#for _ in range(M):
#    x, y = l[_] #inputmap() # read one friendship (x=1, y=2)
#    UF.union(x-1, y-1) # merge them into the same set (0-indexed)
#
#print(UF.get_size(0)) # number of elements in the set containing node 1
| avg_line_length: 23.210526 | max_line_length: 67 | alphanum_fraction: 0.47997 |
| hexsha: 4a16baec4d5caea2c0aad5fa922768d468550bdc | size: 544 | ext: py | lang: Python |
| path: buggy/migrations/0005_add_bug_index.py | repo: fusionbox/buggy | head_hexsha: fb6f4a34f6896b65c843ebe711f5bf3279d33049 | licenses: ["BSD-3-Clause"] |
| stars: 2 (2017-05-08T23:11:41.000Z to 2017-05-22T19:27:36.000Z) | issues: 4 (2017-05-03T17:46:47.000Z to 2017-05-08T17:13:57.000Z) | forks: 2 (2017-05-22T19:28:21.000Z to 2017-05-26T17:24:51.000Z) |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 16:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('buggy', '0004_bug_fulltext'),
]
operations = [
migrations.RunSQL(
"""
CREATE INDEX bug_state_assigned_to_index ON buggy_bug (assigned_to_id) WHERE (state != 'closed');
""",
"""
DROP INDEX bug_state_assigned_to_index;
"""
)
]
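# To apply or reverse this migration with the standard Django commands:
#   python manage.py migrate buggy 0005_add_bug_index
#   python manage.py migrate buggy 0004_bug_fulltext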
| avg_line_length: 22.666667 | max_line_length: 109 | alphanum_fraction: 0.580882 |
| hexsha: 4a16bc7499ab5db7ae78ab512a6fd916a56badd8 | size: 3,479 | ext: py | lang: Python |
| path: berts/berts.py | repo: yhyu/berts | head_hexsha: ca1d6917cdba2aa7de611fe7aafb30d4c6d310b3 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow_hub as hub
def BertClassificationModel(
pretrain_url,
classes,
return_sequences=False,
max_seq_length=None,
dropout_rate=0.1,
train_bert=True):
# inputs
input_words_seq = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_words_seq')
input_attention_mask = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_attention_mask')
input_segment_mask = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_segment_mask')
# pre-trained bert
bert_layers = hub.KerasLayer(pretrain_url, trainable=train_bert)
pooled_output, seq_outputs = bert_layers([input_words_seq, input_attention_mask, input_segment_mask])
    # classification layer
if return_sequences:
classification_input = seq_outputs
else:
classification_input = pooled_output
output = keras.layers.Dropout(dropout_rate)(classification_input)
    if classes <= 2:  # binary classification
        output = keras.layers.Dense(1, activation='sigmoid')(output)
    else:  # categorical classification
output = keras.layers.Dense(classes, activation='softmax')(output)
model = keras.models.Model(inputs=[input_words_seq, input_attention_mask, input_segment_mask], outputs=output)
return model, bert_layers
def BertEQAModel(
pretrain_url,
return_cls=False,
max_seq_length=None,
dropout_rate=0.1,
train_bert=True):
# inputs
input_words_seq = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_words_seq')
input_attention_mask = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_attention_mask')
input_segment_mask = keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name='input_segment_mask')
# pre-trained bert
bert_layers = hub.KerasLayer(pretrain_url, trainable=train_bert)
pooled_output, seq_outputs = bert_layers([input_words_seq, input_attention_mask, input_segment_mask])
if return_cls:
cls_output = keras.layers.Dropout(dropout_rate)(pooled_output)
cls_output = keras.layers.Dense(1, activation='sigmoid', name='cls')(cls_output)
# QA layer
seq_outputs = keras.layers.Dropout(dropout_rate)(seq_outputs)
ans_start = WeightedLayer()(seq_outputs)
ans_end = WeightedLayer()(seq_outputs)
mask = tf.cast(tf.equal(input_segment_mask, 0), tf.float32)
ans_start += (mask * -1e9)
ans_end += (mask * -1e9)
ans_start = keras.layers.Activation('softmax', name='ans_start')(ans_start)
ans_end = keras.layers.Activation('softmax', name='ans_end')(ans_end)
model_outputs = [ans_start, ans_end]
if return_cls:
model_outputs.append(cls_output)
model = keras.models.Model(inputs=[input_words_seq, input_attention_mask, input_segment_mask],
outputs=model_outputs
)
return model, bert_layers
class WeightedLayer(tf.keras.layers.Layer):
def __init__(self):
super(WeightedLayer, self).__init__()
def build(self, input_shape):
self.kernel = self.add_weight(shape=(1,input_shape[-1]),
initializer='random_normal',
trainable=True)
super(WeightedLayer, self).build(input_shape)
def call(self, x):
return keras.layers.dot([self.kernel, x], axes=[1,2])
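# Usage sketch (the TF-Hub URL and sequence length below are illustrative
# assumptions, not part of this module):
#   model, bert_layers = BertClassificationModel(
#       "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
#       classes=2, max_seq_length=128)
#   model.compile(optimizer="adam", loss="binary_crossentropy",
#                 metrics=["accuracy"])
#   model.summary()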
| avg_line_length: 39.988506 | max_line_length: 115 | alphanum_fraction: 0.702788 |
| hexsha: 4a16bcac184934b74355f570b954bbb1bb1f08b3 | size: 914 | ext: py | lang: Python |
| path: metaclasses/avoid_init_using_metaclass.py | repo: imsurinder90/metaclasses_and_patterns_in_python | head_hexsha: 456a2d25eb1a46f9029fe83ed7d2ed14919beaa5 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
"""
With the help of metaclass we can make our class look
simple. Metaclass creates a class object and assigns
_fields to it.
"""
from inspect import Parameter, Signature
def make_signature(args):
return Signature(
Parameter(name, Parameter.POSITIONAL_OR_KEYWORD)
for name in args)
class AnimalMeta(type):
def __new__(cls, clsname, bases, clsdict):
clsobj = super().__new__(cls, clsname, bases, clsdict)
sig = make_signature(clsobj._fields)
setattr(clsobj, "_fields", sig)
return clsobj
class Animal(metaclass=AnimalMeta):
_fields = []
def __init__(self, *args, **kwargs):
bound_args = self._fields.bind(*args, **kwargs)
for key, val in bound_args.arguments.items():
setattr(self, key, val)
class Cat(Animal):
_fields = ["name", "speak"]
class Dog(Animal):
_fields = ["nickname", "is_pet"]
cat = Cat("my cat", "meow")
print(cat.name)
dog = Dog("Bruno", "yeah")
print(dog.nickname)
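# Keyword arguments also work, since Signature.bind handles both styles
# (illustrative):
#   cat2 = Cat(name="tom", speak="purr")
#   print(cat2.speak)  # purr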
| avg_line_length: 25.388889 | max_line_length: 56 | alphanum_fraction: 0.714442 |
| hexsha: 4a16bdd8ece06ec13e9f9fa51c20f250498fa510 | size: 961 | ext: py | lang: Python |
| path: widgets/spacer/spacer_base.py | repo: ardovm/wxGlade | head_hexsha: a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | licenses: ["MIT"] |
| stars: 225 (2018-03-26T11:23:22.000Z to 2022-03-24T09:44:08.000Z) | issues: 403 (2018-01-03T19:47:28.000Z to 2018-03-23T17:43:39.000Z) | forks: 47 (2018-04-08T16:48:38.000Z to 2021-12-21T20:08:44.000Z) |
"""\
Code generator functions for spacers
@copyright: 2019 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
class SpacerMixin(object):
"Generic code to handle spacer code in all language code generators"
def get_code(self, obj):
sizer = obj.parent # parent is always a sizer
sizer_name = self.codegen._format_classattr(sizer)
size = (obj.width, obj.height)
flag = self.cn_f(obj.properties["flag"].get_string_value()) or '0'
if sizer.WX_CLASS!="wxGridBagSizer":
size = self.codegen.tmpl_spacersize%size
stmt = self.codegen.tmpl_sizeritem % ( sizer_name, size, obj.proportion, flag, obj.border )
else:
# GridBagSizer
index = sizer._get_row_col(obj.index)
stmt = self.codegen.tmpl_gridbagsizerspacer % ( sizer_name, size[0], size[1], index, obj.span, flag, obj.border )
return [stmt], []
| avg_line_length: 38.44 | max_line_length: 125 | alphanum_fraction: 0.656608 |
| hexsha: 4a16beee133acb7ab138d995a268a5128522b4ea | size: 67,669 | ext: py | lang: Python |
| path: bleak/backends/_manufacturers.py | repo: pjbosco/bleak | head_hexsha: 7c65d74e7dda14d130de4065eb33d1ca20553adf | licenses: ["MIT"] |
| stars: 753 (2018-08-01T08:46:21.000Z to 2022-03-31T22:58:12.000Z) | issues: 587 (2018-04-27T09:47:58.000Z to 2022-03-31T14:55:57.000Z) | forks: 180 (2018-09-28T09:34:58.000Z to 2022-03-30T19:19:34.000Z) |
"""
Manufacturer data retrieved from https://www.bluetooth.com/specifications/assigned-numbers/company-identifiers
"""
MANUFACTURERS = {
0x0000: "Ericsson Technology Licensing",
0x0001: "Nokia Mobile Phones",
0x0002: "Intel Corp.",
0x0003: "IBM Corp.",
0x0004: "Toshiba Corp.",
0x0005: "3Com",
0x0006: "Microsoft",
0x0007: "Lucent",
0x0008: "Motorola",
0x0009: "Infineon Technologies AG",
0x000A: "Qualcomm Technologies International, Ltd. (QTIL)",
0x000B: "Silicon Wave",
0x000C: "Digianswer A/S",
0x000D: "Texas Instruments Inc.",
0x000E: "Parthus Technologies Inc.",
0x000F: "Broadcom Corporation",
0x0010: "Mitel Semiconductor",
0x0011: "Widcomm, Inc.",
0x0012: "Zeevo, Inc.",
0x0013: "Atmel Corporation",
0x0014: "Mitsubishi Electric Corporation",
0x0015: "RTX Telecom A/S",
0x0016: "KC Technology Inc.",
0x0017: "Newlogic",
0x0018: "Transilica, Inc.",
0x0019: "Rohde & Schwarz GmbH & Co. KG",
0x001A: "TTPCom Limited",
0x001B: "Signia Technologies, Inc.",
0x001C: "Conexant Systems Inc.",
0x001D: "Qualcomm",
0x001E: "Inventel",
0x001F: "AVM Berlin",
0x0020: "BandSpeed, Inc.",
0x0021: "Mansella Ltd",
0x0022: "NEC Corporation",
0x0023: "WavePlus Technology Co., Ltd.",
0x0024: "Alcatel",
0x0025: "NXP Semiconductors (formerly Philips Semiconductors)",
0x0026: "C Technologies",
0x0027: "Open Interface",
0x0028: "R F Micro Devices",
0x0029: "Hitachi Ltd",
0x002A: "Symbol Technologies, Inc.",
0x002B: "Tenovis",
0x002C: "Macronix International Co. Ltd.",
0x002D: "GCT Semiconductor",
0x002E: "Norwood Systems",
0x002F: "MewTel Technology Inc.",
0x0030: "ST Microelectronics",
0x0031: "Synopsys, Inc.",
0x0032: "Red-M (Communications) Ltd",
0x0033: "Commil Ltd",
0x0034: "Computer Access Technology Corporation (CATC)",
0x0035: "Eclipse (HQ Espana) S.L.",
0x0036: "Renesas Electronics Corporation",
0x0037: "Mobilian Corporation",
0x0038: "Syntronix Corporation",
0x0039: "Integrated System Solution Corp.",
0x003A: "Matsushita Electric Industrial Co., Ltd.",
0x003B: "Gennum Corporation",
0x003C: "BlackBerry Limited (formerly Research In Motion)",
0x003D: "IPextreme, Inc.",
0x003E: "Systems and Chips, Inc",
0x003F: "Bluetooth SIG, Inc",
0x0040: "Seiko Epson Corporation",
0x0041: "Integrated Silicon Solution Taiwan, Inc.",
0x0042: "CONWISE Technology Corporation Ltd",
0x0043: "PARROT AUTOMOTIVE SAS",
0x0044: "Socket Mobile",
0x0045: "Atheros Communications, Inc.",
0x0046: "MediaTek, Inc.",
0x0047: "Bluegiga",
0x0048: "Marvell Technology Group Ltd.",
0x0049: "3DSP Corporation",
0x004A: "Accel Semiconductor Ltd.",
0x004B: "Continental Automotive Systems",
0x004C: "Apple, Inc.",
0x004D: "Staccato Communications, Inc.",
0x004E: "Avago Technologies",
0x004F: "APT Ltd.",
0x0050: "SiRF Technology, Inc.",
0x0051: "Tzero Technologies, Inc.",
0x0052: "J&M Corporation",
0x0053: "Free2move AB",
0x0054: "3DiJoy Corporation",
0x0055: "Plantronics, Inc.",
0x0056: "Sony Ericsson Mobile Communications",
0x0057: "Harman International Industries, Inc.",
0x0058: "Vizio, Inc.",
0x0059: "Nordic Semiconductor ASA",
0x005A: "EM Microelectronic-Marin SA",
0x005B: "Ralink Technology Corporation",
0x005C: "Belkin International, Inc.",
0x005D: "Realtek Semiconductor Corporation",
0x005E: "Stonestreet One, LLC",
0x005F: "Wicentric, Inc.",
0x0060: "RivieraWaves S.A.S",
0x0061: "RDA Microelectronics",
0x0062: "Gibson Guitars",
0x0063: "MiCommand Inc.",
0x0064: "Band XI International, LLC",
0x0065: "Hewlett-Packard Company",
0x0066: "9Solutions Oy",
0x0067: "GN Netcom A/S",
0x0068: "General Motors",
0x0069: "A&D Engineering, Inc.",
0x006A: "MindTree Ltd.",
0x006B: "Polar Electro OY",
0x006C: "Beautiful Enterprise Co., Ltd.",
0x006D: "BriarTek, Inc",
0x006E: "Summit Data Communications, Inc.",
0x006F: "Sound ID",
0x0070: "Monster, LLC",
0x0071: "connectBlue AB",
0x0072: "ShangHai Super Smart Electronics Co. Ltd.",
0x0073: "Group Sense Ltd.",
0x0074: "Zomm, LLC",
0x0075: "Samsung Electronics Co. Ltd.",
0x0076: "Creative Technology Ltd.",
0x0077: "Laird Technologies",
0x0078: "Nike, Inc.",
0x0079: "lesswire AG",
0x007A: "MStar Semiconductor, Inc.",
0x007B: "Hanlynn Technologies",
0x007C: "A & R Cambridge",
0x007D: "Seers Technology Co., Ltd.",
0x007E: "Sports Tracking Technologies Ltd.",
0x007F: "Autonet Mobile",
0x0080: "DeLorme Publishing Company, Inc.",
0x0081: "WuXi Vimicro",
0x0082: "Sennheiser Communications A/S",
0x0083: "TimeKeeping Systems, Inc.",
0x0084: "Ludus Helsinki Ltd.",
0x0085: "BlueRadios, Inc.",
0x0086: "Equinux AG",
0x0087: "Garmin International, Inc.",
0x0088: "Ecotest",
0x0089: "GN ReSound A/S",
0x008A: "Jawbone",
0x008B: "Topcon Positioning Systems, LLC",
0x008C: "Gimbal Inc. (formerly Qualcomm Labs, Inc. and Qualcomm Retail Solutions, Inc.)",
0x008D: "Zscan Software",
0x008E: "Quintic Corp",
0x008F: "Telit Wireless Solutions GmbH (formerly Stollmann E+V GmbH)",
0x0090: "Funai Electric Co., Ltd.",
0x0091: "Advanced PANMOBIL systems GmbH & Co. KG",
0x0092: "ThinkOptics, Inc.",
0x0093: "Universal Electronics, Inc.",
0x0094: "Airoha Technology Corp.",
0x0095: "NEC Lighting, Ltd.",
0x0096: "ODM Technology, Inc.",
0x0097: "ConnecteDevice Ltd.",
0x0098: "zero1.tv GmbH",
0x0099: "i.Tech Dynamic Global Distribution Ltd.",
0x009A: "Alpwise",
0x009B: "Jiangsu Toppower Automotive Electronics Co., Ltd.",
0x009C: "Colorfy, Inc.",
0x009D: "Geoforce Inc.",
0x009E: "Bose Corporation",
0x009F: "Suunto Oy",
0x00A0: "Kensington Computer Products Group",
0x00A1: "SR-Medizinelektronik",
0x00A2: "Vertu Corporation Limited",
0x00A3: "Meta Watch Ltd.",
0x00A4: "LINAK A/S",
0x00A5: "OTL Dynamics LLC",
0x00A6: "Panda Ocean Inc.",
0x00A7: "Visteon Corporation",
0x00A8: "ARP Devices Limited",
0x00A9: "Magneti Marelli S.p.A",
0x00AA: "CAEN RFID srl",
0x00AB: "Ingenieur-Systemgruppe Zahn GmbH",
0x00AC: "Green Throttle Games",
0x00AD: "Peter Systemtechnik GmbH",
0x00AE: "Omegawave Oy",
0x00AF: "Cinetix",
0x00B0: "Passif Semiconductor Corp",
0x00B1: "Saris Cycling Group, Inc",
0x00B2: "Bekey A/S",
0x00B3: "Clarinox Technologies Pty. Ltd.",
0x00B4: "BDE Technology Co., Ltd.",
0x00B5: "Swirl Networks",
0x00B6: "Meso international",
0x00B7: "TreLab Ltd",
0x00B8: "Qualcomm Innovation Center, Inc. (QuIC)",
0x00B9: "Johnson Controls, Inc.",
0x00BA: "Starkey Laboratories Inc.",
0x00BB: "S-Power Electronics Limited",
0x00BC: "Ace Sensor Inc",
0x00BD: "Aplix Corporation",
0x00BE: "AAMP of America",
0x00BF: "Stalmart Technology Limited",
0x00C0: "AMICCOM Electronics Corporation",
0x00C1: "Shenzhen Excelsecu Data Technology Co.,Ltd",
0x00C2: "Geneq Inc.",
0x00C3: "adidas AG",
0x00C4: "LG Electronics",
0x00C5: "Onset Computer Corporation",
0x00C6: "Selfly BV",
0x00C7: "Quuppa Oy.",
0x00C8: "GeLo Inc",
0x00C9: "Evluma",
0x00CA: "MC10",
0x00CB: "Binauric SE",
0x00CC: "Beats Electronics",
0x00CD: "Microchip Technology Inc.",
0x00CE: "Elgato Systems GmbH",
0x00CF: "ARCHOS SA",
0x00D0: "Dexcom, Inc.",
0x00D1: "Polar Electro Europe B.V.",
0x00D2: "Dialog Semiconductor B.V.",
0x00D3: "Taixingbang Technology (HK) Co,. LTD.",
0x00D4: "Kawantech",
0x00D5: "Austco Communication Systems",
0x00D6: "Timex Group USA, Inc.",
0x00D7: "Qualcomm Technologies, Inc.",
0x00D8: "Qualcomm Connected Experiences, Inc.",
0x00D9: "Voyetra Turtle Beach",
0x00DA: "txtr GmbH",
0x00DB: "Biosentronics",
0x00DC: "Procter & Gamble",
0x00DD: "Hosiden Corporation",
0x00DE: "Muzik LLC",
0x00DF: "Misfit Wearables Corp",
0x00E0: "Google",
0x00E1: "Danlers Ltd",
0x00E2: "Semilink Inc",
0x00E3: "inMusic Brands, Inc",
0x00E4: "L.S. Research Inc.",
0x00E5: "Eden Software Consultants Ltd.",
0x00E6: "Freshtemp",
0x00E7: "KS Technologies",
0x00E8: "ACTS Technologies",
0x00E9: "Vtrack Systems",
0x00EA: "Nielsen-Kellerman Company",
0x00EB: "Server Technology Inc.",
0x00EC: "BioResearch Associates",
0x00ED: "Jolly Logic, LLC",
0x00EE: "Above Average Outcomes, Inc.",
0x00EF: "Bitsplitters GmbH",
0x00F0: "PayPal, Inc.",
0x00F1: "Witron Technology Limited",
0x00F2: "Morse Project Inc.",
0x00F3: "Kent Displays Inc.",
0x00F4: "Nautilus Inc.",
0x00F5: "Smartifier Oy",
0x00F6: "Elcometer Limited",
0x00F7: "VSN Technologies, Inc.",
0x00F8: "AceUni Corp., Ltd.",
0x00F9: "StickNFind",
0x00FA: "Crystal Code AB",
0x00FB: "KOUKAAM a.s.",
0x00FC: "Delphi Corporation",
0x00FD: "ValenceTech Limited",
0x00FE: "Stanley Black and Decker",
0x00FF: "Typo Products, LLC",
0x0100: "TomTom International BV",
0x0101: "Fugoo, Inc.",
0x0102: "Keiser Corporation",
0x0103: "Bang & Olufsen A/S",
0x0104: "PLUS Location Systems Pty Ltd",
0x0105: "Ubiquitous Computing Technology Corporation",
0x0106: "Innovative Yachtter Solutions",
0x0107: "William Demant Holding A/S",
0x0108: "Chicony Electronics Co., Ltd.",
0x0109: "Atus BV",
0x010A: "Codegate Ltd",
0x010B: "ERi, Inc",
0x010C: "Transducers Direct, LLC",
0x010D: "Fujitsu Ten LImited",
0x010E: "Audi AG",
0x010F: "HiSilicon Technologies Col, Ltd.",
0x0110: "Nippon Seiki Co., Ltd.",
0x0111: "Steelseries ApS",
0x0112: "Visybl Inc.",
0x0113: "Openbrain Technologies, Co., Ltd.",
0x0114: "Xensr",
0x0115: "e.solutions",
0x0116: "10AK Technologies",
0x0117: "Wimoto Technologies Inc",
0x0118: "Radius Networks, Inc.",
0x0119: "Wize Technology Co., Ltd.",
0x011A: "Qualcomm Labs, Inc.",
0x011B: "Hewlett Packard Enterprise",
0x011C: "Baidu",
0x011D: "Arendi AG",
0x011E: "Skoda Auto a.s.",
0x011F: "Volkswagen AG",
0x0120: "Porsche AG",
0x0121: "Sino Wealth Electronic Ltd.",
0x0122: "AirTurn, Inc.",
0x0123: "Kinsa, Inc",
0x0124: "HID Global",
0x0125: "SEAT es",
0x0126: "Promethean Ltd.",
0x0127: "Salutica Allied Solutions",
0x0128: "GPSI Group Pty Ltd",
0x0129: "Nimble Devices Oy",
0x012A: "Changzhou Yongse Infotech Co., Ltd.",
0x012B: "SportIQ",
0x012C: "TEMEC Instruments B.V.",
0x012D: "Sony Corporation",
0x012E: "ASSA ABLOY",
0x012F: "Clarion Co. Inc.",
0x0130: "Warehouse Innovations",
0x0131: "Cypress Semiconductor",
0x0132: "MADS Inc",
0x0133: "Blue Maestro Limited",
0x0134: "Resolution Products, Ltd.",
0x0135: "Aireware LLC",
0x0136: "Silvair, Inc.",
0x0137: "Prestigio Plaza Ltd.",
0x0138: "NTEO Inc.",
0x0139: "Focus Systems Corporation",
0x013A: "Tencent Holdings Ltd.",
0x013B: "Allegion",
0x013C: "Murata Manufacturing Co., Ltd.",
0x013D: "WirelessWERX",
0x013E: "Nod, Inc.",
0x013F: "B&B Manufacturing Company",
0x0140: "Alpine Electronics (China) Co., Ltd",
0x0141: "FedEx Services",
0x0142: "Grape Systems Inc.",
0x0143: "Bkon Connect",
0x0144: "Lintech GmbH",
0x0145: "Novatel Wireless",
0x0146: "Ciright",
0x0147: "Mighty Cast, Inc.",
0x0148: "Ambimat Electronics",
0x0149: "Perytons Ltd.",
0x014A: "Tivoli Audio, LLC",
0x014B: "Master Lock",
0x014C: "Mesh-Net Ltd",
0x014D: "HUIZHOU DESAY SV AUTOMOTIVE CO., LTD.",
0x014E: "Tangerine, Inc.",
0x014F: "B&W Group Ltd.",
0x0150: "Pioneer Corporation",
0x0151: "OnBeep",
0x0152: "Vernier Software & Technology",
0x0153: "ROL Ergo",
0x0154: "Pebble Technology",
0x0155: "NETATMO",
0x0156: "Accumulate AB",
0x0157: "Anhui Huami Information Technology Co., Ltd.",
0x0158: "Inmite s.r.o.",
0x0159: "ChefSteps, Inc.",
0x015A: "micas AG",
0x015B: "Biomedical Research Ltd.",
0x015C: "Pitius Tec S.L.",
0x015D: "Estimote, Inc.",
0x015E: "Unikey Technologies, Inc.",
0x015F: "Timer Cap Co.",
0x0160: "AwoX",
0x0161: "yikes",
0x0162: "MADSGlobalNZ Ltd.",
0x0163: "PCH International",
0x0164: "Qingdao Yeelink Information Technology Co., Ltd.",
0x0165: "Milwaukee Tool (Formally Milwaukee Electric Tools)",
0x0166: "MISHIK Pte Ltd",
0x0167: "Ascensia Diabetes Care US Inc.",
0x0168: "Spicebox LLC",
0x0169: "emberlight",
0x016A: "Cooper-Atkins Corporation",
0x016B: "Qblinks",
0x016C: "MYSPHERA",
0x016D: "LifeScan Inc",
0x016E: "Volantic AB",
0x016F: "Podo Labs, Inc",
0x0170: "Roche Diabetes Care AG",
0x0171: "Amazon Fulfillment Service",
0x0172: "Connovate Technology Private Limited",
0x0173: "Kocomojo, LLC",
0x0174: "Everykey Inc.",
0x0175: "Dynamic Controls",
0x0176: "SentriLock",
0x0177: "I-SYST inc.",
0x0178: "CASIO COMPUTER CO., LTD.",
0x0179: "LAPIS Semiconductor Co., Ltd.",
0x017A: "Telemonitor, Inc.",
0x017B: "taskit GmbH",
0x017C: "Daimler AG",
0x017D: "BatAndCat",
0x017E: "BluDotz Ltd",
0x017F: "XTel Wireless ApS",
0x0180: "Gigaset Communications GmbH",
0x0181: "Gecko Health Innovations, Inc.",
0x0182: "HOP Ubiquitous",
0x0183: "Walt Disney",
0x0184: "Nectar",
0x0185: "bel'apps LLC",
0x0186: "CORE Lighting Ltd",
0x0187: "Seraphim Sense Ltd",
0x0188: "Unico RBC",
0x0189: "Physical Enterprises Inc.",
0x018A: "Able Trend Technology Limited",
0x018B: "Konica Minolta, Inc.",
0x018C: "Wilo SE",
0x018D: "Extron Design Services",
0x018E: "Fitbit, Inc.",
0x018F: "Fireflies Systems",
0x0190: "Intelletto Technologies Inc.",
0x0191: "FDK CORPORATION",
0x0192: "Cloudleaf, Inc",
0x0193: "Maveric Automation LLC",
0x0194: "Acoustic Stream Corporation",
0x0195: "Zuli",
0x0196: "Paxton Access Ltd",
0x0197: "WiSilica Inc.",
0x0198: "VENGIT Korlatolt Felelossegu Tarsasag",
0x0199: "SALTO SYSTEMS S.L.",
0x019A: "TRON Forum (formerly T-Engine Forum)",
0x019B: "CUBETECH s.r.o.",
0x019C: "Cokiya Incorporated",
0x019D: "CVS Health",
0x019E: "Ceruus",
0x019F: "Strainstall Ltd",
0x01A0: "Channel Enterprises (HK) Ltd.",
0x01A1: "FIAMM",
0x01A2: "GIGALANE.CO.,LTD",
0x01A3: "EROAD",
0x01A4: "Mine Safety Appliances",
0x01A5: "Icon Health and Fitness",
0x01A6: "Wille Engineering (formely as Asandoo GmbH)",
0x01A7: "ENERGOUS CORPORATION",
0x01A8: "Taobao",
0x01A9: "Canon Inc.",
0x01AA: "Geophysical Technology Inc.",
0x01AB: "Facebook, Inc.",
0x01AC: "Trividia Health, Inc.",
0x01AD: "FlightSafety International",
0x01AE: "Earlens Corporation",
0x01AF: "Sunrise Micro Devices, Inc.",
0x01B0: "Star Micronics Co., Ltd.",
0x01B1: "Netizens Sp. z o.o.",
0x01B2: "Nymi Inc.",
0x01B3: "Nytec, Inc.",
0x01B4: "Trineo Sp. z o.o.",
0x01B5: "Nest Labs Inc.",
0x01B6: "LM Technologies Ltd",
0x01B7: "General Electric Company",
0x01B8: "i+D3 S.L.",
0x01B9: "HANA Micron",
0x01BA: "Stages Cycling LLC",
0x01BB: "Cochlear Bone Anchored Solutions AB",
0x01BC: "SenionLab AB",
0x01BD: "Syszone Co., Ltd",
0x01BE: "Pulsate Mobile Ltd.",
0x01BF: "Hong Kong HunterSun Electronic Limited",
0x01C0: "pironex GmbH",
0x01C1: "BRADATECH Corp.",
0x01C2: "Transenergooil AG",
0x01C3: "Bunch",
0x01C4: "DME Microelectronics",
0x01C5: "Bitcraze AB",
0x01C6: "HASWARE Inc.",
0x01C7: "Abiogenix Inc.",
0x01C8: "Poly-Control ApS",
0x01C9: "Avi-on",
0x01CA: "Laerdal Medical AS",
0x01CB: "Fetch My Pet",
0x01CC: "Sam Labs Ltd.",
0x01CD: "Chengdu Synwing Technology Ltd",
0x01CE: "HOUWA SYSTEM DESIGN, k.k.",
0x01CF: "BSH",
0x01D0: "Primus Inter Pares Ltd",
0x01D1: "August Home, Inc",
0x01D2: "Gill Electronics",
0x01D3: "Sky Wave Design",
0x01D4: "Newlab S.r.l.",
0x01D5: "ELAD srl",
0x01D6: "G-wearables inc.",
0x01D7: "Squadrone Systems Inc.",
0x01D8: "Code Corporation",
0x01D9: "Savant Systems LLC",
0x01DA: "Logitech International SA",
0x01DB: "Innblue Consulting",
0x01DC: "iParking Ltd.",
0x01DD: "Koninklijke Philips Electronics N.V.",
0x01DE: "Minelab Electronics Pty Limited",
0x01DF: "Bison Group Ltd.",
0x01E0: "Widex A/S",
0x01E1: "Jolla Ltd",
0x01E2: "Lectronix, Inc.",
0x01E3: "Caterpillar Inc",
0x01E4: "Freedom Innovations",
0x01E5: "Dynamic Devices Ltd",
0x01E6: "Technology Solutions (UK) Ltd",
0x01E7: "IPS Group Inc.",
0x01E8: "STIR",
0x01E9: "Sano, Inc.",
0x01EA: "Advanced Application Design, Inc.",
0x01EB: "AutoMap LLC",
0x01EC: "Spreadtrum Communications Shanghai Ltd",
0x01ED: "CuteCircuit LTD",
0x01EE: "Valeo Service",
0x01EF: "Fullpower Technologies, Inc.",
0x01F0: "KloudNation",
0x01F1: "Zebra Technologies Corporation",
0x01F2: "Itron, Inc.",
0x01F3: "The University of Tokyo",
0x01F4: "UTC Fire and Security",
0x01F5: "Cool Webthings Limited",
0x01F6: "DJO Global",
0x01F7: "Gelliner Limited",
0x01F8: "Anyka (Guangzhou) Microelectronics Technology Co, LTD",
0x01F9: "Medtronic Inc.",
0x01FA: "Gozio Inc.",
0x01FB: "Form Lifting, LLC",
0x01FC: "Wahoo Fitness, LLC",
0x01FD: "Kontakt Micro-Location Sp. z o.o.",
0x01FE: "Radio Systems Corporation",
0x01FF: "Freescale Semiconductor, Inc.",
0x0200: "Verifone Systems Pte Ltd. Taiwan Branch",
0x0201: "AR Timing",
0x0202: "Rigado LLC",
0x0203: "Kemppi Oy",
0x0204: "Tapcentive Inc.",
0x0205: "Smartbotics Inc.",
0x0206: "Otter Products, LLC",
0x0207: "STEMP Inc.",
0x0208: "LumiGeek LLC",
0x0209: "InvisionHeart Inc.",
0x020A: "Macnica Inc.",
0x020B: "Jaguar Land Rover Limited",
0x020C: "CoroWare Technologies, Inc",
0x020D: "Simplo Technology Co., LTD",
0x020E: "Omron Healthcare Co., LTD",
0x020F: "Comodule GMBH",
0x0210: "ikeGPS",
0x0211: "Telink Semiconductor Co. Ltd",
0x0212: "Interplan Co., Ltd",
0x0213: "Wyler AG",
0x0214: "IK Multimedia Production srl",
0x0215: "Lukoton Experience Oy",
0x0216: "MTI Ltd",
0x0217: "Tech4home, Lda",
0x0218: "Hiotech AB",
0x0219: "DOTT Limited",
0x021A: "Blue Speck Labs, LLC",
0x021B: "Cisco Systems, Inc",
0x021C: "Mobicomm Inc",
0x021D: "Edamic",
0x021E: "Goodnet, Ltd",
0x021F: "Luster Leaf Products Inc",
0x0220: "Manus Machina BV",
0x0221: "Mobiquity Networks Inc",
0x0222: "Praxis Dynamics",
0x0223: "Philip Morris Products S.A.",
0x0224: "Comarch SA",
0x0225: "Nestl Nespresso S.A.",
0x0226: "Merlinia A/S",
0x0227: "LifeBEAM Technologies",
0x0228: "Twocanoes Labs, LLC",
0x0229: "Muoverti Limited",
0x022A: "Stamer Musikanlagen GMBH",
0x022B: "Tesla Motors",
0x022C: "Pharynks Corporation",
0x022D: "Lupine",
0x022E: "Siemens AG",
0x022F: "Huami (Shanghai) Culture Communication CO., LTD",
0x0230: "Foster Electric Company, Ltd",
0x0231: "ETA SA",
0x0232: "x-Senso Solutions Kft",
0x0233: "Shenzhen SuLong Communication Ltd",
0x0234: "FengFan (BeiJing) Technology Co, Ltd",
0x0235: "Qrio Inc",
0x0236: "Pitpatpet Ltd",
0x0237: "MSHeli s.r.l.",
0x0238: "Trakm8 Ltd",
0x0239: "JIN CO, Ltd",
0x023A: "Alatech Tehnology",
0x023B: "Beijing CarePulse Electronic Technology Co, Ltd",
0x023C: "Awarepoint",
0x023D: "ViCentra B.V.",
0x023E: "Raven Industries",
0x023F: "WaveWare Technologies Inc.",
0x0240: "Argenox Technologies",
0x0241: "Bragi GmbH",
0x0242: "16Lab Inc",
0x0243: "Masimo Corp",
0x0244: "Iotera Inc",
0x0245: "Endress+Hauser",
0x0246: "ACKme Networks, Inc.",
0x0247: "FiftyThree Inc.",
0x0248: "Parker Hannifin Corp",
0x0249: "Transcranial Ltd",
0x024A: "Uwatec AG",
0x024B: "Orlan LLC",
0x024C: "Blue Clover Devices",
0x024D: "M-Way Solutions GmbH",
0x024E: "Microtronics Engineering GmbH",
0x024F: "Schneider Schreibgerte GmbH",
0x0250: "Sapphire Circuits LLC",
0x0251: "Lumo Bodytech Inc.",
0x0252: "UKC Technosolution",
0x0253: "Xicato Inc.",
0x0254: "Playbrush",
0x0255: "Dai Nippon Printing Co., Ltd.",
0x0256: "G24 Power Limited",
0x0257: "AdBabble Local Commerce Inc.",
0x0258: "Devialet SA",
0x0259: "ALTYOR",
0x025A: "University of Applied Sciences Valais/Haute Ecole Valaisanne",
0x025B: "Five Interactive, LLC dba Zendo",
0x025C: "NetEaseHangzhouNetwork co.Ltd.",
0x025D: "Lexmark International Inc.",
0x025E: "Fluke Corporation",
0x025F: "Yardarm Technologies",
0x0260: "SensaRx",
0x0261: "SECVRE GmbH",
0x0262: "Glacial Ridge Technologies",
0x0263: "Identiv, Inc.",
0x0264: "DDS, Inc.",
0x0265: "SMK Corporation",
0x0266: "Schawbel Technologies LLC",
0x0267: "XMI Systems SA",
0x0268: "Cerevo",
0x0269: "Torrox GmbH & Co KG",
0x026A: "Gemalto",
0x026B: "DEKA Research & Development Corp.",
0x026C: "Domster Tadeusz Szydlowski",
0x026D: "Technogym SPA",
0x026E: "FLEURBAEY BVBA",
0x026F: "Aptcode Solutions",
0x0270: "LSI ADL Technology",
0x0271: "Animas Corp",
0x0272: "Alps Electric Co., Ltd.",
0x0273: "OCEASOFT",
0x0274: "Motsai Research",
0x0275: "Geotab",
0x0276: "E.G.O. Elektro-Gertebau GmbH",
0x0277: "bewhere inc",
0x0278: "Johnson Outdoors Inc",
0x0279: "steute Schaltgerate GmbH & Co. KG",
0x027A: "Ekomini inc.",
0x027B: "DEFA AS",
0x027C: "Aseptika Ltd",
0x027D: "HUAWEI Technologies Co., Ltd. ( )",
0x027E: "HabitAware, LLC",
0x027F: "ruwido austria gmbh",
0x0280: "ITEC corporation",
0x0281: "StoneL",
0x0282: "Sonova AG",
0x0283: "Maven Machines, Inc.",
0x0284: "Synapse Electronics",
0x0285: "Standard Innovation Inc.",
0x0286: "RF Code, Inc.",
0x0287: "Wally Ventures S.L.",
0x0288: "Willowbank Electronics Ltd",
0x0289: "SK Telecom",
0x028A: "Jetro AS",
0x028B: "Code Gears LTD",
0x028C: "NANOLINK APS",
0x028D: "IF, LLC",
0x028E: "RF Digital Corp",
0x028F: "Church & Dwight Co., Inc",
0x0290: "Multibit Oy",
0x0291: "CliniCloud Inc",
0x0292: "SwiftSensors",
0x0293: "Blue Bite",
0x0294: "ELIAS GmbH",
0x0295: "Sivantos GmbH",
0x0296: "Petzl",
0x0297: "storm power ltd",
0x0298: "EISST Ltd",
0x0299: "Inexess Technology Simma KG",
0x029A: "Currant, Inc.",
0x029B: "C2 Development, Inc.",
0x029C: "Blue Sky Scientific, LLC",
0x029D: "ALOTTAZS LABS, LLC",
0x029E: "Kupson spol. s r.o.",
0x029F: "Areus Engineering GmbH",
0x02A0: "Impossible Camera GmbH",
0x02A1: "InventureTrack Systems",
0x02A2: "LockedUp",
0x02A3: "Itude",
0x02A4: "Pacific Lock Company",
0x02A5: "Tendyron Corporation ( )",
0x02A6: "Robert Bosch GmbH",
0x02A7: "Illuxtron international B.V.",
0x02A8: "miSport Ltd.",
0x02A9: "Chargelib",
0x02AA: "Doppler Lab",
0x02AB: "BBPOS Limited",
0x02AC: "RTB Elektronik GmbH & Co. KG",
0x02AD: "Rx Networks, Inc.",
0x02AE: "WeatherFlow, Inc.",
0x02AF: "Technicolor USA Inc.",
0x02B0: "Bestechnic(Shanghai),Ltd",
0x02B1: "Raden Inc",
0x02B2: "JouZen Oy",
0x02B3: "CLABER S.P.A.",
0x02B4: "Hyginex, Inc.",
0x02B5: "HANSHIN ELECTRIC RAILWAY CO.,LTD.",
0x02B6: "Schneider Electric",
0x02B7: "Oort Technologies LLC",
0x02B8: "Chrono Therapeutics",
0x02B9: "Rinnai Corporation",
0x02BA: "Swissprime Technologies AG",
0x02BB: "Koha.,Co.Ltd",
0x02BC: "Genevac Ltd",
0x02BD: "Chemtronics",
0x02BE: "Seguro Technology Sp. z o.o.",
0x02BF: "Redbird Flight Simulations",
0x02C0: "Dash Robotics",
0x02C1: "LINE Corporation",
0x02C2: "Guillemot Corporation",
0x02C3: "Techtronic Power Tools Technology Limited",
0x02C4: "Wilson Sporting Goods",
0x02C5: "Lenovo (Singapore) Pte Ltd. ( )",
0x02C6: "Ayatan Sensors",
0x02C7: "Electronics Tomorrow Limited",
0x02C8: "VASCO Data Security International, Inc.",
0x02C9: "PayRange Inc.",
0x02CA: "ABOV Semiconductor",
0x02CB: "AINA-Wireless Inc.",
0x02CC: "Eijkelkamp Soil & Water",
0x02CD: "BMA ergonomics b.v.",
0x02CE: "Teva Branded Pharmaceutical Products R&D, Inc.",
0x02CF: "Anima",
0x02D0: "3M",
0x02D1: "Empatica Srl",
0x02D2: "Afero, Inc.",
0x02D3: "Powercast Corporation",
0x02D4: "Secuyou ApS",
0x02D5: "OMRON Corporation",
0x02D6: "Send Solutions",
0x02D7: "NIPPON SYSTEMWARE CO.,LTD.",
0x02D8: "Neosfar",
0x02D9: "Fliegl Agrartechnik GmbH",
0x02DA: "Gilvader",
0x02DB: "Digi International Inc (R)",
0x02DC: "DeWalch Technologies, Inc.",
0x02DD: "Flint Rehabilitation Devices, LLC",
0x02DE: "Samsung SDS Co., Ltd.",
0x02DF: "Blur Product Development",
0x02E0: "University of Michigan",
0x02E1: "Victron Energy BV",
0x02E2: "NTT docomo",
0x02E3: "Carmanah Technologies Corp.",
0x02E4: "Bytestorm Ltd.",
0x02E5: "Espressif Incorporated ( () )",
0x02E6: "Unwire",
0x02E7: "Connected Yard, Inc.",
0x02E8: "American Music Environments",
0x02E9: "Sensogram Technologies, Inc.",
0x02EA: "Fujitsu Limited",
0x02EB: "Ardic Technology",
0x02EC: "Delta Systems, Inc",
0x02ED: "HTC Corporation",
0x02EE: "Citizen Holdings Co., Ltd.",
0x02EF: "SMART-INNOVATION.inc",
0x02F0: "Blackrat Software",
0x02F1: "The Idea Cave, LLC",
0x02F2: "GoPro, Inc.",
0x02F3: "AuthAir, Inc",
0x02F4: "Vensi, Inc.",
0x02F5: "Indagem Tech LLC",
0x02F6: "Intemo Technologies",
0x02F7: "DreamVisions co., Ltd.",
0x02F8: "Runteq Oy Ltd",
0x02F9: "IMAGINATION TECHNOLOGIES LTD",
0x02FA: "CoSTAR TEchnologies",
0x02FB: "Clarius Mobile Health Corp.",
0x02FC: "Shanghai Frequen Microelectronics Co., Ltd.",
0x02FD: "Uwanna, Inc.",
0x02FE: "Lierda Science & Technology Group Co., Ltd.",
0x02FF: "Silicon Laboratories",
0x0300: "World Moto Inc.",
0x0301: "Giatec Scientific Inc.",
0x0302: "Loop Devices, Inc",
0x0303: "IACA electronique",
0x0304: "Proxy Technologies, Inc.",
0x0305: "Swipp ApS",
0x0306: "Life Laboratory Inc.",
0x0307: "FUJI INDUSTRIAL CO.,LTD.",
0x0308: "Surefire, LLC",
0x0309: "Dolby Labs",
0x030A: "Ellisys",
0x030B: "Magnitude Lighting Converters",
0x030C: "Hilti AG",
0x030D: "Devdata S.r.l.",
0x030E: "Deviceworx",
0x030F: "Shortcut Labs",
0x0310: "SGL Italia S.r.l.",
0x0311: "PEEQ DATA",
0x0312: "Ducere Technologies Pvt Ltd",
0x0313: "DiveNav, Inc.",
0x0314: "RIIG AI Sp. z o.o.",
0x0315: "Thermo Fisher Scientific",
0x0316: "AG Measurematics Pvt. Ltd.",
0x0317: "CHUO Electronics CO., LTD.",
0x0318: "Aspenta International",
0x0319: "Eugster Frismag AG",
0x031A: "Amber wireless GmbH",
0x031B: "HQ Inc",
0x031C: "Lab Sensor Solutions",
0x031D: "Enterlab ApS",
0x031E: "Eyefi, Inc.",
0x031F: "MetaSystem S.p.A.",
0x0320: "SONO ELECTRONICS. CO., LTD",
0x0321: "Jewelbots",
0x0322: "Compumedics Limited",
0x0323: "Rotor Bike Components",
0x0324: "Astro, Inc.",
0x0325: "Amotus Solutions",
0x0326: "Healthwear Technologies (Changzhou)Ltd",
0x0327: "Essex Electronics",
0x0328: "Grundfos A/S",
0x0329: "Eargo, Inc.",
0x032A: "Electronic Design Lab",
0x032B: "ESYLUX",
0x032C: "NIPPON SMT.CO.,Ltd",
0x032D: "BM innovations GmbH",
0x032E: "indoormap",
0x032F: "OttoQ Inc",
0x0330: "North Pole Engineering",
0x0331: "3flares Technologies Inc.",
0x0332: "Electrocompaniet A.S.",
0x0333: "Mul-T-Lock",
0x0334: "Corentium AS",
0x0335: "Enlighted Inc",
0x0336: "GISTIC",
0x0337: "AJP2 Holdings, LLC",
0x0338: "COBI GmbH",
0x0339: "Blue Sky Scientific, LLC",
0x033A: "Appception, Inc.",
0x033B: "Courtney Thorne Limited",
0x033C: "Virtuosys",
0x033D: "TPV Technology Limited",
0x033E: "Monitra SA",
0x033F: "Automation Components, Inc.",
0x0340: "Letsense s.r.l.",
0x0341: "Etesian Technologies LLC",
0x0342: "GERTEC BRASIL LTDA.",
0x0343: "Drekker Development Pty. Ltd.",
0x0344: "Whirl Inc",
0x0345: "Locus Positioning",
0x0346: "Acuity Brands Lighting, Inc",
0x0347: "Prevent Biometrics",
0x0348: "Arioneo",
0x0349: "VersaMe",
0x034A: "Vaddio",
0x034B: "Libratone A/S",
0x034C: "HM Electronics, Inc.",
0x034D: "TASER International, Inc.",
0x034E: "SafeTrust Inc.",
0x034F: "Heartland Payment Systems",
0x0350: "Bitstrata Systems Inc.",
0x0351: "Pieps GmbH",
0x0352: "iRiding(Xiamen)Technology Co.,Ltd.",
0x0353: "Alpha Audiotronics, Inc.",
0x0354: "TOPPAN FORMS CO.,LTD.",
0x0355: "Sigma Designs, Inc.",
0x0356: "Spectrum Brands, Inc.",
0x0357: "Polymap Wireless",
0x0358: "MagniWare Ltd.",
0x0359: "Novotec Medical GmbH",
0x035A: "Medicom Innovation Partner a/s",
0x035B: "Matrix Inc.",
0x035C: "Eaton Corporation",
0x035D: "KYS",
0x035E: "Naya Health, Inc.",
0x035F: "Acromag",
0x0360: "Insulet Corporation",
0x0361: "Wellinks Inc.",
0x0362: "ON Semiconductor",
0x0363: "FREELAP SA",
0x0364: "Favero Electronics Srl",
0x0365: "BioMech Sensor LLC",
0x0366: "BOLTT Sports technologies Private limited",
0x0367: "Saphe International",
0x0368: "Metormote AB",
0x0369: "littleBits",
0x036A: "SetPoint Medical",
0x036B: "BRControls Products BV",
0x036C: "Zipcar",
0x036D: "AirBolt Pty Ltd",
0x036E: "KeepTruckin Inc",
0x036F: "Motiv, Inc.",
0x0370: "Wazombi Labs O",
0x0371: "ORBCOMM",
0x0372: "Nixie Labs, Inc.",
0x0373: "AppNearMe Ltd",
0x0374: "Holman Industries",
0x0375: "Expain AS",
0x0376: "Electronic Temperature Instruments Ltd",
0x0377: "Plejd AB",
0x0378: "Propeller Health",
0x0379: "Shenzhen iMCO Electronic Technology Co.,Ltd",
0x037A: "Algoria",
0x037B: "Apption Labs Inc.",
0x037C: "Cronologics Corporation",
0x037D: "MICRODIA Ltd.",
0x037E: "lulabytes S.L.",
0x037F: "Nestec S.A.",
0x0380: 'LLC "MEGA-F service"',
0x0381: "Sharp Corporation",
0x0382: "Precision Outcomes Ltd",
0x0383: "Kronos Incorporated",
0x0384: "OCOSMOS Co., Ltd.",
0x0385: "Embedded Electronic Solutions Ltd. dba e2Solutions",
0x0386: "Aterica Inc.",
0x0387: "BluStor PMC, Inc.",
0x0388: "Kapsch TrafficCom AB",
0x0389: "ActiveBlu Corporation",
0x038A: "Kohler Mira Limited",
0x038B: "Noke",
0x038C: "Appion Inc.",
0x038D: "Resmed Ltd",
0x038E: "Crownstone B.V.",
0x038F: "Xiaomi Inc.",
0x0390: "INFOTECH s.r.o.",
0x0391: "Thingsquare AB",
0x0392: "T&D",
0x0393: "LAVAZZA S.p.A.",
0x0394: "Netclearance Systems, Inc.",
0x0395: "SDATAWAY",
0x0396: "BLOKS GmbH",
0x0397: "LEGO System A/S",
0x0398: "Thetatronics Ltd",
0x0399: "Nikon Corporation",
0x039A: "NeST",
0x039B: "South Silicon Valley Microelectronics",
0x039C: "ALE International",
0x039D: "CareView Communications, Inc.",
0x039E: "SchoolBoard Limited",
0x039F: "Molex Corporation",
0x03A0: "BARROT TECHNOLOGY LIMITED (formerly IVT Wireless Limited)",
0x03A1: "Alpine Labs LLC",
0x03A2: "Candura Instruments",
0x03A3: "SmartMovt Technology Co., Ltd",
0x03A4: "Token Zero Ltd",
0x03A5: "ACE CAD Enterprise Co., Ltd. (ACECAD)",
0x03A6: "Medela, Inc",
0x03A7: "AeroScout",
0x03A8: "Esrille Inc.",
0x03A9: "THINKERLY SRL",
0x03AA: "Exon Sp. z o.o.",
0x03AB: "Meizu Technology Co., Ltd.",
0x03AC: "Smablo LTD",
0x03AD: "XiQ",
0x03AE: "Allswell Inc.",
0x03AF: "Comm-N-Sense Corp DBA Verigo",
0x03B0: "VIBRADORM GmbH",
0x03B1: "Otodata Wireless Network Inc.",
0x03B2: "Propagation Systems Limited",
0x03B3: "Midwest Instruments & Controls",
0x03B4: "Alpha Nodus, inc.",
0x03B5: "petPOMM, Inc",
0x03B6: "Mattel",
0x03B7: "Airbly Inc.",
0x03B8: "A-Safe Limited",
0x03B9: "FREDERIQUE CONSTANT SA",
0x03BA: "Maxscend Microelectronics Company Limited",
0x03BB: "Abbott Diabetes Care",
0x03BC: "ASB Bank Ltd",
0x03BD: "amadas",
0x03BE: "Applied Science, Inc.",
0x03BF: "iLumi Solutions Inc.",
0x03C0: "Arch Systems Inc.",
0x03C1: "Ember Technologies, Inc.",
0x03C2: "Snapchat Inc",
0x03C3: "Casambi Technologies Oy",
0x03C4: "Pico Technology Inc.",
0x03C5: "St. Jude Medical, Inc.",
0x03C6: "Intricon",
0x03C7: "Structural Health Systems, Inc.",
0x03C8: "Avvel International",
0x03C9: "Gallagher Group",
0x03CA: "In2things Automation Pvt. Ltd.",
0x03CB: "SYSDEV Srl",
0x03CC: "Vonkil Technologies Ltd",
0x03CD: "Wynd Technologies, Inc.",
0x03CE: "CONTRINEX S.A.",
0x03CF: "MIRA, Inc.",
0x03D0: "Watteam Ltd",
0x03D1: "Density Inc.",
0x03D2: "IOT Pot India Private Limited",
0x03D3: "Sigma Connectivity AB",
0x03D4: "PEG PEREGO SPA",
0x03D5: "Wyzelink Systems Inc.",
0x03D6: "Yota Devices LTD",
0x03D7: "FINSECUR",
0x03D8: "Zen-Me Labs Ltd",
0x03D9: "3IWare Co., Ltd.",
0x03DA: "EnOcean GmbH",
0x03DB: "Instabeat, Inc",
0x03DC: "Nima Labs",
0x03DD: "Andreas Stihl AG & Co. KG",
0x03DE: "Nathan Rhoades LLC",
0x03DF: "Grob Technologies, LLC",
0x03E0: "Actions (Zhuhai) Technology Co., Limited",
0x03E1: "SPD Development Company Ltd",
0x03E2: "Sensoan Oy",
0x03E3: "Qualcomm Life Inc",
0x03E4: "Chip-ing AG",
0x03E5: "ffly4u",
0x03E6: "IoT Instruments Oy",
0x03E7: "TRUE Fitness Technology",
0x03E8: "Reiner Kartengeraete GmbH & Co. KG.",
0x03E9: "SHENZHEN LEMONJOY TECHNOLOGY CO., LTD.",
0x03EA: "Hello Inc.",
0x03EB: "Evollve Inc.",
0x03EC: "Jigowatts Inc.",
0x03ED: "BASIC MICRO.COM,INC.",
0x03EE: "CUBE TECHNOLOGIES",
0x03EF: "foolography GmbH",
0x03F0: "CLINK",
0x03F1: "Hestan Smart Cooking Inc.",
0x03F2: "WindowMaster A/S",
0x03F3: "Flowscape AB",
0x03F4: "PAL Technologies Ltd",
0x03F5: "WHERE, Inc.",
0x03F6: "Iton Technology Corp.",
0x03F7: "Owl Labs Inc.",
0x03F8: "Rockford Corp.",
0x03F9: "Becon Technologies Co.,Ltd.",
0x03FA: "Vyassoft Technologies Inc",
0x03FB: "Nox Medical",
0x03FC: "Kimberly-Clark",
0x03FD: "Trimble Navigation Ltd.",
0x03FE: "Littelfuse",
0x03FF: "Withings",
0x0400: "i-developer IT Beratung UG",
0x0401: "<unknown>",
0x0402: "Sears Holdings Corporation",
0x0403: "Gantner Electronic GmbH",
0x0404: "Authomate Inc",
0x0405: "Vertex International, Inc.",
0x0406: "Airtago",
0x0407: "Swiss Audio SA",
0x0408: "ToGetHome Inc.",
0x0409: "AXIS",
0x040A: "Openmatics",
0x040B: "Jana Care Inc.",
0x040C: "Senix Corporation",
0x040D: "NorthStar Battery Company, LLC",
0x040E: "SKF (U.K.) Limited",
0x040F: "CO-AX Technology, Inc.",
0x0410: "Fender Musical Instruments",
0x0411: "Luidia Inc",
0x0412: "SEFAM",
0x0413: "Wireless Cables Inc",
0x0414: "Lightning Protection International Pty Ltd",
0x0415: "Uber Technologies Inc",
0x0416: "SODA GmbH",
0x0417: "Fatigue Science",
0x0418: "Alpine Electronics Inc.",
0x0419: "Novalogy LTD",
0x041A: "Friday Labs Limited",
0x041B: "OrthoAccel Technologies",
0x041C: "WaterGuru, Inc.",
0x041D: "Benning Elektrotechnik und Elektronik GmbH & Co. KG",
0x041E: "Dell Computer Corporation",
0x041F: "Kopin Corporation",
0x0420: "TecBakery GmbH",
0x0421: "Backbone Labs, Inc.",
0x0422: "DELSEY SA",
0x0423: "Chargifi Limited",
0x0424: "Trainesense Ltd.",
0x0425: "Unify Software and Solutions GmbH & Co. KG",
0x0426: "Husqvarna AB",
0x0427: "Focus fleet and fuel management inc",
0x0428: "SmallLoop, LLC",
0x0429: "Prolon Inc.",
0x042A: "BD Medical",
0x042B: "iMicroMed Incorporated",
0x042C: "Ticto N.V.",
0x042D: "Meshtech AS",
0x042E: "MemCachier Inc.",
0x042F: "Danfoss A/S",
0x0430: "SnapStyk Inc.",
0x0431: "Amway Corporation",
0x0432: "Silk Labs, Inc.",
0x0433: "Pillsy Inc.",
0x0434: "Hatch Baby, Inc.",
0x0435: "Blocks Wearables Ltd.",
0x0436: "Drayson Technologies (Europe) Limited",
0x0437: "eBest IOT Inc.",
0x0438: "Helvar Ltd",
0x0439: "Radiance Technologies",
0x043A: "Nuheara Limited",
0x043B: "Appside co., ltd.",
0x043C: "DeLaval",
0x043D: "Coiler Corporation",
0x043E: "Thermomedics, Inc.",
0x043F: "Tentacle Sync GmbH",
0x0440: "Valencell, Inc.",
0x0441: "iProtoXi Oy",
0x0442: "SECOM CO., LTD.",
0x0443: "Tucker International LLC",
0x0444: "Metanate Limited",
0x0445: "Kobian Canada Inc.",
0x0446: "NETGEAR, Inc.",
0x0447: "Fabtronics Australia Pty Ltd",
0x0448: "Grand Centrix GmbH",
0x0449: "1UP USA.com llc",
0x044A: "SHIMANO INC.",
0x044B: "Nain Inc.",
0x044C: "LifeStyle Lock, LLC",
0x044D: "VEGA Grieshaber KG",
0x044E: "Xtrava Inc.",
0x044F: "TTS Tooltechnic Systems AG & Co. KG",
0x0450: "Teenage Engineering AB",
0x0451: "Tunstall Nordic AB",
0x0452: "Svep Design Center AB",
0x0453: "GreenPeak Technologies BV",
0x0454: "Sphinx Electronics GmbH & Co KG",
0x0455: "Atomation",
0x0456: "Nemik Consulting Inc",
0x0457: "RF INNOVATION",
0x0458: "Mini Solution Co., Ltd.",
0x0459: "Lumenetix, Inc",
0x045A: "2048450 Ontario Inc",
0x045B: "SPACEEK LTD",
0x045C: "Delta T Corporation",
0x045D: "Boston Scientific Corporation",
0x045E: "Nuviz, Inc.",
0x045F: "Real Time Automation, Inc.",
0x0460: "Kolibree",
0x0461: "vhf elektronik GmbH",
0x0462: "Bonsai Systems GmbH",
0x0463: "Fathom Systems Inc.",
0x0464: "Bellman & Symfon",
0x0465: "International Forte Group LLC",
0x0466: "CycleLabs Solutions inc.",
0x0467: "Codenex Oy",
0x0468: "Kynesim Ltd",
0x0469: "Palago AB",
0x046A: "INSIGMA INC.",
0x046B: "PMD Solutions",
0x046C: "Qingdao Realtime Technology Co., Ltd.",
0x046D: "BEGA Gantenbrink-Leuchten KG",
0x046E: "Pambor Ltd.",
0x046F: "Develco Products A/S",
0x0470: "iDesign s.r.l.",
0x0471: "TiVo Corp",
0x0472: "Control-J Pty Ltd",
0x0473: "Steelcase, Inc.",
0x0474: "iApartment co., ltd.",
0x0475: "Icom inc.",
0x0476: "Oxstren Wearable Technologies Private Limited",
0x0477: "Blue Spark Technologies",
0x0478: "FarSite Communications Limited",
0x0479: "mywerk system GmbH",
0x047A: "Sinosun Technology Co., Ltd.",
0x047B: "MIYOSHI ELECTRONICS CORPORATION",
0x047C: "POWERMAT LTD",
0x047D: "Occly LLC",
0x047E: "OurHub Dev IvS",
0x047F: "Pro-Mark, Inc.",
0x0480: "Dynometrics Inc.",
0x0481: "Quintrax Limited",
0x0482: "POS Tuning Udo Vosshenrich GmbH & Co. KG",
0x0483: "Multi Care Systems B.V.",
0x0484: "Revol Technologies Inc",
0x0485: "SKIDATA AG",
0x0486: "DEV TECNOLOGIA INDUSTRIA, COMERCIO E MANUTENCAO DE EQUIPAMENTOS LTDA. - ME",
0x0487: "Centrica Connected Home",
0x0488: "Automotive Data Solutions Inc",
0x0489: "Igarashi Engineering",
0x048A: "Taelek Oy",
0x048B: "CP Electronics Limited",
0x048C: "Vectronix AG",
0x048D: "S-Labs Sp. z o.o.",
0x048E: "Companion Medical, Inc.",
0x048F: "BlueKitchen GmbH",
0x0490: "Matting AB",
0x0491: "SOREX - Wireless Solutions GmbH",
0x0492: "ADC Technology, Inc.",
0x0493: "Lynxemi Pte Ltd",
0x0494: "SENNHEISER electronic GmbH & Co. KG",
0x0495: "LMT Mercer Group, Inc",
0x0496: "Polymorphic Labs LLC",
0x0497: "Cochlear Limited",
0x0498: "METER Group, Inc. USA",
0x0499: "Ruuvi Innovations Ltd.",
0x049A: "Situne AS",
0x049B: "nVisti, LLC",
0x049C: "DyOcean",
0x049D: "Uhlmann & Zacher GmbH",
0x049E: "AND!XOR LLC",
0x049F: "tictote AB",
0x04A0: "Vypin, LLC",
0x04A1: "PNI Sensor Corporation",
0x04A2: "ovrEngineered, LLC",
0x04A3: "GT-tronics HK Ltd",
0x04A4: "Herbert Waldmann GmbH & Co. KG",
0x04A5: "Guangzhou FiiO Electronics Technology Co.,Ltd",
0x04A6: "Vinetech Co., Ltd",
0x04A7: "Dallas Logic Corporation",
0x04A8: "BioTex, Inc.",
0x04A9: "DISCOVERY SOUND TECHNOLOGY, LLC",
0x04AA: "LINKIO SAS",
0x04AB: "Harbortronics, Inc.",
0x04AC: "Undagrid B.V.",
0x04AD: "Shure Inc",
0x04AE: "ERM Electronic Systems LTD",
0x04AF: "BIOROWER Handelsagentur GmbH",
0x04B0: "Weba Sport und Med. Artikel GmbH",
0x04B1: "Kartographers Technologies Pvt. Ltd.",
0x04B2: "The Shadow on the Moon",
0x04B3: "mobike (Hong Kong) Limited",
0x04B4: "Inuheat Group AB",
0x04B5: "Swiftronix AB",
0x04B6: "Diagnoptics Technologies",
0x04B7: "Analog Devices, Inc.",
0x04B8: "Soraa Inc.",
0x04B9: "CSR Building Products Limited",
0x04BA: "Crestron Electronics, Inc.",
0x04BB: "Neatebox Ltd",
0x04BC: "Draegerwerk AG & Co. KGaA",
0x04BD: "AlbynMedical",
0x04BE: "Averos FZCO",
0x04BF: "VIT Initiative, LLC",
0x04C0: "Statsports International",
0x04C1: "Sospitas, s.r.o.",
0x04C2: "Dmet Products Corp.",
0x04C3: "Mantracourt Electronics Limited",
0x04C4: "TeAM Hutchins AB",
0x04C5: "Seibert Williams Glass, LLC",
0x04C6: "Insta GmbH",
0x04C7: "Svantek Sp. z o.o.",
0x04C8: "Shanghai Flyco Electrical Appliance Co., Ltd.",
0x04C9: "Thornwave Labs Inc",
0x04CA: "Steiner-Optik GmbH",
0x04CB: "Novo Nordisk A/S",
0x04CC: "Enflux Inc.",
0x04CD: "Safetech Products LLC",
0x04CE: "GOOOLED S.R.L.",
0x04CF: "DOM Sicherheitstechnik GmbH & Co. KG",
0x04D0: "Olympus Corporation",
0x04D1: "KTS GmbH",
0x04D2: "Anloq Technologies Inc.",
0x04D3: "Queercon, Inc",
0x04D4: "5th Element Ltd",
0x04D5: "Gooee Limited",
0x04D6: "LUGLOC LLC",
0x04D7: "Blincam, Inc.",
0x04D8: "FUJIFILM Corporation",
0x04D9: "RandMcNally",
0x04DA: "Franceschi Marina snc",
0x04DB: "Engineered Audio, LLC.",
0x04DC: "IOTTIVE (OPC) PRIVATE LIMITED",
0x04DD: "4MOD Technology",
0x04DE: "Lutron Electronics Co., Inc.",
0x04DF: "Emerson",
0x04E0: "Guardtec, Inc.",
0x04E1: "REACTEC LIMITED",
0x04E2: "EllieGrid",
0x04E3: "Under Armour",
0x04E4: "Woodenshark",
0x04E5: "Avack Oy",
0x04E6: "Smart Solution Technology, Inc.",
0x04E7: "REHABTRONICS INC.",
0x04E8: "STABILO International",
0x04E9: "Busch Jaeger Elektro GmbH",
0x04EA: "Pacific Bioscience Laboratories, Inc",
0x04EB: "Bird Home Automation GmbH",
0x04EC: "Motorola Solutions",
0x04ED: "R9 Technology, Inc.",
0x04EE: "Auxivia",
0x04EF: "DaisyWorks, Inc",
0x04F0: "Kosi Limited",
0x04F1: "Theben AG",
0x04F2: "InDreamer Techsol Private Limited",
0x04F3: "Cerevast Medical",
0x04F4: "ZanCompute Inc.",
0x04F5: "Pirelli Tyre S.P.A.",
0x04F6: "McLear Limited",
0x04F7: "Shenzhen Huiding Technology Co.,Ltd.",
0x04F8: "Convergence Systems Limited",
0x04F9: "Interactio",
0x04FA: "Androtec GmbH",
0x04FB: "Benchmark Drives GmbH & Co. KG",
0x04FC: "SwingLync L. L. C.",
0x04FD: "Tapkey GmbH",
0x04FE: "Woosim Systems Inc.",
0x04FF: "Microsemi Corporation",
0x0500: "Wiliot LTD.",
0x0501: "Polaris IND",
0x0502: "Specifi-Kali LLC",
0x0503: "Locoroll, Inc",
0x0504: "PHYPLUS Inc",
0x0505: "Inplay Technologies LLC",
0x0506: "Hager",
0x0507: "Yellowcog",
0x0508: "Axes System sp. z o. o.",
0x0509: "myLIFTER Inc.",
0x050A: "Shake-on B.V.",
0x050B: "Vibrissa Inc.",
0x050C: "OSRAM GmbH",
0x050D: "TRSystems GmbH",
0x050E: "Yichip Microelectronics (Hangzhou) Co.,Ltd.",
0x050F: "Foundation Engineering LLC",
0x0510: "UNI-ELECTRONICS, INC.",
0x0511: "Brookfield Equinox LLC",
0x0512: "Soprod SA",
0x0513: "9974091 Canada Inc.",
0x0514: "FIBRO GmbH",
0x0515: "RB Controls Co., Ltd.",
0x0516: "Footmarks",
0x0517: "Amtronic Sverige AB (formerly Amcore AB)",
0x0518: "MAMORIO.inc",
0x0519: "Tyto Life LLC",
0x051A: "Leica Camera AG",
0x051B: "Angee Technologies Ltd.",
0x051C: "EDPS",
0x051D: "OFF Line Co., Ltd.",
0x051E: "Detect Blue Limited",
0x051F: "Setec Pty Ltd",
0x0520: "Target Corporation",
0x0521: "IAI Corporation",
0x0522: "NS Tech, Inc.",
0x0523: "MTG Co., Ltd.",
0x0524: "Hangzhou iMagic Technology Co., Ltd",
0x0525: "HONGKONG NANO IC TECHNOLOGIES CO., LIMITED",
0x0526: "Honeywell International Inc.",
0x0527: "Albrecht JUNG",
0x0528: "Lunera Lighting Inc.",
0x0529: "Lumen UAB",
0x052A: "Keynes Controls Ltd",
0x052B: "Novartis AG",
0x052C: "Geosatis SA",
0x052D: "EXFO, Inc.",
0x052E: "LEDVANCE GmbH",
0x052F: "Center ID Corp.",
0x0530: "Adolene, Inc.",
0x0531: "D&M Holdings Inc.",
0x0532: "CRESCO Wireless, Inc.",
0x0533: "Nura Operations Pty Ltd",
0x0534: "Frontiergadget, Inc.",
0x0535: "Smart Component Technologies Limited",
0x0536: "ZTR Control Systems LLC",
0x0537: "MetaLogics Corporation",
0x0538: "Medela AG",
0x0539: "OPPLE Lighting Co., Ltd",
0x053A: "Savitech Corp.,",
0x053B: "prodigy",
0x053C: "Screenovate Technologies Ltd",
0x053D: "TESA SA",
0x053E: "CLIM8 LIMITED",
0x053F: "Silergy Corp",
0x0540: "SilverPlus, Inc",
0x0541: "Sharknet srl",
0x0542: "Mist Systems, Inc.",
0x0543: "MIWA LOCK CO.,Ltd",
0x0544: "OrthoSensor, Inc.",
0x0545: "Candy Hoover Group s.r.l",
0x0546: "Apexar Technologies S.A.",
0x0547: "LOGICDATA d.o.o.",
0x0548: "Knick Elektronische Messgeraete GmbH & Co. KG",
0x0549: "Smart Technologies and Investment Limited",
0x054A: "Linough Inc.",
0x054B: "Advanced Electronic Designs, Inc.",
0x054C: "Carefree Scott Fetzer Co Inc",
0x054D: "Sensome",
0x054E: "FORTRONIK storitve d.o.o.",
0x054F: "Sinnoz",
0x0550: "Versa Networks, Inc.",
0x0551: "Sylero",
0x0552: "Avempace SARL",
0x0553: "Nintendo Co., Ltd.",
0x0554: "National Instruments",
0x0555: "KROHNE Messtechnik GmbH",
0x0556: "Otodynamics Ltd",
0x0557: "Arwin Technology Limited",
0x0558: "benegear, inc.",
0x0559: "Newcon Optik",
0x055A: "CANDY HOUSE, Inc.",
0x055B: "FRANKLIN TECHNOLOGY INC",
0x055C: "Lely",
0x055D: "Valve Corporation",
0x055E: "Hekatron Vertriebs GmbH",
0x055F: "PROTECH S.A.S. DI GIRARDI ANDREA & C.",
0x0560: "Sarita CareTech APS (formerly Sarita CareTech IVS)",
0x0561: "Finder S.p.A.",
0x0562: "Thalmic Labs Inc.",
0x0563: "Steinel Vertrieb GmbH",
0x0564: "Beghelli Spa",
0x0565: "Beijing Smartspace Technologies Inc.",
0x0566: "CORE TRANSPORT TECHNOLOGIES NZ LIMITED",
0x0567: "Xiamen Everesports Goods Co., Ltd",
0x0568: "Bodyport Inc.",
0x0569: "Audionics System, INC.",
0x056A: "Flipnavi Co.,Ltd.",
0x056B: "Rion Co., Ltd.",
0x056C: "Long Range Systems, LLC",
0x056D: "Redmond Industrial Group LLC",
0x056E: "VIZPIN INC.",
0x056F: "BikeFinder AS",
0x0570: "Consumer Sleep Solutions LLC",
0x0571: "PSIKICK, INC.",
0x0572: "AntTail.com",
0x0573: "Lighting Science Group Corp.",
0x0574: "AFFORDABLE ELECTRONICS INC",
0x0575: "Integral Memroy Plc",
0x0576: "Globalstar, Inc.",
0x0577: "True Wearables, Inc.",
0x0578: "Wellington Drive Technologies Ltd",
0x0579: "Ensemble Tech Private Limited",
0x057A: "OMNI Remotes",
0x057B: "Duracell U.S. Operations Inc.",
0x057C: "Toor Technologies LLC",
0x057D: "Instinct Performance",
0x057E: "Beco, Inc",
0x057F: "Scuf Gaming International, LLC",
0x0580: "ARANZ Medical Limited",
0x0581: "LYS TECHNOLOGIES LTD",
0x0582: "Breakwall Analytics, LLC",
0x0583: "Code Blue Communications",
0x0584: "Gira Giersiepen GmbH & Co. KG",
0x0585: "Hearing Lab Technology",
0x0586: "LEGRAND",
0x0587: "Derichs GmbH",
0x0588: "ALT-TEKNIK LLC",
0x0589: "Star Technologies",
0x058A: "START TODAY CO.,LTD.",
0x058B: "Maxim Integrated Products",
0x058C: "MERCK Kommanditgesellschaft auf Aktien",
0x058D: "Jungheinrich Aktiengesellschaft",
0x058E: "Oculus VR, LLC",
0x058F: "HENDON SEMICONDUCTORS PTY LTD",
0x0590: "Pur3 Ltd",
0x0591: "Viasat Group S.p.A.",
0x0592: "IZITHERM",
0x0593: "Spaulding Clinical Research",
0x0594: "Kohler Company",
0x0595: "Inor Process AB",
0x0596: "My Smart Blinds",
0x0597: "RadioPulse Inc",
0x0598: "rapitag GmbH",
0x0599: "Lazlo326, LLC.",
0x059A: "Teledyne Lecroy, Inc.",
0x059B: "Dataflow Systems Limited",
0x059C: "Macrogiga Electronics",
0x059D: "Tandem Diabetes Care",
0x059E: "Polycom, Inc.",
0x059F: "Fisher & Paykel Healthcare",
0x05A0: "RCP Software Oy",
0x05A1: "Shanghai Xiaoyi Technology Co.,Ltd.",
0x05A2: "ADHERIUM(NZ) LIMITED",
0x05A3: "Axiomware Systems Incorporated",
0x05A4: "O. E. M. Controls, Inc.",
0x05A5: "Kiiroo BV",
0x05A6: "Telecon Mobile Limited",
0x05A7: "Sonos Inc",
0x05A8: "Tom Allebrandi Consulting",
0x05A9: "Monidor",
0x05AA: "Tramex Limited",
0x05AB: "Nofence AS",
0x05AC: "GoerTek Dynaudio Co., Ltd.",
0x05AD: "INIA",
0x05AE: "CARMATE MFG.CO.,LTD",
0x05AF: "ONvocal",
0x05B0: "NewTec GmbH",
0x05B1: "Medallion Instrumentation Systems",
0x05B2: "CAREL INDUSTRIES S.P.A.",
0x05B3: "Parabit Systems, Inc.",
0x05B4: "White Horse Scientific ltd",
0x05B5: "verisilicon",
0x05B6: "Elecs Industry Co.,Ltd.",
0x05B7: "Beijing Pinecone Electronics Co.,Ltd.",
0x05B8: "Ambystoma Labs Inc.",
0x05B9: "Suzhou Pairlink Network Technology",
0x05BA: "igloohome",
0x05BB: "Oxford Metrics plc",
0x05BC: "Leviton Mfg. Co., Inc.",
0x05BD: "ULC Robotics Inc.",
0x05BE: "RFID Global by Softwork SrL",
0x05BF: "Real-World-Systems Corporation",
0x05C0: "Nalu Medical, Inc.",
0x05C1: "P.I.Engineering",
0x05C2: "Grote Industries",
0x05C3: "Runtime, Inc.",
0x05C4: "Codecoup sp. z o.o. sp. k.",
0x05C5: "SELVE GmbH & Co. KG",
0x05C6: "Smart Animal Training Systems, LLC",
0x05C7: "Lippert Components, INC",
0x05C8: "SOMFY SAS",
0x05C9: "TBS Electronics B.V.",
0x05CA: "MHL Custom Inc",
0x05CB: "LucentWear LLC",
0x05CC: "WATTS ELECTRONICS",
0x05CD: "RJ Brands LLC",
0x05CE: "V-ZUG Ltd",
0x05CF: "Biowatch SA",
0x05D0: "Anova Applied Electronics",
0x05D1: "Lindab AB",
0x05D2: "frogblue TECHNOLOGY GmbH",
0x05D3: "Acurable Limited",
0x05D4: "LAMPLIGHT Co., Ltd.",
0x05D5: "TEGAM, Inc.",
0x05D6: "Zhuhai Jieli technology Co.,Ltd",
0x05D7: "modum.io AG",
0x05D8: "Farm Jenny LLC",
0x05D9: "Toyo Electronics Corporation",
0x05DA: "Applied Neural Research Corp",
0x05DB: "Avid Identification Systems, Inc.",
0x05DC: "Petronics Inc.",
0x05DD: "essentim GmbH",
0x05DE: "QT Medical INC.",
0x05DF: "VIRTUALCLINIC.DIRECT LIMITED",
0x05E0: "Viper Design LLC",
0x05E1: "Human, Incorporated",
0x05E2: "stAPPtronics GmbH",
0x05E3: "Elemental Machines, Inc.",
0x05E4: "Taiyo Yuden Co., Ltd",
0x05E5: "INEO ENERGY& SYSTEMS",
0x05E6: "Motion Instruments Inc.",
0x05E7: "PressurePro",
0x05E8: "COWBOY",
0x05E9: "iconmobile GmbH",
0x05EA: "ACS-Control-System GmbH",
0x05EB: "Bayerische Motoren Werke AG",
0x05EC: "Gycom Svenska AB",
0x05ED: "Fuji Xerox Co., Ltd",
0x05EE: "Glide Inc.",
0x05EF: "SIKOM AS",
0x05F0: "beken",
0x05F1: "The Linux Foundation",
0x05F2: "Try and E CO.,LTD.",
0x05F3: "SeeScan",
0x05F4: "Clearity, LLC",
0x05F5: "GS TAG",
0x05F6: "DPTechnics",
0x05F7: "TRACMO, INC.",
0x05F8: "Anki Inc.",
0x05F9: "Hagleitner Hygiene International GmbH",
0x05FA: "Konami Sports Life Co., Ltd.",
0x05FB: "Arblet Inc.",
0x05FC: "Masbando GmbH",
0x05FD: "Innoseis",
0x05FE: "Niko",
0x05FF: "Wellnomics Ltd",
0x0600: "iRobot Corporation",
0x0601: "Schrader Electronics",
0x0602: "Geberit International AG",
0x0603: "Fourth Evolution Inc",
0x0604: "Cell2Jack LLC",
0x0605: "FMW electronic Futterer u. Maier-Wolf OHG",
0x0606: "John Deere",
0x0607: "Rookery Technology Ltd",
0x0608: "KeySafe-Cloud",
0x0609: "BUCHI Labortechnik AG",
0x060A: "IQAir AG",
0x060B: "Triax Technologies Inc",
0x060C: "Vuzix Corporation",
0x060D: "TDK Corporation",
0x060E: "Blueair AB",
0x060F: "Signify Netherlands (formerlyPhilips Lighting B.V.)",
0x0610: "ADH GUARDIAN USA LLC",
0x0611: "Beurer GmbH",
0x0612: "Playfinity AS",
0x0613: "Hans Dinslage GmbH",
0x0614: "OnAsset Intelligence, Inc.",
0x0615: "INTER ACTION Corporation",
0x0616: "OS42 UG (haftungsbeschraenkt)",
0x0617: "WIZCONNECTED COMPANY LIMITED",
0x0618: "Audio-Technica Corporation",
0x0619: "Six Guys Labs, s.r.o.",
0x061A: "R.W. Beckett Corporation",
0x061B: "silex technology, inc.",
0x061C: "Univations Limited",
0x061D: "SENS Innovation ApS",
0x061E: "Diamond Kinetics, Inc.",
0x061F: "Phrame Inc.",
0x0620: "Forciot Oy",
0x0621: "Noordung d.o.o.",
0x0622: "Beam Labs, LLC",
0x0623: "Philadelphia Scientific (U.K.) Limited",
0x0624: "Biovotion AG",
0x0625: "Square Panda, Inc.",
0x0626: "Amplifico",
0x0627: "WEG S.A.",
0x0628: "Ensto Oy",
0x0629: "PHONEPE PVT LTD",
0x062A: "Lunatico Astronomia SL",
0x062B: "MinebeaMitsumi Inc.",
0x062C: "ASPion GmbH",
0x062D: "Vossloh-Schwabe Deutschland GmbH",
0x062E: "Procept",
0x062F: "ONKYO Corporation",
0x0630: "Asthrea D.O.O.",
0x0631: "Fortiori Design LLC",
0x0632: "Hugo Muller GmbH & Co KG",
0x0633: "Wangi Lai PLT",
0x0634: "Fanstel Corp",
0x0635: "Crookwood",
0x0636: "ELECTRONICA INTEGRAL DE SONIDO S.A.",
0x0637: "GiP Innovation Tools GmbH",
0x0638: "LX SOLUTIONS PTY LIMITED",
0x0639: "Shenzhen Minew Technologies Co., Ltd.",
0x063A: "Prolojik Limited",
0x063B: "Kromek Group Plc",
0x063C: "Contec Medical Systems Co., Ltd.",
0x063D: "Xradio Technology Co.,Ltd.",
0x063E: "The Indoor Lab, LLC",
0x063F: "LDL TECHNOLOGY",
0x0640: "Parkifi",
0x0641: "Revenue Collection Systems FRANCE SAS",
0x0642: "Bluetrum Technology Co.,Ltd",
0x0643: "makita corporation",
0x0644: "Apogee Instruments",
0x0645: "BM3",
0x0646: "SGV Group Holding GmbH & Co. KG",
0x0647: "MED-EL",
0x0648: "Ultune Technologies",
0x0649: "Ryeex Technology Co.,Ltd.",
0x064A: "Open Research Institute, Inc.",
0x064B: "Scale-Tec, Ltd",
0x064C: "Zumtobel Group AG",
0x064D: "iLOQ Oy",
0x064E: "KRUXWorks Technologies Private Limited",
0x064F: "Digital Matter Pty Ltd",
0x0650: "Coravin, Inc.",
0x0651: "Stasis Labs, Inc.",
0x0652: "ITZ Innovations- und Technologiezentrum GmbH",
0x0653: "Meggitt SA",
0x0654: "Ledlenser GmbH & Co. KG",
0x0655: "Renishaw PLC",
0x0656: "ZhuHai AdvanPro Technology Company Limited",
0x0657: "Meshtronix Limited",
0x0658: "Payex Norge AS",
0x0659: "UnSeen Technologies Oy",
0x065A: "Zound Industries International AB",
0x065B: "Sesam Solutions BV",
0x065C: "PixArt Imaging Inc.",
0x065D: "Panduit Corp.",
0x065E: "Alo AB",
0x065F: "Ricoh Company Ltd",
0x0660: "RTC Industries, Inc.",
0x0661: "Mode Lighting Limited",
0x0662: "Particle Industries, Inc.",
0x0663: "Advanced Telemetry Systems, Inc.",
0x0664: "RHA TECHNOLOGIES LTD",
0x0665: "Pure International Limited",
0x0666: "WTO Werkzeug-Einrichtungen GmbH",
0x0667: "Spark Technology Labs Inc.",
0x0668: "Bleb Technology srl",
0x0669: "Livanova USA, Inc.",
0x066A: "Brady Worldwide Inc.",
0x066B: "DewertOkin GmbH",
0x066C: "Ztove ApS",
0x066D: "Venso EcoSolutions AB",
0x066E: "Eurotronik Kranj d.o.o.",
0x066F: "Hug Technology Ltd",
0x0670: "Gema Switzerland GmbH",
0x0671: "Buzz Products Ltd.",
0x0672: "Kopi",
0x0673: "Innova Ideas Limited",
0x0674: "BeSpoon",
0x0675: "Deco Enterprises, Inc.",
0x0676: "Expai Solutions Private Limited",
0x0677: "Innovation First, Inc.",
0x0678: "SABIK Offshore GmbH",
0x0679: "4iiii Innovations Inc.",
0x067A: "The Energy Conservatory, Inc.",
0x067B: "I.FARM, INC.",
0x067C: "Tile, Inc.",
0x067D: "Form Athletica Inc.",
0x067E: "MbientLab Inc",
0x067F: "NETGRID S.N.C. DI BISSOLI MATTEO, CAMPOREALE SIMONE, TOGNETTI FEDERICO",
0x0680: "Mannkind Corporation",
0x0681: "Trade FIDES a.s.",
0x0682: "Photron Limited",
0x0683: "Eltako GmbH",
0x0684: "Dermalapps, LLC",
0x0685: "Greenwald Industries",
0x0686: "inQs Co., Ltd.",
0x0687: "Cherry GmbH",
0x0688: "Amsted Digital Solutions Inc.",
0x0689: "Tacx b.v.",
0x068A: "Raytac Corporation",
0x068B: "Jiangsu Teranovo Tech Co., Ltd.",
0x068C: "Changzhou Sound Dragon Electronics and Acoustics Co., Ltd",
0x068D: "JetBeep Inc.",
0x068E: "Razer Inc.",
0x068F: "JRM Group Limited",
0x0690: "Eccrine Systems, Inc.",
0x0691: "Curie Point AB",
0x0692: "Georg Fischer AG",
0x0693: "Hach - Danaher",
0x0694: "T&A Laboratories LLC",
0x0695: "Koki Holdings Co., Ltd.",
0x0696: "Gunakar Private Limited",
0x0697: "Stemco Products Inc",
0x0698: "Wood IT Security, LLC",
0x0699: "RandomLab SAS",
0x069A: "Adero, Inc. (formerly as TrackR, Inc.)",
0x069B: "Dragonchip Limited",
0x069C: "Noomi AB",
0x069D: "Vakaros LLC",
0x069E: "Delta Electronics, Inc.",
0x069F: "FlowMotion Technologies AS",
0x06A0: "OBIQ Location Technology Inc.",
0x06A1: "Cardo Systems, Ltd",
0x06A2: "Globalworx GmbH",
0x06A3: "Nymbus, LLC",
0x06A4: "Sanyo Techno Solutions Tottori Co., Ltd.",
0x06A5: "TEKZITEL PTY LTD",
0x06A6: "Roambee Corporation",
0x06A7: "Chipsea Technologies (ShenZhen) Corp.",
0x06A8: "GD Midea Air-Conditioning Equipment Co., Ltd.",
0x06A9: "Soundmax Electronics Limited",
0x06AA: "Produal Oy",
0x06AB: "HMS Industrial Networks AB",
0x06AC: "Ingchips Technology Co., Ltd.",
0x06AD: "InnovaSea Systems Inc.",
0x06AE: "SenseQ Inc.",
0x06AF: "Shoof Technologies",
0x06B0: "BRK Brands, Inc.",
0x06B1: "SimpliSafe, Inc.",
0x06B2: "Tussock Innovation 2013 Limited",
0x06B3: "The Hablab ApS",
0x06B4: "Sencilion Oy",
0x06B5: "Wabilogic Ltd.",
0x06B6: "Sociometric Solutions, Inc.",
0x06B7: "iCOGNIZE GmbH",
0x06B8: "ShadeCraft, Inc",
0x06B9: "Beflex Inc.",
0x06BA: "Beaconzone Ltd",
0x06BB: "Leaftronix Analogic Solutions Private Limited",
0x06BC: "TWS Srl",
0x06BD: "ABB Oy",
0x06BE: "HitSeed Oy",
0x06BF: "Delcom Products Inc.",
0x06C0: "CAME S.p.A.",
0x06C1: "Alarm.com Holdings, Inc",
0x06C2: "Measurlogic Inc.",
0x06C3: "King I Electronics.Co.,Ltd",
0x06C4: "Dream Labs GmbH",
0x06C5: "Urban Compass, Inc",
0x06C6: "Simm Tronic Limited",
0x06C7: "Somatix Inc",
0x06C8: "Storz & Bickel GmbH & Co. KG",
0x06C9: "MYLAPS B.V.",
0x06CA: "Shenzhen Zhongguang Infotech Technology Development Co., Ltd",
0x06CB: "Dyeware, LLC",
0x06CC: "Dongguan SmartAction Technology Co.,Ltd.",
0x06CD: "DIG Corporation",
0x06CE: "FIOR & GENTZ",
0x06CF: "Belparts N.V.",
0x06D0: "Etekcity Corporation",
0x06D1: "Meyer Sound Laboratories, Incorporated",
0x06D2: "CeoTronics AG",
0x06D3: "TriTeq Lock and Security, LLC",
0x06D4: "DYNAKODE TECHNOLOGY PRIVATE LIMITED",
0x06D5: "Sensirion AG",
0x06D6: "JCT Healthcare Pty Ltd",
0x06D7: "FUBA Automotive Electronics GmbH",
0x06D8: "AW Company",
0x06D9: "Shanghai Mountain View Silicon Co.,Ltd.",
0x06DA: "Zliide Technologies ApS",
0x06DB: "Automatic Labs, Inc.",
0x06DC: "Industrial Network Controls, LLC",
0x06DD: "Intellithings Ltd.",
0x06DE: "Navcast, Inc.",
0x06DF: "Hubbell Lighting, Inc.",
0x06E0: "Avaya",
0x06E1: "Milestone AV Technologies LLC",
0x06E2: "Alango Technologies Ltd",
0x06E3: "Spinlock Ltd",
0x06E4: "Aluna",
0x06E5: "OPTEX CO.,LTD.",
0x06E6: "NIHON DENGYO KOUSAKU",
0x06E7: "VELUX A/S",
0x06E8: "Almendo Technologies GmbH",
0x06E9: "Zmartfun Electronics, Inc.",
0x06EA: "SafeLine Sweden AB",
0x06EB: "Houston Radar LLC",
0x06EC: "Sigur",
0x06ED: "J Neades Ltd",
0x06EE: "Avantis Systems Limited",
0x06EF: "ALCARE Co., Ltd.",
0x06F0: "Chargy Technologies, SL",
0x06F1: "Shibutani Co., Ltd.",
0x06F2: "Trapper Data AB",
0x06F3: "Alfred International Inc.",
0x06F4: "Near Field Solutions Ltd",
0x06F5: "Vigil Technologies Inc.",
0x06F6: "Vitulo Plus BV",
0x06F7: "WILKA Schliesstechnik GmbH",
0x06F8: "BodyPlus Technology Co.,Ltd",
0x06F9: "happybrush GmbH",
0x06FA: "Enequi AB",
0x06FB: "Sartorius AG",
0x06FC: "Tom Communication Industrial Co.,Ltd.",
0x06FD: "ESS Embedded System Solutions Inc.",
0x06FE: "Mahr GmbH",
0x06FF: "Redpine Signals Inc",
0x0700: "TraqFreq LLC",
0x0701: "PAFERS TECH",
0x0702: 'Akciju sabiedriba "SAF TEHNIKA"',
0x0703: "Beijing Jingdong Century Trading Co., Ltd.",
0x0704: "JBX Designs Inc.",
0x0705: "AB Electrolux",
0x0706: "Wernher von Braun Center for ASdvanced Research",
0x0707: "Essity Hygiene and Health Aktiebolag",
0x0708: "Be Interactive Co., Ltd",
0x0709: "Carewear Corp.",
0x070A: "Huf Hlsbeck & Frst GmbH & Co. KG",
0x070B: "Element Products, Inc.",
0x070C: "Beijing Winner Microelectronics Co.,Ltd",
0x070D: "SmartSnugg Pty Ltd",
0x070E: "FiveCo Sarl",
0x070F: "California Things Inc.",
0x0710: "Audiodo AB",
0x0711: "ABAX AS",
0x0712: "Bull Group Company Limited",
0x0713: "Respiri Limited",
0x0714: "MindPeace Safety LLC",
0x0715: "MBARC LABS Inc (formerly Vgyan Solutions)",
0x0716: "Altonics",
0x0717: "iQsquare BV",
0x0718: "IDIBAIX enginneering",
0x0719: "ECSG",
0x071A: "REVSMART WEARABLE HK CO LTD",
0x071B: "Precor",
0x071C: "F5 Sports, Inc",
0x071D: "exoTIC Systems",
0x071E: "DONGGUAN HELE ELECTRONICS CO., LTD",
0x071F: "Dongguan Liesheng Electronic Co.Ltd",
0x0720: "Oculeve, Inc.",
0x0721: "Clover Network, Inc.",
0x0722: "Xiamen Eholder Electronics Co.Ltd",
0x0723: "Ford Motor Company",
0x0724: "Guangzhou SuperSound Information Technology Co.,Ltd",
0x0725: "Tedee Sp. z o.o.",
0x0726: "PHC Corporation",
0x0727: "STALKIT AS",
0x0728: "Eli Lilly and Company",
0x0729: "SwaraLink Technologies",
0x072A: "JMR embedded systems GmbH",
0x072B: "Bitkey Inc.",
0x072C: "GWA Hygiene GmbH",
0x072D: "Safera Oy",
0x072E: "Open Platform Systems LLC",
0x072F: "OnePlus Electronics (Shenzhen) Co., Ltd.",
0x0730: "Wildlife Acoustics, Inc.",
0x0731: "ABLIC Inc.",
0x0732: "Dairy Tech, Inc.",
0x0733: "Iguanavation, Inc.",
0x0734: "DiUS Computing Pty Ltd",
0x0735: "UpRight Technologies LTD",
0x0736: "FrancisFund, LLC",
0x0737: "LLC Navitek",
0x0738: "Glass Security Pte Ltd",
0x0739: "Jiangsu Qinheng Co., Ltd.",
0x073A: "Chandler Systems Inc.",
0x073B: "Fantini Cosmi s.p.a.",
0x073C: "Acubit ApS",
0x073D: "Beijing Hao Heng Tian Tech Co., Ltd.",
0x073E: "Bluepack S.R.L.",
0x073F: "Beijing Unisoc Technologies Co., Ltd.",
0x0740: "HITIQ LIMITED",
0x0741: "MAC SRL",
0x0742: "DML LLC",
0x0743: "Sanofi",
0x0744: "SOCOMEC",
0x0745: "WIZNOVA, Inc.",
0x0746: "Seitec Elektronik GmbH",
0x0747: "OR Technologies Pty Ltd",
0x0748: "GuangZhou KuGou Computer Technology Co.Ltd",
0x0749: "DIAODIAO (Beijing) Technology Co., Ltd.",
0x074A: "Illusory Studios LLC",
0x074B: "Sarvavid Software Solutions LLP",
0x074C: "iopool s.a.",
0x074D: "Amtech Systems, LLC",
0x074E: "EAGLE DETECTION SA",
0x074F: "MEDIATECH S.R.L.",
0x0750: "Hamilton Professional Services of Canada Incorporated",
0x0751: "Changsha JEMO IC Design Co.,Ltd",
0x0752: "Elatec GmbH",
0x0753: "JLG Industries, Inc.",
0x0754: "Michael Parkin",
0x0755: "Brother Industries, Ltd",
0x0756: "Lumens For Less, Inc",
0x0757: "ELA Innovation",
0x0758: "umanSense AB",
0x0759: "Shanghai InGeek Cyber Security Co., Ltd.",
0x075A: "HARMAN CO.,LTD.",
0x075B: "Smart Sensor Devices AB",
0x075C: "Antitronics Inc.",
0x075D: "RHOMBUS SYSTEMS, INC.",
0x075E: "Katerra Inc.",
0x075F: "Remote Solution Co., LTD.",
0x0760: "Vimar SpA",
0x0761: "Mantis Tech LLC",
0x0762: "TerOpta Ltd",
0x0763: "PIKOLIN S.L.",
0x0764: "WWZN Information Technology Company Limited",
0x0765: "Voxx International",
0x0766: "ART AND PROGRAM, INC.",
0x0767: "NITTO DENKO ASIA TECHNICAL CENTRE PTE. LTD.",
0x0768: "Peloton Interactive Inc.",
0x0769: "Force Impact Technologies",
0x076A: "Dmac Mobile Developments, LLC",
0x076B: "Engineered Medical Technologies",
0x076C: "Noodle Technology inc",
0x076D: "Graesslin GmbH",
0x076E: "WuQi technologies, Inc.",
0x076F: "Successful Endeavours Pty Ltd",
0x0770: "InnoCon Medical ApS",
0x0771: "Corvex Connected Safety",
0x0772: "Thirdwayv Inc.",
0x0773: "Echoflex Solutions Inc.",
0x0774: "C-MAX Asia Limited",
0x0775: "4eBusiness GmbH",
0x0776: "Cyber Transport Control GmbH",
0x0777: "Cue",
0x0778: "KOAMTAC INC.",
0x0779: "Loopshore Oy",
0x077A: "Niruha Systems Private Limited",
0x077B: "AmaterZ, Inc.",
0x077C: "radius co., ltd.",
0x077D: "Sensority, s.r.o.",
0x077E: "Sparkage Inc.",
0x077F: "Glenview Software Corporation",
0x0780: "Finch Technologies Ltd.",
0x0781: "Qingping Technology (Beijing) Co., Ltd.",
0x0782: "DeviceDrive AS",
0x0783: "ESEMBER LIMITED LIABILITY COMPANY",
0x0784: "audifon GmbH & Co. KG",
0x0785: "O2 Micro, Inc.",
0x0786: "HLP Controls Pty Limited",
0x0787: "Pangaea Solution",
0x0788: "BubblyNet, LLC",
0xFFFF: "This value has special meaning depending on the context in which it used. Link Manager Protocol (LMP): This value may be used in the internal and interoperability tests before a Company ID has been assigned. This value shall not be used in shipping end products. Device ID Profile: This value is reserved as the default vendor ID when no Device ID service record is present in a remote device.",
}
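# --- usage sketch (not part of the original mapping) -------------------------
# Assuming the dict above is bound to a name such as `company_identifiers`
# (the actual binding appears earlier in the file), a guarded lookup might be:
def _vendor_name(company_id: int) -> str:
    # 0xFFFF is reserved for testing and the Device ID default; it never
    # identifies a shipping vendor, so reject it explicitly.
    if company_id == 0xFFFF:
        raise ValueError("0xFFFF is reserved and does not identify a vendor")
    return company_identifiers.get(company_id, f"Unknown (0x{company_id:04X})")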
avg_line_length: 34.934951 | max_line_length: 408 | alphanum_fraction: 0.639303

hexsha: 4a16bef545b1bc3507ccc9c979d368101f6dde1f | size: 1,943 | ext: py | lang: Python
max_stars: toggl_extra/nubia_wiring/nubia_context.py @ oshev/toggl-extra (f187dee850eada14c99d0d76ddac20a5d824f9d8) | licenses: ["MIT"] | count: null | events: null
max_issues: toggl_extra/nubia_wiring/nubia_context.py @ oshev/toggl-extra (f187dee850eada14c99d0d76ddac20a5d824f9d8) | licenses: ["MIT"] | count: 1 | events: 2019-07-01T10:20:51.000Z to 2019-07-07T19:59:43.000Z
max_forks: toggl_extra/nubia_wiring/nubia_context.py @ oshev/toggl-extra (f187dee850eada14c99d0d76ddac20a5d824f9d8) | licenses: ["MIT"] | count: null | events: null
#!/usr/bin/env python3
# This class was largely borrowed from
# https://github.com/facebookincubator/python-nubia/tree/master/example
# so the original copyright is kept untouched.
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the package.
#
import yaml
from nubia import context
from nubia import exceptions
from nubia import eventbus
CONFIG_PATH = 'configs/toggl-extra.yaml'
class NubiaTogglExtraContext(context.Context):
def __init__(self):
super().__init__()
self.verbose = None
        with open(CONFIG_PATH, "r") as config_yaml_stream:
            config_root = yaml.load(config_yaml_stream, Loader=yaml.SafeLoader)
toggl_config_entry = self.get_config_param(config_root, 'Toggl')
self.toggl_auth_token = self.get_config_param(toggl_config_entry, 'auth_token')
@staticmethod
def get_config_param_recursive(entry, elements):
if len(elements) > 0 and type(entry) is dict:
return NubiaTogglExtraContext.get_config_param_recursive(entry[elements[0]], elements[1:])
else:
return entry
@staticmethod
def get_config_param(section_entries, path):
elements = path.split('.')
return NubiaTogglExtraContext.get_config_param_recursive(section_entries, elements)
def on_connected(self, *args, **kwargs):
pass
def on_cli(self, cmd, args):
# dispatch the on connected message
self.verbose = args.verbose
self.registry.dispatch_message(eventbus.Message.CONNECTED)
def on_interactive(self, args):
self.verbose = args.verbose
ret = self._registry.find_command("connect").run_cli(args)
if ret:
raise exceptions.CommandError("Failed starting interactive mode")
# dispatch the on connected message
self.registry.dispatch_message(eventbus.Message.CONNECTED)
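# --- usage sketch (not part of the original file) -----------------------------
# get_config_param walks one dict level per '.'-separated path element; the
# sample config below is illustrative only.
def _config_lookup_demo() -> None:
    cfg = {"Toggl": {"auth_token": "secret", "api": {"url": "https://example.invalid"}}}
    assert NubiaTogglExtraContext.get_config_param(cfg, "Toggl.auth_token") == "secret"
    assert NubiaTogglExtraContext.get_config_param(cfg, "Toggl.api.url") == "https://example.invalid"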
avg_line_length: 33.5 | max_line_length: 102 | alphanum_fraction: 0.709727

hexsha: 4a16bf692c3493cd5c391fd838c9332b0bbdc25f | size: 200 | ext: py | lang: Python
max_stars: SUYI/final/FullEducationData/test.py @ caHaber/cahaber2019 (d11dd4b448659af7d8927cc9dbf44d66343743bc) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-07-28T20:15:42.000Z to 2020-07-28T20:15:42.000Z
max_issues: SUYI/final/FullEducationData/test.py @ caHaber/oldwebsite (d11dd4b448659af7d8927cc9dbf44d66343743bc) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: SUYI/final/FullEducationData/test.py @ caHaber/oldwebsite (d11dd4b448659af7d8927cc9dbf44d66343743bc) | licenses: ["Apache-2.0"] | count: null | events: null
import pandas as pd

# Load the proxy data, sort by school name in descending order, and preview.
# (The original imported a nonexistent `anaconda2` module and called the
# long-removed DataFrame.sort, discarding its result.)
df = pd.read_csv('SUYI Proxy Data 2012-2013.csv')
df = df.sort_values('School', ascending=False)
print(df.head(30))
avg_line_length: 18.181818 | max_line_length: 49 | alphanum_fraction: 0.73

hexsha: 4a16bfe50043500b36068815b37d036fb2f9859d | size: 14,057 | ext: py | lang: Python
max_stars: homeassistant/components/zha/core/gateway.py @ maexono/home-assistant (c174b83f5408124fc7834e8282969a1e8f9cca16) | licenses: ["Apache-2.0"] | count: 2 | events: 2019-02-04T15:05:30.000Z to 2019-03-04T16:31:32.000Z
max_issues: homeassistant/components/zha/core/gateway.py @ maexono/home-assistant (c174b83f5408124fc7834e8282969a1e8f9cca16) | licenses: ["Apache-2.0"] | count: 1 | events: 2017-12-15T12:45:32.000Z to 2018-05-19T09:48:30.000Z
max_forks: homeassistant/components/zha/core/gateway.py @ maexono/home-assistant (c174b83f5408124fc7834e8282969a1e8f9cca16) | licenses: ["Apache-2.0"] | count: 3 | events: 2019-04-28T16:35:45.000Z to 2020-05-28T15:21:59.000Z
"""
Virtual gateway for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
import collections
import itertools
import logging
import os
import traceback
from homeassistant.components.system_log import LogEntry, _figure_out_source
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from ..api import async_get_device_info
from .channels import MAINS_POWERED, ZDOChannel
from .const import (
ADD_DEVICE_RELAY_LOGGERS, ATTR_MANUFACTURER, BELLOWS, CONF_BAUDRATE,
CONF_DATABASE, CONF_RADIO_TYPE, CONF_USB_PATH, CONTROLLER, CURRENT,
DATA_ZHA, DATA_ZHA_BRIDGE_ID, DATA_ZHA_CORE_COMPONENT, DATA_ZHA_GATEWAY,
DEBUG_LEVELS, DEFAULT_BAUDRATE, DEFAULT_DATABASE_NAME, DEVICE_FULL_INIT,
DEVICE_INFO, DEVICE_JOINED, DEVICE_REMOVED, DOMAIN, IEEE, LOG_ENTRY,
LOG_OUTPUT, MODEL, NWK, ORIGINAL, RADIO, RADIO_DESCRIPTION, RAW_INIT,
SIGNAL_REMOVE, SIGNATURE, TYPE, ZHA, ZHA_GW_MSG, ZIGPY, ZIGPY_DECONZ,
ZIGPY_XBEE)
from .device import DeviceStatus, ZHADevice
from .discovery import (
async_create_device_entity, async_dispatch_discovery_info,
async_process_endpoint)
from .patches import apply_application_controller_patch
from .registries import RADIO_TYPES
from .store import async_get_registry
_LOGGER = logging.getLogger(__name__)
EntityReference = collections.namedtuple(
'EntityReference', 'reference_id zha_device cluster_channels device_info')
class ZHAGateway:
"""Gateway that handles events that happen on the ZHA Zigbee network."""
def __init__(self, hass, config):
"""Initialize the gateway."""
self._hass = hass
self._config = config
self._component = EntityComponent(_LOGGER, DOMAIN, hass)
self._devices = {}
self._device_registry = collections.defaultdict(list)
self.zha_storage = None
self.application_controller = None
self.radio_description = None
hass.data[DATA_ZHA][DATA_ZHA_CORE_COMPONENT] = self._component
hass.data[DATA_ZHA][DATA_ZHA_GATEWAY] = self
self._log_levels = {
ORIGINAL: async_capture_log_levels(),
CURRENT: async_capture_log_levels()
}
self.debug_enabled = False
self._log_relay_handler = LogRelayHandler(hass, self)
async def async_initialize(self, config_entry):
"""Initialize controller and connect radio."""
self.zha_storage = await async_get_registry(self._hass)
usb_path = config_entry.data.get(CONF_USB_PATH)
baudrate = self._config.get(CONF_BAUDRATE, DEFAULT_BAUDRATE)
radio_type = config_entry.data.get(CONF_RADIO_TYPE)
radio_details = RADIO_TYPES[radio_type][RADIO]()
radio = radio_details[RADIO]
self.radio_description = RADIO_TYPES[radio_type][RADIO_DESCRIPTION]
await radio.connect(usb_path, baudrate)
if CONF_DATABASE in self._config:
database = self._config[CONF_DATABASE]
else:
database = os.path.join(
self._hass.config.config_dir, DEFAULT_DATABASE_NAME)
self.application_controller = radio_details[CONTROLLER](
radio, database)
apply_application_controller_patch(self)
self.application_controller.add_listener(self)
await self.application_controller.startup(auto_form=True)
self._hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID] = str(
self.application_controller.ieee)
init_tasks = []
for device in self.application_controller.devices.values():
init_tasks.append(self.async_device_initialized(device, False))
await asyncio.gather(*init_tasks)
def device_joined(self, device):
"""Handle device joined.
At this point, no information about the device is known other than its
address
"""
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_JOINED,
NWK: device.nwk,
IEEE: str(device.ieee)
}
)
def raw_device_initialized(self, device):
"""Handle a device initialization without quirks loaded."""
endpoint_ids = device.endpoints.keys()
ept_id = next((ept_id for ept_id in endpoint_ids if ept_id != 0), None)
manufacturer = 'Unknown'
model = 'Unknown'
if ept_id is not None:
manufacturer = device.endpoints[ept_id].manufacturer
model = device.endpoints[ept_id].model
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: RAW_INIT,
NWK: device.nwk,
IEEE: str(device.ieee),
MODEL: model,
ATTR_MANUFACTURER: manufacturer,
SIGNATURE: device.get_signature()
}
)
def device_initialized(self, device):
"""Handle device joined and basic information discovered."""
self._hass.async_create_task(
self.async_device_initialized(device, True))
def device_left(self, device):
"""Handle device leaving the network."""
pass
def device_removed(self, device):
"""Handle device being removed from the network."""
zha_device = self._devices.pop(device.ieee, None)
self._device_registry.pop(device.ieee, None)
if zha_device is not None:
device_info = async_get_device_info(self._hass, zha_device)
self._hass.async_create_task(zha_device.async_unsub_dispatcher())
async_dispatcher_send(
self._hass,
"{}_{}".format(SIGNAL_REMOVE, str(zha_device.ieee))
)
if device_info is not None:
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_REMOVED,
DEVICE_INFO: device_info
}
)
def get_device(self, ieee):
"""Return ZHADevice for given ieee."""
return self._devices.get(ieee)
def get_entity_reference(self, entity_id):
"""Return entity reference for given entity_id if found."""
for entity_reference in itertools.chain.from_iterable(
self.device_registry.values()):
if entity_id == entity_reference.reference_id:
return entity_reference
@property
def devices(self):
"""Return devices."""
return self._devices
@property
def device_registry(self):
"""Return entities by ieee."""
return self._device_registry
def register_entity_reference(
self, ieee, reference_id, zha_device, cluster_channels,
device_info):
"""Record the creation of a hass entity associated with ieee."""
self._device_registry[ieee].append(
EntityReference(
reference_id=reference_id,
zha_device=zha_device,
cluster_channels=cluster_channels,
device_info=device_info
)
)
@callback
def async_enable_debug_mode(self):
"""Enable debug mode for ZHA."""
self._log_levels[ORIGINAL] = async_capture_log_levels()
async_set_logger_levels(DEBUG_LEVELS)
self._log_levels[CURRENT] = async_capture_log_levels()
for logger_name in ADD_DEVICE_RELAY_LOGGERS:
logging.getLogger(logger_name).addHandler(self._log_relay_handler)
self.debug_enabled = True
@callback
def async_disable_debug_mode(self):
"""Disable debug mode for ZHA."""
async_set_logger_levels(self._log_levels[ORIGINAL])
self._log_levels[CURRENT] = async_capture_log_levels()
for logger_name in ADD_DEVICE_RELAY_LOGGERS:
logging.getLogger(logger_name).removeHandler(
self._log_relay_handler)
self.debug_enabled = False
@callback
def _async_get_or_create_device(self, zigpy_device, is_new_join):
"""Get or create a ZHA device."""
zha_device = self._devices.get(zigpy_device.ieee)
if zha_device is None:
zha_device = ZHADevice(self._hass, zigpy_device, self)
self._devices[zigpy_device.ieee] = zha_device
if not is_new_join:
entry = self.zha_storage.async_get_or_create(zha_device)
zha_device.async_update_last_seen(entry.last_seen)
zha_device.set_power_source(entry.power_source)
return zha_device
@callback
def async_device_became_available(
self, sender, is_reply, profile, cluster, src_ep, dst_ep, tsn,
command_id, args):
"""Handle tasks when a device becomes available."""
self.async_update_device(sender)
@callback
def async_update_device(self, sender):
"""Update device that has just become available."""
if sender.ieee in self.devices:
device = self.devices[sender.ieee]
# avoid a race condition during new joins
if device.status is DeviceStatus.INITIALIZED:
device.update_available(True)
async def async_update_device_storage(self):
"""Update the devices in the store."""
for device in self.devices.values():
self.zha_storage.async_update(device)
await self.zha_storage.async_save()
async def async_device_initialized(self, device, is_new_join):
"""Handle device joined and basic information discovered (async)."""
zha_device = self._async_get_or_create_device(device, is_new_join)
is_rejoin = False
if zha_device.status is not DeviceStatus.INITIALIZED:
discovery_infos = []
for endpoint_id, endpoint in device.endpoints.items():
async_process_endpoint(
self._hass, self._config, endpoint_id, endpoint,
discovery_infos, device, zha_device, is_new_join
)
if endpoint_id != 0:
for cluster in endpoint.in_clusters.values():
cluster.bind_only = False
for cluster in endpoint.out_clusters.values():
cluster.bind_only = True
else:
is_rejoin = is_new_join is True
_LOGGER.debug(
'skipping discovery for previously discovered device: %s',
"{} - is rejoin: {}".format(zha_device.ieee, is_rejoin)
)
if is_new_join:
# configure the device
await zha_device.async_configure()
zha_device.update_available(True)
elif zha_device.power_source is not None\
and zha_device.power_source == MAINS_POWERED:
# the device isn't a battery powered device so we should be able
# to update it now
_LOGGER.debug(
"attempting to request fresh state for %s %s",
zha_device.name,
"with power source: {}".format(
ZDOChannel.POWER_SOURCES.get(zha_device.power_source)
)
)
await zha_device.async_initialize(from_cache=False)
else:
await zha_device.async_initialize(from_cache=True)
if not is_rejoin:
for discovery_info in discovery_infos:
async_dispatch_discovery_info(
self._hass,
is_new_join,
discovery_info
)
device_entity = async_create_device_entity(zha_device)
await self._component.async_add_entities([device_entity])
if is_new_join:
device_info = async_get_device_info(self._hass, zha_device)
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_FULL_INIT,
DEVICE_INFO: device_info
}
)
async def shutdown(self):
"""Stop ZHA Controller Application."""
_LOGGER.debug("Shutting down ZHA ControllerApplication")
await self.application_controller.shutdown()
@callback
def async_capture_log_levels():
"""Capture current logger levels for ZHA."""
return {
BELLOWS: logging.getLogger(BELLOWS).getEffectiveLevel(),
ZHA: logging.getLogger(ZHA).getEffectiveLevel(),
ZIGPY: logging.getLogger(ZIGPY).getEffectiveLevel(),
ZIGPY_XBEE: logging.getLogger(ZIGPY_XBEE).getEffectiveLevel(),
ZIGPY_DECONZ: logging.getLogger(ZIGPY_DECONZ).getEffectiveLevel(),
}
@callback
def async_set_logger_levels(levels):
"""Set logger levels for ZHA."""
logging.getLogger(BELLOWS).setLevel(levels[BELLOWS])
logging.getLogger(ZHA).setLevel(levels[ZHA])
logging.getLogger(ZIGPY).setLevel(levels[ZIGPY])
logging.getLogger(ZIGPY_XBEE).setLevel(levels[ZIGPY_XBEE])
logging.getLogger(ZIGPY_DECONZ).setLevel(levels[ZIGPY_DECONZ])
class LogRelayHandler(logging.Handler):
"""Log handler for error messages."""
def __init__(self, hass, gateway):
"""Initialize a new LogErrorHandler."""
super().__init__()
self.hass = hass
self.gateway = gateway
def emit(self, record):
"""Relay log message via dispatcher."""
stack = []
if record.levelno >= logging.WARN:
if not record.exc_info:
stack = [f for f, _, _, _ in traceback.extract_stack()]
entry = LogEntry(record, stack,
_figure_out_source(record, stack, self.hass))
async_dispatcher_send(
self.hass,
ZHA_GW_MSG,
{
TYPE: LOG_OUTPUT,
LOG_ENTRY: entry.to_dict()
}
)
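# --- standalone sketch (not from the original file) ---------------------------
# async_enable_debug_mode/async_disable_debug_mode above implement a
# snapshot-and-restore pattern for logger levels. The same idea in isolation,
# with illustrative logger names:
def _debug_mode_demo() -> None:
    import logging as stdlog
    names = ("bellows", "zigpy")  # illustrative; the real names come from const
    original = {n: stdlog.getLogger(n).getEffectiveLevel() for n in names}
    for n in names:                               # force DEBUG while debugging
        stdlog.getLogger(n).setLevel(stdlog.DEBUG)
    for n, level in original.items():             # restore the snapshot on exit
        stdlog.getLogger(n).setLevel(level)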
avg_line_length: 37.286472 | max_line_length: 79 | alphanum_fraction: 0.636053

hexsha: 4a16c0283c13e780bcbe5889bd97e6a1a6913734 | size: 652 | ext: py | lang: Python
max_stars: zen/api/quotes/viewer.py @ ymussi/zen_quotes_of_python (2d6fa9cd7a1d20eee8e84b284f182ec89364b190) | licenses: ["MIT"] | count: null | events: null
max_issues: zen/api/quotes/viewer.py @ ymussi/zen_quotes_of_python (2d6fa9cd7a1d20eee8e84b284f182ec89364b190) | licenses: ["MIT"] | count: null | events: null
max_forks: zen/api/quotes/viewer.py @ ymussi/zen_quotes_of_python (2d6fa9cd7a1d20eee8e84b284f182ec89364b190) | licenses: ["MIT"] | count: null | events: null
from flask_restplus import Resource
from flask import request, jsonify
from zen.api import api
from zen.api.quotes import Quotes
import logging
import json
log = logging.getLogger(__name__)
ns = api.namespace(
'/', description='List Zen Quotes')
@ns.route('/')
@ns.route('/<int:number>')
@ns.route('/<string:lang>')
@ns.route('/<string:lang>/<int:number>')
class List(Resource):
@ns.response(code=400, description="Bad Request")
def get(self, lang=None, number=None):
"""
        List all Zen of Python quotes, in pt or en.
"""
q = Quotes()
res = q.get_quotes(lang, number)
return res
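# --- usage sketch (not part of the original file) -----------------------------
# The stacked @ns.route decorators expose a single handler at /, /<number>,
# /<lang> and /<lang>/<number>; lang and number default to None when absent.
# A hypothetical request, assuming the app serves on localhost:5000:
#
#   GET /pt/3  ->  List.get(lang='pt', number=3)  ->  three quotes in Portuguese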
avg_line_length: 23.285714 | max_line_length: 61 | alphanum_fraction: 0.653374

hexsha: 4a16c06ab9e3957ba7e0ce0b676153fc4856f649 | size: 1,046 | ext: py | lang: Python
max_stars: src/rules/intention/dispatch.py @ FrozenYogurtPuff/iStar-pipeline (aff129d201673925255890e06123798603b9163d) | licenses: ["MIT"] | count: null | events: null
max_issues: src/rules/intention/dispatch.py @ FrozenYogurtPuff/iStar-pipeline (aff129d201673925255890e06123798603b9163d) | licenses: ["MIT"] | count: null | events: null
max_forks: src/rules/intention/dispatch.py @ FrozenYogurtPuff/iStar-pipeline (aff129d201673925255890e06123798603b9163d) | licenses: ["MIT"] | count: null | events: null
from __future__ import annotations
import logging
import spacy_alignments as tokenizations
from src.deeplearning.infer.result import BertResult
from src.rules.dispatch import dispatch
from src.rules.intention.aux_slice.dispatch import dispatch as dispatch_slice
from src.utils.spacy import get_spacy
from src.utils.typing import RulePlugins
logger = logging.getLogger(__name__)
def get_rule_fixes(
sent: str,
b: BertResult,
funcs: RulePlugins | None = None,
is_slice: bool = True,
) -> BertResult:
nlp = get_spacy()
logger.info(sent)
s = nlp(sent)[:]
spacy_tokens = [i.text for i in s]
s2b, _ = tokenizations.get_alignments(spacy_tokens, b.tokens)
result = dispatch(s, b, s2b, funcs=funcs) if funcs else dispatch(s, b, s2b)
fix_result = b.apply_fix(result)
if is_slice:
slices = dispatch_slice(s)
slice_result = fix_result.apply_slices(slices)
logger.debug(slice_result)
return slice_result
else:
logger.debug(fix_result)
return fix_result
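# --- standalone sketch (not from the original file) ---------------------------
# get_alignments maps each spaCy token to the BERT wordpiece indices covering
# it (and vice versa); this is how `s2b` above is built. The tokens below are
# illustrative only.
def _alignment_demo() -> None:
    import spacy_alignments as tokenizations
    spacy_tokens = ["don't", "panic"]
    bert_tokens = ["don", "'", "t", "panic"]
    s2b, b2s = tokenizations.get_alignments(spacy_tokens, bert_tokens)
    assert s2b == [[0, 1, 2], [3]]  # "don't" spans three wordpieces
    assert b2s == [[0], [0], [0], [1]]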
avg_line_length: 28.27027 | max_line_length: 79 | alphanum_fraction: 0.717973

hexsha: 4a16c25be8925f9c84bdf980193258a577e0879d | size: 5,881 | ext: py | lang: Python
max_stars: evennia/utils/tests/test_validatorfuncs.py @ bradley-evans/evennia (4ad54ffd7ce5755454551dd26a2a410b3e417345) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_issues: evennia/utils/tests/test_validatorfuncs.py @ bradley-evans/evennia (4ad54ffd7ce5755454551dd26a2a410b3e417345) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: evennia/utils/tests/test_validatorfuncs.py @ bradley-evans/evennia (4ad54ffd7ce5755454551dd26a2a410b3e417345) | licenses: ["BSD-3-Clause"] | count: null | events: null
"""Tests for validatorfuncs """
from django.test import TestCase
from evennia.utils import validatorfuncs
import mock
import datetime
import pytz
class TestValidatorFuncs(TestCase):
def test_text_ok(self):
for val in [None, -123, 'abc', 1.234, {1:True, 2:False}, ['a', 1]]:
self.assertEqual(str(val), validatorfuncs.text(val))
@mock.patch('builtins.str')
def test_text_raises_ValueError(self, mocked_str):
mocked_str.side_effect = Exception
with self.assertRaises(ValueError):
validatorfuncs.text(None)
def test_color_ok(self):
for color in ['r', 'g', 'b', 'H', 'R', 'M', '^']:
self.assertEqual(color, validatorfuncs.color(color))
def test_color_falsy_raises_ValueError(self):
for color in [None, (), [], False, True, {}]:
with self.assertRaises(ValueError):
validatorfuncs.color(color)
def test_datetime_ok(self):
for dt in ['Oct 12 1:00 1492', 'Jan 2 12:00 2020', 'Dec 31 00:00 2018']:
self.assertTrue(
isinstance(validatorfuncs.datetime(dt, from_tz=pytz.UTC),
datetime.datetime))
def test_datetime_raises_ValueError(self):
for dt in ['', 'January 1, 2019', '1/1/2019', 'Jan 1 2019']:
with self.assertRaises(ValueError):
validatorfuncs.datetime(dt)
def test_duration_ok(self):
for d in ['1d', '2w', '3h', '4s', '5m', '6y']:
self.assertTrue(
isinstance(validatorfuncs.duration(d), datetime.timedelta))
# THE FOLLOWING FAILS, year calculation seems to be incorrect
# self.assertEqual(
# datetime.timedelta(1+5*365, 2, 0, 0, 3, 4, 5),
# validatorfuncs.duration('1d 2s 3m 4h 5w 5y'))
def test_duration_raises_ValueError(self):
for d in ['', '1', '5days', '1Week']:
with self.assertRaises(ValueError):
validatorfuncs.duration(d)
def test_future_ok(self):
year = int(datetime.datetime.utcnow().strftime("%Y"))
for f in [f'Jan 2 12:00 {year+1}', f'Dec 31 00:00 {year+1}']:
self.assertTrue(
isinstance(validatorfuncs.future(f, from_tz=pytz.UTC),
datetime.datetime))
def test_future_raises_ValueError(self):
year = int(datetime.datetime.utcnow().strftime("%Y"))
for f in [f'Jan 2 12:00 {year-1}', f'Dec 31 00:00 {year-1}']:
with self.assertRaises(ValueError):
validatorfuncs.future(f, from_tz=pytz.UTC)
def test_signed_integer_ok(self):
for si in ['123', '4567890', '001', '-123', '-45', '0']:
self.assertEqual(int(si), validatorfuncs.signed_integer(si))
@mock.patch('builtins.int')
def test_signed_integer_raises_ValueError(self, mocked_int):
for si in ['', '000', 'abc']:
mocked_int.side_effect = ValueError
with self.assertRaises(ValueError):
validatorfuncs.signed_integer(si)
def test_positive_integer_ok(self):
for pi in ['123', '4567890', '001']:
self.assertEqual(int(pi), validatorfuncs.positive_integer(pi))
@mock.patch('builtins.int')
def test_positive_integer_raises_ValueError(self, mocked_int):
mocked_int.return_value = -1
with self.assertRaises(ValueError):
validatorfuncs.positive_integer(str(-1))
for pi in ['', '000', 'abc', '-1']:
mocked_int.side_effect = ValueError
with self.assertRaises(ValueError):
validatorfuncs.positive_integer(pi)
def test_unsigned_integer_ok(self):
for ui in ['123', '4567890', '001', '0']:
self.assertEqual(int(ui), validatorfuncs.unsigned_integer(ui))
@mock.patch('builtins.int')
def test_unsigned_integer_raises_ValueError(self, mocked_int):
mocked_int.return_value = -1
with self.assertRaises(ValueError):
validatorfuncs.unsigned_integer(str(-1))
for ui in ['', '000', 'abc', '-1', '0']:
mocked_int.side_effect = ValueError
with self.assertRaises(ValueError):
validatorfuncs.unsigned_integer(ui)
def test_boolean(self):
for b in ['true', '1', 'on', 'ENABLED']:
self.assertTrue(validatorfuncs.boolean(b))
for b in ['FalSe', '0', 'oFF', 'disabled']:
self.assertFalse(validatorfuncs.boolean(b))
def test_boolean_raises_ValueError(self):
for b in ['', None, 1, 0, True, False, [None], {True:True}]:
with self.assertRaises(ValueError):
validatorfuncs.boolean(b)
def test_timezone_ok(self):
for tz in ['America/Chicago', 'GMT', 'UTC']:
self.assertEqual(tz, validatorfuncs.timezone(tz).zone)
def test_timezone_raises_ValueError(self):
for tz in ['America', None, '', 'Mars', 'DT']:
with self.assertRaises(ValueError):
validatorfuncs.timezone(tz)
def test_email_ok(self):
for e in ['a@a.aa', 'zeus@olympus.net']:
self.assertEqual(e, validatorfuncs.email(e))
def test_email_raises_ValueError(self):
for e in ['', None, ['abc@abc.com'], 123]:
with self.assertRaises(ValueError):
validatorfuncs.email(e)
def test_lock_ok(self):
for l in ['do:true;look:no', 'a:t']:
self.assertEqual(l, validatorfuncs.lock(l))
def test_lock_raises_ValueError(self):
for l in [';;;', '', ':', ':::', ';:;:', 'x:', ':y']:
with self.assertRaises(ValueError):
validatorfuncs.lock(l)
with self.assertRaises(ValueError):
validatorfuncs.lock('view:',
access_options=())
with self.assertRaises(ValueError):
validatorfuncs.lock('view:',
access_options=('look'))
avg_line_length: 38.690789 | max_line_length: 80 | alphanum_fraction: 0.600408

hexsha: 4a16c260f33c800a2f7f166b5ac402a88624ffc9 | size: 53,225 | ext: py | lang: Python
max_stars: Allura/allura/tests/functional/test_neighborhood.py @ rohankumardubey/allura (9c490a051ca912d28b81ce656441d6fed100cb24) | licenses: ["Apache-2.0"] | count: 113 | events: 2015-03-25T10:33:37.000Z to 2022-02-16T20:55:06.000Z
max_issues: Allura/allura/tests/functional/test_neighborhood.py @ rohankumardubey/allura (9c490a051ca912d28b81ce656441d6fed100cb24) | licenses: ["Apache-2.0"] | count: 4 | events: 2017-08-04T16:19:07.000Z to 2020-06-08T19:01:33.000Z
max_forks: Allura/allura/tests/functional/test_neighborhood.py @ rohankumardubey/allura (9c490a051ca912d28b81ce656441d6fed100cb24) | licenses: ["Apache-2.0"] | count: 36 | events: 2015-08-14T16:27:39.000Z to 2022-02-16T20:54:35.000Z
# coding=utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import os
from io import BytesIO
import six.moves.urllib.parse
import six.moves.urllib.request
import six.moves.urllib.error
from io import open
import PIL
from mock import patch
from tg import config
from alluratest.tools import assert_equal, assert_in, assert_not_equal
from ming.orm.ormsession import ThreadLocalORMSession, session
from paste.httpexceptions import HTTPFound, HTTPMovedPermanently
from tg import app_globals as g, tmpl_context as c
import allura
from allura import model as M
from allura.tests import TestController
from allura.tests import decorators as td
from allura.lib import helpers as h
from allura.lib import utils
from alluratest.controller import setup_trove_categories
from six.moves import map
class TestNeighborhood(TestController):
def test_home_project(self):
r = self.app.get('/adobe/wiki/', status=301)
assert r.location.endswith('/adobe/wiki/Home/')
r = r.follow()
assert 'This is the "Adobe" neighborhood' in str(r), str(r)
r = self.app.get(
'/adobe/admin/', extra_environ=dict(username=str('test-user')),
status=403)
def test_redirect(self):
r = self.app.post('/adobe/_admin/update',
params=dict(redirect='wiki/Home/'),
extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/')
assert r.location.endswith('/adobe/wiki/Home/')
@patch('allura.model.neighborhood.Neighborhood.use_wiki_page_as_root', True)
def test_wiki_as_home(self):
r = self.app.get('/adobe/', status=200)
assert 'This is the "Adobe" neighborhood' in str(r), str(r)
def test_admin(self):
r = self.app.get('/adobe/_admin/', extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/_admin/accolades',
extra_environ=dict(username=str('root')))
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = True
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='',
homepage='# MozQ1!', tracking_id='U-123456'),
extra_environ=dict(username=str('root')))
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='',
homepage='# MozQ1!\n[Root]'),
extra_environ=dict(username=str('root')))
# make sure project_template is validated as proper json
r = self.app.post('/adobe/_admin/update',
params=dict(project_template='{'),
extra_environ=dict(username=str('root')))
assert 'Invalid JSON' in r
def test_admin_overview_audit_log(self):
def check_log(message):
return M.AuditLog.query.find({'message': message}).count() == 1
nbhd = M.Neighborhood.query.get(name='Projects')
nbhd.features['css'] = 'custom'
nbhd.features['google_analytics'] = True
params = {
'name': 'Pjs',
'redirect': 'http://fake.org/',
'show_title': 'false',
'allow_browse': 'false',
'css': '.class { border: 1px; }',
'tracking_id': 'U-123456',
'homepage': '[Homepage]',
'project_list_url': 'http://fake.org/project_list',
'project_template': '{"name": "template"}',
'anchored_tools': 'wiki:Wiki',
'prohibited_tools': 'wiki, tickets'
}
self.app.post('/p/_admin/update', params=params,
extra_environ=dict(username=str('root')))
# must get as many log records as many values are updated
assert M.AuditLog.query.find().count() == len(params)
assert check_log('change neighborhood name to Pjs')
assert check_log('change neighborhood redirect to http://fake.org/')
assert check_log('change neighborhood show title to False')
assert check_log('change neighborhood allow browse to False')
assert check_log('change neighborhood css to .class { border: 1px; }')
assert check_log('change neighborhood homepage to [Homepage]')
assert check_log('change neighborhood project list url to '
'http://fake.org/project_list')
assert check_log('change neighborhood project template to '
'{"name": "template"}')
assert check_log('update neighborhood tracking_id')
assert check_log('update neighborhood prohibited tools')
def test_prohibited_tools(self):
self.app.post('/p/_admin/update',
params=dict(name='Projects',
prohibited_tools='wiki, tickets'),
extra_environ=dict(username=str('root')))
r = self.app.get('/p/_admin/overview', extra_environ=dict(username=str('root')))
assert 'wiki, tickets' in r
c.user = M.User.query.get(username='root')
c.project = M.Project.query.get(shortname='test')
data = c.project.nav_data(admin_options=True)
assert 'Wiki' not in data
assert 'Tickets' not in data
r = self.app.post('/p/_admin/update',
params=dict(name='Projects',
prohibited_tools='wiki, test'),
extra_environ=dict(username=str('root')))
assert 'error' in self.webflash(r), self.webflash(r)
@td.with_wiki
def test_anchored_tools(self):
neighborhood = M.Neighborhood.query.get(name='Projects')
r = self.app.post('/p/_admin/update',
params=dict(name='Projects',
anchored_tools='wiki:Wiki, tickets:Ticket'),
extra_environ=dict(username=str('root')))
assert 'error' not in self.webflash(r)
r = self.app.post('/p/_admin/update',
params=dict(name='Projects',
anchored_tools='w!iki:Wiki, tickets:Ticket'),
extra_environ=dict(username=str('root')))
assert 'error' in self.webflash(r)
assert_equal(neighborhood.anchored_tools, 'wiki:Wiki, tickets:Ticket')
r = self.app.post('/p/_admin/update',
params=dict(name='Projects',
anchored_tools='wiki:Wiki,'),
extra_environ=dict(username=str('root')))
assert 'error' in self.webflash(r)
assert_equal(neighborhood.anchored_tools, 'wiki:Wiki, tickets:Ticket')
r = self.app.post('/p/_admin/update',
params=dict(name='Projects',
anchored_tools='badname,'),
extra_environ=dict(username=str('root')))
assert 'error' in self.webflash(r)
assert_equal(neighborhood.anchored_tools, 'wiki:Wiki, tickets:Ticket')
r = self.app.get('/p/test/admin/overview')
top_nav = r.html.find(id='top_nav')
assert top_nav.find(href='/p/test/wiki/'), top_nav
assert top_nav.find(href='/p/test/tickets/'), top_nav
c.user = M.User.query.get(username='root')
c.project = M.Project.query.get(shortname='test')
data = c.project.nav_data(admin_options=True)
for tool in data['menu']:
if tool['name'].lower() == 'wiki':
menu = [name['text'] for name in tool['admin_options']]
assert 'Delete' not in menu
break
def test_show_title(self):
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
neighborhood = M.Neighborhood.query.get(name='Adobe')
# if not set show_title must be True
assert neighborhood.show_title
# title should be present
assert 'class="project_title"' in str(r)
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='',
homepage='# MozQ1!',
tracking_id='U-123456',
show_title='false'),
extra_environ=dict(username=str('root')))
# no title now
r = self.app.get('/adobe/', extra_environ=dict(username=str('root')))
assert 'class="project_title"' not in str(r)
r = self.app.get('/adobe/wiki/Home/',
extra_environ=dict(username=str('root')))
assert 'class="project_title"' not in str(r)
# title must be present on project page
r = self.app.get('/adobe/adobe-1/admin/',
extra_environ=dict(username=str('root')))
assert 'class="project_title"' in str(r)
def test_admin_stats_del_count(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.deleted = True
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/',
extra_environ=dict(username=str('root')))
assert 'Deleted: 1' in r
assert 'Private: 0' in r
def test_admin_stats_priv_count(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.deleted = False
proj.private = True
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/',
extra_environ=dict(username=str('root')))
assert 'Deleted: 0' in r
assert 'Private: 1' in r
def test_admin_stats_adminlist(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.private = False
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/adminlist',
extra_environ=dict(username=str('root')))
pq = M.Project.query.find(
dict(neighborhood_id=neighborhood._id, deleted=False))
pq.sort('name')
projects = pq.skip(0).limit(int(25)).all()
for proj in projects:
admin_role = M.ProjectRole.query.get(
project_id=proj.root_project._id, name='Admin')
if admin_role is None:
continue
user_role_list = M.ProjectRole.query.find(
dict(project_id=proj.root_project._id, name=None)).all()
for ur in user_role_list:
if ur.user is not None and admin_role._id in ur.roles:
assert proj.name in r
assert ur.user.username in r
def test_icon(self):
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(
allura.__path__[0], 'nf', 'allura', 'images', file_name)
file_data = open(file_path, 'rb').read()
upload = ('icon', file_name, file_data)
r = self.app.get('/adobe/_admin/', extra_environ=dict(username=str('root')))
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='',
homepage='# MozQ1'),
extra_environ=dict(username=str('root')), upload_files=[upload])
r = self.app.get('/adobe/icon')
image = PIL.Image.open(BytesIO(r.body))
assert image.size == (48, 48)
r = self.app.get('/adobe/icon?foo=bar')
def test_google_analytics(self):
# analytics allowed
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = True
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert 'Google Analytics ID' in r
r = self.app.get('/adobe/adobe-1/admin/overview',
extra_environ=dict(username=str('root')))
assert 'Google Analytics ID' in r
r = self.app.post('/adobe/_admin/update',
params=dict(name='Adobe', css='',
homepage='# MozQ1', tracking_id='U-123456'),
extra_environ=dict(username=str('root')), status=302)
r = self.app.post('/adobe/adobe-1/admin/update',
params=dict(tracking_id='U-654321'),
extra_environ=dict(username=str('root')), status=302)
r = self.app.get('/adobe/adobe-1/admin/overview',
extra_environ=dict(username=str('root')))
assert "_add_tracking('nbhd', 'U-123456');" in r, r
assert "_add_tracking('proj', 'U-654321');" in r
# analytics not allowed
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = False
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert 'Google Analytics ID' not in r
r = self.app.get('/adobe/adobe-1/admin/overview',
extra_environ=dict(username=str('root')))
assert 'Google Analytics ID' not in r
r = self.app.get('/adobe/adobe-1/admin/overview',
extra_environ=dict(username=str('root')))
assert "_add_tracking('nbhd', 'U-123456');" not in r
assert "_add_tracking('proj', 'U-654321');" not in r
def test_custom_css(self):
test_css = '.test{color:red;}'
custom_css = 'Custom CSS'
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.css = test_css
neighborhood.features['css'] = 'none'
r = self.app.get('/adobe/')
assert test_css not in r
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert custom_css not in r
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'picker'
r = self.app.get('/adobe/')
while isinstance(r.response, HTTPFound) or isinstance(r.response, HTTPMovedPermanently):
r = r.follow()
assert test_css in r
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert custom_css in r
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'custom'
r = self.app.get('/adobe/')
while isinstance(r.response, HTTPFound) or isinstance(r.response, HTTPMovedPermanently):
r = r.follow()
assert test_css in r
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert custom_css in r
def test_picker_css(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'picker'
r = self.app.get('/adobe/_admin/overview',
extra_environ=dict(username=str('root')))
assert 'Project title, font' in r
assert 'Project title, color' in r
assert 'Bar on top' in r
assert 'Title bar, background' in r
assert 'Title bar, foreground' in r
r = self.app.post('/adobe/_admin/update',
params={'name': 'Adobe',
'css': '',
'homepage': '',
'css-projecttitlefont': 'arial,sans-serif',
'css-projecttitlecolor': 'green',
'css-barontop': '#555555',
'css-titlebarbackground': '#333',
'css-titlebarcolor': '#444'},
extra_environ=dict(username=str('root')), upload_files=[])
neighborhood = M.Neighborhood.query.get(name='Adobe')
assert '/*projecttitlefont*/.project_title{font-family:arial,sans-serif;}' in neighborhood.css
assert '/*projecttitlecolor*/.project_title{color:green;}' in neighborhood.css
assert '/*barontop*/.pad h2.colored {background-color:#555555; background-image: none;}' in neighborhood.css
assert '/*titlebarbackground*/.pad h2.title{background-color:#333; background-image: none;}' in neighborhood.css
assert "/*titlebarcolor*/.pad h2.title, .pad h2.title small a {color:#444;}" in neighborhood.css
def test_max_projects(self):
# Set max value to unlimit
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['max_projects'] = None
r = self.app.post('/p/register',
params=dict(
project_unixname='maxproject1', project_name='Max project1',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('root')), status=302)
assert '/p/maxproject1/admin' in r.location
# Set max value to 0
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['max_projects'] = 0
r = self.app.post('/p/register',
params=dict(
project_unixname='maxproject2', project_name='Max project2',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('root')))
while isinstance(r.response, HTTPFound):
r = r.follow()
assert 'You have exceeded the maximum number of projects' in r
def test_project_rate_limit(self):
# Set rate limit to unlimit
with h.push_config(config, **{'project.rate_limits': '{}'}):
r = self.app.post('/p/register',
params=dict(
project_unixname='rateproject1', project_name='Rate project1',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('test-user-1')), status=302)
assert '/p/rateproject1/admin' in r.location
# Set rate limit to 1 in first hour of user account
with h.push_config(config, **{'project.rate_limits': '{"3600": 1}'}):
r = self.app.post('/p/register',
params=dict(
project_unixname='rateproject2', project_name='Rate project2',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('test-user-1')))
while isinstance(r.response, HTTPFound):
r = r.follow()
assert 'Project creation rate limit exceeded. Please try again later.' in r
def test_project_rate_limit_admin(self):
# Set rate limit to unlimit
with h.push_config(config, **{'project.rate_limits': '{}'}):
r = self.app.post('/p/register',
params=dict(
project_unixname='rateproject1', project_name='Rate project1',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('root')), status=302)
assert '/p/rateproject1/admin' in r.location
# Set rate limit to 1 in first hour of user account
with h.push_config(config, **{'project.rate_limits': '{"3600": 1}'}):
r = self.app.post('/p/register',
params=dict(
project_unixname='rateproject2', project_name='Rate project2',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('root')))
assert '/p/rateproject2/admin' in r.location
def test_invite(self):
p_nbhd_id = str(M.Neighborhood.query.get(name='Projects')._id)
r = self.app.get('/adobe/_moderate/',
extra_environ=dict(username=str('root')))
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='adobe-1', invite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'error' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='no_such_user',
invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'error' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'invited' in r, r
assert 'warning' not in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'warning' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', uninvite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'uninvited' in r
assert 'warning' not in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', uninvite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'warning' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on',
neighborhood_id=p_nbhd_id),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'invited' in r
assert 'warning' not in r
def test_evict(self):
r = self.app.get('/adobe/_moderate/',
extra_environ=dict(username=str('root')))
r = self.app.post('/adobe/_moderate/evict',
params=dict(pid='test'),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'error' in r
r = self.app.post('/adobe/_moderate/evict',
params=dict(pid='adobe-1'),
extra_environ=dict(username=str('root')))
r = self.app.get(r.location, extra_environ=dict(username=str('root')))
assert 'adobe-1 evicted to Projects' in r
def test_home(self):
self.app.get('/adobe/')
def test_register(self):
r = self.app.get('/adobe/register', status=405)
r = self.app.post('/adobe/register',
params=dict(
project_unixname='', project_name='Nothing',
project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username=str('root')))
assert r.html.find('div', {'class': 'error'}
).string == 'Please use 3-15 small letters, numbers, and dashes.'
r = self.app.post('/adobe/register',
params=dict(
project_unixname='mymoz', project_name='My Moz',
project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username=str('*anonymous')),
status=302)
r = self.app.post('/adobe/register',
params=dict(
project_unixname='foo.mymoz', project_name='My Moz',
project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username=str('root')))
assert r.html.find('div', {'class': 'error'}
).string == 'Please use 3-15 small letters, numbers, and dashes.'
r = self.app.post('/p/register',
params=dict(
project_unixname='test', project_name='Tester',
project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username=str('root')))
assert r.html.find('div', {'class': 'error'}
).string == 'This project name is taken.'
r = self.app.post('/adobe/register',
params=dict(
project_unixname='mymoz', project_name='My Moz',
project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username=str('root')),
status=302)
def test_register_private_fails_for_anon(self):
r = self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username=str('*anonymous')),
status=302)
assert config.get('auth.login_url', '/auth/') in r.location, r.location
def test_register_private_fails_for_non_admin(self):
self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username=str('test-user')),
status=403)
def test_register_private_fails_for_non_private_neighborhood(self):
# Turn off private
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['private_projects'] = False
r = self.app.get('/p/add_project', extra_environ=dict(username=str('root')))
assert 'private_project' not in r
r = self.app.post(
'/p/register',
params=dict(
project_unixname='myprivate1',
project_name='My Priv1',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username=str('root')))
cookies = r.headers.getall('Set-Cookie')
flash_msg_cookies = list(map(six.moves.urllib.parse.unquote, cookies))
assert any('Internal Error' in cookie for cookie in flash_msg_cookies)
proj = M.Project.query.get(
shortname='myprivate1', neighborhood_id=neighborhood._id)
assert proj is None
# Turn on private
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['private_projects'] = True
r = self.app.get('/p/add_project', extra_environ=dict(username=str('root')))
assert 'private_project' in r
self.app.post(
'/p/register',
params=dict(
project_unixname='myprivate2',
project_name='My Priv2',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username=str('root')))
proj = M.Project.query.get(
shortname='myprivate2', neighborhood_id=neighborhood._id)
assert proj.private
def test_register_private_ok(self):
r = self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on',
tools='wiki'),
antispam=True,
extra_environ=dict(username=str('root')),
status=302)
assert config.get('auth.login_url',
'/auth/') not in r.location, r.location
r = self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username=str('root'))).follow(extra_environ=dict(username=str('root')), status=200)
r = self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username=str('*anonymous')),
status=302)
assert config.get('auth.login_url', '/auth/') in r.location, r.location
self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username=str('test-user')),
status=403)
def test_project_template(self):
setup_trove_categories()
icon_url = 'file://' + \
os.path.join(allura.__path__[0], 'nf', 'allura',
'images', 'neo-icon-set-454545-256x350.png')
test_groups = [{
"name": "Viewer", # group will be created, all params are valid
"permissions": ["read"],
"usernames": ["user01"]
}, {
"name": "", # group won't be created - invalid name
"permissions": ["read"],
"usernames": ["user01"]
}, {
"name": "TestGroup1", # group won't be created - invalid perm name
"permissions": ["foobar"],
"usernames": ["user01"]
}, {
"name": "TestGroup2", # will be created; 'inspect' perm ignored
"permissions": ["read", "inspect"],
"usernames": ["user01", "user02"]
}, {
"name": "TestGroup3", # will be created with no users in group
"permissions": ["admin"]
}]
r = self.app.post('/adobe/_admin/update', params=dict(name='Mozq1',
css='',
homepage='# MozQ1!\n[Root]',
project_template="""{
"private":true,
"icon":{
"url":"%s",
"filename":"icon.png"
},
"tools":{
"wiki":{
"label":"Wiki",
"mount_point":"wiki",
"options":{
"show_right_bar":false,
"show_left_bar":false,
"show_discussion":false,
"some_url": "http://foo.com/$shortname/"
},
"home_text":"My home text!"
},
"discussion":{"label":"Discussion","mount_point":"discussion"},
"blog":{"label":"News","mount_point":"news","options":{
"show_discussion":false
}},
"admin":{"label":"Admin","mount_point":"admin"}
},
"tool_order":["wiki","discussion","news","admin"],
"labels":["mmi"],
"trove_cats":{
"topic":[247],
"developmentstatus":[11]
},
"groups": %s
}""" % (icon_url, json.dumps(test_groups))),
extra_environ=dict(username=str('root')))
r = self.app.post(
'/adobe/register',
params=dict(
project_unixname='testtemp',
project_name='Test Template',
project_description='',
neighborhood='Mozq1',
private_project='off'),
antispam=True,
extra_environ=dict(username=str('root')),
status=302).follow()
p = M.Project.query.get(shortname='testtemp')
# make sure the correct tools got installed in the right order
top_nav = r.html.find('div', {'id': 'top_nav'}).contents[1]
assert top_nav.contents[1].contents[1].contents[1]['href'] == '/adobe/testtemp/wiki/'
assert 'Wiki' in top_nav.contents[1].contents[1].contents[1].contents[0]
assert top_nav.contents[1].contents[3].contents[1]['href'] == '/adobe/testtemp/discussion/'
assert 'Discussion' in top_nav.contents[1].contents[3].contents[1].contents[0]
assert top_nav.contents[1].contents[5].contents[1]['href'] == '/adobe/testtemp/news/'
assert 'News' in top_nav.contents[1].contents[5].contents[1].contents[0]
assert top_nav.contents[1].contents[7].contents[1]['href'] == '/adobe/testtemp/admin/'
assert 'Admin' in top_nav.contents[1].contents[7].contents[1].contents[0]
# make sure project is private
r = self.app.get(
'/adobe/testtemp/wiki/',
extra_environ=dict(username=str('root'))).follow(extra_environ=dict(username=str('root')), status=200)
r = self.app.get(
'/adobe/testtemp/wiki/',
extra_environ=dict(username=str('*anonymous')),
status=302)
# check the labels and trove cats
r = self.app.get('/adobe/testtemp/admin/trove')
assert 'mmi' in r
assert 'Communications » Telephony' in r
assert '5 - Production/Stable' in r
# check the wiki text
r = self.app.get('/adobe/testtemp/wiki/').follow()
assert "My home text!" in r
# check tool options
opts = p.app_config('wiki').options
assert_equal(False, opts.show_discussion)
assert_equal(False, opts.show_left_bar)
assert_equal(False, opts.show_right_bar)
assert_equal("http://foo.com/testtemp/", opts.some_url)
# check that custom groups/perms/users were setup correctly
roles = p.named_roles
for group in test_groups:
name = group.get('name')
permissions = group.get('permissions', [])
usernames = group.get('usernames', [])
if name in ('Viewer', 'TestGroup2', 'TestGroup3'):
role = M.ProjectRole.by_name(name, project=p)
# confirm role created in project
assert role in roles
for perm in permissions:
# confirm valid permissions added to role, and invalid
# permissions ignored
if perm in p.permissions:
assert M.ACE.allow(role._id, perm) in p.acl
else:
assert M.ACE.allow(role._id, perm) not in p.acl
# confirm valid users received role
for username in usernames:
user = M.User.by_username(username)
if user and user._id:
assert role in M.ProjectRole.by_user(
user, project=p).roles
# confirm roles with invalid json data are not created
if name in ('', 'TestGroup1'):
assert name not in roles
def test_projects_anchored_tools(self):
r = self.app.post('/adobe/_admin/update', params=dict(name='Adobe',
css='',
homepage='# Adobe!\n[Root]',
project_template="""{
"private":true,
"tools":{
"wiki":{
"label":"Wiki",
"mount_point":"wiki",
"options":{
"show_right_bar":false,
"show_left_bar":false,
"show_discussion":false,
"some_url": "http://foo.com/$shortname/"
},
"home_text":"My home text!"
},
"admin":{"label":"Admin","mount_point":"admin"}
},
"tool_order":["wiki","admin"],
}"""),
extra_environ=dict(username=str('root')))
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.anchored_tools = 'wiki:Wiki'
r = self.app.post(
'/adobe/register',
params=dict(
project_unixname='testtemp',
project_name='Test Template',
project_description='',
neighborhood='Adobe',
private_project='off'),
antispam=True,
extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/testtemp/admin/overview')
assert r.html.find('div', id='top_nav').find(
'a', href='/adobe/testtemp/wiki/'), r.html
assert r.html.find('div', id='top_nav').find(
'a', href='/adobe/testtemp/admin/'), r.html
def test_name_check(self):
for name in ('My+Moz', 'Te%st!', 'ab', 'a' * 16):
r = self.app.get(
'/p/check_names?neighborhood=Projects&project_unixname=%s' % name)
assert_equal(
r.json,
{'project_unixname': 'Please use 3-15 small letters, numbers, and dashes.'})
r = self.app.get(
'/p/check_names?neighborhood=Projects&project_unixname=mymoz')
assert_equal(r.json, {})
r = self.app.get(
'/p/check_names?neighborhood=Projects&project_unixname=test')
assert_equal(r.json,
{'project_unixname': 'This project name is taken.'})
@td.with_tool('test/sub1', 'Wiki', 'wiki')
def test_neighborhood_project(self):
self.app.get('/adobe/adobe-1/admin/', status=200)
self.app.get('/p/test/sub1/wiki/')
self.app.get('/p/test/sub1/', status=302)
self.app.get('/p/test/no-such-app/', status=404)
def test_neighborhood_namespace(self):
# p/test exists, so try creating adobe/test
self.app.get('/adobe/test/wiki/', status=404)
r = self.app.post('/adobe/register',
params=dict(
project_unixname='test', project_name='Test again',
project_description='', neighborhood='Adobe', tools='wiki'),
antispam=True,
extra_environ=dict(username=str('root')))
assert r.status_int == 302, r.html.find(
'div', {'class': 'error'}).string
assert not r.location.endswith('/add_project'), self.webflash(r)
r = self.app.get('/adobe/test/wiki/').follow(status=200)
def test_neighborhood_awards(self):
file_name = 'adobe_icon.png'
file_path = os.path.join(
allura.__path__[0], 'public', 'nf', 'images', file_name)
        with open(file_path, 'rb') as f:
            file_data = f.read()
upload = ('icon', file_name, file_data)
r = self.app.get('/adobe/_admin/awards',
extra_environ=dict(username=str('root')))
r = self.app.post('/adobe/_admin/awards/create',
params=dict(short='FOO', full='A basic foo award'),
extra_environ=dict(username=str('root')), upload_files=[upload])
r = self.app.post('/adobe/_admin/awards/create',
params=dict(short='BAR',
full='A basic bar award with no icon'),
extra_environ=dict(username=str('root')))
foo_id = str(M.Award.query.find(dict(short='FOO')).first()._id)
bar_id = str(M.Award.query.find(dict(short='BAR')).first()._id)
r = self.app.post('/adobe/_admin/awards/%s/update' % bar_id,
params=dict(short='BAR2',
full='Updated description.'),
extra_environ=dict(username=str('root'))).follow().follow()
assert 'BAR2' in r
assert 'Updated description.' in r
r = self.app.get('/adobe/_admin/awards/%s' %
foo_id, extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/_admin/awards/%s/icon' %
foo_id, extra_environ=dict(username=str('root')))
image = PIL.Image.open(BytesIO(r.body))
assert image.size == (48, 48)
self.app.post('/adobe/_admin/awards/grant',
params=dict(grant='FOO', recipient='adobe-1',
url='http://award.org', comment='Winner!'),
extra_environ=dict(username=str('root')))
r = self.app.get('/adobe/_admin/accolades',
extra_environ=dict(username=str('root')))
assert_in('Winner!', r)
assert_in('http://award.org', r)
self.app.get('/adobe/_admin/awards/%s/adobe-1' %
foo_id, extra_environ=dict(username=str('root')))
self.app.post('/adobe/_admin/awards/%s/adobe-1/revoke' % foo_id,
extra_environ=dict(username=str('root')))
self.app.post('/adobe/_admin/awards/%s/delete' % foo_id,
extra_environ=dict(username=str('root')))
def test_add_a_project_link(self):
from tg import tmpl_context as c
# Install Home tool for all neighborhoods
for nb in M.Neighborhood.query.find().all():
p = nb.neighborhood_project
with h.push_config(c, user=M.User.query.get()):
p.install_app('home', 'home', 'Home', ordinal=0)
r = self.app.get('/p/')
assert 'Add a Project' in r
r = self.app.get('/u/', extra_environ=dict(username=str('test-user')))
assert 'Add a Project' not in r
r = self.app.get('/adobe/', extra_environ=dict(username=str('test-user')))
assert 'Add a Project' not in r
r = self.app.get('/u/', extra_environ=dict(username=str('root')))
assert 'Add a Project' in r
r = self.app.get('/adobe/', extra_environ=dict(username=str('root')))
assert 'Add a Project' in r
def test_help(self):
r = self.app.get('/p/_admin/help/',
extra_environ=dict(username=str('root')))
assert 'macro' in r
@td.with_user_project('test-user')
def test_profile_tools(self):
r = self.app.get('/u/test-user/',
extra_environ=dict(username=str('test-user'))).follow()
assert r.html.select('div.profile-section.tools a[href="/u/test-user/profile/"]'), r.html
def test_user_project_creates_on_demand(self):
M.User.register(dict(username='donald-duck'), make_project=False)
ThreadLocalORMSession.flush_all()
self.app.get('/u/donald-duck/')
def test_disabled_user_has_no_user_project(self):
M.User.register(dict(username='donald-duck'))
self.app.get('/u/donald-duck/') # assert it's there
M.User.query.update(dict(username='donald-duck'),
{'$set': {'disabled': True}})
self.app.get('/u/donald-duck/', status=404, extra_environ={'username': str('*anonymous')})
self.app.get('/u/donald-duck/', status=404, extra_environ={'username': str('test-user')})
self.app.get('/u/donald-duck/', status=302, extra_environ={'username': str('test-admin')}) # site admin user
def test_more_projects_link(self):
r = self.app.get('/adobe/adobe-1/admin/')
link = r.html.find(
'div', {'class': 'neighborhood_title_link'}).find('a')
assert 'View More Projects' in str(link)
assert link['href'] == '/adobe/'
def test_nav_json(self):
self.app.get('/p/_nav.json')
class TestPhoneVerificationOnProjectRegistration(TestController):
def test_phone_verification_fragment_renders(self):
self.app.get('/p/phone_verification_fragment', status=200)
self.app.get('/adobe/phone_verification_fragment', status=200)
def test_verify_phone_no_params(self):
with h.push_config(config, **{'project.verify_phone': 'true'}):
self.app.get('/p/verify_phone', status=404)
def test_verify_phone_error(self):
with h.push_config(config, **{'project.verify_phone': 'true'}):
r = self.app.get('/p/verify_phone', {'number': '1234567890'})
expected = {'status': 'error',
'error': 'Phone service is not configured'}
assert_equal(r.json, expected)
rid = r.session.get('phone_verification.request_id')
hash = r.session.get('phone_verification.number_hash')
assert_equal(rid, None)
assert_equal(hash, None)
@patch.object(g, 'phone_service', autospec=True)
def test_verify_phone(self, phone_service):
with h.push_config(config, **{'project.verify_phone': 'true'}):
phone_service.verify.return_value = {
'request_id': 'request-id', 'status': 'ok'}
r = self.app.get('/p/verify_phone', {'number': '1-555-444-3333'})
phone_service.verify.assert_called_once_with('15554443333')
assert_equal(r.json, {'status': 'ok'})
rid = r.session.get('phone_verification.request_id')
hash = r.session.get('phone_verification.number_hash')
assert_equal(rid, 'request-id')
assert_equal(hash, 'f9ac49faef45d18746ced08d001e23b179107940')
@patch.object(g, 'phone_service', autospec=True)
def test_verify_phone_escapes_error(self, phone_service):
phone_service.verify.return_value = {
'status': 'error',
'error': '<script>alert("hacked");</script>',
}
with h.push_config(config, **{'project.verify_phone': 'true'}):
r = self.app.get('/p/verify_phone', {'number': '555-444-3333'})
expected = {
'status': 'error',
'error': '<script>alert("hacked");</script>',
}
assert_equal(r.json, expected)
@patch.object(g, 'phone_service', autospec=True)
def test_verify_phone_already_used(self, phone_service):
with h.push_config(config, **{'project.verify_phone': 'true'}):
u = M.User.register(dict(username='existing-user'), make_project=False)
u.set_tool_data('phone_verification', number_hash=utils.phone_number_hash('1-555-444-9999'))
session(u).flush(u)
phone_service.verify.return_value = {'request_id': 'request-id', 'status': 'ok'}
r = self.app.get('/p/verify_phone', {'number': '1-555-444-9999'})
assert_equal(r.json, {
'status': 'error',
'error': 'That phone number has already been used.'
})
def test_check_phone_verification_no_params(self):
with h.push_config(config, **{'project.verify_phone': 'true'}):
self.app.get('/p/check_phone_verification', status=404)
@patch.object(g, 'phone_service', autospec=True)
def test_check_phone_verification_error(self, phone_service):
with h.push_config(config, **{'project.verify_phone': 'true'}):
phone_service.check.return_value = {'status': 'error'}
req_id = 'request-id'
# make request to verify first to initialize session
phone_service.verify.return_value = {
'request_id': req_id, 'status': 'ok'}
r = self.app.get('/p/verify_phone', {'number': '1234567890'})
r = self.app.get('/p/check_phone_verification', {'pin': '1234'})
assert_equal(r.json, {'status': 'error'})
phone_service.check.assert_called_once_with(req_id, '1234')
user = M.User.by_username('test-admin')
hash = user.get_tool_data('phone_verification', 'number_hash')
assert_equal(hash, None)
@patch.object(g, 'phone_service', autospec=True)
def test_check_phone_verification_ok(self, phone_service):
with h.push_config(config, **{'project.verify_phone': 'true'}):
phone_service.check.return_value = {'status': 'ok'}
req_id = 'request-id'
# make request to verify first to initialize session
phone_service.verify.return_value = {
'request_id': req_id, 'status': 'ok'}
r = self.app.get('/p/verify_phone', {'number': '11234567890'})
r = self.app.get('/p/check_phone_verification', {'pin': '1234'})
assert_equal(r.json, {'status': 'ok'})
phone_service.check.assert_called_once_with(req_id, '1234')
user = M.User.by_username('test-admin')
hash = user.get_tool_data('phone_verification', 'number_hash')
assert_equal(hash, '54c61c96d5d5aea5254c2d4f41508a938e5501b4')
@patch.object(g, 'phone_service', autospec=True)
def test_check_phone_verification_escapes_error(self, phone_service):
phone_service.check.return_value = {
'status': 'error',
'error': '<script>alert("hacked");</script>',
}
with h.push_config(config, **{'project.verify_phone': 'true'}):
r = self.app.get('/p/check_phone_verification', {'pin': '1234'})
expected = {
'status': 'error',
'error': '<script>alert("hacked");</script>',
}
assert_equal(r.json, expected)
def test_register_phone_not_verified(self):
with h.push_config(config, **{'project.verify_phone': 'true'}):
r = self.app.post(
'/p/register',
params=dict(
project_unixname='phonetest',
project_name='Phone Test',
project_description='',
neighborhood='Projects'),
extra_environ=dict(username=str('test-user')),
antispam=True)
overlay = r.html.find('div', {'id': 'phone_verification_overlay'})
assert_not_equal(overlay, None)
header = overlay.find('h2')
iframe = overlay.find('iframe')
assert_equal(header.getText(), 'Phone Verification Required')
assert_equal(iframe.get('src'), '/p/phone_verification_fragment')
class TestProjectImport(TestController):
def test_not_found(self):
self.app.get('/p/import_project/asdf/', status=404)
self.app.get('/p/import_project/', status=404)
# positive tests exist within ForgeImporter package
| avg_line_length: 47.821204 | max_line_length: 120 | alphanum_fraction: 0.546999 |

| hexsha: 4a16c28859465e18f6edc7a3e5586d1acbc0267c | size: 863 | ext: py | lang: Python |
| repo: Pysics/Algorithm | path: data_structures/math/geometry/2D/segment.py | head_hexsha: 223f618e3e6d96e15091783b81b90ee00c771e8f | licenses: ["MIT"] |
| stars: null (null - null) | issues: 3 (2022-03-30T01:30:32.000Z - 2022-03-31T12:52:04.000Z) | forks: 4 (2022-03-29T12:27:48.000Z - 2022-03-30T05:02:31.000Z) |
from __future__ import annotations
import typing
import sympy
from polygon import Polygon
from sector import Sector
if typing.TYPE_CHECKING:
from circle import Circle
from point import Point
class Segment:
def __init__(self, circle: Circle, point_1: Point, point_2: Point) -> None:
self.circle = circle
self.point_1 = point_1
self.point_2 = point_2
@property
def central_angle(self) -> float:
distance = self.point_1.distance(self.point_2)
        # Law of cosines for the chord: distance^2 = 2*r^2*(1 - cos(angle)),
        # so cos(angle) = (2*r^2 - distance^2) / (2*r^2). The original had the
        # numerator's sign flipped, which returned pi minus the central angle.
        angle = float(sympy.acos((2 * (self.circle.radius ** 2) - distance ** 2) / (2 * (self.circle.radius ** 2))))
return angle
@property
def area(self) -> float:
sector = Sector(self.circle, self.point_1, self.point_2)
triangle = Polygon(self.circle.center, self.point_1, self.point_2)
return sector.area - triangle.area
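Editor's sketch, not part of the file above: a quick self-contained check of the corrected central-angle formula. The _Point and _Circle classes below are hypothetical stand-ins for the project's point.py and circle.py, which are not shown here.

import math

class _Point:
    # Minimal stand-in: only the distance() method Segment relies on.
    def __init__(self, x: float, y: float) -> None:
        self.x, self.y = x, y

    def distance(self, other: "_Point") -> float:
        return math.hypot(self.x - other.x, self.y - other.y)

class _Circle:
    # Minimal stand-in: only the center and radius attributes Segment reads.
    def __init__(self, center: _Point, radius: float) -> None:
        self.center, self.radius = center, radius

# A chord whose length equals the radius subtends a 60-degree central angle,
# so the law-of-cosines form acos((2*r**2 - d**2) / (2*r**2)) must give pi/3.
r, d = 1.0, 1.0
assert abs(math.acos((2 * r ** 2 - d ** 2) / (2 * r ** 2)) - math.pi / 3) < 1e-12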
| avg_line_length: 27.83871 | max_line_length: 116 | alphanum_fraction: 0.668598 |

| hexsha: 4a16c2943bfb5d21c8f72a5228178edbbefe75df | size: 11505 | ext: py | lang: Python |
| repo: factset/enterprise-sdk | path: code/python/SecuritizedDerivativesAPIforDigitalPortals/v2/fds/sdk/SecuritizedDerivativesAPIforDigitalPortals/model/inline_response2005_data_key_figures_delta_unadjusted.py | head_hexsha: 3fd4d1360756c515c9737a0c9a992c7451d7de7e | licenses: ["Apache-2.0"] |
| stars: 6 (2022-02-07T16:34:18.000Z - 2022-03-30T08:04:57.000Z) | issues: 2 (2022-02-07T05:25:57.000Z - 2022-03-07T14:18:04.000Z) | forks: null (null - null) |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.SecuritizedDerivativesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.SecuritizedDerivativesAPIforDigitalPortals.exceptions import ApiAttributeError
class InlineResponse2005DataKeyFiguresDeltaUnadjusted(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is the attribute name
          and the value is the json key in the definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'minimum': (float,), # noqa: E501
'maximum': (float,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'minimum': 'minimum', # noqa: E501
'maximum': 'maximum', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2005DataKeyFiguresDeltaUnadjusted - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                    composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            minimum (float): Minimum value. [optional]  # noqa: E501
            maximum (float): Maximum value. [optional]  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2005DataKeyFiguresDeltaUnadjusted - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                    composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            minimum (float): Minimum value. [optional]  # noqa: E501
            maximum (float): Maximum value. [optional]  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| avg_line_length: 44.25 | max_line_length: 124 | alphanum_fraction: 0.576271 |

| hexsha: 4a16c2dcafbd6362e28db11c64dc9014984059d0 | size: 48758 | ext: py | lang: Python |
| repo: yvanlebras/galaxy | path: test/unit/data/test_galaxy_mapping.py | head_hexsha: 6b8489ca866825bcdf033523120a8b24ea6e6342 | licenses: ["CC-BY-3.0"] |
| stars: null (null - null) | issues: 2 (2017-05-18T16:12:55.000Z - 2022-03-08T12:08:43.000Z) | forks: null (null - null) |
import collections
import os
import random
import unittest
import uuid
from tempfile import NamedTemporaryFile
from typing import List
import pytest
from sqlalchemy import (
inspect,
select,
)
import galaxy.datatypes.registry
import galaxy.model
import galaxy.model.mapping as mapping
from galaxy import model
from galaxy.model.database_utils import create_database
from galaxy.model.metadata import MetadataTempFile
from galaxy.model.security import GalaxyRBACAgent
datatypes_registry = galaxy.datatypes.registry.Registry()
datatypes_registry.load_datatypes()
galaxy.model.set_datatypes_registry(datatypes_registry)
DB_URI = "sqlite:///:memory:"
# docker run -e POSTGRES_USER=galaxy -p 5432:5432 -d postgres
# GALAXY_TEST_UNIT_MAPPING_URI_POSTGRES_BASE='postgresql://galaxy@localhost:5432/' pytest test/unit/data/test_galaxy_mapping.py
skip_if_not_postgres_base = pytest.mark.skipif(
not os.environ.get("GALAXY_TEST_UNIT_MAPPING_URI_POSTGRES_BASE"),
reason="GALAXY_TEST_UNIT_MAPPING_URI_POSTGRES_BASE not set",
)
class BaseModelTestCase(unittest.TestCase):
model: mapping.GalaxyModelMapping
@classmethod
def _db_uri(cls):
return DB_URI
@classmethod
def setUpClass(cls):
# Start the database and connect the mapping
cls.model = mapping.init("/tmp", cls._db_uri(), create_tables=True, object_store=MockObjectStore())
assert cls.model.engine is not None
@classmethod
def query(cls, type):
return cls.model.session.query(type)
@classmethod
def persist(cls, *args, **kwargs):
session = cls.session()
flush = kwargs.get("flush", True)
for arg in args:
session.add(arg)
if flush:
session.flush()
if kwargs.get("expunge", not flush):
cls.expunge()
return arg # Return last or only arg.
@classmethod
def session(cls):
return cls.model.session
@classmethod
def expunge(cls):
cls.model.session.flush()
cls.model.session.expunge_all()
class MappingTests(BaseModelTestCase):
def test_annotations(self):
u = model.User(email="annotator@example.com", password="password")
self.persist(u)
def persist_and_check_annotation(annotation_class, **kwds):
annotated_association = annotation_class()
annotated_association.annotation = "Test Annotation"
annotated_association.user = u
for key, value in kwds.items():
setattr(annotated_association, key, value)
self.persist(annotated_association)
self.expunge()
stored_annotation = self.query(annotation_class).all()[0]
assert stored_annotation.annotation == "Test Annotation"
assert stored_annotation.user.email == "annotator@example.com"
sw = model.StoredWorkflow()
sw.user = u
self.persist(sw)
persist_and_check_annotation(model.StoredWorkflowAnnotationAssociation, stored_workflow=sw)
workflow = model.Workflow()
workflow.stored_workflow = sw
self.persist(workflow)
ws = model.WorkflowStep()
ws.workflow = workflow
self.persist(ws)
persist_and_check_annotation(model.WorkflowStepAnnotationAssociation, workflow_step=ws)
h = model.History(name="History for Annotation", user=u)
self.persist(h)
persist_and_check_annotation(model.HistoryAnnotationAssociation, history=h)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(d1)
persist_and_check_annotation(model.HistoryDatasetAssociationAnnotationAssociation, hda=d1)
page = model.Page()
page.user = u
self.persist(page)
persist_and_check_annotation(model.PageAnnotationAssociation, page=page)
visualization = model.Visualization()
visualization.user = u
self.persist(visualization)
persist_and_check_annotation(model.VisualizationAnnotationAssociation, visualization=visualization)
dataset_collection = model.DatasetCollection(collection_type="paired")
history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
self.persist(history_dataset_collection)
persist_and_check_annotation(
model.HistoryDatasetCollectionAssociationAnnotationAssociation,
history_dataset_collection=history_dataset_collection,
)
library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
self.persist(library_dataset_collection)
persist_and_check_annotation(
model.LibraryDatasetCollectionAnnotationAssociation, library_dataset_collection=library_dataset_collection
)
def test_ratings(self):
user_email = "rater@example.com"
u = model.User(email=user_email, password="password")
self.persist(u)
def persist_and_check_rating(rating_class, item):
rating = 5
rating_association = rating_class(u, item, rating)
self.persist(rating_association)
self.expunge()
stored_rating = self.query(rating_class).all()[0]
assert stored_rating.rating == rating
assert stored_rating.user.email == user_email
sw = model.StoredWorkflow()
sw.user = u
self.persist(sw)
persist_and_check_rating(model.StoredWorkflowRatingAssociation, sw)
h = model.History(name="History for Rating", user=u)
self.persist(h)
persist_and_check_rating(model.HistoryRatingAssociation, h)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(d1)
persist_and_check_rating(model.HistoryDatasetAssociationRatingAssociation, d1)
page = model.Page()
page.user = u
self.persist(page)
persist_and_check_rating(model.PageRatingAssociation, page)
visualization = model.Visualization()
visualization.user = u
self.persist(visualization)
persist_and_check_rating(model.VisualizationRatingAssociation, visualization)
dataset_collection = model.DatasetCollection(collection_type="paired")
history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
self.persist(history_dataset_collection)
persist_and_check_rating(model.HistoryDatasetCollectionRatingAssociation, history_dataset_collection)
library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
self.persist(library_dataset_collection)
persist_and_check_rating(model.LibraryDatasetCollectionRatingAssociation, library_dataset_collection)
def test_display_name(self):
def assert_display_name_converts_to_unicode(item, name):
assert isinstance(item.get_display_name(), str)
assert item.get_display_name() == name
ldda = model.LibraryDatasetDatasetAssociation(name="ldda_name")
assert_display_name_converts_to_unicode(ldda, "ldda_name")
hda = model.HistoryDatasetAssociation(name="hda_name")
assert_display_name_converts_to_unicode(hda, "hda_name")
history = model.History(name="history_name")
assert_display_name_converts_to_unicode(history, "history_name")
library = model.Library(name="library_name")
assert_display_name_converts_to_unicode(library, "library_name")
library_folder = model.LibraryFolder(name="library_folder")
assert_display_name_converts_to_unicode(library_folder, "library_folder")
history = model.History(name="Hello₩◎ґʟⅾ")
assert isinstance(history.name, str)
assert isinstance(history.get_display_name(), str)
assert history.get_display_name() == "Hello₩◎ґʟⅾ"
def test_hda_to_library_dataset_dataset_association(self):
u = model.User(email="mary@example.com", password="password")
hda = model.HistoryDatasetAssociation(name="hda_name")
self.persist(hda)
trans = collections.namedtuple("trans", "user")
target_folder = model.LibraryFolder(name="library_folder")
ldda = hda.to_library_dataset_dataset_association(
trans=trans(user=u),
target_folder=target_folder,
)
assert target_folder.item_count == 1
assert ldda.id
assert ldda.library_dataset.id
assert ldda.library_dataset_id
assert ldda.library_dataset.library_dataset_dataset_association
assert ldda.library_dataset.library_dataset_dataset_association_id
library_dataset_id = ldda.library_dataset_id
replace_dataset = ldda.library_dataset
new_ldda = hda.to_library_dataset_dataset_association(
trans=trans(user=u), target_folder=target_folder, replace_dataset=replace_dataset
)
assert new_ldda.id != ldda.id
assert new_ldda.library_dataset_id == library_dataset_id
assert new_ldda.library_dataset.library_dataset_dataset_association_id == new_ldda.id
assert len(new_ldda.library_dataset.expired_datasets) == 1
assert new_ldda.library_dataset.expired_datasets[0] == ldda
assert target_folder.item_count == 1
def test_tags(self):
TAG_NAME = "Test Tag"
my_tag = model.Tag(name=TAG_NAME)
u = model.User(email="tagger@example.com", password="password")
self.persist(my_tag, u)
def tag_and_test(taggable_object, tag_association_class):
q = select(tag_association_class).join(model.Tag).where(model.Tag.name == TAG_NAME)
assert len(self.model.session.execute(q).all()) == 0
tag_association = tag_association_class()
tag_association.tag = my_tag
taggable_object.tags = [tag_association]
self.persist(tag_association, taggable_object)
assert len(self.model.session.execute(q).all()) == 1
sw = model.StoredWorkflow(user=u)
tag_and_test(sw, model.StoredWorkflowTagAssociation)
h = model.History(name="History for Tagging", user=u)
tag_and_test(h, model.HistoryTagAssociation)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
tag_and_test(d1, model.HistoryDatasetAssociationTagAssociation)
page = model.Page(user=u)
tag_and_test(page, model.PageTagAssociation)
visualization = model.Visualization(user=u)
tag_and_test(visualization, model.VisualizationTagAssociation)
dataset_collection = model.DatasetCollection(collection_type="paired")
history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
tag_and_test(history_dataset_collection, model.HistoryDatasetCollectionTagAssociation)
library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
tag_and_test(library_dataset_collection, model.LibraryDatasetCollectionTagAssociation)
def test_collection_get_interface(self):
u = model.User(email="mary@example.com", password="password")
h1 = model.History(name="History 1", user=u)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
c1 = model.DatasetCollection(collection_type="list")
elements = 100
dces = [
model.DatasetCollectionElement(collection=c1, element=d1, element_identifier=f"{i}", element_index=i)
for i in range(elements)
]
self.persist(u, h1, d1, c1, *dces, flush=False, expunge=False)
self.model.session.flush()
for i in range(elements):
assert c1[i] == dces[i]
def test_dataset_instance_order(self):
u = model.User(email="mary@example.com", password="password")
h1 = model.History(name="History 1", user=u)
elements = []
list_pair = model.DatasetCollection(collection_type="list:paired")
for i in range(20):
pair = model.DatasetCollection(collection_type="pair")
forward = model.HistoryDatasetAssociation(
extension="txt", history=h1, name=f"forward_{i}", create_dataset=True, sa_session=self.model.session
)
reverse = model.HistoryDatasetAssociation(
extension="bam", history=h1, name=f"reverse_{i}", create_dataset=True, sa_session=self.model.session
)
dce1 = model.DatasetCollectionElement(
collection=pair, element=forward, element_identifier=f"forward_{i}", element_index=1
)
dce2 = model.DatasetCollectionElement(
collection=pair, element=reverse, element_identifier=f"reverse_{i}", element_index=2
)
to_persist = [(forward, reverse), (dce1, dce2)]
self.persist(pair)
for pair_item in to_persist:
if i % 2:
self.persist(pair_item[0])
self.persist(pair_item[1])
else:
self.persist(pair_item[1])
self.persist(pair_item[0])
elements.append(
model.DatasetCollectionElement(
collection=list_pair, element=pair, element_index=i, element_identifier=str(i)
)
)
self.persist(list_pair)
random.shuffle(elements)
for item in elements:
self.persist(item)
forward_hdas: List[model.HistoryDatasetAssociation] = []
reverse_hdas: List[model.HistoryDatasetAssociation] = []
for i, dataset_instance in enumerate(list_pair.dataset_instances):
if i % 2:
reverse_hdas.append(dataset_instance)
else:
forward_hdas.append(dataset_instance)
assert all(d.name == f"forward_{i}" for i, d in enumerate(forward_hdas))
assert all(d.name == f"reverse_{i}" for i, d in enumerate(reverse_hdas))
def test_collections_in_histories(self):
u = model.User(email="mary@example.com", password="password")
h1 = model.History(name="History 1", user=u)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
d2 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
c1 = model.DatasetCollection(collection_type="pair")
hc1 = model.HistoryDatasetCollectionAssociation(history=h1, collection=c1, name="HistoryCollectionTest1")
dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="left")
dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="right")
self.persist(u, h1, d1, d2, c1, hc1, dce1, dce2)
loaded_dataset_collection = (
self.query(model.HistoryDatasetCollectionAssociation)
.filter(model.HistoryDatasetCollectionAssociation.name == "HistoryCollectionTest1")
.first()
.collection
)
self.assertEqual(len(loaded_dataset_collection.elements), 2)
assert loaded_dataset_collection.collection_type == "pair"
assert loaded_dataset_collection["left"] == dce1
assert loaded_dataset_collection["right"] == dce2
def test_collections_in_library_folders(self):
u = model.User(email="mary2@example.com", password="password")
lf = model.LibraryFolder(name="RootFolder")
library = model.Library(name="Library1", root_folder=lf)
ld1 = model.LibraryDataset()
ld2 = model.LibraryDataset()
ldda1 = model.LibraryDatasetDatasetAssociation(extension="txt", library_dataset=ld1)
ldda2 = model.LibraryDatasetDatasetAssociation(extension="txt", library_dataset=ld1)
c1 = model.DatasetCollection(collection_type="pair")
dce1 = model.DatasetCollectionElement(collection=c1, element=ldda1)
dce2 = model.DatasetCollectionElement(collection=c1, element=ldda2)
self.persist(u, library, lf, ld1, ld2, c1, ldda1, ldda2, dce1, dce2)
# TODO:
# loaded_dataset_collection = self.query( model.DatasetCollection ).filter( model.DatasetCollection.name == "LibraryCollectionTest1" ).first()
# self.assertEqual(len(loaded_dataset_collection.datasets), 2)
# assert loaded_dataset_collection.collection_type == "pair"
def test_nested_collection_attributes(self):
u = model.User(email="mary2@example.com", password="password")
h1 = model.History(name="History 1", user=u)
d1 = model.HistoryDatasetAssociation(
extension="bam", history=h1, create_dataset=True, sa_session=self.model.session
)
index = NamedTemporaryFile("w")
index.write("cool bam index")
index2 = NamedTemporaryFile("w")
index2.write("cool bam index 2")
metadata_dict = {
"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index.name}),
"bam_csi_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index2.name}),
}
d1.metadata.from_JSON_dict(json_dict=metadata_dict)
assert d1.metadata.bam_index
assert d1.metadata.bam_csi_index
assert isinstance(d1.metadata.bam_index, model.MetadataFile)
assert isinstance(d1.metadata.bam_csi_index, model.MetadataFile)
d2 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
c1 = model.DatasetCollection(collection_type="paired")
dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="forward", element_index=0)
dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="reverse", element_index=1)
c2 = model.DatasetCollection(collection_type="list:paired")
dce3 = model.DatasetCollectionElement(
collection=c2, element=c1, element_identifier="inner_list", element_index=0
)
c3 = model.DatasetCollection(collection_type="list:list")
c4 = model.DatasetCollection(collection_type="list:list:paired")
dce4 = model.DatasetCollectionElement(
collection=c4, element=c2, element_identifier="outer_list", element_index=0
)
self.model.session.add_all([d1, d2, c1, dce1, dce2, c2, dce3, c3, c4, dce4])
self.model.session.flush()
q = c2._get_nested_collection_attributes(
element_attributes=("element_identifier",), hda_attributes=("extension",), dataset_attributes=("state",)
)
assert [(r.keys()) for r in q] == [
["element_identifier_0", "element_identifier_1", "extension", "state"],
["element_identifier_0", "element_identifier_1", "extension", "state"],
]
assert q.all() == [("inner_list", "forward", "bam", "new"), ("inner_list", "reverse", "txt", "new")]
q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation,))
assert q.all() == [d1, d2]
q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation, model.Dataset))
assert q.all() == [(d1, d1.dataset), (d2, d2.dataset)]
# Assert properties that use _get_nested_collection_attributes return correct content
assert c2.dataset_instances == [d1, d2]
assert c2.dataset_elements == [dce1, dce2]
assert c2.dataset_action_tuples == []
assert c2.populated_optimized
assert c2.dataset_states_and_extensions_summary == ({"new"}, {"txt", "bam"})
assert c2.element_identifiers_extensions_paths_and_metadata_files == [
[
("inner_list", "forward"),
"bam",
"mock_dataset_14.dat",
[("bai", "mock_dataset_14.dat"), ("bam.csi", "mock_dataset_14.dat")],
],
[("inner_list", "reverse"), "txt", "mock_dataset_14.dat", []],
]
assert c3.dataset_instances == []
assert c3.dataset_elements == []
assert c3.dataset_states_and_extensions_summary == (set(), set())
q = c4._get_nested_collection_attributes(element_attributes=("element_identifier",))
assert q.all() == [("outer_list", "inner_list", "forward"), ("outer_list", "inner_list", "reverse")]
assert c4.dataset_elements == [dce1, dce2]
assert c4.element_identifiers_extensions_and_paths == [
(("outer_list", "inner_list", "forward"), "bam", "mock_dataset_14.dat"),
(("outer_list", "inner_list", "reverse"), "txt", "mock_dataset_14.dat"),
]
def test_dataset_dbkeys_and_extensions_summary(self):
u = model.User(email="mary2@example.com", password="password")
h1 = model.History(name="History 1", user=u)
d1 = model.HistoryDatasetAssociation(
extension="bam", dbkey="hg19", history=h1, create_dataset=True, sa_session=self.model.session
)
d2 = model.HistoryDatasetAssociation(
extension="txt", dbkey="hg19", history=h1, create_dataset=True, sa_session=self.model.session
)
c1 = model.DatasetCollection(collection_type="paired")
dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="forward", element_index=0)
dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="reverse", element_index=1)
hdca = model.HistoryDatasetCollectionAssociation(collection=c1, history=h1)
self.model.session.add_all([d1, d2, c1, dce1, dce2, hdca])
self.model.session.flush()
assert hdca.dataset_dbkeys_and_extensions_summary[0] == {"hg19"}
assert hdca.dataset_dbkeys_and_extensions_summary[1] == {"bam", "txt"}
def test_populated_optimized_ok(self):
u = model.User(email="mary2@example.com", password="password")
h1 = model.History(name="History 1", user=u)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
d2 = model.HistoryDatasetAssociation(
extension="txt", history=h1, create_dataset=True, sa_session=self.model.session
)
c1 = model.DatasetCollection(collection_type="paired")
dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="forward", element_index=0)
dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="reverse", element_index=1)
self.model.session.add_all([d1, d2, c1, dce1, dce2])
self.model.session.flush()
assert c1.populated
assert c1.populated_optimized
def test_populated_optimized_empty_list_list_ok(self):
c1 = model.DatasetCollection(collection_type="list")
c2 = model.DatasetCollection(collection_type="list:list")
dce1 = model.DatasetCollectionElement(
collection=c2, element=c1, element_identifier="empty_list", element_index=0
)
self.model.session.add_all([c1, c2, dce1])
self.model.session.flush()
assert c1.populated
assert c1.populated_optimized
assert c2.populated
assert c2.populated_optimized
def test_populated_optimized_list_list_not_populated(self):
c1 = model.DatasetCollection(collection_type="list")
c1.populated_state = False
c2 = model.DatasetCollection(collection_type="list:list")
dce1 = model.DatasetCollectionElement(
collection=c2, element=c1, element_identifier="empty_list", element_index=0
)
self.model.session.add_all([c1, c2, dce1])
self.model.session.flush()
assert not c1.populated
assert not c1.populated_optimized
assert not c2.populated
assert not c2.populated_optimized
def test_default_disk_usage(self):
u = model.User(email="disk_default@test.com", password="password")
self.persist(u)
u.adjust_total_disk_usage(1)
u_id = u.id
self.expunge()
user_reload = self.model.session.query(model.User).get(u_id)
assert user_reload.disk_usage == 1
def test_basic(self):
original_user_count = len(self.model.session.query(model.User).all())
# Make some changes and commit them
u = model.User(email="james@foo.bar.baz", password="password")
# gs = model.GalaxySession()
h1 = model.History(name="History 1", user=u)
# h1.queries.append( model.Query( "h1->q1" ) )
# h1.queries.append( model.Query( "h1->q2" ) )
h2 = model.History(name=("H" * 1024))
self.persist(u, h1, h2)
# q1 = model.Query( "h2->q1" )
metadata = dict(chromCol=1, startCol=2, endCol=3)
d1 = model.HistoryDatasetAssociation(
extension="interval", metadata=metadata, history=h2, create_dataset=True, sa_session=self.model.session
)
# h2.queries.append( q1 )
# h2.queries.append( model.Query( "h2->q2" ) )
self.persist(d1)
# Check
users = self.model.session.query(model.User).all()
assert len(users) == original_user_count + 1
user = [user for user in users if user.email == "james@foo.bar.baz"][0]
assert user.email == "james@foo.bar.baz"
assert user.password == "password"
assert len(user.histories) == 1
assert user.histories[0].name == "History 1"
hists = self.model.session.query(model.History).all()
hist0 = [history for history in hists if history.name == "History 1"][0]
hist1 = [history for history in hists if history.name == "H" * 255][0]
assert hist0.name == "History 1"
assert hist1.name == ("H" * 255)
assert hist0.user == user
assert hist1.user is None
assert hist1.datasets[0].metadata.chromCol == 1
# The filename test has moved to objectstore
# id = hist1.datasets[0].id
# assert hist1.datasets[0].file_name == os.path.join( "/tmp", *directory_hash_id( id ) ) + ( "/dataset_%d.dat" % id )
# Do an update and check
hist1.name = "History 2b"
self.expunge()
hists = self.model.session.query(model.History).all()
hist0 = [history for history in hists if history.name == "History 1"][0]
hist1 = [history for history in hists if history.name == "History 2b"][0]
assert hist0.name == "History 1"
assert hist1.name == "History 2b"
        # gvk TODO need to add a test for GalaxySessions, but not yet sure what they should look like.
def test_metadata_spec(self):
metadata = dict(chromCol=1, startCol=2, endCol=3)
d = model.HistoryDatasetAssociation(extension="interval", metadata=metadata, sa_session=self.model.session)
assert d.metadata.chromCol == 1
assert d.metadata.anyAttribute is None
assert "items" not in d.metadata
def test_dataset_job_relationship(self):
dataset = model.Dataset()
job = model.Job()
dataset.job = job
self.persist(job, dataset)
loaded_dataset = self.model.session.query(model.Dataset).filter(model.Dataset.id == dataset.id).one()
assert loaded_dataset.job_id == job.id
def test_jobs(self):
u = model.User(email="jobtest@foo.bar.baz", password="password")
job = model.Job()
job.user = u
job.tool_id = "cat1"
self.persist(u, job)
loaded_job = self.model.session.query(model.Job).filter(model.Job.user == u).first()
assert loaded_job.tool_id == "cat1"
def test_job_metrics(self):
u = model.User(email="jobtest@foo.bar.baz", password="password")
job = model.Job()
job.user = u
job.tool_id = "cat1"
job.add_metric("gx", "galaxy_slots", 5)
job.add_metric("system", "system_name", "localhost")
self.persist(u, job)
task = model.Task(job=job, working_directory="/tmp", prepare_files_cmd="split.sh")
task.add_metric("gx", "galaxy_slots", 5)
task.add_metric("system", "system_name", "localhost")
big_value = ":".join("%d" % i for i in range(2000))
task.add_metric("env", "BIG_PATH", big_value)
self.persist(task)
# Ensure big values truncated
assert len(task.text_metrics[1].metric_value) <= 1023
def test_tasks(self):
u = model.User(email="jobtest@foo.bar.baz", password="password")
job = model.Job()
task = model.Task(job=job, working_directory="/tmp", prepare_files_cmd="split.sh")
job.user = u
self.persist(u, job, task)
loaded_task = self.model.session.query(model.Task).filter(model.Task.job == job).first()
assert loaded_task.prepare_input_files_cmd == "split.sh"
def test_history_contents(self):
u = model.User(email="contents@foo.bar.baz", password="password")
# gs = model.GalaxySession()
h1 = model.History(name="HistoryContentsHistory1", user=u)
self.persist(u, h1, expunge=False)
d1 = self.new_hda(h1, name="1")
d2 = self.new_hda(h1, name="2", visible=False)
d3 = self.new_hda(h1, name="3", deleted=True)
d4 = self.new_hda(h1, name="4", visible=False, deleted=True)
self.session().flush()
def contents_iter_names(**kwds):
history = (
self.model.context.query(model.History).filter(model.History.name == "HistoryContentsHistory1").first()
)
return list(map(lambda hda: hda.name, history.contents_iter(**kwds)))
self.assertEqual(contents_iter_names(), ["1", "2", "3", "4"])
assert contents_iter_names(deleted=False) == ["1", "2"]
assert contents_iter_names(visible=True) == ["1", "3"]
assert contents_iter_names(visible=False) == ["2", "4"]
assert contents_iter_names(deleted=True, visible=False) == ["4"]
assert contents_iter_names(ids=[d1.id, d2.id, d3.id, d4.id]) == ["1", "2", "3", "4"]
assert contents_iter_names(ids=[d1.id, d2.id, d3.id, d4.id], max_in_filter_length=1) == ["1", "2", "3", "4"]
assert contents_iter_names(ids=[d1.id, d3.id]) == ["1", "3"]
def test_history_audit(self):
u = model.User(email="contents@foo.bar.baz", password="password")
h1 = model.History(name="HistoryAuditHistory", user=u)
h2 = model.History(name="HistoryAuditHistory", user=u)
def get_audit_table_entries(history):
return (
self.session()
.query(model.HistoryAudit.table)
.filter(model.HistoryAudit.table.c.history_id == history.id)
.all()
)
def get_latest_entry(entries):
# key ensures result is correct if new columns are added
return max(entries, key=lambda x: x.update_time)
self.persist(u, h1, h2, expunge=False)
assert len(get_audit_table_entries(h1)) == 1
assert len(get_audit_table_entries(h2)) == 1
self.new_hda(h1, name="1")
self.new_hda(h2, name="2")
self.session().flush()
# _next_hid modifies history, plus trigger on HDA means 2 additional audit rows per history
h1_audits = get_audit_table_entries(h1)
h2_audits = get_audit_table_entries(h2)
assert len(h1_audits) == 3
assert len(h2_audits) == 3
h1_latest = get_latest_entry(h1_audits)
h2_latest = get_latest_entry(h2_audits)
model.HistoryAudit.prune(self.session())
h1_audits = get_audit_table_entries(h1)
h2_audits = get_audit_table_entries(h2)
assert len(h1_audits) == 1
assert len(h2_audits) == 1
assert h1_audits[0] == h1_latest
assert h2_audits[0] == h2_latest
def _non_empty_flush(self):
lf = model.LibraryFolder(name="RootFolder")
session = self.session()
session.add(lf)
session.flush()
def test_flush_refreshes(self):
        # Normally I don't believe in unit testing library code, but the behaviors around attribute
        # states and flushing in SQLAlchemy are very subtle, so it is good to have an executable
        # reference for how they behave in the context of Galaxy objects.
model = self.model
user = model.User(email="testworkflows@bx.psu.edu", password="password")
galaxy_session = model.GalaxySession()
galaxy_session_other = model.GalaxySession()
galaxy_session.user = user
galaxy_session_other.user = user
self.persist(user, galaxy_session_other, galaxy_session)
galaxy_session_id = galaxy_session.id
self.expunge()
session = self.session()
galaxy_model_object = self.query(model.GalaxySession).get(galaxy_session_id)
expected_id = galaxy_model_object.id
        # id was loaded as part of the object query; it could be any non-deferred attribute.
assert "id" not in inspect(galaxy_model_object).unloaded
# Perform an empty flush, verify empty flush doesn't reload all attributes.
session.flush()
assert "id" not in inspect(galaxy_model_object).unloaded
        # However, flushing anything non-empty - even an unrelated object - will expire
        # the object's loaded attributes, including its id.
self._non_empty_flush()
assert "id" in inspect(galaxy_model_object).unloaded
        # Fetching the ID loads the value from the database.
assert expected_id == galaxy_model_object.id
assert "id" not in inspect(galaxy_model_object).unloaded
# Using cached_id instead does not exhibit this behavior.
self._non_empty_flush()
assert expected_id == galaxy.model.cached_id(galaxy_model_object)
assert "id" in inspect(galaxy_model_object).unloaded
        # Keeping the following failed experiments here for future reference:
        # I probed the internals of the attribute tracking and couldn't find an
        # alternative, generalized way to get the previously loaded value for unloaded
        # attributes.
# print(galaxy_model_object._sa_instance_state.attrs.id)
# print(dir(galaxy_model_object._sa_instance_state.attrs.id))
# print(galaxy_model_object._sa_instance_state.attrs.id.loaded_value)
# print(galaxy_model_object._sa_instance_state.attrs.id.state)
# print(galaxy_model_object._sa_instance_state.attrs.id.load_history())
# print(dir(galaxy_model_object._sa_instance_state.attrs.id.load_history()))
# print(galaxy_model_object._sa_instance_state.identity)
# print(dir(galaxy_model_object._sa_instance_state))
# print(galaxy_model_object._sa_instance_state.expired_attributes)
# print(galaxy_model_object._sa_instance_state.expired)
# print(galaxy_model_object._sa_instance_state._instance_dict().keys())
# print(dir(galaxy_model_object._sa_instance_state._instance_dict))
# assert False
        # Verify cached_id works even immediately after an initial flush, preventing the second
        # SELECT query that would be executed if object.id were used.
galaxy_model_object_new = model.GalaxySession()
session.add(galaxy_model_object_new)
session.flush()
assert galaxy.model.cached_id(galaxy_model_object_new)
assert "id" in inspect(galaxy_model_object_new).unloaded
        # Verify a targeted flush prevents expiring unrelated objects.
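        # Accessing object.id here loads the attribute so it is no longer expired.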
galaxy_model_object_new.id
assert "id" not in inspect(galaxy_model_object_new).unloaded
session.flush(model.GalaxySession())
assert "id" not in inspect(galaxy_model_object_new).unloaded
def test_workflows(self):
user = model.User(email="testworkflows@bx.psu.edu", password="password")
def workflow_from_steps(steps):
stored_workflow = model.StoredWorkflow()
stored_workflow.user = user
workflow = model.Workflow()
workflow.steps = steps
workflow.stored_workflow = stored_workflow
return workflow
child_workflow = workflow_from_steps([])
self.persist(child_workflow)
workflow_step_1 = model.WorkflowStep()
workflow_step_1.order_index = 0
workflow_step_1.type = "data_input"
workflow_step_2 = model.WorkflowStep()
workflow_step_2.order_index = 1
workflow_step_2.type = "subworkflow"
workflow_step_2.subworkflow = child_workflow
workflow_step_1.get_or_add_input("moo1")
workflow_step_1.get_or_add_input("moo2")
workflow_step_2.get_or_add_input("moo")
workflow_step_1.add_connection("foo", "cow", workflow_step_2)
workflow = workflow_from_steps([workflow_step_1, workflow_step_2])
self.persist(workflow)
workflow_id = workflow.id
annotation = model.WorkflowStepAnnotationAssociation()
annotation.annotation = "Test Step Annotation"
annotation.user = user
annotation.workflow_step = workflow_step_1
self.persist(annotation)
assert workflow_step_1.id is not None
h1 = model.History(name="WorkflowHistory1", user=user)
invocation_uuid = uuid.uuid1()
workflow_invocation = model.WorkflowInvocation()
workflow_invocation.uuid = invocation_uuid
workflow_invocation.history = h1
workflow_invocation_step1 = model.WorkflowInvocationStep()
workflow_invocation_step1.workflow_invocation = workflow_invocation
workflow_invocation_step1.workflow_step = workflow_step_1
subworkflow_invocation = model.WorkflowInvocation()
workflow_invocation.attach_subworkflow_invocation_for_step(workflow_step_2, subworkflow_invocation)
workflow_invocation_step2 = model.WorkflowInvocationStep()
workflow_invocation_step2.workflow_invocation = workflow_invocation
workflow_invocation_step2.workflow_step = workflow_step_2
workflow_invocation.workflow = workflow
d1 = self.new_hda(h1, name="1")
workflow_request_dataset = model.WorkflowRequestToInputDatasetAssociation()
workflow_request_dataset.workflow_invocation = workflow_invocation
workflow_request_dataset.workflow_step = workflow_step_1
workflow_request_dataset.dataset = d1
self.persist(workflow_invocation)
assert workflow_request_dataset is not None
assert workflow_invocation.id is not None
history_id = h1.id
self.expunge()
loaded_invocation = self.query(model.WorkflowInvocation).get(workflow_invocation.id)
assert loaded_invocation.uuid == invocation_uuid, f"{loaded_invocation.uuid} != {invocation_uuid}"
assert loaded_invocation
assert loaded_invocation.history.id == history_id
step_1, step_2 = loaded_invocation.workflow.steps
assert not step_1.subworkflow
assert step_2.subworkflow
assert len(loaded_invocation.steps) == 2
subworkflow_invocation_assoc = loaded_invocation.get_subworkflow_invocation_association_for_step(step_2)
assert subworkflow_invocation_assoc is not None
assert isinstance(subworkflow_invocation_assoc.subworkflow_invocation, model.WorkflowInvocation)
assert isinstance(subworkflow_invocation_assoc.parent_workflow_invocation, model.WorkflowInvocation)
assert subworkflow_invocation_assoc.subworkflow_invocation.history.id == history_id
loaded_workflow = self.query(model.Workflow).get(workflow_id)
assert len(loaded_workflow.steps[0].annotations) == 1
copied_workflow = loaded_workflow.copy(user=user)
annotations = copied_workflow.steps[0].annotations
assert len(annotations) == 1
def test_role_creation(self):
security_agent = GalaxyRBACAgent(self.model)
def check_private_role(private_role, email):
assert private_role.type == model.Role.types.PRIVATE
assert len(private_role.users) == 1
assert private_role.name == email
assert private_role.description == "Private Role for " + email
email = "rule_user_1@example.com"
u = model.User(email=email, password="password")
self.persist(u)
role = security_agent.get_private_user_role(u)
assert role is None
role = security_agent.create_private_user_role(u)
assert role is not None
check_private_role(role, email)
email = "rule_user_2@example.com"
u = model.User(email=email, password="password")
self.persist(u)
role = security_agent.get_private_user_role(u)
assert role is None
role = security_agent.get_private_user_role(u, auto_create=True)
assert role is not None
check_private_role(role, email)
# make sure re-running auto_create doesn't break things
role = security_agent.get_private_user_role(u, auto_create=True)
assert role is not None
check_private_role(role, email)
def test_private_share_role(self):
security_agent = GalaxyRBACAgent(self.model)
u_from, u_to, u_other = self._three_users("private_share_role")
h = model.History(name="History for Annotation", user=u_from)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(h, d1)
security_agent.privately_share_dataset(d1.dataset, [u_to])
assert security_agent.can_access_dataset(u_to.all_roles(), d1.dataset)
assert not security_agent.can_access_dataset(u_other.all_roles(), d1.dataset)
def test_make_dataset_public(self):
security_agent = GalaxyRBACAgent(self.model)
u_from, u_to, u_other = self._three_users("make_dataset_public")
h = model.History(name="History for Annotation", user=u_from)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(h, d1)
security_agent.privately_share_dataset(d1.dataset, [u_to])
security_agent.make_dataset_public(d1.dataset)
assert security_agent.can_access_dataset(u_to.all_roles(), d1.dataset)
assert security_agent.can_access_dataset(u_other.all_roles(), d1.dataset)
def test_set_all_dataset_permissions(self):
security_agent = GalaxyRBACAgent(self.model)
u_from, _, u_other = self._three_users("set_all_perms")
h = model.History(name="History for Annotation", user=u_from)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(h, d1)
role = security_agent.get_private_user_role(u_from, auto_create=True)
access_action = security_agent.permitted_actions.DATASET_ACCESS.action
manage_action = security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action
permissions = {access_action: [role], manage_action: [role]}
assert security_agent.can_access_dataset(u_other.all_roles(), d1.dataset)
security_agent.set_all_dataset_permissions(d1.dataset, permissions)
assert not security_agent.allow_action(
u_other.all_roles(), security_agent.permitted_actions.DATASET_ACCESS, d1.dataset
)
assert not security_agent.can_access_dataset(u_other.all_roles(), d1.dataset)
def test_can_manage_privately_shared_dataset(self):
security_agent = GalaxyRBACAgent(self.model)
u_from, u_to, u_other = self._three_users("can_manage_dataset")
h = model.History(name="History for Prevent Sharing", user=u_from)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(h, d1)
self._make_owned(security_agent, u_from, d1)
assert security_agent.can_manage_dataset(u_from.all_roles(), d1.dataset)
security_agent.privately_share_dataset(d1.dataset, [u_to])
assert not security_agent.can_manage_dataset(u_to.all_roles(), d1.dataset)
def test_can_manage_private_dataset(self):
security_agent = GalaxyRBACAgent(self.model)
u_from, _, u_other = self._three_users("can_manage_dataset_ps")
h = model.History(name="History for Prevent Sharing", user=u_from)
d1 = model.HistoryDatasetAssociation(
extension="txt", history=h, create_dataset=True, sa_session=self.model.session
)
self.persist(h, d1)
self._make_private(security_agent, u_from, d1)
assert security_agent.can_manage_dataset(u_from.all_roles(), d1.dataset)
assert not security_agent.can_manage_dataset(u_other.all_roles(), d1.dataset)
def test_history_hid_counter_is_expired_after_next_hid_call(self):
u = model.User(email="hid_abuser@example.com", password="password")
h = model.History(name="History for hid testing", user=u)
self.persist(u, h)
state = inspect(h)
assert h.hid_counter == 1
assert "hid_counter" not in state.unloaded
assert "id" not in state.unloaded
h._next_hid()
assert "hid_counter" in state.unloaded # this attribute has been expired
assert "id" not in state.unloaded # but other attributes have NOT been expired
        assert h.hid_counter == 2  # check this last: this causes the hid_counter to be reloaded
def test_next_hid(self):
u = model.User(email="hid_abuser@example.com", password="password")
h = model.History(name="History for hid testing", user=u)
self.persist(u, h)
assert h.hid_counter == 1
h._next_hid()
assert h.hid_counter == 2
h._next_hid(n=3)
assert h.hid_counter == 5
def _three_users(self, suffix):
email_from = f"user_{suffix}e1@example.com"
email_to = f"user_{suffix}e2@example.com"
email_other = f"user_{suffix}e3@example.com"
u_from = model.User(email=email_from, password="password")
u_to = model.User(email=email_to, password="password")
u_other = model.User(email=email_other, password="password")
self.persist(u_from, u_to, u_other)
return u_from, u_to, u_other
def _make_private(self, security_agent, user, hda):
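        """Restrict dataset access and manage permissions to the user's private role."""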
role = security_agent.get_private_user_role(user, auto_create=True)
access_action = security_agent.permitted_actions.DATASET_ACCESS.action
manage_action = security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action
permissions = {access_action: [role], manage_action: [role]}
security_agent.set_all_dataset_permissions(hda.dataset, permissions)
def _make_owned(self, security_agent, user, hda):
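        """Grant only manage permissions to the user's private role; dataset access stays unrestricted."""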
role = security_agent.get_private_user_role(user, auto_create=True)
manage_action = security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action
permissions = {manage_action: [role]}
security_agent.set_all_dataset_permissions(hda.dataset, permissions)
def new_hda(self, history, **kwds):
return history.add_dataset(
model.HistoryDatasetAssociation(create_dataset=True, sa_session=self.model.session, **kwds)
)
@skip_if_not_postgres_base
class PostgresMappingTests(MappingTests):
@classmethod
def _db_uri(cls):
base = os.environ.get("GALAXY_TEST_UNIT_MAPPING_URI_POSTGRES_BASE")
dbname = "gxtest" + str(uuid.uuid4())
assert base
postgres_url = base + dbname
create_database(postgres_url)
return postgres_url
class MockObjectStore:
def __init__(self):
pass
def size(self, dataset):
return 42
def exists(self, *args, **kwds):
return True
def get_filename(self, *args, **kwds):
return "mock_dataset_14.dat"
def get_store_by(self, *args, **kwds):
return "id"
def update_from_file(self, *arg, **kwds):
pass
def get_suite():
suite = unittest.TestSuite()
suite.addTest(MappingTests("test_basic"))
return suite
| 44.365787
| 150
| 0.677591
|
4a16c3ff5b4bb345b435c1e276048a7adbfb2d5c
| 148
|
py
|
Python
|
cython/setup.py
|
brittainhard/py
|
aede05530ad05a8319fef7e76b49e4bf3cebebac
|
[
"MIT"
] | null | null | null |
cython/setup.py
|
brittainhard/py
|
aede05530ad05a8319fef7e76b49e4bf3cebebac
|
[
"MIT"
] | null | null | null |
cython/setup.py
|
brittainhard/py
|
aede05530ad05a8319fef7e76b49e4bf3cebebac
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules=cythonize("hello_world.pyx"),
name="Hello World"
)
| 16.444444
| 45
| 0.75
|
4a16c43a8600aa1e30ebe5fa8cf96b0046ee3b14
| 2,906
|
py
|
Python
|
tensor2tensor/utils/video2gif.py
|
repoloper/tensor2tensor
|
2fd91d34b8e6d79599c0612e446175174e838b9d
|
[
"Apache-2.0"
] | 61
|
2018-06-23T01:40:58.000Z
|
2021-06-07T09:33:38.000Z
|
tensor2tensor/utils/video2gif.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/utils/video2gif.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | 8
|
2018-10-23T13:10:12.000Z
|
2019-07-31T05:53:08.000Z
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""View the problem.
This binary saves the videos in the problem(dataset) into gifs.
The imagemagick package should be installed for conversion to gifs.
Example usage to view dataset:
video2gif \
--data_dir ~/data \
--problem=gym_water_world_random5k \
--hparams_set=next_frame_stochastic \
--output_dir /usr/local/google/home/mbz/t2t_train/ww/ \
--num_samples 10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import
from tensor2tensor.data_generators import problem # pylint: disable=unused-import
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_samples", -1, "Number of saved samples.")
def create_gif(name):
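  """Stitch the saved frames matching `name`* into `name`.gif using ImageMagick's convert."""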
cmd = "convert -delay 15 {0}* {0}.gif".format(name)
os.system(cmd)
def main(_):
problem_name = FLAGS.problem
if "video" not in problem_name and "gym" not in problem_name:
print("This tool only works for video problems.")
return
mode = tf.estimator.ModeKeys.TRAIN
hparams = trainer_lib.create_hparams(
FLAGS.hparams_set,
FLAGS.hparams,
data_dir=os.path.expanduser(FLAGS.data_dir),
problem_name=problem_name)
dataset = hparams.problem.input_fn(mode, hparams)
features = dataset.make_one_shot_iterator().get_next()
tf.gfile.MakeDirs(FLAGS.output_dir)
base_template = os.path.join(FLAGS.output_dir, FLAGS.problem)
count = 0
with tf.train.MonitoredTrainingSession() as sess:
while not sess.should_stop():
# TODO(mbz): figure out what the second output is.
data, _ = sess.run(features)
video_batch = np.concatenate((data["inputs"], data["targets"]), axis=1)
for video in video_batch:
print("Saving {}/{}".format(count, FLAGS.num_samples))
name = "%s_%05d" % (base_template, count)
decoding.save_video(video, name + "_{:05d}.png")
create_gif(name)
count += 1
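        # Note: with the default --num_samples of -1 this check never fires,
        # so every sample in the dataset is converted.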
if count == FLAGS.num_samples:
sys.exit(0)
if __name__ == "__main__":
tf.app.run()
| 31.247312
| 82
| 0.718169
|
4a16c4d40226b0c455b3cbcfd51a9789fdd5e258
| 7,594
|
py
|
Python
|
gans/cgan.py
|
er-Bot/gans
|
fc19446750e10896dd3b1746b0ccb3c4d3b5ed8d
|
[
"MIT"
] | null | null | null |
gans/cgan.py
|
er-Bot/gans
|
fc19446750e10896dd3b1746b0ccb3c4d3b5ed8d
|
[
"MIT"
] | null | null | null |
gans/cgan.py
|
er-Bot/gans
|
fc19446750e10896dd3b1746b0ccb3c4d3b5ed8d
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm.auto import tqdm
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
__all__ = ["Discriminator", "Generator", "CGAN"]
criterion = nn.BCEWithLogitsLoss()
hidden_dim = 128
class Discriminator(nn.Module):
def __init__(self, in_dim):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_dim, 4 * hidden_dim),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(4 * hidden_dim, 2 * hidden_dim),
nn.Dropout(0.4),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(2 * hidden_dim, hidden_dim),
nn.Dropout(0.4),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(hidden_dim, 1)
)
def forward(self, x, y):
d_in = torch.cat((x, y), -1)
return self.model(d_in)
class Generator(nn.Module):
def __init__(self, in_dim, out_dim):
super(Generator, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, 2 * hidden_dim),
nn.BatchNorm1d(2 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(2 * hidden_dim, 4 * hidden_dim),
nn.BatchNorm1d(4 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(4 * hidden_dim, 8 * hidden_dim),
nn.BatchNorm1d(8 * hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(8 * hidden_dim, out_dim),
nn.Sigmoid()
)
def forward(self, z, y):
g_in = torch.cat((z, y), -1)
return self.model(g_in)
class CGAN:
def __init__(self):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# img_size of the form (1, w, h) e.g. for MNIST it's (1, 28, 28)
def setup(self, z_dim, n_classes, img_size, lr, betas):
self.z_dim = z_dim
self.n_classes = n_classes
self.img_size = img_size
        assert len(img_size) == 3, 'size should be of format: (channel, width, height)'
x_dim = img_size[1] * img_size[2]
self.generator = Generator(z_dim + n_classes, x_dim).to(self.device)
self.discriminator = Discriminator(x_dim + n_classes).to(self.device)
self.g_opt = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=betas)
self.d_opt = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=betas)
self.d_loss_history = []
self.g_loss_history = []
self.z = self.noise(100)
self.start_epoch = 0
def load_state(self, path):
state = torch.load(path, map_location=self.device)
self.z_dim = state['z_dim']
self.n_classes = state['n_classes']
self.img_size = state['img_size']
self.generator = state['gen']
self.discriminator = state['disc']
self.g_opt = state['g_opt']
self.d_opt = state['d_opt']
self.d_loss_history = state['d_loss_history'].tolist()
self.g_loss_history = state['g_loss_history'].tolist()
self.z = state['z']
self.start_epoch = state['start_epoch']
def noise(self, n):
return torch.randn(n, self.z_dim, device=self.device)
def show_images(self, images, figsize=(10, 10), nrow=10, show=False, path='.'):
img_unflat = images.detach().cpu().view(-1, *self.img_size)
img_grid = make_grid(img_unflat, nrow=nrow)
plt.figure(figsize=figsize)
plt.imshow(img_grid.permute(1, 2, 0).squeeze())
if not show:
plt.savefig(path)
else:
plt.show()
plt.close(None)
def get_discriminator_loss(self, real, labels, batch_size):
noise = self.noise(batch_size)
fake_image_gen = self.generator(noise, labels)
fake_image_pred = self.discriminator(fake_image_gen.detach(), labels)
fake_image_loss = criterion(fake_image_pred, torch.zeros_like(fake_image_pred))
real_image_pred = self.discriminator(real, labels)
real_image_loss = criterion(real_image_pred, torch.ones_like(real_image_pred))
disc_loss = (fake_image_loss + real_image_loss) / 2
return disc_loss
def get_generator_loss(self, labels, batch_size):
noise = self.noise(batch_size)
fake_image_gen = self.generator(noise, labels)
fake_image_pred = self.discriminator(fake_image_gen, labels)
gen_loss = criterion(fake_image_pred, torch.ones_like(fake_image_pred))
return gen_loss
def one_hot(self, labels):
return F.one_hot(labels, self.n_classes).to(self.device)
def train(self, dataloader, n_epochs, display_step=1, save_step=50, path='.'):
for epoch in range(self.start_epoch, n_epochs + 1):
for real, labels in tqdm(dataloader):
batch_size = len(real)
real = real.view(batch_size, -1).to(self.device) # flatten
y = self.one_hot(labels)
""" Update discriminator """
self.d_opt.zero_grad()
disc_loss = self.get_discriminator_loss(real, y, batch_size)
disc_loss.backward()
self.d_opt.step()
self.d_loss_history += [disc_loss.item()]
""" Update generator """
self.g_opt.zero_grad()
gen_loss = self.get_generator_loss(y, batch_size)
gen_loss.backward()
self.g_opt.step()
self.g_loss_history += [gen_loss.item()]
### Some visuals ###
if epoch % display_step == 0:
print(f"Epoch {epoch}: G_loss = {self.g_loss_history[-1]}, D_loss = {self.d_loss_history[-1]}")
yy = self.one_hot(torch.arange(0, 100, 1)//10)
generated = self.generator(self.z, yy)
self.show_images(generated, path=path+'/sample-%04d.png'%epoch)
# loss functions
step_bins = 20
n_example = (len(self.d_loss_history) // step_bins) * step_bins
plt.clf()
plt.figure(figsize=(10, 5))
plt.plot(
range(n_example // step_bins),
torch.Tensor(self.g_loss_history[:n_example]).view(-1, step_bins).mean(1),
label="Generator loss"
)
plt.plot(
range(n_example // step_bins),
torch.Tensor(self.d_loss_history[:n_example]).view(-1, step_bins).mean(1),
label="Discriminator loss"
)
plt.legend()
plt.savefig(path+'/loss-%04d.png'%epoch)
plt.close(None)
### Model saving ###
if epoch % save_step == 0:
state = {
'z_dim': self.z_dim,
'n_classes': self.n_classes,
'img_size': self.img_size,
'gen': self.generator,
'disc': self.discriminator,
'd_opt': self.d_opt,
'g_opt': self.g_opt,
'd_loss_history': torch.Tensor(self.d_loss_history),
'g_loss_history': torch.Tensor(self.g_loss_history),
'z': self.z,
'start_epoch': epoch + 1,
}
torch.save(state, path+'/cgan-%04d.h5'%epoch)
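# A minimal usage sketch (assuming an MNIST-style DataLoader named `dataloader`
# yielding (images, labels) batches is available; it is not part of this module):
#
#     gan = CGAN()
#     gan.setup(z_dim=64, n_classes=10, img_size=(1, 28, 28),
#               lr=2e-4, betas=(0.5, 0.999))
#     gan.train(dataloader, n_epochs=200, path='./out')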
| 37.97
| 111
| 0.564393
|
4a16c50ac67ab1d7846d6e1d5c5e6c9ff2749df8
| 8,546
|
py
|
Python
|
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class GraphicRaycaster:
def __new__(cls, arg1=None):
'''
:returns: GraphicRaycaster
:rtype: UnityEngine.UI.GraphicRaycaster
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_sortOrderPriority():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_renderOrderPriority():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_ignoreReversedGraphics():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_ignoreReversedGraphics(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_blockingObjects():
'''
:returns: GraphicRaycaster+BlockingObjects
:rtype: UnityEngine.GraphicRaycaster+BlockingObjects
'''
pass
@staticmethod
def set_blockingObjects(arg1):
'''
:param arg1: BlockingObjects
:type arg1: UnityEngine.BlockingObjects
'''
pass
@staticmethod
def Raycast(arg1, arg2):
'''
:param arg1: PointerEventData
:type arg1: UnityEngine.PointerEventData
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
def get_eventCamera():
'''
:returns: Camera
:rtype: UnityEngine.Camera
'''
pass
@staticmethod
def ToString():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def IsActive():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def IsDestroyed():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_enabled():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_enabled(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_transform():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_gameObject():
'''
:returns: GameObject
:rtype: UnityEngine.GameObject
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponent(arg1=None):
pass
@staticmethod
@overload
def GetComponentInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponentInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInParent(arg1=None):
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInParent(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponents(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponents(arg1=None, arg2=None):
pass
@staticmethod
def GetInstanceID():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetHashCode():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def Equals(arg1):
'''
:param arg1: Object
:type arg1: System.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_name():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def set_name(arg1):
'''
:param arg1: String
:type arg1: System.String or str
'''
pass
@staticmethod
def GetType():
'''
:returns: Type
:rtype: System.Type
'''
pass
| 20.742718
| 77
| 0.540838
|
4a16c5db35587459a30c9c56b88a1572f07adc02
| 1,755
|
py
|
Python
|
setup.py
|
vincentxavier/sphinx-proof
|
e400bd95a21159cd6cabe2026d53eb807df2d675
|
[
"MIT"
] | 12
|
2020-11-17T02:39:50.000Z
|
2022-02-16T21:14:01.000Z
|
setup.py
|
vincentxavier/sphinx-proof
|
e400bd95a21159cd6cabe2026d53eb807df2d675
|
[
"MIT"
] | 33
|
2020-10-09T14:10:22.000Z
|
2022-03-20T05:47:30.000Z
|
setup.py
|
executablebooks/sphinxcontrib-prettyproof
|
e400bd95a21159cd6cabe2026d53eb807df2d675
|
[
"MIT"
] | 6
|
2021-03-05T16:38:47.000Z
|
2022-02-04T11:19:05.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
VERSION = "v0.1.3"
LONG_DESCRIPTION = """
This package contains a [Sphinx](http://www.sphinx-doc.org/en/master/) extension
for producing proof, theorem, axiom, lemma, definition, criterion, remark, conjecture,
corollary, algorithm, example, property, observation and proposition directives.
This project is maintained and supported by [najuzilu](https://github.com/najuzilu).
"""
SHORT_DESCRIPTION = "A Sphinx extension for producing proofs, theorems, axioms, etc."
BASE_URL = "https://github.com/executablebooks/sphinx-proof"
URL = f"{BASE_URL}/archive/{VERSION}.tar.gz"
# Define all extras
extras = {
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"coverage",
"pytest>=3.6,<4",
"pytest-cov",
"pytest-regressions",
"beautifulsoup4",
"myst-parser",
"texsoup",
],
"rtd": [
"sphinx>=3.0",
"sphinx-book-theme",
"sphinxcontrib-bibtex",
"myst-parser",
"sphinx_togglebutton",
],
}
extras["all"] = set(ii for jj in extras.values() for ii in jj)
setup(
name="sphinx-proof",
version=VERSION,
python_requires=">=3.6",
author="QuantEcon",
author_email="admin@quantecon.org",
url=BASE_URL,
download_url=URL,
project_urls={
"Source": BASE_URL,
"Tracker": f"{BASE_URL}/issues",
},
description=SHORT_DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
license="BSD",
packages=find_packages(),
install_requires=["docutils>=0.15", "sphinx", "sphinx-book-theme"],
extras_require=extras,
include_package_data=True,
)
| 27.421875
| 86
| 0.649003
|
4a16c697b6ae42e609288f0f91f2273bd8318a59
| 946
|
py
|
Python
|
caption.py
|
skypher/python-imagemagick-annotations
|
f2914bdfbc98a42b905664b903f9de4dd4a82199
|
[
"MIT"
] | null | null | null |
caption.py
|
skypher/python-imagemagick-annotations
|
f2914bdfbc98a42b905664b903f9de4dd4a82199
|
[
"MIT"
] | null | null | null |
caption.py
|
skypher/python-imagemagick-annotations
|
f2914bdfbc98a42b905664b903f9de4dd4a82199
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
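# Overlay each two-line description from desc.txt onto its matching JPEG as a
# caption bar (requires ImageMagick's `identify` and `convert` on the PATH).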
import sys
import re
import os
with open('desc.txt', 'r') as f:
    while True:
        line = f.readline()
        if line == '':  # EOF
            break
        sys.stdout.write(line)
        if re.match(r'^\s$', line, re.UNICODE):  # skip empty lines
            continue
        line2 = f.readline()
        m = re.match(r'^\s*(.+?)\.\s*(.+)$', line)
        assert m
        fn = m.group(1)
        desc1 = m.group(2)
        assert fn
        assert desc1
desc = desc1.strip() + ' ' + line2.strip()
# http://www.imagemagick.org/Usage/annotating/#annotating
cmd = "width=`identify -format %w '{}'`; convert -resize 40% -background '#0008' -fill white -gravity center -size ${{width}}x100 caption:'{}' '{}' +swap -gravity south -composite '{}'".format(fn+'.jpg', desc, fn + '.jpg', fn + 'cap.jpg')
print(cmd)
r = os.system(cmd)
print('<', r, '>', fn, ' / ', desc)
| 30.516129
| 246
| 0.504228
|
4a16c77e6374f70e58259a4d2b5c01d14770a76b
| 1,376
|
py
|
Python
|
common/src/stack/command/stack/commands/add/box/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/add/box/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/add/box/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | null | null | null |
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack
import stack.commands
from stack.exception import ArgUnique, CommandError, ArgNotFound
class Command(stack.commands.BoxArgumentProcessor,
stack.commands.OSArgumentProcessor,
stack.commands.add.command):
"""
Add a box specification to the database.
<arg type='string' name='box'>
Name of the new box.
</arg>
<param type='string' name='os'>
OS associated with the box. Default is the native os (e.g., 'redhat', 'sles').
</param>
<example cmd='add box develop'>
Adds the box named "develop" into the database.
</example>
"""
def run(self, params, args):
if len(args) != 1:
raise ArgUnique(self, 'box')
box = args[0]
if box in self.getBoxNames():
raise CommandError(self, 'box "%s" exists' % box)
OS, = self.fillParams([ ('os', self.os) ])
if OS not in self.getOSNames():
raise ArgNotFound(self, OS, 'OS')
self.db.execute("""insert into boxes (name, os) values
(%s, (select id from oses where name=%s))""", (box, OS))
| 25.962264
| 79
| 0.692587
|
4a16c9161e12e4eb6feb73ab75a868f8f12686f2
| 2,628
|
py
|
Python
|
pychron/graph/tools/cursor_tool_overlay.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 1
|
2019-02-27T21:57:44.000Z
|
2019-02-27T21:57:44.000Z
|
pychron/graph/tools/cursor_tool_overlay.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/graph/tools/cursor_tool_overlay.py
|
AGESLDEO/pychron
|
1a81e05d9fba43b797f335ceff6837c016633bcf
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.text_box_overlay import TextBoxOverlay
from traits.api import Enum, Any, Bool
# ============= standard library imports ========================
# ============= local library imports ==========================
class CursorToolOverlay(TextBoxOverlay):
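    """Text box overlay that displays the cursor tool's current (DAC, intensity) position."""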
border_visible = True
bgcolor = 'lightgreen'
border_color = 'darkgreen'
tool = Any
visibility = Enum("auto", True, False)
visible = False
tooltip_mode = Bool(False)
def _tool_changed(self, old, new):
if old:
old.on_trait_event(self._new_value_updated, 'current_position', remove=True)
old.on_trait_change(self._tool_visible_changed, "visible", remove=True)
if new:
new.on_trait_event(self._new_value_updated, 'current_position')
new.on_trait_change(self._tool_visible_changed, "visible")
self._tool_visible_changed()
def _new_value_updated(self, new):
if new is None:
self.text = ""
if self.visibility == "auto":
self.visible = False
return
elif self.visibility == "auto":
self.visible = True
if self.tooltip_mode:
self.alternate_position = self.tool.last_mouse_position
else:
self.alternate_position = None
ns = ['DAC ={:0.5f}'.format(new[0]),
'Intensity={:0.5f}'.format(new[1])]
self.text = '\n'.join(ns)
self.component.request_redraw()
def _visible_changed(self):
self.component.request_redraw()
def _tool_visible_changed(self):
self.visibility = self.tool.visible
if self.visibility != "auto":
self.visible = self.visibility
# ============= EOF =============================================
| 36
| 88
| 0.578767
|
4a16c93817e37a847ee921257e7a72097321518e
| 1,122
|
py
|
Python
|
kodi_library_update.py
|
joshlapham/py-misc-scripts
|
67495e1551ec2151be9179b619a3f31ddcd784ba
|
[
"Beerware"
] | null | null | null |
kodi_library_update.py
|
joshlapham/py-misc-scripts
|
67495e1551ec2151be9179b619a3f31ddcd784ba
|
[
"Beerware"
] | null | null | null |
kodi_library_update.py
|
joshlapham/py-misc-scripts
|
67495e1551ec2151be9179b619a3f31ddcd784ba
|
[
"Beerware"
] | null | null | null |
#!/usr/bin/env python
from requests import post
from json import dumps
import kodi_cfg as cfg
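# `kodi_cfg` is expected to define the following names (a sketch with
# placeholder values; adjust for your Kodi instance):
#
#     KODI_USERNAME = "kodi"
#     KODI_PASSWORD = "secret"
#     KODI_HOST = "192.168.1.10"
#     KODI_PORT = 8080
#     KODI_JSON_RPC_VERSION = "2.0"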
HEADERS = {'content-type': 'application/json'}
KODI_JSON_RPC_URL = "http://" + cfg.KODI_USERNAME + ":" + cfg.KODI_PASSWORD + "@" + cfg.KODI_HOST + ":" + str(cfg.KODI_PORT) + "/jsonrpc"
def do_video_library_scan(logger=None):
payload = {"jsonrpc": cfg.KODI_JSON_RPC_VERSION, "method": "VideoLibrary.Scan"}
response = post(KODI_JSON_RPC_URL, data=dumps(payload), headers=HEADERS)
if logger:
logger.info(response)
return response
def do_video_library_clean(logger=None):
payload = {"jsonrpc": cfg.KODI_JSON_RPC_VERSION, "method": "VideoLibrary.Clean"}
response = post(KODI_JSON_RPC_URL, data=dumps(payload), headers=HEADERS)
if logger:
logger.info(response)
return response
if __name__ == '__main__':
    # Make an API call to Kodi Media Center to clean and update the video library.
    try:
        do_video_library_scan()
        do_video_library_clean()
    except Exception as e:
        print("Error: %s" % e)
| 30.324324
| 137
| 0.655971
|
4a16ca356345208b67264313cae7afce6c8e20a1
| 1,369
|
py
|
Python
|
silver/migrations/0054_auto_20210109_1153.py
|
truehostcloud/silver
|
dd60ea476f0c7c6055df32669fcba9f0bf70d8da
|
[
"Apache-2.0"
] | null | null | null |
silver/migrations/0054_auto_20210109_1153.py
|
truehostcloud/silver
|
dd60ea476f0c7c6055df32669fcba9f0bf70d8da
|
[
"Apache-2.0"
] | null | null | null |
silver/migrations/0054_auto_20210109_1153.py
|
truehostcloud/silver
|
dd60ea476f0c7c6055df32669fcba9f0bf70d8da
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-09 11:53
from django.db import migrations, models
import django_fsm
class Migration(migrations.Migration):
dependencies = [
("silver", "0053_auto_20191028_1254"),
]
operations = [
migrations.AlterField(
model_name="transaction",
name="fail_code",
field=models.CharField(
blank=True,
choices=[
("default", "default"),
("insufficient_funds", "insufficient_funds"),
("expired_payment_method", "expired_payment_method"),
("expired_card", "expired_card"),
("invalid_payment_method", "invalid_payment_method"),
("invalid_card", "invalid_card"),
("limit_exceeded", "limit_exceeded"),
("transaction_declined", "transaction_declined"),
("transaction_declined_by_bank", "transaction_declined_by_bank"),
("transaction_hard_declined", "transaction_hard_declined"),
(
"transaction_hard_declined_by_bank",
"transaction_hard_declined_by_bank",
),
],
max_length=64,
null=True,
),
),
]
| 34.225
| 85
| 0.514244
|
4a16ca9fb7d2ea279c4488dc30f32d38b95f9b7f
| 2,367
|
py
|
Python
|
server/apps/api/migrations/0005_auto_20200813_1806.py
|
efojs/censortracker_backend
|
1654e86a5f9004c9ddd13886c4d1e0ce7276f1cd
|
[
"MIT"
] | 8
|
2020-08-17T09:12:21.000Z
|
2022-03-05T09:25:29.000Z
|
server/apps/api/migrations/0005_auto_20200813_1806.py
|
efojs/censortracker_backend
|
1654e86a5f9004c9ddd13886c4d1e0ce7276f1cd
|
[
"MIT"
] | 17
|
2020-06-30T08:55:00.000Z
|
2021-12-12T01:25:56.000Z
|
server/apps/api/migrations/0005_auto_20200813_1806.py
|
efojs/censortracker_backend
|
1654e86a5f9004c9ddd13886c4d1e0ce7276f1cd
|
[
"MIT"
] | 3
|
2020-07-29T04:51:31.000Z
|
2021-08-01T12:37:32.000Z
|
# Generated by Django 3.0.5 on 2020-08-13 18:06
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0004_auto_20200729_0752"),
]
operations = [
migrations.RemoveField(model_name="domain", name="client_hash",),
migrations.RemoveField(model_name="domain", name="client_ip",),
migrations.RemoveField(model_name="domain", name="client_provider",),
migrations.RemoveField(model_name="domain", name="client_region",),
migrations.CreateModel(
name="Case",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"client_ip",
models.GenericIPAddressField(
blank=True, null=True, verbose_name="Client IP"
),
),
(
"client_hash",
models.CharField(
blank=True,
default="",
max_length=64,
verbose_name="Client Hash",
),
),
(
"client_region",
models.CharField(
blank=True,
default="",
max_length=64,
verbose_name="Client region",
),
),
(
"client_provider",
models.CharField(
blank=True,
default="",
max_length=64,
verbose_name="Client provider",
),
),
(
"domain",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="api.Domain"
),
),
],
options={"verbose_name": "Case", "verbose_name_plural": "Cases",},
),
]
| 32.424658
| 84
| 0.393325
|
4a16caa396c1fcc5cf15b52f12be4973d5d147ca
| 20,211
|
py
|
Python
|
tensorflow_federated/python/learning/federated_evaluation_test.py
|
teo-milea/federated
|
ce0707a954a531860eb38864b44d7b748fd62aa7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/learning/federated_evaluation_test.py
|
teo-milea/federated
|
ce0707a954a531860eb38864b44d7b748fd62aa7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/learning/federated_evaluation_test.py
|
teo-milea/federated
|
ce0707a954a531860eb38864b44d7b748fd62aa7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from unittest import mock
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import test_utils
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.core.test import static_assert
from tensorflow_federated.python.learning import federated_evaluation
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import encoding_utils
from tensorflow_federated.python.learning.metrics import aggregator
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
# Convenience aliases.
FederatedType = computation_types.FederatedType
FunctionType = computation_types.FunctionType
SequenceType = computation_types.SequenceType
StructType = computation_types.StructType
TensorType = computation_types.TensorType
class TestModel(model.Model):
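  """Model whose single unfinalized metric counts datapoints above max_temp."""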
def __init__(self):
self._variables = collections.namedtuple('Vars', 'max_temp num_over')(
max_temp=tf.Variable(
lambda: tf.zeros(dtype=tf.float32, shape=[]),
name='max_temp',
trainable=True),
num_over=tf.Variable(0.0, name='num_over', trainable=False))
@property
def trainable_variables(self):
return [self._variables.max_temp]
@property
def non_trainable_variables(self):
return []
@property
def local_variables(self):
return [self._variables.num_over]
@property
def input_spec(self):
return collections.OrderedDict(temp=tf.TensorSpec([None], tf.float32))
@tf.function
def predict_on_batch(self, batch, training=True):
del training # Unused.
return tf.zeros_like(batch['temp'])
@tf.function
def forward_pass(self, batch, training=True):
assert not training
num_over = tf.reduce_sum(
tf.cast(
tf.greater(batch['temp'], self._variables.max_temp), tf.float32))
self._variables.num_over.assign_add(num_over)
loss = tf.constant(0.0)
predictions = self.predict_on_batch(batch, training)
return model.BatchOutput(
loss=loss,
predictions=predictions,
num_examples=tf.shape(predictions)[0])
@tf.function
def report_local_unfinalized_metrics(self):
return collections.OrderedDict(num_over=self._variables.num_over)
def metric_finalizers(self):
return collections.OrderedDict(num_over=tf.function(func=lambda x: x))
@tf.function
def reset_metrics(self):
"""Resets metrics variables to initial value."""
for var in self.local_variables:
var.assign(tf.zeros_like(var))
class TestModelQuant(model.Model):
"""This model stores how much client data matches the input (num_same)."""
def __init__(self):
self._variables = collections.namedtuple('Vars', 'given_nums num_same')(
given_nums=tf.Variable(
lambda: tf.zeros(dtype=tf.float32, shape=(4,)),
name='given_nums',
trainable=True),
num_same=tf.Variable(0.0, name='num_same', trainable=False))
@property
def trainable_variables(self):
return [self._variables.given_nums]
@property
def non_trainable_variables(self):
return []
@property
def local_variables(self):
return [self._variables.num_same]
@property
def input_spec(self):
return collections.OrderedDict(temp=tf.TensorSpec([None], tf.float32))
@tf.function
def predict_on_batch(self, batch, training=True):
del training # Unused.
return tf.zeros_like(batch['temp'])
@tf.function
def forward_pass(self, batch, training=True):
"""Unlike the TestModel implementation above, only tracks num_same."""
assert not training
# Calculate how many of the values in the training data match the input.
num_same = tf.reduce_sum(
tf.cast(
tf.equal(batch['temp'], self._variables.given_nums), tf.float32))
self._variables.num_same.assign_add(num_same)
# We're not actually training anything, so just use 0 loss and predictions.
loss = tf.constant(0.0)
predictions = self.predict_on_batch(batch, training)
return model.BatchOutput(
loss=loss,
predictions=predictions,
num_examples=tf.shape(predictions)[0])
@tf.function
def report_local_unfinalized_metrics(self):
return collections.OrderedDict(num_same=self._variables.num_same)
def metric_finalizers(self):
return collections.OrderedDict(num_same=tf.function(func=lambda x: x))
@tf.function
def reset_metrics(self):
"""Resets metrics variables to initial value."""
for var in self.local_variables:
var.assign(tf.zeros_like(var))
def _model_fn_from_keras():
keras_model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(1,)),
tf.keras.layers.Dense(
1,
kernel_initializer='ones',
bias_initializer='zeros',
activation=None)
], name='my_model') # pyformat: disable
# TODO(b/165666045): pyformat would create a big gap here
return keras_utils.from_keras_model(
keras_model,
input_spec=collections.OrderedDict(
x=tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
y=tf.TensorSpec(shape=(None, 1), dtype=tf.float32)),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.Accuracy()])
def _build_simple_quant_encoder(quantization_bits):
"""Returns a function to quantize an input tensor using quantization_bits."""
def simple_quant_encoder(value: tf.Tensor):
def quant_encoder(value: tf.Tensor):
assert value.dtype in [tf.float32, tf.float64]
return te.encoders.uniform_quantization(quantization_bits)
spec = tf.TensorSpec(value.shape, value.dtype)
return te.encoders.as_simple_encoder(quant_encoder(value), spec)
return simple_quant_encoder
def _build_expected_broadcaster_next_signature():
"""Returns signature of the broadcaster used in multiple tests below."""
state_type = computation_types.at_server(
computation_types.StructType([('trainable', [
(),
]), ('non_trainable', [])]))
value_type = computation_types.at_server(
model_utils.weights_type_from_model(TestModelQuant))
result_type = computation_types.at_clients(
model_utils.weights_type_from_model(TestModelQuant))
measurements_type = computation_types.at_server(())
return computation_types.FunctionType(
parameter=collections.OrderedDict(state=state_type, value=value_type),
result=collections.OrderedDict(
state=state_type, result=result_type, measurements=measurements_type))
def _build_expected_test_quant_model_eval_signature():
"""Returns signature for build_federated_evaluation using TestModelQuant."""
weights_parameter_type = computation_types.at_server(
model_utils.weights_type_from_model(TestModelQuant))
data_parameter_type = computation_types.at_clients(
computation_types.SequenceType(
collections.OrderedDict(
temp=computation_types.TensorType(
shape=(None,), dtype=tf.float32))))
return_type = computation_types.at_server(
collections.OrderedDict(
eval=collections.OrderedDict(num_same=tf.float32)))
return computation_types.FunctionType(
parameter=collections.OrderedDict(
server_model_weights=weights_parameter_type,
federated_dataset=data_parameter_type),
result=return_type)
class FederatedEvaluationTest(test_case.TestCase, parameterized.TestCase):
@test_utils.skip_test_for_multi_gpu
def test_local_evaluation(self):
model_weights_type = model_utils.weights_type_from_model(TestModel)
batch_type = computation_types.to_type(TestModel().input_spec)
client_evaluate = federated_evaluation.build_local_evaluation(
TestModel, model_weights_type, batch_type)
self.assert_types_equivalent(
client_evaluate.type_signature,
FunctionType(
parameter=StructType([
('incoming_model_weights', model_weights_type),
('dataset',
SequenceType(
StructType([('temp',
TensorType(dtype=tf.float32,
shape=[None]))]))),
]),
result=collections.OrderedDict(
local_outputs=collections.OrderedDict(num_over=tf.float32),
num_examples=tf.int64)))
def _temp_dict(temps):
return {'temp': np.array(temps, dtype=np.float32)}
client_result = client_evaluate(
collections.OrderedDict(trainable=[5.0], non_trainable=[]),
[_temp_dict([1.0, 10.0, 2.0, 8.0]),
_temp_dict([6.0, 11.0])])
self.assertEqual(
client_result,
collections.OrderedDict(
local_outputs=collections.OrderedDict(num_over=4.0),
num_examples=6))
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation(self):
evaluate = federated_evaluation.build_federated_evaluation(TestModel)
model_weights_type = model_utils.weights_type_from_model(TestModel)
self.assert_types_equivalent(
evaluate.type_signature,
FunctionType(
parameter=StructType([
('server_model_weights',
computation_types.at_server(model_weights_type)),
('federated_dataset',
computation_types.at_clients(
SequenceType(
StructType([
('temp',
TensorType(dtype=tf.float32, shape=[None]))
])))),
]),
result=computation_types.at_server(
collections.OrderedDict(
eval=collections.OrderedDict(num_over=tf.float32)))))
def _temp_dict(temps):
return {'temp': np.array(temps, dtype=np.float32)}
result = evaluate(
collections.OrderedDict(trainable=[5.0], non_trainable=[]), [
[_temp_dict([1.0, 10.0, 2.0, 7.0]),
_temp_dict([6.0, 11.0])],
[_temp_dict([9.0, 12.0, 13.0])],
[_temp_dict([1.0]), _temp_dict([22.0, 23.0])],
])
self.assertEqual(
result,
collections.OrderedDict(
eval=collections.OrderedDict(num_over=9.0),
))
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_quantized_conservatively(self):
# Set up a uniform quantization encoder as the broadcaster.
broadcaster = (
encoding_utils.build_encoded_broadcast_process_from_model(
TestModelQuant, _build_simple_quant_encoder(12)))
self.assert_types_equivalent(broadcaster.next.type_signature,
_build_expected_broadcaster_next_signature())
evaluate = federated_evaluation.build_federated_evaluation(
TestModelQuant, broadcast_process=broadcaster)
# Confirm that the type signature matches what is expected.
self.assert_types_identical(
evaluate.type_signature,
_build_expected_test_quant_model_eval_signature())
def _temp_dict(temps):
return {'temp': np.array(temps, dtype=np.float32)}
result = evaluate(
collections.OrderedDict(
trainable=[[5.0, 10.0, 5.0, 7.0]], non_trainable=[]), [
[
_temp_dict([1.0, 10.0, 2.0, 7.0]),
_temp_dict([6.0, 11.0, 5.0, 8.0])
],
[_temp_dict([9.0, 12.0, 13.0, 7.0])],
[
_temp_dict([1.0, 22.0, 23.0, 24.0]),
_temp_dict([5.0, 10.0, 5.0, 7.0])
],
])
# This conservative quantization should not be too lossy.
# When comparing the data examples to trainable, there are 8 times
# where the index and value match.
self.assertEqual(
result,
collections.OrderedDict(eval=collections.OrderedDict(num_same=8.0)))
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_quantized_aggressively(self):
# Set up a uniform quantization encoder as the broadcaster.
broadcaster = (
encoding_utils.build_encoded_broadcast_process_from_model(
TestModelQuant, _build_simple_quant_encoder(2)))
self.assert_types_equivalent(broadcaster.next.type_signature,
_build_expected_broadcaster_next_signature())
evaluate = federated_evaluation.build_federated_evaluation(
TestModelQuant, broadcast_process=broadcaster)
# Confirm that the type signature matches what is expected.
self.assert_types_identical(
evaluate.type_signature,
_build_expected_test_quant_model_eval_signature())
def _temp_dict(temps):
return {'temp': np.array(temps, dtype=np.float32)}
result = evaluate(
collections.OrderedDict(
trainable=[[5.0, 10.0, 5.0, 7.0]], non_trainable=[]), [
[
_temp_dict([1.0, 10.0, 2.0, 7.0]),
_temp_dict([6.0, 11.0, 5.0, 8.0])
],
[_temp_dict([9.0, 12.0, 13.0, 7.0])],
[
_temp_dict([1.0, 22.0, 23.0, 24.0]),
_temp_dict([5.0, 10.0, 5.0, 7.0])
],
])
# This very aggressive quantization should be so lossy that some of the
# data is changed during encoding so the number that are equal between
# the original and the final result should not be 8 as it is in the
# conservative quantization test above.
self.assertEqual(list(result.keys()), ['eval'])
self.assertContainsSubset(result['eval'].keys(), ['num_same'])
self.assertLess(result['eval']['num_same'], 8.0)
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_fails_stateful_broadcast(self):
# Create a test stateful measured process that doesn't do anything useful.
@computations.federated_computation
def init_fn():
return intrinsics.federated_eval(
computations.tf_computation(
lambda: tf.zeros(shape=[], dtype=tf.float32)), placements.SERVER)
@computations.federated_computation(
computation_types.at_server(tf.float32),
computation_types.at_clients(tf.int32))
def next_fn(state, value):
return measured_process.MeasuredProcessOutput(state, value, state)
broadcaster = measured_process.MeasuredProcess(init_fn, next_fn)
with self.assertRaisesRegex(ValueError, 'stateful broadcast'):
federated_evaluation.build_federated_evaluation(
TestModelQuant, broadcast_process=broadcaster)
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_fails_non_measured_process_broadcast(self):
broadcaster = computations.tf_computation(lambda x: x)
with self.assertRaisesRegex(ValueError, '`MeasuredProcess`'):
federated_evaluation.build_federated_evaluation(
TestModelQuant, broadcast_process=broadcaster)
@parameterized.named_parameters(('non-simulation', False),
('simulation', True))
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_with_keras(self, simulation):
evaluate_comp = federated_evaluation.build_federated_evaluation(
_model_fn_from_keras, use_experimental_simulation_loop=simulation)
initial_weights = tf.nest.map_structure(
lambda x: x.read_value(),
model_utils.ModelWeights.from_model(_model_fn_from_keras()))
def _input_dict(temps):
return collections.OrderedDict(
x=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)),
y=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)))
result = evaluate_comp(
initial_weights,
[[_input_dict([1.0, 10.0, 2.0, 7.0]),
_input_dict([6.0, 11.0])], [_input_dict([9.0, 12.0, 13.0])],
[_input_dict([1.0]), _input_dict([22.0, 23.0])]])
# Expect 100% accuracy and no loss because we've constructed the identity
# function and have the same x's and y's for training data.
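    # (The three clients contribute 4+2, 3, and 1+2 examples: 12 examples
    # across 5 batches, matching the metrics asserted below.)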
self.assertDictEqual(
result,
collections.OrderedDict(
eval=collections.OrderedDict(
accuracy=1.0, loss=0.0, num_examples=12, num_batches=5)))
@mock.patch.object(
dataset_reduce,
'_dataset_reduce_fn',
wraps=dataset_reduce._dataset_reduce_fn)
@test_utils.skip_test_for_multi_gpu
def test_federated_evaluation_dataset_reduce(self, mock_method):
evaluate_comp = federated_evaluation.build_federated_evaluation(
_model_fn_from_keras, use_experimental_simulation_loop=False)
initial_weights = tf.nest.map_structure(
lambda x: x.read_value(),
model_utils.ModelWeights.from_model(_model_fn_from_keras()))
def _input_dict(temps):
return collections.OrderedDict(
x=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)),
y=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)))
evaluate_comp(
initial_weights,
[[_input_dict([1.0, 10.0, 2.0, 7.0]),
_input_dict([6.0, 11.0])], [_input_dict([9.0, 12.0, 13.0])],
[_input_dict([1.0]), _input_dict([22.0, 23.0])]])
mock_method.assert_called()
@mock.patch.object(
dataset_reduce,
'_dataset_reduce_fn',
wraps=dataset_reduce._dataset_reduce_fn)
@test_utils.skip_test_for_gpu
def test_federated_evaluation_simulation_loop(self, mock_method):
evaluate_comp = federated_evaluation.build_federated_evaluation(
_model_fn_from_keras, use_experimental_simulation_loop=True)
initial_weights = tf.nest.map_structure(
lambda x: x.read_value(),
model_utils.ModelWeights.from_model(_model_fn_from_keras()))
def _input_dict(temps):
return collections.OrderedDict(
x=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)),
y=np.reshape(np.array(temps, dtype=np.float32), (-1, 1)))
evaluate_comp(
initial_weights,
[[_input_dict([1.0, 10.0, 2.0, 7.0]),
_input_dict([6.0, 11.0])]])
mock_method.assert_not_called()
def test_construction_calls_model_fn(self):
    # Assert that the process building does not call `model_fn` too many
# times. `model_fn` can potentially be expensive (loading weights,
# processing, etc).
mock_model_fn = mock.Mock(side_effect=TestModel)
federated_evaluation.build_federated_evaluation(mock_model_fn)
# TODO(b/186451541): reduce the number of calls to model_fn.
self.assertEqual(mock_model_fn.call_count, 2)
def test_no_unsecure_aggregation_with_secure_metrics_finalizer(self):
evaluate_comp = federated_evaluation.build_federated_evaluation(
_model_fn_from_keras,
metrics_aggregator=aggregator.secure_sum_then_finalize)
static_assert.assert_not_contains_unsecure_aggregation(evaluate_comp)
if __name__ == '__main__':
execution_contexts.set_local_python_execution_context()
test_case.main()
| 39.168605
| 84
| 0.694424
|
4a16cae940bf12c336e4a3857f5de4224329148d
| 5,929
|
py
|
Python
|
flaskr/docs.py
|
amarabuco/celia
|
8c278662c3e6e44442affc0aa481363023541c8a
|
[
"Apache-2.0"
] | 2
|
2020-09-29T14:33:06.000Z
|
2021-06-15T13:34:38.000Z
|
flaskr/docs.py
|
amarabuco/celia
|
8c278662c3e6e44442affc0aa481363023541c8a
|
[
"Apache-2.0"
] | null | null | null |
flaskr/docs.py
|
amarabuco/celia
|
8c278662c3e6e44442affc0aa481363023541c8a
|
[
"Apache-2.0"
] | null | null | null |
import os
import textract
import re
from difflib import Differ, SequenceMatcher, HtmlDiff
from collections import Counter, OrderedDict
from operator import itemgetter, attrgetter
from flask import (
Flask, Blueprint, flash, g, redirect, render_template, request, url_for, send_from_directory, send_file
)
from werkzeug.exceptions import abort
from werkzeug.utils import secure_filename
from flaskr import UPLOAD_FOLDER, ALLOWED_EXTENSIONS
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('docs', __name__)
@bp.route('/')
def index():
db = get_db()
docs = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM doc p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('docs/index.html', docs=docs)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO doc (title, body, author_id)'
' VALUES (?, ?, ?)',
(title, body, g.user['id'])
)
db.commit()
return redirect(url_for('docs.index'))
return render_template('docs/create.html')
def get_doc(id, check_author=True):
doc = get_db().execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM doc p JOIN user u ON p.author_id = u.id'
' WHERE p.id = ?',
(id,)
).fetchone()
if doc is None:
abort(404, "doc id {0} doesn't exist.".format(id))
if check_author and doc['author_id'] != g.user['id']:
abort(403)
return doc
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
doc = get_doc(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'UPDATE doc SET title = ?, body = ?'
' WHERE id = ?',
(title, body, id)
)
db.commit()
return redirect(url_for('docs.index'))
return render_template('docs/update.html', doc=doc)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
get_doc(id)
db = get_db()
db.execute('DELETE FROM doc WHERE id = ?', (id,))
db.commit()
return redirect(url_for('docs.index'))
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
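# Example: with ALLOWED_EXTENSIONS = {'pdf', 'png'} (the actual set is defined
# in the flaskr package; this value is assumed for illustration),
# allowed_file('contract.pdf') is True and allowed_file('contract') is False.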
@bp.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(UPLOAD_FOLDER, filename))
body = file2text(filename)
db = get_db()
db.execute(
'INSERT INTO doc (title, body, author_id)'
' VALUES (?, ?, ?)',
(filename, body, g.user['id'])
)
db.commit()
return redirect(url_for('docs.uploaded_file',
filename=filename))
return render_template('docs/upload.html')
@bp.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(os.path.abspath(UPLOAD_FOLDER), filename)
@bp.route('/convert/<filename>')
def file2text(filename):
text = textract.process(os.path.join(
UPLOAD_FOLDER, filename), method="tesseract")
return text
@bp.route('/<int:id>/analise')
def analise(id):
doc = get_doc(id)
text = doc['body'].decode()
    linhas = text.splitlines()
data = dict()
data['num_linhas'] = len(linhas)
pattern = re.compile("clausula")
clausulas = {linha: number for number, linha in enumerate(
linhas) if pattern.match(linha.lower())}
clausulas = OrderedDict(clausulas)
values = list(clausulas.values())
return render_template('docs/analise.html', data=data, clausulas=clausulas, values=values, linhas=linhas)
@bp.route('/compare', methods=('GET', 'POST'))
@login_required
def compare():
if request.method == 'POST':
doc1 = int(request.form['doc1'])
doc2 = int(request.form['doc2'])
result = diff(doc1, doc2)
return render_template('docs/compare.html', result=result)
db = get_db()
docs = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM doc p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('docs/compare.html', docs=docs)
def diff(doc1, doc2):
    text1 = get_doc(doc1)['body']
    text2 = get_doc(doc2)['body']
    # decode the stored bytes (as analise() does) instead of str()-wrapping
    # them, and avoid fetching each document twice
    lines1 = text1.decode().splitlines(keepends=True)
    lines2 = text2.decode().splitlines(keepends=True)
d = Differ()
#d = HtmlDiff()
result = list(d.compare(lines1, lines2))
#result = d.make_table(lines1, lines2)
#result = SequenceMatcher(None, text1, text2).ratio()
return result
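# Differ marks each output line with a two-character code:
#   '  line' (common to both docs), '- line' (only in doc1),
#   '+ line' (only in doc2), '? ----' (intraline difference hints)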
| 28.233333
| 109
| 0.594873
|
4a16cb6116d4e9c10e194b94c7d7304bb414fea2
| 5,494
|
py
|
Python
|
urlreduce/settings.py
|
jesuejunior/urlreduce
|
cefd59e6242366bc790f0a258707af5dd4df3fe3
|
[
"BSD-3-Clause"
] | null | null | null |
urlreduce/settings.py
|
jesuejunior/urlreduce
|
cefd59e6242366bc790f0a258707af5dd4df3fe3
|
[
"BSD-3-Clause"
] | null | null | null |
urlreduce/settings.py
|
jesuejunior/urlreduce
|
cefd59e6242366bc790f0a258707af5dd4df3fe3
|
[
"BSD-3-Clause"
] | null | null | null |
#encoding: utf-8
# Django settings for urlreduce project.
import os
import sys
PROJECT_DIR = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Jesue Junior', 'talkto@jesuejunior.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_DIR, 'urlreduce.db'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-BR'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Static files organization
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')
MEDIA_URL = '/media/'
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join('static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '!ge+wf)v*dvn14km&#f+a(+z550n3u4(v_rn$yxevnj&&k+w5j'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urlreduce.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'urlreduce.wsgi.application'
TEMPLATE_DIRS = (
os.path.join('templates/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'registration',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'reducer',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['jj_console'],
'propagate': True,
}
},
'formatters': {
'simple_format': {
'format': '{%(levelname)s:%(asctime)s - %(funcName)s:%(lineno)d - %(threadName)s:%(message)s}'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'jj_console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple_format',
'stream': sys.stdout,
},
},
}
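# With this config, messages logged under 'django.db.backends' (including the
# SQL queries Django emits at DEBUG level) go to stdout via 'jj_console', e.g.:
#   import logging
#   logging.getLogger('django.db.backends').debug('hand-rolled debug message')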
APPEND_SLASH = True
# django-register
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/my-links/'
try:
from .settings_local import *
except ImportError:
    print u'The .settings_local.py file was not found; using the default settings'
| 30.865169
| 127
| 0.67055
|
4a16cbc7c3df76c45a87d0b959a005a00b8268cb
| 4,742
|
py
|
Python
|
wintria/lib/source_data.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | 2
|
2017-10-04T20:53:09.000Z
|
2021-11-12T10:02:32.000Z
|
wintria/lib/source_data.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | null | null | null |
wintria/lib/source_data.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | null | null | null |
"""
"""
import re
import urllib2
import urllib
import os
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup
from wintria.lib.bing_logo_extract import extract_bing_url
from wintria.lib import s3
from wintria.lib.imaging import thumbnail
from wintria.article.models import NO_DESC
from wintria.wintria.settings import PROJECT_ROOT
def url_exists(url):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return regex.search(url) is not None
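# e.g. url_exists('http://example.com/logo.png') -> True,
#      url_exists('not a url') -> False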
def is_img(url):
return ('.jpg' in url) or ('.png' in url) or ('.jpeg' in url) or ('.gif' in url)\
or ('.bmp' in url) or ('.tiff' in url)
LINKS_W_IMGS = ['meta', 'link', 'og', 'img', 'a']
IMG_KEYWORDS = ['logo', 'thumb']
USER_AGENT = "Mozilla/5.0"
def url_into_query(url):
url = urlparse(url).netloc
url = url.split('.')
    if url[0] == 'www' or url[0] == 'www2':  # drop the leading 'www' subdomain
url = url[1:]
url = ' '.join(url)
return url
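# e.g. url_into_query('http://www.huffingtonpost.com/section') returns
# 'huffingtonpost com': the netloc split on dots, with the leading 'www'
# dropped and the remaining pieces joined by spaces.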
def get_desc(soup):
if not soup:
print 'err, no soup, can\'t access desc, logo'
return NO_DESC
meta_desc = soup.find('meta', {'name':'description'})
fb_desc = soup.find('meta', {'property':'og:description'})
desc = None
try:
if meta_desc and meta_desc['content'].strip():
desc = meta_desc['content']
elif fb_desc and fb_desc['content'].strip():
desc = fb_desc['content']
except Exception, e:
desc = None
if not desc:
desc = NO_DESC
return desc
def get_logo(soup):
fb_img = soup.find('meta', {'property':'og:image'})
if fb_img and url_exists(fb_img['content']) and is_img(fb_img['content']):
return fb_img['content']
icon = soup.find('link', {'rel':'icon'})
if icon and url_exists(icon['href']) and is_img(icon['href']):
return icon['href']
icon = soup.find('link', {'rel':'img_src'})
if icon and url_exists(icon['href']) and is_img(icon['href']):
return icon['href']
return None
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
def get_soup(url):
req = urllib2.Request(url, headers=hdr)
try: con = urllib2.urlopen( req, timeout=3 )
except Exception, e:
print 'error opening url', str(e), 'at', url
return None
try: soup = BeautifulSoup(con.read())
except Exception, e:
print str(e), "Error with trying to open beautifulsoup, trying to query on", url
return None
return soup
def save_to_disk(url, domain):
try: urllib.urlretrieve(url, PROJECT_ROOT + 'wintria/wintria/templates/static/logobank/' +
domain + '.png')
except Exception, e:
print str(e), 'error downloading', domain, '\'s logo'
def save_logo(soup, domain):
img_url = None
if soup:
img_url = get_logo(soup)
if not img_url:
img_url = extract_bing_url(domain)
if img_url:
print 'downloading logo for ...', domain
save_to_disk(img_url, domain)
else:
pass
def push_s3(s):
try:
key = s.thumbnail_key()
img, img_url = thumbnail(s.get_logo_url())
local = key + '.jpg'
if img is None:
return
try:
img.save(local)
except IOError: # converting to jpg causes errors sometimes
print 'caught error'
img.convert('RGB').save(local)
abs_pth = os.path.abspath(local)
print s3.upload_img(abs_pth, key, bucket='wintria-source-images')
os.remove(abs_pth)
except Exception, e:
print('%s fail to save img %s' % (str(e), s.domain))
return
if __name__ == '__main__':
soup = get_soup('http://huffingtonpost.com')
print get_desc(soup)
| 33.871429
| 128
| 0.549768
|
4a16cc4b8683d37f62318e6e3843142281e2d115
| 6,248
|
py
|
Python
|
tensorflow_addons/utils/test_utils.py
|
henry-eigen/addons
|
6c2869c1d6e413f39cb5a8404b3315a9ba6eeaa4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_addons/utils/test_utils.py
|
henry-eigen/addons
|
6c2869c1d6e413f39cb5a8404b3315a9ba6eeaa4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_addons/utils/test_utils.py
|
henry-eigen/addons
|
6c2869c1d6e413f39cb5a8404b3315a9ba6eeaa4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing Addons."""
import contextlib
import inspect
import unittest
import random
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.utils import resource_loader
# TODO: copy the layer_test implementation in Addons.
from tensorflow.python.keras.testing_utils import layer_test # noqa: F401
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and tf.test.is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with tf.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
def create_virtual_devices(
num_devices, force_device=None, memory_limit_per_device=1024
):
"""Virtualize a the physical device into logical devices.
Args:
num_devices: The number of virtual devices needed.
        force_device: 'CPU'/'GPU'. Defaults to None, in which case the
            device type is selected based on the system.
memory_limit_per_device: Specify memory for each
virtual GPU. Only for GPUs.
Returns:
virtual_devices: A list of virtual devices which can be passed to
tf.distribute.MirroredStrategy()
"""
if force_device is None:
device_type = (
"GPU" if len(tf.config.list_physical_devices("GPU")) > 0 else "CPU"
)
else:
assert force_device in ["CPU", "GPU"]
device_type = force_device
physical_devices = tf.config.list_physical_devices(device_type)
if device_type == "CPU":
memory_limit_per_device = None
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[
tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=memory_limit_per_device
)
for _ in range(num_devices)
],
)
return tf.config.experimental.list_logical_devices(device_type)
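# Usage sketch (mirrors what run_distributed below does internally):
#   devices = create_virtual_devices(2, force_device="CPU")
#   strategy = tf.distribute.MirroredStrategy(devices)
#   with strategy.scope():
#       ...  # build and run the computation across 2 virtual devices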
def run_all_distributed(num_devices):
base_decorator = run_distributed(num_devices)
def decorator(cls):
for name, method in cls.__dict__.copy().items():
if (
callable(method)
and name.startswith(unittest.TestLoader.testMethodPrefix)
and name != "test_session"
):
setattr(cls, name, base_decorator(method))
return cls
return decorator
# TODO: Add support for other distribution strategies
def run_distributed(num_devices):
def decorator(f):
if inspect.isclass(f):
raise TypeError(
"`run_distributed` only supports test methods. "
"Did you mean to use `run_all_distributed`?"
)
def decorated(self, *args, **kwargs):
logical_devices = create_virtual_devices(num_devices)
strategy = tf.distribute.MirroredStrategy(logical_devices)
with strategy.scope():
f(self, *args, **kwargs)
return decorated
return decorator
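# Usage sketch: run every test_* method of a class under a 2-device
# MirroredStrategy (class body assumed for illustration):
#   @run_all_distributed(2)
#   class MyOpTest(tf.test.TestCase):
#       def test_something(self):
#           ...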
def finalizer():
tf.config.experimental_run_functions_eagerly(False)
@pytest.fixture(scope="function", params=["eager_mode", "tf_function"])
def maybe_run_functions_eagerly(request):
if request.param == "eager_mode":
tf.config.experimental_run_functions_eagerly(True)
elif request.param == "tf_function":
tf.config.experimental_run_functions_eagerly(False)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function", params=["CPU", "GPU"])
def cpu_and_gpu(request):
if request.param == "CPU":
with tf.device("/device:CPU:0"):
yield
else:
if not tf.test.is_gpu_available():
pytest.skip("GPU is not available.")
with tf.device("/device:GPU:0"):
yield
@pytest.fixture(scope="function", params=["channels_first", "channels_last"])
def data_format(request):
return request.param
@pytest.fixture(scope="function", autouse=True)
def set_seeds():
random.seed(0)
np.random.seed(0)
tf.random.set_seed(0)
def pytest_addoption(parser):
parser.addoption(
"--skip-custom-ops",
action="store_true",
help="When a custom op is being loaded in a test, skip this test.",
)
@pytest.fixture(scope="session", autouse=True)
def set_global_variables(request):
if request.config.getoption("--skip-custom-ops"):
resource_loader.SKIP_CUSTOM_OPS = True
def assert_allclose_according_to_type(
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
):
"""
Similar to tf.test.TestCase.assertAllCloseAccordingToType()
but this doesn't need a subclassing to run.
"""
a = np.array(a)
b = np.array(b)
# types with lower tol are put later to overwrite previous ones.
if (
a.dtype == np.float32
or b.dtype == np.float32
or a.dtype == np.complex64
or b.dtype == np.complex64
):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if a.dtype == tf.bfloat16.as_numpy_dtype or b.dtype == tf.bfloat16.as_numpy_dtype:
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
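# Usage sketch: float16 inputs fall back to the looser half-precision
# tolerances (rtol/atol 1e-3), so this passes even though it would fail
# at the default rtol/atol of 1e-6:
#   assert_allclose_according_to_type(
#       np.array([1.0], np.float16), np.array([1.0005], np.float16))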
| 29.060465
| 86
| 0.652369
|
4a16cca1b50574c78bb0e0c3dbf83aebfa0246c3
| 20,400
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpmvpnsendersitesipv4_4fb28863ad3595e11a7fecc4fbb6ec9d.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpmvpnsendersitesipv4_4fb28863ad3595e11a7fecc4fbb6ec9d.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpmvpnsendersitesipv4_4fb28863ad3595e11a7fecc4fbb6ec9d.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BgpMVpnSenderSitesIpv4(Base):
"""Bgp MVPN Sender Sites Properties
The BgpMVpnSenderSitesIpv4 class encapsulates a list of bgpMVpnSenderSitesIpv4 resources that are managed by the user.
A list of resources can be retrieved from the server using the BgpMVpnSenderSitesIpv4.find() method.
The list can be managed by using the BgpMVpnSenderSitesIpv4.add() and BgpMVpnSenderSitesIpv4.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bgpMVpnSenderSitesIpv4'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableNextHop': 'enableNextHop',
'GroupAddressCount': 'groupAddressCount',
'GroupMaskWidth': 'groupMaskWidth',
'Ipv4NextHop': 'ipv4NextHop',
'Ipv6NextHop': 'ipv6NextHop',
'Name': 'name',
'SendTriggeredSourceActiveADRoute': 'sendTriggeredSourceActiveADRoute',
'SetNextHop': 'setNextHop',
'SetNextHopIpType': 'setNextHopIpType',
'SourceAddressCount': 'sourceAddressCount',
'SourceGroupMapping': 'sourceGroupMapping',
'SourceMaskWidth': 'sourceMaskWidth',
'StartGroupAddressIpv4': 'startGroupAddressIpv4',
'StartSourceAddressIpv4': 'startSourceAddressIpv4',
}
def __init__(self, parent):
super(BgpMVpnSenderSitesIpv4, self).__init__(parent)
@property
def CMacProperties(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.cmacproperties_4ac468c2f246fc5ef1a77fc3e4ebe180.CMacProperties): An instance of the CMacProperties class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.cmacproperties_4ac468c2f246fc5ef1a77fc3e4ebe180 import CMacProperties
if self._properties.get('CMacProperties', None) is None:
return CMacProperties(self)
else:
return self._properties.get('CMacProperties')
@property
def EvpnIPv4PrefixRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.evpnipv4prefixrange_79e14e1ab070701ebf4eb586cecc565f.EvpnIPv4PrefixRange): An instance of the EvpnIPv4PrefixRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.evpnipv4prefixrange_79e14e1ab070701ebf4eb586cecc565f import EvpnIPv4PrefixRange
if self._properties.get('EvpnIPv4PrefixRange', None) is None:
return EvpnIPv4PrefixRange(self)
else:
return self._properties.get('EvpnIPv4PrefixRange')
@property
def EvpnIPv6PrefixRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.evpnipv6prefixrange_f8dd80c93700c982de65324fe6552b86.EvpnIPv6PrefixRange): An instance of the EvpnIPv6PrefixRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.evpnipv6prefixrange_f8dd80c93700c982de65324fe6552b86 import EvpnIPv6PrefixRange
if self._properties.get('EvpnIPv6PrefixRange', None) is None:
return EvpnIPv6PrefixRange(self)
else:
return self._properties.get('EvpnIPv6PrefixRange')
@property
def Tag(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
if self._properties.get('Tag', None) is None:
return Tag(self)
else:
return self._properties.get('Tag')
@property
def Active(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableNextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableNextHop']))
@property
def GroupAddressCount(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Group Address Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupAddressCount']))
@property
def GroupMaskWidth(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Group Mask Width
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupMaskWidth']))
@property
def Ipv4NextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): IPv4 Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4NextHop']))
@property
def Ipv6NextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): IPv6 Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6NextHop']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def SendTriggeredSourceActiveADRoute(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Send Triggered Source Active A-D Route
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendTriggeredSourceActiveADRoute']))
@property
def SetNextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Set Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetNextHop']))
@property
def SetNextHopIpType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Set Next Hop IP Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetNextHopIpType']))
@property
def SourceAddressCount(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source Address Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceAddressCount']))
@property
def SourceGroupMapping(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source Group Mapping
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceGroupMapping']))
@property
def SourceMaskWidth(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source Mask Width
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceMaskWidth']))
@property
def StartGroupAddressIpv4(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start Group Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartGroupAddressIpv4']))
@property
def StartSourceAddressIpv4(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start Source Address IPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartSourceAddressIpv4']))
def update(self, Name=None):
"""Updates bgpMVpnSenderSitesIpv4 resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
"""Adds a new bgpMVpnSenderSitesIpv4 resource on the server and adds it to the container.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved bgpMVpnSenderSitesIpv4 resources using find and the newly added bgpMVpnSenderSitesIpv4 resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bgpMVpnSenderSitesIpv4 resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves bgpMVpnSenderSitesIpv4 resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpMVpnSenderSitesIpv4 resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpMVpnSenderSitesIpv4 resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching bgpMVpnSenderSitesIpv4 resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpMVpnSenderSitesIpv4 data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpMVpnSenderSitesIpv4 resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, EnableNextHop=None, GroupAddressCount=None, GroupMaskWidth=None, Ipv4NextHop=None, Ipv6NextHop=None, SendTriggeredSourceActiveADRoute=None, SetNextHop=None, SetNextHopIpType=None, SourceAddressCount=None, SourceGroupMapping=None, SourceMaskWidth=None, StartGroupAddressIpv4=None, StartSourceAddressIpv4=None):
"""Base class infrastructure that gets a list of bgpMVpnSenderSitesIpv4 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- EnableNextHop (str): optional regex of enableNextHop
- GroupAddressCount (str): optional regex of groupAddressCount
- GroupMaskWidth (str): optional regex of groupMaskWidth
- Ipv4NextHop (str): optional regex of ipv4NextHop
- Ipv6NextHop (str): optional regex of ipv6NextHop
- SendTriggeredSourceActiveADRoute (str): optional regex of sendTriggeredSourceActiveADRoute
- SetNextHop (str): optional regex of setNextHop
- SetNextHopIpType (str): optional regex of setNextHopIpType
- SourceAddressCount (str): optional regex of sourceAddressCount
- SourceGroupMapping (str): optional regex of sourceGroupMapping
- SourceMaskWidth (str): optional regex of sourceMaskWidth
- StartGroupAddressIpv4 (str): optional regex of startGroupAddressIpv4
- StartSourceAddressIpv4 (str): optional regex of startSourceAddressIpv4
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('abort', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def SwitchToSpmsi(self, *args, **kwargs):
"""Executes the switchToSpmsi operation on the server.
SwitchToSPMSI
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
switchToSpmsi(SessionIndices=list)
----------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
switchToSpmsi(SessionIndices=string)
------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
switchToSpmsi(Arg2=list)list
----------------------------
- Arg2 (list(number)): List of indices into the group. An empty list indicates all instances in the group.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('switchToSpmsi', payload=payload, response_object=None)
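# Usage sketch based on the class docstring above ('parent' is an assumed
# container object that exposes this resource as a property):
#   sites = parent.BgpMVpnSenderSitesIpv4.add(Name='senderSite-1')
#   found = parent.BgpMVpnSenderSitesIpv4.find(Name='^senderSite-1$')
#   found.remove()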
| 40.39604
| 367
| 0.66348
|
4a16cec95774839629ddbe1e2618c188c56ee882
| 14,400
|
py
|
Python
|
Policy_Gradient_with_Continuous_action.py
|
hoseinkh/Policy_Gradient_with_Continuous_action
|
ade61687675309a505e6db1f8ac975fc272ab5c4
|
[
"MIT"
] | null | null | null |
Policy_Gradient_with_Continuous_action.py
|
hoseinkh/Policy_Gradient_with_Continuous_action
|
ade61687675309a505e6db1f8ac975fc272ab5c4
|
[
"MIT"
] | null | null | null |
Policy_Gradient_with_Continuous_action.py
|
hoseinkh/Policy_Gradient_with_Continuous_action
|
ade61687675309a505e6db1f8ac975fc272ab5c4
|
[
"MIT"
] | null | null | null |
###############################################################################
# For more info, see https://hoseinkh.github.io/
###############################################################################
import gym
import os
import sys
import numpy as np
"""
# if using tensorflow v1:
import tensorflow as tf
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import matplotlib
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from gym import wrappers
from datetime import datetime
###############################################################################
# Feature transformer uses RBF kernels to transform the original state space to ...
# ... higher dimensions. This helps with the performance of the model!
class FeatureTransformer:
def __init__(self, env, n_components=500):
# generate states (observations)
observation_examples = np.array([env.observation_space.sample() for x in range(10000)])
# define scaler and scale the states (observations) --> mean 0 and variance 1
scaler = StandardScaler()
scaler.fit(observation_examples)
#
# Now we basically use RBF to for feature generation
# Each RBFSampler takes each (original) (feature representation) of ...
# ... a state and converts it to "n_components" new featuers.
# Hence, after concatenating the new features, we convert each state to ...
# ... {(# RBF samplers) * n_components} new features.
#
# We use RBF kernels with different variances to cover different parts ...
# ... of the space.
#
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=0.5, n_components=n_components))
])
    # For all the generated samples, transform original state representations ...
    # ... to a new state representation using "featurizer"
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
#
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
######################################
def transform(self, observations):
#
scaled_original_state_representation = self.scaler.transform(observations)
#
scaled_higher_dimensions_state_representation = self.featurizer.transform(scaled_original_state_representation)
return scaled_higher_dimensions_state_representation
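# Rough sketch of the resulting dimensionality (env assumed constructed):
# the four RBFSamplers are concatenated by the FeatureUnion, so each state
# maps to 4 * n_components features:
#   ft = FeatureTransformer(env, n_components=100)
#   ft.dimensions                      # 400
#   ft.transform([env.reset()]).shape  # (1, 400)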
###############################################################################
# We define the network operations directly in TensorFlow. This lets
# TensorFlow build the cost function's gradients automatically, so we
# do not have to derive and feed them in by hand.
# To do this TensorFlow needs to remember what operations happen in what ...
# ... order during the forward pass. Then, during the backward pass, ...
# ... TensorFlow traverses this list of operations in reverse order to ...
# ... compute gradients.
class HiddenLayer:
def __init__(self, inp_size_of_hidden_layer, out_size_of_hidden_layer, f=tf.nn.tanh, use_bias=True, zeros=False):
if zeros:
W = np.zeros((inp_size_of_hidden_layer, out_size_of_hidden_layer), dtype=np.float32)
else:
W = tf.random_normal(shape=(inp_size_of_hidden_layer, out_size_of_hidden_layer)) * np.sqrt(2. / inp_size_of_hidden_layer, dtype=np.float32)
self.W = tf.Variable(W)
#
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(out_size_of_hidden_layer).astype(np.float32))
#
self.f = f
######################################
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
###############################################################################
# approximates pi(a | s)
# here we use two NNs. One for predicting the mean of the action, and one to ...
# ... predict the std of the action. However, the two NNs have the same body, ...
# ... and only the last layer differs!
class PolicyModel:
def __init__(self, data_input_size, feature_transformer, hidden_layer_sizes=[]):
self.feature_transformer = feature_transformer
#
##### hidden layers #####
NN_input_size = data_input_size
self.hidden_layers = []
for NN_output_size in hidden_layer_sizes:
layer = HiddenLayer(NN_input_size, NN_output_size)
self.hidden_layers.append(layer)
NN_input_size = NN_output_size
#
    ## final layer for the mean (linear activation); its input width is the
    ## width of the last hidden layer (NN_input_size after the loop above)
    self.mean_layer = HiddenLayer(NN_input_size, 1, lambda x: x, use_bias=False, zeros=True)
#
    ## final layer for the variance (softplus activation ensures a positive std);
    ## same input width as the mean layer
    self.stdv_layer = HiddenLayer(NN_input_size, 1, tf.nn.softplus, use_bias=False, zeros=False)
#
### inputs and targets (used in the session)
    ## self.X is the feature representation of the state (after applying self.feature_transformer)
self.X = tf.placeholder(tf.float32, shape=(None, data_input_size), name='X')
self.actions = tf.placeholder(tf.float32, shape=(None,), name='actions')
## self.advantages is the G - V(S), which uses V(S) as a Baseline to ...
## ... decrease variance of the model!
self.advantages = tf.placeholder(tf.float32, shape=(None,), name='advantages')
#
### get final hidden layer
out_of_curr_layer = self.X
for layer in self.hidden_layers:
out_of_curr_layer = layer.forward(out_of_curr_layer)
#
### calculate output and cost
## calculate the mean of the Gaussian distribution for the action
mean = self.mean_layer.forward(out_of_curr_layer)
## calculate the std of the Gaussian distribution for the action
    stdv = self.stdv_layer.forward(out_of_curr_layer) + 1e-5  # add a small constant to the std for numerical stability
#
### make mean and std 1-D
mean = tf.reshape(mean, [-1])
stdv = tf.reshape(stdv, [-1])
#
### Build the normal distribution of the action
norm = tf.distributions.Normal(mean, stdv)
## note that the actions in the environment are between -1 and 1
self.predict_op = tf.clip_by_value(norm.sample(), -1, 1)
#
log_probs = norm.log_prob(self.actions)
## note that here we add a regularization term (i.e. 0.1*norm.entropy()) to the cost function ...
## ... to avoid overfitting!
cost = -tf.reduce_sum(self.advantages * log_probs + 0.1*norm.entropy())
self.train_op = tf.train.AdamOptimizer(1e-3).minimize(cost)
######################################
def set_session(self, session):
self.session = session
######################################
def partial_fit(self, X, actions, advantages):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
#
actions = np.atleast_1d(actions)
advantages = np.atleast_1d(advantages)
self.session.run(
self.train_op,
feed_dict={
self.X: X,
self.actions: actions,
self.advantages: advantages,
}
)
######################################
def predict(self, X):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
######################################
def sample_action(self, X):
p = self.predict(X)[0]
return p
###############################################################################
# approximates V(s)
# we use this function to calculate state-value function V(s) ...
# ... which is used as Baseline in the policy gradient, which ...
# ... helps decreasing the variance of the model!
class ValueModel:
def __init__(self, data_input_size, feature_transformer, hidden_layer_sizes=[]):
self.feature_transformer = feature_transformer
self.costs = []
#
# create the neural network for the state-value approximation (i.e. V(S))
self.layers = []
NN_input_size = data_input_size
for NN_output_size in hidden_layer_sizes:
layer = HiddenLayer(NN_input_size, NN_output_size)
self.layers.append(layer)
NN_input_size = NN_output_size
#
## final layer. Since we are predicting the value function, we only have one node, and ...
## ... the linear function is used as the activation function in the output layer
layer = HiddenLayer(NN_input_size, 1, lambda x: x)
self.layers.append(layer)
#
### inputs and targets
## self.X is the (feature-transformed) feature representation of the state
self.X = tf.placeholder(tf.float32, shape=(None, data_input_size), name='X')
## self.Y is the observed value for the state S.
self.Y = tf.placeholder(tf.float32, shape=(None,), name='Y')
#
### calculate output and cost
out_of_curr_layer = self.X # = feature representation of the state
for layer in self.layers:
out_of_curr_layer = layer.forward(out_of_curr_layer)
Y_hat = tf.reshape(out_of_curr_layer, [-1]) # the output of the NN (estimated V(s))
self.predict_op = Y_hat
#
### we use the squared error as the error function!
cost = tf.reduce_sum(tf.square(self.Y - Y_hat))
self.cost = cost
self.train_op = tf.train.AdamOptimizer(1e-1).minimize(cost)
######################################
def set_session(self, session):
self.session = session
######################################
def partial_fit(self, X, Y):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
Y = np.atleast_1d(Y)
self.session.run(self.train_op, feed_dict={self.X: X, self.Y: Y})
cost = self.session.run(self.cost, feed_dict={self.X: X, self.Y: Y})
self.costs.append(cost)
######################################
def predict(self, X):
X = np.atleast_2d(X)
X = self.feature_transformer.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
###############################################################################
def play_one_td(env, policy_model, value_model, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
#
while not done and iters < 2000:
        # if we reach 2000 steps, just quit; we don't want this going forever
        # (gym's default 200-step limit ends episodes too early for this task)
action = policy_model.sample_action(observation)
prev_observation = observation
observation, reward, done, info = env.step([action])
#
totalreward += reward
#
# update the models
V_next = value_model.predict(observation)
G = reward + gamma*V_next
advantage = G - value_model.predict(prev_observation)
policy_model.partial_fit(prev_observation, action, advantage)
value_model.partial_fit(prev_observation, G)
#
iters += 1
#
return totalreward, iters
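# The loop above is one-step TD: the target is G = r + gamma * V(s'), and
# the advantage G - V(s) replaces the raw return in the policy gradient,
# with V(s) acting as a baseline to reduce variance.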
###############################################################################
# we evaluate the performance of the model at each time t by taking the
# running average of the rewards over the last 100 episodes up to t.
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.xlabel("Iterations")
plt.ylabel("Average Time")
# plt.show()
curr_path = os.path.abspath(os.getcwd())
plt.savefig(curr_path + '/figs/reward_running_avg_MountainCarContinuous.png')
plt.close()
###############################################################################
# here we plot the negative of the optimal state value function (i.e., -V*(s))!
# Note that the optimal action values are equal to the negative of the average optimal time ...
# ... that it takes to reach the mountain.
# Hence this plot shows the average optimal time to reach the top of the mountain at each state.
def plot_avg_num_remaining_steps(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
# both X and Y will be of shape (num_tiles, num_tiles)
Z = np.apply_along_axis(lambda _: -1*np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
# Z will also be of shape (num_tiles, num_tiles)
#
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z,
rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Num steps to reach mountain == -V(s)')
ax.set_title("Num steps to Reach Mountain Function")
fig.colorbar(surf)
fig.savefig("./figs/Num_steps_to_Reach_Mountain.png")
# plt.show()
plt.close()
###############################################################################
if __name__ == '__main__':
env = gym.make('MountainCarContinuous-v0').env
feature_transformer = FeatureTransformer(env, n_components=100)
D = feature_transformer.dimensions
policy_model = PolicyModel(D, feature_transformer, [])
value_model = ValueModel(D, feature_transformer, [])
init = tf.global_variables_initializer()
session = tf.InteractiveSession()
session.run(init)
policy_model.set_session(session)
value_model.set_session(session)
discount_rate = 0.95
#
if True:
monitor_dir = os.getcwd() + "/videos/" + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
#
N = 50
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
totalreward, num_steps = play_one_td(env, policy_model, value_model, discount_rate)
totalrewards[n] = totalreward
if n % 1 == 0:
print("episode:", n, "total reward: %.1f" % totalreward, "num steps: %d" % num_steps, "avg reward (last 100): %.1f" % totalrewards[max(0, n-100):(n+1)].mean())
#
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
#
plt.plot(totalrewards)
plt.title("Rewards")
plt.savefig("./figs/reward_avg_MountainCarContinuous_policy_gradient_continuous_action.png")
plt.show()
plt.close()
#
plot_running_avg(totalrewards)
plot_avg_num_remaining_steps(env, value_model)
| 43.243243
| 165
| 0.645486
|
4a16cf0d6c7c60c8368ea5f570b4765d18cb55e3
| 46,566
|
py
|
Python
|
tests/data_context/test_data_context_test_yaml_config.py
|
Calvo94/great_expectations
|
bcb73249d5d6ab2dc94246fc09b046764778774d
|
[
"Apache-2.0"
] | 1
|
2021-12-20T22:16:03.000Z
|
2021-12-20T22:16:03.000Z
|
tests/data_context/test_data_context_test_yaml_config.py
|
Calvo94/great_expectations
|
bcb73249d5d6ab2dc94246fc09b046764778774d
|
[
"Apache-2.0"
] | null | null | null |
tests/data_context/test_data_context_test_yaml_config.py
|
Calvo94/great_expectations
|
bcb73249d5d6ab2dc94246fc09b046764778774d
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import json
import os
import tempfile
from unittest import mock
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core import ExpectationSuite
from great_expectations.data_context.store import CheckpointStore
from great_expectations.data_context.util import file_relative_path
from tests.test_utils import create_files_in_directory
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_empty_store(mock_emit, empty_data_context_stats_enabled):
# noinspection PyUnusedLocal
my_expectation_store = empty_data_context_stats_enabled.test_yaml_config(
yaml_config="""
module_name: great_expectations.data_context.store.expectations_store
class_name: ExpectationsStore
store_backend:
module_name: great_expectations.data_context.store.store_backend
class_name: InMemoryStoreBackend
"""
)
assert mock_emit.call_count == 1
# Substitute current anonymized name since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ExpectationsStore",
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
"success": True,
}
),
]
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_config_with_yaml_error(mock_emit, empty_data_context_stats_enabled):
with pytest.raises(Exception):
# noinspection PyUnusedLocal
my_expectation_store = empty_data_context_stats_enabled.test_yaml_config(
yaml_config="""
module_name: great_expectations.data_context.store.expectations_store
class_name: ExpectationsStore
store_backend:
module_name: "great_expectations.data_context.store.store_backend"
class_name: InMemoryStoreBackend
EGREGIOUS FORMATTING ERROR
"""
)
assert mock_emit.call_count == 1
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {"diagnostic_info": ["__yaml_parse_error__"]},
"success": False,
}
),
]
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_expectations_store_with_filesystem_store_backend(
mock_emit, empty_data_context_stats_enabled
):
tmp_dir = str(tempfile.mkdtemp())
with open(os.path.join(tmp_dir, "expectations_A1.json"), "w") as f_:
f_.write("\n")
with open(os.path.join(tmp_dir, "expectations_A2.json"), "w") as f_:
f_.write("\n")
# noinspection PyUnusedLocal
my_expectation_store = empty_data_context_stats_enabled.test_yaml_config(
yaml_config=f"""
module_name: great_expectations.data_context.store
class_name: ExpectationsStore
store_backend:
module_name: "great_expectations.data_context.store"
class_name: TupleFilesystemStoreBackend
base_directory: {tmp_dir}
"""
)
assert mock_emit.call_count == 1
# Substitute current anonymized name since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ExpectationsStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
"success": True,
}
)
]
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_store_with_filesystem_store_backend(
mock_emit, empty_data_context_stats_enabled, tmp_path_factory
):
tmp_dir: str = str(
tmp_path_factory.mktemp("test_checkpoint_store_with_filesystem_store_backend")
)
context: DataContext = empty_data_context_stats_enabled
yaml_config: str = f"""
store_name: my_checkpoint_store
class_name: CheckpointStore
module_name: great_expectations.data_context.store
store_backend:
class_name: TupleFilesystemStoreBackend
module_name: "great_expectations.data_context.store"
base_directory: {tmp_dir}/checkpoints
"""
my_checkpoint_store: CheckpointStore = context.test_yaml_config(
yaml_config=yaml_config,
return_mode="instantiated_class",
)
assert mock_emit.call_count == 1
# Substitute anonymized_name since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "CheckpointStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
"success": True,
}
),
]
report_object: dict = context.test_yaml_config(
yaml_config=yaml_config,
return_mode="report_object",
)
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "CheckpointStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
"success": True,
}
),
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "CheckpointStore",
"anonymized_store_backend": {
"parent_class": "TupleFilesystemStoreBackend"
},
},
"success": True,
}
),
]
assert my_checkpoint_store.config == report_object["config"]
expected_checkpoint_store_config: dict
expected_checkpoint_store_config = {
"store_name": "my_checkpoint_store",
"class_name": "CheckpointStore",
"module_name": "great_expectations.data_context.store.checkpoint_store",
"store_backend": {
"module_name": "great_expectations.data_context.store",
"class_name": "TupleFilesystemStoreBackend",
"base_directory": f"{tmp_dir}/checkpoints",
"suppress_store_backend_id": True,
"filepath_suffix": ".yml",
},
"overwrite_existing": False,
"runtime_environment": {
"root_directory": f"{context.root_directory}",
},
}
assert my_checkpoint_store.config == expected_checkpoint_store_config
checkpoint_store_name: str = my_checkpoint_store.config["store_name"]
context.get_config()["checkpoint_store_name"] = checkpoint_store_name
assert (
context.get_config_with_variables_substituted().checkpoint_store_name
== "my_checkpoint_store"
)
assert (
context.get_config_with_variables_substituted().checkpoint_store_name
== my_checkpoint_store.config["store_name"]
)
expected_checkpoint_store_config = {
"store_name": "my_checkpoint_store",
"class_name": "CheckpointStore",
"module_name": "great_expectations.data_context.store",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"module_name": "great_expectations.data_context.store",
"base_directory": f"{tmp_dir}/checkpoints",
"suppress_store_backend_id": True,
},
}
assert (
context.get_config_with_variables_substituted().stores[
context.get_config_with_variables_substituted().checkpoint_store_name
]
== expected_checkpoint_store_config
)
# No other usage stats calls
assert mock_emit.call_count == 2
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_empty_store2(mock_emit, empty_data_context_stats_enabled):
empty_data_context_stats_enabled.test_yaml_config(
yaml_config="""
class_name: ValidationsStore
store_backend:
module_name: "great_expectations.data_context.store.store_backend"
class_name: InMemoryStoreBackend
"""
)
assert mock_emit.call_count == 1
# Substitute anonymized_name since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ValidationsStore",
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
"success": True,
}
),
]
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_datasource_config(mock_emit, empty_data_context_stats_enabled):
temp_dir = str(tempfile.mkdtemp())
create_files_in_directory(
directory=temp_dir,
file_name_list=[
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
],
)
print(temp_dir)
return_obj = empty_data_context_stats_enabled.test_yaml_config(
yaml_config=f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
# class_name: ConfiguredAssetFilesystemDataConnector
class_name: InferredAssetFilesystemDataConnector
base_directory: {temp_dir}
glob_directive: '*.csv'
default_regex:
pattern: (.+)_(\\d+)\\.csv
group_names:
- letter
- number
""",
return_mode="report_object",
)
# Test usage stats messages
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
)
]
print(json.dumps(return_obj, indent=2))
assert set(return_obj.keys()) == {"execution_engine", "data_connectors"}
sub_obj = return_obj["data_connectors"]["my_filesystem_data_connector"]
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# sub_obj.pop("example_data_reference")
# assert sub_obj == {
# "class_name": "InferredAssetFilesystemDataConnector",
# "data_asset_count": 1,
# "example_data_asset_names": ["DEFAULT_ASSET_NAME"],
# "data_assets": {
# "DEFAULT_ASSET_NAME": {
# "batch_definition_count": 10,
# "example_data_references": [
# "abe_20200809_1040.csv",
# "alex_20200809_1000.csv",
# "alex_20200819_1300.csv",
# ],
# }
# },
# "example_unmatched_data_references": [],
# "unmatched_data_reference_count": 0,
# }
# No other usage stats calls
assert mock_emit.call_count == 1
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_error_states(mock_emit, empty_data_context_stats_enabled):
first_config: str = """
class_name: Datasource
execution_engine:
class_name: NOT_A_REAL_CLASS_NAME
"""
with pytest.raises(ge_exceptions.DatasourceInitializationError) as excinfo:
empty_data_context_stats_enabled.test_yaml_config(yaml_config=first_config)
# print(excinfo.value.message)
# shortened_message_len = len(excinfo.value.message)
# print("="*80)
assert mock_emit.call_count == 1
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {"parent_class": "Datasource"},
"success": False,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
# Set shorten_tracebacks=True and verify that no error is thrown, even though the config is the same as before.
# Note: a more thorough test could also verify that the traceback is indeed short.
empty_data_context_stats_enabled.test_yaml_config(
yaml_config=first_config,
shorten_tracebacks=True,
)
assert mock_emit.call_count == 2
expected_call_args_list.append(
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {"parent_class": "Datasource"},
"success": False,
}
),
)
assert mock_emit.call_args_list == expected_call_args_list
    # For good measure, do it again with a different config containing a different kind of mistake.
    # Note this erroneous key/value does not cause an error; it is simply removed from the Datasource config.
temp_dir = str(tempfile.mkdtemp())
second_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
# class_name: ConfiguredAssetFilesystemDataConnector
class_name: InferredAssetFilesystemDataConnector
base_directory: {temp_dir}
glob_directive: '*.csv'
default_regex:
pattern: (.+)_(\\d+)\\.csv
group_names:
- letter
- number
NOT_A_REAL_KEY: nothing
"""
datasource = empty_data_context_stats_enabled.test_yaml_config(
yaml_config=second_config
)
assert (
"NOT_A_REAL_KEY"
not in datasource.config["data_connectors"]["my_filesystem_data_connector"]
)
assert mock_emit.call_count == 3
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[2][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[2][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[2][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list.append(
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
),
)
assert mock_emit.call_args_list == expected_call_args_list
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_config_variables_in_test_yaml_config(
mock_emit, empty_data_context_stats_enabled, sa
):
context: DataContext = empty_data_context_stats_enabled
db_file = file_relative_path(
__file__,
os.path.join("..", "test_sets", "test_cases_for_sql_data_connector.db"),
)
context.save_config_variable("db_file", db_file)
context.save_config_variable(
"data_connector_name", "my_very_awesome_data_connector"
)
context.save_config_variable("suffix", "__whole_table")
context.save_config_variable("sampling_n", "10")
print(context.config_variables)
first_config = """
class_name: SimpleSqlalchemyDatasource
connection_string: sqlite:///${db_file}
introspection:
${data_connector_name}:
data_asset_name_suffix: ${suffix}
sampling_method: _sample_using_limit
sampling_kwargs:
n: ${sampling_n}
"""
my_datasource = context.test_yaml_config(first_config)
assert (
"test_cases_for_sql_data_connector.db"
in my_datasource.execution_engine.connection_string
)
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "SimpleSqlalchemyDatasource",
"anonymized_execution_engine": {
"parent_class": "SqlAlchemyExecutionEngine"
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetSqlDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
report_object = context.test_yaml_config(first_config, return_mode="report_object")
print(json.dumps(report_object, indent=2))
assert report_object["data_connectors"]["count"] == 1
assert set(report_object["data_connectors"].keys()) == {
"count",
"my_very_awesome_data_connector",
}
assert mock_emit.call_count == 2
expected_call_args_list.append(
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "SimpleSqlalchemyDatasource",
"anonymized_execution_engine": {
"parent_class": "SqlAlchemyExecutionEngine"
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetSqlDataConnector",
}
],
},
"success": True,
}
),
)
assert mock_emit.call_args_list == expected_call_args_list
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_golden_path_sql_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, sa, test_connectable_postgresql_db
):
"""Tests the golden path for setting up a StreamlinedSQLDatasource using test_yaml_config"""
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
# Everything below this line (except for asserts) is what we expect users to run as part of the golden path.
import great_expectations as ge
context = ge.get_context()
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
yaml_config = f"""
class_name: SimpleSqlalchemyDatasource
credentials:
drivername: postgresql
username: postgres
password: ""
host: {db_hostname}
port: 5432
database: test_ci
introspection:
whole_table_with_limits:
sampling_method: _sample_using_limit
sampling_kwargs:
n: 10
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
assert mock_emit.call_count == 2
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[1][0][0]["event_payload"][
"anonymized_name"
]
anonymized_data_connector_name = mock_emit.call_args_list[1][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "SimpleSqlalchemyDatasource",
"anonymized_execution_engine": {
"parent_class": "SqlAlchemyExecutionEngine"
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetSqlDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
print(json.dumps(report_object, indent=2))
print(context.datasources)
my_batch = context.get_batch(
"my_datasource",
"whole_table_with_limits",
"test_df",
)
# assert len(my_batch.data.fetchall()) == 10
with pytest.raises(KeyError):
my_batch = context.get_batch(
"my_datasource",
"whole_table_with_limits",
"DOES_NOT_EXIST",
)
my_validator = context.get_validator(
datasource_name="my_datasource",
data_connector_name="whole_table_with_limits",
data_asset_name="test_df",
expectation_suite=ExpectationSuite("my_expectation_suite"),
)
my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])
print(my_evr)
# my_evr = my_validator.expect_column_values_to_be_between(
# column="x",
# min_value=0,
# max_value=4,
# )
# assert my_evr.success
# TODO: <Alex>ALEX</Alex>
# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["a", "b", "c"])
# assert my_evr.success
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_golden_path_inferred_asset_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_charlie/A/A-1.csv",
"test_dir_charlie/A/A-2.csv",
"test_dir_charlie/A/A-3.csv",
"test_dir_charlie/B/B-1.csv",
"test_dir_charlie/B/B-2.csv",
"test_dir_charlie/B/B-3.csv",
"test_dir_charlie/C/C-1.csv",
"test_dir_charlie/C/C-2.csv",
"test_dir_charlie/C/C-3.csv",
"test_dir_charlie/D/D-1.csv",
"test_dir_charlie/D/D-2.csv",
"test_dir_charlie/D/D-3.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {base_directory}/test_dir_charlie
glob_directive: "*/*.csv"
default_regex:
pattern: (.+)/(.+)-(\\d+)\\.csv
group_names:
- subdirectory
- data_asset_name
- number
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
# print(json.dumps(report_object, indent=2))
# print(context.datasources)
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="A",
batch_identifiers={
"number": "2",
},
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
assert my_batch.batch_definition["data_asset_name"] == "A"
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 2
df_data = my_batch.data.dataframe
assert df_data.shape == (10, 10)
df_data["date"] = df_data.apply(
lambda row: datetime.datetime.strptime(row["date"], "%Y-%m-%d").date(), axis=1
)
assert (
test_df[
(test_df["date"] == datetime.date(2020, 1, 15))
| (test_df["date"] == datetime.date(2020, 1, 29))
]
.drop("timestamp", axis=1)
.equals(df_data.drop("timestamp", axis=1))
)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="DOES_NOT_EXIST",
)
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 3
my_validator = context.get_validator(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="D",
data_connector_query={"batch_filter_parameters": {"number": "3"}},
expectation_suite=ExpectationSuite("my_expectation_suite"),
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 4
my_evr = my_validator.expect_column_values_to_be_between(
column="d", min_value=1, max_value=31
)
assert my_evr.success
# TODO: <Alex>ALEX</Alex>
# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["x", "y", "z"])
# assert my_evr.success
# No other usage stats calls detected
# assert mock_emit.call_count == 1
assert mock_emit.call_count == 4
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_golden_path_configured_asset_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
    Tests the golden path for ConfiguredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
"test_dir_foxtrot/D/D-aaa.csv",
"test_dir_foxtrot/D/D-bbb.csv",
"test_dir_foxtrot/D/D-ccc.csv",
"test_dir_foxtrot/D/D-ddd.csv",
"test_dir_foxtrot/D/D-eee.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {base_directory}
# glob_directive: "*"
default_regex:
pattern: (.+)\\.csv
group_names:
- alphanumeric
assets:
A:
base_directory: {base_directory}/test_dir_foxtrot/A
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- number
B:
base_directory: {base_directory}/test_dir_foxtrot/B
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- number
C:
base_directory: {base_directory}/test_dir_foxtrot/C
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- year
D:
base_directory: {base_directory}/test_dir_foxtrot/D
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- checksum
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
# print(json.dumps(report_object, indent=2))
# print(context.datasources)
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "ConfiguredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="A",
batch_identifiers={
"number": "2",
},
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
assert my_batch.batch_definition["data_asset_name"] == "A"
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 2
my_batch.head()
df_data = my_batch.data.dataframe
assert df_data.shape == (10, 10)
df_data["date"] = df_data.apply(
lambda row: datetime.datetime.strptime(row["date"], "%Y-%m-%d").date(), axis=1
)
assert (
test_df[
(test_df["date"] == datetime.date(2020, 1, 15))
| (test_df["date"] == datetime.date(2020, 1, 29))
]
.drop("timestamp", axis=1)
.equals(df_data.drop("timestamp", axis=1))
)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="DOES_NOT_EXIST",
)
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 3
my_validator = context.get_validator(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="C",
data_connector_query={"batch_filter_parameters": {"year": "2019"}},
create_expectation_suite_with_name="my_expectations",
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
my_evr = my_validator.expect_column_values_to_be_between(
column="d", min_value=1, max_value=31
)
assert my_evr.success
# "DataContext.get_batch()" calls "DataContext.get_batch_list()" (decorated by "@usage_statistics_enabled_method").
assert mock_emit.call_count == 4
# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["x", "y", "z"])
# assert my_evr.success
# No other usage stats calls detected
assert mock_emit.call_count == 4
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_golden_path_runtime_data_connector_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
    Tests output of test_yaml_config() for a DataContext configured with a Datasource with a
    RuntimeDataConnector. Even though the test directory contains multiple files that can be read in
    by GE, the RuntimeDataConnector will output 0 data_assets and return a "note" to the user.
    This is because the RuntimeDataConnector is not aware of data_assets until they are passed in
    through the RuntimeBatchRequest (see the sketch below).
    The test asserts that the proper number of data_asset_names is returned and that the note is returned to the user.
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_charlie/A/A-1.csv",
"test_dir_charlie/A/A-2.csv",
"test_dir_charlie/A/A-3.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
assert report_object["execution_engine"] == {
"caching": True,
"module_name": "great_expectations.execution_engine.pandas_execution_engine",
"class_name": "PandasExecutionEngine",
"discard_subset_failing_expectations": False,
"boto3_options": {},
"azure_options": {},
"gcs_options": {},
}
assert report_object["data_connectors"]["count"] == 1
# checking the correct number of data_assets have come back
assert (
report_object["data_connectors"]["default_runtime_data_connector_name"][
"data_asset_count"
]
== 0
)
# checking that note has come back
assert (
report_object["data_connectors"]["default_runtime_data_connector_name"]["note"]
== "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest"
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_golden_path_runtime_data_connector_and_inferred_data_connector_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
    Tests output of test_yaml_config() for a DataContext configured with a Datasource with an InferredAssetFilesystemDataConnector
    and a RuntimeDataConnector.
    1. The InferredAssetFilesystemDataConnector will output 4 data_assets, which correspond to the files in the test_dir_charlie folder.
    2. The RuntimeDataConnector will output 0 data_assets, and return a "note" to the user. This is because the
    RuntimeDataConnector is not aware of data_assets until they are passed in through the RuntimeBatchRequest.
    The test asserts that the proper number of data_asset_names is returned for both DataConnectors, and in the case of
    the RuntimeDataConnector, that the proper note is returned to the user.
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_charlie/A/A-1.csv",
"test_dir_charlie/A/A-2.csv",
"test_dir_charlie/A/A-3.csv",
"test_dir_charlie/B/B-1.csv",
"test_dir_charlie/B/B-2.csv",
"test_dir_charlie/B/B-3.csv",
"test_dir_charlie/C/C-1.csv",
"test_dir_charlie/C/C-2.csv",
"test_dir_charlie/C/C-3.csv",
"test_dir_charlie/D/D-1.csv",
"test_dir_charlie/D/D-2.csv",
"test_dir_charlie/D/D-3.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetFilesystemDataConnector
base_directory: {base_directory}/test_dir_charlie
glob_directive: "*/*.csv"
default_regex:
pattern: (.+)/(.+)-(\\d+)\\.csv
group_names:
- subdirectory
- data_asset_name
- number
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
assert report_object["execution_engine"] == {
"caching": True,
"module_name": "great_expectations.execution_engine.pandas_execution_engine",
"class_name": "PandasExecutionEngine",
"discard_subset_failing_expectations": False,
"boto3_options": {},
"azure_options": {},
"gcs_options": {},
}
assert report_object["data_connectors"]["count"] == 2
assert report_object["data_connectors"]["default_runtime_data_connector_name"] == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"data_assets": {},
"example_data_asset_names": [],
"example_unmatched_data_references": [],
"note": "RuntimeDataConnector will not have data_asset_names until they are "
"passed in through RuntimeBatchRequest",
"unmatched_data_reference_count": 0,
}
assert report_object["data_connectors"]["default_inferred_data_connector_name"] == {
"class_name": "InferredAssetFilesystemDataConnector",
"data_asset_count": 4,
"example_data_asset_names": ["A", "B", "C"],
"data_assets": {
"A": {
"batch_definition_count": 3,
"example_data_references": ["A/A-1.csv", "A/A-2.csv", "A/A-3.csv"],
},
"B": {
"batch_definition_count": 3,
"example_data_references": ["B/B-1.csv", "B/B-2.csv", "B/B-3.csv"],
},
"C": {
"batch_definition_count": 3,
"example_data_references": ["C/C-1.csv", "C/C-2.csv", "C/C-3.csv"],
},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
| 35.223903
| 125
| 0.624812
|
4a16cfa31e4fc43a957504472884ec1782b7ce2c
| 308
|
py
|
Python
|
hackerrank/Algorithms/Tower Breakers - The Final Battle/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerrank/Algorithms/Tower Breakers - The Final Battle/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerrank/Algorithms/Tower Breakers - The Final Battle/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.towerBreakers(4), 6)
self.assertEqual(solution.towerBreakers(2), 4)
self.assertEqual(solution.towerBreakers(7), 8)
if __name__ == '__main__':
unittest.main()
| 20.533333
| 54
| 0.701299
|
4a16d1d626fb18f550fabd509da30d0c50a20dcd
| 1,176
|
py
|
Python
|
src/olympia/amo/urls.py
|
anik31/addons-server
|
cecb61da98d6e830fb45a2b1d61b41e72812137e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/urls.py
|
anik31/addons-server
|
cecb61da98d6e830fb45a2b1d61b41e72812137e
|
[
"BSD-3-Clause"
] | 760
|
2021-05-17T07:59:30.000Z
|
2022-03-31T11:14:15.000Z
|
src/olympia/amo/urls.py
|
championshuttler/addons-server
|
5d4c1bfbed2fc509ecc1f3f5065955996e057eeb
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import include, path, re_path
from django.views.decorators.cache import never_cache
from . import views
from .utils import render_xml
services_patterns = [
re_path(r'^monitor\.json$', never_cache(views.monitor), name='amo.monitor'),
re_path(r'^loaded$', never_cache(views.loaded), name='amo.loaded'),
re_path(r'^403', views.handler403),
re_path(r'^404', views.handler404),
re_path(r'^500', views.handler500),
]
api_patterns = [
re_path(r'^site/$', views.SiteStatusView.as_view(), name='amo-site-status'),
]
urlpatterns = [
re_path(r'^robots\.txt$', views.robots, name='robots.txt'),
re_path(r'^contribute\.json$', views.contribute, name='contribute.json'),
re_path(r'^services/', include(services_patterns)),
re_path(r'^__version__$', views.version, name='version.json'),
re_path(
r'^opensearch\.xml$',
render_xml,
{'template': 'amo/opensearch.xml'},
name='amo.opensearch',
),
re_path(
r'^fake-fxa-authorization/$',
views.fake_fxa_authorization,
name='fake-fxa-authorization',
),
path('sitemap.xml', views.sitemap, name='amo.sitemap'),
]
| 30.947368
| 80
| 0.659864
|
4a16d1dc520c72759cb9231b9921c8eaeb0c8304
| 9,525
|
py
|
Python
|
Python/tdw/object_init_data.py
|
tljstewart/tdw
|
61d8afd765c5fd861681bbadaf6c19040b4e2d67
|
[
"BSD-2-Clause"
] | 1
|
2021-02-21T19:53:22.000Z
|
2021-02-21T19:53:22.000Z
|
Python/tdw/object_init_data.py
|
tljstewart/tdw
|
61d8afd765c5fd861681bbadaf6c19040b4e2d67
|
[
"BSD-2-Clause"
] | null | null | null |
Python/tdw/object_init_data.py
|
tljstewart/tdw
|
61d8afd765c5fd861681bbadaf6c19040b4e2d67
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import Dict, List, Tuple
from tdw.tdw_utils import TDWUtils
from tdw.controller import Controller
from tdw.librarian import ModelLibrarian
from tdw.py_impact import AudioMaterial, PyImpact, ObjectInfo
class TransformInitData:
"""
Basic initialization parameters for an object. Can be converted to and from a list of commands.
This is similar to [`Controller.get_add_object()`](controller.md) except that it includes more parameters.
"""
LIBRARIES: Dict[str, ModelLibrarian] = dict()
for _lib_file in ModelLibrarian.get_library_filenames():
LIBRARIES[_lib_file] = ModelLibrarian(_lib_file)
def __init__(self, name: str, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
        :param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`.
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
"""
if position is None:
self.position = TDWUtils.VECTOR3_ZERO
else:
self.position = position
if rotation is None:
self.rotation = {"w": 1, "x": 0, "y": 0, "z": 0}
else:
self.rotation = rotation
if scale_factor is None:
self.scale_factor = {"x": 1, "y": 1, "z": 1}
else:
self.scale_factor = scale_factor
self.name = name
self.library = library
self.kinematic = kinematic
self.gravity = gravity
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode]`
"""
record = TransformInitData.LIBRARIES[self.library].get_record(name=self.name)
object_id = Controller.get_unique_id()
commands = [{"$type": "add_object",
"name": record.name,
"url": record.get_url(),
"scale_factor": record.scale_factor,
"position": self.position,
"category": record.wcategory,
"id": object_id}]
# The rotation is a quaternion.
if "w" in self.rotation:
commands.append({"$type": "rotate_object_to",
"rotation": self.rotation,
"id": object_id})
# The rotation is in Euler angles.
else:
commands.append({"$type": "rotate_object_to_euler_angles",
"euler_angles": self.rotation,
"id": object_id})
commands.extend([{"$type": "scale_object",
"scale_factor": self.scale_factor,
"id": object_id},
{"$type": "set_kinematic_state",
"id": object_id,
"is_kinematic": self.kinematic,
"use_gravity": self.gravity}])
# Kinematic objects must be continuous_speculative.
if self.kinematic:
commands.append({"$type": "set_object_collision_detection_mode",
"id": object_id,
"mode": "continuous_speculative"})
return object_id, commands
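# A minimal usage sketch for TransformInitData (illustrative; "iron_box" is assumed
# to be a record in models_core.json and is not verified here):
#
#   init_data = TransformInitData(name="iron_box", position={"x": 0, "y": 1, "z": 0})
#   object_id, commands = init_data.get_commands()
#   # The commands can then be sent to the build, e.g. via a Controller's communicate().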
class RigidbodyInitData(TransformInitData):
"""
A subclass of `TransformInitData`. Includes data and commands to set the mass and physic material of the object.
"""
def __init__(self, name: str, mass: float, dynamic_friction: float, static_friction: float, bounciness: float, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
        :param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`.
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
:param mass: The mass of the object.
        :param dynamic_friction: The [dynamic friction](../api/command_api.md#set_physic_material) of the object.
        :param static_friction: The [static friction](../api/command_api.md#set_physic_material) of the object.
        :param bounciness: The [bounciness](../api/command_api.md#set_physic_material) of the object.
"""
super().__init__(name=name, library=library, scale_factor=scale_factor, position=position, rotation=rotation,
kinematic=kinematic, gravity=gravity)
self.mass = mass
self.dynamic_friction = dynamic_friction
self.static_friction = static_friction
self.bounciness = bounciness
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode, set_mass, set_physic_material]`
"""
object_id, commands = super().get_commands()
# Set the mass and physic material.
commands.extend([{"$type": "set_mass",
"mass": self.mass,
"id": object_id},
{"$type": "set_physic_material",
"dynamic_friction": self.dynamic_friction,
"static_friction": self.static_friction,
"bounciness": self.bounciness,
"id": object_id}])
return object_id, commands
class AudioInitData(RigidbodyInitData):
"""
A subclass of `RigidbodyInitData` that includes [audio values](py_impact.md#objectinfo).
Physics values are derived from these audio values.
"""
_DYNAMIC_FRICTION = {AudioMaterial.ceramic: 0.47,
AudioMaterial.hardwood: 0.35,
AudioMaterial.wood: 0.35,
AudioMaterial.cardboard: 0.47,
AudioMaterial.glass: 0.65,
AudioMaterial.metal: 0.43}
_STATIC_FRICTION = {AudioMaterial.ceramic: 0.47,
AudioMaterial.hardwood: 0.4,
AudioMaterial.wood: 0.4,
AudioMaterial.cardboard: 0.47,
AudioMaterial.glass: 0.65,
AudioMaterial.metal: 0.52}
AUDIO = PyImpact.get_object_info()
def __init__(self, name: str, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True, audio: ObjectInfo = None):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
        :param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`.
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
:param audio: If None, derive physics data from the audio data in `PyImpact.get_object_info()` (if the object isn't in this dictionary, this constructor will throw an error). If not None, use these values instead of the default audio values.
"""
if audio is None:
self.audio = AudioInitData.AUDIO[name]
else:
self.audio = audio
super().__init__(name=name, library=library, scale_factor=scale_factor, position=position, rotation=rotation,
kinematic=kinematic, gravity=gravity, mass=self.audio.mass,
dynamic_friction=AudioInitData._DYNAMIC_FRICTION[self.audio.material],
static_friction=AudioInitData._STATIC_FRICTION[self.audio.material],
bounciness=self.audio.bounciness)
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode, set_mass, set_physic_material]`
"""
return super().get_commands()
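# A minimal sketch of the audio-driven path (illustrative; assumes "iron_box" has an
# entry in PyImpact.get_object_info(); if a model is missing from that dictionary,
# the constructor raises a KeyError):
#
#   audio_init = AudioInitData(name="iron_box")
#   object_id, commands = audio_init.get_commands()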
| 53.212291
| 306
| 0.608819
|
4a16d215c42b856a2c4670de2cbb11acedf7528e
| 3,167
|
py
|
Python
|
code/correlation_analysis_scripts.py
|
berkeley-stat159/project-zeta-2
|
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
|
[
"BSD-3-Clause"
] | null | null | null |
code/correlation_analysis_scripts.py
|
berkeley-stat159/project-zeta-2
|
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
|
[
"BSD-3-Clause"
] | null | null | null |
code/correlation_analysis_scripts.py
|
berkeley-stat159/project-zeta-2
|
7c35423fbc1407751e1aea6aac99d5d02a82dfdc
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib import colors
import pandas as pd
# from matplotlib import rcParams
# rcParams.update({'figure.autolayout': True})
#object_list
object_list = ["bottle", "cat", "chair", "face", "house", "scissors", "scrambledpix", "shoe"]
# important path:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
figure_path = os.path.join(base_path, "code", "images", "")
file_path = os.path.join(base_path, "code", "txt", "")
# color display
nice_cmap_values = np.loadtxt(file_path + 'actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# generate list for odd and even run values:
odd_runs = ["odd_%s" % i for i in object_list]
even_runs = ["even_%s" % i for i in object_list]
# load even and odd run results
all_runs = {}
for i in odd_runs:
all_runs[i] = np.loadtxt(file_path + i + ".txt")
for i in even_runs:
all_runs[i] = np.loadtxt(file_path + i + ".txt")
# reshape to 3d images
all_3d = {}
for key, txt in all_runs.items():
all_3d[key] = np.reshape(txt, (-1, 25, 1))
# save each 3d image as figure
for key, fig in all_3d.items():
plt.imshow(fig[:, :, 0], interpolation="nearest", cmap=nice_cmap)
plt.title("%s" % key)
plt.savefig(figure_path + "%s.png" % key)
plt.clf()
plt.close()
# save all 3d images as one compiled figure
fig = plt.figure(figsize=[8.0, 5])
i = 1
for item in object_list:
plt.subplot(2, 8, i)
plt.imshow(all_3d["odd_%s" % item][:, :, 0], interpolation="nearest", cmap=nice_cmap)
plt.title("%s" % item, fontsize=8, weight='bold')
plt.axis('off')
i += 1
for item in object_list:
plt.subplot(2, 8, i)
plt.imshow(all_3d["even_%s" % item][:, :, 0], interpolation="nearest", cmap=nice_cmap)
plt.axis('off')
i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)
# label the figure:
fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')
fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')
fig.text(0.16, 0.93, 'Average brain images for odd euns / even runs', fontsize=16, weight='bold')
plt.savefig(figure_path + "odd_even_compile.png")
plt.close()
# Run correlation:
all_results = []
print ("correlation analysis:")
for i in odd_runs:
result = []
for j in even_runs:
corr = np.corrcoef(all_runs[i], all_runs[j])
result.append("%.4f" % corr[0, 1])
print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
all_results.append(result)
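# Note: np.corrcoef(x, y) returns the full 2x2 correlation matrix; the
# off-diagonal entry [0, 1] used above is Pearson's r between the two run vectors.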
table_result = np.array(all_results)
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=table_result, colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.3, 0.85, "Correlation between odd runs and even runs", weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "correlation_table.png")
print ("Complete!!!")
| 33.691489
| 118
| 0.672245
|
4a16d241dfbd79d7309ea1fc7a183cbd1ece3c91
| 593
|
py
|
Python
|
tests/__init__.py
|
farsightsec/axamd_client
|
560dfe9a8e23163597bf4efddfa5843669ec4ba5
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2016-11-01T21:52:14.000Z
|
2016-11-01T21:52:14.000Z
|
tests/__init__.py
|
farsightsec/axamd_client
|
560dfe9a8e23163597bf4efddfa5843669ec4ba5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
farsightsec/axamd_client
|
560dfe9a8e23163597bf4efddfa5843669ec4ba5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 by Farsight Security, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 42.357143
| 74
| 0.76054
|
4a16d280dbb9b3db7677ff1b3a70fdc5c64c3090
| 5,539
|
py
|
Python
|
misc/tools/statserver/settings.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 11
|
2015-07-13T13:36:44.000Z
|
2021-11-15T08:07:25.000Z
|
misc/tools/statserver/settings.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 13
|
2015-03-21T14:08:31.000Z
|
2021-05-18T18:47:58.000Z
|
misc/tools/statserver/settings.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 12
|
2015-01-07T02:16:50.000Z
|
2021-05-18T08:25:31.000Z
|
# META-SHARE statistics server
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ROOT_PATH = os.getcwd()
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# the server checks the daily statistics of the nodes every CHECKINGTIME seconds
CHECKINGTIME = 300.0
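# A minimal sketch of how such a poll loop might consume this setting (illustrative
# only; check_daily_statistics is a hypothetical helper, not the actual server code):
#
#   import time
#   while True:
#       check_daily_statistics()
#       time.sleep(CHECKINGTIME)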
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': PROJECT_ROOT + '/metastats.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Rome'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '/media/')
MEDIA_ROOT = '{0}/media/'.format(ROOT_PATH)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
#MEDIA_URL = '/media/'
MEDIA_URL = os.path.join(os.path.dirname(__file__), '/media/')
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ihw%vco2r4$5(i&lm^fxc-#y2pp#sn03!b!kt5e*&2!t5i!rqi'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'statserver.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, "stats"),
# Other settings...
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'statserver.stats',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 34.61875
| 143
| 0.693808
|
4a16d31c9135edad239080d82cb5d6208877d8fa
| 687
|
py
|
Python
|
pos/webpos/migrations/0009_auto_20141229_1626.py
|
NonnEmilia/OpenGenfri
|
7061957fb13ef824763922e1891cb72f7d51bb0f
|
[
"MIT"
] | null | null | null |
pos/webpos/migrations/0009_auto_20141229_1626.py
|
NonnEmilia/OpenGenfri
|
7061957fb13ef824763922e1891cb72f7d51bb0f
|
[
"MIT"
] | null | null | null |
pos/webpos/migrations/0009_auto_20141229_1626.py
|
NonnEmilia/OpenGenfri
|
7061957fb13ef824763922e1891cb72f7d51bb0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('webpos', '0008_bill_date'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AddField(
model_name='bill',
name='server',
field=models.ForeignKey(default=2, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 25.444444
| 76
| 0.622999
|
4a16d34cbb523ee887ae1f3de5697e98b06cfab2
| 3,798
|
py
|
Python
|
tests/unit_tests/test_redis_database.py
|
Curtis241/taskmgr
|
ac485395d189e0c150e87bab8807b42d341545ed
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_redis_database.py
|
Curtis241/taskmgr
|
ac485395d189e0c150e87bab8807b42d341545ed
|
[
"MIT"
] | 4
|
2021-03-25T22:39:57.000Z
|
2021-07-19T05:46:38.000Z
|
tests/unit_tests/test_redis_database.py
|
Curtis241/taskmgr
|
ac485395d189e0c150e87bab8807b42d341545ed
|
[
"MIT"
] | null | null | null |
import unittest
from taskmgr.lib.model.database import RedisDatabase, DatabaseObject
from taskmgr.lib.model.snapshot import Snapshot
from taskmgr.lib.model.task import Task
from taskmgr.lib.presenter.snapshots import Snapshots
class MissingIterMethod(DatabaseObject):
def deserialize(self, obj_dict):
pass
def __init__(self):
super().__init__(self.__class__.__name__)
class MissingSubclass:
pass
class TestRedisDatabase(unittest.TestCase):
def setUp(self) -> None:
self.redis_db = RedisDatabase("localhost", 6379)
self.redis_db.clear()
self.task1 = Task("task1")
self.task1.index = 1
self.task2 = Task("task2")
self.task2.index = 2
self.task3 = Task("task3")
self.task3.index = 3
self.task4 = Task("task4")
self.task4.index = 4
self.task5 = Task("task5")
self.task5.index = 5
self.task6 = Task("task6")
self.task6.index = 6
self.task7 = Task("task7")
self.task7.index = 7
self.task8 = Task("task8")
self.task8.index = 8
self.task9 = Task("task9")
self.task9.index = 9
self.task10 = Task("task10")
self.task10.index = 10
def tearDown(self) -> None:
self.redis_db.clear()
    def test_multiple_inserts_should_not_create_duplicates(self):
self.redis_db.initialize(Task())
self.redis_db.clear()
self.redis_db.set([self.task1, self.task2, self.task3, self.task4, self.task5,
self.task6, self.task7, self.task8, self.task9, self.task10])
self.redis_db.set([self.task1, self.task2, self.task3, self.task4, self.task5,
self.task6, self.task7, self.task8, self.task9, self.task10])
task_list = self.redis_db.to_object_list(self.redis_db.get(), Task())
self.assertTrue(len(task_list) == 10)
def test_object_serialization(self):
self.redis_db.initialize(Task())
self.redis_db.clear()
self.redis_db.set([self.task1])
task_list = self.redis_db.to_object_list(self.redis_db.get(), Task())
self.assertTrue(len(task_list) == 1)
task = task_list[0]
self.assertEqual(self.task1.text, task.text)
self.assertEqual(self.task1.index, task.index)
self.assertEqual(self.task1.unique_id, task.unique_id)
self.assertEqual(self.task1.project, task.project)
self.assertEqual(self.task1.date_expression, task.date_expression)
self.assertEqual(self.task1.priority, task.priority)
self.assertEqual(self.task1.label, task.label)
def test_object_must_be_subclass_of_DatabaseObject(self):
self.redis_db.initialize(Task())
with self.assertRaises(ValueError):
self.redis_db.set([MissingSubclass])
def test_object_must_contain_iter_method(self):
self.redis_db.initialize(Task())
with self.assertRaises(TypeError):
self.redis_db.set([MissingIterMethod()])
def test_save_object(self):
self.redis_db.initialize(Snapshot())
self.assertIsNotNone(self.redis_db.db)
snapshot = Snapshot()
snapshot.count = 22
self.redis_db.set([snapshot])
snapshot_list = self.redis_db.get()
self.assertTrue(len(snapshot_list) == 1)
snapshot_dict = snapshot_list[0]
self.assertTrue(snapshot_dict["count"] == 22)
self.redis_db.initialize(Task())
self.assertIsNotNone(self.redis_db.db)
task = Task()
task.project = "work"
self.redis_db.set([task])
task_list = self.redis_db.get()
self.assertTrue(len(task_list) == 1)
task_dict = task_list[0]
self.assertTrue(task_dict["project"] == "work")
| 33.910714
| 89
| 0.64297
|
4a16d3cf936f3f26a471b142eab5b6cfc523cb12
| 4,249
|
py
|
Python
|
iogt/settings/base.py
|
michaelclapham/iogt
|
faf0fd0444da9f08f3488fd52a93c89c307ab728
|
[
"BSD-2-Clause"
] | null | null | null |
iogt/settings/base.py
|
michaelclapham/iogt
|
faf0fd0444da9f08f3488fd52a93c89c307ab728
|
[
"BSD-2-Clause"
] | null | null | null |
iogt/settings/base.py
|
michaelclapham/iogt
|
faf0fd0444da9f08f3488fd52a93c89c307ab728
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Django settings for iogt project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'iogt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iogt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# JavaScript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "iogt"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
| 26.067485
| 95
| 0.699694
|
4a16d5c5716483a211d32bf2539080ad90eb3095
| 546
|
py
|
Python
|
server/src/project_n/app/net/initconfig.py
|
isuhao/gamein9miao
|
df8624b0e3223a12eb1dc833ce8fa89fd715aa5b
|
[
"MIT"
] | 1
|
2018-04-18T02:38:14.000Z
|
2018-04-18T02:38:14.000Z
|
server/src/project_n/app/net/initconfig.py
|
isuhao/gamein9miao
|
df8624b0e3223a12eb1dc833ce8fa89fd715aa5b
|
[
"MIT"
] | null | null | null |
server/src/project_n/app/net/initconfig.py
|
isuhao/gamein9miao
|
df8624b0e3223a12eb1dc833ce8fa89fd715aa5b
|
[
"MIT"
] | null | null | null |
#coding:utf8
'''
Created on 2013-10-25
@author: lan (www.9miao.com)
'''
from firefly.server.globalobject import GlobalObject
from firefly.netconnect.datapack import DataPackProtoc
def callWhenConnLost(conn):
dynamicId = conn.transport.sessionno
GlobalObject().remote['gate'].callRemote("netconnlost",dynamicId)
GlobalObject().netfactory.doConnectionLost = callWhenConnLost
dataprotocl = DataPackProtoc(78,37,38,48,9,0)
GlobalObject().netfactory.setDataProtocl(dataprotocl)
def loadModule():
import netapp
import gatenodeapp
| 23.73913
| 69
| 0.782051
|
4a16d5c9de2203f1cb6df0ad45e83ccc5b56ea13
| 29,787
|
py
|
Python
|
pcdet/models/bbox_heads/anchor_target_assigner.py
|
charlesyz/PCDet
|
1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/bbox_heads/anchor_target_assigner.py
|
charlesyz/PCDet
|
1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/bbox_heads/anchor_target_assigner.py
|
charlesyz/PCDet
|
1eb6b1dc5a3d563d7532b1c8ee3be007cbeafc80
|
[
"Apache-2.0"
] | null | null | null |
# This file is modified from https://github.com/traveller59/second.pytorch
import numpy as np
import numpy.random as npr
import numba
from ...utils import common_utils
def unmap(data, count, inds, fill=0):
    '''Unmap a subset of items (data) back to the original set of items (of
    size count)'''
if count == len(inds):
return data
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=data.dtype)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[inds, :] = data
return ret
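# Example (a minimal sketch, not part of the original code): with count=4 and
# inds=[0, 2], the two values are scattered back to their original positions
# and the remaining slots take the fill value:
#   unmap(np.array([1., 2.]), 4, np.array([0, 2]), fill=0)
#   -> array([1., 0., 2., 0.])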
def create_anchors_3d_range(feature_size,
anchor_range,
sizes=((1.6, 3.9, 1.56),),
rotations=(0, np.pi / 2),
dtype=np.float32):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
anchor_range = np.array(anchor_range, dtype)
z_centers = np.linspace(
anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)
y_centers = np.linspace(
anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)
x_centers = np.linspace(
anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
rets = np.meshgrid(
x_centers, y_centers, z_centers, rotations, indexing='ij')
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = np.tile(sizes, tile_size_shape)
rets.insert(3, sizes)
ret = np.concatenate(rets, axis=-1)
return np.transpose(ret, [2, 1, 0, 3, 4, 5])
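# Shape sketch (illustrative values, not from any config): for a feature_size
# of (1, 2, 2) as (D, H, W), one anchor size and two rotations,
#   create_anchors_3d_range((1, 2, 2), [0, -40, -3, 70, 40, 1]).shape
#   -> (1, 2, 2, 1, 2, 7)
# i.e. [*feature_size, num_sizes, num_rots, 7] as the docstring states.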
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
"""
ndim = int(dims.shape[1])
corners_norm = np.stack(
np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(
dims.dtype)
# now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
# (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
# so need to convert to a format which is convenient to do other computing.
# for 2d boxes, format is clockwise start with minimum point
# for 3d boxes, please draw lines by your hand.
if ndim == 2:
# generate clockwise box corners
corners_norm = corners_norm[[0, 1, 3, 2]]
elif ndim == 3:
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(
[1, 2 ** ndim, ndim])
return corners
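# Example for the 2d case (values chosen for illustration): a single box with
# dims (2, 4) and the default centered origin yields clockwise corners:
#   corners_nd(np.array([[2., 4.]]))
#   -> array([[[-1., -2.], [-1., 2.], [1., 2.], [1., -2.]]])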
def center_to_minmax_2d_0_5(centers, dims):
return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
"""
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
return np.einsum('aij,jka->aik', points, rot_mat_T)
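# Example (illustrative): rotating the point (1, 0) by pi/2 maps it to
# (0, -1), i.e. clockwise for positive angles, as the docstring says:
#   rotation_2d(np.array([[[1., 0.]]]), np.array([np.pi / 2]))
#   -> array([[[0., -1.]]])  (up to floating point error)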
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
        float array, shape=[N, 4, 2]: corner points of each box.
"""
# 'length' in kitti format is in x axis.
# xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
# center in kitti format is [0.5, 1.0, 0.5] in xyz.
corners = corners_nd(dims, origin=origin)
# corners: [N, 4, 2]
if angles is not None:
corners = rotation_2d(corners, angles)
corners += centers.reshape([-1, 1, 2])
return corners
def center_to_minmax_2d(centers, dims, origin=0.5):
if origin == 0.5:
return center_to_minmax_2d_0_5(centers, dims)
corners = center_to_corner_box2d(centers, dims, origin=origin)
return corners[:, [0, 2]].reshape([-1, 4])
def rbbox2d_to_near_bbox(rbboxes):
"""convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
"""
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(common_utils.limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
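# Example (illustrative, assuming the usual pcdet limit_period semantics): a
# box at the origin with (xdim, ydim) = (4, 2) rotated by pi/2 is closer to a
# "lying" box, so its dims are swapped before taking the min/max corners:
#   rbbox2d_to_near_bbox(np.array([[0., 0., 4., 2., np.pi / 2]]))
#   -> array([[-1., -2., 1., 2.]])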
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=0.0):
"""calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) *
(query_boxes[k, 3] - query_boxes[k, 1] + eps))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) -
max(boxes[n, 0], query_boxes[k, 0]) + eps)
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) -
max(boxes[n, 1], query_boxes[k, 1]) + eps)
if ih > 0:
ua = (
(boxes[n, 2] - boxes[n, 0] + eps) *
(boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih)
overlaps[n, k] = iw * ih / ua
return overlaps
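# Example (illustrative): two 2x2 boxes overlapping in a 1x1 region share an
# IoU of 1 / (4 + 4 - 1):
#   iou_jit(np.array([[0., 0., 2., 2.]]), np.array([[1., 1., 3., 3.]]))
#   -> array([[0.14285714]])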
class AnchorGeneratorRange(object):
def __init__(self, anchor_ranges, sizes=((1.6, 3.9, 1.56),), rotations=(0, np.pi / 2), class_name=None,
match_threshold=-1, unmatch_threshold=-1, custom_values=None, dtype=np.float32, feature_map_size=None):
self._sizes = sizes
self._anchor_ranges = anchor_ranges
self._rotations = rotations
self._dtype = dtype
self._class_name = class_name
self._match_threshold = match_threshold
self._unmatch_threshold = unmatch_threshold
self._custom_values = custom_values
self._feature_map_size = feature_map_size
@property
def class_name(self):
return self._class_name
@property
def match_threshold(self):
return self._match_threshold
@property
def unmatch_threshold(self):
return self._unmatch_threshold
@property
def custom_values(self):
return self._custom_values
@property
def feature_map_size(self):
return self._feature_map_size
@property
def num_anchors_per_localization(self):
num_rot = len(self._rotations)
num_size = np.array(self._sizes).reshape([-1, 3]).shape[0]
return num_rot * num_size
@property
def ndim(self):
return 7 + len(self._custom_values)
@property
def custom_ndim(self):
return len(self._custom_values)
def generate(self, feature_map_size):
anchors = create_anchors_3d_range(feature_map_size, self._anchor_ranges, self._sizes,
self._rotations, self._dtype)
if self._custom_values is not None:
custom_values = np.zeros((*anchors.shape[:-1], len(self._custom_values)), dtype=self._dtype)
for k in range(len(self._custom_values)):
custom_values[..., k] = self._custom_values[k]
anchors = np.concatenate((anchors, custom_values), axis=-1)
return anchors
class TargetAssigner(object):
def __init__(self, anchor_generators, pos_fraction, sample_size, region_similarity_fn_name, box_coder, logger=None):
super().__init__()
self.anchor_generators = anchor_generators
self.pos_fraction = pos_fraction if pos_fraction >= 0 else None
self.sample_size = sample_size
self.region_similarity_calculator = getattr(self, region_similarity_fn_name)
self.box_coder = box_coder
self.logger = logger
def generate_anchors(self, feature_map_size=None, use_multi_head=False):
anchors_list = []
matched_thresholds = [a.match_threshold for a in self.anchor_generators]
unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]
match_list, unmatch_list = [], []
for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,
matched_thresholds, unmatched_thresholds):
if use_multi_head:
anchors = anchor_generator.generate(anchor_generator.feature_map_size) # (1, H, W, 2#, code_size)
anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])
ndim = len(anchor_generator.feature_map_size)
anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1) # (2#, 1, H, W, code_size)
anchors = anchors.reshape(-1, anchors.shape[-1])
else:
anchors = anchor_generator.generate(feature_map_size)
anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])
anchors_list.append(anchors)
num_anchors = np.prod(anchors.shape[:-1])
match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))
unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))
anchors = np.concatenate(anchors_list, axis=-2)
matched_thresholds = np.concatenate(match_list, axis=0)
unmatched_thresholds = np.concatenate(unmatch_list, axis=0)
return {
'anchors': anchors,
'matched_thresholds': matched_thresholds,
'unmatched_thresholds': unmatched_thresholds
}
def generate_anchors_dict(self, feature_map_size, use_multi_head=False):
anchors_list = []
matched_thresholds = [a.match_threshold for a in self.anchor_generators]
unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]
match_list, unmatch_list = [], []
anchors_dict = {a.class_name: {} for a in self.anchor_generators}
for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,
matched_thresholds, unmatched_thresholds):
if use_multi_head:
anchors = anchor_generator.generate(anchor_generator.feature_map_size)
anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])
ndim = len(feature_map_size)
anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1)
else:
anchors = anchor_generator.generate(feature_map_size)
anchors = anchors.reshape([*anchors.shape[:3], -1, anchors.shape[-1]])
anchors_list.append(anchors)
num_anchors = np.prod(anchors.shape[:-1])
match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))
unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))
class_name = anchor_generator.class_name
anchors_dict[class_name]['anchors'] = anchors
anchors_dict[class_name]['matched_thresholds'] = match_list[-1]
anchors_dict[class_name]['unmatched_thresholds'] = unmatch_list[-1]
return anchors_dict
@staticmethod
def nearest_iou_similarity(boxes1, boxes2):
boxes1_bv = rbbox2d_to_near_bbox(boxes1)
boxes2_bv = rbbox2d_to_near_bbox(boxes2)
ret = iou_jit(boxes1_bv, boxes2_bv, eps=0.0)
return ret
def assign_v2(self, anchors_dict, gt_boxes, anchors_mask=None, gt_classes=None, gt_names=None):
prune_anchor_fn = None if anchors_mask is None else lambda _: np.where(anchors_mask)[0]
def similarity_fn(anchors, gt_boxes):
anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]
gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]
return self.region_similarity_calculator(anchors_rbv, gt_boxes_rbv)
def box_encoding_fn(boxes, anchors):
return self.box_coder.encode_np(boxes, anchors)
targets_list = []
for class_name, anchor_dict in anchors_dict.items():
mask = np.array([c == class_name for c in gt_names], dtype=np.bool_)
targets = self.create_target_np(
# anchor_dict['anchors'].reshape(-1, self.box_coder.code_size),
anchor_dict['anchors'].reshape(-1, anchor_dict['anchors'].shape[-1]),
gt_boxes[mask],
similarity_fn,
box_encoding_fn,
prune_anchor_fn=prune_anchor_fn,
gt_classes=gt_classes[mask],
matched_threshold=anchor_dict['matched_thresholds'],
unmatched_threshold=anchor_dict['unmatched_thresholds'],
positive_fraction=self.pos_fraction,
rpn_batch_size=self.sample_size,
norm_by_num_examples=False,
box_code_size=self.box_coder.code_size
)
targets_list.append(targets)
feature_map_size = anchor_dict['anchors'].shape[:3]
targets_dict = {
'labels': [t['labels'] for t in targets_list],
'bbox_targets': [t['bbox_targets'] for t in targets_list],
'bbox_src_targets': [t['bbox_src_targets'] for t in targets_list],
'bbox_outside_weights': [t['bbox_outside_weights'] for t in targets_list],
}
# bbox_targets: (H, W, num_anchors_per_loc, code_size)
targets_dict['bbox_targets'] = np.concatenate([v.reshape(*feature_map_size, -1, self.box_coder.code_size)
for v in targets_dict['bbox_targets']], axis=-2)
targets_dict['bbox_src_targets'] = np.concatenate([v.reshape(*feature_map_size, -1, self.box_coder.code_size)
for v in targets_dict['bbox_src_targets']], axis=-2)
targets_dict['labels'] = np.concatenate([v.reshape(*feature_map_size, -1)
for v in targets_dict['labels']], axis=-1)
targets_dict['bbox_outside_weights'] = np.concatenate([v.reshape(*feature_map_size, -1)
for v in targets_dict['bbox_outside_weights']], axis=-1)
targets_dict['bbox_targets'] = targets_dict['bbox_targets'].reshape(-1, self.box_coder.code_size)
targets_dict['bbox_src_targets'] = targets_dict['bbox_src_targets'].reshape(-1, self.box_coder.code_size)
targets_dict['labels'] = targets_dict['labels'].reshape(-1)
targets_dict['bbox_outside_weights'] = targets_dict['bbox_outside_weights'].reshape(-1)
return targets_dict
def assign_multihead(self, anchors_dict, gt_boxes, anchors_mask=None, gt_classes=None, gt_names=None):
prune_anchor_fn = None if anchors_mask is None else lambda _: np.where(anchors_mask)[0]
def similarity_fn(anchors, gt_boxes):
anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]
gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]
return self.region_similarity_calculator(anchors_rbv, gt_boxes_rbv)
def box_encoding_fn(boxes, anchors):
return self.box_coder.encode_np(boxes, anchors)
targets_list = []
for class_name, anchor_dict in anchors_dict.items():
mask = np.array([c == class_name for c in gt_names], dtype=np.bool_)
targets = self.create_target_np(
# anchor_dict['anchors'].reshape(-1, self.box_coder.code_size),
anchor_dict['anchors'].reshape(-1, anchor_dict['anchors'].shape[-1]),
gt_boxes[mask],
similarity_fn,
box_encoding_fn,
prune_anchor_fn=prune_anchor_fn,
gt_classes=gt_classes[mask],
matched_threshold=anchor_dict['matched_thresholds'],
unmatched_threshold=anchor_dict['unmatched_thresholds'],
positive_fraction=self.pos_fraction,
rpn_batch_size=self.sample_size,
norm_by_num_examples=False,
box_code_size=self.box_coder.code_size
)
targets_list.append(targets)
targets_dict = {
'labels': [t['labels'] for t in targets_list],
'bbox_targets': [t['bbox_targets'] for t in targets_list],
'bbox_outside_weights': [t['bbox_outside_weights'] for t in targets_list],
}
# # bbox_targets: (H, W, num_anchors_per_loc, code_size)
targets_dict['bbox_targets'] = np.concatenate([v.reshape(-1, self.box_coder.code_size)
for v in targets_dict['bbox_targets']], axis=0)
targets_dict['labels'] = np.concatenate([v.reshape(-1) for v in targets_dict['labels']], axis=0)
targets_dict['bbox_outside_weights'] = np.concatenate([v.reshape(-1)
for v in targets_dict['bbox_outside_weights']], axis=0)
return targets_dict
def create_target_np(self, all_anchors,
gt_boxes,
similarity_fn,
box_encoding_fn,
prune_anchor_fn=None,
gt_classes=None,
matched_threshold=0.6,
unmatched_threshold=0.45,
bbox_inside_weight=None,
positive_fraction=None,
rpn_batch_size=300,
norm_by_num_examples=False,
box_code_size=7):
'''Modified from FAIR detectron.
Args:
all_anchors: [num_of_anchors, box_ndim] float tensor.
gt_boxes: [num_gt_boxes, box_ndim] float tensor.
similarity_fn: a function, accept anchors and gt_boxes, return
similarity matrix(such as IoU).
box_encoding_fn: a function, accept gt_boxes and anchors, return
box encodings(offsets).
prune_anchor_fn: a function, accept anchors, return indices that
indicate valid anchors.
gt_classes: [num_gt_boxes] int tensor. indicate gt classes, must
start with 1.
matched_threshold: float, iou greater than matched_threshold will
be treated as positives.
unmatched_threshold: float, iou smaller than unmatched_threshold will
be treated as negatives.
bbox_inside_weight: unused
            positive_fraction: [0-1] float or None. if not None, we will try to
                keep the ratio of pos/neg equal to positive_fraction when sampling.
                if there are not enough positives, the rest is filled with negatives
            rpn_batch_size: int. sample size
            norm_by_num_examples: bool. normalize box_weight by the number of
                examples, but I recommend doing this outside.
Returns:
labels, bbox_targets, bbox_outside_weights
'''
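        # In short: anchors whose IoU with some gt box is >= matched_threshold
        # take that box's class label, anchors below unmatched_threshold are
        # labeled 0 (background), and anchors in between keep label -1 and are
        # ignored by the loss. Each gt box also force-matches its
        # best-overlapping anchor(s), so no gt box is left unassigned.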
total_anchors = all_anchors.shape[0]
if prune_anchor_fn is not None:
inds_inside = prune_anchor_fn(all_anchors)
anchors = all_anchors[inds_inside, :]
if not isinstance(matched_threshold, float):
matched_threshold = matched_threshold[inds_inside]
if not isinstance(unmatched_threshold, float):
unmatched_threshold = unmatched_threshold[inds_inside]
else:
anchors = all_anchors
inds_inside = None
num_inside = len(inds_inside) if inds_inside is not None else total_anchors
box_ndim = all_anchors.shape[1]
if self.logger is not None:
self.logger.info('total_anchors: {}'.format(total_anchors))
self.logger.info('inds_inside: {}'.format(num_inside))
self.logger.info('anchors.shape: {}'.format(anchors.shape))
if gt_classes is None:
gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32)
# Compute anchor labels:
# label=1 is positive, 0 is negative, -1 is don't care (ignore)
labels = np.empty((num_inside,), dtype=np.int32)
gt_ids = np.empty((num_inside,), dtype=np.int32)
labels.fill(-1)
gt_ids.fill(-1)
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
# Compute overlaps between the anchors and the gt boxes overlaps
anchor_by_gt_overlap = similarity_fn(anchors, gt_boxes)
# Map from anchor to gt box that has highest overlap
anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
# For each anchor, amount of overlap with most overlapping gt box
            anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),
                                                    anchor_to_gt_argmax]
# Map from gt box to an anchor that has highest overlap
gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
# For each gt box, amount of overlap with most overlapping anchor
gt_to_anchor_max = anchor_by_gt_overlap[
gt_to_anchor_argmax,
np.arange(anchor_by_gt_overlap.shape[1])]
# must remove gt which doesn't match any anchor.
empty_gt_mask = gt_to_anchor_max == 0
gt_to_anchor_max[empty_gt_mask] = -1
# Find all anchors that share the max overlap amount
# (this includes many ties)
anchors_with_max_overlap = np.where(
anchor_by_gt_overlap == gt_to_anchor_max)[0]
# Fg label: for each gt use anchors with highest overlap
# (including ties)
gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
gt_ids[anchors_with_max_overlap] = gt_inds_force
# Fg label: above threshold IOU
pos_inds = anchor_to_gt_max >= matched_threshold
gt_inds = anchor_to_gt_argmax[pos_inds]
labels[pos_inds] = gt_classes[gt_inds]
gt_ids[pos_inds] = gt_inds
bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]
else:
# labels[:] = 0
bg_inds = np.arange(num_inside)
fg_inds = np.where(labels > 0)[0]
fg_max_overlap = None
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
fg_max_overlap = anchor_to_gt_max[fg_inds]
gt_pos_ids = gt_ids[fg_inds]
# bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]
# bg_inds = np.where(labels == 0)[0]
# subsample positive labels if we have too many
if positive_fraction is not None:
num_fg = int(positive_fraction * rpn_batch_size)
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
fg_inds = np.where(labels > 0)[0]
# subsample negative labels if we have too many
# (samples with replacement, but since the set of bg inds is large most
# samples will not have repeats)
num_bg = rpn_batch_size - np.sum(labels > 0)
# print(num_fg, num_bg, len(bg_inds) )
if len(bg_inds) > num_bg:
enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]
labels[enable_inds] = 0
bg_inds = np.where(labels == 0)[0]
else:
if len(gt_boxes) == 0 or anchors.shape[0] == 0:
labels[:] = 0
else:
labels[bg_inds] = 0
# re-enable anchors_with_max_overlap
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
bbox_targets = np.zeros(
(num_inside, box_code_size), dtype=all_anchors.dtype)
bbox_src_targets = np.zeros(
(num_inside, box_code_size), dtype=all_anchors.dtype)
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
# print(anchors[fg_inds, :].shape, gt_boxes[anchor_to_gt_argmax[fg_inds], :].shape)
# bbox_targets[fg_inds, :] = box_encoding_fn(
# anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :])
fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
fg_anchors = anchors[fg_inds, :]
bbox_targets[fg_inds, :] = box_encoding_fn(fg_gt_boxes, fg_anchors)
temp_src_gt_boxes = fg_gt_boxes.copy()
temp_src_gt_boxes[:, 0:3] = fg_gt_boxes[:, 0:3] - fg_anchors[:, 0:3]
bbox_src_targets[fg_inds, :] = temp_src_gt_boxes
# Bbox regression loss has the form:
# loss(x) = weight_outside * L(weight_inside * x)
# Inside weights allow us to set zero loss on an element-wise basis
# Bbox regression is only trained on positive examples so we set their
# weights to 1.0 (or otherwise if config is different) and 0 otherwise
# NOTE: we don't need bbox_inside_weights, remove it.
# bbox_inside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)
# bbox_inside_weights[labels == 1, :] = [1.0] * box_ndim
# The bbox regression loss only averages by the number of images in the
# mini-batch, whereas we need to average by the total number of example
# anchors selected
# Outside weights are used to scale each element-wise loss so the final
# average over the mini-batch is correct
# bbox_outside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)
bbox_outside_weights = np.zeros((num_inside,), dtype=all_anchors.dtype)
# uniform weighting of examples (given non-uniform sampling)
if norm_by_num_examples:
num_examples = np.sum(labels >= 0) # neg + pos
num_examples = np.maximum(1.0, num_examples)
bbox_outside_weights[labels > 0] = 1.0 / num_examples
else:
bbox_outside_weights[labels > 0] = 1.0
# bbox_outside_weights[labels == 0, :] = 1.0 / num_examples
# Map up to original set of anchors
if inds_inside is not None:
labels = unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_src_targets = unmap(bbox_src_targets, total_anchors, inds_inside, fill=0)
# bbox_inside_weights = unmap(
# bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = unmap(
bbox_outside_weights, total_anchors, inds_inside, fill=0)
# return labels, bbox_targets, bbox_outside_weights
ret = {
'labels': labels,
'bbox_targets': bbox_targets,
'bbox_outside_weights': bbox_outside_weights,
'assigned_anchors_overlap': fg_max_overlap,
'positive_gt_id': gt_pos_ids,
'bbox_src_targets': bbox_src_targets,
}
if inds_inside is not None:
ret['assigned_anchors_inds'] = inds_inside[fg_inds]
else:
ret['assigned_anchors_inds'] = fg_inds
return ret
@property
def num_anchors_per_location(self):
num = 0
for a_generator in self.anchor_generators:
num += a_generator.num_anchors_per_localization
return num
def num_anchors_per_location_class(self, class_name):
if isinstance(class_name, int):
class_name = self.classes[class_name]
assert class_name in self.classes
class_idx = self.classes.index(class_name)
return self.anchor_generators[class_idx].num_anchors_per_localization
@property
def classes(self):
return [a.class_name for a in self.anchor_generators]
@property
def box_ndim(self):
return self.anchor_generators[0].ndim
| 46.253106
| 120
| 0.610501
|
4a16d61fb40258b5aef9131ce3172defc1d50346
| 1,943
|
py
|
Python
|
zeeguu_core/util/text.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 1
|
2018-03-22T12:29:49.000Z
|
2018-03-22T12:29:49.000Z
|
zeeguu_core/util/text.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 82
|
2017-12-09T16:15:02.000Z
|
2020-11-12T11:34:09.000Z
|
zeeguu_core/util/text.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 9
|
2017-11-25T11:32:05.000Z
|
2020-10-26T15:50:13.000Z
|
import math
import nltk
import pyphen
import regex
from collections import Counter
from nltk import SnowballStemmer
from zeeguu_core.model import Language
AVERAGE_SYLLABLE_LENGTH = 2.5
"""
Collection of simple text processing functions
"""
def split_words_from_text(text):
words = regex.findall(r'(\b\p{L}+\b)', text)
return words
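# Example (illustrative): \p{L} matches any Unicode letter, so accented
# characters stay inside words while punctuation splits them:
#   split_words_from_text("Hello, wörld!") -> ['Hello', 'wörld']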
def split_unique_words_from_text(text, language:Language):
words = split_words_from_text(text)
stemmer = SnowballStemmer(language.name.lower())
return set([stemmer.stem(w.lower()) for w in words])
def length(text):
return len(split_words_from_text(text))
def unique_length(text, language: Language):
words_unique = split_unique_words_from_text(text, language)
return len(words_unique)
def number_of_sentences(text):
return len(nltk.sent_tokenize(text))
def average_sentence_length(text):
return length(text)/number_of_sentences(text)
def median_sentence_length(text):
sentence_lengths = [length(s) for s in nltk.sent_tokenize(text)]
sentence_lengths = sorted(sentence_lengths)
return sentence_lengths[int(len(sentence_lengths)/2)]
def number_of_syllables(text, language:Language):
words = [w.lower() for w in split_words_from_text(text)]
number_of_syllables = 0
for word, freq in Counter(words).items():
if language.code == "zh-CN":
syllables = int(math.floor(max(len(word) / AVERAGE_SYLLABLE_LENGTH,1)))
else:
dic = pyphen.Pyphen(lang=language.code)
syllables = len(dic.positions(word)) + 1
number_of_syllables += syllables * freq
return number_of_syllables
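# Sketch of the counting rule (inferred from the code above, not from any
# docs): pyphen returns the hyphenation points of a word, so a word with two
# hyphenation points counts as three syllables (len(positions) + 1). For
# Chinese ("zh-CN") the code instead estimates one syllable per
# AVERAGE_SYLLABLE_LENGTH characters.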
def average_word_length(text, language:Language):
return number_of_syllables(text, language)/length(text)
def median_word_length(text, language:Language):
    # sort before indexing the middle element, as median_sentence_length does
    word_lengths = sorted(number_of_syllables(w, language) for w in split_words_from_text(text))
    return word_lengths[int(len(word_lengths)/2)]
| 28.15942
| 90
| 0.73649
|
4a16d6f33efe015adbbfc0a90a806578192f0c5f
| 542
|
py
|
Python
|
fizzbuzz.py
|
Magicianred/Projects
|
7cd00b4e24a325c7cdca28dde7fe7c04e55b4773
|
[
"MIT"
] | 2
|
2021-04-08T01:36:07.000Z
|
2021-06-03T04:21:31.000Z
|
fizzbuzz.py
|
Magicianred/Projects
|
7cd00b4e24a325c7cdca28dde7fe7c04e55b4773
|
[
"MIT"
] | null | null | null |
fizzbuzz.py
|
Magicianred/Projects
|
7cd00b4e24a325c7cdca28dde7fe7c04e55b4773
|
[
"MIT"
] | 1
|
2020-12-03T07:00:39.000Z
|
2020-12-03T07:00:39.000Z
|
'''
Write a program that prints the numbers 1-100, each on a new line
For each number that is a multiple of 3, print “Fizz” instead of the number
For each number that is a multiple of 5, print “Buzz” instead of the number
For each number that is a multiple of both 3 and 5, print “FizzBuzz” instead of the number
'''
for number in range(1, 101):  # 1-100 inclusive, matching the spec above
if number % 3 == 0 and number % 5 == 0:
print("FizzBuzz")
elif number % 3 == 0:
print("Fizz")
elif number % 5 == 0:
print("Buzz")
else:
print(number)
| 31.882353
| 90
| 0.640221
|
4a16d70f8a3f6fb8992297215ff96b20edce633e
| 21,726
|
py
|
Python
|
espnet/nets/pytorch_backend/e2e_asr.py
|
Pranavs05/espnet
|
6830cb683b2c4dbb823b24ae865ac976dd0047fe
|
[
"Apache-2.0"
] | 4
|
2020-10-28T00:34:21.000Z
|
2021-08-02T05:43:59.000Z
|
espnet/nets/pytorch_backend/e2e_asr.py
|
Pranavs05/espnet
|
6830cb683b2c4dbb823b24ae865ac976dd0047fe
|
[
"Apache-2.0"
] | 1
|
2019-10-24T06:21:21.000Z
|
2019-10-24T06:21:21.000Z
|
espnet/nets/pytorch_backend/e2e_asr.py
|
Pranavs05/espnet
|
6830cb683b2c4dbb823b24ae865ac976dd0047fe
|
[
"Apache-2.0"
] | 5
|
2019-07-19T16:40:57.000Z
|
2020-11-05T20:09:44.000Z
|
#!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import logging
import math
import os
import editdistance
import chainer
import numpy as np
import six
import torch
from itertools import groupby
from chainer import reporter
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import ctc_for
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.nets_utils import to_device
from espnet.nets.pytorch_backend.nets_utils import to_torch_tensor
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import encoder_for
from espnet.nets.scorers.ctc import CTCPrefixScorer
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
"""A chainer reporter wrapper"""
def report(self, loss_ctc, loss_att, acc, cer_ctc, cer, wer, mtl_loss):
reporter.report({'loss_ctc': loss_ctc}, self)
reporter.report({'loss_att': loss_att}, self)
reporter.report({'acc': acc}, self)
reporter.report({'cer_ctc': cer_ctc}, self)
reporter.report({'cer': cer}, self)
reporter.report({'wer': wer}, self)
logging.info('mtl loss:' + str(mtl_loss))
reporter.report({'loss': mtl_loss}, self)
class E2E(ASRInterface, torch.nn.Module):
"""E2E module
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
E2E.encoder_add_arguments(parser)
E2E.attention_add_arguments(parser)
E2E.decoder_add_arguments(parser)
return parser
@staticmethod
def encoder_add_arguments(parser):
group = parser.add_argument_group("E2E encoder setting")
# encoder
group.add_argument('--etype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture')
group.add_argument('--elayers', default=4, type=int,
help='Number of encoder layers (for shared recognition part in multi-speaker asr mode)')
group.add_argument('--eunits', '-u', default=300, type=int,
help='Number of encoder hidden units')
group.add_argument('--eprojs', default=320, type=int,
help='Number of encoder projection units')
group.add_argument('--subsample', default="1", type=str,
help='Subsample input frames x_y_z means subsample every x frame at 1st layer, '
'every y frame at 2nd layer etc.')
return parser
@staticmethod
def attention_add_arguments(parser):
group = parser.add_argument_group("E2E attention setting")
# attention
group.add_argument('--atype', default='dot', type=str,
choices=['noatt', 'dot', 'add', 'location', 'coverage',
'coverage_location', 'location2d', 'location_recurrent',
'multi_head_dot', 'multi_head_add', 'multi_head_loc',
'multi_head_multi_res_loc'],
help='Type of attention architecture')
group.add_argument('--adim', default=320, type=int,
help='Number of attention transformation dimensions')
group.add_argument('--awin', default=5, type=int,
help='Window size for location2d attention')
group.add_argument('--aheads', default=4, type=int,
help='Number of heads for multi head attention')
group.add_argument('--aconv-chans', default=-1, type=int,
help='Number of attention convolution channels \
(negative value indicates no location-aware attention)')
group.add_argument('--aconv-filts', default=100, type=int,
help='Number of attention convolution filters \
(negative value indicates no location-aware attention)')
group.add_argument('--dropout-rate', default=0.0, type=float,
help='Dropout rate for the encoder')
return parser
@staticmethod
def decoder_add_arguments(parser):
group = parser.add_argument_group("E2E encoder setting")
group.add_argument('--dtype', default='lstm', type=str,
choices=['lstm', 'gru'],
help='Type of decoder network architecture')
group.add_argument('--dlayers', default=1, type=int,
help='Number of decoder layers')
group.add_argument('--dunits', default=320, type=int,
help='Number of decoder hidden units')
group.add_argument('--dropout-rate-decoder', default=0.0, type=float,
help='Dropout rate for the decoder')
group.add_argument('--sampling-probability', default=0.0, type=float,
help='Ratio of predicted labels fed back to decoder')
return parser
def __init__(self, idim, odim, args):
super(E2E, self).__init__()
torch.nn.Module.__init__(self)
self.mtlalpha = args.mtlalpha
assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
self.etype = args.etype
self.verbose = args.verbose
# NOTE: for self.build method
args.char_list = getattr(args, "char_list", None)
self.char_list = args.char_list
self.outdir = args.outdir
self.space = args.sym_space
self.blank = args.sym_blank
self.reporter = Reporter()
# below means the last number becomes eos/sos ID
# note that sos/eos IDs are identical
self.sos = odim - 1
self.eos = odim - 1
# subsample info
# +1 means input (+1) and layers outputs (args.elayer)
        subsample = np.ones(args.elayers + 1, dtype=np.int64)  # np.int was removed from NumPy; use a concrete dtype
if args.etype.endswith("p") and not args.etype.startswith("vgg"):
ss = args.subsample.split("_")
for j in range(min(args.elayers + 1, len(ss))):
subsample[j] = int(ss[j])
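            # Example (hypothetical config, for illustration): with
            # args.elayers = 4 and args.subsample = "1_2_2_1_1", the input is
            # kept at full rate and frames are dropped 2x at the outputs of
            # the first two layers, i.e. subsample becomes [1, 2, 2, 1, 1].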
else:
            logging.warning(
                'Subsampling is not performed for vgg*. It is performed in the max pooling layers of the CNN.')
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
self.subsample = subsample
# label smoothing info
if args.lsm_type and os.path.isfile(args.train_json):
logging.info("Use label smoothing with " + args.lsm_type)
labeldist = label_smoothing_dist(odim, args.lsm_type, transcript=args.train_json)
else:
labeldist = None
# speech translation related
self.replace_sos = getattr(args, "replace_sos", False) # use getattr to keep compatibility
if getattr(args, "use_frontend", False): # use getattr to keep compatibility
# Relative importing because of using python3 syntax
from espnet.nets.pytorch_backend.frontends.feature_transform \
import feature_transform_for
from espnet.nets.pytorch_backend.frontends.frontend \
import frontend_for
self.frontend = frontend_for(args, idim)
self.feature_transform = feature_transform_for(args, (idim - 1) * 2)
idim = args.n_mels
else:
self.frontend = None
# encoder
self.enc = encoder_for(args, idim, self.subsample)
# ctc
self.ctc = ctc_for(args, odim)
# attention
self.att = att_for(args)
# decoder
self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
# weight initialization
self.init_like_chainer()
# options for beam search
if args.report_cer or args.report_wer:
recog_args = {'beam_size': args.beam_size, 'penalty': args.penalty,
'ctc_weight': args.ctc_weight, 'maxlenratio': args.maxlenratio,
'minlenratio': args.minlenratio, 'lm_weight': args.lm_weight,
'rnnlm': args.rnnlm, 'nbest': args.nbest,
'space': args.sym_space, 'blank': args.sym_blank,
'tgt_lang': False}
self.recog_args = argparse.Namespace(**recog_args)
self.report_cer = args.report_cer
self.report_wer = args.report_wer
else:
self.report_cer = False
self.report_wer = False
self.rnnlm = None
self.logzero = -10000000000.0
self.loss = None
self.acc = None
def init_like_chainer(self):
"""Initialize weight like chainer
chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
however, there are two exceptions as far as I know.
- EmbedID.W ~ Normal(0, 1)
- LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
"""
def lecun_normal_init_parameters(module):
for p in module.parameters():
data = p.data
if data.dim() == 1:
# bias
data.zero_()
elif data.dim() == 2:
# linear weight
n = data.size(1)
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
elif data.dim() in (3, 4):
# conv weight
n = data.size(1)
for k in data.size()[2:]:
n *= k
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
else:
raise NotImplementedError
def set_forget_bias_to_one(bias):
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
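        # PyTorch packs LSTM biases in (input, forget, cell, output) gate
        # order, so indices [n // 4, n // 2) above are exactly the forget
        # gate slice being initialized to 1.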
lecun_normal_init_parameters(self)
# exceptions
# embed weight ~ Normal(0, 1)
self.dec.embed.weight.data.normal_(0, 1)
# forget-bias = 1.0
# https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
for l in six.moves.range(len(self.dec.decoder)):
set_forget_bias_to_one(self.dec.decoder[l].bias_ih)
def forward(self, xs_pad, ilens, ys_pad):
"""E2E forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
        :return: loss value
:rtype: torch.Tensor
"""
# 0. Frontend
if self.frontend is not None:
hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
hs_pad, hlens = self.feature_transform(hs_pad, hlens)
else:
hs_pad, hlens = xs_pad, ilens
# 1. Encoder
if self.replace_sos:
tgt_lang_ids = ys_pad[:, 0:1]
            ys_pad = ys_pad[:, 1:]  # remove target language ID at the beginning
else:
tgt_lang_ids = None
hs_pad, hlens, _ = self.enc(hs_pad, hlens)
# 2. CTC loss
if self.mtlalpha == 0:
self.loss_ctc = None
else:
self.loss_ctc = self.ctc(hs_pad, hlens, ys_pad)
# 3. attention loss
if self.mtlalpha == 1:
self.loss_att, acc = None, None
else:
self.loss_att, acc, _ = self.dec(hs_pad, hlens, ys_pad, tgt_lang_ids=tgt_lang_ids)
self.acc = acc
# 4. compute cer without beam search
if self.mtlalpha == 0 or self.char_list is None:
cer_ctc = None
else:
cers = []
y_hats = self.ctc.argmax(hs_pad).data
for i, y in enumerate(y_hats):
y_hat = [x[0] for x in groupby(y)]
y_true = ys_pad[i]
seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
seq_hat_text = "".join(seq_hat).replace(self.space, ' ')
seq_hat_text = seq_hat_text.replace(self.blank, '')
seq_true_text = "".join(seq_true).replace(self.space, ' ')
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
if len(ref_chars) > 0:
cers.append(editdistance.eval(hyp_chars, ref_chars) / len(ref_chars))
cer_ctc = sum(cers) / len(cers) if cers else None
# 5. compute cer/wer
if self.training or not (self.report_cer or self.report_wer):
cer, wer = 0.0, 0.0
# oracle_cer, oracle_wer = 0.0, 0.0
else:
if self.recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(hs_pad).data
else:
lpz = None
word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
nbest_hyps = self.dec.recognize_beam_batch(
hs_pad, torch.tensor(hlens), lpz,
self.recog_args, self.char_list,
self.rnnlm,
tgt_lang_ids=tgt_lang_ids.squeeze(1).tolist() if self.replace_sos else None)
# remove <sos> and <eos>
y_hats = [nbest_hyp[0]['yseq'][1:-1] for nbest_hyp in nbest_hyps]
for i, y_hat in enumerate(y_hats):
y_true = ys_pad[i]
seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
seq_true = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, ' ')
seq_hat_text = seq_hat_text.replace(self.recog_args.blank, '')
seq_true_text = "".join(seq_true).replace(self.recog_args.space, ' ')
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
hyp_chars = seq_hat_text.replace(' ', '')
ref_chars = seq_true_text.replace(' ', '')
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
wer = 0.0 if not self.report_wer else float(sum(word_eds)) / sum(word_ref_lens)
cer = 0.0 if not self.report_cer else float(sum(char_eds)) / sum(char_ref_lens)
alpha = self.mtlalpha
if alpha == 0:
self.loss = self.loss_att
loss_att_data = float(self.loss_att)
loss_ctc_data = None
elif alpha == 1:
self.loss = self.loss_ctc
loss_att_data = None
loss_ctc_data = float(self.loss_ctc)
else:
self.loss = alpha * self.loss_ctc + (1 - alpha) * self.loss_att
loss_att_data = float(self.loss_att)
loss_ctc_data = float(self.loss_ctc)
loss_data = float(self.loss)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
self.reporter.report(loss_ctc_data, loss_att_data, acc, cer_ctc, cer, wer, loss_data)
else:
logging.warning('loss (=%f) is not correct', loss_data)
return self.loss
def scorers(self):
return dict(decoder=self.dec, ctc=CTCPrefixScorer(self.ctc, self.eos))
def encode(self, x):
self.eval()
ilens = [x.shape[0]]
# subsample frame
x = x[::self.subsample[0], :]
p = next(self.parameters())
h = torch.as_tensor(x, device=p.device, dtype=p.dtype)
# make a utt list (1) to use the same interface for encoder
hs = h.contiguous().unsqueeze(0)
# 0. Frontend
if self.frontend is not None:
enhanced, hlens, mask = self.frontend(hs, ilens)
hs, hlens = self.feature_transform(enhanced, hlens)
else:
hs, hlens = hs, ilens
# 1. encoder
hs, _, _ = self.enc(hs, hlens)
return hs.squeeze(0)
def recognize(self, x, recog_args, char_list, rnnlm=None):
"""E2E beam search
:param ndarray x: input acoustic feature (T, D)
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
hs = self.encode(x).unsqueeze(0)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(hs)[0]
else:
lpz = None
# 2. Decoder
# decode the first utterance
y = self.dec.recognize_beam(hs[0], lpz, recog_args, char_list, rnnlm)
return y
def recognize_batch(self, xs, recog_args, char_list, rnnlm=None):
"""E2E beam search
:param list xs: list of input acoustic feature arrays [(T_1, D), (T_2, D), ...]
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
prev = self.training
self.eval()
ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
# subsample frame
xs = [xx[::self.subsample[0], :] for xx in xs]
xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
xs_pad = pad_list(xs, 0.0)
# 0. Frontend
if self.frontend is not None:
enhanced, hlens, mask = self.frontend(xs_pad, ilens)
hs_pad, hlens = self.feature_transform(enhanced, hlens)
else:
hs_pad, hlens = xs_pad, ilens
# 1. Encoder
hs_pad, hlens, _ = self.enc(hs_pad, hlens)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(hs_pad)
normalize_score = False
else:
lpz = None
normalize_score = True
# 2. Decoder
hlens = torch.tensor(list(map(int, hlens))) # make sure hlens is tensor
y = self.dec.recognize_beam_batch(hs_pad, hlens, lpz, recog_args, char_list,
rnnlm, normalize_score=normalize_score)
if prev:
self.train()
return y
def enhance(self, xs):
"""Forwarding only the frontend stage
:param ndarray xs: input acoustic feature (T, C, F)
"""
if self.frontend is None:
            raise RuntimeError("Frontend doesn't exist")
prev = self.training
self.eval()
ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
# subsample frame
xs = [xx[::self.subsample[0], :] for xx in xs]
xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
xs_pad = pad_list(xs, 0.0)
enhanced, hlensm, mask = self.frontend(xs_pad, ilens)
if prev:
self.train()
return enhanced.cpu().numpy(), mask.cpu().numpy(), ilens
def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
"""E2E attention calculation
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray
"""
with torch.no_grad():
# 0. Frontend
if self.frontend is not None:
hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
hs_pad, hlens = self.feature_transform(hs_pad, hlens)
else:
hs_pad, hlens = xs_pad, ilens
# 1. Encoder
if self.replace_sos:
tgt_lang_ids = ys_pad[:, 0:1]
ys_pad = ys_pad[:, 1:]  # remove target language ID at the beginning
else:
tgt_lang_ids = None
hpad, hlens, _ = self.enc(hs_pad, hlens)
# 2. Decoder
att_ws = self.dec.calculate_all_attentions(hpad, hlens, ys_pad, tgt_lang_ids=tgt_lang_ids)
return att_ws
def subsample_frames(self, x):
# subsample frame
x = x[::self.subsample[0], :]
ilen = [x.shape[0]]
h = to_device(self, torch.from_numpy(
np.array(x, dtype=np.float32)))
h.contiguous()
return h, ilen
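# --- Usage sketch (added for illustration; `model`, the `recog_args`
# fields and `char_list` are assumptions about the surrounding ESPnet
# setup, not defined in this file) ---
# import numpy as np
# from argparse import Namespace
# feats = np.random.randn(100, 83).astype(np.float32)  # (T, D) features
# recog_args = Namespace(ctc_weight=0.3, beam_size=5, penalty=0.0,
#                        maxlenratio=0.0, minlenratio=0.0, nbest=1)
# nbest_hyps = model.recognize(feats, recog_args, char_list)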
| 40.233333
| 119
| 0.576958
|
4a16d84ecb4a879f4960913d0b475910964171af
| 1,114
|
py
|
Python
|
opensearch.py
|
datalogics-cgreen/server_core
|
4459314cd2cdb92b7cabeed8fd1125d8c5cb7941
|
[
"Apache-2.0"
] | null | null | null |
opensearch.py
|
datalogics-cgreen/server_core
|
4459314cd2cdb92b7cabeed8fd1125d8c5cb7941
|
[
"Apache-2.0"
] | 1
|
2017-05-12T22:14:16.000Z
|
2017-05-12T22:14:16.000Z
|
opensearch.py
|
datalogics-cgreen/server_core
|
4459314cd2cdb92b7cabeed8fd1125d8c5cb7941
|
[
"Apache-2.0"
] | 2
|
2017-05-12T21:27:53.000Z
|
2021-08-04T12:27:25.000Z
|
class OpenSearchDocument(object):
"""Generates OpenSearch documents."""
TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/">
<ShortName>%(name)s</ShortName>
<Description>%(description)s</Description>
<Tags>%(tags)s</Tags>
<Url type="application/atom+xml;profile=opds-catalog" template="%(url_template)s"/>
</OpenSearchDescription>"""
@classmethod
def search_info(cls, lane):
d = dict(name="Search")
tags = []
if lane is not None and lane.search_target is not None:
    tags.append(lane.search_target.name.lower().replace(" ", "-").replace("&", "&amp;"))
    description = "Search %s" % lane.search_target.name.replace("&", "&amp;")
else:
description = "Search"
d['description'] = description
d['tags'] = " ".join(tags)
return d
@classmethod
def for_lane(cls, lane, base_url):
info = cls.search_info(lane)
info['url_template'] = base_url + "?q={searchTerms}"
return cls.TEMPLATE % info
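if __name__ == '__main__':
    # Minimal demonstration (added for illustration): with no lane the
    # generated document describes a generic search endpoint.
    print(OpenSearchDocument.for_lane(None, "https://example.org/search"))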
| 33.757576
| 96
| 0.603232
|
4a16d8a579a1f45ed4279095bccd74131c047ea5
| 12,913
|
py
|
Python
|
ppcls/arch/backbone/model_zoo/ghostnet.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 3,763
|
2020-04-10T04:48:11.000Z
|
2022-03-31T13:24:37.000Z
|
ppcls/arch/backbone/model_zoo/ghostnet.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 633
|
2020-04-08T18:27:31.000Z
|
2022-03-31T01:09:43.000Z
|
ppcls/arch/backbone/model_zoo/ghostnet.py
|
TxT1212/PaddleClas
|
5a24c8700f738f036bf27f80ca12dbe8471a11b0
|
[
"Apache-2.0"
] | 846
|
2020-04-08T08:13:18.000Z
|
2022-03-31T12:28:37.000Z
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, AdaptiveAvgPool2D, Linear
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Uniform, KaimingNormal
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"GhostNet_x0_5":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams",
"GhostNet_x1_0":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams",
"GhostNet_x1_3":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams",
}
__all__ = list(MODEL_URLS.keys())
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
groups=1,
act="relu",
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
initializer=KaimingNormal(), name=name + "_weights"),
bias_attr=False)
bn_name = name + "_bn"
self._batch_norm = BatchNorm(
num_channels=out_channels,
act=act,
param_attr=ParamAttr(
name=bn_name + "_scale", regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(
name=bn_name + "_offset", regularizer=L2Decay(0.0)),
moving_mean_name=bn_name + "_mean",
moving_variance_name=bn_name + "_variance")
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class SEBlock(nn.Layer):
def __init__(self, num_channels, reduction_ratio=4, name=None):
super(SEBlock, self).__init__()
self.pool2d_gap = AdaptiveAvgPool2D(1)
self._num_channels = num_channels
stdv = 1.0 / math.sqrt(num_channels * 1.0)
med_ch = num_channels // reduction_ratio
self.squeeze = Linear(
num_channels,
med_ch,
weight_attr=ParamAttr(
initializer=Uniform(-stdv, stdv), name=name + "_1_weights"),
bias_attr=ParamAttr(name=name + "_1_offset"))
stdv = 1.0 / math.sqrt(med_ch * 1.0)
self.excitation = Linear(
med_ch,
num_channels,
weight_attr=ParamAttr(
initializer=Uniform(-stdv, stdv), name=name + "_2_weights"),
bias_attr=ParamAttr(name=name + "_2_offset"))
def forward(self, inputs):
pool = self.pool2d_gap(inputs)
pool = paddle.squeeze(pool, axis=[2, 3])
squeeze = self.squeeze(pool)
squeeze = F.relu(squeeze)
excitation = self.excitation(squeeze)
excitation = paddle.clip(x=excitation, min=0, max=1)
excitation = paddle.unsqueeze(excitation, axis=[2, 3])
out = paddle.multiply(inputs, excitation)
return out
class GhostModule(nn.Layer):
def __init__(self,
in_channels,
output_channels,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
relu=True,
name=None):
super(GhostModule, self).__init__()
init_channels = int(math.ceil(output_channels / ratio))
new_channels = int(init_channels * (ratio - 1))
self.primary_conv = ConvBNLayer(
in_channels=in_channels,
out_channels=init_channels,
kernel_size=kernel_size,
stride=stride,
groups=1,
act="relu" if relu else None,
name=name + "_primary_conv")
self.cheap_operation = ConvBNLayer(
in_channels=init_channels,
out_channels=new_channels,
kernel_size=dw_size,
stride=1,
groups=init_channels,
act="relu" if relu else None,
name=name + "_cheap_operation")
def forward(self, inputs):
x = self.primary_conv(inputs)
y = self.cheap_operation(x)
out = paddle.concat([x, y], axis=1)
return out
class GhostBottleneck(nn.Layer):
def __init__(self,
in_channels,
hidden_dim,
output_channels,
kernel_size,
stride,
use_se,
name=None):
super(GhostBottleneck, self).__init__()
self._stride = stride
self._use_se = use_se
self._num_channels = in_channels
self._output_channels = output_channels
self.ghost_module_1 = GhostModule(
in_channels=in_channels,
output_channels=hidden_dim,
kernel_size=1,
stride=1,
relu=True,
name=name + "_ghost_module_1")
if stride == 2:
self.depthwise_conv = ConvBNLayer(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=kernel_size,
stride=stride,
groups=hidden_dim,
act=None,
name=name +
"_depthwise_depthwise" # looks strange due to an old typo, will be fixed later.
)
if use_se:
self.se_block = SEBlock(num_channels=hidden_dim, name=name + "_se")
self.ghost_module_2 = GhostModule(
in_channels=hidden_dim,
output_channels=output_channels,
kernel_size=1,
relu=False,
name=name + "_ghost_module_2")
if stride != 1 or in_channels != output_channels:
self.shortcut_depthwise = ConvBNLayer(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
groups=in_channels,
act=None,
name=name +
"_shortcut_depthwise_depthwise" # looks strange due to an old typo, will be fixed later.
)
self.shortcut_conv = ConvBNLayer(
in_channels=in_channels,
out_channels=output_channels,
kernel_size=1,
stride=1,
groups=1,
act=None,
name=name + "_shortcut_conv")
def forward(self, inputs):
x = self.ghost_module_1(inputs)
if self._stride == 2:
x = self.depthwise_conv(x)
if self._use_se:
x = self.se_block(x)
x = self.ghost_module_2(x)
if self._stride == 1 and self._num_channels == self._output_channels:
shortcut = inputs
else:
shortcut = self.shortcut_depthwise(inputs)
shortcut = self.shortcut_conv(shortcut)
return paddle.add(x=x, y=shortcut)
class GhostNet(nn.Layer):
def __init__(self, scale, class_num=1000):
super(GhostNet, self).__init__()
self.cfgs = [
# k, t, c, SE, s
[3, 16, 16, 0, 1],
[3, 48, 24, 0, 2],
[3, 72, 24, 0, 1],
[5, 72, 40, 1, 2],
[5, 120, 40, 1, 1],
[3, 240, 80, 0, 2],
[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 1, 1],
[3, 672, 112, 1, 1],
[5, 672, 160, 1, 2],
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1]
]
self.scale = scale
output_channels = int(self._make_divisible(16 * self.scale, 4))
self.conv1 = ConvBNLayer(
in_channels=3,
out_channels=output_channels,
kernel_size=3,
stride=2,
groups=1,
act="relu",
name="conv1")
# build inverted residual blocks
idx = 0
self.ghost_bottleneck_list = []
for k, exp_size, c, use_se, s in self.cfgs:
in_channels = output_channels
output_channels = int(self._make_divisible(c * self.scale, 4))
hidden_dim = int(self._make_divisible(exp_size * self.scale, 4))
ghost_bottleneck = self.add_sublayer(
name="_ghostbottleneck_" + str(idx),
sublayer=GhostBottleneck(
in_channels=in_channels,
hidden_dim=hidden_dim,
output_channels=output_channels,
kernel_size=k,
stride=s,
use_se=use_se,
name="_ghostbottleneck_" + str(idx)))
self.ghost_bottleneck_list.append(ghost_bottleneck)
idx += 1
# build last several layers
in_channels = output_channels
output_channels = int(self._make_divisible(exp_size * self.scale, 4))
self.conv_last = ConvBNLayer(
in_channels=in_channels,
out_channels=output_channels,
kernel_size=1,
stride=1,
groups=1,
act="relu",
name="conv_last")
self.pool2d_gap = AdaptiveAvgPool2D(1)
in_channels = output_channels
self._fc0_output_channels = 1280
self.fc_0 = ConvBNLayer(
in_channels=in_channels,
out_channels=self._fc0_output_channels,
kernel_size=1,
stride=1,
act="relu",
name="fc_0")
self.dropout = nn.Dropout(p=0.2)
stdv = 1.0 / math.sqrt(self._fc0_output_channels * 1.0)
self.fc_1 = Linear(
self._fc0_output_channels,
class_num,
weight_attr=ParamAttr(
name="fc_1_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc_1_offset"))
def forward(self, inputs):
x = self.conv1(inputs)
for ghost_bottleneck in self.ghost_bottleneck_list:
x = ghost_bottleneck(x)
x = self.conv_last(x)
x = self.pool2d_gap(x)
x = self.fc_0(x)
x = self.dropout(x)
x = paddle.reshape(x, shape=[-1, self._fc0_output_channels])
x = self.fc_1(x)
return x
def _make_divisible(self, v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
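# Worked example (added for illustration): _make_divisible(9, 8) first
# rounds to 8, but 8 < 0.9 * 9, so the result is bumped up to 16.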
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def GhostNet_x0_5(pretrained=False, use_ssld=False, **kwargs):
model = GhostNet(scale=0.5, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["GhostNet_x0_5"], use_ssld=use_ssld)
return model
def GhostNet_x1_0(pretrained=False, use_ssld=False, **kwargs):
model = GhostNet(scale=1.0, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["GhostNet_x1_0"], use_ssld=use_ssld)
return model
def GhostNet_x1_3(pretrained=False, use_ssld=False, **kwargs):
model = GhostNet(scale=1.3, **kwargs)
_load_pretrained(
pretrained, model, MODEL_URLS["GhostNet_x1_3"], use_ssld=use_ssld)
return model
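if __name__ == "__main__":
    # Smoke test (added for illustration; weights stay random with
    # pretrained=False, so only the output shape is meaningful).
    model = GhostNet_x1_0(pretrained=False)
    x = paddle.rand([1, 3, 224, 224])
    print(model(x).shape)  # expected: [1, 1000]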
| 35.671271
| 105
| 0.574847
|
4a16d8d3c0f3aa5aff4101c9da1abfe8fe857ebe
| 2,321
|
py
|
Python
|
src/gui/components/edit.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 63
|
2016-01-02T16:28:47.000Z
|
2022-01-19T11:29:51.000Z
|
src/gui/components/edit.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 12
|
2016-06-12T14:14:15.000Z
|
2020-12-18T16:11:45.000Z
|
src/gui/components/edit.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 17
|
2016-05-23T00:02:27.000Z
|
2021-04-25T17:48:27.000Z
|
from ..base import *
class EditManager:
def __init__(self, menubar, uv_edit_command):
self._menu = menu = menubar.add_menu("edit", "Edit")
mod_key_codes = GD["mod_key_codes"]
handler = lambda: Mgr.update_app("history", "undo")
menu.add("undo", "Undo", handler)
hotkey = ("z", mod_key_codes["ctrl"])
menu.set_item_hotkey("undo", hotkey, "Ctrl+Z")
handler = lambda: Mgr.update_app("history", "redo")
menu.add("redo", "Redo", handler)
hotkey = ("y", mod_key_codes["ctrl"])
menu.set_item_hotkey("redo", hotkey, "Ctrl+Y")
handler = lambda: Mgr.update_app("history", "edit")
menu.add("hist", "History...", handler)
menu.add("sep0", item_type="separator")
handler = lambda: Mgr.update_remotely("group", "create")
menu.add("group", "Create group", handler)
hotkey = ("g", mod_key_codes["ctrl"])
menu.set_item_hotkey("group", hotkey, "Ctrl+G")
def handler():
if GD["active_obj_level"] != "top":
GD["active_obj_level"] = "top"
Mgr.update_app("active_obj_level")
Mgr.enter_state("grouping_mode")
menu.add("add_to_group", "Add to group...", handler)
handler = lambda: Mgr.update_remotely("group", "remove_members")
menu.add("remove_from_group", "Remove from group", handler)
menu.add("sep1", item_type="separator")
menu.add("uvs", "Edit UVs", uv_edit_command)
hotkey = ("u", mod_key_codes["ctrl"])
menu.set_item_hotkey("uvs", hotkey, "Ctrl+U")
Mgr.add_app_updater("history", self.__check_undo_redo)
def setup(self):
def enter_grouping_mode(prev_state_id, active):
Mgr.do("set_viewport_border_color", "viewport_frame_group_objects")
Mgr.do("enable_gui")
add_state = Mgr.add_state
add_state("grouping_mode", -10, enter_grouping_mode)
def __check_undo_redo(self, update_type, *args, **kwargs):
if update_type != "check":
return
to_undo = GD["history_to_undo"]
to_redo = GD["history_to_redo"]
menu = self._menu
menu.enable_item("undo", to_undo)
menu.enable_item("redo", to_redo)
menu.enable_item("hist", to_undo or to_redo)
| 33.157143
| 79
| 0.603188
|
4a16d8e8b44044e96e1ba797482d96f01b44eb4c
| 3,883
|
py
|
Python
|
benchmark/startQiskit1985.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1985.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1985.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=31
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
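# Example (added for illustration): bitwise_dot("101", "111") sums the
# positions where both bits are 1 (here 1*1 + 0*1 + 1*1 = 2) and reduces
# mod 2, giving "0".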
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.cx(input_qubit[0],input_qubit[3]) # number=23
prog.x(input_qubit[3]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.rx(-0.48380526865282825,input_qubit[3]) # number=26
prog.h(input_qubit[1]) # number=2
prog.y(input_qubit[3]) # number=18
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[0],input_qubit[1]) # number=28
prog.x(input_qubit[1]) # number=29
prog.cx(input_qubit[0],input_qubit[1]) # number=30
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=22
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[0]) # number=14
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1985.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.061404
| 140
| 0.649498
|
4a16d8f489b9953e3f6a5e998a0657ab7c3c2617
| 2,882
|
py
|
Python
|
compressible_rk/simulation.py
|
SebastianoF/pyro2
|
9d1787c2ee25d735a414db3da8c00287743a6fde
|
[
"BSD-3-Clause"
] | 151
|
2018-08-14T12:52:22.000Z
|
2022-03-29T07:57:01.000Z
|
compressible_rk/simulation.py
|
SebastianoF/pyro2
|
9d1787c2ee25d735a414db3da8c00287743a6fde
|
[
"BSD-3-Clause"
] | 40
|
2015-03-25T15:45:44.000Z
|
2018-07-30T18:48:47.000Z
|
compressible_rk/simulation.py
|
SebastianoF/pyro2
|
9d1787c2ee25d735a414db3da8c00287743a6fde
|
[
"BSD-3-Clause"
] | 56
|
2018-10-10T16:54:59.000Z
|
2022-02-06T08:48:52.000Z
|
from __future__ import print_function
import numpy as np
import mesh.integration as integration
import compressible
import compressible_rk.fluxes as flx
class Simulation(compressible.Simulation):
"""The main simulation class for the method of lines compressible
hydrodynamics solver"""
def substep(self, myd):
"""
take a single substep in the RK timestepping starting with the
conservative state defined as part of myd
"""
myg = myd.grid
grav = self.rp.get_param("compressible.grav")
# compute the source terms
dens = myd.get_var("density")
ymom = myd.get_var("y-momentum")
ymom_src = myg.scratch_array()
ymom_src.v()[:, :] = dens.v()[:, :]*grav
E_src = myg.scratch_array()
E_src.v()[:, :] = ymom.v()[:, :]*grav
k = myg.scratch_array(nvar=self.ivars.nvar)
flux_x, flux_y = flx.fluxes(myd, self.rp,
self.ivars, self.solid, self.tc)
for n in range(self.ivars.nvar):
k.v(n=n)[:, :] = \
(flux_x.v(n=n) - flux_x.ip(1, n=n))/myg.dx + \
(flux_y.v(n=n) - flux_y.jp(1, n=n))/myg.dy
k.v(n=self.ivars.iymom)[:, :] += ymom_src.v()[:, :]
k.v(n=self.ivars.iener)[:, :] += E_src.v()[:, :]
return k
def method_compute_timestep(self):
"""
The timestep function computes the advective timestep (CFL)
constraint. The CFL constraint says that information cannot
propagate further than one zone per timestep.
We use the driver.cfl parameter to control what fraction of the
CFL step we actually take.
"""
cfl = self.rp.get_param("driver.cfl")
# get the variables we need
u, v, cs = self.cc_data.get_var(["velocity", "soundspeed"])
# the timestep is min(dx/(|u| + cs), dy/(|v| + cs))
xtmp = (abs(u) + cs)/self.cc_data.grid.dx
ytmp = (abs(v) + cs)/self.cc_data.grid.dy
self.dt = cfl*float(np.min(1.0/(xtmp + ytmp)))
def evolve(self):
"""
Evolve the equations of compressible hydrodynamics through a
timestep dt.
"""
tm_evolve = self.tc.timer("evolve")
tm_evolve.begin()
myd = self.cc_data
method = self.rp.get_param("compressible.temporal_method")
rk = integration.RKIntegrator(myd.t, self.dt, method=method)
rk.set_start(myd)
for s in range(rk.nstages()):
ytmp = rk.get_stage_start(s)
ytmp.fill_BC_all()
k = self.substep(ytmp)
rk.store_increment(s, k)
rk.compute_final_update()
if self.particles is not None:
self.particles.update_particles(self.dt)
# increment the time
myd.t += self.dt
self.n += 1
tm_evolve.end()
| 28.534653
| 71
| 0.572866
|
4a16d914eec860e87698700fed4e1eb47c4c7f70
| 1,102
|
py
|
Python
|
Global.py
|
giorgosdrainakis/dml
|
2c9bd589d2fb36f971a63256699ce16adbbc684d
|
[
"CC0-1.0"
] | null | null | null |
Global.py
|
giorgosdrainakis/dml
|
2c9bd589d2fb36f971a63256699ce16adbbc684d
|
[
"CC0-1.0"
] | null | null | null |
Global.py
|
giorgosdrainakis/dml
|
2c9bd589d2fb36f971a63256699ce16adbbc684d
|
[
"CC0-1.0"
] | null | null | null |
# Root
_ROOT='C:\\Pycharm\\Projects\\fl_tests\\'
_MODELS_FOLDER='outcome_models\\'
_DATASETS_FOLDER='training_datasets\\'
_MOBILITY_FOLDER='mobility_datasets\\'
_LOGS_FOLDER='logs\\'
_END_CSV_NAME='end_csv.txt'
_QMNIST_DATASET_PATH='QMNIST\\processed\\train.pt'
_INFIMNIST_DATASET_PATH='MNIST\\processed\\training.pt'
_CIFAR10_DATASET_PATH='cifar-10-batches-py\\'
_SVHN_PATH='SVHN\\'
_SVHN_DATASET_PATH='SVHN\\extra_32x32.mat'
_SHANGHAI_1='Shanghai_Sheet1.csv'
_SHANGHAI_2='Shanghai_sheet2.csv'
_SHANGHAI_1_DAY_1='Shanghai_Sheet1_day1.csv'
#_WIFIDOG='5g_20200803.csv'
_WIFIDOG='wifidog_exported.csv'
_DEBUG_FILENAME=None  # set to a log file name (without extension) before calling mydebug()
# Client settings - WifiDog traffic
#YEAR=2010
#MONTH=3
#DAY=8
# Client settings - Shanghai
#SHANGHAI_DATE='21/6/2014'
#SHANGHAI_BS='31.253346/121.448039'
# ML
_MODEL_CLASSES=62
BATCH_SIZE=64
T_BEGIN=3600*1
T_END=3600*2
MEAN_UL=0.2
SD_UL=0.05
MEAN_DL=0.5
SD_DL=0.15
MIN_UL_DL=0.1
Z=0.1
# FL
MEAN_PROC_TIME=2
SD_PROC_TIME=0.2
MIN_PROC_TIME=0.1
def mydebug(mystr):
    if _DEBUG_FILENAME is None:
        raise ValueError('Set _DEBUG_FILENAME before calling mydebug()')
    with open(_ROOT + _LOGS_FOLDER + _DEBUG_FILENAME + ".txt", mode='a') as file:
        file.write(mystr + '\n')
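# Usage sketch (added for illustration):
# _DEBUG_FILENAME = 'run1'
# mydebug('simulation started')  # appends to <_ROOT>/logs/run1.txt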
| 22.489796
| 81
| 0.774955
|
4a16d9efbffe83a3aa204a76f316d67146d8beb2
| 1,233
|
py
|
Python
|
dvxplorer_ros_driver/scripts/scripts_extra/save_pytorch_model_example.py
|
ziimiin14/rpg_dvs_ros_modifed
|
da63b163e5d7ee7ccb8335050d3a3303193d9d3a
|
[
"MIT"
] | null | null | null |
dvxplorer_ros_driver/scripts/scripts_extra/save_pytorch_model_example.py
|
ziimiin14/rpg_dvs_ros_modifed
|
da63b163e5d7ee7ccb8335050d3a3303193d9d3a
|
[
"MIT"
] | null | null | null |
dvxplorer_ros_driver/scripts/scripts_extra/save_pytorch_model_example.py
|
ziimiin14/rpg_dvs_ros_modifed
|
da63b163e5d7ee7ccb8335050d3a3303193d9d3a
|
[
"MIT"
] | null | null | null |
# Define model
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.optim as optim
class TheModelClass(nn.Module):
def __init__(self):
super(TheModelClass, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Initialize model
model = TheModelClass()
# Initialize optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Print model's state_dict
print("Model's state_dict:")
for param_tensor in model.state_dict():
print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# Print optimizer's state_dict
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
print(var_name, "\t", optimizer.state_dict()[var_name])
PATH = 'example.pt'
torch.save(model.state_dict(), PATH)
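# Loading counterpart (added for illustration): restore the saved weights
# into a fresh model instance before inference.
loaded_model = TheModelClass()
loaded_model.load_state_dict(torch.load(PATH))
loaded_model.eval()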
| 28.022727
| 70
| 0.636659
|
4a16dd7a202fdecf75417d011a9dd6c212b07e3d
| 738
|
py
|
Python
|
v11.py
|
senthilkumarIRTT/Image-and-Video-Processing-using-Python
|
8733010828d0bf8efaaf91776df5f68089f562bd
|
[
"BSD-2-Clause"
] | 1
|
2020-10-31T22:02:45.000Z
|
2020-10-31T22:02:45.000Z
|
v11.py
|
senthilkumarIRTT/Image-and-Video-Processing-using-Python
|
8733010828d0bf8efaaf91776df5f68089f562bd
|
[
"BSD-2-Clause"
] | null | null | null |
v11.py
|
senthilkumarIRTT/Image-and-Video-Processing-using-Python
|
8733010828d0bf8efaaf91776df5f68089f562bd
|
[
"BSD-2-Clause"
] | null | null | null |
#Edge Detection
import cv2
import numpy as np
# Creating a VideoCapture object to read the video
cap = cv2.VideoCapture('Blackbird.mp4')
# Loop until the end of the video
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:  # stop once the video has no more frames
        break
    # Display the resulting frame
    cv2.imshow('Original Colour video', frame)
# using cv2.Canny() for edge detection.
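# Note (added for illustration): 100 and 200 are the low and high
# hysteresis thresholds; gradients below 100 are rejected and those
# above 200 are always kept as edges.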
edge_detect = cv2.Canny(frame,100,200)
cv2.imshow('Edge detect',edge_detect)
# define q as the exit button
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# release the video capture object
cap.release()
# Closes all the windows currently opened.
cv2.destroyAllWindows()
| 25.448276
| 52
| 0.638211
|
4a16de6666971a29d443302260ff9b7bc7249f7b
| 22,041
|
py
|
Python
|
6_py/get.py
|
noisesesame/XFFF
|
7a46c634f1d40aeeed3dd8a9c0708a8e22641052
|
[
"MIT"
] | null | null | null |
6_py/get.py
|
noisesesame/XFFF
|
7a46c634f1d40aeeed3dd8a9c0708a8e22641052
|
[
"MIT"
] | null | null | null |
6_py/get.py
|
noisesesame/XFFF
|
7a46c634f1d40aeeed3dd8a9c0708a8e22641052
|
[
"MIT"
] | null | null | null |
#-*- coding:utf-8 -*-
from socket import *
from ast import literal_eval
import os
import random
s = socket(AF_INET, SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('', 80))
s.listen(1)
while 1:
try:
os.fork()
c, addr = s.accept()
rsp_200 = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nServer: XFFF/2.1.0\r\n\r\n'''
rsp_200_css = '''HTTP/1.1 200 OK\r\nContent-Type: text/css; charset=utf-8\r\nServer: XFFF/2.1.0\r\n\r\n'''
# login error
rsp_200_2 = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nServer: XFFF/2.1.0\r\n\r\n'''
rsp_zip = '''HTTP/1.1 200 OK\r\nContent-Type: application/x-zip-compressed\r\nAccept-Ranges: bytes\r\nServer: XFFF/2.1.0\r\n\r\n'''
rsp_logout = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nServer: XFFF/2.1.0\r\n'''
rsp_web_1 = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nServer: XFFF/2.1.0\r\n'''
rsp_web_3 = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\n'''
web_6_base64 = "VFZOM2QwMUVRWE5OUkVGM1RFUkJkMDFCUFQwPQ=="
rsp_web_6 = '''HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nSecurity_XFFF: ''' + web_6_base64 + '''\r\nServer: XFFF/2.1.0\r\n\r\n'''
rsp_rev = '''HTTP/1.1 200 OK\r\nContent-Type: application/x-zip-compressed\r\nAccept-Ranges: bytesServer: XFFF/2.1.0\r\n\r\n'''
main_db = "Your IP"
location_href_login = "http://" + main_db + "/login"
location_auth_db = "http://" + main_db + ":8080"
location_href_main = "http://" + main_db
data = c.recv(1024)
data_1 = data.split("\r\n")
if data_1[0][0:3] in "GET":
data_2 = data_1[0].split("/")
### index ###
if data_2[1] == " HTTP":
f = open("../5_web/index.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
### style ###
elif data_2[1] == "style.css HTTP":
f = open("../5_web/style.css","r")
rsp_200_css += f.read()
f.close()
c.send(rsp_200_css)
c.close()
### about ###
elif data_2[1] == "about HTTP":
f = open("../5_web/about.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
### LOGIN ###
elif data_2[1] == "login HTTP":
f = open("../2_db/session.db","r")
ses_db = f.read()
f.close()
ses_db = literal_eval(ses_db)
cok_check = "XFFF="
# { key : value } => { value : key }
rev_ses_db = {v: k for k, v in ses_db.items()}
for text in data_1:
if cok_check in text:
# text =>>> Cookie: XFFF=session
cok = text.split("=")
# Cookie: XFFF , session
cok = cok[1]
## [FOR_3]
if cok == "E2Y7Gl42dmCL9coRT1Rhr5LEA2oBT2sC4AVoUw9":
f = open("../5_web/login_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<br><br><h3>KeJ1NfgzJPcfNbjn4VXD1qicBZQa8pabeUHFow2</h3>'''
rsp_200 += "</div></body></html>"
c.send(rsp_200)
c.close()
else:
pass
try:
if rev_ses_db[cok]:
id = rev_ses_db[cok]
id = str(id)
f = open("../5_web/login_p.html","r")
rsp_200 += f.read()
f.close()
f = open("../2_db/info_id.db","r")
info_id = f.read()
info_id = literal_eval(info_id)
f.close()
point = info_id[id]["pt"]
rsp_200 += "<h3>[ POINT ]</h3>"
rsp_200 += "<h3>" + str(point) + "</h3><br><br>"
rsp_200 += "<h3>[ CLEAR ]</h3>"
if info_id[id]["web1"] == "y":
rsp_200 += "<h3>WEB Challenge 1 +200pt</h3>"
else:
pass
if info_id[id]["web2"] == "y":
rsp_200 += "<h3>WEB Challenge 2 +1800pt</h3>"
else:
pass
if info_id[id]["web3"] == "y":
rsp_200 += "<h3>WEB Challenge 3 +2000pt</h3>"
else:
pass
if info_id[id]["web4"] == "y":
rsp_200 += "<h3>WEB Challenge 4 +200pt</h3>"
else:
pass
if info_id[id]["web5"] == "y":
rsp_200 += "<h3>WEB Challenge 5 +100pt</h3>"
else:
pass
if info_id[id]["web6"] == "y":
rsp_200 += "<h3>WEB Challenge 6 +1800pt</h3>"
else:
pass
if info_id[id]["rev1"] == "y":
rsp_200 += "<h3>REVERSING Challenge 1 +200pt</h3>"
else:
pass
if info_id[id]["rev2"] == "y":
rsp_200 += "<h3>REVERSING Challenge 2 +1800pt</h3>"
else:
pass
if info_id[id]["rev3"] == "y":
rsp_200 += "<h3>REVERSING Challenge 3 +1000pt</h3>"
else:
pass
if info_id[id]["for1"] == "y":
rsp_200 += "<h3>FORENSICS Challenge 1 +300pt</h3>"
else:
pass
if info_id[id]["for2"] == "y":
rsp_200 += "<h3>FORENSICS Challenge 2 +1000pt</h3>"
else:
pass
if info_id[id]["for3"] == "y":
rsp_200 += "<h3>FORENSICS Challenge 3 +800pt</h3>"
else:
pass
if info_id[id]["for4"] == "y":
rsp_200 += "<h3>FORENSICS Challenge 4 +100pt</h3>"
else:
pass
if info_id[id]["for5"] == "y":
rsp_200 += "<h3>FORENSICS Challenge 5 +1100pt</h3>"
else:
pass
if info_id[id]["sys1"] == "y":
rsp_200 += "<h3>SYSTEM Challenge 1 +800pt</h3>"
else:
pass
rsp_200 += '''<br><br><h3><a href="/logout">[ LOGOUT ]</a></h3>'''
rsp_200 += "</div></body></html>"
c.send(rsp_200)
c.close()
except:
f = open("../5_web/login_main_p.html","r")
rsp_200_2 += f.read()
f.close()
rsp_200_2 += '''<form action="''' + location_auth_db
rsp_200_2 += '''" method="POST">'''
rsp_200_2 += '''<h4>ID <input type="text" name="id"><br><br></h4>'''
rsp_200_2 += '''<h4>PW <input type="password" name="pw"><br><br></h4>'''
rsp_200_2 += ''' <button type="submit">LOGIN</button>'''
rsp_200_2 += '''</form></div></body></html>'''
c.send(rsp_200_2)
c.close()
else:
pass
f = open("../5_web/login_main_p.html","r")
rsp_200_2 += f.read()
f.close()
rsp_200_2 += '''<form action="''' + location_auth_db
rsp_200_2 += '''" method="POST">'''
rsp_200_2 += '''<h4>ID <input type="text" name="id"><br><br></h4>'''
rsp_200_2 += '''<h4>PW <input type="password" name="pw"><br><br></h4>'''
rsp_200_2 += '''<button type="submit">LOGIN</button>'''
rsp_200_2 += '''</form></div></body></html>'''
c.send(rsp_200_2)
c.close()
### log_out ###
elif data_2[1] == "logout HTTP":
f = open("../2_db/session.db","r")
ses_db = f.read()
f.close()
ses_db = literal_eval(ses_db)
cok_check = "XFFF="
# { key : value } => { value : key }
rev_ses_db = {v: k for k, v in ses_db.items()}
for text in data_1:
if cok_check in text:
# text =>>> Cookie: XFFF=session
cok = text.split("=")
# Cookie: XFFF , session
cok = cok[1]
if len(cok) == 32:
pass
else:
#rsp_logout += '''Set-Cookie:XFFF=ppp\r\n\r\n'''
rsp_logout += '''\r\n<html><body><script>location.href="''' + location_href_login + '''";</script></body></html>'''
c.send(rsp_logout)
c.close()
try:
if rev_ses_db[cok]:
del rev_ses_db[cok]
ses_db = {v: k for k, v in rev_ses_db.items()}
f = open("../2_db/session.db","w")
f.write(str(ses_db))
f.close()
#rsp_logout += '''Set-Cookie:XFFF=ppp\r\n\r\n'''
rsp_logout += '''\r\n<html><body><script>location.href="''' + location_href_login + '''";</script></body></html>'''
c.send(rsp_logout)
c.close()
except:
#rsp_logout += '''Set-Cookie:XFFF=ppp\r\n\r\n'''
rsp_logout += '''\r\n<html><body><script>location.href="''' + location_href_login + '''";</script></body></html>'''
c.send(rsp_logout)
c.close()
else:
pass
rsp_logout += '''\r\n<html><body><script>location.href="''' + location_href_login + '''";</script></body></html>'''
c.send(rsp_logout)
c.close()
### auth ###
elif data_2[1] == "auth HTTP":
f = open("../5_web/auth_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<form action="''' + location_auth_db
rsp_200 += '''" method="POST">'''
rsp_200 += '''<input type="text" name="flag">'''
rsp_200 += '''<br><br><br><br><button type="submit">SUBMIT</button>'''
rsp_200 += '''</form></div></body></html>'''
c.send(rsp_200)
c.close()
### ico ###
elif data_2[1] == "favicon.ico HTTP":
c.close()
### challenge ###
elif data_2[1] == "challenge HTTP":
f = open("../5_web/challenge.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
### [WEB_1] ###
elif data_2[1] == "web_1 HTTP":
f = open("../5_web/web_1.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "robots.txt HTTP":
c.send(rsp_200 + "Disallow: /robots/secure_flag\n")
c.close()
elif data_2[1] == "robots" and data_2[2] == "secure_flag HTTP":
c.send(rsp_web_1 + "flag : qdfsD9arJyKt43arffUHLtHSeQ83R2dtnHqb\r\n\r\n" + "XFFF{=====================================}")
c.close()
### [WEB_2] ###
elif data_2[1] == "web_2 HTTP":
f = open("../5_web/web_2.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "..":
if data_2[2] == ".." and data_2[3] == ".." and data_2[4] == ".." and data_2[5] == ".." and data_2[6] == ".." and data_2[7] == ".." and data_2[8] == ".." and data_2[9] == ".." and data_2[10] == ".." and data_2[11] == ".." and data_2[12] == ".." and data_2[13] == ".." and data_2[14] == ".." and data_2[15] == ".." and data_2[16] == ".." and data_2[17] == ".." and data_2[18] == ".." and data_2[19] == ".." and data_2[20] == ".." and data_2[21] == ".." and data_2[22] == ".." and data_2[23] == ".." and data_2[24] == ".." and data_2[25] == ".." and data_2[26] == ".." and data_2[27] == ".." and data_2[28] == ".." and data_2[29] == ".." and data_2[30] == ".." and data_2[31] == "etc" and data_2[32] == "shadow HTTP":
c.send(rsp_200 + "root: KaRykPJHNGTJwZcBXf3BjujxzBsqv65tn5pR :18541:0:99999:7:::")
c.close()
else:
c.close()
### [WEB_3] ###
elif data_2[1] == "web_3 HTTP":
f = open("../5_web/web_3.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1][:11] == "favicon.ico":
if data_2[1][12:]:
c.send(rsp_web_3 + "Server : g4JzzdhnU8gVj3pLH2F4ZspAMG6ZnP3N7N4k\r\n\r\n" + "<html><head><title>404 Not Found</title></head><body><center><h1>404 Not Found</h1></center><hr><center>nginx/1.17.0</center></body></html>")
c.close()
else:
c.close()
### [WEB_4.php] ###
elif data_2[1] == "web_4.php HTTP":
f = open("../5_web/web_4.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "web_4.txt HTTP":
f = open("../5_web/web_4.txt.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
### [WEB_5] ###
elif data_2[1] == "web_5 HTTP":
f = open("../5_web/web_5.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "web_5?check_flag=Give_me_the_flag HTTP":
f = open("../5_web/web_5_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += "sk82VU2jdEnpCxa86z4ryT4HYQYHJs9VKEp9</pre></h3></div></body></html>"
c.send(rsp_200)
c.close()
### [WEB_6] ###
elif data_2[1] == "web_6 HTTP":
check_xfff = "Security_XFFF"
for text in data_1:
if check_xfff in text:
# Security_XFFF: VFZOM2QwMUVRWE5OUkVGM1RFUkJkMDFCUFQwPQ==
do_web_6_v1 = text.split(":")
do_web_6_v2 = do_web_6_v1[1].replace(" ","")
if do_web_6_v2 == "VFZOM2QwMUVRWE5OUkVGM1RFUkJkMDFCUFQwPQ==":
f = open("../5_web/web_6_p.html","r")
rsp_web_6 += f.read()
f.close()
rsp_web_6 += "1,000,000,000 $</h3><br><br><h3>WHfMeYp2n7wMJhEu6PkNkAsg2NWv8kwfPvDz</h3></div></body></html>"
c.send(rsp_web_6)
c.close()
else:
f = open("../5_web/web_6_p.html","r")
rsp_web_6 += f.read()
f.close()
rsp_web_6 += "1,000 $</h3></div></body></html>"
c.send(rsp_web_6)
c.close()
else:
pass
f = open("../5_web/web_6_p.html","r")
rsp_web_6 += f.read()
f.close()
rsp_web_6 += "1,000 $</h3></div></body></html>"
c.send(rsp_web_6)
c.close()
### [REV_1] ###
elif data_2[1] == "rev_1 HTTP":
f = open("../5_web/rev_1.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "rev_1.zip HTTP":
f = open("../5_web/rev_1.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
### [REV_2] ###
elif data_2[1] == "rev_2 HTTP":
f = open("../5_web/rev_2.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "rev_2.zip HTTP":
f = open("../5_web/rev_2.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
### [REV_3] ###
elif data_2[1] == "rev_3 HTTP":
f = open("../5_web/rev_3.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "rev_3.zip HTTP":
f = open("../5_web/rev_3.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
### [FOR_1] ###
elif data_2[1] == "for_1 HTTP":
f = open("../5_web/for_1.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "for_1.zip HTTP":
f = open("../5_web/for_1.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
### [FOR_2] ###
elif data_2[1] == "for_2 HTTP":
f = open("../5_web/for_2.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "for_2.zip HTTP":
f = open("../5_web/for_2.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
## [FOR_3] ##
## the cookie check for for_3 is handled at the top
elif data_2[1] == "for_3 HTTP":
f = open("../5_web/for_3.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "for_3.zip HTTP":
f = open("../5_web/for_3.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
## [FOR_4] ##
elif data_2[1] == "for_4 HTTP":
f = open("../5_web/for_4.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "for_4.zip HTTP":
f = open("../5_web/for_4.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
## [FOR_5] ##
elif data_2[1] == "for_5 HTTP":
f = open("../5_web/for_5.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
elif data_2[1] == "for_5.zip HTTP":
f = open("../5_web/for_5.zip","r")
rsp_zip += f.read()
f.close()
c.send(rsp_zip)
c.close()
### [SYS_1] ###
elif data_2[1] == "sys_1 HTTP":
f = open("../5_web/sys_1_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<form action="''' + location_href_main + '''" method="GET">'''
rsp_200 += '''<h4>XFFF(root)# <input type="text" name="cmd"><br><br></h4>'''
rsp_200 += '''<br><br></h4> <button type="submit">LOGIN</button></form></div></body></html>'''
c.send(rsp_200)
c.close()
elif data_2[1] == "?cmd=ifconfig HTTP":
f = open("../5_web/sys_1_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<form action="''' + location_href_main + '''" method="GET">'''
rsp_200 += '''<h4>XFFF(root)# <input type="text" name="cmd"><br><br></h4>'''
rsp_200 += '''<br><br></h4> <button type="submit">LOGIN</button></form></div></body></html>'''
rsp_200 += '''\r\n<!--\r\nxfff1: flags=9999<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500\r\n'''
rsp_200 += '''inet ''' + main_db + ''' netmask 255.255.255.0 broadcast security\r\n'''
rsp_200 += '''inet6 ffff:ffff:ffff:ffff:ffff:xfff prefixlen 200 scopeid 0x90<link>\r\n'''
rsp_200 += '''ether 00:00:00:00:00:00 txqueuelen 9000 (Ethernet)\r\n'''
rsp_200 += '''RX packets 320138 bytes 32945881 (31.4 MiB)\r\n'''
rsp_200 += '''RX errors 0 dropped 0 overruns 0 frame 0\r\n'''
rsp_200 += '''TX packets 236134 bytes 96378216 (91.9 MiB)\r\n'''
rsp_200 += '''TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\r\n --> \r\n\r\n\r\n'''
c.send(rsp_200)
c.close()
elif data_2[1] == "?cmd=ifconfig ; ls HTTP":
f = open("../5_web/sys_1_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<form action="''' + location_href_main + '''" method="GET">'''
rsp_200 += '''<h4>XFFF(root)# <input type="text" name="cmd"><br><br></h4>'''
rsp_200 += '''<br><br></h4> <button type="submit">LOGIN</button></form></div></body></html>'''
rsp_200 += '''\r\n<!--\r\nxfff1: flags=9999<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500\r\n'''
rsp_200 += '''inet ''' + main_db + ''' netmask 255.255.255.0 broadcast security\r\n'''
rsp_200 += '''inet6 ffff:ffff:ffff:ffff:ffff:xfff prefixlen 200 scopeid 0x90<link>\r\n'''
rsp_200 += '''ether 00:00:00:00:00:00 txqueuelen 9000 (Ethernet)\r\n'''
rsp_200 += '''RX packets 320138 bytes 32945881 (31.4 MiB)\r\n'''
rsp_200 += '''RX errors 0 dropped 0 overruns 0 frame 0\r\n'''
rsp_200 += '''TX packets 236134 bytes 96378216 (91.9 MiB)\r\n'''
rsp_200 += '''TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\r\n'''
rsp_200 += '''a.html b.html b2.html gubijkaegkjekjfn.html private_flaj.txt bb9sagbi3g.html ''' * 15
rsp_200 += '''a.html b.html b2.html gubijkaegkjekjfn.html private_flag.txt bb9sagbi3g.html '''
rsp_200 += '''a.html b.html b2.html gubijkaegkjekjfn.html private_flaj.txt bb9sagbi3g.html ''' * 15
rsp_200 += '''a.html b.html b2.html gubijkaegkjekjfn.html private_flaj.txt bb9sagbi3g.html --> \r\n\r\n\r\n'''
c.send(rsp_200)
c.close()
elif data_2[1] == "?cmd=ifconfig ; cat private_flag.txt HTTP":
f = open("../5_web/sys_1_p.html","r")
rsp_200 += f.read()
f.close()
rsp_200 += '''<form action="''' + location_href_main + '''" method="GET">'''
rsp_200 += '''<h4>XFFF(root)# <input type="text" name="cmd"><br><br></h4>'''
rsp_200 += '''<br><br></h4> <button type="submit">LOGIN</button></form></div></body></html>'''
rsp_200 += '''\r\n<!--\r\nxfff1: flags=9999<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500\r\n'''
rsp_200 += '''inet ''' + main_db + ''' netmask 255.255.255.0 broadcast security\r\n'''
rsp_200 += '''inet6 ffff:ffff:ffff:ffff:ffff:xfff prefixlen 200 scopeid 0x90<link>\r\n'''
rsp_200 += '''ether 00:00:00:00:00:00 txqueuelen 9000 (Ethernet)\r\n'''
rsp_200 += '''RX packets 320138 bytes 32945881 (31.4 MiB)\r\n'''
rsp_200 += '''RX errors 0 dropped 0 overruns 0 frame 0\r\n'''
rsp_200 += '''TX packets 236134 bytes 96378216 (91.9 MiB)\r\n'''
rsp_200 += '''TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\r\n'''
rsp_200 += '''E2Y7Gl42dmCL9coRT1Rhr5LEA2oBT2sC4AVoUw9 --> \r\n\r\n\r\n'''
c.send(rsp_200)
c.close()
### ETC... ###
else:
f = open("../5_web/index.html","r")
rsp_200 += f.read()
f.close()
c.send(rsp_200)
c.close()
else:
c.close()
c.close()
except:
pass
| 20.911765
| 718
| 0.481965
|
4a16df5a52c48a4598757c3988248a613812eae6
| 647
|
py
|
Python
|
trend_django/trend/models.py
|
csbok/github-trending
|
596c1663eb7644b17bccaad25ff795f4e61c8cbc
|
[
"MIT"
] | 7
|
2017-06-14T11:43:45.000Z
|
2021-11-20T08:16:17.000Z
|
trend_django/trend/models.py
|
csbok/github-trending
|
596c1663eb7644b17bccaad25ff795f4e61c8cbc
|
[
"MIT"
] | null | null | null |
trend_django/trend/models.py
|
csbok/github-trending
|
596c1663eb7644b17bccaad25ff795f4e61c8cbc
|
[
"MIT"
] | 2
|
2018-05-06T19:57:20.000Z
|
2020-06-27T07:35:08.000Z
|
from django.db import models
# Create your models here.
from django.utils import timezone
class TodayTrend(models.Model):
rank = models.IntegerField()
url = models.CharField(max_length=2000)
desc = models.TextField()
language = models.CharField(max_length=250)
star_count = models.IntegerField()
fork_count = models.IntegerField()
today_star_count = models.IntegerField()
created_at = models.DateTimeField(default=timezone.now)
def __str__(self):
return str(self.created_at) + ' | ' + str(self.today_star_count) + ' | ' + str(self.rank) + ' | ' + self.url
class Meta:
ordering = ['-pk']
| 30.809524
| 116
| 0.683153
|
4a16df61cdedf12a764a33284afa676d0a1fe119
| 3,266
|
py
|
Python
|
mot_api/app.py
|
Artas03/Project-2
|
dc44519e9a3b832184c02ba0751b013c8a890b90
|
[
"ADSL"
] | null | null | null |
mot_api/app.py
|
Artas03/Project-2
|
dc44519e9a3b832184c02ba0751b013c8a890b90
|
[
"ADSL"
] | null | null | null |
mot_api/app.py
|
Artas03/Project-2
|
dc44519e9a3b832184c02ba0751b013c8a890b90
|
[
"ADSL"
] | null | null | null |
from flask import Flask, request, Response
app = Flask(__name__)
# MOT years remaining, keyed by (make, model); unknown pairs fall back to '0'.
MOT_TABLE = {
    ('BMW', '4 series'): '3', ('BMW', 'C63s'): '0', ('BMW', 'GTC'): '0',
    ('BMW', 'Civic'): '2', ('BMW', 'RS7'): '0', ('BMW', 'Scirocco'): '3',
    ('Mercedes', '4 series'): '6', ('Mercedes', 'C63s'): '3', ('Mercedes', 'GTC'): '3',
    ('Mercedes', 'Civic'): '5', ('Mercedes', 'RS7'): '3', ('Mercedes', 'Scirocco'): '6',
    ('Vauxhall', '4 series'): '6', ('Vauxhall', 'C63s'): '3', ('Vauxhall', 'GTC'): '3',
    ('Vauxhall', 'Civic'): '5', ('Vauxhall', 'RS7'): '3', ('Vauxhall', 'Scirocco'): '6',
    ('Honda', '4 series'): '5', ('Honda', 'C63s'): '2', ('Honda', 'GTC'): '2',
    ('Honda', 'Civic'): '4', ('Honda', 'RS7'): '2', ('Honda', 'Scirocco'): '5',
    ('Audi', '4 series'): '5', ('Audi', 'C63s'): '2', ('Audi', 'GTC'): '2',
    ('Audi', 'Civic'): '4', ('Audi', 'RS7'): '2', ('Audi', 'Scirocco'): '5',
    ('Volkswagen', '4 series'): '6', ('Volkswagen', 'C63s'): '3', ('Volkswagen', 'GTC'): '3',
    ('Volkswagen', 'Civic'): '5', ('Volkswagen', 'RS7'): '3', ('Volkswagen', 'Scirocco'): '6',
}
@app.route('/get_mot', methods=['GET', 'POST'])
def get_mot():
    item = request.get_json()
    mot = MOT_TABLE.get((item['make'], item['model']), '0')
    return Response(str(mot))
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5003, debug=True)
| 37.54023
| 70
| 0.489896
|
4a16e065700d38bea203286614ffaf8d5a73d0be
| 414
|
py
|
Python
|
djangogram/users/migrations/0004_alter_user_name.py
|
HaewonSon/djangoInstagram
|
dde98d95dc59f44b62efb90007556447eb59acda
|
[
"MIT"
] | null | null | null |
djangogram/users/migrations/0004_alter_user_name.py
|
HaewonSon/djangoInstagram
|
dde98d95dc59f44b62efb90007556447eb59acda
|
[
"MIT"
] | null | null | null |
djangogram/users/migrations/0004_alter_user_name.py
|
HaewonSon/djangoInstagram
|
dde98d95dc59f44b62efb90007556447eb59acda
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-13 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20211206_1056'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, verbose_name='Name'),
),
]
| 21.789474
| 84
| 0.60628
|
4a16e11d30299b47c532f821fca2dd6ace937154
| 63
|
py
|
Python
|
enthought/logger/log_queue_handler.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/logger/log_queue_handler.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/logger/log_queue_handler.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from apptools.logger.log_queue_handler import *
| 21
| 47
| 0.825397
|
4a16e18bdf461f34154a931737503511f44368f1
| 4,249
|
py
|
Python
|
company/serializers.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | null | null | null |
company/serializers.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | null | null | null |
company/serializers.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | 1
|
2021-07-01T03:05:21.000Z
|
2021-07-01T03:05:21.000Z
|
from rest_framework import serializers
from .models import ListModel
from userprofile.models import Users
import re
from rest_framework.exceptions import APIException
def data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj:
raise APIException({'detail': 'Bad data cannot be stored'})
elif select_obj:
raise APIException({'detail': 'Bad data cannot be stored'})
else:
return data
def openid_validate(data):
if Users.objects.filter(openid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
def appid_validate(data):
if Users.objects.filter(appid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
class CompanyGetSerializer(serializers.ModelSerializer):
company_name = serializers.CharField(read_only=True, required=False)
company_city = serializers.CharField(read_only=True, required=False)
company_address = serializers.CharField(read_only=True, required=False)
company_contact = serializers.IntegerField(read_only=True, required=False)
company_manager = serializers.CharField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id']
class CompanyPostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[openid_validate])
company_name = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_address = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_contact = serializers.IntegerField(read_only=False, required=True, validators=[data_validate])
company_manager = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class CompanyUpdateSerializer(serializers.ModelSerializer):
company_name = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_address = serializers.CharField(read_only=False, required=True, validators=[data_validate])
company_contact = serializers.IntegerField(read_only=False, required=True, validators=[data_validate])
company_manager = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class CompanyPartialUpdateSerializer(serializers.ModelSerializer):
company_name = serializers.CharField(read_only=False, required=False, validators=[data_validate])
company_city = serializers.CharField(read_only=False, required=False, validators=[data_validate])
company_address = serializers.CharField(read_only=False, required=False, validators=[data_validate])
company_contact = serializers.IntegerField(read_only=False, required=False, validators=[data_validate])
company_manager = serializers.CharField(read_only=False, required=False, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=False, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
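# Usage sketch (added for illustration): the shared validators either
# return the value unchanged or raise for suspicious/unknown input.
# data_validate('ACME Ltd')                    # -> 'ACME Ltd'
# data_validate('<script>alert(1)</script>')   # raises APIException
# openid_validate('missing-openid')            # raises if no such user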
| 53.78481
| 107
| 0.746529
|
4a16e2371f7bb7468ba1e021370b848d50a1ec70
| 3,127
|
py
|
Python
|
bench/benchmark_fuzz.py
|
hugolmn/RapidFuzz
|
81ac43cbf28f5c002f7c4c522a263f5042ed7f25
|
[
"MIT"
] | 554
|
2020-03-19T13:46:16.000Z
|
2020-04-07T13:42:55.000Z
|
bench/benchmark_fuzz.py
|
hugolmn/RapidFuzz
|
81ac43cbf28f5c002f7c4c522a263f5042ed7f25
|
[
"MIT"
] | 104
|
2020-11-30T09:34:33.000Z
|
2022-03-17T21:03:22.000Z
|
bench/benchmark_fuzz.py
|
hugolmn/RapidFuzz
|
81ac43cbf28f5c002f7c4c522a263f5042ed7f25
|
[
"MIT"
] | 32
|
2020-12-16T13:49:56.000Z
|
2022-02-17T12:31:28.000Z
|
# todo combine benchmarks of scorers into common code base
import timeit
import pandas
import numpy as np
def benchmark(name, func, setup, lengths, count):
print(f"starting {name}")
start = timeit.default_timer()
results = []
for length in lengths:
test = timeit.Timer(func, setup=setup.format(length, count))
results.append(min(test.timeit(number=1) for _ in range(7)) / count)
stop = timeit.default_timer()
print(f"finished {name}, Runtime: ", stop - start)
return results
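# Example (added for illustration): time a trivial snippet once per length.
# benchmark("demo", "sum(range(100))", "", [10], 5)  # -> [seconds_per_call]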
setup ="""
from rapidfuzz import fuzz as rfuzz
from fuzzywuzzy import fuzz
import string
import random
random.seed(18)
characters = string.ascii_letters + string.digits + string.whitespace + string.punctuation
a = ''.join(random.choice(characters) for _ in range({0}))
b_list = [''.join(random.choice(characters) for _ in range({0})) for _ in range({1})]
"""
lengths = list(range(1,512,2))
count = 1000
def scorer_benchmark(funcname):
time_rapidfuzz = benchmark("rapidfuzz",
f'[rfuzz.{funcname}(a, b) for b in b_list]',
setup, lengths, count)
time_fuzzywuzzy = benchmark("fuzzywuzzy",
f'[fuzz.{funcname}(a, b) for b in b_list]',
setup, lengths, count)
df = pandas.DataFrame(data={
"length": lengths,
"rapidfuzz": time_rapidfuzz,
"fuzzywuzzy": time_fuzzywuzzy,
})
df.to_csv(f"results/{funcname}.csv", sep=',',index=False)
scorer_benchmark("ratio")
scorer_benchmark("partial_ratio")
scorer_benchmark("token_sort_ratio")
scorer_benchmark("token_set_ratio")
scorer_benchmark("partial_token_sort_ratio")
scorer_benchmark("partial_token_set_ratio")
scorer_benchmark("WRatio")
# token_ratio is unique to RapidFuzz
time_token_ratio = benchmark("token_ratio",
f'[rfuzz.token_ratio(a, b, processor=None) for b in b_list]',
setup, lengths, count)
# this variant gets very slow for longer strings
time_token_ratio_simple = benchmark("max(token_sort_ratio, token_set_ratio)",
    '[max(rfuzz.token_sort_ratio(a, b, processor=None), rfuzz.token_set_ratio(a, b, processor=None)) for b in b_list]',
    setup, lengths, count)
df = pandas.DataFrame(data={
"length": lengths,
"token_ratio": time_token_ratio,
"max(token_sort_ratio, token_set_ratio)": time_token_ratio_simple,
})
df.to_csv(f"results/token_ratio.csv", sep=',',index=False)
# partial_token_ratio is unique to RapidFuzz
time_partial_token_ratio = benchmark("token_ratio",
f'[rfuzz.partial_token_ratio(a, b, processor=None) for b in b_list]',
setup, lengths, count)
# this gets very slow, so only benchmark it for smaller values
time_partial_token_ratio_simple = benchmark("fuzzywuzzy",
f'[max(rfuzz.partial_token_sort_ratio(a, b, processor=None), rfuzz.partial_token_set_ratio(a, b, processor=None)) for b in b_list]',
setup, lengths, count)
df = pandas.DataFrame(data={
"length": lengths,
"partial_token_ratio": time_partial_token_ratio,
"max(partial_token_sort_ratio, partial_token_set_ratio)": time_partial_token_ratio_simple,
})
df.to_csv(f"results/partial_token_ratio.csv", sep=',',index=False)
| 34.744444
| 136
| 0.713463
|