hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26ae6b9eec60876cee6615b37836ae8c99446850 | 878 | py | Python | tests/test_validate.py | pladdy/jsonschema-scratch | 95e9c3a27b4121d6ebc74f5ac8c30b29a0c50fec | [
"MIT"
] | null | null | null | tests/test_validate.py | pladdy/jsonschema-scratch | 95e9c3a27b4121d6ebc74f5ac8c30b29a0c50fec | [
"MIT"
] | 3 | 2020-06-12T04:10:42.000Z | 2020-07-03T15:06:32.000Z | tests/test_validate.py | pladdy/jsonschema-scratch | 95e9c3a27b4121d6ebc74f5ac8c30b29a0c50fec | [
"MIT"
] | 1 | 2020-06-13T01:11:49.000Z | 2020-06-13T01:11:49.000Z | import http
import json
# Sample payload that conforms to the equipment.json schema
# (accepted with 200 OK in test_validate_ok below).
ammy = {
    "name": "red amulet",
    "description": "This amulet is red.",
    "type": "amulet",
    "armor-class-types": ["edged", "blunt", "mind", "energy"],
}
# Same payload without "armor-class-types"; the server rejects it with
# 422 UNPROCESSABLE_ENTITY, so that field is evidently required by the schema.
invalid_ammy = {
    "name": "red amulet",
    "description": "This amulet is red.",
    "type": "amulet",
}
def test_validate_ok(client_in_test):
    """A schema-conforming document is accepted with 200 OK."""
    payload = json.dumps(ammy)
    response = client_in_test.post("/validate?schema=equipment.json", data=payload)
    assert response.status_code == http.HTTPStatus.OK
def test_validate_bad_request(client_in_test):
    """Omitting the schema query parameter yields 400 BAD_REQUEST."""
    response = client_in_test.post("/validate", data=json.dumps(ammy))
    assert response.status_code == http.HTTPStatus.BAD_REQUEST
def test_validate_invalid_post(client_in_test):
    """A document missing a required field yields 422 UNPROCESSABLE_ENTITY."""
    payload = json.dumps(invalid_ammy)
    response = client_in_test.post("/validate?schema=equipment.json", data=payload)
    assert response.status_code == http.HTTPStatus.UNPROCESSABLE_ENTITY
| 25.085714 | 72 | 0.676538 |
6598685e20f9dc15e7781a61dd436bab76164d22 | 4,289 | py | Python | python/tests/test_async_writer.py | nii-gakunin-cloud/sinetstream | abcf8ce800c9970bb51b2eaff54f3845c40c114f | [
"Apache-2.0"
] | 5 | 2020-03-24T15:28:53.000Z | 2022-03-18T06:59:42.000Z | python/tests/test_async_writer.py | nii-gakunin-cloud/sinetstream | abcf8ce800c9970bb51b2eaff54f3845c40c114f | [
"Apache-2.0"
] | null | null | null | python/tests/test_async_writer.py | nii-gakunin-cloud/sinetstream | abcf8ce800c9970bb51b2eaff54f3845c40c114f | [
"Apache-2.0"
] | 3 | 2020-03-24T15:28:52.000Z | 2021-04-01T14:51:42.000Z | #!/usr/local/bin/python3.6
# vim: expandtab shiftwidth=4
# Copyright (C) 2019 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pytest
from conftest import SERVICE, TOPIC, TOPIC2
from promise import Promise
from sinetstream import (
AsyncMessageWriter, AT_MOST_ONCE, AT_LEAST_ONCE, EXACTLY_ONCE,
InvalidArgumentError, TEXT,
)
# Silence library debug/info output during test runs.
logging.basicConfig(level=logging.ERROR)
# Apply the shared config/plugin fixtures to every test in this module.
pytestmark = pytest.mark.usefixtures('setup_config', 'dummy_writer_plugin')
@pytest.mark.parametrize("topics", [
    TOPIC,
    [TOPIC],
])
def test_writer_topic(topics):
    """A topic may be given as a plain string or a one-item list."""
    writer = AsyncMessageWriter(SERVICE, topics)
    with writer:
        pass
@pytest.mark.parametrize("config_topic", [None, [], [TOPIC, TOPIC2]])
def test_writer_bad_topics():
    """Opening fails when the config supplies no usable single topic."""
    with pytest.raises(InvalidArgumentError):
        with AsyncMessageWriter(SERVICE):
            pass
@pytest.mark.parametrize("consistency", [AT_MOST_ONCE, AT_LEAST_ONCE, EXACTLY_ONCE])
def test_writer_consistency(consistency):
    """Every supported consistency level is accepted and exposed as-is."""
    with AsyncMessageWriter(SERVICE, consistency=consistency) as writer:
        assert writer.consistency == consistency
@pytest.mark.parametrize('config_params', [
    {'consistency': 'AT_MOST_ONCE'},
    {'consistency': 'AT_LEAST_ONCE'},
    {'consistency': 'EXACTLY_ONCE'},
])
def test_writer_consistency_in_config_file(config_params):
    """Consistency configured via the config file is reflected on the writer."""
    # Map names to the imported constants explicitly instead of eval()-ing
    # the string — same result, no dynamic code execution.
    levels = {
        'AT_MOST_ONCE': AT_MOST_ONCE,
        'AT_LEAST_ONCE': AT_LEAST_ONCE,
        'EXACTLY_ONCE': EXACTLY_ONCE,
    }
    with AsyncMessageWriter(SERVICE) as f:
        assert levels[config_params['consistency']] == f.consistency
@pytest.mark.parametrize("consistency", [999, "XXX"])
def test_writer_bad_consistency(consistency):
    """Unknown consistency values are rejected at open time."""
    with pytest.raises(InvalidArgumentError):
        with AsyncMessageWriter(SERVICE, consistency=consistency):
            pass
def test_writer_client_id_default():
    """When no client_id is given, a non-empty one is generated."""
    with AsyncMessageWriter(SERVICE) as writer:
        assert writer.client_id is not None and writer.client_id != ""
def test_writer_client_id_set():
    """An explicitly supplied client_id is used verbatim."""
    explicit_id = "oreore"
    with AsyncMessageWriter(SERVICE, client_id=explicit_id) as writer:
        assert writer.client_id == explicit_id
def test_writer_deser():
    """A custom value_serializer callable is accepted."""
    with AsyncMessageWriter(SERVICE, value_serializer=(lambda x: x)):
        pass
def test_open_close():
    """A writer can be opened and closed explicitly (no context manager)."""
    handle = AsyncMessageWriter(SERVICE).open()
    handle.close()
def test_close_twice():
    """Closing an already-closed writer must be a harmless no-op."""
    handle = AsyncMessageWriter(SERVICE).open()
    handle.close()
    handle.close()
@pytest.mark.parametrize('config_topic', [TOPIC])
def test_writer_topic_in_config_file():
    """A single topic in the config file becomes the writer's topic."""
    with AsyncMessageWriter(SERVICE) as writer:
        assert writer.topic == TOPIC
@pytest.mark.parametrize('config_topic', [[TOPIC]])
def test_writer_topic_list_one_item_in_config_file():
    """A one-item topic list in the config is unwrapped to a single topic."""
    with AsyncMessageWriter(SERVICE) as writer:
        assert writer.topic == TOPIC
@pytest.mark.parametrize('config_topic', [[TOPIC, TOPIC2]])
def test_writer_topic_list_in_config_file():
    """A multi-topic list in the config is rejected for writers."""
    with pytest.raises(InvalidArgumentError):
        with AsyncMessageWriter(SERVICE):
            pass
@pytest.mark.parametrize('config_topic', [TOPIC])
def test_writer_topic_in_config_file_and_arg():
    """A positional topic argument overrides the config file."""
    with AsyncMessageWriter(SERVICE, TOPIC2) as writer:
        assert writer.topic == TOPIC2
@pytest.mark.parametrize('config_topic', [TOPIC])
def test_writer_topic_in_config_file_and_kwarg():
    """A topic keyword argument overrides the config file."""
    with AsyncMessageWriter(topic=TOPIC2, service=SERVICE) as writer:
        assert writer.topic == TOPIC2
def test_async_write():
    """publish() returns a Promise and the then-callback fires per message."""
    completed = []
    with AsyncMessageWriter(SERVICE, TOPIC, value_type=TEXT) as writer:
        for payload in ['message-1', 'message-2']:
            promise = writer.publish(payload).then(lambda _: completed.append(1))
            assert isinstance(promise, Promise)
    assert len(completed) == 2
| 29.993007 | 84 | 0.727675 |
69683daa164109eaa040982b5c5cfe86e6ae6636 | 808 | py | Python | manage.py | Ruterana/Hood | 8d2c5f7daaf5af43ca74792cf7a13eeaf957d8f9 | [
"MIT"
] | null | null | null | manage.py | Ruterana/Hood | 8d2c5f7daaf5af43ca74792cf7a13eeaf957d8f9 | [
"MIT"
] | null | null | null | manage.py | Ruterana/Hood | 8d2c5f7daaf5af43ca74792cf7a13eeaf957d8f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Neiborhood.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # probe import only; result is unused
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django exists, so re-raise the original, unrelated ImportError.
        raise
    # Dispatch the management command given on the command line.
    execute_from_command_line(sys.argv)
| 35.130435 | 77 | 0.643564 |
d02854ab8a370474fd67f8bb30231936fb0c0544 | 3,585 | py | Python | jardel.py | wellingtonfs/fsek-pessoal | c4e63eb36050614cab18f2ccc6eb5c62cd8b867d | [
"MIT"
] | null | null | null | jardel.py | wellingtonfs/fsek-pessoal | c4e63eb36050614cab18f2ccc6eb5c62cd8b867d | [
"MIT"
] | null | null | null | jardel.py | wellingtonfs/fsek-pessoal | c4e63eb36050614cab18f2ccc6eb5c62cd8b867d | [
"MIT"
] | 4 | 2019-08-20T15:07:01.000Z | 2020-03-10T13:53:18.000Z | #!/usr/bin/env python3
# so that script can be run from Brickman
import termios, tty, sys
from ev3dev.ev3 import *
import colorsys
# attach large motors to ports B and C, medium motor to port A
# Drive motors on output ports C (left) and D (right).
motor_left = LargeMotor('outC')
motor_right = LargeMotor('outD')
Sensor_Cor = [ColorSensor('in2'), ColorSensor('in1')] #1 = Left, 2 = Right
# Start both sensors in discrete color-code mode; the main loop switches
# them to RGB-RAW temporarily when it needs a raw reading.
Sensor_Cor[0].mode = 'COL-COLOR'
Sensor_Cor[1].mode = 'COL-COLOR'
def convertHSV(r, g, b):
    """Convert an RGB triple (each in [0, 1]) to an (h, s, v) tuple."""
    return colorsys.rgb_to_hsv(r, g, b)
def convertRGB(h, s, v):
    """Convert an (h, s, v) triple back to an (r, g, b) tuple in [0, 1]."""
    return colorsys.hsv_to_rgb(h, s, v)
def Verifica_Cor(x, y, z):
    """Snap a raw RGB sensor reading (0..1023 per channel) to the nearest
    palette color and return its code ('1'..'6').

    The reading is re-saturated (s forced to 0.8, v to 1) before matching,
    so even dim readings snap to a fully saturated reference color.
    """
    # Normalize the 10-bit channels to [0, 1]; only the hue is kept.
    h, _, _ = convertHSV(x / 1023, y / 1023, z / 1023)
    r, g, b = convertRGB(h, 0.8, 1)
    r, g, b = r * 255, g * 255, b * 255
    # Reference palette: color code -> hex triplet.
    palette = {
        "1": "#000000",  # black
        "5": "#FF0000",  # red
        "4": "#FFFF00",  # yellow
        "3": "#00FF00",  # green
        "2": "#0000FF",  # blue
        "6": "#FFFFFF",  # white
    }
    best_code = None
    best_diff = None
    for code, hex_str in palette.items():
        pr = int(hex_str[1:3], 16)
        pg = int(hex_str[3:5], 16)
        pb = int(hex_str[5:7], 16)
        # Scaled Manhattan distance (same weighting as the original code).
        diff = (abs(r - pr) + abs(g - pg) + abs(b - pb)) * 256
        if best_diff is None or diff < best_diff:
            best_diff = diff
            best_code = code
    return best_code
# motor_a = MediumMotor('outA')
# motor_b = MediumMotor('outB')
def getch():
    """Read one raw character from stdin without waiting for Enter.

    BUG FIX: restore the terminal attributes in a ``finally`` block so the
    terminal is not left stuck in cbreak mode if the read is interrupted
    (e.g. by Ctrl-C).
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setcbreak(fd)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def forward():
    """Drive straight ahead: both wheels forward at fixed speed."""
    for motor in (motor_left, motor_right):
        motor.run_forever(speed_sp=200)
#==============================================
def back():
    """Drive straight backwards: both wheels in reverse at fixed speed."""
    for motor in (motor_left, motor_right):
        motor.run_forever(speed_sp=-200)
#==============================================
def left():
    """Spin in place to the left (left wheel reverse, right wheel forward)."""
    motor_right.run_forever(speed_sp=200)
    motor_left.run_forever(speed_sp=-200)
#==============================================
def right():
    """Spin in place to the right (left wheel forward, right wheel reverse)."""
    motor_right.run_forever(speed_sp=-200)
    motor_left.run_forever(speed_sp=200)
#==============================================
def stop():
    """Halt both drive motors (commanded speed 0)."""
    for motor in (motor_left, motor_right):
        motor.run_forever(speed_sp=0)
# #==============================================
# def up():
# motor_a.run_forever(speed_sp=200)
# motor_b.run_forever(speed_sp=-200)
# def down():
# motor_a.run_forever(speed_sp=-200)
# motor_b.run_forever(speed_sp=200)
# Main teleoperation loop: report the detected color whenever both sensors
# agree, then map single keypresses to drive commands.
while True:
    if (Sensor_Cor[0].value() == Sensor_Cor[1].value()):
        # Switch to raw RGB mode just long enough to take one sample,
        # then return to discrete color mode.
        Sensor_Cor[0].mode = 'RGB-RAW'
        Sensor_Cor[1].mode = 'RGB-RAW'
        (x, y, z) = (Sensor_Cor[0].value(0), Sensor_Cor[0].value(1), Sensor_Cor[0].value(2))
        color = Verifica_Cor(x, y, z)
        Sensor_Cor[0].mode = 'COL-COLOR'
        Sensor_Cor[1].mode = 'COL-COLOR'
        print(color)
    k = getch()
    print(k)
    if k == 'w':
        forward()
    elif k == 's':
        back()
    elif k == 'd':
        right()
    elif k == 'a':
        left()
    elif k == ' ':
        stop()
    elif k in ('o', 'p'):
        # BUG FIX: these keys used to call up()/down(), but those functions
        # are commented out above, so pressing 'o' or 'p' crashed the loop
        # with a NameError. Ignore them until the arm motors return.
        pass
    elif k == 'q':
        exit()
6f2126a50628b232f641d4f2737f6cbf73d29b02 | 2,052 | py | Python | verbs/open.py | TheGoozah/fips | a421873f7ae2aeee86abc789d38c44ebaa06093b | [
"MIT"
] | 1 | 2021-06-02T19:42:10.000Z | 2021-06-02T19:42:10.000Z | verbs/open.py | TheGoozah/fips | a421873f7ae2aeee86abc789d38c44ebaa06093b | [
"MIT"
] | null | null | null | verbs/open.py | TheGoozah/fips | a421873f7ae2aeee86abc789d38c44ebaa06093b | [
"MIT"
] | null | null | null | """implement the 'open' verb
open
open [config]
"""
import os
import glob
import subprocess
from mod import log, util, settings, config, project
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
    """run the 'open' verb (opens project in IDE)

    :param fips_dir:    absolute path of the fips directory
    :param proj_dir:    absolute path of the current project
    :param args:        optional [config-name]
    """
    if not util.is_valid_project_dir(proj_dir) :
        # NOTE(review): this assumes log.error() terminates the process,
        # otherwise execution would continue with an invalid dir — confirm.
        log.error('must be run in a project directory')
    proj_name = util.get_project_name_from_dir(proj_dir)
    # explicit config argument wins, otherwise fall back to the active config
    cfg_name = None
    if len(args) > 0 :
        cfg_name = args[0]
    if not cfg_name :
        cfg_name = settings.get(proj_dir, 'config')
    # check the cmake generator of this config
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if configs :
        # hmm, only look at first match, 'open' doesn't
        # make sense with config-patterns
        cfg = configs[0]
        # find build dir, if it doesn't exist, generate it
        build_dir = util.get_build_dir(fips_dir, proj_name, cfg)
        if not os.path.isdir(build_dir) :
            log.warn("build dir not found, generating...")
            project.gen(fips_dir, proj_dir, cfg['name'])
        if 'Xcode' in cfg['generator'] :
            # find the Xcode project
            proj = glob.glob(build_dir + '/*.xcodeproj')
            # BUG FIX: report a missing project file instead of crashing
            # with an IndexError on an empty glob result
            if not proj :
                log.error("no .xcodeproj found in '{}'".format(build_dir))
            subprocess.call('open {}'.format(proj[0]), shell=True)
        elif 'Visual Studio' in cfg['generator'] :
            # find the VisualStudio project file
            proj = glob.glob(build_dir + '/*.sln')
            if not proj :
                log.error("no .sln file found in '{}'".format(build_dir))
            subprocess.call('cmd /c start {}'.format(proj[0]), shell=True)
        else :
            log.error("don't know how to open a '{}' project".format(cfg['generator']))
    else :
        log.error("config '{}' not found".format(cfg_name))
#-------------------------------------------------------------------------------
def help() :
    """print help for verb 'open'"""
    usage = "fips open\n" "fips open [config]\n"
    log.info(log.YELLOW + usage + log.DEF + "    open IDE for current or named config")
| 34.779661 | 87 | 0.549708 |
2babce0c5337929a75ca4d518a84fd1cea779881 | 3,007 | py | Python | deep_rl/common/util.py | jkulhanek/deep-rl-pytorch | 6fa7ceee8524f002d4a8d93295b231f6b9b7c29c | [
"MIT"
] | 7 | 2019-03-24T19:51:11.000Z | 2022-01-27T17:20:29.000Z | deep_rl/common/util.py | jkulhanek/deep-rl-pytorch | 6fa7ceee8524f002d4a8d93295b231f6b9b7c29c | [
"MIT"
] | null | null | null | deep_rl/common/util.py | jkulhanek/deep-rl-pytorch | 6fa7ceee8524f002d4a8d93295b231f6b9b7c29c | [
"MIT"
] | 4 | 2020-04-11T01:06:24.000Z | 2021-07-18T01:22:36.000Z | from collections import OrderedDict, Callable
import os
from functools import partial
import contextlib
class DefaultOrderedDict(OrderedDict):
    """An OrderedDict with defaultdict semantics.

    Missing keys are materialized on access via ``default_factory`` while
    insertion order is preserved.
    Based on: http://stackoverflow.com/a/6190500/562769
    """
    def __init__(self, default_factory=None, *a, **kw):
        if (default_factory is not None and
           not isinstance(default_factory, Callable)):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory
    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)
    def __missing__(self, key):
        # Mirror defaultdict: create, store and return the default value.
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value
    def __reduce__(self):
        # Pickle support: rebuild with the factory as a positional argument.
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,
        return type(self), args, None, None, iter(self.items())
    def copy(self):
        return self.__copy__()
    def __copy__(self):
        return type(self)(self.default_factory, self)
    def __deepcopy__(self, memo):
        import copy
        # BUG FIX: deep-copying the items *view* raises TypeError on
        # Python 3 (dict views are not picklable); materialize it first.
        return type(self)(self.default_factory,
                          copy.deepcopy(list(self.items()), memo))
    def __repr__(self):
        # BUG FIX: report the actual class name (was 'OrderedDefaultDict').
        return '%s(%s, %s)' % (type(self).__name__, self.default_factory,
                               OrderedDict.__repr__(self))
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        # x: the wrapped callable (often a lambda/closure plain pickle rejects)
        self.x = x
    def __call__(self, *args, **kwargs):
        # Delegate calls straight through to the wrapped callable.
        return self.x(*args, **kwargs)
    def __getstate__(self):
        # Serialize with cloudpickle so lambdas/closures survive pickling.
        import cloudpickle
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        # cloudpickle emits standard pickle data, so stdlib pickle loads it.
        import pickle
        self.x = pickle.loads(ob)
def serialize_function(fun, *args, **kwargs):
    """Wrap *fun* plus its arguments into a cloudpickle-friendly factory.

    The returned wrapper, when called with a context object, yields a
    partial of *fun*. Bound methods are unbound and re-bound via that
    context argument; plain functions ignore it.
    """
    if hasattr(fun, '__self__'):
        raw = fun.__func__

        def factory(ctx):
            return partial(raw, ctx, *args, **kwargs)
    else:
        def factory(_):
            return partial(fun, *args, **kwargs)
    return CloudpickleWrapper(factory)
@contextlib.contextmanager
def clear_mpi_env_vars():
    """Temporarily strip OMPI_*/PMI_* variables from ``os.environ``.

    ``from mpi4py import MPI`` calls MPI_Init on import, so a child process
    that inherits MPI environment variables may believe it is an MPI rank
    itself and misbehave (e.g. hang). This hackily stashes those variables,
    yields, then restores them — use it around starting multiprocessing
    Processes.
    """
    stashed = {}
    for name in list(os.environ):
        if name.startswith(('OMPI_', 'PMI_')):
            stashed[name] = os.environ.pop(name)
    try:
        yield
    finally:
        os.environ.update(stashed)
a800b3b3d844bc5022073972f2edf84ece2273f5 | 2,034 | py | Python | setup.py | s0ng/spotify-ripper | d0464193dead7bd3ac7580e98bde86a0f323acae | [
"MIT"
] | 536 | 2016-10-05T15:31:29.000Z | 2022-03-27T22:24:34.000Z | setup.py | s0ng/spotify-ripper | d0464193dead7bd3ac7580e98bde86a0f323acae | [
"MIT"
] | 62 | 2016-10-11T01:53:38.000Z | 2022-02-07T08:35:39.000Z | setup.py | s0ng/spotify-ripper | d0464193dead7bd3ac7580e98bde86a0f323acae | [
"MIT"
] | 148 | 2016-10-04T11:54:01.000Z | 2022-03-27T22:24:55.000Z | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
import os
def create_default_dir():
    """Ensure the per-user settings directory ``~/.spotify-ripper`` exists."""
    default_dir = os.path.normpath(os.path.realpath(
        (os.path.join(os.path.expanduser("~"), ".spotify-ripper"))))
    if not os.path.exists(default_dir):
        print("Creating default settings directory: " +
              default_dir)
        # BUG FIX: pass the str path directly; encoding it to bytes is
        # unnecessary on POSIX and not portable to Windows.
        os.makedirs(default_dir)
def _read(fn):
    """Return the text contents of *fn*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), fn)
    # BUG FIX: close the file handle instead of leaking it.
    with open(path) as f:
        return f.read()
# Package definition for spotify-ripper; executed at install/build time.
setup(
    name='spotify-ripper',
    version='2.9.1',
    packages=find_packages(exclude=["tests"]),
    scripts=['spotify_ripper/main.py'],
    include_package_data=True,
    zip_safe=False,
    # Executable: installs the `spotify-ripper` console command.
    entry_points={
        'console_scripts': [
            'spotify-ripper = main:main',
        ],
    },
    # Additional data
    package_data={
        '': ['README.rst', 'LICENCE']
    },
    # Requirements (pinned/minimum versions of runtime dependencies)
    install_requires=[
        'pyspotify==2.0.5',
        'colorama==0.3.3',
        'mutagen==1.30',
        'requests>=2.3.0',
        'schedule>=0.3.1',
    ],
    # Metadata
    author='James Newell',
    author_email='james.newell@gmail.com',
    description='a small ripper for Spotify that rips Spotify URIs '
                'to audio files',
    license='MIT',
    keywords="spotify ripper mp3 ogg vorbis flac opus acc mp4 m4a",
    url='https://github.com/jrnewell/spotify-ripper',
    download_url='https://github.com/jrnewell/spotify-ripper/tarball/2.9.1',
    classifiers=[
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        "Intended Audience :: Developers",
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    long_description=_read('README.rst'),
)
# Create the user's settings directory as an install-time side effect.
create_default_dir()
| 27.486486 | 76 | 0.605703 |
2ef6051d010bad7e47ffb12b9187b627e357a0e7 | 8,208 | py | Python | util/utils.py | suntingtencent/face.evoLVe.PyTorch | be99ba87d2db3b96bbb85628dfd0aca78346b1aa | [
"MIT"
] | 1 | 2019-01-26T03:41:09.000Z | 2019-01-26T03:41:09.000Z | util/utils.py | XZJie-AILab/face.evoLVe.PyTorch | be99ba87d2db3b96bbb85628dfd0aca78346b1aa | [
"MIT"
] | null | null | null | util/utils.py | XZJie-AILab/face.evoLVe.PyTorch | be99ba87d2db3b96bbb85628dfd0aca78346b1aa | [
"MIT"
] | null | null | null | import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
from .verification import evaluate
from datetime import datetime
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
from PIL import Image
import bcolz
import io
import os
# Support: ['get_time', 'l2_norm', 'make_weights_for_balanced_classes', 'get_val_pair', 'get_val_data', 'separate_irse_bn_paras', 'separate_resnet_bn_paras', 'warm_up_lr', 'schedule_lr', 'de_preprocess', 'hflip_batch', 'ccrop_batch', 'gen_plot', 'perform_val', 'buffer_val', 'AverageMeter', 'accuracy']
def get_time():
    """Return the current local time formatted as 'YYYY-MM-DD-HH-MM'.

    BUG FIX: the previous ``str(datetime.now())[:-10]`` slicing produced a
    truncated string whenever microsecond == 0 (``str()`` then omits the
    '.ffffff' suffix); ``strftime`` formats unconditionally.
    """
    return datetime.now().strftime('%Y-%m-%d-%H-%M')
def l2_norm(input, axis = 1):
    """L2-normalize `input` along `axis` (norm dimension kept for broadcast)."""
    return torch.div(input, torch.norm(input, 2, axis, True))
def make_weights_for_balanced_classes(images, nclasses):
    '''
    Build a per-image weight vector inversely proportional to class
    frequency. Feed the result to a WeightedRandomSampler so a DataLoader
    draws class-balanced training batches.

    images - torchvisionDataset.imgs (sequence of (img-data, label-id))
    nclasses - len(torchvisionDataset.classes)
    https://discuss.pytorch.org/t/balanced-sampling-between-classes-with-torchvision-dataloader/2703/3
    '''
    counts = [0] * nclasses
    for item in images:
        counts[item[1]] += 1  # item is (img-data, label-id)
    total = float(sum(counts))  # total number of images
    # Weight of a class = total / frequency of that class.
    per_class = [total / float(c) for c in counts]
    return [per_class[item[1]] for item in images]
def get_val_pair(path, name):
    """Load one cached validation set: a bcolz carray of images plus the
    matching is-same-identity label array from '<path>/<name>_list.npy'."""
    images = bcolz.carray(rootdir = os.path.join(path, name), mode = 'r')
    labels = np.load('{}/{}_list.npy'.format(path, name))
    return images, labels
def get_val_data(data_path):
    """Load every cached validation benchmark under `data_path`.

    Returns the seven image carrays followed by their seven is-same label
    arrays, in the fixed order: lfw, cfp_ff, cfp_fp, agedb_30, calfw,
    cplfw, vgg2_fp.
    """
    names = ['lfw', 'cfp_ff', 'cfp_fp', 'agedb_30', 'calfw', 'cplfw', 'vgg2_fp']
    pairs = [get_val_pair(data_path, name) for name in names]
    carrays = [carray for carray, _ in pairs]
    issames = [issame for _, issame in pairs]
    return (*carrays, *issames)
def separate_irse_bn_paras(modules):
    """Split a model's parameters into BatchNorm params and all others
    (so weight decay can be applied only to the non-BN group).

    `modules` may be a module or an already-flattened list of modules.
    """
    if not isinstance(modules, list):
        modules = [*modules.modules()]
    bn_params = []
    other_params = []
    for layer in modules:
        layer_cls = str(layer.__class__)
        # Skip wrappers: the top-level model object and container modules.
        if 'model' in layer_cls or 'container' in layer_cls:
            continue
        bucket = bn_params if 'batchnorm' in layer_cls else other_params
        bucket.extend([*layer.parameters()])
    return bn_params, other_params
def separate_resnet_bn_paras(modules):
    """Split parameters by name: those whose qualified name contains 'bn'
    versus everything else (for selective weight decay)."""
    bn_params = [p for name, p in modules.named_parameters() if 'bn' in name]
    bn_ids = set(map(id, bn_params))
    other_params = [p for p in modules.parameters() if id(p) not in bn_ids]
    return bn_params, other_params
def warm_up_lr(batch, num_batch_warm_up, init_lr, optimizer):
    """Linearly ramp every param group's LR from 0 to `init_lr` over the
    first `num_batch_warm_up` batches (called once per batch)."""
    warm_lr = batch * init_lr / num_batch_warm_up
    for group in optimizer.param_groups:
        group['lr'] = warm_lr
def schedule_lr(optimizer):
    """Divide every param group's learning rate by 10, then log the optimizer."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] / 10.
    print(optimizer)
def de_preprocess(tensor):
    """Undo the (x - 0.5) / 0.5 normalization: map [-1, 1] back to [0, 1]."""
    rescaled = tensor * 0.5
    return rescaled + 0.5
# Test-time-augmentation pipeline used by hflip_batch:
# de-normalize -> PIL image -> horizontal flip -> tensor -> re-normalize.
hflip = transforms.Compose([
    de_preprocess,
    transforms.ToPILImage(),
    transforms.functional.hflip,
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
def hflip_batch(imgs_tensor):
    """Return a new batch where every image has been horizontally flipped
    via the `hflip` transform pipeline."""
    flipped = torch.empty_like(imgs_tensor)
    for idx, img in enumerate(imgs_tensor):
        flipped[idx] = hflip(img)
    return flipped
# Center-crop pipeline used by ccrop_batch:
# de-normalize -> PIL -> resize -> center crop to 112x112 -> re-normalize.
ccrop = transforms.Compose([
    de_preprocess,
    transforms.ToPILImage(),
    transforms.Resize([128, 128]), # smaller side resized
    transforms.CenterCrop([112, 112]),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
def ccrop_batch(imgs_tensor):
    """Return a new batch where every image has been resized and
    center-cropped via the `ccrop` transform pipeline."""
    cropped = torch.empty_like(imgs_tensor)
    for idx, img in enumerate(imgs_tensor):
        cropped[idx] = ccrop(img)
    return cropped
def gen_plot(fpr, tpr):
    """Render an ROC curve (fpr vs. tpr) to a JPEG in an in-memory buffer."""
    plt.figure()
    plt.xlabel("FPR", fontsize = 14)
    plt.ylabel("TPR", fontsize = 14)
    plt.title("ROC Curve", fontsize = 14)
    plt.plot(fpr, tpr, linewidth = 2)
    buffer = io.BytesIO()
    plt.savefig(buffer, format = 'jpeg')
    buffer.seek(0)
    plt.close()
    return buffer
def perform_val(multi_gpu, device, embedding_size, batch_size, backbone, carray, issame, nrof_folds = 10, tta = True):
    """Run a verification benchmark and return (mean accuracy, mean best
    threshold, ROC-curve image tensor).

    carray: preprocessed validation images (presumably NCHW, normalized to
    [-1, 1] to match the ccrop/hflip pipelines — TODO confirm).
    issame: per-pair same-identity labels consumed by evaluate().
    tta: when True, embeddings are the sum of the center-cropped and the
    flipped forward passes, then L2-normalized.
    """
    if multi_gpu:
        backbone = backbone.module # unpackage model from DataParallel
        backbone = backbone.to(device)
    else:
        backbone = backbone.to(device)
    backbone.eval() # switch to evaluation mode
    idx = 0
    embeddings = np.zeros([len(carray), embedding_size])
    with torch.no_grad():
        # Full-size batches first ...
        while idx + batch_size <= len(carray):
            batch = torch.tensor(carray[idx:idx + batch_size])
            if tta:
                ccropped = ccrop_batch(batch)
                fliped = hflip_batch(ccropped)
                # Sum of original + flipped embeddings, re-normalized.
                emb_batch = backbone(ccropped.to(device)).cpu() + backbone(fliped.to(device)).cpu()
                embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
            else:
                ccropped = ccrop_batch(batch)
                embeddings[idx:idx + batch_size] = backbone(ccropped.to(device)).cpu()
            idx += batch_size
        # ... then the final partial batch, if any.
        if idx < len(carray):
            batch = torch.tensor(carray[idx:])
            if tta:
                ccropped = ccrop_batch(batch)
                fliped = hflip_batch(ccropped)
                emb_batch = backbone(ccropped.to(device)).cpu() + backbone(fliped.to(device)).cpu()
                embeddings[idx:] = l2_norm(emb_batch)
            else:
                ccropped = ccrop_batch(batch)
                embeddings[idx:] = backbone(ccropped.to(device)).cpu()
    # K-fold verification evaluation over the pair labels.
    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
    buf = gen_plot(fpr, tpr)
    roc_curve = Image.open(buf)
    roc_curve_tensor = transforms.ToTensor()(roc_curve)
    return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
def buffer_val(writer, db_name, acc, best_threshold, roc_curve_tensor, epoch):
    """Log one validation round (accuracy, threshold, ROC image) to a
    TensorBoard summary writer under tags prefixed with `db_name`."""
    writer.add_scalar('{}_Accuracy'.format(db_name), acc, epoch)
    writer.add_scalar('{}_Best_Threshold'.format(db_name), best_threshold, epoch)
    writer.add_image('{}_ROC_Curve'.format(db_name), roc_curve_tensor, epoch)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count/average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # last value seen
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight (number of observations)
        self.avg = 0    # running average: sum / count
    def update(self, val, n = 1):
        """Record `val` observed `n` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute precision@k (as a percentage) for each k in `topk`."""
    maxk = max(topk)
    batch_size = target.size(0)
    # Top-maxk predicted class indices per sample, transposed to (maxk, batch).
    _, top_idx = output.topk(maxk, 1, True, True)
    top_idx = top_idx.t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
    results = []
    for k in topk:
        # A sample counts as correct@k if the target is in its first k guesses.
        hits_k = hits[:k].reshape(-1).float().sum(0)
        results.append(hits_k.mul_(100.0 / batch_size))
    return results
6da26fd3d50d37840a9a4ff25ea12eeed2ac29b4 | 27 | py | Python | dem_tiler/handlers/__init__.py | kylebarron/dem-tiler | b10d1f61cf89c1a24af5643d0bd4ddee87c52508 | [
"MIT"
] | 10 | 2020-06-18T20:11:21.000Z | 2022-03-11T00:52:41.000Z | dem_tiler/handlers/__init__.py | kylebarron/dem-mosaic-tiler | b10d1f61cf89c1a24af5643d0bd4ddee87c52508 | [
"MIT"
] | 6 | 2020-05-26T06:25:46.000Z | 2020-06-08T00:14:11.000Z | dem_tiler/handlers/__init__.py | kylebarron/dem-mosaic-tiler | b10d1f61cf89c1a24af5643d0bd4ddee87c52508 | [
"MIT"
] | 2 | 2020-08-10T06:26:25.000Z | 2021-07-19T13:14:53.000Z | """dem_tiler: handlers."""
| 13.5 | 26 | 0.62963 |
3405dd73609119f952b2a0fe7b0212aa22b20b02 | 12,395 | py | Python | yolo_batch/src/yolo_dataset.py | hellowaywewe/species-detection | 6c9940c79af9c8763c211a03f4b44ca118b91c91 | [
"Apache-2.0"
] | 2 | 2021-12-31T11:41:47.000Z | 2022-01-01T15:29:11.000Z | yolo_batch/src/yolo_dataset.py | xiuyanDL/species-detection | 6c9940c79af9c8763c211a03f4b44ca118b91c91 | [
"Apache-2.0"
] | null | null | null | yolo_batch/src/yolo_dataset.py | xiuyanDL/species-detection | 6c9940c79af9c8763c211a03f4b44ca118b91c91 | [
"Apache-2.0"
] | 1 | 2021-12-31T08:29:48.000Z | 2021-12-31T08:29:48.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YOLOV3 dataset."""
import os
import multiprocessing
import cv2
from PIL import Image
from pycocotools.coco import COCO
import mindspore.dataset as de
import mindspore.dataset.vision.c_transforms as CV
from src.distributed_sampler import DistributedSampler
from src.transforms import reshape_fn, MultiScaleTrans
min_keypoints_per_image = 10
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def has_valid_annotation(anno):
    """Decide whether an image's annotation list is usable for training."""
    # No annotation at all.
    if not anno:
        return False
    # Every box has close-to-zero area: effectively unannotated.
    if _has_only_empty_bbox(anno):
        return False
    # Pure detection task: valid boxes are sufficient.
    if "keypoints" not in anno[0]:
        return True
    # Keypoint task: demand a minimum number of visible keypoints.
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
class COCOYoloDataset:
    """YOLOV3 Dataset for COCO.

    Wraps a pycocotools COCO annotation file plus an image root directory,
    exposing (image, boxes+labels) samples for training or (image, id)
    pairs for evaluation.
    """
    def __init__(self, root, ann_file, remove_images_without_annotations=True,
                 filter_crowd_anno=True, is_training=True):
        # root: image directory; ann_file: COCO-format JSON annotation path.
        self.coco = COCO(ann_file)
        self.root = root
        self.img_ids = list(sorted(self.coco.imgs.keys()))
        self.filter_crowd_anno = filter_crowd_anno
        self.is_training = is_training
        # filter images without any annotations
        if remove_images_without_annotations:
            img_ids = []
            for img_id in self.img_ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    img_ids.append(img_id)
            self.img_ids = img_ids
        # id -> human-readable category name.
        self.categories = {cat["id"]: cat["name"] for cat in self.coco.cats.values()}
        # COCO category ids are sparse; remap them onto 0..N-1 (and back).
        self.cat_ids_to_continuous_ids = {
            v: i for i, v in enumerate(self.coco.getCatIds())
        }
        self.continuous_ids_cat_ids = {
            v: k for k, v in self.cat_ids_to_continuous_ids.items()
        }
    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            In training mode: (img, out_target, [], [], [], [], [], []) where
            out_target rows are [x_min, y_min, x_max, y_max, label] and the
            empty lists are placeholders filled later by the batch transform.
            In eval mode: (img, img_id). img is a PIL RGB image.
        """
        coco = self.coco
        img_id = self.img_ids[index]
        img_path = coco.loadImgs(img_id)[0]["file_name"]
        img = Image.open(os.path.join(self.root, img_path)).convert("RGB")
        if not self.is_training:
            # Evaluation only needs the image and its COCO id.
            return img, img_id
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        # filter crowd annotations
        if self.filter_crowd_anno:
            annos = [anno for anno in target if anno["iscrowd"] == 0]
        else:
            annos = [anno for anno in target]
        target = {}
        boxes = [anno["bbox"] for anno in annos]
        target["bboxes"] = boxes
        classes = [anno["category_id"] for anno in annos]
        # Remap sparse COCO category ids to the continuous 0..N-1 range.
        classes = [self.cat_ids_to_continuous_ids[cl] for cl in classes]
        target["labels"] = classes
        bboxes = target['bboxes']
        labels = target['labels']
        out_target = []
        for bbox, label in zip(bboxes, labels):
            tmp = []
            # convert to [x_min y_min x_max y_max]
            bbox = self._convetTopDown(bbox)
            tmp.extend(bbox)
            tmp.append(int(label))
            # tmp [x_min y_min x_max y_max, label]
            out_target.append(tmp)
        return img, out_target, [], [], [], [], [], []
    def __len__(self):
        # Number of (possibly filtered) images in the dataset.
        return len(self.img_ids)
    def _convetTopDown(self, bbox):
        # Convert COCO [x, y, w, h] to corner format [x_min, y_min, x_max, y_max].
        x_min = bbox[0]
        y_min = bbox[1]
        w = bbox[2]
        h = bbox[3]
        return [x_min, y_min, x_min+w, y_min+h]
def create_yolo_dataset(image_dir, anno_path, batch_size, max_epoch, device_num, rank,
                        config=None, is_training=True, shuffle=True):
    """Create the YOLOv3 dataset pipeline.

    Args:
        image_dir (str): directory containing the COCO images.
        anno_path (str): path to the COCO annotation json.
        batch_size (int): samples per batch (remainder batches are dropped).
        max_epoch (int): number of times the dataset is repeated.
        device_num (int): number of devices participating in the job.
        rank (int): rank of this device, used by the distributed sampler.
        config: global config object; ``config.dataset_size`` is written here,
            so it must not actually be None despite the default.
        is_training (bool): training mode enables crowd filtering, removal of
            images without annotations, and the multi-scale batch transform.
        shuffle (bool): whether the sampler shuffles the data.

    Returns:
        tuple: ``(dataset, dataset_size)``.
    """
    # Avoid oversubscription: OpenCV worker threads would fight the loaders.
    cv2.setNumThreads(0)
    if is_training:
        filter_crowd = True
        remove_empty_anno = True
    else:
        filter_crowd = False
        remove_empty_anno = False
    yolo_dataset = COCOYoloDataset(root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
                                   remove_images_without_annotations=remove_empty_anno, is_training=is_training)
    distributed_sampler = DistributedSampler(len(yolo_dataset), device_num, rank, shuffle=shuffle)
    hwc_to_chw = CV.HWC2CHW()
    config.dataset_size = len(yolo_dataset)
    cores = multiprocessing.cpu_count()
    # Integer workers per device; never drop to 0 when device_num > cores
    # (the old int(cores / device_num) could yield 0 parallel workers).
    num_parallel_workers = max(1, cores // device_num)
    if is_training:
        multi_scale_trans = MultiScaleTrans(config, device_num)
        dataset_column_names = ["image", "annotation", "bbox1", "bbox2", "bbox3",
                                "gt_box1", "gt_box2", "gt_box3"]
        if device_num != 8:
            ds = de.GeneratorDataset(yolo_dataset, column_names=dataset_column_names,
                                     num_parallel_workers=min(32, num_parallel_workers),
                                     sampler=distributed_sampler)
            ds = ds.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                          num_parallel_workers=min(32, num_parallel_workers), drop_remainder=True)
        else:
            ds = de.GeneratorDataset(yolo_dataset, column_names=dataset_column_names, sampler=distributed_sampler)
            ds = ds.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                          num_parallel_workers=min(8, num_parallel_workers), drop_remainder=True)
    else:
        ds = de.GeneratorDataset(yolo_dataset, column_names=["image", "img_id"],
                                 sampler=distributed_sampler)
        compose_map_func = (lambda image, img_id: reshape_fn(image, img_id, config))
        ds = ds.map(operations=compose_map_func, input_columns=["image", "img_id"],
                    output_columns=["image", "image_shape", "img_id"],
                    column_order=["image", "image_shape", "img_id"],
                    num_parallel_workers=8)
        ds = ds.map(operations=hwc_to_chw, input_columns=["image"], num_parallel_workers=8)
        ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat(max_epoch)
    return ds, len(yolo_dataset)
# NOTE(review): an exact byte-for-byte duplicate definition of
# create_yolo_dataset used to live here; Python silently rebinds the name to
# the last definition, so this second copy shadowed the first. The duplicate
# has been removed — the definition above is the one in effect.
class COCOYoloDatasetv2():
    """Annotation-free YOLO dataset: image paths are listed in a text file.

    Each line of ``data_txt`` is a path whose basename is an image named after
    its integer image id, e.g. ``000123.JPG``.
    """
    def __init__(self, root, data_txt):
        """
        Args:
            root (str): directory containing the image files.
            data_txt (str): text file with one image path per line.
        """
        self.root = root
        image_list = []
        with open(data_txt, 'r') as f:
            for line in f:
                image_list.append(os.path.basename(line.strip()))
        self.img_path = image_list

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            (img, img_id) (tuple): ``img`` is a PIL RGB image and ``img_id``
            is the integer id parsed from the file name.
        """
        file_name = self.img_path[index]
        # Strip the extension to recover the numeric id. splitext (rather than
        # the old replace('.JPG', '')) also handles .jpg/.png correctly, which
        # previously crashed int() on any non-'.JPG' file name.
        img_id = os.path.splitext(file_name)[0]
        img = Image.open(os.path.join(self.root, file_name)).convert("RGB")
        return img, int(img_id)

    def __len__(self):
        return len(self.img_path)
def create_yolo_datasetv2(image_dir,
                          data_txt,
                          batch_size,
                          max_epoch,
                          device_num,
                          rank,
                          default_config=None,
                          shuffle=True):
    """
    Create the annotation-free (inference) YOLO dataset pipeline.

    Args:
        image_dir (str): directory containing the image files.
        data_txt (str): text file listing one image path per line.
        batch_size (int): samples per batch (remainder batches are dropped).
        max_epoch (int): number of times the dataset is repeated.
        device_num (int): number of participating devices.
        rank (int): rank of this device, used by the distributed sampler.
        default_config: global config object; ``default_config.dataset_size``
            is written here, so it must not actually be None despite the default.
        shuffle (bool): whether the sampler shuffles the data.

    Returns:
        tuple: ``(dataset, dataset_size)``.
    """
    yolo_dataset = COCOYoloDatasetv2(root=image_dir, data_txt=data_txt)
    distributed_sampler = DistributedSampler(len(yolo_dataset), device_num, rank, shuffle=shuffle)
    hwc_to_chw = CV.HWC2CHW()
    default_config.dataset_size = len(yolo_dataset)
    cores = multiprocessing.cpu_count()
    # Integer workers per device; never drop to 0 when device_num > cores
    # (the old int(cores / device_num) could yield 0 parallel workers).
    num_parallel_workers = max(1, cores // device_num)
    ds = de.GeneratorDataset(yolo_dataset, column_names=["image", "img_id"],
                             sampler=distributed_sampler)
    compose_map_func = (lambda image, img_id: reshape_fn(image, img_id, default_config))
    ds = ds.map(input_columns=["image", "img_id"],
                output_columns=["image", "image_shape", "img_id"],
                column_order=["image", "image_shape", "img_id"],
                operations=compose_map_func, num_parallel_workers=min(8, num_parallel_workers))
    ds = ds.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=min(8, num_parallel_workers))
    ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat(max_epoch)
    return ds, len(yolo_dataset)
| 40.907591 | 114 | 0.629125 |
6b0044f53d54db2a3ad02778f03911cd852a4edd | 8,710 | py | Python | dvg_fftw_welchpowerspectrum.py | Dennis-van-Gils/DvG_Arduino_lock-in_amp | 4576aa529413c93c6e4d6152802349f5f0c3ee16 | [
"MIT"
] | 9 | 2020-07-02T15:23:18.000Z | 2022-03-04T03:32:04.000Z | dvg_fftw_welchpowerspectrum.py | Dennis-van-Gils/DvG_Arduino_lock-in_amp | 4576aa529413c93c6e4d6152802349f5f0c3ee16 | [
"MIT"
] | 3 | 2019-04-22T22:53:56.000Z | 2021-11-02T20:13:43.000Z | dvg_fftw_welchpowerspectrum.py | Dennis-van-Gils/DvG_Arduino_lock-in_amp | 4576aa529413c93c6e4d6152802349f5f0c3ee16 | [
"MIT"
] | 2 | 2021-11-05T14:34:22.000Z | 2022-03-04T03:32:06.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Performs lightning-fast power-spectrum calculations on 1D time series data
acquired at a fixed sampling frequency using Welch's method.
The fast-Fourier transform (FFT) is performed by the excellent `fftw`
(http://www.fftw.org/) library. It will plan the transformations ahead of time
to optimize the calculations. Also, multiple threads can be specified for the
FFT and, when set to > 1, the Python GIL will not be invoked. This results in
true multithreading across multiple cores, which can result in a huge
performance gain. It can outperform the `numpy` and `scipy` libraries by a
factor of > 8 in calculation speed.
Futher improvement to the calculation speed in this module comes from the use of
the `numba.njit()` decorator around arithmetic functions, releasing the Python
GIL as well.
"""
__author__ = "Dennis van Gils"
__authoremail__ = "vangils.dennis@gmail.com"
__url__ = "https://github.com/Dennis-van-Gils/python-dvg-signal-processing"
__date__ = "29-05-2021"
__version__ = "1.0.0"
# pylint: disable=invalid-name, missing-function-docstring, too-many-instance-attributes
import sys
import numpy as np
from scipy import signal
import pyfftw
from numba import njit
p_njit = {"nogil": True, "cache": True, "fastmath": False}
@njit("float64[:,:](float64[:], float64[:,:])", **p_njit)
def fast_multiply_window(window: np.ndarray, data: np.ndarray) -> np.ndarray:
    # Apply the 1D window to every row of `data` at once (broadcast multiply),
    # compiled by numba with nogil so it can run truly multithreaded.
    return np.multiply(window, data)
@njit("float64[:,:](complex128[:,:], float64)", **p_njit)
def fast_conjugate_rescale(data: np.ndarray, scale: float) -> np.ndarray:
    # |X|^2 = conj(X) * X; take the real part and apply the window-power
    # normalization factor `scale`.
    data = np.multiply(np.conjugate(data), data)
    return np.multiply(np.real(data), scale)
@njit("float64[:,:](float64[:,:])", **p_njit)
def fast_transpose(data: np.ndarray) -> np.ndarray:
    # Plain transpose, compiled by numba so it can run without holding the GIL.
    return np.transpose(data)
@njit("float64[:](float64[:])", **p_njit)
def fast_10log10(data: np.ndarray) -> np.ndarray:
    # Convert a power ratio to decibels: 10 * log10(data).
    return np.multiply(np.log10(data), 10)
# ------------------------------------------------------------------------------
# FFTW_WelchPowerSpectrum
# ------------------------------------------------------------------------------
class FFTW_WelchPowerSpectrum:
    """Manages a power-spectrum calculation on 1D time series data `data` as
    passed to methods `compute_spectrum()` or `compute_spectrum_dB()`.

    The input data array must always be of the same length as specified by
    argument `len_data`. When the length of the passed input array is not equal
    to `len_data`, an array full of `numpy.nan`s is returned.

    The Welch algorithm is based on: `scipy.signal.welch()` with hard-coded
    defaults:

        window   = 'hanning'
        noverlap = 50 %
        detrend  = False
        scaling  = 'spectrum'
        mode     = 'psd'
        boundary = None
        padded   = False
        sides    = 'onesided'

    Args:
        len_data (int):
            Full length of the upcoming input array `data` passed to methods
            `compute_spectrum()` or `compute_spectrum_dB()`.

        fs (float):
            Sampling frequency of the time series data [Hz].

        nperseg (float):
            Length of each segment in Welch's method to average over.

        fftw_threads (int, optional):
            Number of threads to use for the FFT transformations. When set to
            > 1, the Python GIL will not be invoked.

            Default: 5

    Attributes:
        freqs (np.ndarray):
            The frequency table in [Hz] corresponding to the power spectrum
            output of `compute_spectrum()` and `compute_spectrum_dB()`.
    """

    def __init__(self, len_data: int, fs: float, nperseg: int, fftw_threads=5):
        nperseg = int(nperseg)
        if nperseg > len_data:
            print(
                "nperseg = {0:d} is greater than input length "
                " = {1:d}, using nperseg = {1:d}".format(nperseg, len_data)
            )
            nperseg = len_data

        self.len_data = len_data
        self.fs = fs
        self.nperseg = nperseg

        # Calculate the Hanning window in advance
        self.win = signal.hann(nperseg, False)
        self.scale = 1.0 / self.win.sum() ** 2  # For normalization

        # Calculate the frequency table in advance
        self.freqs = np.fft.rfftfreq(nperseg, 1 / fs)

        # Prepare the FFTW plan: segments overlap by 50 %, one-sided rfft
        # fmt: off
        self.noverlap = nperseg // 2
        self.step = nperseg - self.noverlap
        self.shape_in = ((len_data - self.noverlap) // self.step, nperseg)
        self.shape_out = (
            (len_data - self.noverlap) // self.step,
            nperseg // 2 + 1,
        )
        # fmt: on
        self._rfft_in = pyfftw.empty_aligned(self.shape_in, dtype="float64")
        self._rfft_out = pyfftw.empty_aligned(
            self.shape_out, dtype="complex128"
        )

        print("Creating FFTW plan for Welch power spectrum...", end="")
        sys.stdout.flush()
        self._fftw_welch = pyfftw.FFTW(
            self._rfft_in,
            self._rfft_out,
            flags=("FFTW_MEASURE", "FFTW_DESTROY_INPUT"),
            threads=fftw_threads,
        )
        print(" done.")

    # --------------------------------------------------------------------------
    #   compute_spectrum
    # --------------------------------------------------------------------------

    def compute_spectrum(self, data: np.ndarray) -> np.ndarray:
        """Returns the power spectrum array of the passed 1D time series array
        `data`. When `data` is in arbitrary units of [V], the output units will
        be [V^2]. Use `compute_spectrum_dB()` to get the equivalent power ratio
        in units of [dBV].

        If `data` is not yet fully populated with data as specified by the
        initialisation parameter `len_data`, this method will return an array
        filled with `numpy.nan`.

        Returns:
            The power spectrum array as a 1D numpy array in units of [V^2].
        """
        x = np.asarray(data)
        if self.len_data != len(x):
            # BUG FIX: this used to return a 2-tuple of NaN arrays, which
            # contradicted the docstring and crashed `compute_spectrum_dB()`.
            # Return a single NaN array with the shape of a regular spectrum
            # so downstream math keeps working.
            return np.full(len(self.freqs), np.nan)

        # Split the input into 50 %-overlapping segments (zero-copy view)
        strides = (self.step * x.strides[-1], x.strides[-1])
        Pxx_in = np.lib.stride_tricks.as_strided(
            x, shape=self.shape_in, strides=strides
        )

        # Apply window
        Pxx_in = fast_multiply_window(self.win, Pxx_in)

        # Perform the fft
        self._rfft_in[:] = Pxx_in  # float64
        Pxx = self._fftw_welch()  # returns complex128

        # Equivalent of:
        #   Pxx = np.conjugate(Pxx) * Pxx
        #   Pxx = Pxx.real * self.scale
        Pxx = fast_conjugate_rescale(Pxx, self.scale)

        if self.nperseg % 2:
            Pxx[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            Pxx[..., 1:-1] *= 2

        Pxx = fast_transpose(Pxx)

        # Average over windows
        if len(Pxx.shape) >= 2 and Pxx.size > 0:
            if Pxx.shape[-1] > 1:
                Pxx = Pxx.mean(axis=-1)
            else:
                Pxx = np.reshape(Pxx, Pxx.shape[:-1])

        return Pxx

    # --------------------------------------------------------------------------
    #   compute_spectrum_dB
    # --------------------------------------------------------------------------

    def compute_spectrum_dB(self, data: np.ndarray) -> np.ndarray:
        """Returns the power spectrum array of the passed 1D time series array
        `data`. When `data` is in arbitrary units of [V], the output units will
        be [dBV].

        power [dBV]: `10 * log_10(P_in / P_ref)`, where `P_ref` = 1 [V].

        If `data` is not yet fully populated with data as specified by the
        initialisation parameter `len_data`, this method will return an array
        filled with `numpy.nan`.

        Physics note:
            Technically, `data` should have units of power [W], hence the name
            'power spectrum'. The output of this method will then have units of
            [dBW]. However, if we measure a voltage, in order to calculate the
            power we should also know the impedance `Z` to get to the electrical
            power `P = V^2 / Z`. Because we don't always have access to the value
            of the impedance, an engineering solution is to neglect the impedance
            and simply use `V^2` as the power. Taking 1 `V^2` as the reference
            'power', the power amplitude is now represented by the 'engineering'
            units of [dBV], instead of [dBW].

        Returns:
            The power spectrum array as a 1D numpy array in units of [dBV].
        """
        return fast_10log10(self.compute_spectrum(data))
| 36.596639 | 88 | 0.586797 |
0b2b2104fa2e1ff2a5055849b35b403618d282e7 | 9,190 | py | Python | imperative/python/megengine/core/tensor/utils.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 5,168 | 2020-03-19T06:10:04.000Z | 2022-03-31T11:11:54.000Z | imperative/python/megengine/core/tensor/utils.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 286 | 2020-03-25T01:36:23.000Z | 2022-03-31T10:26:33.000Z | imperative/python/megengine/core/tensor/utils.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 515 | 2020-03-19T06:10:05.000Z | 2022-03-30T09:15:59.000Z | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
from typing import Iterable, Union
import numpy as np
from .._imperative_rt import make_const
from .._imperative_rt.core2 import SymbolVar, Tensor, apply, dtype_promotion, get_device
from .._imperative_rt.ops import SubgraphBuilder as _SubgraphBuilder
from .._wrap import as_device
from ..ops import builtin
from ..ops.special import Const
from .amp import _high_prec_dtype, _low_prec_dtype
from .dtype import is_dtype_equal, is_quantize
_enable_convert_inputs = True
def get_convert_inputs():
    r"""Return the current state of the global ``_enable_convert_inputs`` flag."""
    return _enable_convert_inputs
def set_convert_inputs(flag):
    r"""This function is a temporary workaround for reducing the overhead of operator
    invocations. The function `convert_inputs` is disabled if the global state
    `_enable_convert_inputs` is set to `False`, otherwise enabled. This function is for
    internal use only, and should be removed when the tensor-like system is refactored.

    Returns the previous value of the flag so callers can restore it afterwards.
    """
    global _enable_convert_inputs
    backup = _enable_convert_inputs
    _enable_convert_inputs = flag
    return backup
def concatenate(inputs, axis=0, *, device=None):
    """Concatenate tensors along ``axis`` after promoting them to a common
    dtype/device; ``device`` defaults to the one inferred from ``inputs``."""
    inputs = convert_inputs(*inputs)
    if device is None:
        device = get_device(inputs)
    (result,) = apply(builtin.Concat(axis=axis, comp_node=device), *inputs)
    return result
def astype(x, dtype):
    """Cast tensor ``x`` to ``dtype``; no-op when dtypes already match."""
    dtype = np.dtype(dtype)
    if not is_dtype_equal(x.dtype, dtype):
        isscalar = x._isscalar()
        (x,) = apply(builtin.TypeCvt(dtype=dtype), x)
        if isscalar:
            # TypeCvt drops the scalar flag; restore it on the result
            x._setscalar()
    return x
def convert_single_value(v, *, dtype=None, device=None):
    """Convert ``v`` to a tensor of ``dtype`` on ``device``.

    Existing tensors/SymbolVars are cast (unless quantized — their dtype is
    preserved); any other value becomes a new constant tensor.
    """
    if isinstance(v, (Tensor, SymbolVar)):
        if not is_quantize(v.dtype):
            v = astype(v, dtype)
    else:
        (v,) = Const(v, dtype=dtype, device=device)()
    return v
def convert_inputs(*args, device=None):
    """Promote ``args`` to a common dtype and device (``None`` entries pass
    through untouched).

    Returns ``args`` unchanged when the global ``_enable_convert_inputs``
    switch is off. When any argument is a :class:`SymbolVar`, all SymbolVar
    arguments must share one graph, and plain values are first materialized
    as constants on that graph.
    """
    if not _enable_convert_inputs:
        return args
    dtype = dtype_promotion(args)
    if device is None:
        device = get_device(args)
    device = as_device(device)
    # Find the (single) computing graph if any argument is symbolic
    graph = None
    sym_type = None
    for a in args:
        if isinstance(a, SymbolVar):
            if graph is None:
                graph = a.var.graph
                sym_type = type(a)
            else:
                assert graph == a.var.graph
    args = list(args)
    if graph is not None:
        # Wrap plain values as graph constants of the promoted dtype
        for i in range(len(args)):
            if not isinstance(args[i], SymbolVar):
                rst = make_const(graph, np.array(args[i]), device.to_c(), dtype)
                args[i] = sym_type(rst)

    def convert(value):
        if value is None:
            return value
        return convert_single_value(value, dtype=dtype, device=device.to_c())

    return tuple(map(convert, args))
def cast_tensors(*args, promote=False):
    """Cast every non-None argument to the AMP low-precision dtype (default)
    or the high-precision dtype when ``promote`` is True; ``None`` entries
    pass through unchanged."""
    target = _high_prec_dtype if promote else _low_prec_dtype
    return tuple(None if a is None else a.astype(target) for a in args)
def result_type(*args):
    """numpy-style dtype promotion over tensors and dtype-like objects;
    arguments that are neither are silently ignored."""
    collected = []
    for arg in args:
        if isinstance(arg, Tensor):
            collected.append(arg.dtype)
        else:
            try:
                collected.append(np.dtype(arg))
            except TypeError:
                # not dtype-like — skip, matching previous behavior
                pass
    return np.result_type(*collected)
def isscalar(x):
    """True if ``x`` is a scalar tensor/SymbolVar or a numpy/python scalar."""
    if isinstance(x, (Tensor, SymbolVar)):
        return x._isscalar()
    return np.isscalar(x)
def setscalar(x):
    """Mark a tensor/SymbolVar as scalar; raise for any other type."""
    if not isinstance(x, (Tensor, SymbolVar)):
        raise NotImplementedError("Unsupport type {}".format(type(x)))
    x._setscalar()
def astensor1d(x, *reference, dtype=None, device=None):
    """Convert something to 1D tensor. Support following types
    * sequence of scalar literal / tensor
    * numpy array
    * tensor (returned as is, regardless of dtype and device)
    """
    try:
        ndim = x.ndim
    except AttributeError:
        # x has no .ndim: fall through to the sequence handling below
        pass
    except ValueError:
        # .ndim raised ValueError — presumably a tensor-like with a symbolic /
        # unknown shape (TODO confirm): honor dtype/device and return as-is
        if dtype is not None and dtype != x.dtype:
            x = astype(x, dtype)
        if device is not None:
            cn = as_device(device).to_c()
            (x,) = apply(builtin.Copy(comp_node=cn), x)
        return x
    else:
        # array-like with a known rank: must be 0-d or 1-d
        if ndim != 0 and ndim != 1:
            raise ValueError("ndim != 1 or 0, get : %d" % ndim)
        if not isinstance(x, (Tensor, SymbolVar)):
            (x,) = Const(x, dtype=dtype, device=device)(*reference)
        return x

    if not isinstance(x, collections.abc.Sequence):
        raise TypeError

    if any(isinstance(i, (Tensor, SymbolVar)) for i in x):
        # mixed sequence containing tensors: concatenate into one 1D tensor
        x = concatenate(x, device=device) if len(x) > 1 else x[0]
        if dtype is not None:
            x = astype(x, dtype)
        return x
    (x,) = Const(x, dtype=dtype, device=device)(*reference)
    return x
def _expand_int(s, i):
    """Recursively flatten ``i`` (int / 0-d or 1-d tensor / nested iterable
    of ints) into the accumulator list ``s``; raise TypeError otherwise."""
    if isinstance(i, (Tensor, SymbolVar)):
        i_np = i.numpy()
        if i_np.ndim == 0:
            s.append(int(i_np))
        else:
            s += list(i_np)
        return
    if isinstance(i, Iterable):
        for ii in i:
            _expand_int(s, ii)
        return
    if np.issubdtype(type(i), np.integer):
        s.append(i)
        return
    # BUG FIX: a bare ``raise`` with no active exception produced
    # RuntimeError("No active exception to re-raise"); raise explicitly.
    raise TypeError("can not expand {} into a shape".format(type(i)))
def make_shape_tuple(shape):
    """Flatten ``shape`` (ints, tensors, nested iterables) into a tuple of ints."""
    flat = []
    _expand_int(flat, shape)
    return tuple(flat)
def _normalize_axis(
    ndim: int, axis: Union[int, Iterable], reverse=False
) -> Union[int, list]:
    """Map possibly-negative ``axis`` value(s) into ``[0, ndim)``.

    An int comes back as an int; an iterable comes back as a sorted list
    (descending when ``reverse`` is True), with duplicate axes rejected.
    """
    def convert(x):
        x_org = x
        if x < 0:
            x = ndim + x
        assert (
            x >= 0 and x < ndim
        ), "axis {} is out of bounds for tensor of dimension {}".format(x_org, ndim)
        return x

    if isinstance(axis, int):
        return convert(axis)
    elif isinstance(axis, Iterable):
        axis_org = axis
        axis = list(sorted(map(convert, axis), reverse=reverse))
        for i in range(len(axis) - 1):
            assert axis[i] != axis[i + 1], "axis {} contains duplicated indices".format(
                axis_org
            )
        return axis
    # BUG FIX: was a bare ``raise`` with no active exception, which produced
    # RuntimeError("No active exception to re-raise"); raise explicitly.
    raise TypeError(
        "axis must be an int or an iterable of ints, got {}".format(type(axis))
    )
# Dispatch table: (operator name, arity) -> prebuilt Elemwise op.
_opr_map = {
    ("-", 1): builtin.Elemwise(mode="negate"),
    ("fma3", 3): builtin.Elemwise(mode="FUSE_MUL_ADD3"),
    ("fma4", 4): builtin.Elemwise(mode="FUSE_MUL_ADD4"),
}

# Binary operators all map straight to an Elemwise mode; note that
# "additive" is an alias for the "add" mode.
for name, mode in [
    ("+", "add"),
    ("-", "sub"),
    ("*", "mul"),
    ("/", "true_div"),
    ("//", "floor_div"),
    ("**", "pow"),
    ("max", "max"),
    ("additive", "add"),
]:
    _opr_map[(name, 2)] = builtin.Elemwise(mode=mode)
def subgraph(name, dtype, device, nr_inputs, gopt_level=None):
    """Decorator factory that builds a named subgraph op.

    The decorated function is invoked once, at decoration time, with
    ``(inputs, apply_expr, apply_const)`` and must return
    ``(outputs, outputs_has_grad)``. Decoration yields a zero-argument
    callable that returns the built (and, when ``gopt_level`` is set,
    compiled) subgraph op.
    """
    if device.physical_name.startswith("cpu"):
        gopt_level = None  # disable jit and compile
    def as_op(op, nargs):
        # Resolve a string operator name (e.g. "+") through _opr_map
        if isinstance(op, str):
            assert (op, nargs) in _opr_map, "unknown operator"
            op = _opr_map[(op, nargs)]
        return op
    def decorator(func):
        builder = _SubgraphBuilder(name)
        def apply_expr(op, *args, nr_out=None):
            op = as_op(op, len(args))
            results = builder.apply(op, args, 1 if nr_out is None else nr_out)
            if nr_out is None:
                assert len(results) == 1
                return results[0]
            else:
                assert len(results) == nr_out
                return results
        def apply_const(value, dtype=dtype, device=device):
            return builder.apply_const(value, dtype, device)
        inputs = [builder.input() for _ in range(nr_inputs)]
        outputs, outputs_has_grad = func(inputs, apply_expr, apply_const)
        builder.outputs(outputs)
        builder.outputs_has_grad(outputs_has_grad)
        if gopt_level is None:
            return lambda: builder.get()
        else:
            return lambda: builder.compile(gopt_level)
    return decorator
def interpret_subgraph(func, dtype, device):
    """Eager-mode counterpart of :func:`subgraph`: rather than recording into
    a subgraph builder, ops are applied immediately every time the decorated
    function is called. Only ``outputs`` (not the grad flags) are returned.
    """
    def as_op(op, nargs):
        # Unlike subgraph.as_op, unknown string names fall through unchanged
        if isinstance(op, str) and (op, nargs) in _opr_map:
            op = _opr_map[(op, nargs)]
        return op
    def decorated_func(*args):
        def apply_expr(op, *args, nr_out=None):
            op = as_op(op, len(args))
            results = apply(op, *args)
            if nr_out is None:
                assert len(results) == 1
                return results[0]
            else:
                assert len(results) == nr_out
                return results
        def apply_const(value, dtype=dtype, device=device):
            return Const(value, dtype=dtype, device=device)()[0]
        outputs, outputs_has_grad = func(args, apply_expr, apply_const)
        return outputs
    return decorated_func
def subgraph_fn(name, dtype, device, nr_inputs, gopt_level=None, interpret=False):
    """Decorator factory returning a directly-callable function.

    With ``interpret=False`` the function body is captured once as a subgraph
    op and calls apply that op; with ``interpret=True`` the body is executed
    eagerly on every call via :func:`interpret_subgraph`.
    """
    def decorator(func):
        if not interpret:
            op = subgraph(name, dtype, device, nr_inputs, gopt_level=gopt_level)(func)
            return lambda *args: apply(op(), *args)
        else:
            return interpret_subgraph(func, dtype, device)
    return decorator
| 28.990536 | 88 | 0.604353 |
15b84e269edb65db67901709f2b744b9518670d9 | 657 | py | Python | tools/quantization_test.py | KateHaeun/pycls | f3d87a36cb0a8adead31c7ad98f43facf7fe4c47 | [
"MIT"
] | null | null | null | tools/quantization_test.py | KateHaeun/pycls | f3d87a36cb0a8adead31c7ad98f43facf7fe4c47 | [
"MIT"
] | null | null | null | tools/quantization_test.py | KateHaeun/pycls | f3d87a36cb0a8adead31c7ad98f43facf7fe4c47 | [
"MIT"
] | 3 | 2021-06-02T05:03:01.000Z | 2021-07-19T03:51:32.000Z | """Test a quantized classification model."""
import pycls.core.config as config
import pycls.core.distributed as dist
import pycls.core.logging as logging
import pycls.core.quantization_tester as tester
from pycls.core.config import cfg
logger = logging.get_logger(__name__)
def main():
    """Load the config, force single-GPU execution, and run the quantized-model test."""
    config.load_cfg_fom_args("Test a quantized classification model.")
    config.assert_and_infer_cfg()
    # Quantized inference is single-device only; clamp before freezing the cfg.
    if cfg.NUM_GPUS != 1:
        cfg.NUM_GPUS = 1
        logger.warning("When testing a quantized model, only one gpu can be used.")
    cfg.freeze()
    dist.multi_proc_run(num_proc=1, fun=tester.test_quantized_model)
main()
| 27.375 | 83 | 0.739726 |
09d3e08038c0096de589a120e17a81b1b3dec58e | 11,104 | py | Python | cctk/mae_file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 10 | 2020-01-16T15:26:57.000Z | 2022-01-15T23:12:00.000Z | cctk/mae_file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 2 | 2020-05-27T21:04:36.000Z | 2020-09-26T20:49:53.000Z | cctk/mae_file.py | ekwan/cctk | 85cb8d0b714a80e8e353987dc24006695f1d0532 | [
"Apache-2.0"
] | 2 | 2020-09-24T18:44:18.000Z | 2021-08-05T20:35:51.000Z | import re
import numpy as np
import networkx as nx
from cctk import File, Ensemble, ConformationalEnsemble, Molecule
from cctk.helper_functions import get_number
class MAEFile(File):
    """
    Class representing Maestro ``.mae`` files.

    Attributes:
        name (str): name of file
        ensemble (Ensemble): ``Ensemble`` or ``ConformationalEnsemble`` object
    """

    def __init__(self, name=None):
        # NOTE(review): ``self.name`` is only bound when ``name`` is a str, so
        # it may be absent after ``MAEFile()`` — kept as-is in case callers
        # rely on hasattr checks.
        if isinstance(name, str):
            self.name = name

    @classmethod
    def read_file(cls, filename, name=None, **kwargs):
        """
        Reads ``.mae`` file and generates a ``MAEFile`` instance.

        Args:
            filename (str): path to file
            name (str): name of the file

        Returns:
            tuple: ``(MAEFile object, property names (list), property values (list))``
        """
        file = MAEFile(name=name)
        (geometries, symbols, bonds, p_names, p_vals, conformers) = cls._read_mae(filename, **kwargs)
        atomic_numbers = np.array([get_number(z) for z in symbols], dtype=np.int8)

        if conformers:
            file.ensemble = ConformationalEnsemble()
        else:
            file.ensemble = Ensemble()

        for geom in geometries:
            file.ensemble.add_molecule(Molecule(atomic_numbers, geom, bonds=bonds.edges))

        return file, p_names, p_vals

    @classmethod
    def _read_mae(
        cls, filename, contains_conformers="check", save_memory_for_conformers=True, print_status_messages=False,
    ):
        """
        Reads uncompressed Macromodel files.

        Args:
            filename (str): path to file
            contains_conformers (str): one of ``check``, ``True``, or ``False``
            save_memory_for_conformers (Bool): store symbols/bonds once when all
                structures are conformers of one molecule
            print_status_messages (Bool):

        Returns:
            geometries (np.ndarray): array of 3-tuples of geometries
            symbols (np.ndarray): array of atom symbols (str)
            bonds (nx.Graph): ``NetworkX`` graph of bond information
            property_names:
            property_values:
            contains_conformers (Bool): whether or not the file contains conformers
        """
        # read file
        if print_status_messages:
            print(f"Reading (unknown)...", end="", flush=True)
        lines = super().read_file(filename)
        if print_status_messages:
            print(f"read {len(lines)} lines...", end="", flush=True)

        # initialize arrays
        geometries = []
        symbols = []
        bonds = []
        property_names = []
        property_values = []
        this_geometry = None
        this_symbols = None
        this_bonds = None
        this_property_names = None
        this_property_values = None

        # parse file with a simple state machine over block types
        i = 0
        current_block_type = None
        while i < len(lines):
            # read the current line
            line = lines[i]
            i += 1

            # determine if we are in a molecule block
            end_of_file = i + 1 == len(lines)
            if current_block_type is None and (line.startswith("f_m_ct") or end_of_file):
                # store the current results if any
                if this_geometry is not None and len(this_geometry) > 0:
                    geometries.append(this_geometry)
                    symbols.append(this_symbols)
                    bonds.append(this_bonds)
                    property_names.append(this_property_names)
                    property_values.append(this_property_values)

                # prepare to read a new molecule
                current_block_type = "property_names"
                this_geometry = []
                this_symbols = []
                this_bonds = None
                this_property_names = []
                this_property_values = []
                continue

            # read property names
            elif current_block_type == "property_names":
                line = line.strip()
                if line.startswith("i_m_ct_format"):
                    next_line = lines[i].strip()
                    if next_line != ":::":
                        raise ValueError(f"expected ':::' here but line {i+1} is:\n{next_line}\n")
                    current_block_type = "property_values"
                    i += 1
                elif line.startswith(":::"):
                    raise ValueError(f"expected to see i_m_ct_format as the last property (line {i+1})")
                else:
                    fields = re.split(" +", line)
                    if len(fields) != 1:
                        raise ValueError(f"unexpected number of fields in property name line: {line}")
                    this_property_names.append(line)

            # read property values
            elif current_block_type == "property_values":
                n_properties = len(this_property_names)
                for j in range(n_properties):
                    this_property_values.append(lines[i + j])
                i += n_properties
                current_block_type = "looking_for_geometry1"

            # look for geometry block
            elif current_block_type == "looking_for_geometry1":
                if line.startswith("  m_atom"):
                    current_block_type = "looking_for_geometry2"
            elif current_block_type == "looking_for_geometry2":
                if line.strip() == ":::":
                    current_block_type = "geometry_block"

            # parse geometry
            elif current_block_type == "geometry_block":
                line = line.strip()
                if line == ":::":
                    current_block_type = "bond_block"

                    # initialize bond connectivity graph
                    this_bonds = nx.Graph()
                    n_atoms = len(this_symbols)
                    this_bonds.add_nodes_from(range(1, n_atoms + 1))
                    i += 7
                else:
                    fields = re.split(" +", line)
                    x, y, z = float(fields[2]), float(fields[3]), float(fields[4])
                    this_geometry.append((x, y, z))
                    symbol = fields[-1]
                    this_symbols.append(symbol)

            # parse bonds
            elif current_block_type == "bond_block":
                line = line.strip()
                if line == ":::":
                    current_block_type = None
                else:
                    fields = re.split(" +", line)
                    bond_number, atom1, atom2, bond_order = (
                        int(fields[0]),
                        int(fields[1]),
                        int(fields[2]),
                        int(fields[3]),
                    )
                    n_atoms = len(this_geometry)
                    if not 1 <= atom1 <= n_atoms or not 1 <= atom2 <= n_atoms:
                        raise ValueError(f"atom number out of range: {line}")
                    bond_order = int(fields[3])
                    if bond_order <= 0:
                        raise ValueError(f"zero or negative bond order: {line}")
                    if this_bonds.number_of_edges() != bond_number - 1:
                        raise ValueError(f"non-sequential bond number (expected {this_bonds.number_of_edges()+1} but got {bond_number})")
                    if this_bonds.has_edge(atom1, atom2):
                        current_bond_order = this_bonds[atom1][atom2]["weight"]
                        if current_bond_order != bond_order:
                            raise ValueError(f"inconsistent bond order definition: {line}")
                    # nx.Graph is undirected; the second add_edge is a no-op,
                    # kept for parity with the original implementation
                    this_bonds.add_edge(atom1, atom2, weight=bond_order)
                    this_bonds.add_edge(atom2, atom1, weight=bond_order)

        # convert to numpy array
        # NOTE(review): with structures of differing atom counts this creates
        # a ragged object array — deprecated in recent numpy; verify upstream.
        geometries = np.array(geometries)
        symbols = np.array(symbols)
        property_names = np.array(property_names)
        property_values = np.array(property_values)

        # determine if these are conformers
        if contains_conformers == "check":
            contains_conformers = True
            for this_symbols, this_bonds in zip(symbols[1:], bonds[1:]):
                # must have the same symbols and bonds
                if not (symbols[0] == this_symbols).all() or not nx.is_isomorphic(bonds[0], this_bonds):
                    contains_conformers = False
                    break
        elif isinstance(contains_conformers, bool):
            pass
        else:
            raise ValueError("contains_conformers must be 'check' or boolean")

        # if requested, just store one copy of symbols and bonds
        if save_memory_for_conformers and contains_conformers:
            symbols = symbols[0]
            bonds = bonds[0]

        # status reporting only below this point
        n_geometries = len(geometries)
        if print_status_messages:
            if n_geometries > 1:
                if contains_conformers:
                    n_atoms = len(geometries[0])
                    n_bonds = bonds.number_of_edges()
                    if print_status_messages:
                        print(f"read {n_geometries} conformers ({n_atoms} atoms and {n_bonds} bonds).")
                else:
                    min_n_atoms = len(geometries[0])
                    max_n_atoms = len(geometries[0])
                    for geometry in geometries[1:]:
                        if len(geometry) > max_n_atoms:
                            max_n_atoms = len(geometry)
                        elif len(geometry) < min_n_atoms:
                            min_n_atoms = len(geometry)
                    min_n_bonds = bonds[0].number_of_edges()
                    max_n_bonds = bonds[0].number_of_edges()
                    for this_bonds in bonds[1:]:
                        if this_bonds.number_of_edges() > max_n_bonds:
                            max_n_bonds = this_bonds.number_of_edges()
                        elif this_bonds.number_of_edges() < min_n_bonds:
                            # BUG FIX: was `bonds.number_of_edges` — an unbound
                            # method of the wrong object, never called
                            min_n_bonds = this_bonds.number_of_edges()
                    if print_status_messages:
                        print(f"read {n_geometries} unrelated geometries ({min_n_atoms}-{max_n_atoms} atoms and {min_n_bonds}-{max_n_bonds}) bonds).")
            else:
                # BUG FIX: was `len(geometries)` (the number of geometries,
                # i.e. 1), not the atom count of the single geometry
                n_atoms = len(geometries[0])
                n_bonds = bonds.number_of_edges()
                if print_status_messages:
                    print(f"read one geometry ({n_atoms} atoms and {n_bonds} bonds).")

        # return result
        return (
            geometries,
            symbols,
            bonds,
            property_names,
            property_values,
            contains_conformers,
        )

    def get_molecule(self, num=None):
        """
        Returns the last molecule from the ensemble.

        If ``num`` is specified, returns ``self.ensemble.molecules[num]``
        """
        # some methods pass num=None, which overrides setting the default above
        if num is None:
            num = -1

        if not isinstance(num, int):
            raise TypeError("num must be int")

        return self.ensemble.molecules[num]
| 39.799283 | 150 | 0.530439 |
aafdb5a762a995164d6a9945e3c9b51fb648934a | 7,479 | py | Python | python/tools/freq_stepper.py | alexchartier/digital_rf | cb548266bac910eeee12c210cf31e36809a38565 | [
"BSD-3-Clause"
] | null | null | null | python/tools/freq_stepper.py | alexchartier/digital_rf | cb548266bac910eeee12c210cf31e36809a38565 | [
"BSD-3-Clause"
] | null | null | null | python/tools/freq_stepper.py | alexchartier/digital_rf | cb548266bac910eeee12c210cf31e36809a38565 | [
"BSD-3-Clause"
] | null | null | null | #!python
# ----------------------------------------------------------------------------
# Copyright (c) 2018 Johns Hopkins APL
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Step through oscillator frequencies"""
import pdb
from datetime import datetime, timedelta
import numpy as np
import time
from gnuradio import uhd
import pytz
import digital_rf as drf
def main():
    """Exercise the frequency stepper with the default frequency-list file.

    NOTE(review): empty lists are passed for the usrp/options handles, so
    only the frequency-list loading path runs before step() needs hardware.
    """
    step([], [], freq_list_fname='freq_list_default.txt')
def step(usrp, op,
         ch_num=0,
         sleeptime=0.1,
         freq_list_fname=None,
         flog_fname=None,
         lock_fname=None,
         time_source='GPS',
         timestr='%Y/%b/%d %H:%M:%S',
         ):
    """ Step the USRP's oscillator through a list of frequencies

    Parameters
    ----------
    usrp : UHD multi_usrp-like device handle (sensors, timed tuning).
    op : options object; reads samplerate, ch_samplerates_frac, lo_offsets,
        tune_args and verbose.
    ch_num : channel index for sample-rate / LO-offset lookups.
    sleeptime : polling period of the tuning loop, in seconds.
    freq_list_fname : optional file of 'second: MHz' entries (get_freq_list);
        when None the built-in set_freq_list() schedule is used.
    flog_fname : optional strftime pattern for a per-tune log file.
    lock_fname : optional strftime pattern for a GPS-lock status file.
    time_source : 'GPS' or 'USRP' -- clock used to derive the next tune time.
    timestr : time format string.
        NOTE(review): this parameter is shadowed inside the loop
        (timestr = tune_time.strftime(...)), so the verbose print below
        formats with the already-formatted date as the format string --
        almost certainly unintended; confirm.

    Runs forever (while 1); never returns.
    """
    if freq_list_fname:
        # NOTE(review): the inline conditional is redundant in this branch --
        # freq_list_fname is already known to be truthy here.
        freq_list = get_freq_list(freq_list_fname) if freq_list_fname else set_freq_list()
    else:
        freq_list = set_freq_list()
    print('Starting freq_stepper')
    prev_lock = False
    # Check for GPS lock
    while not usrp.get_mboard_sensor("gps_locked", 0).to_bool():
        print("waiting for gps lock...")
        time.sleep(5)
    assert usrp.get_mboard_sensor("gps_locked", 0).to_bool(), "GPS still not locked"
    # Begin infinite transmission loop
    freq = 0
    while 1:
        # Set USRP time (necessary to know what sample number we shifted frequencies at)
        usrptime_secs = usrp.get_time_now().get_real_secs()
        if time_source == 'GPS':
            # Set GPS time (necessary to sync operations between the transmitter and receiver)
            gpstime = datetime.utcfromtimestamp(usrp.get_mboard_sensor("gps_time"))
            time_next = pytz.utc.localize(gpstime) + timedelta(seconds=1)
        elif time_source == 'USRP':
            time_next = drf.util.epoch + timedelta(seconds=usrptime_secs + 1)
        # NOTE(review): any other time_source value leaves time_next undefined
        # and raises NameError below -- consider validating the argument.
        # Calculate the samplerate
        try:
            ch_samplerate_frac = op.ch_samplerates_frac[ch_num]
            ch_samplerate_ld = (
                np.longdouble(ch_samplerate_frac.numerator)
                / np.longdouble(ch_samplerate_frac.denominator)
            )
        except:
            # NOTE(review): bare except silently falls back to op.samplerate;
            # narrowing to AttributeError/IndexError/KeyError would avoid
            # masking unrelated bugs.
            ch_samplerate_ld = op.samplerate
        # Frequency shifting block
        # Change frequency each time we hit a new time in the list, otherwise hold the existing note
        if ((time_next.second) in freq_list.keys()) and (freq != freq_list[time_next.second]):
            tune_time = time_next
            freq = freq_list[time_next.second]
            # Specify USRP tune time on the first exact sample after listed time
            # tune_time_secs = (tune_time - drf.util.epoch).total_seconds()
            tune_time_secs = usrp.get_time_last_pps().get_real_secs() + 1
            tune_time_rsamples = np.ceil(tune_time_secs * op.samplerate)
            tune_time_secs = tune_time_rsamples / op.samplerate
            gps_lock = usrp.get_mboard_sensor("gps_locked").to_bool()
            print('GPS lock status: %s' % gps_lock)
            # NOTE(review): this reassignment shadows the 'timestr' format
            # parameter for the rest of the loop (see docstring).
            timestr = tune_time.strftime('%Y/%m/%d-%H:%M:%S')
            if lock_fname:
                # Record only lock-status transitions, not every tune.
                if gps_lock != prev_lock:
                    with open(tune_time.strftime(lock_fname), 'a+') as f:
                        f.write('GPS lock status: %s at %s' % (gps_lock, timestr))
                    prev_lock = gps_lock
            # Optionally write out the shift samples of each frequency
            tune_sample = int(np.uint64(tune_time_secs * ch_samplerate_ld))
            if flog_fname:
                # Change to 'a' to append
                with open(tune_time.strftime(flog_fname), 'w') as f:
                    f.write('%s %s %i\n' % (timestr, str(freq).rjust(4), tune_sample))
            # Schedule the retune at the computed time on all motherboards.
            usrp.set_command_time(
                uhd.time_spec(float(tune_time_secs)),
                uhd.ALL_MBOARDS,
            )
            # Tune to the next frequency in the list
            tune_res = usrp.set_center_freq(
                uhd.tune_request(freq * 1E6, op.lo_offsets[ch_num], \
                    args=uhd.device_addr(','.join(op.tune_args)),
                ),
                ch_num,
            )
            usrp.clear_command_time(uhd.ALL_MBOARDS)
            if op.verbose:
                print('Tuned to %s MHz at %s (sample %i)' % \
                    (str(freq).rjust(4),
                     tune_time.strftime(timestr),
                     tune_sample,
                     )
                    )
            """
            gpstime = datetime.utcfromtimestamp(usrp.get_mboard_sensor("gps_time"))
            usrptime = drf.util.epoch + timedelta(seconds=usrp.get_time_now().get_real_secs())
            print('GPS tune time: %s\nUSRP tune time: %s' %
                (gpstime.strftime(timestr),
                 usrptime.strftime(timestr))
                )
            """
        time.sleep(sleeptime)
def set_dev_time(usrp):
    """Synchronize the USRP device clock to its GPSDO.

    Repeats the UHD-recommended synchronization recipe (numbered steps in
    the comments below) until the device's last-PPS time agrees with the
    "gps_time" sensor. Blocks until synchronized; returns None.
    """
    # 7) Verify that usrp->get_time_last_pps() and usrp->get_mboard_sensor("gps_time") return the same time.
    # while usrp.get_time_last_pps().get_real_secs() + 1 != usrp.get_mboard_sensor("gps_time").to_real():
    while usrp.get_time_last_pps().get_real_secs() != usrp.get_mboard_sensor("gps_time").to_real():
        print(usrp.get_time_last_pps().get_real_secs())
        print(usrp.get_mboard_sensor("gps_time").to_real())
        # 1) Poll on usrp->get_mboard_sensor("gps_locked") until it returns true
        while not usrp.get_mboard_sensor("gps_locked", 0).to_bool():
            print("Waiting for gps lock...")
            time.sleep(5)
        print("...GPS locked!")
        # 2) Poll on usrp->get_time_last_pps() until a change is seen.
        pps = usrp.get_time_last_pps()
        while usrp.get_time_last_pps() == pps:
            time.sleep(0.1)
        # 3) Sleep 200ms (allow NMEA string to propagate)
        time.sleep(0.2)
        # 4) Use "usrp->set_time_next_pps(uhd::time_spec_t(usrp->get_mboard_sensor("gps_time").to_int()+1));" to set the time
        # NOTE(review): the recipe comment above says +1 but the code uses +2 -- confirm which offset is intended.
        usrp.set_time_next_pps(uhd.time_spec_t(usrp.get_mboard_sensor("gps_time").to_int() + 2))
        # 5) Poll on usrp->get_time_last_pps() until a change is seen.
        pps = usrp.get_time_last_pps()
        while usrp.get_time_last_pps() == pps:
            time.sleep(0.1)
        # 6) Sleep 200ms (allow NMEA string to propagate)
        time.sleep(0.2)
        print('USRP last PPS = %i, GPSDO = %i' % (\
            usrp.get_time_last_pps().get_real_secs(),
            usrp.get_mboard_sensor("gps_time").to_real()
        ))
    print('time set')
def get_freq_list(freq_list_fname):
    """
    Parse a frequency-list file into {shift_second (int): freq_MHz (float)}.

    Each valid line has the form ``<int>: <float>``; blank, comment, or
    otherwise malformed lines are skipped.

    Raises AssertionError when no valid entries are found.
    """
    freq_list = {}
    with open(freq_list_fname, 'r') as f:
        for line in f:
            try:
                k, v = line.split(':')
                freq_list[int(k)] = float(v)
            except ValueError:
                # Skip malformed lines. (Bug fix: the original bare
                # 'except: None' swallowed *every* exception, including
                # KeyboardInterrupt, and hid genuine parsing bugs.)
                continue
    assert len(freq_list) > 0, "Could not load %s" % freq_list_fname
    return freq_list
def set_freq_list():
    """Return the default tuning schedule: {shift time (s of minute): freq (MHz)}."""
    seconds = (0, 10, 20, 30, 40, 50)
    freqs_mhz = (3.0, 4.0, 5.1, 8.0, 12.0, 16.0)
    return dict(zip(seconds, freqs_mhz))
# Script entry point: run the stand-alone frequency-stepper smoke test.
if __name__ == '__main__':
    main()
| 37.395 | 126 | 0.567188 |
e644d44a3653633c71c8e0f17a0ecc1117c8855c | 2,140 | py | Python | models/nets/header.py | hitfeelee/rtm3d | 0e80883e40cc225fc489db18c923bb71dca81c57 | [
"MIT"
] | 2 | 2021-01-22T01:21:24.000Z | 2021-04-14T02:46:29.000Z | models/nets/header.py | hitfeelee/rtm3d | 0e80883e40cc225fc489db18c923bb71dca81c57 | [
"MIT"
] | 5 | 2021-01-14T03:18:44.000Z | 2021-05-26T02:24:45.000Z | models/nets/header.py | hitfeelee/rtm3d | 0e80883e40cc225fc489db18c923bb71dca81c57 | [
"MIT"
] | 2 | 2021-04-14T02:46:35.000Z | 2021-08-09T01:49:11.000Z | import torch
import torch.nn as nn
from utils import torch_utils
class RTM3DHeader(nn.Module):
    """RTM3D prediction heads: a main-keypoint heatmap head and a 3D-property
    regression head.

    Both heads share the same topology: ``MODEL.HEADER_NUM_CONV`` conv+BN
    blocks followed by a final 3x3 convolution producing the task-specific
    channels (one channel per object class for the keypoint head, 8 for the
    regression head).
    """

    def __init__(self, config):
        """
        Args:
            config: experiment configuration; reads MODEL.OUT_CHANNELS,
                MODEL.HEADER_NUM_CONV and DATASET.OBJs.
        """
        super(RTM3DHeader, self).__init__()
        self._config = config
        _in_ch = self._config.MODEL.OUT_CHANNELS
        _num_class = len(self._config.DATASET.OBJs)
        _num_conv = self._config.MODEL.HEADER_NUM_CONV
        # All layers use dilation 1; a list is what make_convbn_level expects.
        _dilation = [1] + [1] * (_num_conv - 1)
        # Main keypoint-detection head (heatmap, one channel per class).
        # (Removed a stale commented-out bias=True variant of both heads.)
        self.main_kf_header = torch_utils.make_convbn_level(_in_ch, _in_ch, 3, _num_conv, bias=False,
                                                            dilation=_dilation)
        self.main_kf_header.add_module('main_kf_head', nn.Conv2d(_in_ch, _num_class, 3, padding=1, bias=True))
        # 3D-property regression head.
        # NOTE(review): the original comment listed 6 quantities
        # (z_off, sin(alpha), cos(alpha), h, w, l) but the head outputs
        # 8 channels -- confirm the channel mapping against the loss code.
        self.regress_header = torch_utils.make_convbn_level(_in_ch, _in_ch, 3, _num_conv, bias=False,
                                                            dilation=_dilation)
        self.regress_header.add_module('regress_head', nn.Conv2d(_in_ch, 8, 3, padding=1, bias=True))

    def forward(self, x):
        """Run both heads on feature map ``x``; returns (main_kf_logits, regress_logits)."""
        main_kf_logits = self.main_kf_header(x)
        regress_logits = self.regress_header(x)
        return main_kf_logits, regress_logits

    def fuse(self):
        """Fuse Conv+BN pairs in both heads in place (inference-time optimization)."""
        self.main_kf_header = torch_utils.fuse_conv_and_bn_in_sequential(self.main_kf_header)
        self.regress_header = torch_utils.fuse_conv_and_bn_in_sequential(self.regress_header)
4fcc7bce10e095cbf0e94d8ec7d3f0855e7aa2a5 | 48,996 | py | Python | Data_Analysis/Wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | Data_Analysis/Wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | Data_Analysis/Wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | ########################################################################################################
# This script transforms raw LHCO data to a set of features for machine learning and also provides a #
# method for plotting probability densities. #
########################################################################################################
# Analysis imports
from __future__ import division
from math import cosh, sinh, cos, sin, factorial
import numpy as np
from numpy import linalg as LA
import pandas as pd
from pandas import Series, DataFrame
from random import random
import itertools
#import re
import os
# Define combinatoric factor for jet fake probabilities
def nCk(n, k):
    """Binomial coefficient n-choose-k (a float, via true division of factorials)."""
    numerator = factorial(n)
    denominator = factorial(k) * factorial(n - k)
    return numerator / denominator
# Define charge flip probabilities based on pseudo-rapidities
def CF_probs(eta):
    """Return the electron charge-flip probability for pseudo-rapidity eta.

    Probabilities are binned in eta; each bin covers (lo, hi], except the
    first bin which also includes its lower edge (-2.5). Outside |eta|
    coverage (eta < -2.5 or eta > 2.5) the probability is 0.
    """
    bins = (
        (-2.5, -2.0, 8.9e-2),
        (-2.0, -1.52, 4.4e-2),
        (-1.52, -1.37, 0),
        (-1.37, -.8, 1.8e-2),
        (-.8, 0, .7e-2),
        (0, .8, .2e-2),
        (.8, 1.37, 1.9e-2),
        (1.37, 1.52, 0),
        (1.52, 2.0, 3.9e-2),
        (2.0, 2.5, 8.45e-2),
    )
    # The first bin is closed on both ends in the original logic.
    if eta == -2.5:
        return 8.9e-2
    for lo, hi, prob in bins:
        if lo < eta <= hi:
            return prob
    return 0
class Wrangler(object):
'''
This class transforms the raw LHCO data to a set of features for machine learning and also provides a method for plotting
probability densities.
'''
__version__ = 'Beta_1.0'
# Initialize the class
def __init__(self, Ne_1 = None, Np_1 = None, Ne_2 = None, Np_2 = None, Nj = None, Nb = None):
self.Ne_1, self.Np_1, self.Ne_2, self.Np_2, self.Nj, self.Nb = Ne_1 or 0, Np_1 or 0, Ne_2 or 0, Np_2 or 0, Nj or 0, Nb or 0
'''
Attributes:
-----------
Ne_1 and Ne_2: Upper bounds on minimum number of electrons
Np_1 and Np_2: Upper bounds on minimum number of positrons
Nj: Upper bound on minimum number of jets
Nb: Required number of b-jets
Two attributes are required to specify the upper bounds on electrons and positrons since the same-sign lepton signature includes both
e- e- and e+ e+ events. Specifically, we filter events based on an or statement of the form
if (Num_e >= Ne_1, Num_p >= Np_1, Num_j >= Nj, Num_b >= Nb) or (Num_e >= Ne_2, Num_p >= Np_2, Num_j >= Nj, Num_b >= Nb):
keep event
else:
toss event
where Ne_1, Np_1 = 2, 0 and Ne_2, Np_2 = 0, 2 covers both e- e- and e+ e+ cases.
Methods:
--------
Three_Vec: Translate raw kinematic information into 3-momenta.
Four_Vec: Translate raw kinematic information into 4-momenta.
Inv_Mass: Caculate invariant mass of a 4-vector.
cos_theta_star: Calculate the polar angle of two objects in the Collins-Soper frame (Phys. Rev. D 16 (1977) 2219-2225).
Sphericity_Tensor: Create a sphericity tensor based on a given 3-momenta.
Event_Shape_Variables: Calculate event shape variables, i.e., sphericity, transverse sphericity, aplanarity, and planarity.
Delta_R: Returns angular distance between two final state particles.
Feature_Architect: Takes a LHCO event file and performs two jobs, (1) imposes basic selection cuts to ensure signature purity and (2) outputs data
in the form of a list of raw features which conforms to the basic format of a design matrix which can be directly used as input into classification
algorithms. Each data instance corresponds to a given event while the list of raw features within a given instance is as follows:
e_Num = Number of electrons
p_Num = Number of positrons
H_T_leptons = Total transverse momentum of all leptons
H_T_jets = Total transverse momentum of all jets
Delta_R_leptons = Angular distance between the two leading leptons
Delta_R_jets = Angular distance between the two leading jets
Delta_R_leptonjet = Angular distance between the leading lepton and jet
dilepton_mass = Invariant mass of the leading leptons
dijet_mass = Invariant mass of the leading jets
dileptonjet_mass = Invariant mass of the leading lepton and jet
cos_leptons = Polar angle between the two leading leptons in the Collins-Soper frame
cos_jets = Polar angle between the two leading jets in the Collins-Soper frame
cos_leptonjet = Polar angle between the leading lepton and jet in the Collins-Soper frame
MET = Missing transverse momentum
S_leptons = The sphericity of all leptons
TS_leptons = The transverse sphericity of all leptons
AP_leptons = The aplanarity of all leptons
P_leptons = The planarity of all leptons
S_jets = The sphericity of all jets
TS_jets = The transverse sphericity of all jets
AP_jets = The aplanarity of all jets
P_jets = The planarity of all jets
S_global = The sphericity of the full event
TS_global = The transverse sphericity of the full event (otherwise known as the circularity)
AP_global = The aplanarity of the full event
P_global = The planarity of the full event
Hist_Constructor: Constructs probability density plots based on a specified bin structure, cross section, and weight
'''
def Three_Vec(self, eta, phi, pT):
p1, p2, p3 = pT * sin(phi), pT * cos(phi), pT * sinh(eta)
return np.array([p1, p2, p3])
def Four_Vec(self, eta, phi, pT):
p0, p1, p2, p3 = pT * cosh(eta), pT * sin(phi), pT * cos(phi), pT * sinh(eta)
return np.array([p0, p1, p2, p3])
def Inv_Mass(self, p):
p0, p1, p2, p3 = p[0], p[1], p[2], p[3]
inv_mass = np.sqrt( p0**2 - p1**2 - p2**2 - p3*2 )
return inv_mass
def cos_theta_star(self, eta_1, phi_1, pT_1, eta_2, phi_2, pT_2):
pT_12 = LA.norm( np.array([pT_1 * sin(phi_1) + pT_2 * sin(phi_2), pT_1 * cos(phi_1) + pT_2 * cos(phi_2)]) )
p_12 = self.Four_Vec( eta_1, phi_1, pT_1 ) + self.Four_Vec( eta_2, phi_2, pT_2 )
m_12 = self.Inv_Mass( p_12 )
cos_theta = ( np.abs( sinh( eta_1 - eta_2 ) ) / np.sqrt( 1 + ( pT_12 / m_12 )**2 ) ) * ( 2 * pT_1 * pT_2 / m_12**2 )
return cos_theta
def Sphericity_Tensor(self, p):
px, py, pz = p[0], p[1], p[2]
M_xyz = np.array([[px**2, px * py, px * pz], [py * px, py**2, py * pz], [pz * px, pz * py, pz**2]])
return M_xyz
def Event_Shape_Variables(self, S_tensor):
# Calculate eigenvalues of sphericity matrices
lmbda = LA.eigvals(S_tensor).real
# Normalize and sort set of eigenvalues
lmbda = np.sort(lmbda / LA.norm(lmbda))[::-1]
# Define event shape variables
Sphericity = 1.5 * ( lmbda[1] + lmbda[2] )
Trans_Sphericity = 2 * lmbda[1] / ( lmbda[0] + lmbda[1] )
Aplanarity = 1.5 * lmbda[2]
Planarity = lmbda[1] - lmbda[2]
return Sphericity, Trans_Sphericity, Aplanarity, Planarity
def Delta_R(self, eta_1, phi_1, eta_2, phi_2):
delta_eta = eta_1 - eta_2
if phi_1 - phi_2 < - np.pi:
delta_phi = (phi_1 - phi_2) + 2 * np.pi
elif phi_1 - phi_2 > np.pi:
delta_phi = (phi_1 - phi_2) - 2 * np.pi
else:
delta_phi = phi_1 - phi_2
return np.sqrt( delta_eta**2 + delta_phi**2 )
def Feature_Architect(self, lhco_file):
with open(lhco_file, 'r+') as File:
delphes_file = File.readlines()
# Read off average matched cross section
#sigma = float(re.findall("-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?", delphes_file[0])[0])
sigma = float( delphes_file[0].split(':')[1].strip() )
# Initialize the design matrix for the given dataset
X = []
# Initialize all analysis features
e_Num, L_e_pT, NL_e_pT, L_e_eta, L_e_phi, NL_e_eta, NL_e_phi = 0, 0, 0, 0, 0, 0, 0
p_Num, L_p_pT, NL_p_pT, L_p_eta, L_p_phi, NL_p_eta, NL_p_phi = 0, 0, 0, 0, 0, 0, 0
j_Num, L_j_pT, L_j_eta, L_j_phi, NL_j_pT, NL_j_eta, NL_j_phi = 0, 0, 0, 0, 0, 0, 0
NNL_j_pT, NNL_j_eta, NNL_j_phi, NNNL_j_pT, NNNL_j_eta, NNNL_j_phi = 0, 0, 0, 0, 0, 0
H_T_jets = 0
b_Num = 0
MET = 0
# Initialize sphericity tensors for the global, hadronic, and leptonic geometries
Sph_global = np.zeros((3,3), dtype = 'float')
Sph_jets = np.zeros((3,3), dtype = 'float')
Sph_leptons = np.zeros((3,3), dtype = 'float')
CF_Flag = False # Initialize charge flip flag
CF_prob_sum = 0 # Initialize a total charge flip probability
JF1_Flag = False # Initialize single jet fake flag
JF1_prob = (0.5/5000) * nCk(3, 1) # Define single jet fake probability
JF2_Flag = False # Initialize double jet fake flag
JF2_prob = (0.5/5000)**2 * nCk(4, 2) # Define double jet fake probability
# Begin event collection
line = 5 # Skip header info
Tot_event_counter, event_counter = 0, 0 # Initialize counters for total and retained events
while line < len(delphes_file):
# Collect info from a given event
if float(delphes_file[line].strip().split()[0]) != 0:
# Collect state info
state_info = [float(i) for i in delphes_file[line].strip().split()]
# If electron, collect electron info
if state_info[1] == 1 and state_info[6] == -1:
e_Num += 1
if state_info[4] > L_e_pT:
L_e_pT = state_info[4]
L_e_eta = state_info[2]
L_e_phi = state_info[3]
if NL_e_pT < state_info[4] < L_e_pT:
NL_e_pT = state_info[4]
NL_e_eta = state_info[2]
NL_e_phi = state_info[3]
# If positron, collect positron info
if state_info[1] == 1 and state_info[6] == 1:
p_Num += 1
if state_info[4] > L_p_pT:
L_p_pT = state_info[4]
L_p_eta = state_info[2]
L_p_phi = state_info[3]
if NL_p_pT < state_info[4] < L_p_pT:
NL_p_pT = state_info[4]
NL_p_eta = state_info[2]
NL_p_phi = state_info[3]
# If jet, collect jet info. Note: jets with pT too small to be mistaken for leptons still contribute to H_T and S_jets.
if state_info[1] == 4 and state_info[7] == 0:
j_Num += 1
if state_info[4] > L_j_pT:
L_j_pT = state_info[4]
L_j_eta = state_info[2]
L_j_phi = state_info[3]
elif NL_j_pT < state_info[4] and state_info[4] < L_j_pT:
NL_j_pT = state_info[4]
NL_j_eta = state_info[2]
NL_j_phi = state_info[3]
elif NNL_j_pT < state_info[4] and state_info[4] < NL_j_pT:
NNL_j_pT = state_info[4]
NNL_j_eta = state_info[2]
NNL_j_phi = state_info[3]
elif NNNL_j_pT < state_info[4] and state_info[4] < NNL_j_pT:
NNNL_j_pT = state_info[4]
NNNL_j_eta = state_info[2]
NNNL_j_phi = state_info[3]
elif state_info[4] < NNNL_j_pT:
H_T_jets += state_info[4]
Sph_jets += self.Sphericity_Tensor(self.Three_Vec(state_info[2], state_info[3], state_info[4]))
# If b jet, collect b jet info
if state_info[1] == 4 and state_info[7] > 0:
b_Num += 1
# If MET, collect MET info
if state_info[1] == 6:
MET += state_info[4]
# Increment line number
line += 1
else:
# Impose basic selection cuts. Signature: 2 same-sign leptons + (>= 2 jets) + (== 0 b-jets).
if (e_Num >= self.Ne_1 and p_Num >= self.Np_1 and j_Num >= self.Nj and b_Num == self.Nb) \
or (e_Num >= self.Ne_2 and p_Num >= self.Np_2 and j_Num >= self.Nj and b_Num == self.Nb):
# Once all requirements are passed then also increment retained event count
event_counter += 1
# Virtually all training features depend on which type of background is being currently analyzed. Because of this,
# we'll analyse each case individually below.
# Diboson selection cuts
if self.Ne_1 == 2 and self.Np_1 == 0 and self.Ne_2 == 0 and self.Np_2 == 2 and self.Nj == 2 and self.Nb == 0:
# Check if same-sign leptons are electrons or positrons. For ZZ events there's likely an equal number of e's
# and p's. In that case, take the same-sign pair to be the one with largest L_pT.
if (e_Num > p_Num) or ((e_Num == p_Num) and (L_e_pT > L_p_pT)):
H_T_leptons = L_e_pT + NL_e_pT
Delta_R_leptons = self.Delta_R(L_e_eta, L_e_phi, NL_e_eta, NL_e_phi)
Delta_R_leptonjet = self.Delta_R(L_e_eta, L_e_phi, L_j_eta, L_j_phi)
dilepton_p = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(NL_e_eta, NL_e_phi, NL_e_pT)
dilepton_mass = self.Inv_Mass(dilepton_p)
dileptonjet_p = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass = self.Inv_Mass(dileptonjet_p)
cos_leptons = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, NL_e_eta, NL_e_phi, NL_e_pT)
cos_leptonjet = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_j_eta, L_j_phi, L_j_pT)
Sph_leptons = self.Sphericity_Tensor(self.Three_Vec(L_e_eta, L_e_phi, L_e_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_e_eta, NL_e_phi, NL_e_pT))
elif (e_Num < p_Num) or ((e_Num == p_Num) and (L_e_pT < L_p_pT)):
H_T_leptons = L_p_pT + NL_p_pT
Delta_R_leptons = self.Delta_R(L_p_eta, L_p_phi, NL_p_eta, NL_p_phi)
Delta_R_leptonjet = self.Delta_R(L_p_eta, L_p_phi, L_j_eta, L_j_phi)
dilepton_p = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(NL_p_eta, NL_p_phi, NL_p_pT)
dilepton_mass = self.Inv_Mass(dilepton_p)
dileptonjet_p = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass = self.Inv_Mass(dileptonjet_p)
cos_leptons = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, NL_p_eta, NL_p_phi, NL_p_pT)
cos_leptonjet = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, L_j_eta, L_j_phi, L_j_pT)
Sph_leptons = self.Sphericity_Tensor(self.Three_Vec(L_p_eta, L_p_phi, L_p_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_p_eta, NL_p_phi, NL_p_pT))
H_T_jets += L_j_pT + NL_j_pT + NNL_j_pT + NNNL_j_pT
Delta_R_jets = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
dijet_p = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dijet_mass = self.Inv_Mass(dijet_p)
cos_jets = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
Sph_jets += self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_global = Sph_leptons + Sph_jets
S_leptons, TS_leptons, AP_leptons, P_leptons = self.Event_Shape_Variables(Sph_leptons)
S_jets, TS_jets, AP_jets, P_jets = self.Event_Shape_Variables(Sph_jets)
S_global, TS_global, AP_global, P_global = self.Event_Shape_Variables(Sph_global)
# Charge-flip selection cuts
if self.Ne_1 == 1 and self.Np_1 == 1 and self.Ne_2 == 1 and self.Np_2 == 1 and self.Nj == 2 and self.Nb == 0:
CF_Flag = True # Set charge flip flag
H_T_leptons = L_e_pT + L_p_pT
H_T_jets += L_j_pT + NL_j_pT + NNL_j_pT + NNNL_j_pT
Delta_R_leptons = self.Delta_R(L_e_eta, L_e_phi, L_p_eta, L_p_phi)
Delta_R_jets = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
dilepton_p = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_p_eta, L_p_phi, L_p_pT)
dilepton_mass = self.Inv_Mass(dilepton_p)
dijet_p = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dijet_mass = self.Inv_Mass(dijet_p)
cos_leptons = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_p_eta, L_p_phi, L_p_pT)
cos_jets = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
Sph_leptons = self.Sphericity_Tensor(self.Three_Vec(L_e_eta, L_e_phi, L_e_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(L_p_eta, L_p_phi, L_p_pT))
Sph_jets += self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_global = Sph_leptons + Sph_jets
S_leptons, TS_leptons, AP_leptons, P_leptons = self.Event_Shape_Variables(Sph_leptons)
S_jets, TS_jets, AP_jets, P_jets = self.Event_Shape_Variables(Sph_jets)
S_global, TS_global, AP_global, P_global = self.Event_Shape_Variables(Sph_global)
if L_e_pT > L_p_pT:
# Adjust lepton numbers
e_Num += 1
p_Num -= 1
# Increment charge flip probability
CF_prob_sum += CF_probs(L_p_eta)
Delta_R_leptonjet = self.Delta_R(L_e_eta, L_e_phi, L_j_eta, L_j_phi)
dileptonjet_p = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass = self.Inv_Mass(dileptonjet_p)
cos_leptonjet = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_j_eta, L_j_phi, L_j_pT)
else:
# Adjust lepton numbers
e_Num -= 1
p_Num += 1
# Increment charge flip probability
CF_prob_sum += CF_probs(L_e_eta)
Delta_R_leptonjet = self.Delta_R(L_p_eta, L_p_phi, L_j_eta, L_j_phi)
dileptonjet_p = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass = self.Inv_Mass(dileptonjet_p)
cos_leptonjet = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, L_j_eta, L_j_phi, L_j_pT)
# Single jetfake selection cuts
if self.Ne_1 == 1 and self.Np_1 == 0 and self.Ne_2 == 0 and self.Np_2 == 1 and self.Nj == 3 and self.Nb == 0:
JF1_Flag = True # Set single jet fake flag
# Adjust number of jets
j_Num -= 1
if (e_Num > p_Num) or ((e_Num == p_Num) and (L_e_pT > L_p_pT)):
# Adjust lepton number
e_Num += 1
H_T_leptons_1 = L_e_pT + L_j_pT
H_T_leptons_2 = L_e_pT + NL_j_pT
H_T_leptons_3 = L_e_pT + NNL_j_pT
H_T_leptons = np.mean([H_T_leptons_1, H_T_leptons_2, H_T_leptons_3])
Delta_R_leptons_1 = self.Delta_R(L_e_eta, L_e_phi, L_j_eta, L_j_phi)
Delta_R_leptons_2 = self.Delta_R(L_e_eta, L_e_phi, NL_j_eta, NL_j_phi)
Delta_R_leptons_3 = self.Delta_R(L_e_eta, L_e_phi, NNL_j_eta, NNL_j_phi)
Delta_R_leptons = np.mean([Delta_R_leptons_1, Delta_R_leptons_2, Delta_R_leptons_3])
Delta_R_leptonjet_1 = self.Delta_R(L_e_eta, L_e_phi, NL_j_eta, NL_j_phi)
Delta_R_leptonjet_2 = self.Delta_R(L_e_eta, L_e_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet_3 = self.Delta_R(L_e_eta, L_e_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet = np.mean([Delta_R_leptonjet_1, Delta_R_leptonjet_2, Delta_R_leptonjet_3])
dilepton_p_1 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dilepton_mass_1 = self.Inv_Mass(dilepton_p_1)
dilepton_p_2 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dilepton_mass_2 = self.Inv_Mass(dilepton_p_2)
dilepton_p_3 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dilepton_mass_3 = self.Inv_Mass(dilepton_p_3)
dilepton_mass = np.mean([dilepton_mass_1, dilepton_mass_2, dilepton_mass_3])
dileptonjet_p_1 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dileptonjet_mass_1 = self.Inv_Mass(dileptonjet_p_1)
dileptonjet_p_2 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_2 = self.Inv_Mass(dileptonjet_p_2)
dileptonjet_p_3 = self.Four_Vec(L_e_eta, L_e_phi, L_e_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_3 = self.Inv_Mass(dileptonjet_p_3)
dileptonjet_mass = np.mean([dileptonjet_mass_1, dileptonjet_mass_2, dileptonjet_mass_3])
cos_leptons_1 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptons_2 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptons_3 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_leptons = np.mean([cos_leptons_1, cos_leptons_2, cos_leptons_3])
cos_leptonjet_1 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptonjet_2 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet_3 = self.cos_theta_star(L_e_eta, L_e_phi, L_e_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet = np.mean([cos_leptonjet_1, cos_leptonjet_2, cos_leptonjet_3])
Sph_leptons_1 = self.Sphericity_Tensor(self.Three_Vec(L_e_eta, L_e_phi, L_e_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT))
Sph_leptons_2 = self.Sphericity_Tensor(self.Three_Vec(L_e_eta, L_e_phi, L_e_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT))
Sph_leptons_3 = self.Sphericity_Tensor(self.Three_Vec(L_e_eta, L_e_phi, L_e_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT))
Sph_leptons = (Sph_leptons_1 + Sph_leptons_2 + Sph_leptons_3) / 3
elif (e_Num < p_Num) or ((e_Num == p_Num) and (L_e_pT < L_p_pT)):
# Adjust lepton number
p_Num += 1
H_T_leptons_1 = L_p_pT + L_j_pT
H_T_leptons_2 = L_p_pT + NL_j_pT
H_T_leptons_3 = L_p_pT + NNL_j_pT
H_T_leptons = np.mean([H_T_leptons_1, H_T_leptons_2, H_T_leptons_3])
Delta_R_leptons_1 = self.Delta_R(L_p_eta, L_p_phi, L_j_eta, L_j_phi)
Delta_R_leptons_2 = self.Delta_R(L_p_eta, L_p_phi, NL_j_eta, NL_j_phi)
Delta_R_leptons_3 = self.Delta_R(L_p_eta, L_p_phi, NNL_j_eta, NNL_j_phi)
Delta_R_leptons = np.mean([Delta_R_leptons_1, Delta_R_leptons_2, Delta_R_leptons_3])
Delta_R_leptonjet_1 = self.Delta_R(L_p_eta, L_p_phi, NL_j_eta, NL_j_phi)
Delta_R_leptonjet_2 = self.Delta_R(L_p_eta, L_p_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet_3 = self.Delta_R(L_p_eta, L_p_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet = np.mean([Delta_R_leptonjet_1, Delta_R_leptonjet_2, Delta_R_leptonjet_3])
dilepton_p_1 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dilepton_mass_1 = self.Inv_Mass(dilepton_p_1)
dilepton_p_2 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dilepton_mass_2 = self.Inv_Mass(dilepton_p_2)
dilepton_p_3 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dilepton_mass_3 = self.Inv_Mass(dilepton_p_3)
dilepton_mass = np.mean([dilepton_mass_1, dilepton_mass_2, dilepton_mass_3])
dileptonjet_p_1 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dileptonjet_mass_1 = self.Inv_Mass(dileptonjet_p_1)
dileptonjet_p_2 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_2 = self.Inv_Mass(dileptonjet_p_2)
dileptonjet_p_3 = self.Four_Vec(L_p_eta, L_p_phi, L_p_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_3 = self.Inv_Mass(dileptonjet_p_3)
dileptonjet_mass = np.mean([dileptonjet_mass_1, dileptonjet_mass_2, dileptonjet_mass_3])
cos_leptons_1 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptons_2 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptons_3 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_leptons = np.mean([cos_leptons_1, cos_leptons_2, cos_leptons_3])
cos_leptonjet_1 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptonjet_2 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet_3 = self.cos_theta_star(L_p_eta, L_p_phi, L_p_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet = np.mean([cos_leptonjet_1, cos_leptonjet_2, cos_leptonjet_3])
Sph_leptons_1 = self.Sphericity_Tensor(self.Three_Vec(L_p_eta, L_p_phi, L_p_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT))
Sph_leptons_2 = self.Sphericity_Tensor(self.Three_Vec(L_p_eta, L_p_phi, L_p_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT))
Sph_leptons_3 = self.Sphericity_Tensor(self.Three_Vec(L_p_eta, L_p_phi, L_p_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT))
Sph_leptons = (Sph_leptons_1 + Sph_leptons_2 + Sph_leptons_3) / 3
H_T_jets_1 = NL_j_pT + NNL_j_pT + NNNL_j_pT
H_T_jets_2 = L_j_pT + NNL_j_pT + NNNL_j_pT
H_T_jets_3 = L_j_pT + NL_j_pT + NNNL_j_pT
H_T_jets += np.mean([H_T_jets_1, H_T_jets_2, H_T_jets_3])
Delta_R_jets_1 = self.Delta_R(NL_j_eta, NL_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_jets_2 = self.Delta_R(L_j_eta, L_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_jets_3 = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
Delta_R_jets = np.mean([Delta_R_jets_1, Delta_R_jets_2, Delta_R_jets_3])
dijet_p_1 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dijet_mass_1 = self.Inv_Mass(dijet_p_1)
dijet_p_2 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dijet_mass_2 = self.Inv_Mass(dijet_p_2)
dijet_p_3 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dijet_mass_3 = self.Inv_Mass(dijet_p_3)
dijet_mass = np.mean([dijet_mass_1, dijet_mass_2, dijet_mass_3])
cos_jets_1 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_jets_2 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_jets_3 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_jets = np.mean([cos_jets_1, cos_jets_2, cos_jets_3])
Sph_jets_1 = self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets_2 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets_3 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets += (Sph_jets_1 + Sph_jets_2 + Sph_jets_3) / 3
Sph_global = Sph_leptons + Sph_jets
S_leptons, TS_leptons, AP_leptons, P_leptons = self.Event_Shape_Variables(Sph_leptons)
S_jets, TS_jets, AP_jets, P_jets = self.Event_Shape_Variables(Sph_jets)
S_global, TS_global, AP_global, P_global = self.Event_Shape_Variables(Sph_global)
# Double jetfake selection cuts
if self.Ne_1 == 0 and self.Np_1 == 0 and self.Ne_2 == 0 and self.Np_2 == 0 and self.Nj == 4 and self.Nb == 0:
JF2_Flag = True # Set double jet fake flag
# Adjust number of jets, electrons, and positrons
j_Num -= 2
if random() < 0.5:
e_Num += 2
else:
p_Num += 2
H_T_leptons_1 = L_j_pT + NL_j_pT
H_T_leptons_2 = L_j_pT + NNL_j_pT
H_T_leptons_3 = L_j_pT + NNNL_j_pT
H_T_leptons_4 = NL_j_pT + NNL_j_pT
H_T_leptons_5 = NL_j_pT + NNNL_j_pT
H_T_leptons_6 = NNL_j_pT + NNNL_j_pT
H_T_leptons = np.mean([H_T_leptons_1, H_T_leptons_2, H_T_leptons_3, H_T_leptons_4, H_T_leptons_5, H_T_leptons_6])
H_T_jets_1 = NNL_j_pT + NNNL_j_pT
H_T_jets_2 = NL_j_pT + NNNL_j_pT
H_T_jets_3 = NL_j_pT + NNL_j_pT
H_T_jets_4 = L_j_pT + NNNL_j_pT
H_T_jets_5 = L_j_pT + NNL_j_pT
H_T_jets_6 = L_j_pT + NL_j_pT
H_T_jets += np.mean([H_T_jets_1, H_T_jets_2, H_T_jets_3, H_T_jets_4, H_T_jets_5, H_T_jets_6])
Delta_R_leptons_1 = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
Delta_R_leptons_2 = self.Delta_R(L_j_eta, L_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_leptons_3 = self.Delta_R(L_j_eta, L_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_leptons_4 = self.Delta_R(NL_j_eta, NL_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_leptons_5 = self.Delta_R(NL_j_eta, NL_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_leptons_6 = self.Delta_R(NNL_j_eta, NNL_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_leptons = np.mean([Delta_R_leptons_1, Delta_R_leptons_2, Delta_R_leptons_3, Delta_R_leptons_4,
Delta_R_leptons_5, Delta_R_leptons_6])
Delta_R_jets_1 = self.Delta_R(NNL_j_eta, NNL_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_jets_2 = self.Delta_R(NL_j_eta, NL_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_jets_3 = self.Delta_R(NL_j_eta, NL_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_jets_4 = self.Delta_R(L_j_eta, L_j_phi, NNNL_j_eta, NNNL_j_phi)
Delta_R_jets_5 = self.Delta_R(L_j_eta, L_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_jets_6 = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
Delta_R_jets = np.mean([Delta_R_jets_1, Delta_R_jets_2, Delta_R_jets_3, Delta_R_jets_4, Delta_R_jets_5,
Delta_R_jets_6])
Delta_R_leptonjet_1 = self.Delta_R(L_j_eta, L_j_phi, NNL_j_eta, NNL_j_phi)
Delta_R_leptonjet_2 = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
Delta_R_leptonjet_3 = self.Delta_R(L_j_eta, L_j_phi, NL_j_eta, NL_j_phi)
Delta_R_leptonjet_4 = self.Delta_R(NL_j_eta, NL_j_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet_5 = self.Delta_R(NL_j_eta, NL_j_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet_6 = self.Delta_R(NNL_j_eta, NNL_j_phi, L_j_eta, L_j_phi)
Delta_R_leptonjet = np.mean([Delta_R_leptonjet_1, Delta_R_leptonjet_2, Delta_R_leptonjet_3, Delta_R_leptonjet_4,
Delta_R_leptonjet_5, Delta_R_leptonjet_6])
dilepton_p_1 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dilepton_mass_1 = self.Inv_Mass(dilepton_p_1)
dilepton_p_2 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dilepton_mass_2 = self.Inv_Mass(dilepton_p_2)
dilepton_p_3 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dilepton_mass_3 = self.Inv_Mass(dilepton_p_3)
dilepton_p_4 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dilepton_mass_4 = self.Inv_Mass(dilepton_p_4)
dilepton_p_5 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dilepton_mass_5 = self.Inv_Mass(dilepton_p_5)
dilepton_p_6 = self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dilepton_mass_6 = self.Inv_Mass(dilepton_p_6)
dilepton_mass = np.mean([dilepton_mass_1, dilepton_mass_2, dilepton_mass_3, dilepton_mass_4, dilepton_mass_5,
dilepton_mass_6])
dijet_p_1 = self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dijet_mass_1 = self.Inv_Mass(dijet_p_1)
dijet_p_2 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dijet_mass_2 = self.Inv_Mass(dijet_p_2)
dijet_p_3 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dijet_mass_3 = self.Inv_Mass(dijet_p_3)
dijet_p_4 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
dijet_mass_4 = self.Inv_Mass(dijet_p_4)
dijet_p_5 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dijet_mass_5 = self.Inv_Mass(dijet_p_5)
dijet_p_6 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dijet_mass_6 = self.Inv_Mass(dijet_p_6)
dijet_mass = np.mean([dijet_mass_1, dijet_mass_2, dijet_mass_3, dijet_mass_4, dijet_mass_5, dijet_mass_6])
dileptonjet_p_1 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)
dileptonjet_mass_1 = self.Inv_Mass(dileptonjet_p_1)
dileptonjet_p_2 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dileptonjet_mass_2 = self.Inv_Mass(dileptonjet_p_2)
dileptonjet_p_3 = self.Four_Vec(L_j_eta, L_j_phi, L_j_pT) + self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT)
dileptonjet_mass_3 = self.Inv_Mass(dileptonjet_p_3)
dileptonjet_p_4 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_4 = self.Inv_Mass(dileptonjet_p_4)
dileptonjet_p_5 = self.Four_Vec(NL_j_eta, NL_j_phi, NL_j_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_5 = self.Inv_Mass(dileptonjet_p_5)
dileptonjet_p_6 = self.Four_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT) + self.Four_Vec(L_j_eta, L_j_phi, L_j_pT)
dileptonjet_mass_6 = self.Inv_Mass(dileptonjet_p_6)
dileptonjet_mass = np.mean([dileptonjet_mass_1, dileptonjet_mass_2, dileptonjet_mass_3, dileptonjet_mass_4,
dileptonjet_mass_5, dileptonjet_mass_6])
cos_leptons_1 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptons_2 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_leptons_3 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_leptons_4 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_leptons_5 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_leptons_6 = self.cos_theta_star(NNL_j_eta, NNL_j_phi, NNL_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_leptons = np.mean([cos_leptons_1, cos_leptons_2, cos_leptons_3, cos_leptons_4, cos_leptons_5, cos_leptons_6])
cos_jets_1 = self.cos_theta_star(NNL_j_eta, NNL_j_phi, NNL_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_jets_2 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_jets_3 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_jets_4 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNNL_j_eta, NNNL_j_phi, NNNL_j_pT)
cos_jets_5 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_jets_6 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_jets = np.mean([cos_jets_1, cos_jets_2, cos_jets_3, cos_jets_4, cos_jets_5, cos_jets_6])
cos_leptonjet_1 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NNL_j_eta, NNL_j_phi, NNL_j_pT)
cos_leptonjet_2 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptonjet_3 = self.cos_theta_star(L_j_eta, L_j_phi, L_j_pT, NL_j_eta, NL_j_phi, NL_j_pT)
cos_leptonjet_4 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet_5 = self.cos_theta_star(NL_j_eta, NL_j_phi, NL_j_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet_6 = self.cos_theta_star(NNL_j_eta, NNL_j_phi, NNL_j_pT, L_j_eta, L_j_phi, L_j_pT)
cos_leptonjet = np.mean([cos_leptonjet_1, cos_leptonjet_2, cos_leptonjet_3, cos_leptonjet_4, cos_leptonjet_5,
cos_leptonjet_6])
Sph_leptons_1 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT))
Sph_leptons_2 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT))
Sph_leptons_3 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_leptons_4 = self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT))
Sph_leptons_5 = self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_leptons_6 = self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_leptons = (Sph_leptons_1 + Sph_leptons_2 + Sph_leptons_3 + Sph_leptons_4 + Sph_leptons_5 + Sph_leptons_6) / 6
Sph_jets_1 = self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets_2 = self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets_3 = self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT))
Sph_jets_4 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNNL_j_eta, NNNL_j_phi, NNNL_j_pT))
Sph_jets_5 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NNL_j_eta, NNL_j_phi, NNL_j_pT))
Sph_jets_6 = self.Sphericity_Tensor(self.Three_Vec(L_j_eta, L_j_phi, L_j_pT)) + \
self.Sphericity_Tensor(self.Three_Vec(NL_j_eta, NL_j_phi, NL_j_pT))
Sph_jets += (Sph_jets_1 + Sph_jets_2 + Sph_jets_3 + Sph_jets_4 + Sph_jets_5 + Sph_jets_6) / 6
Sph_global = Sph_leptons + Sph_jets
S_leptons, TS_leptons, AP_leptons, P_leptons = self.Event_Shape_Variables(Sph_leptons)
S_jets, TS_jets, AP_jets, P_jets = self.Event_Shape_Variables(Sph_jets)
S_global, TS_global, AP_global, P_global = self.Event_Shape_Variables(Sph_global)
# Append event info to design matrix
X.append([e_Num, p_Num, j_Num, H_T_leptons, H_T_jets, Delta_R_leptons, Delta_R_jets, Delta_R_leptonjet, dilepton_mass,
dijet_mass, dileptonjet_mass, cos_leptons, cos_jets, cos_leptonjet, MET, S_leptons, TS_leptons, AP_leptons,
P_leptons, S_jets, TS_jets, AP_jets, P_jets, S_global, TS_global, AP_global, P_global])
# Increment total event count
Tot_event_counter += 1
# Re-initialize all features
e_Num, L_e_pT, NL_e_pT, L_e_eta, L_e_phi, NL_e_eta, NL_e_phi = 0, 0, 0, 0, 0, 0, 0
p_Num, L_p_pT, NL_p_pT, L_p_eta, L_p_phi, NL_p_eta, NL_p_phi = 0, 0, 0, 0, 0, 0, 0
j_Num, L_j_pT, L_j_eta, L_j_phi, NL_j_pT, NL_j_eta, NL_j_phi = 0, 0, 0, 0, 0, 0, 0
NNL_j_pT, NNL_j_eta, NNL_j_phi, NNNL_j_pT, NNNL_j_eta, NNNL_j_phi = 0, 0, 0, 0, 0, 0
H_T_jets = 0
b_Num = 0
MET = 0
Sph_global = np.zeros((3,3), dtype = 'float')
Sph_jets = np.zeros((3,3), dtype = 'float')
Sph_leptons = np.zeros((3,3), dtype = 'float')
# Increment line number
line += 1
# Define percentage of total events that passed basic selection cuts
perc = event_counter / Tot_event_counter
# Define weight and cross section
if CF_Flag:
w = sigma * (CF_prob_sum / event_counter) / Tot_event_counter
sigma_new = w * event_counter
elif JF1_Flag:
w = sigma * JF1_prob / Tot_event_counter
sigma_new = w * event_counter
elif JF2_Flag:
w = sigma * JF2_prob / Tot_event_counter
sigma_new = w * event_counter
else:
w = sigma / Tot_event_counter
sigma_new = w * event_counter
# Reset all flags in case the same class instance is used for multiple backgrounds
CF_Flag, JF1_Flag, JF2_Flag = False, False, False
# Return design matrix, adjusted cross section, cross section weight, and percentage of events that passed selection cuts
return X, sigma_new, w, perc
def Hist_Constructor(self, df, feature, sigma, w, min_val, max_val, bin_size):
    """Build step-plot histogram coordinates for *feature*.

    Counts the rows of *df* falling into each bin of width *bin_size* on
    [min_val, max_val + bin_size], scales each count by w / sigma, and
    returns (edges, weights) with every point duplicated so the pair can be
    drawn directly as a step function.
    """
    # Bin edges; the extra 2 * bin_size makes arange emit the final edge.
    edges = np.arange(min_val, max_val + 2 * bin_size, bin_size)
    # Per-bin aggregate weight.
    # NOTE(review): both edge comparisons are inclusive, so a value landing
    # exactly on an interior edge is counted in two adjacent bins — confirm
    # this is intended.
    weights = []
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = df[(df[feature] >= lo) & (df[feature] <= hi)]
        weights.append(len(in_bin) * w / sigma)
    # Duplicate every coordinate; trimming the first/last edge yields the
    # standard "staircase" x/y sequences.
    step_edges = tuple(itertools.chain.from_iterable(zip(edges, edges)))[1:-1]
    step_weights = tuple(itertools.chain.from_iterable(zip(weights, weights)))
    return step_edges, step_weights
if __name__ == '__main__':
    # Define some paths
    work_PATH = os.getcwd()
    data_PATH = '/Users/pwinslow/ACFI/Research_Projects/Current_Projects/LNV@100TeV/Analysis_Codes/Data'
    # Define feature list: column names for every design matrix built below
    # (must match the row layout produced by Wrangler.Feature_Architect).
    Feature_List = ['electron number', 'positron number', 'jet Number', 'H_T(e)', 'H_T(j)', 'Delta_R(L_e, NL_e)',
                    'Delta_R(L_j, NL_j)', 'Delta_R(L_e, L_j)', 'm(L_e, NL_e)', 'm(L_j, NL_j)', 'm(L_e, L_j)',
                    'cos(L_e, NL_e)', 'cos(L_j, NL_j)', 'cos(L_e, L_j)', 'MET', 'Sphericity_leptonic',
                    'Transverse_Sphericity_leptonic', 'Aplanarity_leptonic', 'Planarity_leptonic', 'Sphericity_hadronic',
                    'Transverse_Sphericity_hadronic', 'Aplanarity_hadronic', 'Planarity_hadronic', 'Sphericity_global',
                    'Transverse_Sphericity_hadronic'[:0] or 'Transverse_Sphericity_global', 'Aplanarity_global', 'Planarity_global']
    # Initiate a Wrangler class instance for each background.
    # The keyword arguments are the lepton/jet/b-jet multiplicity selection
    # cuts used inside Feature_Architect for that channel.
    DiBoson_Wrangler = Wrangler(Ne_1=2, Np_1=0, Ne_2=0, Np_2=2, Nj=2, Nb=0)
    ChargeFlip_Wrangler = Wrangler(Ne_1=1, Np_1=1, Ne_2=1, Np_2=1, Nj=2, Nb=0)
    OneJetFake_Wrangler = Wrangler(Ne_1=1, Np_1=0, Ne_2=0, Np_2=1, Nj=3, Nb=0)
    TwoJetFake_Wrangler = Wrangler(Ne_1=0, Np_1=0, Ne_2=0, Np_2=0, Nj=4, Nb=0)
    # Initiate a Wrangler class instance for the signal
    Signal_Wrangler = Wrangler(Ne_1=2, Np_1=0, Ne_2=0, Np_2=2, Nj=2, Nb=0)
    # Prepare for data imports (all .dat event files live in data_PATH)
    os.chdir(data_PATH)
    # Import Diboson backgrounds, creating a dataframe for each one
    print 'Beginning wrangling process...'
    # Create dataframe for diboson WW background
    X, sigma_jjWW, w_jjWW, jjWW_fraction = DiBoson_Wrangler.Feature_Architect('full_jjWWBG_lhco_events.dat')
    X_jjWW = DataFrame(np.asarray(X))
    X_jjWW.columns = Feature_List
    X_jjWW['Class'] = 'Background'
    X_jjWW['Subclass'] = 'DiBoson WW'
    print 'Diboson-WW backgrounds wrangled...'
    # Create dataframe for diboson WZ background
    X, sigma_jjWZ, w_jjWZ, jjWZ_fraction = DiBoson_Wrangler.Feature_Architect('full_jjWZBG_lhco_events.dat')
    X_jjWZ = DataFrame(np.asarray(X))
    X_jjWZ.columns = Feature_List
    X_jjWZ['Class'] = 'Background'
    X_jjWZ['Subclass'] = 'DiBoson WZ'
    print 'Diboson-WZ backgrounds wrangled...'
    # Create dataframe for diboson ZZ background
    X, sigma_jjZZ, w_jjZZ, jjZZ_fraction = DiBoson_Wrangler.Feature_Architect('full_jjZZBG_lhco_events.dat')
    X_jjZZ = DataFrame(np.asarray(X))
    X_jjZZ.columns = Feature_List
    X_jjZZ['Class'] = 'Background'
    X_jjZZ['Subclass'] = 'DiBoson ZZ'
    print 'Diboson-ZZ backgrounds wrangled...'
    # Import ChargeFlip backgrounds, creating a dataframe for each one
    # Create dataframe for ChargeFlip Zy background
    X, sigma_Zy, w_Zy, Zy_fraction = ChargeFlip_Wrangler.Feature_Architect('full_jjZyCFBG_lhco_events.dat')
    X_Zy = DataFrame(np.asarray(X))
    X_Zy.columns = Feature_List
    X_Zy['Class'] = 'Background'
    X_Zy['Subclass'] = 'ChargeFlip Zy'
    print 'ChargeFlip-Zy backgrounds wrangled...'
    # Create dataframe for ChargeFlip ttbar background
    X, sigma_ttCF, w_ttCF, ttCF_fraction = ChargeFlip_Wrangler.Feature_Architect('full_ttbarCFBG_lhco_events.dat')
    X_ttCF = DataFrame(np.asarray(X))
    X_ttCF.columns = Feature_List
    X_ttCF['Class'] = 'Background'
    X_ttCF['Subclass'] = 'ChargeFlip ttbar'
    print 'ChargeFlip-ttbar backgrounds wrangled...'
    # Import JetFake backgrounds, creating a dataframe for each one
    # Create dataframe for JetFake ttbar background
    X, sigma_ttJF, w_ttJF, ttJF_fraction = OneJetFake_Wrangler.Feature_Architect('full_ttbarJFBG_lhco_events.dat')
    X_ttJF = DataFrame(np.asarray(X))
    X_ttJF.columns = Feature_List
    X_ttJF['Class'] = 'Background'
    X_ttJF['Subclass'] = 'JetFake ttbar'
    print 'JetFake-ttbar backgrounds wrangled...'
    # Create dataframe for JetFake W+jets background
    X, sigma_WjJF, w_WjJF, WjJF_fraction = OneJetFake_Wrangler.Feature_Architect('full_WjetsJFBG_lhco_events.dat')
    X_WjJF = DataFrame(np.asarray(X))
    X_WjJF.columns = Feature_List
    X_WjJF['Class'] = 'Background'
    X_WjJF['Subclass'] = 'JetFake Wjets'
    print 'JetFake-W+jets backgrounds wrangled...'
    # Create dataframe for JetFake single top background
    X, sigma_tJF, w_tJF, tJF_fraction = OneJetFake_Wrangler.Feature_Architect('full_SingletJFBG_lhco_events.dat')
    X_tJF = DataFrame(np.asarray(X))
    X_tJF.columns = Feature_List
    X_tJF['Class'] = 'Background'
    X_tJF['Subclass'] = 'JetFake Single top'
    print 'JetFake-single t backgrounds wrangled...'
    # Create dataframe for JetFake pure QCD background
    X, sigma_QCD, w_QCD, QCD_fraction = TwoJetFake_Wrangler.Feature_Architect('full_4jetJFBG_lhco_events.dat')
    X_QCD = DataFrame(np.asarray(X))
    X_QCD.columns = Feature_List
    X_QCD['Class'] = 'Background'
    X_QCD['Subclass'] = 'JetFake QCD'
    print 'JetFake-QCD backgrounds wrangled...'
    # Import Signal into a dataframe
    X, sigma_Signal, w_Signal, Signal_fraction = Signal_Wrangler.Feature_Architect('full_Signal_lhco_events.dat')
    X_Signal = DataFrame(np.asarray(X))
    X_Signal.columns = Feature_List
    X_Signal['Class'] = 'Signal'
    X_Signal['Subclass'] = 'Signal'
    print 'Signal backgrounds wrangled...'
    print 'All backgrounds and signal wrangled. Combining and exporting full dataset to dataframe...'
    # Put all BG events into one dataframe and write to csv
    BG_df = pd.concat([X_jjWW, X_jjWZ, X_jjZZ, X_Zy, X_ttCF, X_ttJF, X_WjJF, X_tJF, X_QCD], axis=0)
    BG_df.to_csv('BGonly_df.csv', index=False)
    # Write all BG cross section and weight info csv in data_PATH
    # (reshape(1, 9): one row per quantity across the 9 background subclasses)
    BGsigma_list = [sigma_jjWW, sigma_jjWZ, sigma_jjZZ, sigma_Zy, sigma_ttCF, sigma_ttJF, sigma_WjJF, sigma_tJF, sigma_QCD]
    BGweight_list = [w_jjWW, w_jjWZ, w_jjZZ, w_Zy, w_ttCF, w_ttJF, w_WjJF, w_tJF, w_QCD]
    BGcs_df = DataFrame(np.concatenate((np.asarray(BGsigma_list).reshape(1,9), np.asarray(BGweight_list).reshape(1,9))),
                        index = ['cross section (pb)', 'cross section weight (pb)'],
                        columns=list(Series(BG_df['Subclass'].values.ravel()).unique()))
    BGcs_df.to_csv('BGonly_cross_section_and_weights_df.csv')
    # Put all events into one dataframe and write to csv
    Full_df = pd.concat([X_jjWW, X_jjWZ, X_jjZZ, X_Zy, X_ttCF, X_ttJF, X_WjJF, X_tJF, X_QCD, X_Signal], axis=0)
    Full_df.to_csv('Full_df.csv', index=False)
    # Write cross section and weight info csv in data_PATH
    # (reshape(1, 10): the 9 backgrounds plus the signal)
    sigma_list = [sigma_jjWW, sigma_jjWZ, sigma_jjZZ, sigma_Zy, sigma_ttCF, sigma_ttJF, sigma_WjJF, sigma_tJF,
                  sigma_QCD, sigma_Signal]
    weight_list = [w_jjWW, w_jjWZ, w_jjZZ, w_Zy, w_ttCF, w_ttJF, w_WjJF, w_tJF, w_QCD, w_Signal]
    cs_df = DataFrame(np.concatenate((np.asarray(sigma_list).reshape(1,10), np.asarray(weight_list).reshape(1,10))),
                      index = ['cross section (pb)', 'cross section weight (pb)'],
                      columns=list(Series(Full_df['Subclass'].values.ravel()).unique()))
    cs_df.to_csv('cross_section_and_weights_df.csv')
    # Change back to current working directory
    os.chdir(work_PATH)
    print 'Done.'
    print 'Wrangled dataframe exported to Data folder with name: {}'.format('Full_df.csv')
    print 'Cross Section and Cross Section Weight dataframe exported to Data folder with name: {}'.format('cross_section_and_weights_df.csv')
| 47.430784 | 151 | 0.695261 |
36d2192020121aa69f7cc948e5148237e5c88240 | 7,018 | py | Python | tests/test_standardqueryoperators/test_skip_while.py | rlugojr/RxPy | 9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec | [
"ECL-2.0",
"Apache-2.0"
] | 78 | 2015-01-22T23:57:01.000Z | 2021-06-04T15:16:22.000Z | tests/test_standardqueryoperators/test_skip_while.py | rlugojr/RxPy | 9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2015-10-19T12:59:57.000Z | 2015-10-19T12:59:57.000Z | tests/test_standardqueryoperators/test_skip_while.py | rlugojr/RxPy | 9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2015-02-16T20:43:45.000Z | 2018-05-30T11:46:50.000Z | from rx.observable import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime
# Module-level shorthands for the ReactiveTest factory helpers used by every
# test below (recorded-message constructors and subscription markers).
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
def test_skip_while_complete_before():
    """Source completes at 330 while skip_while is still dropping elements."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_completed(330), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    # Only the completion notification reaches the observer; all values were skipped.
    results.messages.assert_equal(on_completed(330))
    xs.subscriptions.assert_equal(subscribe(200, 330))
    assert(invoked == 4)
def test_skip_while_complete_after():
    """Skipping stops at the first value failing is_prime (4 at 390); the rest flow through."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    # Everything from the first non-matching element onward is forwarded.
    results.messages.assert_equal(on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
    assert(invoked == 6)
def test_skip_while_error_before():
    """Source errors at 270 while still in the skipping phase: only the error is forwarded."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_error(270, ex), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    results.messages.assert_equal(on_error(270, ex))
    xs.subscriptions.assert_equal(subscribe(200, 270))
    assert(invoked == 2)
def test_skip_while_error_after():
    """Source errors at 600 after skipping ended: forwarded values end with the error."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_error(600, ex))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    results.messages.assert_equal(on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_error(600, ex))
    xs.subscriptions.assert_equal(subscribe(200, 600))
    assert(invoked == 6)
def test_skip_while_dispose_before():
    """Subscription disposed at 300, still inside the skipping phase: no messages emitted."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create, disposed=300)

    results.messages.assert_equal()
    xs.subscriptions.assert_equal(subscribe(200, 300))
    assert(invoked == 3)
def test_skip_while_dispose_after():
    """Subscription disposed at 470 after skipping ended: messages up to 450 are kept."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create, disposed=470)

    results.messages.assert_equal(on_next(390, 4), on_next(410, 17), on_next(450, 8))
    xs.subscriptions.assert_equal(subscribe(200, 470))
    assert(invoked == 6)
def test_skip_while_zero():
    """First observed value (100 at 205) fails the predicate, so nothing is skipped."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    invoked = 0

    def create():
        def predicate(x):
            # Track how many elements the predicate examined.
            nonlocal invoked
            invoked += 1
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    # All values after (and including) 205 are forwarded; predicate ran only once.
    results.messages.assert_equal(on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
    assert(invoked == 1)
def test_skip_while_throw():
    """Predicate raises on its third invocation: the error surfaces at time 290."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
    ex = 'ex'
    invoked = 0

    def create():
        def predicate(x):
            # Track invocations; blow up on the third element examined.
            nonlocal invoked
            invoked += 1
            if invoked == 3:
                raise Exception(ex)
            return is_prime(x)
        return xs.skip_while(predicate)

    results = scheduler.start(create)

    results.messages.assert_equal(on_error(290, ex))
    xs.subscriptions.assert_equal(subscribe(200, 290))
    assert(invoked == 3)
def test_skip_while_index():
    """skip_while with an indexed predicate: drop exactly the first five elements."""
    scheduler = TestScheduler()
    source = scheduler.create_hot_observable(
        on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5),
        on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4),
        on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))

    def factory():
        # Predicate receives (value, index); skip while index < 5.
        return source.skip_while(lambda x, i: i < 5)

    results = scheduler.start(factory)

    # The sixth element onward (4 at 390, ...) is forwarded.
    results.messages.assert_equal(
        on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23),
        on_completed(600))
    source.subscriptions.assert_equal(subscribe(200, 600))
| 42.533333 | 269 | 0.6499 |
4db04b05b52c562e7f863bd739645dcc9eade399 | 1,091 | py | Python | official/cv/deeptext/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/deeptext/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/deeptext/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""hub config"""
from src.Deeptext.deeptext_vgg16 import Deeptext_VGG16
from src.config import config
def deeptext_net(*args, **kwargs):
    """Build and return a Deeptext VGG16 network, forwarding all arguments."""
    network = Deeptext_VGG16(*args, **kwargs)
    return network
def create_network(name, *args, **kwargs):
    """MindSpore Hub factory for deeptext.

    Only the name "deeptext" is supported; any other name raises
    NotImplementedError.
    """
    # Guard clause: reject unknown network names up front.
    if name != "deeptext":
        raise NotImplementedError(f"{name} is not implemented in the repo")
    return deeptext_net(*args, config=config, **kwargs)
| 40.407407 | 78 | 0.692942 |
01b1b47526af3faece151ead220044d04eaca6da | 665 | py | Python | greetings/manage.py | LaTonia-Mertica/greeting-django-app | 661c81c2b0873f0170447930d54d5827ec25a201 | [
"MIT"
] | null | null | null | greetings/manage.py | LaTonia-Mertica/greeting-django-app | 661c81c2b0873f0170447930d54d5827ec25a201 | [
"MIT"
] | null | null | null | greetings/manage.py | LaTonia-Mertica/greeting-django-app | 661c81c2b0873f0170447930d54d5827ec25a201 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative utility."""
    # Point Django at this project's settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greetings.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
# Standard script entry point: delegate to main() when executed directly.
if __name__ == '__main__':
    main()
| 28.913043 | 73 | 0.679699 |
eb8300a92be37a18badd6551dcacd785ed5f2d5b | 1,080 | py | Python | setup.py | Nebukadneza/fints2ledger | bbef542eac40aa61df4617c818fa361d89d73b12 | [
"MIT"
] | null | null | null | setup.py | Nebukadneza/fints2ledger | bbef542eac40aa61df4617c818fa361d89d73b12 | [
"MIT"
] | null | null | null | setup.py | Nebukadneza/fints2ledger | bbef542eac40aa61df4617c818fa361d89d73b12 | [
"MIT"
] | null | null | null | from setuptools import setup
def readme():
    """Return the contents of README.md (used as the package long description)."""
    with open('README.md') as handle:
        return handle.read()
# Package metadata for fints2ledger; executing this module runs setuptools.
setup(name='fints2ledger',
      version='0.6.2',
      description='A tool for downloading transactions from FinTS banking APIs and sorting them into a ledger journal.',
      # Long description comes straight from README.md (markdown).
      long_description=readme(),
      long_description_content_type='text/markdown',
      url='https://github.com/MoritzR/fints2ledger',
      author='Moritz Rumpf',
      author_email='moritz.rumpf@gmail.com',
      license='MIT',
      python_requires='>=3.5.0',
      # Installs a `fints2ledger` console command wired to fints2ledger.main:main.
      entry_points={
          'console_scripts': ['fints2ledger=fints2ledger.main:main'],
      },
      install_requires=[
          'mt-940>=4.11,<5',
          'fints>=3,<4',
          'pyyaml>=4.2b1,<5'
      ],
      # 'green' is needed at build time to run the test suite.
      setup_requires=['green'],
      packages=['fints2ledger'],
      zip_safe=False,
      classifiers=[
          "Development Status :: 3 - Alpha",
          "Programming Language :: Python :: 3.5",
          "Programming Language :: Python :: 3.6",
          "Programming Language :: Python :: 3.7"
      ]
      )
| 28.421053 | 120 | 0.586111 |
89d57f33be8d05fd04c08998cc4533970fb2feee | 748 | py | Python | stage/migrations/0006_auto_20210916_1557.py | fkinyae/works | 1191e60b32c9f398ba7c00276d00796ff5490c07 | [
"MIT"
] | null | null | null | stage/migrations/0006_auto_20210916_1557.py | fkinyae/works | 1191e60b32c9f398ba7c00276d00796ff5490c07 | [
"MIT"
] | null | null | null | stage/migrations/0006_auto_20210916_1557.py | fkinyae/works | 1191e60b32c9f398ba7c00276d00796ff5490c07 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-16 12:57
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration for the 'stage' app.

    Removes three fields and then deletes the Comment, Follow and Profile
    models entirely.  Do not hand-edit the operations: Django's migration
    state must match this file exactly.
    """

    dependencies = [
        ('stage', '0005_alter_image_profile'),
    ]

    operations = [
        # Field removals come first; they presumably reference models deleted
        # below (relations not visible here -- confirm against earlier migrations).
        migrations.RemoveField(
            model_name='follow',
            name='followed',
        ),
        migrations.RemoveField(
            model_name='follow',
            name='follower',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='user',
        ),
        migrations.DeleteModel(
            name='Comment',
        ),
        migrations.DeleteModel(
            name='Follow',
        ),
        migrations.DeleteModel(
            name='Profile',
        ),
    ]
| 21.371429 | 47 | 0.514706 |
a8efec1c87eeb17ba8f52081b1026c94b016bf70 | 3,648 | py | Python | step/train_cam.py | jbeomlee93/AdvCAM | fa08f0ad4c1f764f3ccaf36883c0ae43342d34c5 | [
"MIT"
] | 68 | 2021-03-17T02:59:57.000Z | 2022-03-31T12:48:10.000Z | step/train_cam.py | wanghaoyu33437/Anonymous-AdvCAM | fdaec85f436b5bbb0224fc3a0c9cf8bfa8005998 | [
"MIT"
] | 19 | 2021-03-25T12:05:42.000Z | 2022-03-20T15:53:42.000Z | step/train_cam.py | wanghaoyu33437/Anonymous-AdvCAM | fdaec85f436b5bbb0224fc3a0c9cf8bfa8005998 | [
"MIT"
] | 12 | 2021-04-26T02:59:56.000Z | 2022-03-29T08:19:20.000Z | import cv2
import torch
from torch.backends import cudnn
cudnn.enabled = True
from torch.utils.data import DataLoader
import torch.nn.functional as F
import importlib
import voc12.dataloader
from misc import pyutils, torchutils
from torch import autograd
import os
def validate(model, data_loader):
print('validating ... ', flush=True, end='')
val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')
model.eval()
with torch.no_grad():
for pack in data_loader:
img = pack['img']
label = pack['label'].cuda(non_blocking=True)
x = model(img)
loss1 = F.multilabel_soft_margin_loss(x, label)
val_loss_meter.add({'loss1': loss1.item()})
model.train()
print('loss: %.4f' % (val_loss_meter.pop('loss1')))
return
def run(args):
model = getattr(importlib.import_module(args.cam_network), 'Net')()
train_dataset = voc12.dataloader.VOC12ClassificationDataset(args.train_list, voc12_root=args.voc12_root,
resize_long=(320, 640), hor_flip=True,
crop_size=512, crop_method="random")
train_data_loader = DataLoader(train_dataset, batch_size=args.cam_batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)
max_step = (len(train_dataset) // args.cam_batch_size) * args.cam_num_epoches
val_dataset = voc12.dataloader.VOC12ClassificationDataset(args.val_list, voc12_root=args.voc12_root,
crop_size=512)
val_data_loader = DataLoader(val_dataset, batch_size=args.cam_batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
param_groups = model.trainable_parameters()
optimizer = torchutils.PolyOptimizer([
{'params': param_groups[0], 'lr': args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
{'params': param_groups[1], 'lr': 10*args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
], lr=args.cam_learning_rate, weight_decay=args.cam_weight_decay, max_step=max_step)
model = torch.nn.DataParallel(model).cuda()
model.train()
avg_meter = pyutils.AverageMeter()
timer = pyutils.Timer()
for ep in range(args.cam_num_epoches):
print('Epoch %d/%d' % (ep+1, args.cam_num_epoches))
for step, pack in enumerate(train_data_loader):
img = pack['img']
img = img.cuda()
label = pack['label'].cuda(non_blocking=True)
model.zero_grad()
x = model(img)
optimizer.zero_grad()
loss = F.multilabel_soft_margin_loss(x, label)
loss.backward()
avg_meter.add({'loss1': loss.item()})
optimizer.step()
if (optimizer.global_step-1)%100 == 0:
timer.update_progress(optimizer.global_step / max_step)
print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
'loss:%.4f' % (avg_meter.pop('loss1')),
'imps:%.1f' % ((step + 1) * args.cam_batch_size / timer.get_stage_elapsed()),
'lr: %.4f' % (optimizer.param_groups[0]['lr']),
'etc:%s' % (timer.str_estimated_complete()), flush=True)
else:
validate(model, val_data_loader)
timer.reset_stage()
torch.save(model.module.state_dict(), args.cam_weights_name + '.pth')
torch.cuda.empty_cache() | 34.415094 | 111 | 0.607182 |
e734f3b7fcfdeb8d0fe3587ff5abe3896d7605d8 | 1,602 | py | Python | apollo/epsilon_checker.py | TylerMathis/apollo | 0e37a37ec2f6e7fcfffa14c83f8c801f8422fa7b | [
"MIT"
] | null | null | null | apollo/epsilon_checker.py | TylerMathis/apollo | 0e37a37ec2f6e7fcfffa14c83f8c801f8422fa7b | [
"MIT"
] | null | null | null | apollo/epsilon_checker.py | TylerMathis/apollo | 0e37a37ec2f6e7fcfffa14c83f8c801f8422fa7b | [
"MIT"
] | null | null | null | from .verdict import Response, Correct, WrongAnswer, PresentationError
def compare_eps(user, judge, eps):
try:
user = float(user)
judge = float(judge)
absolute = abs(judge - user) <= eps + 1e-10
if (judge == 0):
return absolute
relative = abs((judge - user) / judge) <= eps + 1e-10
return absolute or relative
except ValueError:
return user == judge
# File references to user_out and judge_out
def check(user_out, judge_out, eps):
user_lines = [s.strip().split() for s in user_out.readlines()]
judge_lines = [s.strip().split() for s in judge_out.readlines()]
user_tokens = [tok for line in user_lines for tok in line]
judge_tokens = [tok for line in judge_lines for tok in line]
user_token_count = len(user_tokens)
judge_token_count = len(judge_tokens)
min_tokens = min(user_token_count, judge_token_count)
# Give wrong answer for missing output
if (user_token_count < judge_token_count):
return Response(WrongAnswer, 'Not enough output')
# Check input line by line
for user, judge, tok in zip(user_tokens, judge_tokens, range(min_tokens)):
if not compare_eps(user, judge, eps):
return Response(WrongAnswer, (
f'Token {tok} does not match.\n\n'
f'Expected: {judge}\n\n'
f'Recieved: {user}'
))
# Give presentation error for too much output
if (user_token_count > judge_token_count):
return Response(PresentationError, 'Too much output')
return Response(Correct)
| 32.04 | 78 | 0.644819 |
f19c94962cc87b0f77cc91333a92df97d3996002 | 8,271 | py | Python | src/dispatch/incident/models.py | r4is3/dispatch | 097ef041758e4d5995b6f3826e3fe03bfc5c224e | [
"Apache-2.0"
] | null | null | null | src/dispatch/incident/models.py | r4is3/dispatch | 097ef041758e4d5995b6f3826e3fe03bfc5c224e | [
"Apache-2.0"
] | null | null | null | src/dispatch/incident/models.py | r4is3/dispatch | 097ef041758e4d5995b6f3826e3fe03bfc5c224e | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import List, Optional, Any
from pydantic import validator
from sqlalchemy import (
Column,
DateTime,
Float,
ForeignKey,
Integer,
PrimaryKeyConstraint,
String,
Table,
select,
)
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy_utils import TSVectorType
from fastapi_permissions import Allow
from dispatch.config import INCIDENT_RESOURCE_FAQ_DOCUMENT, INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT
from dispatch.auth.models import UserRoles
from dispatch.conference.models import ConferenceRead
from dispatch.conversation.models import ConversationRead
from dispatch.database import Base
from dispatch.document.models import DocumentRead
from dispatch.enums import Visibility
from dispatch.event.models import EventRead
from dispatch.incident_priority.models import (
IncidentPriorityBase,
IncidentPriorityCreate,
IncidentPriorityRead,
)
from dispatch.incident_type.models import IncidentTypeCreate, IncidentTypeRead, IncidentTypeBase
from dispatch.models import DispatchBase, IndividualReadNested, TimeStampMixin
from dispatch.participant.models import ParticipantRead
from dispatch.participant_role.models import ParticipantRole, ParticipantRoleType
from dispatch.storage.models import StorageRead
from dispatch.ticket.models import TicketRead
from .enums import IncidentStatus
assoc_incident_terms = Table(
"assoc_incident_terms",
Base.metadata,
Column("incident_id", Integer, ForeignKey("incident.id")),
Column("term_id", Integer, ForeignKey("term.id")),
PrimaryKeyConstraint("incident_id", "term_id"),
)
assoc_incident_tags = Table(
"assoc_incident_tags",
Base.metadata,
Column("incident_id", Integer, ForeignKey("incident.id")),
Column("tag_id", Integer, ForeignKey("tag.id")),
PrimaryKeyConstraint("incident_id", "tag_id"),
)
class Incident(Base, TimeStampMixin):
id = Column(Integer, primary_key=True)
name = Column(String)
title = Column(String, nullable=False)
description = Column(String, nullable=False)
status = Column(String, default=IncidentStatus.active)
cost = Column(Float, default=0)
visibility = Column(String, default=Visibility.open)
# auto generated
reported_at = Column(DateTime, default=datetime.utcnow)
stable_at = Column(DateTime)
closed_at = Column(DateTime)
search_vector = Column(
TSVectorType(
"title", "description", "name", weights={"name": "A", "title": "B", "description": "C"}
)
)
@hybrid_property
def commander(self):
if self.participants:
for p in self.participants:
for pr in p.participant_roles:
if (
pr.role == ParticipantRoleType.incident_commander
and pr.renounced_at
is None # Column renounced_at will be null for the current incident commander
):
return p.individual
@commander.expression
def commander(cls):
return (
select(ParticipantRole.individual)
.where(ParticipantRole.incident_id == cls.id)
.where(ParticipantRole.role == ParticipantRoleType.incident_commander)
.where(ParticipantRole.renounce_at == None) # noqa
)
@hybrid_property
def reporter(self):
if self.participants:
for p in self.participants:
for role in p.participant_roles:
if role.role == ParticipantRoleType.reporter:
return p.individual
@reporter.expression
def reporter(cls):
return (
select(ParticipantRole.individual)
.where(ParticipantRole.incident_id == cls.id)
.where(ParticipantRole.role == ParticipantRoleType.reporter)
.where(ParticipantRole.renounce_at == None) # noqa
)
@hybrid_property
def incident_document(self):
if self.documents:
for d in self.documents:
if d.resource_type == INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT:
return d
@hybrid_property
def incident_faq(self):
if self.documents:
for d in self.documents:
if d.resource_type == INCIDENT_RESOURCE_FAQ_DOCUMENT:
return d
@hybrid_property
def last_status_report(self):
if self.status_reports:
return sorted(self.status_reports, key=lambda r: r.created_at)[-1]
# resources
conference = relationship("Conference", uselist=False, backref="incident")
conversation = relationship("Conversation", uselist=False, backref="incident")
documents = relationship("Document", lazy="subquery", backref="incident")
events = relationship("Event", backref="incident")
groups = relationship("Group", lazy="subquery", backref="incident")
incident_priority = relationship("IncidentPriority", backref="incident")
incident_priority_id = Column(Integer, ForeignKey("incident_priority.id"))
incident_type = relationship("IncidentType", backref="incident")
incident_type_id = Column(Integer, ForeignKey("incident_type.id"))
participants = relationship("Participant", backref="incident")
status_reports = relationship("StatusReport", backref="incident")
storage = relationship("Storage", uselist=False, backref="incident")
tags = relationship("Tag", secondary=assoc_incident_tags, backref="incidents")
tasks = relationship("Task", backref="incident")
terms = relationship("Term", secondary=assoc_incident_terms, backref="incidents")
ticket = relationship("Ticket", uselist=False, backref="incident")
# Pydantic models...
class IncidentBase(DispatchBase):
title: str
description: str
status: Optional[IncidentStatus] = IncidentStatus.active
visibility: Optional[Visibility]
@validator("title")
def title_required(cls, v):
if not v:
raise ValueError("must not be empty string")
return v
@validator("description")
def description_required(cls, v):
if not v:
raise ValueError("must not be empty string")
return v
class IncidentCreate(IncidentBase):
incident_priority: IncidentPriorityCreate
incident_type: IncidentTypeCreate
class IncidentUpdate(IncidentBase):
incident_priority: IncidentPriorityBase
incident_type: IncidentTypeBase
reported_at: Optional[datetime] = None
stable_at: Optional[datetime] = None
commander: Optional[IndividualReadNested]
reporter: Optional[IndividualReadNested]
tags: Optional[List[Any]] = [] # any until we figure out circular imports
terms: Optional[List[Any]] = [] # any until we figure out circular imports
class IncidentRead(IncidentBase):
id: int
cost: float = None
name: str = None
reporter: Optional[IndividualReadNested]
commander: Optional[IndividualReadNested]
last_status_report: Optional[Any]
incident_priority: IncidentPriorityRead
incident_type: IncidentTypeRead
participants: Optional[List[ParticipantRead]] = []
storage: Optional[StorageRead] = None
ticket: Optional[TicketRead] = None
documents: Optional[List[DocumentRead]] = []
tags: Optional[List[Any]] = [] # any until we figure out circular imports
terms: Optional[List[Any]] = [] # any until we figure out circular imports
conference: Optional[ConferenceRead] = None
conversation: Optional[ConversationRead] = None
events: Optional[List[EventRead]] = []
created_at: Optional[datetime] = None
reported_at: Optional[datetime] = None
stable_at: Optional[datetime] = None
closed_at: Optional[datetime] = None
def __acl__(self):
if self.visibility == Visibility.restricted:
return [
(Allow, f"role:{UserRoles.admin}", "view"),
(Allow, f"role:{UserRoles.admin}", "edit"),
]
return [
(Allow, f"role:{UserRoles.user}", "view"),
(Allow, f"role:{UserRoles.user}", "edit"),
]
class IncidentPagination(DispatchBase):
total: int
items: List[IncidentRead] = []
| 35.497854 | 102 | 0.688913 |
136a36075d343ab321cbdbd792b734ac1dbc4dd9 | 3,746 | py | Python | chapter3/Readercoin_/test/functional/multiwallet.py | MyawBug/Blockchain-By-Example | 2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d | [
"MIT"
] | 51 | 2018-12-14T09:09:20.000Z | 2022-03-28T03:25:45.000Z | chapter3/Readercoin_/test/functional/multiwallet.py | MyawBug/Blockchain-By-Example | 2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d | [
"MIT"
] | 4 | 2019-08-02T18:23:17.000Z | 2022-02-12T04:33:25.000Z | chapter3/Readercoin_/test/functional/multiwallet.py | xiaqingdoc/--- | b15448739983b0787ffc963811294bcf44487303 | [
"MIT"
] | 42 | 2018-12-14T09:09:24.000Z | 2022-03-31T01:49:35.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a readercoind node can load multiple wallet files
"""
import os
import shutil
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MultiWalletTest(ReadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-wallet=w1', '-wallet=w2', '-wallet=w3']]
def run_test(self):
assert_equal(set(self.nodes[0].listwallets()), {"w1", "w2", "w3"})
self.stop_node(0)
# should not initialize if there are duplicate wallets
self.assert_start_raises_init_error(0, ['-wallet=w1', '-wallet=w1'], 'Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if wallet file is a directory
os.mkdir(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w11'))
self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w2'),
os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w22'))
self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid')
# should not initialize if wallet file is a symlink
os.symlink(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w1'), os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w12'))
self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. -wallet filename must be a regular file.')
self.start_node(0, self.extra_args[0])
w1 = self.nodes[0].get_wallet_rpc("w1")
w2 = self.nodes[0].get_wallet_rpc("w2")
w3 = self.nodes[0].get_wallet_rpc("w3")
wallet_bad = self.nodes[0].get_wallet_rpc("bad")
w1.generate(1)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", self.nodes[0].getwalletinfo)
# check w1 wallet balance
w1_info = w1.getwalletinfo()
assert_equal(w1_info['immature_balance'], 50)
w1_name = w1_info['walletname']
assert_equal(w1_name, "w1")
# check w2 wallet balance
w2_info = w2.getwalletinfo()
assert_equal(w2_info['immature_balance'], 0)
w2_name = w2_info['walletname']
assert_equal(w2_name, "w2")
w3_name = w3.getwalletinfo()['walletname']
assert_equal(w3_name, "w3")
assert_equal({"w1", "w2", "w3"}, {w1_name, w2_name, w3_name})
w1.generate(101)
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.generate(1)
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], "regtest")
assert_equal(batch[1]["result"]["walletname"], "w1")
if __name__ == '__main__':
MultiWalletTest().main()
| 41.164835 | 142 | 0.66551 |
b24919715bbd09366868029a979075945f20f6a6 | 5,486 | py | Python | composer/models/bert/model.py | IanWorley/composer | e4d443012511b387ad495b4add3b3b101d729741 | [
"Apache-2.0"
] | null | null | null | composer/models/bert/model.py | IanWorley/composer | e4d443012511b387ad495b4add3b3b101d729741 | [
"Apache-2.0"
] | null | null | null | composer/models/bert/model.py | IanWorley/composer | e4d443012511b387ad495b4add3b3b101d729741 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
"""Implements a BERT wrapper around a :class:`.ComposerTransformer`."""
from __future__ import annotations
from typing import TYPE_CHECKING, Mapping, Sequence, Union
import torch
from torchmetrics import Accuracy, MatthewsCorrcoef, MeanSquaredError, Metric, MetricCollection, SpearmanCorrcoef
from composer.metrics.nlp import BinaryF1Score, LanguageCrossEntropy, MaskedAccuracy
from composer.models.transformer_shared import ComposerTransformer
if TYPE_CHECKING:
import transformers
from composer.core.types import Batch, BatchDict, BatchPair
__all__ = ["BERTModel"]
class BERTModel(ComposerTransformer):
"""BERT model based on |:hugging_face:| Transformers.
For more information, see `Transformers <https://huggingface.co/transformers/>`_.
Args:
module (transformers.BertModel): An instance of BertModel that
contains the forward pass function.
config (transformers.BertConfig): The BertConfig object that
stores information about the model hyperparameters.
tokenizer (transformers.BertTokenizer): An instance of BertTokenizer. Necessary to process model inputs.
To create a BERT model for Language Model pretraining:
.. testcode::
from composer.models import BERTModel
import transformers
config = transformers.BertConfig()
hf_model = transformers.BertLMHeadModel(config=config)
tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased")
model = BERTModel(module=hf_model, config=config, tokenizer=tokenizer)
"""
def __init__(self, module: transformers.BertModel, config: transformers.BertConfig,
tokenizer: transformers.BertTokenizer) -> None:
super().__init__(
module=module, #type: ignore (thirdparty)
config=config,
tokenizer=tokenizer)
# we're going to remove the label from the expected inputs
# since we will handle metric calculation with TorchMetrics instead of HuggingFace.
self.model_inputs.remove("labels")
self.train_metrics = []
self.val_metrics = []
# TODO (Moin): make sure this is moved to be dataset-specific
# if config.num_labels=1, then we are training a regression task, so we should update our loss functions
if config.num_labels == 1:
self.train_loss = MeanSquaredError()
self.val_loss = MeanSquaredError()
self.train_spearman = SpearmanCorrcoef()
self.val_spearman = SpearmanCorrcoef()
self.train_metrics.extend([self.train_loss, self.train_spearman])
self.val_metrics.extend([self.val_loss, self.val_spearman])
if config.num_labels == 2:
self.train_f1 = BinaryF1Score()
self.val_f1 = BinaryF1Score()
self.train_metrics.extend([self.train_f1])
self.val_metrics.extend([self.val_f1])
if config.num_labels > 1 and config.num_labels != len(self.tokenizer):
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.train_matthews = MatthewsCorrcoef(num_classes=config.num_labels)
self.val_matthews = MatthewsCorrcoef(num_classes=config.num_labels)
self.train_metrics.extend([self.train_acc, self.train_matthews])
self.val_metrics.extend([self.val_acc, self.val_matthews])
if config.num_labels == len(self.tokenizer): # tests for MLM pre-training
ignore_index = -100
self.train_loss = LanguageCrossEntropy(ignore_index=ignore_index, vocab_size=config.num_labels)
self.val_loss = LanguageCrossEntropy(ignore_index=ignore_index, vocab_size=config.num_labels)
self.train_acc = MaskedAccuracy(ignore_index=ignore_index)
self.val_acc = MaskedAccuracy(ignore_index=ignore_index)
self.train_metrics.extend([self.train_loss, self.train_acc])
self.val_metrics.extend([self.val_loss, self.val_acc])
def loss(self, outputs: Mapping, batch: Batch) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
if outputs.get('loss', None) is not None:
return outputs['loss']
else:
raise NotImplementedError('Calculating loss directly not supported yet.')
def validate(self, batch: BatchDict) -> BatchPair:
"""Runs the validation step.
Args:
batch (BatchDict): a dictionary of Dict[str, Tensor] of inputs
that the model expects, as found in :meth:`.ComposerTransformer.get_model_inputs`.
Returns:
tuple (Tensor, Tensor): with the output from the forward pass and the correct labels.
This is fed into directly into the output of :meth:`.ComposerModel.metrics`.
"""
assert self.training is False, "For validation, model must be in eval mode"
# temporary hack until eval on multiple datasets is finished
labels = batch.pop('labels')
output = self.forward(batch)
output = output['logits']
# if we are in the single class case, then remove the classes dimension
if output.shape[1] == 1:
output = output.squeeze(dim=1)
return output, labels
def metrics(self, train: bool = False) -> Union[Metric, MetricCollection]:
return MetricCollection(self.train_metrics) if train else MetricCollection(self.val_metrics)
| 40.940299 | 113 | 0.68374 |
0293c1b6701b671b15847dc057d26399c6a88eae | 2,389 | py | Python | playground/plugintest.py | einsweniger/mdt | 906d765e387367654a02a36e9b5ba7aca4480ed6 | [
"MIT"
] | 9 | 2015-11-05T10:25:24.000Z | 2019-03-04T09:01:41.000Z | playground/plugintest.py | einsweniger/mdt | 906d765e387367654a02a36e9b5ba7aca4480ed6 | [
"MIT"
] | 15 | 2015-02-03T11:05:53.000Z | 2017-11-20T12:43:46.000Z | playground/plugintest.py | einsweniger/mdt | 906d765e387367654a02a36e9b5ba7aca4480ed6 | [
"MIT"
] | 2 | 2016-11-23T12:47:02.000Z | 2018-11-01T18:19:20.000Z | import argparse
import abc
import collections
import numbers
import test.test_coroutines
def attrs(**kwargs):
def decorate(obj):
for k in kwargs:
setattr(obj, k, kwargs[k])
return decorate
class Name(collections.Callable):
def __call__(self, test):
pass
def __init__(self, name):
self.name = name
def __getattr__(self, item):
return self.name
def __get__(self, instance, owner):
return self.name
class SubClassRegistry(abc.ABCMeta):
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
if not hasattr(cls, 'registry'):
cls.registry = set()
cls.registry.add(cls)
cls.registry -= set(bases) # Remove base classes
print(cls.__mro__)
print(cls.__base__)
print(cls.__bases__)
# Metamethods, called on class objects:
def __iter__(cls):
return iter(cls.registry)
# def __str__(cls):
# if cls in cls.registry:
# return cls.__name__
# return cls.__name__ + ": " + ", ".join([sc.__name__ for sc in cls])
class CommandRegistry(SubClassRegistry):
needed_attributes = ['arguments', 'name', 'help']
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
if not hasattr(cls, 'parser'):
cls.parser = argparse.ArgumentParser()
cls.subparser = cls.parser.add_subparsers(help="internal sub command help")
else:
if 'name' not in attrs:
raise BaseException("you must provice arguments in your class")
class Command(metaclass=CommandRegistry):
@property
def name(self):
return None
@property
def help(self):
return None
@property
def arguments(self):
return None
class CommandDecorator:
def __init__(self, *args):
pass
class ArgumentContainer:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class Init(Command):
arguments = [
ArgumentContainer('--force', help='overwrite the config', action='store_true'),
ArgumentContainer('-c', '--courseids', dest='course_ids', nargs='+', help='moodle course id', action='append')
]
name = 'init'
@classmethod
def run(cls):
print('running auth')
# print(Init().arguments) | 23.89 | 118 | 0.614483 |
8f10db5bb72f2996584a8e2a7a233d71d2ae0040 | 39,724 | py | Python | src/twisted/python/test/test_release.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | null | null | null | src/twisted/python/test/test_release.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | null | null | null | src/twisted/python/test/test_release.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | null | null | null | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.release} and L{twisted.python._release}.
All of these tests are skipped on platforms other than Linux, as the release is
only ever performed on Linux.
"""
import glob
import functools
import operator
import os
import sys
import textwrap
import tempfile
import shutil
from io import BytesIO, StringIO
from unittest import skipIf
from twisted.trial.unittest import TestCase, FailTest, SkipTest
from twisted.python.procutils import which
from twisted.python import release
from twisted.python.filepath import FilePath
from twisted.python.reflect import requireModule
from incremental import Version
from subprocess import CalledProcessError
from twisted.python._release import (
findTwistedProjects,
replaceInFile,
Project,
filePathDelta,
APIBuilder,
BuildAPIDocsScript,
CheckNewsfragmentScript,
runCommand,
NotWorkingDirectory,
SphinxBuilder,
GitCommand,
getRepositoryCommand,
IVCSCommand,
)
# trial honors a module-level ``skip`` attribute: when it is a (truthy)
# string, every test in this module is skipped with that message.  The
# release toolchain is only ever run on POSIX, so skip elsewhere.
if sys.platform != "win32":
    skip = None
else:
    skip = "Release toolchain only supported on POSIX."
class ExternalTempdirTestCase(TestCase):
    """
    A L{TestCase} variant whose temporary directories are created under the
    system-wide temp location instead of the trial working directory, so
    that Git commands run by the tests cannot interfere with the Twisted
    checkout itself.
    """

    def mktemp(self):
        """
        Create a directory under the system temporary directory and
        schedule its removal when the test finishes.
        """
        path = tempfile.mkdtemp(dir=tempfile.gettempdir())
        self.addCleanup(shutil.rmtree, path)
        return path
def _gitConfig(path):
    """
    Write the commit-identity settings (user.name / user.email) that Git
    requires before it will create commits.  This isn't needed in real
    usage, just for the test repositories.

    @param path: The path to the Git repository.
    @type path: L{FilePath}
    """
    configFile = path.child(".git").child("config").path
    identity = [
        ("user.name", '"someone"'),
        ("user.email", '"someone@someplace.com"'),
    ]
    for key, value in identity:
        runCommand(["git", "config", "--file", configFile, key, value])
def _gitInit(path):
    """
    Create a fresh Git repository at C{path} and apply the commit-identity
    configuration the tests require.  This isn't needed in real usage.

    @param path: The path to where the Git repo will be created.
    @type path: L{FilePath}
    """
    runCommand(["git", "init", path.path])
    _gitConfig(path)
def genVersion(*args, **kwargs):
    """
    Produce the text of a C{_version.py} module for the given version.

    @param args: Positional arguments forwarded to L{Version}.
    @param kwargs: Keyword arguments forwarded to L{Version}.

    @return: Python source which assigns C{__version__}.
    @rtype: L{str}
    """
    version = Version(*args, **kwargs)
    return f"from incremental import Version\n__version__={version!r}"
class StructureAssertingMixin:
    """
    A mixin for L{TestCase} subclasses providing helpers to build directory
    trees from dict descriptions and to assert that on-disk trees match
    such descriptions.
    """

    def createStructure(self, root, dirDict):
        """
        Materialize the directories and files described by C{dirDict}
        underneath C{root} (which must already exist).

        @param root: The directory in which to create the structure.
        @type root: L{FilePath}

        @param dirDict: Keys name files or directories.  A string value is
            a text file's contents (newlines are rewritten to the
            platform-native convention and the result written in binary
            mode); a dict value describes a subdirectory, recursively.
            For example::

                {"foofile": "foocontents",
                 "bardir": {"barfile": "bar\ncontents"}}
        @type dirDict: C{dict}
        """
        for name, value in dirDict.items():
            node = root.child(name)
            if isinstance(value, dict):
                node.createDirectory()
                self.createStructure(node, value)
            else:
                # Text content: normalize newlines, then write as bytes.
                node.setContent(value.replace("\n", os.linesep).encode())

    def assertStructure(self, root, dirDict):
        """
        Assert that the directory C{root} matches the description in
        C{dirDict}; extra children cause a failure.

        @param root: The filesystem directory to compare.
        @type root: L{FilePath}

        @param dirDict: The dict that should describe the contents of the
            directory.  Same structure as the C{dirDict} parameter to
            L{createStructure}; a callable value is treated as a predicate
            applied to the child path.
        @type dirDict: C{dict}
        """
        remaining = [node.basename() for node in root.children()]
        for name, expectation in dirDict.items():
            node = root.child(name)
            if callable(expectation):
                self.assertTrue(expectation(node))
            elif isinstance(expectation, dict):
                self.assertTrue(node.isdir(), "{} is not a dir!".format(node.path))
                self.assertStructure(node, expectation)
            else:
                content = node.getContent().decode().replace(os.linesep, "\n")
                self.assertEqual(content, expectation)
            remaining.remove(name)
        if remaining:
            self.fail("There were extra children in {}: {}".format(root.path, remaining))
class ProjectTests(ExternalTempdirTestCase):
    """
    Tests for L{Project}, the first-class representation of a Twisted-style
    project directory.
    """

    def assertProjectsEqual(self, observedProjects, expectedProjects):
        """
        Assert that two lists of L{Project}s name the same directories,
        irrespective of ordering.
        """
        self.assertEqual(len(observedProjects), len(expectedProjects))
        byDirectory = lambda project: project.directory
        for seen, wanted in zip(
            sorted(observedProjects, key=byDirectory),
            sorted(expectedProjects, key=byDirectory),
        ):
            self.assertEqual(seen.directory, wanted.directory)

    def makeProject(self, version, baseDirectory=None):
        """
        Make a Twisted-style project in the given base directory.

        @param version: The version information for the project, as a tuple
            of (dotted name, major, minor, micro).
        @param baseDirectory: The directory to create files in, as a
            L{FilePath}; a fresh temporary directory is used when omitted.

        @return: L{Project} pointing to the created project.
        """
        if baseDirectory is None:
            baseDirectory = FilePath(self.mktemp())
        directory = baseDirectory
        # Each dotted-name segment becomes a nested package directory.
        for segment in version[0].split("."):
            directory = directory.child(segment)
            if not directory.exists():
                directory.createDirectory()
            directory.child("__init__.py").setContent(b"")
        directory.child("newsfragments").createDirectory()
        directory.child("_version.py").setContent(genVersion(*version).encode())
        return Project(directory)

    def makeProjects(self, *versions):
        """
        Create a series of projects underneath a single fresh temporary
        base directory.

        @return: A L{FilePath} for the base directory.
        """
        base = FilePath(self.mktemp())
        for version in versions:
            self.makeProject(version, base)
        return base

    def test_getVersion(self):
        """
        Project objects know their version.
        """
        versionInfo = ("twisted", 2, 1, 0)
        project = self.makeProject(versionInfo)
        self.assertEqual(project.getVersion(), Version(*versionInfo))

    def test_repr(self):
        """
        The representation of a Project is Project(directory).
        """
        project = Project(FilePath("bar"))
        self.assertEqual(repr(project), "Project({!r})".format(project.directory))

    def test_findTwistedStyleProjects(self):
        """
        findTwistedStyleProjects finds all projects underneath a particular
        directory.  A 'project' is defined by the existence of a
        'newsfragments' directory and is returned as a Project object.
        """
        base = self.makeProjects(("foo", 2, 3, 0), ("foo.bar", 0, 7, 4))
        found = findTwistedProjects(base)
        self.assertProjectsEqual(
            found,
            [
                Project(base.child("foo")),
                Project(base.child("foo").child("bar")),
            ],
        )
class UtilityTests(ExternalTempdirTestCase):
    """
    Tests for various utility functions for releasing.
    """

    def test_chdir(self):
        """
        L{release.runChdirSafe} restores the original working directory
        even when the wrapped function raises.
        """
        originalDirectory = os.getcwd()

        def changeDirectoryAndFail():
            os.mkdir("releaseCh")
            os.chdir("releaseCh")
            1 // 0

        self.assertRaises(
            ZeroDivisionError, release.runChdirSafe, changeDirectoryAndFail
        )
        self.assertEqual(originalDirectory, os.getcwd())

    def test_replaceInFile(self):
        """
        L{replaceInFile} replaces data in a file based on a dict.  A key
        from the dict that is found in the file is replaced with the
        corresponding value.
        """
        content = "foo\nhey hey $VER\nbar\n"
        with open("release.replace", "w") as handle:
            handle.write(content)

        expected = content.replace("$VER", "2.0.0")
        replaceInFile("release.replace", {"$VER": "2.0.0"})
        with open("release.replace") as handle:
            self.assertEqual(handle.read(), expected)

        expected = expected.replace("2.0.0", "3.0.0")
        replaceInFile("release.replace", {"2.0.0": "3.0.0"})
        with open("release.replace") as handle:
            self.assertEqual(handle.read(), expected)
def doNotFailOnNetworkError(func):
    """
    A decorator which makes APIBuilder tests not fail because of intermittent
    network failures -- namely, APIBuilder being unable to get the "object
    inventory" of other projects.

    @param func: The function to decorate.

    @return: A decorated function which won't fail if the object inventory
        fetching fails.
    """

    @functools.wraps(func)
    def wrapper(*a, **kw):
        try:
            func(*a, **kw)
        except FailTest as e:
            # Only the intersphinx "object inventory" fetch failure is
            # converted into a skip; every other assertion failure is real.
            if e.args[0].startswith("'Failed to get object inventory from "):
                raise SkipTest(
                    (
                        "This test is prone to intermittent network errors. "
                        "See ticket 8753. Exception was: {!r}"
                    ).format(e)
                )
            # Re-raise unrelated test failures unchanged.
            raise

    return wrapper
class DoNotFailTests(TestCase):
    """
    Tests for L{doNotFailOnNetworkError}.
    """

    def test_skipsOnAssertionError(self):
        """
        When the test raises L{FailTest} and the assertion failure starts with
        "'Failed to get object inventory from ", the test will be skipped
        instead.
        """

        @doNotFailOnNetworkError
        def inner():
            self.assertEqual("Failed to get object inventory from blah", "")

        # Use assertRaises rather than try/except: the previous formulation
        # passed vacuously when inner() raised nothing at all.
        self.assertRaises(SkipTest, inner)

    def test_doesNotSkipOnDifferentError(self):
        """
        If there is a L{FailTest} that is not the intersphinx fetching error,
        it will be passed through.
        """

        @doNotFailOnNetworkError
        def inner():
            self.assertEqual("Error!!!!", "")

        # The unrelated failure must propagate as FailTest, not SkipTest.
        self.assertRaises(FailTest, inner)
@skipIf(not requireModule("pydoctor"), "Pydoctor is not present.")
class APIBuilderTests(ExternalTempdirTestCase):
    """
    Tests for L{APIBuilder}.

    Each test writes a tiny synthetic package to disk, runs pydoctor over it
    and asserts on the generated HTML.
    """

    @doNotFailOnNetworkError
    def test_build(self):
        """
        L{APIBuilder.build} writes an index file which includes the name of the
        project specified.
        """
        # Capture stdout so the final assertion can check the builder is
        # silent on success.
        stdout = BytesIO()
        self.patch(sys, "stdout", stdout)

        projectName = "Foobar"
        packageName = "quux"
        projectURL = "scheme:project"
        sourceURL = "scheme:source"
        docstring = "text in docstring"
        privateDocstring = "should also appear in output"

        # A minimal package with one public and one private function.
        inputPath = FilePath(self.mktemp()).child(packageName)
        inputPath.makedirs()
        inputPath.child("__init__.py").setContent(
            "def foo():\n"
            " '{}'\n"
            "def _bar():\n"
            " '{}'".format(docstring, privateDocstring).encode()
        )

        outputPath = FilePath(self.mktemp())

        builder = APIBuilder()
        builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)

        indexPath = outputPath.child("index.html")

        self.assertTrue(
            indexPath.exists(), "API index {!r} did not exist.".format(outputPath.path)
        )
        self.assertIn(
            '<a href="{}">{}</a>'.format(projectURL, projectName),
            indexPath.getContent().decode(),
            "Project name/location not in file contents.",
        )

        quuxPath = outputPath.child("quux.html")
        self.assertTrue(
            quuxPath.exists(),
            "Package documentation file {!r} did not exist.".format(quuxPath.path),
        )
        self.assertIn(
            docstring,
            quuxPath.getContent().decode(),
            "Docstring not in package documentation file.",
        )
        # The source links must be derived from the sourceURL passed in.
        self.assertIn(
            '<a href="{}/{}">View Source</a>'.format(sourceURL, packageName),
            quuxPath.getContent().decode(),
        )
        self.assertIn(
            '<a class="functionSourceLink" href="%s/%s/__init__.py#L1">'
            % (sourceURL, packageName),
            quuxPath.getContent().decode(),
        )
        # Private names are documented too.
        self.assertIn(privateDocstring, quuxPath.getContent().decode())

        # There should also be a page for the foo function in quux.
        self.assertTrue(quuxPath.sibling("quux.foo.html").exists())

        self.assertEqual(stdout.getvalue(), b"")

    @doNotFailOnNetworkError
    def test_buildWithPolicy(self):
        """
        L{BuildAPIDocsScript.buildAPIDocs} builds the API docs with values
        appropriate for the Twisted project.
        """
        stdout = BytesIO()
        self.patch(sys, "stdout", stdout)
        docstring = "text in docstring"

        # Fake Twisted checkout with a versioned "twisted" package.
        projectRoot = FilePath(self.mktemp())
        packagePath = projectRoot.child("twisted")
        packagePath.makedirs()
        packagePath.child("__init__.py").setContent(
            "def foo():\n" " '{}'\n".format(docstring).encode()
        )
        packagePath.child("_version.py").setContent(
            genVersion("twisted", 1, 0, 0).encode()
        )
        outputPath = FilePath(self.mktemp())

        script = BuildAPIDocsScript()
        script.buildAPIDocs(projectRoot, outputPath)

        indexPath = outputPath.child("index.html")

        self.assertTrue(
            indexPath.exists(), "API index {} did not exist.".format(outputPath.path)
        )
        self.assertIn(
            '<a href="http://twistedmatrix.com/">Twisted</a>',
            indexPath.getContent().decode(),
            "Project name/location not in file contents.",
        )

        twistedPath = outputPath.child("twisted.html")
        self.assertTrue(
            twistedPath.exists(),
            "Package documentation file {!r} did not exist.".format(twistedPath.path),
        )
        self.assertIn(
            docstring,
            twistedPath.getContent().decode(),
            "Docstring not in package documentation file.",
        )
        # Here we check that it figured out the correct version based on the
        # source code.
        self.assertIn(
            '<a href="https://github.com/twisted/twisted/tree/'
            'twisted-1.0.0/src/twisted">View Source</a>',
            twistedPath.getContent().decode(),
        )

        self.assertEqual(stdout.getvalue(), b"")

    @doNotFailOnNetworkError
    def test_buildWithDeprecated(self):
        """
        The templates and System for Twisted includes adding deprecations.
        """
        stdout = BytesIO()
        self.patch(sys, "stdout", stdout)

        projectName = "Foobar"
        packageName = "quux"
        projectURL = "scheme:project"
        sourceURL = "scheme:source"
        docstring = "text in docstring"
        privateDocstring = "should also appear in output"

        # Exercise both @deprecated spellings (direct import and
        # module-qualified) plus a deprecated class.
        inputPath = FilePath(self.mktemp()).child(packageName)
        inputPath.makedirs()
        inputPath.child("__init__.py").setContent(
            "from twisted.python.deprecate import deprecated\n"
            "from incremental import Version\n"
            "@deprecated(Version('Twisted', 15, 0, 0), "
            "'Baz')\n"
            "def foo():\n"
            " '{}'\n"
            "from twisted.python import deprecate\n"
            "import incremental\n"
            "@deprecate.deprecated(incremental.Version('Twisted', 16, 0, 0))\n"
            "def _bar():\n"
            " '{}'\n"
            "@deprecated(Version('Twisted', 14, 2, 3), replacement='stuff')\n"
            "class Baz:\n"
            " pass"
            "".format(docstring, privateDocstring).encode()
        )

        outputPath = FilePath(self.mktemp())

        builder = APIBuilder()
        builder.build(projectName, projectURL, sourceURL, inputPath, outputPath)

        quuxPath = outputPath.child("quux.html")
        self.assertTrue(
            quuxPath.exists(),
            "Package documentation file {!r} did not exist.".format(quuxPath.path),
        )
        self.assertIn(
            docstring,
            quuxPath.getContent().decode(),
            "Docstring not in package documentation file.",
        )
        # The rendered pages must carry the generated deprecation notices.
        self.assertIn(
            "foo was deprecated in Twisted 15.0.0; please use Baz instead.",
            quuxPath.getContent().decode(),
        )
        self.assertIn(
            "_bar was deprecated in Twisted 16.0.0.", quuxPath.getContent().decode()
        )
        self.assertIn(privateDocstring, quuxPath.getContent().decode())

        # There should also be a page for the foo function in quux.
        self.assertTrue(quuxPath.sibling("quux.foo.html").exists())

        self.assertIn(
            "foo was deprecated in Twisted 15.0.0; please use Baz instead.",
            quuxPath.sibling("quux.foo.html").getContent().decode(),
        )
        self.assertIn(
            "Baz was deprecated in Twisted 14.2.3; please use stuff instead.",
            quuxPath.sibling("quux.Baz.html").getContent().decode(),
        )

        self.assertEqual(stdout.getvalue(), b"")

    def test_apiBuilderScriptMainRequiresTwoArguments(self):
        """
        SystemExit is raised when the incorrect number of command line
        arguments are passed to the API building script.
        """
        script = BuildAPIDocsScript()
        self.assertRaises(SystemExit, script.main, [])
        self.assertRaises(SystemExit, script.main, ["foo"])
        self.assertRaises(SystemExit, script.main, ["foo", "bar", "baz"])

    def test_apiBuilderScriptMain(self):
        """
        The API building script invokes the same code that
        L{test_buildWithPolicy} tests.
        """
        script = BuildAPIDocsScript()
        calls = []
        # Stub out the real builder and record how it would be invoked.
        script.buildAPIDocs = lambda a, b: calls.append((a, b))
        script.main(["hello", "there"])
        self.assertEqual(calls, [(FilePath("hello"), FilePath("there"))])
class FilePathDeltaTests(TestCase):
    """
    Tests for L{filePathDelta}.
    """

    def test_filePathDeltaSubdir(self):
        """
        L{filePathDelta} can create a simple relative path to a child path.
        """
        delta = filePathDelta(FilePath("/foo/bar"), FilePath("/foo/bar/baz"))
        self.assertEqual(delta, ["baz"])

    def test_filePathDeltaSiblingDir(self):
        """
        L{filePathDelta} can traverse upwards to create relative paths to
        siblings.
        """
        delta = filePathDelta(FilePath("/foo/bar"), FilePath("/foo/baz"))
        self.assertEqual(delta, ["..", "baz"])

    def test_filePathNoCommonElements(self):
        """
        L{filePathDelta} can create relative paths to totally unrelated paths
        for maximum portability.
        """
        delta = filePathDelta(FilePath("/foo/bar"), FilePath("/baz/quux"))
        self.assertEqual(delta, ["..", "..", "baz", "quux"])

    def test_filePathDeltaSimilarEndElements(self):
        """
        L{filePathDelta} doesn't take into account final elements when
        comparing 2 paths, but stops at the first difference.
        """
        delta = filePathDelta(
            FilePath("/foo/bar/bar/spam"), FilePath("/foo/bar/baz/spam")
        )
        self.assertEqual(delta, ["..", "..", "baz", "spam"])
@skipIf(not which("sphinx-build"), "Sphinx not available.")
class SphinxBuilderTests(TestCase):
    """
    Tests for L{SphinxBuilder}.

    @note: This test case depends on twisted.web, which violates the standard
        Twisted practice of not having anything in twisted.python depend on
        other Twisted packages and opens up the possibility of creating
        circular dependencies. Do not use this as an example of how to
        structure your dependencies.

    @ivar builder: A plain L{SphinxBuilder}.

    @ivar sphinxDir: A L{FilePath} representing a directory to be used for
        containing a Sphinx project.

    @ivar sourceDir: A L{FilePath} representing a directory to be used for
        containing the source files for a Sphinx project.
    """

    # Smallest conf.py that lets sphinx-build find index.rst.
    confContent = """\
source_suffix = '.rst'
master_doc = 'index'
"""
    confContent = textwrap.dedent(confContent)

    indexContent = """\
==============
This is a Test
==============

This is only a test
-------------------

In case you hadn't figured it out yet, this is a test.
"""
    indexContent = textwrap.dedent(indexContent)

    def setUp(self):
        """
        Set up a few instance variables that will be useful.
        """
        self.builder = SphinxBuilder()

        # set up a place for a fake sphinx project
        self.twistedRootDir = FilePath(self.mktemp())
        self.sphinxDir = self.twistedRootDir.child("docs")
        self.sphinxDir.makedirs()
        self.sourceDir = self.sphinxDir

    def createFakeSphinxProject(self):
        """
        Create a fake Sphinx project for test purposes.

        Creates a fake Sphinx project with the absolute minimum of source
        files. This includes a single source file ('index.rst') and the
        smallest 'conf.py' file possible in order to find that source file.
        """
        self.sourceDir.child("conf.py").setContent(self.confContent.encode())
        self.sourceDir.child("index.rst").setContent(self.indexContent.encode())

    def verifyFileExists(self, fileDir, fileName):
        """
        Helper which verifies that C{fileName} exists in C{fileDir} and it has
        some content.

        @param fileDir: A path to a directory.
        @type fileDir: L{FilePath}

        @param fileName: The last path segment of a file which may exist within
            C{fileDir}.
        @type fileName: L{str}

        @raise FailTest: If C{fileDir.child(fileName)}:

            1. Does not exist.

            2. Is empty.

            3. In the case where it's a path to a C{.html} file, the
               content looks like an HTML file.

        @return: L{None}
        """
        # check that file exists
        fpath = fileDir.child(fileName)
        self.assertTrue(fpath.exists())

        # check that the output files have some content
        fcontents = fpath.getContent()
        self.assertTrue(len(fcontents) > 0)

        # check that the html files are at least html-ish
        # this is not a terribly rigorous check
        if fpath.path.endswith(".html"):
            self.assertIn(b"<body", fcontents)

    def test_build(self):
        """
        Creates and builds a fake Sphinx project using a L{SphinxBuilder}.
        """
        self.createFakeSphinxProject()
        self.builder.build(self.sphinxDir)
        self.verifyBuilt()

    def test_main(self):
        """
        Creates and builds a fake Sphinx project as if via the command line.
        """
        self.createFakeSphinxProject()
        self.builder.main([self.sphinxDir.parent().path])
        self.verifyBuilt()

    def test_warningsAreErrors(self):
        """
        Creates and builds a fake Sphinx project as if via the command line,
        failing if there are any warnings.
        """
        output = StringIO()
        self.patch(sys, "stdout", output)
        self.createFakeSphinxProject()
        # Append a malformed hyperlink target so sphinx emits a warning,
        # which the builder must escalate to a failing exit code.
        with self.sphinxDir.child("index.rst").open("a") as f:
            f.write(b"\n.. _malformed-link-target\n")
        exception = self.assertRaises(
            SystemExit, self.builder.main, [self.sphinxDir.parent().path]
        )
        self.assertEqual(exception.code, 1)
        self.assertIn("malformed hyperlink target", output.getvalue())
        self.verifyBuilt()

    def verifyBuilt(self):
        """
        Verify that a sphinx project has been built.
        """
        htmlDir = self.sphinxDir.sibling("doc")
        self.assertTrue(htmlDir.isdir())
        # The intermediate doctree cache must not be shipped.
        doctreeDir = htmlDir.child("doctrees")
        self.assertFalse(doctreeDir.exists())

        self.verifyFileExists(htmlDir, "index.html")
        self.verifyFileExists(htmlDir, "genindex.html")
        self.verifyFileExists(htmlDir, "objects.inv")
        self.verifyFileExists(htmlDir, "search.html")
        self.verifyFileExists(htmlDir, "searchindex.js")

    def test_failToBuild(self):
        """
        Check that SphinxBuilder.build fails when run against a non-sphinx
        directory.
        """
        # note no fake sphinx project is created
        self.assertRaises(CalledProcessError, self.builder.build, self.sphinxDir)
class CommandsTestMixin(StructureAssertingMixin):
    """
    Test mixin for the VCS commands used by the release scripts.

    Concrete subclasses supply C{createCommand} (the VCS command class under
    test) and the C{makeRepository} / C{commitRepository} helpers.
    """

    def setUp(self):
        # Fresh scratch directory per test; makeRepository turns it into a
        # working copy on demand.
        self.tmpDir = FilePath(self.mktemp())

    def test_ensureIsWorkingDirectoryWithWorkingDirectory(self):
        """
        Calling the C{ensureIsWorkingDirectory} VCS command's method on a valid
        working directory doesn't produce any error.
        """
        reposDir = self.makeRepository(self.tmpDir)
        self.assertIsNone(self.createCommand.ensureIsWorkingDirectory(reposDir))

    def test_ensureIsWorkingDirectoryWithNonWorkingDirectory(self):
        """
        Calling the C{ensureIsWorkingDirectory} VCS command's method on an
        invalid working directory raises a L{NotWorkingDirectory} exception.
        """
        self.assertRaises(
            NotWorkingDirectory,
            self.createCommand.ensureIsWorkingDirectory,
            self.tmpDir,
        )

    def test_statusClean(self):
        """
        Calling the C{isStatusClean} VCS command's method on a repository with
        no pending modifications returns C{True}.
        """
        reposDir = self.makeRepository(self.tmpDir)
        self.assertTrue(self.createCommand.isStatusClean(reposDir))

    def test_statusNotClean(self):
        """
        Calling the C{isStatusClean} VCS command's method on a repository with
        pending modifications returns C{False}.
        """
        reposDir = self.makeRepository(self.tmpDir)
        # An uncommitted new file makes the working copy dirty.
        reposDir.child("some-file").setContent(b"something")
        self.assertFalse(self.createCommand.isStatusClean(reposDir))

    def test_remove(self):
        """
        Calling the C{remove} VCS command's method remove the specified path
        from the directory.
        """
        reposDir = self.makeRepository(self.tmpDir)
        testFile = reposDir.child("some-file")
        testFile.setContent(b"something")
        self.commitRepository(reposDir)
        self.assertTrue(testFile.exists())

        self.createCommand.remove(testFile)
        testFile.restat(False)  # Refresh the file information
        self.assertFalse(testFile.exists(), "File still exists")

    def test_export(self):
        """
        The C{exportTo} VCS command's method export the content of the
        repository as identical in a specified directory.
        """
        structure = {
            "README.rst": "Hi this is 1.0.0.",
            "twisted": {
                "newsfragments": {"README": "Hi this is 1.0.0"},
                "_version.py": genVersion("twisted", 1, 0, 0),
                "web": {
                    "newsfragments": {"README": "Hi this is 1.0.0"},
                    "_version.py": genVersion("twisted.web", 1, 0, 0),
                },
            },
        }
        reposDir = self.makeRepository(self.tmpDir)
        self.createStructure(reposDir, structure)
        self.commitRepository(reposDir)
        exportDir = FilePath(self.mktemp()).child("export")
        self.createCommand.exportTo(reposDir, exportDir)
        self.assertStructure(exportDir, structure)
class GitCommandTest(CommandsTestMixin, ExternalTempdirTestCase):
    """
    Specific L{CommandsTestMixin} related to Git repositories through
    L{GitCommand}.
    """

    createCommand = GitCommand

    def makeRepository(self, root):
        """
        Create a Git repository in the specified path.

        @type root: L{FilePath}
        @param root: The directory to create the Git repository into.

        @return: The path to the repository just created.
        @rtype: L{FilePath}
        """
        _gitInit(root)
        return root

    def commitRepository(self, repository):
        """
        Add and commit all the files from the Git repository specified.

        @type repository: L{FilePath}
        @param repository: The Git repository to commit into.
        """
        # glob("/*") expands the repository contents explicitly; dot-files
        # such as the .git directory are not matched by the pattern.
        runCommand(
            ["git", "-C", repository.path, "add"] + glob.glob(repository.path + "/*")
        )
        runCommand(["git", "-C", repository.path, "commit", "-m", "hop"])
class RepositoryCommandDetectionTest(ExternalTempdirTestCase):
    """
    Test the L{getRepositoryCommand} to access the right set of VCS commands
    depending on the repository manipulated.
    """

    def setUp(self):
        # A scratch path which individual tests may or may not turn into a
        # real repository.
        self.repos = FilePath(self.mktemp())

    def test_git(self):
        """
        L{getRepositoryCommand} from a Git repository returns L{GitCommand}.
        """
        _gitInit(self.repos)
        detected = getRepositoryCommand(self.repos)
        self.assertIs(detected, GitCommand)

    def test_unknownRepository(self):
        """
        L{getRepositoryCommand} from a directory which doesn't look like a Git
        repository produces a L{NotWorkingDirectory} exception.
        """
        self.assertRaises(NotWorkingDirectory, getRepositoryCommand, self.repos)
class VCSCommandInterfaceTests(TestCase):
    """
    Test that the VCS command classes implement their interface.
    """

    def test_git(self):
        """
        L{GitCommand} implements L{IVCSCommand}.
        """
        implemented = IVCSCommand.implementedBy(GitCommand)
        self.assertTrue(implemented)
class CheckNewsfragmentScriptTests(ExternalTempdirTestCase):
    """
    L{CheckNewsfragmentScript}.

    Each test clones a small origin repository whose default branch is
    ``trunk``, creates a feature/release branch and asserts on the script's
    exit code and last log line.
    """

    def setUp(self):
        # Build the origin repository with one commit on "trunk".
        self.origin = FilePath(self.mktemp())
        _gitInit(self.origin)
        runCommand(["git", "checkout", "-b", "trunk"], cwd=self.origin.path)
        self.origin.child("test").setContent(b"test!")
        runCommand(["git", "add", self.origin.child("test").path], cwd=self.origin.path)
        runCommand(["git", "commit", "-m", "initial"], cwd=self.origin.path)

        # Work in a clone, as a contributor would.
        self.repo = FilePath(self.mktemp())
        runCommand(["git", "clone", self.origin.path, self.repo.path])
        _gitConfig(self.repo)

    def test_noArgs(self):
        """
        Too few arguments returns a failure.
        """
        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([])

        self.assertEqual(
            e.exception.args, ("Must specify one argument: the Twisted checkout",)
        )

    def test_diffFromTrunkNoNewsfragments(self):
        """
        If there are changes from trunk, then there should also be a
        newsfragment.
        """
        runCommand(["git", "checkout", "-b", "mypatch"], cwd=self.repo.path)
        somefile = self.repo.child("somefile")
        somefile.setContent(b"change")

        # (fixed: the path was previously passed to "git add" twice)
        runCommand(["git", "add", somefile.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "some file"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1], "No newsfragment found. Have you committed it?")

    def test_noChangeFromTrunk(self):
        """
        If there are no changes from trunk, then no need to check the
        newsfragments
        """
        runCommand(["git", "checkout", "-b", "mypatch"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(
            logs[-1], "On trunk or no diffs from trunk; no need to look at this."
        )

    def test_trunk(self):
        """
        Running it on trunk always gives green.
        """
        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(
            logs[-1], "On trunk or no diffs from trunk; no need to look at this."
        )

    def test_release(self):
        """
        Running it on a release branch returns green if there is no
        newsfragments even if there are changes.
        """
        runCommand(
            ["git", "checkout", "-b", "release-16.11111-9001"], cwd=self.repo.path
        )

        somefile = self.repo.child("somefile")
        somefile.setContent(b"change")

        # (fixed: the path was previously passed to "git add" twice)
        runCommand(["git", "add", somefile.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "some file"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Release branch with no newsfragments, all good.")

    def test_releaseWithNewsfragments(self):
        """
        Running it on a release branch returns red if there are new
        newsfragments.
        """
        runCommand(
            ["git", "checkout", "-b", "release-16.11111-9001"], cwd=self.repo.path
        )

        newsfragments = self.repo.child("twisted").child("newsfragments")
        newsfragments.makedirs()
        fragment = newsfragments.child("1234.misc")
        fragment.setContent(b"")

        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")

        runCommand(["git", "add", fragment.path, unrelated.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "fragment"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1], "No newsfragments should be on the release branch.")

    def test_onlyQuotes(self):
        """
        Running it on a branch with only a quotefile change gives green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"], cwd=self.repo.path)

        fun = self.repo.child("docs").child("fun")
        fun.makedirs()
        quotes = fun.child("Twisted.Quotes")
        quotes.setContent(b"Beep boop")

        runCommand(["git", "add", quotes.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "quotes"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Quotes change only; no newsfragment needed.")

    def test_newsfragmentAdded(self):
        """
        Running it on a branch with a fragment in the newsfragments dir added
        returns green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"], cwd=self.repo.path)

        newsfragments = self.repo.child("twisted").child("newsfragments")
        newsfragments.makedirs()
        fragment = newsfragments.child("1234.misc")
        fragment.setContent(b"")

        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")

        runCommand(["git", "add", fragment.path, unrelated.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "newsfragment"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Found twisted/newsfragments/1234.misc")

    def test_topfileButNotFragmentAdded(self):
        """
        Running it on a branch with a non-fragment in the topfiles dir does not
        return green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"], cwd=self.repo.path)

        topfiles = self.repo.child("twisted").child("topfiles")
        topfiles.makedirs()
        notFragment = topfiles.child("1234.txt")
        notFragment.setContent(b"")

        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")

        runCommand(["git", "add", notFragment.path, unrelated.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "not topfile"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1], "No newsfragment found. Have you committed it?")

    def test_newsfragmentAddedButWithOtherNewsfragments(self):
        """
        Running it on a branch with a fragment in the newsfragments dir added
        returns green, even if there are other files in the newsfragments dir.
        """
        runCommand(["git", "checkout", "-b", "quotefile"], cwd=self.repo.path)

        newsfragments = self.repo.child("twisted").child("newsfragments")
        newsfragments.makedirs()
        fragment = newsfragments.child("1234.misc")
        fragment.setContent(b"")

        unrelated = newsfragments.child("somefile")
        unrelated.setContent(b"Boo")

        runCommand(["git", "add", fragment.path, unrelated.path], cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "newsfragment"], cwd=self.repo.path)

        logs = []

        with self.assertRaises(SystemExit) as e:
            CheckNewsfragmentScript(logs.append).main([self.repo.path])

        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Found twisted/newsfragments/1234.misc")
| 33.43771 | 88 | 0.603338 |
3d311d6e708c58d35ba42cbccf0e469f9ea147db | 9,636 | py | Python | afs/lla/BosServerLLAParse.py | chanke/afspy | 525e7b3b53e58be515f11b83cc59ddb0765ef8e5 | [
"BSD-2-Clause"
] | null | null | null | afs/lla/BosServerLLAParse.py | chanke/afspy | 525e7b3b53e58be515f11b83cc59ddb0765ef8e5 | [
"BSD-2-Clause"
] | null | null | null | afs/lla/BosServerLLAParse.py | chanke/afspy | 525e7b3b53e58be515f11b83cc59ddb0765ef8e5 | [
"BSD-2-Clause"
] | null | null | null | """
functions for parsing
output from shell commands executed by lla.BosServer
"""
import datetime
import re
from BosServerLLAError import BosServerLLAError
from afs.model import BNode
def get_restart_times(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    Expects exactly two lines ("bos getrestart" style): the general restart
    time and the new-binaries restart time. Returns a dict with the keys
    "general" and "newbinary".
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    if len(output) != 2 :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    general_regex = re.compile(r"Server (\S+) restarts (?:at)?(.*)")
    newbinary_regex = re.compile(r"Server (\S+) restarts for new binaries (?:at)?(.*)")
    return {
        "general" : general_regex.match(output[0]).groups()[1].strip(),
        "newbinary" : newbinary_regex.match(output[1]).groups()[1].strip(),
    }
def set_restart_time(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    Success is signalled purely by the exit code.
    """
    if not ret :
        return True
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def get_db_servers(ret, output, outerr, parse_param_list, logger) :
    """
    Parse the result of the method of the same name in lla.BosServer.

    Lines look like "Host <n> is <name>"; a name wrapped in square brackets
    marks a clone. Returns a list of dicts with the keys "hostname" and
    "isClone".
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    host_regex = re.compile("Host (\d+) is (\S+)")
    servers = []
    for line in output :
        found = host_regex.match(line)
        if not found :
            continue
        name = found.groups()[1].strip()
        # brackets around the name mean "clone" in bos listhosts output
        if name.startswith("[") and name.endswith("]") :
            servers.append({'hostname' : name[1:-1], 'isClone' : 1})
        else :
            servers.append({'hostname' : name, 'isClone' : 0})
    return servers
def get_bnodes(ret, output, outerr, parse_param_list, logger):
    """
    parses result from method of same name in lla.BosServer

    Walks "bos status"-style output line by line and builds one
    L{BNode.BNode} per "Instance" paragraph. NOTE(review): the loop assumes
    the output is a well-formed sequence of Instance paragraphs; a leading
    non-"Instance" line would not advance idx -- confirm against real bos
    output.
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    bnodes = []
    idx = 0
    while 1 :
        if idx >= len(output) : break
        tokens = output[idx].split()
        if tokens[0] == "Instance" :
            # "Instance <name>, (type is <type>) ..." - [:-1] strips the
            # trailing punctuation from the name and type tokens.
            this_bnode = BNode.BNode(instance_name=tokens[1][:-1], bnode_type=tokens[4][:-1])
            if "currently running normally" in output[idx] :
                this_bnode.status = "running"
            elif "disabled, currently shutdown" in output[idx] :
                this_bnode.status = "disabled"
            elif ") currently shutting down." in output[idx] :
                this_bnode.status = "shutting down"
            else :
                this_bnode.status = "stopped"
            idx += 1
            # Optional "Auxiliary status is:" line is skipped.
            if "Auxiliary status is:" in output[idx] :
                idx += 1
            # tokens[4:9] carries "<weekday> <month> <day> <time> <year>".
            tokens = output[idx].split()
            this_bnode.start_date = datetime.datetime.strptime(" ".join(tokens[4:9]), "%a %b %d %H:%M:%S %Y")
            idx += 1
            tokens = output[idx].split()
            if tokens[0] == "Last" and tokens[1] == "exit" :
                this_bnode.last_exit_date = datetime.datetime.strptime(" ".join(tokens[3:]), "%a %b %d %H:%M:%S %Y")
                idx += 1
                tokens = output[idx].split()
            if tokens[0] == "Last" and tokens[1] == "error" :
                # replace(",","") drops the comma glued to the year.
                this_bnode.error_exit_date = datetime.datetime.strptime(" ".join(tokens[4:9]).replace(",",""), "%a %b %d %H:%M:%S %Y")
                idx += 1
                tokens = output[idx].split()
            this_bnode.commands = []
            # Collect "Command ..." lines until the next Instance paragraph
            # or the end of the output.
            while 1 :
                if tokens[0] == "Instance" : break
                if tokens[0] == "Command" :
                    # NOTE(review): str.translate(None, "'") is the Python 2
                    # deletion form; under Python 3 this raises TypeError.
                    cmd = " ".join(tokens[3:]).translate(None,"'")
                    this_bnode.commands.append(cmd)
                    idx += 1
                else :
                    # Unrecognized line: dump the whole output for debugging
                    # and give up.
                    import sys
                    for ii in range(len(output)) :
                        sys.stderr.write("%d: %s\n" % (ii, output[ii].strip()))
                    raise BosServerLLAError("parse error at line no %d : %s" % (idx, output[idx]))
                if idx >= len(output) : break
                tokens = output[idx].split()
            bnodes.append(this_bnode)
    return bnodes
def salvage(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    On success the raw salvager output lines are handed back unchanged.
    """
    if not ret :
        return output
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def add_superuser(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if ret :
        message = "%s, %s" % (output, outerr)
        raise BosServerLLAError(message)
    return True
def remove_superuser(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if not ret :
        return True
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def get_superuserlist(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    The first line looks like "SUsers are: user1 user2 ..."; any further
    lines are plain whitespace-separated continuation lists.
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    if not output :
        return []
    # Skip the two leading words ("SUsers are:") on the first line.
    names = list(output[0].split()[2:])
    for continuation in output[1:] :
        names.extend(continuation.split())
    return names
def get_filedate(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer
    ("bos getdate").

    Returns a dict with the date strings of the current binary and of the
    ".BAK" / ".OLD" copies (None where no such copy exists).
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    if "does not exist" in output[0] :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    line = output[0]
    tokens = line.split()
    # tokens[4:8] hold "<month> <day> <time> <year>,"; the trailing [:-1]
    # strips the comma (or final period).
    result = { "current" : " ".join(tokens[4:8])[:-1],
               "backup" : None, "old" : None }
    if "no .BAK file" in line :
        # without a .BAK entry, a possible .OLD date starts earlier
        if "no .OLD file" not in line :
            result["old"] = " ".join(tokens[15:19])[:-1]
    else :
        result["backup"] = " ".join(tokens[12:16])[:-1]
        if "no .OLD file" not in line :
            result["old"] = " ".join(tokens[20:24])[:-1]
    return result
def restart(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if not ret :
        return True
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def start_bnodes(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if ret :
        message = "%s, %s" % (output, outerr)
        raise BosServerLLAError(message)
    return True
def stop_bnodes(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if not ret :
        return True
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def execute_shell(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if ret :
        message = "%s, %s" % (output, outerr)
        raise BosServerLLAError(message)
    return True
def get_log(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    The first line is a header emitted by bos; everything after it is the
    log file itself, returned as a list of lines.
    """
    if not ret :
        ## we just get the LogFile as array of lines
        return output[1:]
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def prune_log(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    """
    if not ret :
        return True
    raise BosServerLLAError("%s, %s" % (output, outerr) )
def shutdown(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.

    bos does not always exit non-zero on authorization failures, so the
    output is scanned for the tell-tale message as well.
    """
    if ret :
        raise BosServerLLAError("%s, %s" % (output, outerr) )
    # bos doesnt return proper code
    unauthorized = any("you are not authorized for this operation" in line
                       for line in output)
    if unauthorized :
        raise BosServerLLAError(output)
    return True
def startup(ret, output, outerr, parse_param_list, logger):
    """
    Parse the result of the method of the same name in lla.BosServer.
    Returns True on success, raises BosServerLLAError on a non-zero rc
    or on an authorization failure reported in the output.
    """
    if ret:
        raise BosServerLLAError("%s, %s" % (output, outerr))
    # bos exits 0 even on auth failures, so scan the output ourselves
    if any("you are not authorized for this operation" in line
           for line in output):
        raise BosServerLLAError(output)
    return True
#
# convenience helper
#
def restarttime_to_minutes(time):
"""
converts a restart time from the human readable output to
minutes after midnight.
-1 means never
"""
if time == "never" :
return -1
minutes = 0
tokens = time.split()
if tokens[1] == "pm" :
minutes = 12*60
hours, min = tokens[0].split(":")
minutes += int(hours)*60 + min
return minutes
def minutes_to_restarttime(minutes):
    """
    Convert an int meaning minutes after midnight into a restartTime
    string understood by the bos command, e.g. "5:00 am" or "11:30 pm".

    :param minutes: minutes after midnight; -1 means "never"
    :return: restart time string in "H:MM am|pm" form, or "never"
    """
    if minutes == -1:
        return "never"
    pod = "am"
    # ">=" (not ">") so that exactly noon (720) is labelled "pm"
    if minutes >= 12 * 60:
        pod = "pm"
        minutes -= 12 * 60
    hours, mins = divmod(minutes, 60)
    if hours == 0:
        # 12-hour clock writes the 0 o'clock hour as 12 (12:MM am/pm)
        hours = 12
    return "%d:%02d %s" % (hours, mins, pod)
| 33.458333 | 150 | 0.581465 |
78dc9e542ad8932b9c4f5e353298af0c78dff6d5 | 42,438 | py | Python | sdk/python/pulumi_azure/appservice/environment_v3.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/appservice/environment_v3.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/appservice/environment_v3.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public API of this generated module.
__all__ = ['EnvironmentV3Args', 'EnvironmentV3']
@pulumi.input_type
class EnvironmentV3Args:
    # Auto-generated input-type class: the @pulumi.getter names and property
    # layout are part of the provider serialization contract — do not restyle.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 subnet_id: pulumi.Input[str],
                 allow_new_private_endpoint_connections: Optional[pulumi.Input[bool]] = None,
                 cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]] = None,
                 dedicated_host_count: Optional[pulumi.Input[int]] = None,
                 internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 zone_redundant: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a EnvironmentV3 resource.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
        :param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] allow_new_private_endpoint_connections: Should new Private Endpoint Connections be allowed. Defaults to `true`.
        :param pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
        :param pulumi.Input[int] dedicated_host_count: This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
        :param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] zone_redundant: NOTE(review): undocumented in the generated source; presumably whether the environment is deployed zone-redundantly — confirm against the provider schema.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "subnet_id", subnet_id)
        if allow_new_private_endpoint_connections is not None:
            pulumi.set(__self__, "allow_new_private_endpoint_connections", allow_new_private_endpoint_connections)
        if cluster_settings is not None:
            pulumi.set(__self__, "cluster_settings", cluster_settings)
        if dedicated_host_count is not None:
            pulumi.set(__self__, "dedicated_host_count", dedicated_host_count)
        if internal_load_balancing_mode is not None:
            pulumi.set(__self__, "internal_load_balancing_mode", internal_load_balancing_mode)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if zone_redundant is not None:
            pulumi.set(__self__, "zone_redundant", zone_redundant)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter(name="allowNewPrivateEndpointConnections")
    def allow_new_private_endpoint_connections(self) -> Optional[pulumi.Input[bool]]:
        """
        Should new Private Endpoint Connections be allowed. Defaults to `true`.
        """
        return pulumi.get(self, "allow_new_private_endpoint_connections")
    @allow_new_private_endpoint_connections.setter
    def allow_new_private_endpoint_connections(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_new_private_endpoint_connections", value)
    @property
    @pulumi.getter(name="clusterSettings")
    def cluster_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]]:
        """
        Zero or more `cluster_setting` blocks as defined below.
        """
        return pulumi.get(self, "cluster_settings")
    @cluster_settings.setter
    def cluster_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]]):
        pulumi.set(self, "cluster_settings", value)
    @property
    @pulumi.getter(name="dedicatedHostCount")
    def dedicated_host_count(self) -> Optional[pulumi.Input[int]]:
        """
        This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "dedicated_host_count")
    @dedicated_host_count.setter
    def dedicated_host_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dedicated_host_count", value)
    @property
    @pulumi.getter(name="internalLoadBalancingMode")
    def internal_load_balancing_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
        """
        return pulumi.get(self, "internal_load_balancing_mode")
    @internal_load_balancing_mode.setter
    def internal_load_balancing_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "internal_load_balancing_mode", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the App Service Environment. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="zoneRedundant")
    def zone_redundant(self) -> Optional[pulumi.Input[bool]]:
        """
        NOTE(review): undocumented in the generated source; presumably whether the environment is deployed zone-redundantly — confirm against the provider schema.
        """
        return pulumi.get(self, "zone_redundant")
    @zone_redundant.setter
    def zone_redundant(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "zone_redundant", value)
@pulumi.input_type
class _EnvironmentV3State:
    # Auto-generated state-lookup type: the @pulumi.getter names and property
    # layout are part of the provider serialization contract — do not restyle.
    def __init__(__self__, *,
                 allow_new_private_endpoint_connections: Optional[pulumi.Input[bool]] = None,
                 cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]] = None,
                 dedicated_host_count: Optional[pulumi.Input[int]] = None,
                 dns_suffix: Optional[pulumi.Input[str]] = None,
                 external_inbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 inbound_network_dependencies: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3InboundNetworkDependencyArgs']]]] = None,
                 internal_inbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
                 ip_ssl_address_count: Optional[pulumi.Input[int]] = None,
                 linux_outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 pricing_tier: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 windows_outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 zone_redundant: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering EnvironmentV3 resources.
        :param pulumi.Input[bool] allow_new_private_endpoint_connections: Should new Private Endpoint Connections be allowed. Defaults to `true`.
        :param pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
        :param pulumi.Input[int] dedicated_host_count: This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] dns_suffix: the DNS suffix for this App Service Environment V3.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_inbound_ip_addresses: The external outbound IP addresses of the App Service Environment V3.
        :param pulumi.Input[Sequence[pulumi.Input['EnvironmentV3InboundNetworkDependencyArgs']]] inbound_network_dependencies: An Inbound Network Dependencies block as defined below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] internal_inbound_ip_addresses: The internal outbound IP addresses of the App Service Environment V3.
        :param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
        :param pulumi.Input[int] ip_ssl_address_count: The number of IP SSL addresses reserved for the App Service Environment V3.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] linux_outbound_ip_addresses: Outbound addresses of Linux based Apps in this App Service Environment V3
        :param pulumi.Input[str] location: The location where the App Service Environment exists.
        :param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
        :param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
        :param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] windows_outbound_ip_addresses: Outbound addresses of Windows based Apps in this App Service Environment V3.
        :param pulumi.Input[bool] zone_redundant: NOTE(review): undocumented in the generated source; presumably whether the environment is deployed zone-redundantly — confirm against the provider schema.
        """
        if allow_new_private_endpoint_connections is not None:
            pulumi.set(__self__, "allow_new_private_endpoint_connections", allow_new_private_endpoint_connections)
        if cluster_settings is not None:
            pulumi.set(__self__, "cluster_settings", cluster_settings)
        if dedicated_host_count is not None:
            pulumi.set(__self__, "dedicated_host_count", dedicated_host_count)
        if dns_suffix is not None:
            pulumi.set(__self__, "dns_suffix", dns_suffix)
        if external_inbound_ip_addresses is not None:
            pulumi.set(__self__, "external_inbound_ip_addresses", external_inbound_ip_addresses)
        if inbound_network_dependencies is not None:
            pulumi.set(__self__, "inbound_network_dependencies", inbound_network_dependencies)
        if internal_inbound_ip_addresses is not None:
            pulumi.set(__self__, "internal_inbound_ip_addresses", internal_inbound_ip_addresses)
        if internal_load_balancing_mode is not None:
            pulumi.set(__self__, "internal_load_balancing_mode", internal_load_balancing_mode)
        if ip_ssl_address_count is not None:
            pulumi.set(__self__, "ip_ssl_address_count", ip_ssl_address_count)
        if linux_outbound_ip_addresses is not None:
            pulumi.set(__self__, "linux_outbound_ip_addresses", linux_outbound_ip_addresses)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if pricing_tier is not None:
            pulumi.set(__self__, "pricing_tier", pricing_tier)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if windows_outbound_ip_addresses is not None:
            pulumi.set(__self__, "windows_outbound_ip_addresses", windows_outbound_ip_addresses)
        if zone_redundant is not None:
            pulumi.set(__self__, "zone_redundant", zone_redundant)
    @property
    @pulumi.getter(name="allowNewPrivateEndpointConnections")
    def allow_new_private_endpoint_connections(self) -> Optional[pulumi.Input[bool]]:
        """
        Should new Private Endpoint Connections be allowed. Defaults to `true`.
        """
        return pulumi.get(self, "allow_new_private_endpoint_connections")
    @allow_new_private_endpoint_connections.setter
    def allow_new_private_endpoint_connections(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_new_private_endpoint_connections", value)
    @property
    @pulumi.getter(name="clusterSettings")
    def cluster_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]]:
        """
        Zero or more `cluster_setting` blocks as defined below.
        """
        return pulumi.get(self, "cluster_settings")
    @cluster_settings.setter
    def cluster_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3ClusterSettingArgs']]]]):
        pulumi.set(self, "cluster_settings", value)
    @property
    @pulumi.getter(name="dedicatedHostCount")
    def dedicated_host_count(self) -> Optional[pulumi.Input[int]]:
        """
        This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "dedicated_host_count")
    @dedicated_host_count.setter
    def dedicated_host_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dedicated_host_count", value)
    @property
    @pulumi.getter(name="dnsSuffix")
    def dns_suffix(self) -> Optional[pulumi.Input[str]]:
        """
        the DNS suffix for this App Service Environment V3.
        """
        return pulumi.get(self, "dns_suffix")
    @dns_suffix.setter
    def dns_suffix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns_suffix", value)
    @property
    @pulumi.getter(name="externalInboundIpAddresses")
    def external_inbound_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The external outbound IP addresses of the App Service Environment V3.
        """
        return pulumi.get(self, "external_inbound_ip_addresses")
    @external_inbound_ip_addresses.setter
    def external_inbound_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_inbound_ip_addresses", value)
    @property
    @pulumi.getter(name="inboundNetworkDependencies")
    def inbound_network_dependencies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3InboundNetworkDependencyArgs']]]]:
        """
        An Inbound Network Dependencies block as defined below.
        """
        return pulumi.get(self, "inbound_network_dependencies")
    @inbound_network_dependencies.setter
    def inbound_network_dependencies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentV3InboundNetworkDependencyArgs']]]]):
        pulumi.set(self, "inbound_network_dependencies", value)
    @property
    @pulumi.getter(name="internalInboundIpAddresses")
    def internal_inbound_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The internal outbound IP addresses of the App Service Environment V3.
        """
        return pulumi.get(self, "internal_inbound_ip_addresses")
    @internal_inbound_ip_addresses.setter
    def internal_inbound_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "internal_inbound_ip_addresses", value)
    @property
    @pulumi.getter(name="internalLoadBalancingMode")
    def internal_load_balancing_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
        """
        return pulumi.get(self, "internal_load_balancing_mode")
    @internal_load_balancing_mode.setter
    def internal_load_balancing_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "internal_load_balancing_mode", value)
    @property
    @pulumi.getter(name="ipSslAddressCount")
    def ip_ssl_address_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of IP SSL addresses reserved for the App Service Environment V3.
        """
        return pulumi.get(self, "ip_ssl_address_count")
    @ip_ssl_address_count.setter
    def ip_ssl_address_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ip_ssl_address_count", value)
    @property
    @pulumi.getter(name="linuxOutboundIpAddresses")
    def linux_outbound_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Outbound addresses of Linux based Apps in this App Service Environment V3
        """
        return pulumi.get(self, "linux_outbound_ip_addresses")
    @linux_outbound_ip_addresses.setter
    def linux_outbound_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "linux_outbound_ip_addresses", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the App Service Environment exists.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the App Service Environment. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="pricingTier")
    def pricing_tier(self) -> Optional[pulumi.Input[str]]:
        """
        Pricing tier for the front end instances.
        """
        return pulumi.get(self, "pricing_tier")
    @pricing_tier.setter
    def pricing_tier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pricing_tier", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="windowsOutboundIpAddresses")
    def windows_outbound_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Outbound addresses of Windows based Apps in this App Service Environment V3.
        """
        return pulumi.get(self, "windows_outbound_ip_addresses")
    @windows_outbound_ip_addresses.setter
    def windows_outbound_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "windows_outbound_ip_addresses", value)
    @property
    @pulumi.getter(name="zoneRedundant")
    def zone_redundant(self) -> Optional[pulumi.Input[bool]]:
        """
        NOTE(review): undocumented in the generated source; presumably whether the environment is deployed zone-redundantly — confirm against the provider schema.
        """
        return pulumi.get(self, "zone_redundant")
    @zone_redundant.setter
    def zone_redundant(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "zone_redundant", value)
class EnvironmentV3(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_new_private_endpoint_connections: Optional[pulumi.Input[bool]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3ClusterSettingArgs']]]]] = None,
dedicated_host_count: Optional[pulumi.Input[int]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Manages a 3rd Generation (v3) App Service Environment.
> **NOTE:** App Service Environment V3 is currently in Preview.
## Import
A 3rd Generation (v3) App Service Environment can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/environmentV3:EnvironmentV3 myAppServiceEnv /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Web/hostingEnvironments/myAppServiceEnv
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_new_private_endpoint_connections: Should new Private Endpoint Connections be allowed. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3ClusterSettingArgs']]]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] dedicated_host_count: This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EnvironmentV3Args,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a 3rd Generation (v3) App Service Environment.
> **NOTE:** App Service Environment V3 is currently in Preview.
## Import
A 3rd Generation (v3) App Service Environment can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/environmentV3:EnvironmentV3 myAppServiceEnv /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Web/hostingEnvironments/myAppServiceEnv
```
:param str resource_name: The name of the resource.
:param EnvironmentV3Args args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EnvironmentV3Args, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_new_private_endpoint_connections: Optional[pulumi.Input[bool]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3ClusterSettingArgs']]]]] = None,
dedicated_host_count: Optional[pulumi.Input[int]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EnvironmentV3Args.__new__(EnvironmentV3Args)
__props__.__dict__["allow_new_private_endpoint_connections"] = allow_new_private_endpoint_connections
__props__.__dict__["cluster_settings"] = cluster_settings
__props__.__dict__["dedicated_host_count"] = dedicated_host_count
__props__.__dict__["internal_load_balancing_mode"] = internal_load_balancing_mode
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if subnet_id is None and not opts.urn:
raise TypeError("Missing required property 'subnet_id'")
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
__props__.__dict__["zone_redundant"] = zone_redundant
__props__.__dict__["dns_suffix"] = None
__props__.__dict__["external_inbound_ip_addresses"] = None
__props__.__dict__["inbound_network_dependencies"] = None
__props__.__dict__["internal_inbound_ip_addresses"] = None
__props__.__dict__["ip_ssl_address_count"] = None
__props__.__dict__["linux_outbound_ip_addresses"] = None
__props__.__dict__["location"] = None
__props__.__dict__["pricing_tier"] = None
__props__.__dict__["windows_outbound_ip_addresses"] = None
super(EnvironmentV3, __self__).__init__(
'azure:appservice/environmentV3:EnvironmentV3',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allow_new_private_endpoint_connections: Optional[pulumi.Input[bool]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3ClusterSettingArgs']]]]] = None,
dedicated_host_count: Optional[pulumi.Input[int]] = None,
dns_suffix: Optional[pulumi.Input[str]] = None,
external_inbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
inbound_network_dependencies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3InboundNetworkDependencyArgs']]]]] = None,
internal_inbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
ip_ssl_address_count: Optional[pulumi.Input[int]] = None,
linux_outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
windows_outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None) -> 'EnvironmentV3':
"""
Get an existing EnvironmentV3 resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_new_private_endpoint_connections: Should new Private Endpoint Connections be allowed. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3ClusterSettingArgs']]]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] dedicated_host_count: This ASEv3 should use dedicated Hosts. Possible vales are `2`. Changing this forces a new resource to be created.
:param pulumi.Input[str] dns_suffix: the DNS suffix for this App Service Environment V3.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_inbound_ip_addresses: The external outbound IP addresses of the App Service Environment V3.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentV3InboundNetworkDependencyArgs']]]] inbound_network_dependencies: An Inbound Network Dependencies block as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] internal_inbound_ip_addresses: The internal outbound IP addresses of the App Service Environment V3.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None` (for an External VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults to `None`.
:param pulumi.Input[int] ip_ssl_address_count: The number of IP SSL addresses reserved for the App Service Environment V3.
:param pulumi.Input[Sequence[pulumi.Input[str]]] linux_outbound_ip_addresses: Outbound addresses of Linux based Apps in this App Service Environment V3
:param pulumi.Input[str] location: The location where the App Service Environment exists.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] windows_outbound_ip_addresses: Outbound addresses of Windows based Apps in this App Service Environment V3.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _EnvironmentV3State.__new__(_EnvironmentV3State)
__props__.__dict__["allow_new_private_endpoint_connections"] = allow_new_private_endpoint_connections
__props__.__dict__["cluster_settings"] = cluster_settings
__props__.__dict__["dedicated_host_count"] = dedicated_host_count
__props__.__dict__["dns_suffix"] = dns_suffix
__props__.__dict__["external_inbound_ip_addresses"] = external_inbound_ip_addresses
__props__.__dict__["inbound_network_dependencies"] = inbound_network_dependencies
__props__.__dict__["internal_inbound_ip_addresses"] = internal_inbound_ip_addresses
__props__.__dict__["internal_load_balancing_mode"] = internal_load_balancing_mode
__props__.__dict__["ip_ssl_address_count"] = ip_ssl_address_count
__props__.__dict__["linux_outbound_ip_addresses"] = linux_outbound_ip_addresses
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["pricing_tier"] = pricing_tier
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
__props__.__dict__["windows_outbound_ip_addresses"] = windows_outbound_ip_addresses
__props__.__dict__["zone_redundant"] = zone_redundant
return EnvironmentV3(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowNewPrivateEndpointConnections")
def allow_new_private_endpoint_connections(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether new Private Endpoint Connections are allowed for this App Service
    Environment V3. Defaults to `true`.
    """
    return pulumi.get(self, "allow_new_private_endpoint_connections")
@property
@pulumi.getter(name="clusterSettings")
def cluster_settings(self) -> pulumi.Output[Sequence['outputs.EnvironmentV3ClusterSetting']]:
    """
    Zero or more `cluster_setting` blocks configured on this App Service
    Environment V3.
    """
    return pulumi.get(self, "cluster_settings")
@property
@pulumi.getter(name="dedicatedHostCount")
def dedicated_host_count(self) -> pulumi.Output[Optional[int]]:
    """
    Number of dedicated Hosts used by this ASEv3. Possible values are `2`.
    Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "dedicated_host_count")
@property
@pulumi.getter(name="dnsSuffix")
def dns_suffix(self) -> pulumi.Output[str]:
    """
    The DNS suffix for this App Service Environment V3.
    """
    return pulumi.get(self, "dns_suffix")
@property
@pulumi.getter(name="externalInboundIpAddresses")
def external_inbound_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
    """
    The external inbound IP addresses of the App Service Environment V3.
    (The original docstring said "outbound", contradicting the property name.)
    """
    return pulumi.get(self, "external_inbound_ip_addresses")
@property
@pulumi.getter(name="inboundNetworkDependencies")
def inbound_network_dependencies(self) -> pulumi.Output[Sequence['outputs.EnvironmentV3InboundNetworkDependency']]:
    """
    Zero or more inbound network dependency blocks describing this App Service
    Environment V3's inbound network requirements.
    """
    return pulumi.get(self, "inbound_network_dependencies")
@property
@pulumi.getter(name="internalInboundIpAddresses")
def internal_inbound_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
    """
    The internal inbound IP addresses of the App Service Environment V3.
    (The original docstring said "outbound", contradicting the property name.)
    """
    return pulumi.get(self, "internal_inbound_ip_addresses")
@property
@pulumi.getter(name="internalLoadBalancingMode")
def internal_load_balancing_mode(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies which endpoints to serve internally in the Virtual Network for
    the App Service Environment. Possible values are `None` (for an External
    VIP Type), and `"Web, Publishing"` (for an Internal VIP Type). Defaults
    to `None`.
    """
    return pulumi.get(self, "internal_load_balancing_mode")
@property
@pulumi.getter(name="ipSslAddressCount")
def ip_ssl_address_count(self) -> pulumi.Output[int]:
    """
    The number of IP SSL addresses reserved for the App Service Environment V3.
    """
    return pulumi.get(self, "ip_ssl_address_count")
@property
@pulumi.getter(name="linuxOutboundIpAddresses")
def linux_outbound_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
    """
    Outbound addresses of Linux based Apps in this App Service Environment V3.
    """
    return pulumi.get(self, "linux_outbound_ip_addresses")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """
    The Azure location where the App Service Environment exists.
    """
    return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    The name of the App Service Environment. Changing this forces a new
    resource to be created.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="pricingTier")
def pricing_tier(self) -> pulumi.Output[str]:
    """
    The pricing tier for the front end instances of this App Service
    Environment V3.
    """
    return pulumi.get(self, "pricing_tier")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """
    The name of the Resource Group where the App Service Environment exists.
    Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
    """
    return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[str]:
    """
    The ID of the Subnet which the App Service Environment is connected to.
    Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "subnet_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    A mapping of tags assigned to the resource. Changing this forces a new
    resource to be created.
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="windowsOutboundIpAddresses")
def windows_outbound_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
    """
    Outbound addresses of Windows based Apps in this App Service Environment V3.
    """
    return pulumi.get(self, "windows_outbound_ip_addresses")
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether zone redundancy is enabled for this App Service Environment V3
    (inferred from the property name — the generated SDK provided no docstring).
    """
    return pulumi.get(self, "zone_redundant")
| 53.923761 | 291 | 0.696899 |
9883449d754b703140321f4eaa1bdbfbf1b7bc10 | 27,451 | py | Python | web/addons/mail/tests/test_mail_message.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/mail/tests/test_mail_message.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/mail/tests/test_mail_message.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailMail(TestMail):
    """Tests for partner resolution from raw email strings via mail_thread."""

    def test_00_partner_find_from_email(self):
        """ Tests designed for partner fetch based on emails. """
        cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs

        # --------------------------------------------------
        # Data creation
        # --------------------------------------------------
        # 1 - Partner ARaoul
        p_a_id = self.res_partner.create(cr, uid, {'name': 'ARaoul', 'email': 'test@test.fr'})

        # --------------------------------------------------
        # CASE1: without object
        # --------------------------------------------------

        # Do: find partner with email -> first partner should be found
        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['full_name'], 'Maybe Raoul <test@test.fr>',
                         'mail_thread: message_partner_info_from_emails did not handle email')
        self.assertEqual(partner_info['partner_id'], p_a_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')

        # Data: add some data about partners
        # 2 - User BRaoul (same email as ARaoul, but linked to a res.users record)
        p_b_id = self.res_partner.create(cr, uid, {'name': 'BRaoul', 'email': 'test@test.fr', 'user_ids': [(4, user_raoul.id)]})

        # Do: find partner with email -> the partner linked to a user wins over a plain partner
        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['partner_id'], p_b_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')

        # --------------------------------------------------
        # CASE2: with object (banner previously mislabeled CASE1)
        # --------------------------------------------------

        # Do: find partner in group where there is a follower with the email -> should be taken
        self.mail_group.message_subscribe(cr, uid, [group_pigs.id], [p_b_id])
        partner_info = self.mail_group.message_partner_info_from_emails(cr, uid, group_pigs.id, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['partner_id'], p_b_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')
class TestMailMessage(TestMail):
    """Tests for mail.message: generated email values (reply_to / message_id),
    search and read access rules, starring, read flags, voting, and a
    Chatter-like end-to-end flow exercising access rights."""

    def test_00_mail_message_values(self):
        """ Tests designed for testing email values based on mail.message, aliases, ... """
        cr, uid, user_raoul_id = self.cr, self.uid, self.user_raoul_id

        # Data: update + generic variables
        reply_to1 = '_reply_to1@example.com'
        reply_to2 = '_reply_to2@example.com'
        email_from1 = 'from@example.com'
        alias_domain = 'schlouby.fr'
        raoul_from = 'Raoul Grosbedon <raoul@raoul.fr>'
        raoul_from_alias = 'Raoul Grosbedon <raoul@schlouby.fr>'
        raoul_reply_alias = 'YourCompany Pigs <group+pigs@schlouby.fr>'

        # --------------------------------------------------
        # Case1: without alias_domain
        # --------------------------------------------------
        param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
        self.registry('ir.config_parameter').unlink(cr, uid, param_ids)

        # Do: free message; specified values > default values
        msg_id = self.mail_message.create(cr, user_raoul_id, {'no_auto_thread': True, 'reply_to': reply_to1, 'email_from': email_from1})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: message content
        self.assertIn('reply_to', msg.message_id,
                      'mail_message: message_id should be specific to a mail_message with a given reply_to')
        self.assertEqual(msg.reply_to, reply_to1,
                         'mail_message: incorrect reply_to: should come from values')
        self.assertEqual(msg.email_from, email_from1,
                         'mail_message: incorrect email_from: should come from values')

        # Do: create a mail_mail with the previous mail_message + specified reply_to
        mail_id = self.mail_mail.create(cr, user_raoul_id, {'mail_message_id': msg_id, 'state': 'cancel', 'reply_to': reply_to2})
        mail = self.mail_mail.browse(cr, user_raoul_id, mail_id)
        # Test: mail_mail content
        self.assertEqual(mail.reply_to, reply_to2,
                         'mail_mail: incorrect reply_to: should come from values')
        self.assertEqual(mail.email_from, email_from1,
                         'mail_mail: incorrect email_from: should come from mail.message')

        # Do: mail_message attached to a document
        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: message content
        self.assertIn('mail.group', msg.message_id,
                      'mail_message: message_id should contain model')
        self.assertIn('%s' % self.group_pigs_id, msg.message_id,
                      'mail_message: message_id should contain res_id')
        self.assertEqual(msg.reply_to, raoul_from,
                         'mail_message: incorrect reply_to: should be Raoul')
        self.assertEqual(msg.email_from, raoul_from,
                         'mail_message: incorrect email_from: should be Raoul')

        # --------------------------------------------------
        # Case2: with alias_domain, without catchall alias
        # --------------------------------------------------
        self.registry('ir.config_parameter').set_param(cr, uid, 'mail.catchall.domain', alias_domain)
        self.registry('ir.config_parameter').unlink(cr, uid, self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.alias')]))

        # Update message
        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, raoul_reply_alias,
                         'mail_mail: incorrect reply_to: should be Pigs alias')

        # Update message: test alias on email_from
        msg_id = self.mail_message.create(cr, user_raoul_id, {})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, raoul_from_alias,
                         'mail_mail: incorrect reply_to: should be message email_from using Raoul alias')

        # --------------------------------------------------
        # Case3: with alias_domain and catchall alias
        # (banner previously mislabeled Case2)
        # --------------------------------------------------
        self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.alias', 'gateway')

        # Update message
        msg_id = self.mail_message.create(cr, user_raoul_id, {})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, 'YourCompany <gateway@schlouby.fr>',
                         'mail_mail: reply_to should equal the catchall email alias')

        # Do: create a mail_mail
        mail_id = self.mail_mail.create(cr, uid, {'state': 'cancel', 'reply_to': 'someone@example.com'})
        mail = self.mail_mail.browse(cr, uid, mail_id)
        # Test: mail_mail content (note: typo "rpely_to" kept — it is a runtime string)
        self.assertEqual(mail.reply_to, 'someone@example.com',
                         'mail_mail: reply_to should equal the rpely_to given to create')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_10_mail_message_search_access_rights(self):
        """ Testing mail_message.search() using specific _search implementation """
        cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id
        # Data: comment subtype for mail.message creation
        ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
        subtype_id = ref and ref[1] or False

        # Data: Birds group, private
        group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})
        # Data: Raoul is member of Pigs
        self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])
        # Data: various author_ids, partner_ids, documents
        msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})
        msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
        msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})
        msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
        msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
        msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})
        msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})
        msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})

        # Test: Bert: 2 messages that have Bert in partner_ids
        msg_ids = self.mail_message.search(cr, self.user_bert_id, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')
        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)
        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])
        self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')
        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
        # Test: Admin: all messages
        msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_15_mail_message_check_access_rule(self):
        """ Testing mail_message.check_access_rule() """
        cr, uid = self.cr, self.uid
        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id

        # Prepare groups: Pigs (employee), Jobs (public)
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
        priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')

        # prepare an attachment
        attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})

        # ----------------------------------------
        # CASE1: read
        # ----------------------------------------

        # Do: create a new mail.message
        message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})

        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is pushed to Bert
        notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})
        # Test: Bert reads the message, ok because notification pushed
        self.mail_message.read(cr, user_bert_id, message_id)
        # Test: Bert downloads attachment, ok because he can read message
        self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
        # Do: remove notification
        self.mail_notification.unlink(cr, uid, notif_id)
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, self.user_bert_id, message_id)
        # Test: Bert downloads attachment, crash because he can't read message
        with self.assertRaises(except_orm):
            self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
        # Do: Bert is now the author
        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})
        # Test: Bert reads the message, ok because Bert is the author
        self.mail_message.read(cr, user_bert_id, message_id)
        # Do: Bert is not the author anymore
        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is attached to a document Bert can read, Jobs
        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})
        # Test: Bert reads the message, ok because linked to a doc he is allowed to read
        self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is attached to a document Bert cannot read, Pigs
        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)

        # ----------------------------------------
        # CASE2: create
        # ----------------------------------------

        # Do: Bert creates a message on Pigs -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})
        # Do: Bert create a message on Jobs -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
        # Do: Bert create a private message -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'body': 'Test'})
        # Do: Raoul creates a message on Jobs -> ok, write access to the related document
        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
        # Do: Raoul creates a message on Priv -> ko, no write access to the related document
        with self.assertRaises(except_orm):
            self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})
        # Do: Raoul creates a private message -> ok
        self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})
        # Do: Raoul creates a reply to a message on Priv -> ko
        with self.assertRaises(except_orm):
            self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
        # Do: Raoul creates a reply to a message on Priv-> ok if has received parent
        self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})
        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})

    def test_20_message_set_star(self):
        """ Tests for starring messages and its related access rights """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin stars msg
        self.mail_message.set_message_starred(cr, uid, [msg.id], True)
        msg.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
        # Test: notification starred
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif.starred, 'mail_notification starred failed')
        self.assertTrue(msg.starred, 'mail_message starred failed')

        # Do: Raoul stars msg
        self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)
        msg_raoul.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
        # Test: notification starred
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif.starred, 'mail_notification starred failed')
        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')

        # Do: Admin unstars msg
        self.mail_message.set_message_starred(cr, uid, [msg.id], False)
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg unstarred for Admin, starred for Raoul
        self.assertFalse(msg.starred, 'mail_message starred failed')
        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')

    def test_30_message_set_read(self):
        """ Tests for reading messages and its related access rights """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin reads msg
        self.mail_message.set_message_read(cr, uid, [msg.id], True)
        msg.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
        # Test: notification read
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif['is_read'], 'mail_notification read failed')
        self.assertFalse(msg.to_read, 'mail_message read failed')

        # Do: Raoul reads msg
        self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)
        msg_raoul.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
        # Test: notification read
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif['is_read'], 'mail_notification starred failed')
        self.assertFalse(msg_raoul.to_read, 'mail_message starred failed')

        # Do: Admin unreads msg
        self.mail_message.set_message_read(cr, uid, [msg.id], False)
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg unread for Admin, read for Raoul
        self.assertTrue(msg.to_read, 'mail_message read failed')
        self.assertFalse(msg_raoul.to_read, 'mail_message read failed')

    def test_40_message_vote(self):
        """ Test designed for the vote/unvote feature. """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin vote for msg
        self.mail_message.vote_toggle(cr, uid, [msg.id])
        msg.refresh()
        # Test: msg has Admin as voter
        self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')
        # Do: Raoul votes for msg (comment previously said "Bert", but the call uses user_raoul_id)
        self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])
        msg_raoul.refresh()
        # Test: msg has Admin and Raoul as voters (assertion messages keep the original "Bert" wording — they are runtime strings)
        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Bert should be in the voters')
        # Do: Admin unvote for msg
        self.mail_message.vote_toggle(cr, uid, [msg.id])
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg has Raoul as only remaining voter
        self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')
        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_50_mail_flow_access_rights(self):
        """ Test a Chatter-looks alike flow to test access rights """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')
        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id

        # Prepare groups: Pigs (employee), Jobs (public)
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[self.partner_admin_id])
        jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[self.partner_admin_id])

        # ----------------------------------------
        # CASE1: Bert, without groups
        # ----------------------------------------
        # Do: Bert reads Jobs basic fields, ok because public = read access on the group
        self.mail_group.read(cr, user_bert_id, [self.group_jobs_id], ['name', 'description'])
        # Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages
        jobs_message_ids = self.mail_group.read(cr, user_bert_id, [self.group_jobs_id], ['message_ids'])[0]['message_ids']
        self.mail_message.read(cr, user_bert_id, jobs_message_ids)
        # Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)
        bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)
        trigger_read = bert_jobs.name
        for message in bert_jobs.message_ids:
            trigger_read = message.subject
        for partner in bert_jobs.message_follower_ids:
            with self.assertRaises(AccessError):
                trigger_read = partner.name
        # Do: Bert comments Jobs, ko because no creation right
        with self.assertRaises(AccessError):
            self.mail_group.message_post(cr, user_bert_id, self.group_jobs_id, body='I love Pigs')
        # Do: Bert writes on its own profile, ko because no message create access
        # NOTE(review): the indentation of this with-block was lost in transit;
        # both statements are placed inside it (matching the upstream source),
        # which makes the second one unreachable once the first raises — confirm.
        with self.assertRaises(AccessError):
            self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')
            self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')

        # ----------------------------------------
        # CASE2: Raoul, employee
        # ----------------------------------------
        # Do: Raoul browses Jobs -> ok, ok for message_ids, of for message_follower_ids
        raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)
        trigger_read = raoul_jobs.name
        for message in raoul_jobs.message_ids:
            trigger_read = message.subject
        for partner in raoul_jobs.message_follower_ids:
            trigger_read = partner.name
        # Do: Raoul comments Jobs, ok
        self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')
        # Do: Raoul create a mail.compose.message record on Jobs, because he uses the wizard
        compose_id = mail_compose.create(cr, user_raoul_id,
                                         {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
                                         {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})
        mail_compose.send_mail(cr, user_raoul_id, [compose_id])
        # Do: Raoul replies to a Jobs message using the composer
        compose_id = mail_compose.create(cr, user_raoul_id,
                                         {'subject': 'Subject', 'body': 'Body text'},
                                         {'default_composition_mode': 'comment', 'default_parent_id': pigs_msg_id})
        mail_compose.send_mail(cr, user_raoul_id, [compose_id])
| 61.274554 | 217 | 0.641106 |
eb6c581882ba628589ac0d99b6740b864dc68cb6 | 16,350 | py | Python | mbrl/util/mujoco.py | eanswer/mbrl-lib | 576f4bea148bb674c79e85da51dccde50409d6a3 | [
"MIT"
] | null | null | null | mbrl/util/mujoco.py | eanswer/mbrl-lib | 576f4bea148bb674c79e85da51dccde50409d6a3 | [
"MIT"
] | null | null | null | mbrl/util/mujoco.py | eanswer/mbrl-lib | 576f4bea148bb674c79e85da51dccde50409d6a3 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, Union, cast
import gym
import gym.wrappers
import hydra
import numpy as np
import omegaconf
import torch
import mbrl.planning
import mbrl.types
def _get_term_and_reward_fn(
    cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
    """Resolve the termination and reward callables named in ``cfg.overrides``.

    The termination function is mandatory and looked up by name
    (``cfg.overrides.term_fn``) in ``mbrl.env.termination_fns``. The reward
    function is optional: an explicitly configured ``cfg.overrides.reward_fn``
    wins; otherwise a reward function sharing the termination function's name
    is used when one exists, else ``None`` is returned.
    """
    # Local import, kept inside the function as in the original module layout.
    import mbrl.env

    term_fn = getattr(mbrl.env.termination_fns, cfg.overrides.term_fn)

    explicit_reward = (
        hasattr(cfg.overrides, "reward_fn") and cfg.overrides.reward_fn is not None
    )
    if explicit_reward:
        reward_fn = getattr(mbrl.env.reward_fns, cfg.overrides.reward_fn)
    else:
        reward_fn = getattr(mbrl.env.reward_fns, cfg.overrides.term_fn, None)
    return term_fn, reward_fn
def _handle_learned_rewards_and_seed(
    cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
    env: gym.Env,
    reward_fn: mbrl.types.RewardFnType,
) -> Tuple[gym.Env, mbrl.types.RewardFnType]:
    """Apply the ``learned_rewards`` override and seed the environment.

    When ``cfg.overrides.learned_rewards`` is true (the default), the given
    reward function is discarded (returned as ``None``) so rewards are
    learned from data instead. When ``cfg.seed`` is set, the environment and
    its observation/action spaces are seeded with distinct offsets.
    """
    learn_rewards = cfg.overrides.get("learned_rewards", True)
    if learn_rewards:
        reward_fn = None

    seed = cfg.seed
    if seed is not None:
        env.seed(seed)
        env.observation_space.seed(seed + 1)
        env.action_space.seed(seed + 2)

    return env, reward_fn
def _legacy_make_env(
    cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[gym.Env, mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
    # Legacy string-based environment factory. ``cfg.overrides.env`` selects:
    #   * "dmcontrol___<domain>--<task>" -> dm_control via the bundled dmc2gym
    #   * "gym___<env_id>"              -> a registered gym environment
    #   * otherwise                      -> one of mbrl's bundled environments
    # Returns (env, termination_fn, reward_fn); reward_fn may be None.
    if "dmcontrol___" in cfg.overrides.env:
        import mbrl.third_party.dmc2gym as dmc2gym

        domain, task = cfg.overrides.env.split("___")[1].split("--")
        term_fn, reward_fn = _get_term_and_reward_fn(cfg)
        env = dmc2gym.make(domain_name=domain, task_name=task)
    elif "gym___" in cfg.overrides.env:
        env = gym.make(cfg.overrides.env.split("___")[1])
        term_fn, reward_fn = _get_term_and_reward_fn(cfg)
    else:
        import mbrl.env.mujoco_envs

        # Bundled environments: termination/reward functions are hard-coded
        # per environment rather than read from the config.
        if cfg.overrides.env == "cartpole_continuous":
            env = mbrl.env.cartpole_continuous.CartPoleEnv()
            term_fn = mbrl.env.termination_fns.cartpole
            reward_fn = mbrl.env.reward_fns.cartpole
        elif cfg.overrides.env == "cartpole_pets_version":
            env = mbrl.env.mujoco_envs.CartPoleEnv()
            term_fn = mbrl.env.termination_fns.no_termination
            reward_fn = mbrl.env.reward_fns.cartpole_pets
        elif cfg.overrides.env == "pets_halfcheetah":
            env = mbrl.env.mujoco_envs.HalfCheetahEnv()
            term_fn = mbrl.env.termination_fns.no_termination
            reward_fn = getattr(mbrl.env.reward_fns, "halfcheetah", None)
        elif cfg.overrides.env == "pets_reacher":
            env = mbrl.env.mujoco_envs.Reacher3DEnv()
            term_fn = mbrl.env.termination_fns.no_termination
            reward_fn = None
        elif cfg.overrides.env == "pets_pusher":
            env = mbrl.env.mujoco_envs.PusherEnv()
            term_fn = mbrl.env.termination_fns.no_termination
            reward_fn = mbrl.env.reward_fns.pusher
        elif cfg.overrides.env == "ant_truncated_obs":
            env = mbrl.env.mujoco_envs.AntTruncatedObsEnv()
            term_fn = mbrl.env.termination_fns.ant
            reward_fn = None
        elif cfg.overrides.env == "dflex_ant":
            env = mbrl.env.mujoco_envs.DflexAntEnv()
            term_fn = mbrl.env.termination_fns.dflex_ant
            reward_fn = None
        elif cfg.overrides.env == "humanoid_truncated_obs":
            env = mbrl.env.mujoco_envs.HumanoidTruncatedObsEnv()
            # NOTE(review): reuses the `ant` termination fn for the humanoid —
            # confirm this matches the intended failure modes upstream.
            term_fn = mbrl.env.termination_fns.ant
            reward_fn = None
        else:
            raise ValueError("Invalid environment string.")
        # Bundled envs are wrapped with a step limit (default 1000 steps).
        env = gym.wrappers.TimeLimit(
            env, max_episode_steps=cfg.overrides.get("trial_length", 1000)
        )

    env, reward_fn = _handle_learned_rewards_and_seed(cfg, env, reward_fn)
    return env, term_fn, reward_fn
def make_env(
    cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[gym.Env, mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
    """Creates an environment from a given OmegaConf configuration object.

    If ``cfg.overrides.env_cfg`` is present, the environment is built with
    ``hydra.utils.instantiate``; otherwise the legacy string description in
    ``cfg.overrides.env`` is used, with valid options:

        - "dmcontrol___<domain>--<task>": a Deep-Mind Control suite environment
          with the indicated domain and task (e.g., "dmcontrol___cheetah--run".
        - "gym___<env_name>": a Gym environment (e.g., "gym___HalfCheetah-v2").
        - "cartpole_continuous": a continuous version of gym's Cartpole environment.
        - "pets_halfcheetah": the implementation of HalfCheetah used in Chua et al.,
          PETS paper.
        - "ant_truncated_obs": the implementation of Ant environment used in Janner et al.,
          MBPO paper.
        - "humanoid_truncated_obs": the implementation of Humanoid environment used in
          Janner et al., MBPO paper.

    Other attributes read from the configuration:

        - ``cfg.overrides.term_fn``: name of the termination function in
          :mod:`mbrl.env.termination_fns` (dmcontrol/gym environments only).
        - ``cfg.overrides.reward_fn``: name of the reward function in
          :mod:`mbrl.env.reward_fns`; when absent, ``term_fn`` is tried as the
          name, and ``None`` is used if that fails too.
        - ``cfg.overrides.learned_rewards``: when present and true, the
          returned reward function is ``None`` (rewards will be learned).
        - ``cfg.overrides.trial_length``: maximum trial length (default 1000).

    Args:
        cfg (omegaconf.DictConf): the configuration to use.

    Returns:
        (tuple of env, termination function, reward function): the new
        environment, the termination function, and the reward function
        (``None`` if ``cfg.learned_rewards == True``).
    """
    env_cfg = cfg.overrides.get("env_cfg", None)
    if env_cfg is None:
        # No structured env config given: fall back to the legacy string API.
        return _legacy_make_env(cfg)
    env = gym.wrappers.TimeLimit(
        hydra.utils.instantiate(env_cfg),
        max_episode_steps=cfg.overrides.get("trial_length", 1000),
    )
    term_fn, reward_fn = _get_term_and_reward_fn(cfg)
    env, reward_fn = _handle_learned_rewards_and_seed(cfg, env, reward_fn)
    return env, term_fn, reward_fn
def make_env_from_str(env_name: str) -> gym.Env:
    """Creates a new environment from its string description.

    Args:
        env_name (str): the string description of the environment. Valid options are:

          - "dmcontrol___<domain>--<task>": a Deep-Mind Control suite environment
            with the indicated domain and task (e.g., "dmcontrol___cheetah--run".
          - "gym___<env_name>": a Gym environment (e.g., "gym___HalfCheetah-v2").
          - "cartpole_continuous": a continuous version of gym's Cartpole environment.
          - "pets_halfcheetah": the implementation of HalfCheetah used in Chua et al.,
            PETS paper.
          - "ant_truncated_obs": the implementation of Ant environment used in Janner et al.,
            MBPO paper.
          - "humanoid_truncated_obs": the implementation of Humanoid environment used in
            Janner et al., MBPO paper.

    Returns:
        (gym.Env): the created environment, wrapped in a 1000-step time limit.
    """
    if "dmcontrol___" in env_name:
        import mbrl.third_party.dmc2gym as dmc2gym

        domain, task = env_name.split("___")[1].split("--")
        env = dmc2gym.make(domain_name=domain, task_name=task)
    elif "gym___" in env_name:
        env = gym.make(env_name.split("___")[1])
    else:
        import mbrl.env.mujoco_envs

        # Dispatch table from name to (lazily evaluated) constructor.
        builders = {
            "cartpole_continuous": lambda: mbrl.env.cartpole_continuous.CartPoleEnv(),
            "pets_cartpole": lambda: mbrl.env.mujoco_envs.CartPoleEnv(),
            "pets_halfcheetah": lambda: mbrl.env.mujoco_envs.HalfCheetahEnv(),
            "pets_reacher": lambda: mbrl.env.mujoco_envs.Reacher3DEnv(),
            "pets_pusher": lambda: mbrl.env.mujoco_envs.PusherEnv(),
            "ant_truncated_obs": lambda: mbrl.env.mujoco_envs.AntTruncatedObsEnv(),
            "humanoid_truncated_obs": lambda: mbrl.env.mujoco_envs.HumanoidTruncatedObsEnv(),
        }
        if env_name not in builders:
            raise ValueError("Invalid environment string.")
        env = builders[env_name]()
    return gym.wrappers.TimeLimit(env, max_episode_steps=1000)
class freeze_mujoco_env:
    """Provides a context to freeze a Mujoco environment.

    This context allows the user to manipulate the state of a Mujoco environment and return it
    to its original state upon exiting the context.

    Works with mujoco gym and dm_control environments
    (with `dmc2gym <https://github.com/denisyarats/dmc2gym>`_).

    Example usage:

    .. code-block:: python

       env = gym.make("HalfCheetah-v2")
       env.reset()
       action = env.action_space.sample()
       # o1_expected, *_ = env.step(action)
       with freeze_mujoco_env(env):
           step_the_env_a_bunch_of_times()
       o1, *_ = env.step(action) # o1 will be equal to what o1_expected would have been

    Args:
        env (:class:`gym.wrappers.TimeLimit`): the environment to freeze.
    """

    def __init__(self, env: gym.wrappers.TimeLimit):
        self._env = env
        # Saved state: (qpos, qvel) tuple for mujoco-gym envs, a flat physics
        # state vector for dm_control envs.
        self._init_state: Optional[Union[Tuple[np.ndarray, np.ndarray], np.ndarray]] = None
        # TimeLimit step counter saved/restored so the wrapper's horizon is unaffected.
        self._elapsed_steps = 0
        # dm_control's internal step counter (unused for mujoco-gym envs).
        self._step_count = 0
        # Pick the save/restore strategy once, based on the wrapped env's type.
        if _is_mujoco_gym_env(env):
            self._enter_method = self._enter_mujoco_gym
            self._exit_method = self._exit_mujoco_gym
        elif "mbrl.third_party.dmc2gym" in self._env.env.__class__.__module__:
            self._enter_method = self._enter_dmcontrol
            self._exit_method = self._exit_dmcontrol
        else:
            raise RuntimeError("Tried to freeze an unsupported environment.")

    def _enter_mujoco_gym(self):
        # Copy positions and velocities; copies are needed since mujoco reuses buffers.
        self._init_state = (
            self._env.env.data.qpos.ravel().copy(),
            self._env.env.data.qvel.ravel().copy(),
        )
        self._elapsed_steps = self._env._elapsed_steps

    def _exit_mujoco_gym(self):
        self._env.set_state(*self._init_state)
        self._env._elapsed_steps = self._elapsed_steps

    def _enter_dmcontrol(self):
        self._init_state = self._env.env._env.physics.get_state().copy()
        self._elapsed_steps = self._env._elapsed_steps
        self._step_count = self._env.env._env._step_count

    def _exit_dmcontrol(self):
        # reset_context() is required by dm_control to safely overwrite physics state.
        with self._env.env._env.physics.reset_context():
            self._env.env._env.physics.set_state(self._init_state)
        self._env._elapsed_steps = self._elapsed_steps
        self._env.env._env._step_count = self._step_count

    def __enter__(self):
        return self._enter_method()

    def __exit__(self, *_args):
        return self._exit_method()
# Include the mujoco environments in mbrl.env
def _is_mujoco_gym_env(env: gym.wrappers.TimeLimit) -> bool:
    """Returns True if the wrapped environment is a mujoco-based gym env.

    Either a stock ``gym.envs.mujoco`` environment, or one of mbrl's own
    environments that exposes mujoco's ``data`` attribute.
    """
    module_name = env.env.__class__.__module__
    if "gym.envs.mujoco" in module_name:
        return True
    return "mbrl.env." in module_name and hasattr(env.env, "data")
def get_current_state(env: gym.wrappers.TimeLimit) -> Tuple:
    """Returns the internal state of the environment.

    Returns a tuple with information that can be passed to :func:`set_env_state`
    to manually set the environment (or a copy of it) to the same state it had
    when this function was called.

    Works with mujoco gym and dm_control environments
    (with `dmc2gym <https://github.com/denisyarats/dmc2gym>`_).

    Args:
        env (:class:`gym.wrappers.TimeLimit`): the environment.

    Returns:
        (tuple): For mujoco gym environments, the internal state
        (position and velocity) and the number of elapsed steps so far. For
        dm_control environments, `physics.get_state().copy()`, elapsed steps
        and step_count.
    """
    if _is_mujoco_gym_env(env):
        qpos = env.env.data.qpos.ravel().copy()
        qvel = env.env.data.qvel.ravel().copy()
        return (qpos, qvel), env._elapsed_steps
    if "mbrl.third_party.dmc2gym" in env.env.__class__.__module__:
        physics_state = env.env._env.physics.get_state().copy()
        return physics_state, env._elapsed_steps, env.env._env._step_count
    raise NotImplementedError(
        "Only gym mujoco and dm_control environments supported."
    )
def set_env_state(state: Tuple, env: gym.wrappers.TimeLimit):
    """Sets the state of the environment.

    Assumes ``state`` was generated using :func:`get_current_state`.

    Works with mujoco gym and dm_control environments
    (with `dmc2gym <https://github.com/denisyarats/dmc2gym>`_).

    Args:
        state (tuple): see :func:`get_current_state` for a description.
        env (:class:`gym.wrappers.TimeLimit`): the environment.
    """
    if _is_mujoco_gym_env(env):
        env.set_state(*state[0])
        env._elapsed_steps = state[1]
        return
    if "mbrl.third_party.dmc2gym" in env.env.__class__.__module__:
        # dm_control requires reset_context() to overwrite physics state safely.
        with env.env._env.physics.reset_context():
            env.env._env.physics.set_state(state[0])
        env._elapsed_steps = state[1]
        env.env._env._step_count = state[2]
        return
    raise NotImplementedError(
        "Only gym mujoco and dm_control environments supported."
    )
def rollout_mujoco_env(
    env: gym.wrappers.TimeLimit,
    initial_obs: np.ndarray,
    lookahead: int,
    agent: Optional[mbrl.planning.Agent] = None,
    plan: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Runs the environment for some number of steps then returns it to its original state.

    Works with mujoco gym and dm_control environments
    (with `dmc2gym <https://github.com/denisyarats/dmc2gym>`_).

    Args:
        env (:class:`gym.wrappers.TimeLimit`): the environment.
        initial_obs (np.ndarray): the latest observation returned by the environment (only
            needed when ``agent is not None``, to get the first action).
        lookahead (int): the number of steps to run. If ``plan is not None``,
            it is overridden by `len(plan)`.
        agent (:class:`mbrl.planning.Agent`, optional): if given, an agent to obtain actions.
        plan (sequence of np.ndarray, optional): if given, a sequence of actions to execute.
            Takes precedence over ``agent`` when both are given.

    Returns:
        (tuple of np.ndarray): the observations, rewards, and actions observed, respectively.
    """
    actions = []
    real_obses = []
    rewards = []
    # freeze_mujoco_env restores the env's physics state and step counters on exit,
    # so this rollout leaves the environment unchanged for the caller.
    with freeze_mujoco_env(cast(gym.wrappers.TimeLimit, env)):
        current_obs = initial_obs.copy()
        real_obses.append(current_obs)
        if plan is not None:
            # A given plan fixes the rollout length, overriding `lookahead`.
            lookahead = len(plan)
        for i in range(lookahead):
            # Plan takes precedence over the agent when both are provided.
            a = plan[i] if plan is not None else agent.act(current_obs)
            if isinstance(a, torch.Tensor):
                a = a.numpy()
            next_obs, reward, done, _ = env.step(a)
            actions.append(a)
            real_obses.append(next_obs)
            rewards.append(reward)
            if done:
                # Early termination: arrays may be shorter than `lookahead`.
                break
            current_obs = next_obs
    # Note: real_obses has one more entry than rewards/actions (the initial obs).
    return np.stack(real_obses), np.stack(rewards), np.stack(actions)
| 40.671642 | 99 | 0.66367 |
c7981630e74b71834406fa968a115493be35170e | 26,300 | py | Python | mom6_tools/m6toolbox.py | NCAR/mom6-tools | e5a605eab97013a421a7ea8a93ae950f9e429730 | [
"Apache-2.0"
] | 8 | 2019-06-18T22:47:07.000Z | 2022-02-10T16:22:47.000Z | mom6_tools/m6toolbox.py | NCAR/mom6-tools | e5a605eab97013a421a7ea8a93ae950f9e429730 | [
"Apache-2.0"
] | 5 | 2019-10-25T20:53:45.000Z | 2020-10-06T18:56:46.000Z | mom6_tools/m6toolbox.py | NCAR/mom6-tools | e5a605eab97013a421a7ea8a93ae950f9e429730 | [
"Apache-2.0"
] | 8 | 2019-06-03T20:53:39.000Z | 2021-12-10T22:41:58.000Z | """
A collection of useful functions...
"""
import numpy as np
import numpy.ma as ma
import tarfile
from scipy.io import netcdf
import xarray as xr
from collections import OrderedDict
import warnings
def check_time_interval(ti, tf, nc):
  '''Check that the selected [ti, tf] interval lies inside the dataset's time_bnds range.'''
  t_min = nc.time_bnds.min()
  t_max = nc.time_bnds.max()
  if (ti < t_min) or (tf > t_max):
    print('Start/End times = ', t_min, t_max)
    raise SyntaxError('Selected start/end years outside the range of the dataset. Please fix that and run again.')
  return
def add_global_attrs(ds, attrs):
  """
  Adds global attributes to a xarray dataset or dataarray, in place.

  Also stamps a ``generated_by`` attribute recording user, tool and date.

  Parameters
  ----------
  ds : xarray.Dataset or xarray.DataArray
    Object whose ``attrs`` mapping receives the new attributes (mutated in place).
  attrs : dict
    Attributes to add. Keys are converted to strings.

  Returns
  -------
  None
    The input ``ds`` is modified in place (the old docstring incorrectly
    advertised a returned dataset).
  """
  import getpass
  from datetime import date

  # Iterate key/value pairs directly: the previous code looked values up with
  # attrs[str(k)], which raised KeyError whenever a key was not already a string.
  for key, value in attrs.items():
    ds.attrs[str(key)] = value
  ds.attrs['generated_by'] = getpass.getuser() + ' using mom6-tools, on ' + str(date.today())
  return
def shiftgrid(lon0, datain, lonsin, start=True, cyclic=360.0):
  """
  Shift global lat/lon grid east or west.

  .. tabularcolumns:: |l|L|

  ==============   ====================================================
  Arguments        Description
  ==============   ====================================================
  lon0             starting longitude for shifted grid
                   (ending longitude if start=False). lon0 must be on
                   input grid (within the range of lonsin).
  datain           original data with longitude the right-most
                   dimension.
  lonsin           original longitudes.
  ==============   ====================================================

  .. tabularcolumns:: |l|L|

  ==============   ====================================================
  Keywords         Description
  ==============   ====================================================
  start            if True, lon0 represents the starting longitude
                   of the new grid. if False, lon0 is the ending
                   longitude. Default True.
  cyclic           width of periodic domain (default 360)
  ==============   ====================================================

  returns ``dataout,lonsout`` (data and longitudes on shifted grid).
  """
  # Does the input carry a duplicated cyclic end point?  If so, skip it when
  # copying the leading segment; otherwise use all of the data.
  has_cyclic_point = np.fabs(lonsin[-1] - lonsin[0] - cyclic) <= 1.e-4
  start_idx = 1 if has_cyclic_point else 0
  if lon0 < lonsin[0] or lon0 > lonsin[-1]:
    raise ValueError('lon0 outside of range of lonsin')
  # Index of the grid longitude closest to lon0, and the size of the segment
  # that moves to the front of the shifted arrays.
  i0 = np.argmin(np.fabs(lonsin - lon0))
  i0_shift = len(lonsin) - i0
  # Preserve masked-array-ness (and dtype) of the inputs.
  dataout = (ma.zeros if ma.isMA(datain) else np.zeros)(datain.shape, datain.dtype)
  lonsout = (ma.zeros if ma.isMA(lonsin) else np.zeros)(lonsin.shape, lonsin.dtype)
  if start:
    lonsout[0:i0_shift] = lonsin[i0:]
    lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx] + cyclic
  else:
    lonsout[0:i0_shift] = lonsin[i0:] - cyclic
    lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx]
  dataout[..., 0:i0_shift] = datain[..., i0:]
  dataout[..., i0_shift:] = datain[..., start_idx:i0 + start_idx]
  return dataout, lonsout
def request_workers(nw):
  '''
  Request ``nw`` dask workers from an NCAR cluster.

  If nw > 0 (and ncar_jobqueue/dask are importable), requests nw workers and
  returns (True, cluster, client).  Otherwise returns (False, None, None) and
  the caller should run in serial.
  '''
  if nw > 0:
    try:
      from ncar_jobqueue import NCARCluster
      import dask
      from dask.distributed import Client
    except:
      # Missing optional dependencies: fall back to serial execution.
      nw = 0
      warnings.warn("Unable to import the following: ncar_jobqueue, dask and dask.distributed. \
                    The script will run in serial. Please install these modules if you want \
                    to run in parallel.")
  if nw <= 0:
    print('No workers requested... \n')
    return False, None, None
  print('Requesting {} workers... \n'.format(nw))
  cluster = NCARCluster(project='NCGD0011')
  cluster.scale(nw)
  dask.config.set({'distributed.dashboard.link': '/proxy/{port}/status'})
  client = Client(cluster)
  print(cluster.dashboard_link)
  return True, cluster, client
def section2quadmesh(x, z, q, representation='pcm'):
  """
  Creates the appropriate quadmesh coordinates to plot a scalar q(1:nk,1:ni) at
  horizontal positions x(1:ni+1) and between interfaces at z(nk+1,ni), using
  various representations of the topography.

  Returns X(2*ni+1), Z(nk+1,2*ni+1) and Q(nk,2*ni) to be passed to pcolormesh.

  TBD: Optionally, x can be dimensioned as x(ni) in which case it will be extraplated as if it had
  had dimensions x(ni+1).

  Optional argument:

  representation='pcm' (default) yields a step-wise visualization, appropriate for
           z-coordinate models.
  representation='plm' yields a piecewise-linear visualization more representative
           of general-coordinate (and isopycnal) models.
  representation='linear' is the aesthetically most pleasing but does not
           represent the data conservatively.

  Note: masked values in z are zeroed IN PLACE, and masked values in q are
  replaced by the minimum of q (also in place); pass copies if that matters.
  """
  if x.ndim!=1: raise Exception('The x argument must be a vector')
  if z.ndim!=2: raise Exception('The z argument should be a 2D array')
  if q.ndim!=2: raise Exception('The z argument should be a 2D array')
  qnk, qni = q.shape
  znk, zni = z.shape
  xni = x.size
  if zni!=qni: raise Exception('The last dimension of z and q must be equal in length')
  if znk!=qnk+1: raise Exception('The first dimension of z must be 1 longer than that of q. q has %i levels'%qnk)
  if xni!=qni+1: raise Exception('The length of x must 1 longer than the last dimension of q')
  # Flatten masks so plotting code below sees plain values (mutates inputs).
  if type( z ) == np.ma.core.MaskedArray: z[z.mask] = 0
  if type( q ) == np.ma.core.MaskedArray: qmin = np.amin(q); q[q.mask] = qmin
  periodicDomain = abs((x[-1]-x[0])-360. ) < 1e-6 # Detect if horizontal axis is a periodic domain
  if representation=='pcm':
    # Piecewise constant: duplicate each column edge-to-edge.
    X = np.zeros((2*qni))
    X[::2] = x[:-1]
    X[1::2] = x[1:]
    Z = np.zeros((qnk+1,2*qni))
    Z[:,::2] = z
    Z[:,1::2] = z
    Q = np.zeros((qnk,2*qni-1))
    Q[:,::2] = q
    Q[:,1::2] = ( q[:,:-1] + q[:,1:] )/2.
  elif representation=='linear':
    # Linear: keep cell centers and insert interpolated values at cell edges.
    X = np.zeros((2*qni+1))
    X[::2] = x
    X[1::2] = ( x[0:-1] + x[1:] )/2.
    Z = np.zeros((qnk+1,2*qni+1))
    Z[:,1::2] = z
    Z[:,2:-1:2] = ( z[:,0:-1] + z[:,1:] )/2.
    Z[:,0] = z[:,0]
    Z[:,-1] = z[:,-1]
    Q = np.zeros((qnk,2*qni))
    Q[:,::2] = q
    Q[:,1::2] = q
  elif representation=='plm':
    # Piecewise linear (PLM) reconstruction of the interface heights.
    X = np.zeros((2*qni))
    X[::2] = x[:-1]
    X[1::2] = x[1:]
    # PLM reconstruction for Z
    dz = np.roll(z,-1,axis=1) - z # Right-sided difference
    if not periodicDomain: dz[:,-1] = 0 # Non-periodic boundary
    # (A previous version recomputed the centered difference from raw z here,
    # immediately overwriting it; that dead statement has been removed.)
    d2 = ( dz + np.roll(dz,1,axis=1) )/2. # Centered difference
    s = np.sign( d2 ) # Sign of centered slope
    s[dz * np.roll(dz,1,axis=1) <= 0] = 0 # Flatten extrema
    dz = np.abs(dz) # Only need magnitude from here on
    S = s * np.minimum( np.abs(d2), np.minimum( dz, np.roll(dz,1,axis=1) ) ) # PLM slope
    Z = np.zeros((qnk+1,2*qni))
    Z[:,::2] = z - S/2.
    Z[:,1::2] = z + S/2.
    Q = np.zeros((qnk,2*qni-1))
    Q[:,::2] = q
    Q[:,1::2] = ( q[:,:-1] + q[:,1:] )/2.
  else: raise Exception('Unknown representation!')
  return X, Z, Q
def get_z(rg, depth, var_name):
  """Returns 3d interface positions from netcdf group rg, based on dimension data for variable var_name

  Works with both scipy/netCDF4 groups and xarray Datasets.  The result has
  shape (nk, nj, ni) where nk is the length of the vertical coordinate and
  (nj, ni) is depth.shape; each level is clipped to the (positive) bottom depth.
  """
  if 'e' in rg.variables: # First try native approach
    if len(rg.variables['e'])==3: return rg.variables['e'][:]
    elif len(rg.variables['e'])==4: return rg.variables['e'][0]
  if var_name not in rg.variables: raise Exception('Variable "'+var_name+'" not found in netcdf file')
  if len(rg.variables[var_name].shape)<3: raise Exception('Variable "'+var_name+'" must have 3 or more dimensions')
  # Name of the vertical dimension (third from the right) of the variable.
  try: vdim = rg.variables[var_name].dimensions[-3]
  # handle xarray dataset, dimensions = dims
  except: vdim = rg.variables[var_name].dims[-3]
  if vdim not in rg.variables: raise Exception('Variable "'+vdim+'" should be a [CF] dimension variable but is missing')
  #if 'edges' in rg.variables[vdim].ncattrs():
  # Prefer the CF "edges" attribute; otherwise fall back to common MOM6 names.
  try: zvar = getattr(rg.variables[vdim],'edges')
  except:
    if 'zw' in rg.variables: zvar = 'zw'
    elif 'zl' in rg.variables: zvar = 'zl'
    elif 'z_l' in rg.variables: zvar = 'z_l'
    else: raise Exception('Cannot figure out vertical coordinate from variable "'+var_name+'"')
  if not len(rg.variables[zvar].shape)==1: raise Exception('Variable "'+zvar+'" was expected to be 1d')
  if type(rg) == xr.core.dataset.Dataset:
    zw = rg[zvar][:].data
  else:
    zw = rg.variables[zvar][:]
  # Broadcast the 1d coordinate to 3d, limiting each level by the bottom depth
  # (interfaces are negative-down; depth is positive).
  Zmod = np.zeros((zw.shape[0], depth.shape[0], depth.shape[1] ))
  for k in range(zw.shape[0]):
    Zmod[k] = -np.minimum( depth, abs(zw[k]) )
  return Zmod
def rho_Wright97(S, T, P=0):
  """
  Returns the density of seawater for the given salinity, potential temperature
  and pressure, using the Wright (1997) reduced equation of state.

  Units: salinity in PSU, potential temperature in degrees Celsius and pressure in Pascals.
  """
  # Fit coefficients (Wright 1997, reduced-range fit).
  a0 = 7.057924e-4; a1 = 3.480336e-7; a2 = -1.112733e-7
  b0 = 5.790749e8;  b1 = 3.516535e6;  b2 = -4.002714e4
  b3 = 2.084372e2;  b4 = 5.944068e5;  b5 = -9.643486e3
  c0 = 1.704853e5;  c1 = 7.904722e2;  c2 = -7.984422
  c3 = 5.140652e-2; c4 = -2.302158e2; c5 = -3.079464
  alpha0 = a0 + a1 * T + a2 * S
  p_star = b0 + b4 * S + T * (b1 + T * (b2 + b3 * T) + b5 * S)
  lam = c0 + c4 * S + T * (c1 + T * (c2 + c3 * T) + c5 * S)
  return (P + p_star) / (lam + alpha0 * (P + p_star))
def ice9_v2(i, j, source, xcyclic=True, tripolar=True):
  """
  An iterative (stack based) implementation of "Ice 9".

  The flood fill starts at [j,i] and treats any positive value of "source" as
  passable. Zero and negative values block flooding.

  xcyclic = True allows cyclic behavior in the last index. (default)
  tripolar = True allows a fold across the top-most edge. (default)

  Returns an array of 0's and 1's.
  """
  wetMask = 0 * source
  nj, ni = wetMask.shape
  pending = {(j, i)}
  while pending:
    jj, ii = pending.pop()
    if wetMask[jj, ii] or source[jj, ii] <= 0:
      continue
    wetMask[jj, ii] = 1
    # West neighbor (wrapping when cyclic).
    if ii > 0:
      pending.add((jj, ii - 1))
    elif xcyclic:
      pending.add((jj, ni - 1))
    # East neighbor (wrapping when cyclic).
    if ii < ni - 1:
      pending.add((jj, ii + 1))
    elif xcyclic:
      pending.add((jj, 0))
    # South neighbor.
    if jj > 0:
      pending.add((jj - 1, ii))
    # North neighbor, folding across the top edge for tri-polar grids.
    if jj < nj - 1:
      pending.add((jj + 1, ii))
    elif tripolar:
      pending.add((jj, ni - 1 - ii)) # Tri-polar fold
  return wetMask
def ice9it(i, j, depth, minD=0.):
  """
  Stack-based implementation of "ice 9" on a periodic, tri-polar grid.

  Returns 1 where depth>minD and is connected to depth[j,i], 0 otherwise.
  The last index is always treated as periodic and the top row folds onto
  itself (tri-polar fold).
  """
  wetMask = 0 * depth
  nj, ni = wetMask.shape
  todo = {(j, i)}
  while todo:
    jj, ii = todo.pop()
    if wetMask[jj, ii] or depth[jj, ii] <= minD:
      continue
    wetMask[jj, ii] = 1
    # East/west neighbors, always periodic in i.
    todo.add((jj, ii - 1) if ii > 0 else (jj, ni - 1))
    todo.add((jj, ii + 1) if ii < ni - 1 else (jj, 0))
    # South neighbor.
    if jj > 0:
      todo.add((jj - 1, ii))
    # North neighbor, folding across the top edge (tri-polar grid).
    todo.add((jj + 1, ii) if jj < nj - 1 else (jj, ni - 1 - ii))
  return wetMask
def ice9(x, y, depth, xy0):
  """Flood-fill `depth` (via ice9it) seeded at the grid point nearest to lon/lat pair `xy0`."""
  j0, i0 = nearestJI(x, y, xy0[0], xy0[1])
  return ice9it(i0, j0, depth)
def ice9Wrapper(x, y, depth, xy0):
  """Flood-fill `depth` (via ice9_v2) seeded at the grid point nearest to lon/lat pair `xy0`."""
  j0, i0 = nearestJI(x, y, xy0[0], xy0[1])
  return ice9_v2(i0, j0, depth)
def maskFromDepth(depth, zCellTop):
  """
  Generates a "wet mask" for a z-coordinate model based on relative location of
  the ocean bottom to the upper interface of the cell.

  depth (2d) is positive; zCellTop (scalar) is the negative position of the
  upper interface of the cell.  Returns 1 where the bottom lies below the
  cell's top interface, 0 elsewhere.
  """
  is_wet = depth > -zCellTop
  wet = 0 * depth  # preserves depth's dtype (and mask, for masked arrays)
  wet[is_wet] = 1
  return wet
def MOCpsi(vh, vmsk=None):
  """Sums 'vh' zonally and cumulatively in the vertical to yield an overturning stream function, psi(y,z).

  vh has shape (..., z, y, x); an optional mask vmsk (broadcastable to vh) is
  applied before summing.  psi has one more vertical level than vh, with
  psi = 0 at the bottom interface.
  """
  shape = list(vh.shape)
  shape[-3] += 1
  psi = np.zeros(shape[:-1])
  flux = vh if vmsk is None else vmsk * vh
  zonal_sum = flux.sum(axis=-1)  # (..., z, y)
  if len(shape) == 3:
    # No leading (e.g. time) dimension: accumulate from the bottom up.
    for k in range(shape[-3] - 1, 0, -1):
      psi[k - 1, :] = psi[k, :] - zonal_sum[k - 1]
  else:
    # Leading dimension (e.g. time): accumulate each record independently.
    for n in range(shape[0]):
      for k in range(shape[-3] - 1, 0, -1):
        psi[n, k - 1, :] = psi[n, k, :] - zonal_sum[n, k - 1]
  return psi
def moc_maskedarray(vh, mask=None):
  """Overturning stream function from (t, z, y, x) transport using masked arrays.

  Points where mask != 1 are masked out before summing.  The zonal sum is
  accumulated from the ocean floor to the surface, with an extra zero layer
  appended at the bottom; returns a (t, z+1, y) masked array.
  """
  if mask is not None:
    valid = np.ma.masked_where(np.not_equal(mask, 1.), mask)
  else:
    valid = 1.
  transport = vh * valid
  # Append a zero "bottom" layer so the cumulative sum starts from zero.
  bottom = np.ma.expand_dims(transport[:, -1, :, :] * 0., axis=1)
  transport = np.ma.concatenate((transport, bottom), axis=1)
  transport = np.ma.sum(transport, axis=-1) * -1.
  transport = transport[:, ::-1]  # flip z-axis so running sum is from ocean floor to surface
  transport = np.ma.cumsum(transport, axis=1)
  return transport[:, ::-1]  # flip z-axis back to original order
def nearestJI(x, y, x0, y0):
  """
  Find (j,i) of cell with center nearest to (x0,y0).
  """
  dist2 = (x - x0) ** 2 + (y - y0) ** 2
  return np.unravel_index(dist2.argmin(), x.shape)
def southOfrestJI(x, y, xy0, xy1):
"""
Returns 1 for point south/east of the line that passes through xy0-xy1, 0 otherwise.
"""
x0 = xy0[0]; y0 = xy0[1]; x1 = xy1[0]; y1 = xy1[1]
dx = x1 - x0; dy = y1 - y0
Y = (x-x0)*dy - (y-y0)*dx
Y[Y>=0] = 1; Y[Y<=0] = 0
return Y
def southOf(x, y, xy0, xy1):
"""
Returns 1 for point south/east of the line that passes through xy0-xy1, 0 otherwise.
"""
x0 = xy0[0]; y0 = xy0[1]; x1 = xy1[0]; y1 = xy1[1]
dx = x1 - x0; dy = y1 - y0
Y = (x-x0)*dy - (y-y0)*dx
Y[Y>=0] = 1; Y[Y<=0] = 0
return Y
def genBasinMasks(x,y,depth,verbose=False, xda=False):
  """
  Returns masking for different regions.

  Builds 0/1 masks (or an integer code array) for the major ocean basins and
  marginal seas by flood-filling the wet mask and carving off regions bounded
  by hand-picked great-line segments (see southOf).  Region order matters:
  each marginal sea is removed from `wet` before the next is processed.

  Parameters
  ----------
  x : 2D array
    Longitude
  y : 2D array
    Latitude
  depth : 2D array
    Ocean depth. Masked values must be set to zero.
  verbose : boolean, optional
    If True, print some stuff. Default is false.
  xda : boolean, optional
    If True, returns an xarray Dataset. Default is false.

  Returns
  -------
  xarray.DataArray or numpy.ndarray
    If xda is True, a DataArray of 0/1 masks with dimensions
    ('region', 'yh', 'xh'); otherwise a 2D array of integer basin codes
    (codes listed in the verbose printout below; LabSea/BaffinBay overlap
    other basins and are only available via xda=True).
  """
  rmask_od = OrderedDict()
  rmask_od['Global'] = xr.where(depth > 0, 1.0, 0.0)
  if verbose: print('Generating global wet mask...')
  wet = ice9(x, y, depth, (0,-35)) # All ocean points seeded from South Atlantic
  if verbose: print('done.')
  code = 0*wet
  # Reference latitudes: southern tip of Africa (Atlantic/Indian split) and
  # of Australia (Pacific/Indian split), found by flooding the land masks.
  if verbose: print('Finding Cape of Good Hope ...')
  tmp = 1 - wet; tmp[x<-30] = 0
  tmp = ice9(x, y, tmp, (20,-30.))
  yCGH = (tmp*y).min()
  if verbose: print('done.', yCGH)
  if verbose: print('Finding Melbourne ...')
  tmp = 1 - wet; tmp[x>-180] = 0
  tmp = ice9(x, y, tmp, (-220,-25.))
  yMel = (tmp*y).min()
  if verbose: print('done.', yMel)
  if verbose: print('Processing Persian Gulf ...')
  tmp = wet*( 1-southOf(x, y, (55.,23.), (56.5,27.)) )
  tmp = ice9(x, y, tmp, (53.,25.))
  code[tmp>0] = 11
  rmask_od['PersianGulf'] = xr.where(code == 11, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Red Sea ...')
  tmp = wet*( 1-southOf(x, y, (40.,11.), (45.,13.)) )
  tmp = ice9(x, y, tmp, (40.,18.))
  code[tmp>0] = 10
  rmask_od['RedSea'] = xr.where(code == 10, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Black Sea ...')
  tmp = wet*( 1-southOf(x, y, (26.,42.), (32.,40.)) )
  tmp = ice9(x, y, tmp, (32.,43.))
  code[tmp>0] = 7
  rmask_od['BlackSea'] = xr.where(code == 7, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Mediterranean ...')
  tmp = wet*( southOf(x, y, (-5.7,35.5), (-5.7,36.5)) )
  tmp = ice9(x, y, tmp, (4.,38.))
  code[tmp>0] = 6
  rmask_od['MedSea'] = xr.where(code == 6, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Baltic ...')
  tmp = wet*( southOf(x, y, (8.6,56.), (8.6,60.)) )
  tmp = ice9(x, y, tmp, (10.,58.))
  code[tmp>0] = 9
  rmask_od['BalticSea'] = xr.where(code == 9, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Hudson Bay ...')
  tmp = wet*(
             ( 1-(1-southOf(x, y, (-95.,66.), (-83.5,67.5)))
                *(1-southOf(x, y, (-83.5,67.5), (-84.,71.)))
             )*( 1-southOf(x, y, (-70.,58.), (-70.,65.)) ) )
  tmp = ice9(x, y, tmp, (-85.,60.))
  code[tmp>0] = 8
  rmask_od['HudsonBay'] = xr.where(code == 8, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Arctic ...')
  tmp = wet*(
            (1-southOf(x, y, (-171.,66.), (-166.,65.5))) * (1-southOf(x, y, (-64.,66.4), (-50.,68.5))) # Lab Sea
       + southOf(x, y, (-50.,0.), (-50.,90.)) * (1- southOf(x, y, (0.,65.5), (360.,65.5)) ) # Denmark Strait
       + southOf(x, y, (-18.,0.), (-18.,65.)) * (1- southOf(x, y, (0.,64.9), (360.,64.9)) ) # Iceland-Sweden
       + southOf(x, y, (20.,0.), (20.,90.)) # Barents Sea
       + (1-southOf(x, y, (-280.,55.), (-200.,65.)))
            )
  tmp = ice9(x, y, tmp, (0.,85.))
  code[tmp>0] = 4
  rmask_od['Arctic'] = xr.where(code == 4, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Pacific ...')
  # Southern boundary is Melbourne's latitude; the subtracted strips close
  # the Indonesian throughflow passages.
  tmp = wet*( (1-southOf(x, y, (0.,yMel), (360.,yMel)))
             -southOf(x, y, (-257,1), (-257,0))*southOf(x, y, (0,3), (1,3))
             -southOf(x, y, (-254.25,1), (-254.25,0))*southOf(x, y, (0,-5), (1,-5))
             -southOf(x, y, (-243.7,1), (-243.7,0))*southOf(x, y, (0,-8.4), (1,-8.4))
             -southOf(x, y, (-234.5,1), (-234.5,0))*southOf(x, y, (0,-8.9), (1,-8.9))
            )
  tmp = ice9(x, y, tmp, (-150.,0.))
  code[tmp>0] = 3
  rmask_od['PacificOcean'] = xr.where(code == 3, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Atlantic ...')
  tmp = wet*(1-southOf(x, y, (0.,yCGH), (360.,yCGH)))
  tmp = ice9(x, y, tmp, (-20.,0.))
  code[tmp>0] = 2
  rmask_od['AtlanticOcean'] = xr.where(code == 2, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Indian ...')
  tmp = wet*(1-southOf(x, y, (0.,yCGH), (360.,yCGH)))
  tmp = ice9(x, y, tmp, (55.,0.))
  code[tmp>0] = 5
  rmask_od['IndianOcean'] = xr.where(code == 5, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Southern Ocean ...')
  # Whatever remains south of the named basins is the Southern Ocean.
  tmp = ice9(x, y, wet, (0.,-55.))
  code[tmp>0] = 1
  rmask_od['SouthernOcean'] = xr.where(code == 1, 1.0, 0.0)
  wet = wet - tmp # Removed named points
  #if verbose: print('Remapping Persian Gulf points to the Indian Ocean for OMIP/CMIP6 ...')
  #code[code==11] = 5
  code[wet>0] = -9
  (j,i) = np.unravel_index( wet.argmax(), x.shape)
  if j:
    if verbose: print('There are leftover points unassigned to a basin code')
    while j:
      print(x[j,i],y[j,i],[j,i])
      wet[j,i]=0
      (j,i) = np.unravel_index( wet.argmax(), x.shape)
  else:
    if verbose: print('All points assigned a basin code')
  ################### IMPORTANT ############################
  # points from following regions are not "removed" from wet
  code1 = code.copy()
  wet = ice9(x, y, depth, (0,-35)) # All ocean points seeded from South Atlantic
  if verbose: print('Processing Labrador Sea ...')
  tmp = wet*((southOf(x, y, (-65.,66.), (-45,66.)))
            *(southOf(x, y, (-65.,52.), (-65.,66.)))
            *(1-southOf(x, y, (-45.,52.), (-45,66.)))
            *(1-southOf(x, y, (-65.,52.), (-45,52.))))
  tmp = ice9(x, y, tmp, (-50.,55.))
  code1[tmp>0] = 12
  rmask_od['LabSea'] = xr.where(code1 == 12, 1.0, 0.0)
  wet1 = ice9(x, y, depth, (0,-35)) # All ocean points seeded from South Atlantic
  if verbose: print('Processing Baffin Bay ...')
  tmp = wet1*((southOf(x, y, (-94.,80.), (-50.,80.)))
            *(southOf(x, y, (-94.,66.), (-94.,80.)))
            *(1-southOf(x, y, (-94.,66.), (-50,66.)))
            *(1-southOf(x, y, (-50.,66.), (-50.,80.))))
  tmp = ice9(x, y, tmp, (-70.,73.))
  code1[tmp>0] = 13
  # remove Hudson Bay
  code1[rmask_od['HudsonBay']>0] = 0.
  rmask_od['BaffinBay'] = xr.where(code1 == 13, 1.0, 0.0)
  if verbose:
    print("""
      Basin codes:
      -----------------------------------------------------------
        (0) Global              (7) Black Sea
        (1) Southern Ocean      (8) Hudson Bay
        (2) Atlantic Ocean      (9) Baltic Sea
        (3) Pacific Ocean       (10) Red Sea
        (4) Arctic Ocean        (11) Persian Gulf
        (5) Indian Ocean        (12) Lab Sea
        (6) Mediterranean Sea   (13) Baffin Bay
      Important: basin codes overlap. Code 12 and 13 are only loaded if xda=True.
    """)
  # Stack the 0/1 masks into a single DataArray indexed by region name.
  rmask = xr.DataArray(np.zeros((len(rmask_od), depth.shape[0], depth.shape[1])),
                       dims=('region', 'yh', 'xh'),
                       coords={'region':list(rmask_od.keys())})
  for i, rmask_field in enumerate(rmask_od.values()):
    rmask.values[i,:,:] = rmask_field
  if xda:
    return rmask
  else:
    return code
def genBasinMasks_old(x,y,depth,verbose=False):
  """Assign an integer basin code to every ocean point of a global grid.

  Starting from a global wet mask, marginal seas are peeled off one at a
  time (each `ice9Wrapper` flood-fill is seeded inside the target basin,
  and the found points are removed from `wet` so later basins cannot
  claim them). The processing order therefore matters.

  :param x, y: 2-D longitude/latitude arrays of the grid
  :param depth: bathymetry used to derive the wet mask
  :param verbose: print progress messages
  :return: array `code` of basin ids (see the table printed when verbose);
           leftover unassigned wet points are coded -9
  """
  if verbose: print('Generating global wet mask ...')
  wet = ice9Wrapper(x, y, depth, (0,-35)) # All ocean points seeded from South Atlantic
  if verbose: print('done.')
  code = 0*wet
  # Reference latitudes: southern tip of Africa (Atlantic/Indian split) and
  # of Australia (Pacific/Indian split), found by flood-filling the land.
  if verbose: print('Finding Cape of Good Hope ...')
  tmp = 1 - wet; tmp[x<-30] = 0
  tmp = ice9Wrapper(x, y, tmp, (20,-30.))
  yCGH = (tmp*y).min()
  if verbose: print('done.', yCGH)
  if verbose: print('Finding Melbourne ...')
  tmp = 1 - wet; tmp[x>-180] = 0
  tmp = ice9Wrapper(x, y, tmp, (-220,-25.))
  yMel = (tmp*y).min()
  if verbose: print('done.', yMel)
  # Marginal seas first: each is cut off by a straight "southOf" line
  # across its mouth, flood-filled from an interior seed, coded, and
  # removed from the wet mask.
  if verbose: print('Processing Persian Gulf ...')
  tmp = wet*( 1-southOf(x, y, (55.,23.), (56.5,27.)) )
  tmp = ice9Wrapper(x, y, tmp, (53.,25.))
  code[tmp>0] = 11
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Red Sea ...')
  tmp = wet*( 1-southOf(x, y, (40.,11.), (45.,13.)) )
  tmp = ice9Wrapper(x, y, tmp, (40.,18.))
  code[tmp>0] = 10
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Black Sea ...')
  tmp = wet*( 1-southOf(x, y, (26.,42.), (32.,40.)) )
  tmp = ice9Wrapper(x, y, tmp, (32.,43.))
  code[tmp>0] = 7
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Mediterranean ...')
  tmp = wet*( southOf(x, y, (-5.7,35.5), (-5.7,36.5)) )
  tmp = ice9Wrapper(x, y, tmp, (4.,38.))
  code[tmp>0] = 6
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Baltic ...')
  tmp = wet*( southOf(x, y, (8.6,56.), (8.6,60.)) )
  tmp = ice9Wrapper(x, y, tmp, (10.,58.))
  code[tmp>0] = 9
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Hudson Bay ...')
  tmp = wet*(
             ( 1-(1-southOf(x, y, (-95.,66.), (-83.5,67.5)))
                *(1-southOf(x, y, (-83.5,67.5), (-84.,71.)))
             )*( 1-southOf(x, y, (-70.,58.), (-70.,65.)) ) )
  tmp = ice9Wrapper(x, y, tmp, (-85.,60.))
  code[tmp>0] = 8
  wet = wet - tmp # Removed named points
  # Major oceans: the Arctic is bounded by several straits; the Pacific is
  # everything north of Melbourne's latitude minus the Indonesian
  # throughflow channels; Atlantic/Indian split at the Cape of Good Hope.
  if verbose: print('Processing Arctic ...')
  tmp = wet*(
             (1-southOf(x, y, (-171.,66.), (-166.,65.5))) * (1-southOf(x, y, (-64.,66.4), (-50.,68.5))) # Lab Sea
             + southOf(x, y, (-50.,0.), (-50.,90.)) * (1- southOf(x, y, (0.,65.5), (360.,65.5)) ) # Denmark Strait
             + southOf(x, y, (-18.,0.), (-18.,65.)) * (1- southOf(x, y, (0.,64.9), (360.,64.9)) ) # Iceland-Sweden
             + southOf(x, y, (20.,0.), (20.,90.)) # Barents Sea
             + (1-southOf(x, y, (-280.,55.), (-200.,65.)))
            )
  tmp = ice9Wrapper(x, y, tmp, (0.,85.))
  code[tmp>0] = 4
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Pacific ...')
  tmp = wet*( (1-southOf(x, y, (0.,yMel), (360.,yMel)))
             -southOf(x, y, (-257,1), (-257,0))*southOf(x, y, (0,3), (1,3))
             -southOf(x, y, (-254.25,1), (-254.25,0))*southOf(x, y, (0,-5), (1,-5))
             -southOf(x, y, (-243.7,1), (-243.7,0))*southOf(x, y, (0,-8.4), (1,-8.4))
             -southOf(x, y, (-234.5,1), (-234.5,0))*southOf(x, y, (0,-8.9), (1,-8.9))
            )
  tmp = ice9Wrapper(x, y, tmp, (-150.,0.))
  code[tmp>0] = 3
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Atlantic ...')
  tmp = wet*(1-southOf(x, y, (0.,yCGH), (360.,yCGH)))
  tmp = ice9Wrapper(x, y, tmp, (-20.,0.))
  code[tmp>0] = 2
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Indian ...')
  tmp = wet*(1-southOf(x, y, (0.,yCGH), (360.,yCGH)))
  tmp = ice9Wrapper(x, y, tmp, (55.,0.))
  code[tmp>0] = 5
  wet = wet - tmp # Removed named points
  if verbose: print('Processing Southern Ocean ...')
  tmp = ice9Wrapper(x, y, wet, (0.,-55.))
  code[tmp>0] = 1
  wet = wet - tmp # Removed named points
  if verbose: print('Remapping Persian Gulf points to the Indian Ocean for OMIP/CMIP6 ...')
  code[code==11] = 5
  # Anything still wet was never reached by a seed: flag it -9 and report
  # each leftover point (argmax scans for the first remaining wet cell).
  code[wet>0] = -9
  (j,i) = np.unravel_index( wet.argmax(), x.shape)
  if j:
    if verbose: print('There are leftover points unassigned to a basin code')
    while j:
      print(x[j,i],y[j,i],[j,i])
      wet[j,i]=0
      (j,i) = np.unravel_index( wet.argmax(), x.shape)
  else:
    if verbose: print('All points assigned a basin code')
  if verbose:
    print("""
   Basin codes:
   -----------------------------------------------------------
    (1) Southern Ocean       (6) Mediterranean Sea
    (2) Atlantic Ocean       (7) Black Sea
    (3) Pacific Ocean        (8) Hudson Bay
    (4) Arctic Ocean         (9) Baltic Sea
    (5) Indian Ocean        (10) Red Sea
   """)
  return code
# Tests
if __name__ == '__main__':
  import matplotlib.pyplot as plt
  import numpy.matlib
  # Small synthetic section: 5 x-edges, 4 columns of layer interfaces, and
  # random cell data.
  x=np.arange(5)
  z=np.array([[0,0.2,0.3,-.1],[1,1.5,.7,.4],[2,2,1.5,2],[3,2.3,1.5,2.1]])*-1
  q=np.matlib.rand(3,4)
  print('x=',x)
  print('z=',z)
  print('q=',q)
  # Render the same section once per representation, stacked vertically:
  # default first, then 'linear', then 'plm'.
  variants = ({}, {'representation': 'linear'}, {'representation': 'plm'})
  for row, extra in enumerate(variants, start=1):
    X, Z, Q = section2quadmesh(x, z, q, **extra)
    print('X=',X)
    print('Z=',Z)
    print('Q=',Q)
    plt.subplot(3,1,row)
    plt.pcolormesh(X, Z, Q)
  plt.show()
| 34.55979 | 120 | 0.570076 |
2b15d6f5eea3427159f316c106c48239a55207ab | 3,140 | py | Python | explore_theano/ffnn.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | 1 | 2015-06-16T06:53:52.000Z | 2015-06-16T06:53:52.000Z | explore_theano/ffnn.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | null | null | null | explore_theano/ffnn.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | null | null | null |
# Feed-forward network (2 -> 10 -> 4) trained with gradient descent plus
# momentum on the quadrant toy dataset, implemented as a Theano graph.
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
from datasets import generate_quadrant
size = [2, 10, 4]      # layer widths: input, hidden, output
eta = 0.4              # learning rate
momentum = 0.9
epochs = 400
# data input & output
x = T.matrix('x')
t = T.ivector('t')
# forward pass
W1 = theano.shared(
    np.random.randn(size[0], size[1]).astype('float32'),
    name="W1", borrow=True)
Wb1 = theano.shared(
    np.asarray(0, dtype='float32'),
    name="Wb1", borrow=True)
a1 = T.dot(x, W1) + Wb1
b1 = T.nnet.sigmoid(a1)
W2 = theano.shared(
    np.random.randn(size[1], size[2]).astype('float32'),
    name="W2", borrow=True)
# NOTE(review): name="Wb1" here is likely a copy-paste slip — this shared
# variable is Wb2 (debug label only, does not affect results).
Wb2 = theano.shared(
    np.asarray(0, dtype='float32'),
    name="Wb1", borrow=True)
a2 = T.dot(b1, W2) + Wb2
y = T.nnet.softmax(a2)
# training error
# `categorical_crossentropy` returns a vector with the entropy for each value
L = T.mean(T.nnet.categorical_crossentropy(y, t))
# backward pass
(gW1, gWb1, gW2, gWb2) = T.grad(L, [W1, Wb1, W2, Wb2])
# Velocity buffers for the momentum term, initialised to zero.
dW1 = theano.shared(
    np.zeros_like(W1.get_value(), dtype='float32'),
    name="dW1", borrow=True)
dWb1 = theano.shared(
    np.zeros_like(Wb1.get_value(), dtype='float32'),
    name="dWb1", borrow=True)
# NOTE(review): name="dW1" below is likely another copy-paste slip (dW2).
dW2 = theano.shared(
    np.zeros_like(W2.get_value(), dtype='float32'),
    name="dW1", borrow=True)
dWb2 = theano.shared(
    np.zeros_like(Wb2.get_value(), dtype='float32'),
    name="dWb2", borrow=True)
# Compile
# NOTE: all updates are made with the old values, thus the order of operation
# doesn't matter. To make momentum work without a delay as in
# http://stackoverflow.com/questions/28205589/the-update-order-of-theano-functions-update-list
# the update equation (dW1) is inserted into the `W1 = W1 + dW1` update.
# NOTE(review): the velocity update uses `- momentum * dW1`; classical
# momentum accumulates with `+ momentum * dW1` — confirm the sign is intended.
train = theano.function(
    inputs=[x, t],
    outputs=L,
    updates=((dW1, - momentum * dW1 - eta * gW1), (W1, W1 - momentum * dW1 - eta * gW1),
             (dWb1, - momentum * dWb1 - eta * gWb1), (Wb1, Wb1 - momentum * dWb1 - eta * gWb1),
             (dW2, - momentum * dW2 - eta * gW2), (W2, W2 - momentum * dW2 - eta * gW2),
             (dWb2, - momentum * dWb2 - eta * gWb2), (Wb2, Wb2 - momentum * dWb2 - eta * gWb2))
)
error = theano.function(inputs=[x, t], outputs=L)
predict = theano.function(inputs=[x], outputs=y)
# Generate dataset
(train_X, train_t) = generate_quadrant(1000)
(test_X, test_t) = generate_quadrant(300)
# Full-batch training; record train/test loss per epoch for the curves below.
train_error = np.zeros(epochs)
test_error = np.zeros(epochs)
for epoch in range(0, epochs):
    train_error[epoch] = train(train_X, train_t)
    test_error[epoch] = error(test_X, test_t)
print(W1.get_value(), Wb1.get_value())
print(W2.get_value(), Wb2.get_value())
predict_y = np.argmax(predict(test_X), axis=1)
# Top panel: loss curves; bottom panel: predicted class of each test point.
plt.subplot(2, 1, 1)
plt.plot(np.arange(0, epochs), train_error, label='train', alpha=0.5)
plt.plot(np.arange(0, epochs), test_error, label='test', alpha=0.5)
plt.legend()
plt.ylabel('loss')
plt.subplot(2, 1, 2)
colors = np.asarray(["#ca0020", "#f4a582", "#92c5de", "#0571b0"])
plt.scatter(test_X[:, 0], test_X[:, 1], c=colors[predict_y], lw=0)
plt.axhline(y=0, xmin=-1, xmax=1, color="gray")
plt.axvline(x=0, ymin=-1, ymax=1, color="gray")
plt.xlim([-1, 1])
plt.ylim([-1, 1])
plt.show()
| 30.192308 | 95 | 0.659236 |
d1e12d102cd84fac01a7a6214abadce751fc9247 | 12,396 | py | Python | emtgan/common.py | ai-health-care/EMT-CycleGAN | 129d67bdb68137fe92bf3ca69fc2d4f4a83e87fa | [
"MIT"
] | null | null | null | emtgan/common.py | ai-health-care/EMT-CycleGAN | 129d67bdb68137fe92bf3ca69fc2d4f4a83e87fa | [
"MIT"
] | null | null | null | emtgan/common.py | ai-health-care/EMT-CycleGAN | 129d67bdb68137fe92bf3ca69fc2d4f4a83e87fa | [
"MIT"
] | 1 | 2021-02-16T04:14:13.000Z | 2021-02-16T04:14:13.000Z | import json
import os
# Project configuration: config.json must sit in the working directory.
with open('config.json', 'r') as f:
    config = json.load(f)
# All data/model paths hang off ROOT_DIR (empty string -> current directory).
root_dir = config.get('ROOT_DIR', '')
data_dir = os.path.join(root_dir, 'data')
model_dir = os.path.join(root_dir, 'saved_models')
# Create the model output directory eagerly so later saves never fail.
os.makedirs(model_dir, exist_ok=True)
from itertools import chain
import numpy as np
import json
import re
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white')
# torch
import torch
from torch import nn, optim
from torch.autograd import Variable
from scipy.optimize import minimize
import pydicom
def permute_unique(N: int):
    """
    Return all unique index pairs (i, j) with i < j for N points.

    Example (N = 4):
        0 1
        0 2
        0 3
        1 2
        1 3
        2 3

    :param N: number of points (must be positive)
    :return: integer array of shape (N*(N-1)/2, 2); empty (0, 2) for N == 1

    Fixes over the previous version: building the result row by row with
    np.vstack made quadratically many copies; N == 2 returned a 1-D array
    whose iteration yields scalars (so callers indexing ``p[0]`` crashed);
    and N == 1 returned None.
    """
    assert N > 0
    pairs = [[i, j] for i in range(N) for j in range(i + 1, N)]
    if not pairs:
        # N == 1: no pairs exist; return an empty but well-shaped array so
        # callers can still take len() and iterate.
        return np.empty((0, 2), dtype=int)
    return np.array(pairs)
def _parse_model_section(lines: list):
model_offsets = {}
for line in lines:
token = ''
at_model = False
at_offset = False
model_coords = []
offset_coords = []
for c in line:
if c == '(':
if not at_model and not at_offset:
at_model = True
elif not at_model and at_offset:
pass
elif c == ')':
pass
elif c == ':':
at_model = False
at_offset = True
elif c.isspace():
pass
elif c == ',':
pass
elif c.isdigit() or c in ('e', '.', '-'):
token += c
continue
else:
raise RuntimeError('unexpected character ' + c)
if token:
if at_model:
model_coords.append(float(token))
elif at_offset:
offset_coords.append(float(token))
token = ''
if len(model_coords) != len(offset_coords):
raise RuntimeError('invalid mapping of coords in MODEL section')
modelpoint = tuple(model_coords)
offset = offset_coords
if modelpoint and offset:
model_offsets[modelpoint] = np.array(offset)
return model_offsets
def load_calibration(path: str):
    """
    Read a calibration file and return its parsed [MODEL] section.

    The file consists of ``[SECTION]`` headers followed by content lines;
    ``#`` starts a comment and blank lines are ignored.

    :param path: path to calibration file
    :raises RuntimeError: if a content line precedes any section header
    """
    with open(path, 'r') as f:
        raw_lines = f.read().splitlines()
    sections = {}
    current = ''
    pending = []
    for raw in raw_lines:
        stripped = raw.strip()
        # Drop trailing comments, then skip lines that become empty.
        hash_pos = stripped.find('#')
        if hash_pos != -1:
            stripped = stripped[:hash_pos]
        if not stripped:
            continue
        if stripped.startswith('[') and stripped.endswith(']'):
            # New section header: flush accumulated lines into the previous
            # section, then switch (repeated headers extend one section).
            if current:
                sections[current].extend(pending)
                pending = []
            current = stripped[1:-1].upper()
            sections.setdefault(current, [])
        else:
            if not current:
                raise RuntimeError('line does not belong to any section')
            pending.append(stripped)
    if pending:
        sections[current].extend(pending)
    return _parse_model_section(sections['MODEL'])
# Module-level calibration table, loaded once at import time from
# <root_dir>/calibration.txt; maps model-point tuples to offset vectors.
calibration_map = load_calibration(os.path.join(root_dir, 'calibration.txt'))
class Sensor:
    def __init__(self):
        """Container for one EMT sensor's samples and metadata.

        Further attributes are attached dynamically from the sensor's JSON
        metadata (see ``load_measurement``).
        """
        # Filled later with a pandas DataFrame of (median-filtered) samples.
        self.data = None
class Measurement:
    """One measurement session: a set of model points and (at least two)
    sensors, where sensors[0] is the reference and sensors[1] the probe."""
    def __init__(self, identifier: str):
        self.identifier = identifier
        self.sensors = []
        # (n_points, 3) array of calibrated model coordinates; set by loader.
        self.model = None
    def points(self, features):
        """Return the probe sensor's values for the given feature columns."""
        P = self.sensors[1].data[features].values
        return P
    def modelpoints(self, offset=False):
        """Return model points, optionally shifted by the reference sensor's
        median position."""
        P = self.model
        if offset:
            ref = self.sensors[0].data[['x', 'y', 'z']].median().values
            return P + ref
        return P
    def displacements(self, features, permute_func=permute_unique):
        """
        Return list of displacement vectors generated by a custom permutation.
        :param features: input feature vector

        Each returned row concatenates the features of two measured points
        (pair indices chosen by ``permute_func``), giving shape
        (n_pairs, 2 * len(features)).
        """
        assert len(features) > 0
        assert len(self.sensors) >= 2
        n = len(features)
        points = self.sensors[1].data[features].values
        assert len(points) > 0
        permutations = permute_func(len(points))
        D = np.zeros((len(permutations), n * 2))
        for i, p in enumerate(permutations):
            # First point's features in the left half, second in the right.
            D[i, 0:n] = points[p[0], 0:n]
            D[i, n:n*2] = points[p[1], 0:n]
        return D
    def model_displacements(self, permute_func=permute_unique):
        """Same pairing as ``displacements`` but over the 3-D model points,
        so rows align one-to-one when the same permute_func is used."""
        permutations = permute_func(len(self.model))
        M = np.zeros((len(permutations), 6))
        for i, p in enumerate(permutations):
            M[i, 0:3] = self.model[p[0], 0:3]
            M[i, 3:6] = self.model[p[1], 0:3]
        return M
    def __len__(self):
        # Number of model points in this measurement.
        return len(self.model)
def filter_df(df, nsamples):
    """Collapse each consecutive run of *nsamples* rows into its median row."""
    block_ids = df.index // nsamples
    return df.groupby(block_ids).median()
class Dataset:
    """A named collection of :class:`Measurement` objects with helpers to
    filter, merge and evaluate them."""

    def __init__(self, identifier):
        self.identifier = identifier
        self.measurements = []
        # Set of environment labels seen across all measurements.
        self.environments = set()

    def subset(self, environments=None, description=None):
        """
        Split the dataset into (matching, non-matching) datasets.

        A measurement matches when its environment is in *environments* or
        its description matches the *description* regex.

        :param environments: optional container of environment labels
        :param description: regex that is matched to measurement descriptions
        :raises RuntimeError: if neither filter is given

        Fixed: ``re.compile(None)`` raised TypeError whenever only
        *environments* was supplied; the regex is now compiled only when a
        description pattern was actually given.
        """
        if environments is None and description is None:
            raise RuntimeError('invalid subset query')
        sub = Dataset(self.identifier + '_sub')
        inverse = Dataset(self.identifier + '_inv')
        regex = re.compile(description) if description is not None else None
        for measurement in self.measurements:
            if environments and measurement.environment in environments:
                sub.add_measurement(measurement)
                continue
            if description and re.match(regex, measurement.description):
                sub.add_measurement(measurement)
                continue
            inverse.add_measurement(measurement)
        return sub, inverse

    def merge(self, dataset):
        """Return a new dataset containing this dataset's and *dataset*'s
        measurements.

        Fixed: the merged dataset previously aliased ``self.measurements``/
        ``self.environments`` and then extended them, silently mutating
        ``self``; it now copies before extending.
        """
        merged = Dataset(self.identifier + '_' + dataset.identifier)
        merged.measurements = list(self.measurements)
        merged.environments = set(self.environments)
        merged.measurements.extend(dataset.measurements)
        merged.environments.update(dataset.environments)
        return merged

    def add_measurement(self, m: Measurement):
        """Append *m* and record its environment label."""
        self.measurements.append(m)
        self.environments.add(m.environment)

    def summary(self):
        """Return a short human-readable description of the dataset."""
        s = '[DATASET] {}\n'.format(self.identifier)
        s += '{} measurements\n'.format(len(self.measurements))
        s += 'environments: {}\n'.format(','.join(self.environments))
        s += 'rmse: {}\n'.format(self.rmse())
        return s

    def displacements(self,
                      features,
                      permute_func=permute_unique,
                      remove_duplicates=False,
                      shuffle=False):
        """
        Return dataset displacements and corresponding model displacements.

        :param features: input feature vector
        :param permute_func: permutation function to generate displacements
        :param remove_duplicates: remove redundant measured displacements
        :param shuffle: shuffle the rows; only effective together with
            remove_duplicates
        :return: (D, M) stacked over all measurements, rows aligned
        """
        assert len(features) > 0
        D = None
        M = None
        for measurement in self.measurements:
            d = measurement.displacements(features, permute_func)
            m = measurement.model_displacements(permute_func)
            if D is None:
                D = d
                M = m
            else:
                D = np.vstack((D, d))
                M = np.vstack((M, m))
        if remove_duplicates:
            # Keep one index per unique measured row; apply the same index
            # selection to M so rows stay aligned.
            _, idx = np.unique(D, axis=0, return_index=True)
            if shuffle:
                np.random.shuffle(idx)
            D = D[idx, :]
            M = M[idx, :]
        return D, M

    def points(self, features):
        """Stack the raw probe-sensor points of all measurements."""
        P = None
        for measurement in self.measurements:
            p = measurement.points(features)
            if P is None:
                P = p
            else:
                P = np.vstack((P, p))
        return P

    def modelpoints(self, offset=False):
        """Stack the model points of all measurements (optionally offset by
        each measurement's reference sensor position)."""
        P = None
        for measurement in self.measurements:
            p = measurement.modelpoints(offset)
            if P is None:
                P = p
            else:
                P = np.vstack((P, p))
        return P

    def errors(self):
        """Per-pair error: measured displacement length minus model length."""
        D, M = self.displacements(['x', 'y', 'z'])
        d_D = np.linalg.norm(D[:, 0:3] - D[:, 3:6], axis=1)
        d_M = np.linalg.norm(M[:, 0:3] - M[:, 3:6], axis=1)
        E = d_D - d_M
        return E

    def mse(self):
        """Mean squared displacement-length error."""
        E = self.errors()
        mse = np.sum(np.square(E)) / len(E)
        return mse

    def rmse(self):
        """Root mean squared displacement-length error."""
        return np.sqrt(self.mse())
def load_measurement(directory: str):
    """
    Load one measurement (model points + two sensors) from *directory*.

    Expects measurement.json, points.csv and sensor_{0,1}.{json,csv} inside
    the directory. Model grid indices are scaled to millimetres, corrected
    with the module-level ``calibration_map``, and sensor samples are
    median-filtered per measured point (``samples_per_point`` rows each).

    :param directory: path of a single measurement directory
    :return: populated :class:`Measurement`

    Fixes: ``json.load(open(...))`` leaked the file handle (now a context
    manager, consistent with the sensor files); removed whole-trace
    sx/sy/sz/sq standard deviations that were computed and then immediately
    shadowed by the per-point group std-devs, i.e. dead code.
    """
    assert len(directory) > 0
    m = Measurement(directory)
    with open(os.path.join(directory, 'measurement.json'), 'r') as f:
        m_meta = json.load(f)
    m.__dict__.update(m_meta)
    points = pd.read_csv(os.path.join(directory, 'points.csv'), header=None)
    # Grid indices -> millimetres (8.0 x 8.0 x 9.6 mm grid spacing).
    m.model = points.values * np.array([8.0, 8.0, 9.6])
    def get_calibrated_point(x):
        # Grid points without a calibration entry get a zero offset.
        return calibration_map.get((x[0], x[1], x[2]), (0, 0, 0))
    offsets = np.apply_along_axis(get_calibrated_point, axis=1, arr=m.model)
    m.model -= offsets
    nsamples = m.samples_per_point
    # TODO @henry extend this to up to 4 sensors
    s0 = Sensor()
    with open(os.path.join(directory, 'sensor_0.json'), 'r') as f:
        s0_meta = json.load(f)
    s0.__dict__.update(s0_meta)
    data = pd.read_csv(os.path.join(directory, 'sensor_0.csv'))
    s0.data = filter_df(data, nsamples)
    s1 = Sensor()
    with open(os.path.join(directory, 'sensor_1.json'), 'r') as f:
        s1_meta = json.load(f)
    s1.__dict__.update(s1_meta)
    data = pd.read_csv(os.path.join(directory, 'sensor_1.csv'))
    s1.data = filter_df(data, nsamples)
    # Attach the reference sensor's quality and the per-point sample spread.
    s1.data['rq'] = s0.data['q']
    s1.data['sx'] = data['x'].groupby(data.index // nsamples).std()
    s1.data['sy'] = data['y'].groupby(data.index // nsamples).std()
    s1.data['sz'] = data['z'].groupby(data.index // nsamples).std()
    s1.data['sq'] = data['q'].groupby(data.index // nsamples).std()
    m.sensors.append(s0)
    m.sensors.append(s1)
    return m
def load_dataset(directory: str):
    """Load every ``measurement*`` subdirectory of *directory* into a
    :class:`Dataset` named after the directory's basename."""
    assert len(directory) > 0
    dataset = Dataset(os.path.basename(directory))
    measurement_dirs = (
        entry for entry in os.listdir(directory)
        if entry.startswith('measurement')
    )
    for measurement_dir in measurement_dirs:
        measurement = load_measurement(os.path.join(directory, measurement_dir))
        dataset.add_measurement(measurement)
    return dataset
def flatten_first_dim(x, lmax=np.inf):
    """Concatenate the rows of *x* into one flat array, keeping at most
    *lmax* leading elements of each row (default: keep everything)."""
    flattened = []
    for row in x:
        limit = min(len(row), lmax)
        flattened.extend(row[:limit])
    return np.asarray(flattened)
def load_datasets(list_n, input_features=('x', 'y', 'z')):
    """
    Load several C-arm datasets and return their displacement pairs.

    :param list_n: iterable of dataset directory names under ``data_dir``
    :param input_features: feature columns to extract; any sequence works
        (the default is a tuple rather than a list so the shared default
        object cannot be mutated between calls)
    :return: (X, Y) lists with one measured / one model displacement array
        per dataset

    Also fixed: ``dataset.displacements(...)`` was previously evaluated
    twice per dataset (once for each element of its result tuple).
    """
    # pandas column selection needs a list, so normalize whatever sequence
    # the caller (or the tuple default) provided.
    features = list(input_features)
    X = []
    Y = []
    for carm in list_n:
        print(f'{carm}.....', end='')
        carm_path = os.path.join(data_dir, carm)
        dataset = load_dataset(carm_path)
        displacements, model_displacements = dataset.displacements(features)
        X.append(displacements)
        Y.append(model_displacements)
        print('done')
    return X, Y
def dataset_bounds(dataset, labels, input_features):
    """
    Return per-feature and label bounds for a displacement dataset.

    Each row of *dataset* concatenates the features of two points, so
    columns [0:dim] and [dim:2*dim] describe the same feature set and are
    reduced together.

    :param dataset: 2-D array of shape (n_samples, 2 * len(input_features))
    :param labels: array-like of target values
    :param input_features: feature names; only its length is used
    :return: (Min, Max, labelMin, labelMax)
    """
    dim = len(input_features)
    # Hoisted: the previous version computed dataset.min(axis=0) and
    # dataset.max(axis=0) twice each.
    col_min = dataset.min(axis=0)
    col_max = dataset.max(axis=0)
    Min = np.minimum(col_min[:dim], col_min[dim:])
    Max = np.maximum(col_max[:dim], col_max[dim:])
    labelMin = np.min(labels)
    labelMax = np.max(labels)
    return Min, Max, labelMin, labelMax
class DecayLambda():
    """Linear decay schedule: returns 1.0 until *decay_start_epoch*, then
    decays linearly so that the factor reaches 0 at *n_epochs* (with
    *offset* shifting the effective epoch)."""

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert ((n_epochs - decay_start_epoch) > 0), 'Decay must start before the training session ends!'
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the multiplicative factor for the given *epoch*."""
        epochs_into_decay = max(0, epoch + self.offset - self.decay_start_epoch)
        decay_span = self.n_epochs - self.decay_start_epoch
        return 1.0 - epochs_into_decay / decay_span
| 31.382278 | 114 | 0.572927 |
f78ef081fb26eb1602d6daf472c05831daccf511 | 1,891 | py | Python | tests/engine/inline_layout/test_inline_non_replaced.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 71 | 2015-04-13T09:44:14.000Z | 2019-03-24T01:03:02.000Z | tests/engine/inline_layout/test_inline_non_replaced.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 35 | 2019-05-06T15:26:09.000Z | 2022-03-28T06:30:33.000Z | tests/engine/inline_layout/test_inline_non_replaced.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 139 | 2015-05-30T18:37:43.000Z | 2019-03-27T17:14:05.000Z | from colosseum.constants import AUTO, INLINE
from colosseum.declaration import CSS
from ...utils import LayoutTestCase, TestNode
class WidthTests(LayoutTestCase):
    """Inline non-replaced elements keep their intrinsic 50x10 size: auto
    margins on either side must not change any of the computed boxes."""

    def _assert_span_layout(self, style):
        # Build a span with fixed intrinsic size, lay it out, and check that
        # border, padding and content boxes all collapse to that size.
        node = TestNode(name='span', style=style)
        node.intrinsic.width = 50
        node.intrinsic.height = 10
        self.layout_node(node)
        self.assertLayout(
            node,
            {
                'tag': 'span',
                'border_box': {'position': (0, 0), 'size': (50, 10)},
                'padding_box': {'position': (0, 0), 'size': (50, 10)},
                'content': {'position': (0, 0), 'size': (50, 10)},
            }
        )

    def test_no_horizontal_properties(self):
        self._assert_span_layout(CSS(display=INLINE))

    def test_auto_left_margin(self):
        self._assert_span_layout(CSS(display=INLINE, margin_left=AUTO))

    def test_auto_right_margin(self):
        self._assert_span_layout(CSS(display=INLINE, margin_right=AUTO))
| 28.223881 | 70 | 0.471179 |
a9cc5001bbeb024455fe3c94a161c187d0f65baa | 818 | py | Python | quality_from_b.py | tomekrzymyszkiewicz/TSP-simulated-annealing | f6d4cbd73ed48099bb545879cde83c974ecd6b03 | [
"MIT"
] | null | null | null | quality_from_b.py | tomekrzymyszkiewicz/TSP-simulated-annealing | f6d4cbd73ed48099bb545879cde83c974ecd6b03 | [
"MIT"
] | null | null | null | quality_from_b.py | tomekrzymyszkiewicz/TSP-simulated-annealing | f6d4cbd73ed48099bb545879cde83c974ecd6b03 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as pllt
from scipy.special import gamma
def main():
if len(sys.argv)>1:
data = pd.read_csv(str(sys.argv[1]),usecols=['graph_name','calculated_path_weight','defined_path_weight','b'])
else:
data = pd.read_csv('wyniki.csv',usecols=['graph_name','calculated_path_weight','defined_path_weight','b'])
data = np.array(data)
X = [data[i][3] for i in range(len(data))]
Y = [(data[i][1]-data[i][2])/data[i][1]for i in range(len(data))]
pllt.scatter(X,Y)
for i in range(len(data)):
pllt.annotate('b='+str(data[i][3]),(X[i],Y[i]))
pllt.ylabel('Stosunek błędu do wartości optymalnej')
pllt.xlabel('Współczynnik b')
pllt.show()
if __name__ == "__main__":
main()
| 31.461538 | 118 | 0.649144 |
6b9d6cc378db061952573961c5cb23c01aa648cd | 2,529 | py | Python | orders/views.py | halitdincer/etsynest | 15b5a24382970ea893f8c0404e20ae9615e5b434 | [
"MIT"
] | 1 | 2022-01-14T21:40:46.000Z | 2022-01-14T21:40:46.000Z | orders/views.py | halitdincer/etsynest | 15b5a24382970ea893f8c0404e20ae9615e5b434 | [
"MIT"
] | null | null | null | orders/views.py | halitdincer/etsynest | 15b5a24382970ea893f8c0404e20ae9615e5b434 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
from django.views.generic import ListView,DetailView
from django.views.generic.base import TemplateView
from django.utils import timezone
from .models import Order,OrderItem
class OrderListView(ListView):
    """Order index page; POST returns the orders as a DataTables-style
    JSON payload."""
    model = Order
    context_object_name = 'listings'
    template_name = "orders/index.html"

    def post(self, request):
        """Serialize all orders into rows of display strings.

        Fixed: the two branches differed only in the creation-date cell,
        duplicating the whole row construction; they are now merged.
        """
        l_data = []
        for order in Order.objects.all():
            # Orders without a creation timestamp render an empty date cell.
            created = (timezone.localtime(order.created_at).strftime('%d %B %Y')
                       if order.created_at else "")
            l_data.append([
                order.order_id,
                created,
                order.first_name,
                '$' + str(round(order.subtotal + order.total_shipping_cost, 2)),
                '$' + str(round(order.total_fees, 2)),
                '$' + str(round(order.total_tax_cost, 2)),
                '$' + str(order.total_production_cost),
                '$' + str(round(order.revenue, 2)),
            ])
        context = {}
        # NOTE(review): reads request.GET inside a POST handler — DataTables
        # usually posts 'draw' in the body; confirm whether request.POST was
        # intended.
        context['draw'] = int(request.GET.get('draw', 0)) + 1
        context['recordsTotal'] = len(l_data)
        context['recordsFiltered'] = len(l_data)
        context['data'] = l_data
        return JsonResponse(context, safe=False)

    def get_context_data(self, **kwargs):
        """Pass through the default ListView context unchanged."""
        context = super().get_context_data(**kwargs)
        return context
class OrderDetailView(DetailView):
    """Detail endpoint for a single order, resolved by its order_id."""
    model = Order
    context_object_name = 'order'
    template_name = "orders/detail.html"
    # Look the object up via the order_id URL kwarg instead of pk.
    slug_field = "order_id"
    slug_url_kwarg = "order_id"
    def get_context_data(self, **kwargs):
        # Attach the order's line items to the template context.
        context = super().get_context_data(**kwargs)
        context['item_list'] = OrderItem.objects.filter(order=self.object)
        return context
    def render_to_response(self, context, **response_kwargs):
        # NOTE(review): responds with the order's as_json() and ignores the
        # template/context built above — confirm template_name and
        # get_context_data are still needed.
        return JsonResponse(self.object.as_json(), **response_kwargs)
| 37.191176 | 93 | 0.544484 |
68e45a2251b919e3a1e54ed2cb7964d51752a45f | 5,477 | py | Python | TheDigger_src/lib/waf.py | Jistrokz/TheDigger | d2831b0b8fdf75595c4049d885abb3e6a79b9a30 | [
"MIT"
] | 5 | 2021-06-20T16:49:06.000Z | 2022-03-03T07:21:42.000Z | TheDigger_src/lib/waf.py | Jistrokz/TheDigger | d2831b0b8fdf75595c4049d885abb3e6a79b9a30 | [
"MIT"
] | null | null | null | TheDigger_src/lib/waf.py | Jistrokz/TheDigger | d2831b0b8fdf75595c4049d885abb3e6a79b9a30 | [
"MIT"
] | null | null | null | from requests.exceptions import TooManyRedirects, ConnectionError
from TheDigger_src.utils.web_server_validator import WebServerValidator
from TheDigger_src.utils.exceptions import WAFException, WebServerValidatorException
from TheDigger_src.utils.request_handler import RequestHandler
from TheDigger_src.utils.coloring import COLOR, COLORED_COMBOS
from TheDigger_src.utils.help_utils import HelpUtilities
from TheDigger_src.utils.logger import Logger
SERVER = "Server"
class WAFApplicationMethods:
    """Per-vendor WAF/CDN fingerprint checks against an HTTP response.

    Each classmethod inspects the response's headers (and, where relevant,
    cookies or body) and returns True when the vendor's signature is found,
    None otherwise.
    """

    @classmethod
    def detect_cloudfront(cls, res):
        service = "CloudFront"
        marker_headers = ("Via", "X-cache")
        has_marker = any(name in res.headers.keys() for name in marker_headers)
        # Marker header present AND some header value mentions the service.
        if has_marker and any(service.lower() in value for value in res.headers.values()):
            return True
        if res.headers.get(SERVER) == service:
            return True
        return

    @classmethod
    def detect_incapsula(cls, res):
        if "X-Iinfo" in res.headers.keys():
            return True
        if res.headers.get("X-CDN") == "Incapsula":
            return True
        return

    @classmethod
    def detect_distil(cls, res):
        return True if res.headers.get("x-distil-cs") else None

    @classmethod
    def detect_cloudflare(cls, res):
        if "CF-RAY" in res.headers.keys():
            return True
        if res.headers.get(SERVER) == "cloudflare":
            return True
        return

    @classmethod
    def detect_edgecast(cls, res):
        if SERVER in res.headers.keys():
            if "ECD" in res.headers[SERVER]:
                return True
        return

    @classmethod
    def detect_maxcdn(cls, res):
        if SERVER in res.headers.keys():
            if "NetDNA-cache" in res.headers[SERVER]:
                return True
        return

    @classmethod
    def detect_sucuri(cls, res):
        signals = (
            res.headers.get(SERVER) == "Sucuri/Cloudproxy",
            "X-Sucuri-ID" in res.headers.keys(),
            "X-Sucuri-Cache" in res.headers.keys(),
            "Access Denied - Sucuri Website Firewall" in res.text,
        )
        if any(signals):
            return True
        return

    @classmethod
    def detect_reblaze(cls, res):
        if res.headers.get(SERVER) == "Reblaze Secure Web Gateway":
            return True
        if res.cookies.get("rbzid"):
            return True
        return
class WAF:
    """Detect WAF/CDN presence for a target host via DNS CNAMEs and
    application-level response fingerprints, logging any findings."""
    def __init__(self, host):
        self.host = host
        self.cnames = host.dns_results.get('CNAME')
        self.request_handler = RequestHandler()
        self.web_server_validator = WebServerValidator()
        # Set to True by _waf_detected() once any signature matches.
        self.waf_present = False
        # CNAME substring -> vendor name.
        self.waf_cname_map = {
            "incapdns": "Incapsula",
            "edgekey": "Akamai",
            "akamai": "Akamai",
            "edgesuite": "Akamai",
            "distil": "Distil Networks",
            "cloudfront": "CloudFront",
            "netdna-cdn": "MaxCDN"
        }
        # Vendor name -> application-level fingerprint check.
        self.waf_app_method_map = {
            "CloudFront": WAFApplicationMethods.detect_cloudfront,
            "Cloudflare": WAFApplicationMethods.detect_cloudflare,
            "Incapsula": WAFApplicationMethods.detect_incapsula,
            "MaxCDN": WAFApplicationMethods.detect_maxcdn,
            "Edgecast": WAFApplicationMethods.detect_edgecast,
            "Distil Networks": WAFApplicationMethods.detect_distil,
            "Sucuri": WAFApplicationMethods.detect_sucuri,
            "Reblaze": WAFApplicationMethods.detect_reblaze
        }
        log_file = HelpUtilities.get_output_path("{}/WAF.txt".format(self.host.target))
        self.logger = Logger(log_file)
    def _waf_detected(self, name, where):
        # Log the finding and remember that at least one WAF was seen.
        self.logger.info(
            "{} Detected WAF presence in {}: {}{}{}".format(
                COLORED_COMBOS.BAD, where, COLOR.RED, name, COLOR.RESET))
        self.waf_present = True
    def _detect_by_cname(self):
        # A CNAME containing a known substring identifies the vendor.
        for waf in self.waf_cname_map:
            if any(waf in str(cname) for cname in self.cnames):
                self._waf_detected(self.waf_cname_map.get(waf), "CNAME record")
    async def _detect_by_application(self):
        """Fetch the target once and run every vendor fingerprint on the
        response.

        :raises WAFException: when the server cannot be reached
        """
        try:
            session = self.request_handler.get_new_session()
            response = session.get(
                timeout=20,
                allow_redirects=True,
                url="{}://{}:{}".format(
                    self.host.protocol,
                    self.host.target,
                    self.host.port
                )
            )
            for waf, method in self.waf_app_method_map.items():
                result = method(response)
                if result:
                    self._waf_detected(waf, "web application")
        except (ConnectionError, TooManyRedirects) as e:
            raise WAFException("Couldn't get response from server.\n"
                               "Caused due to exception: {}".format(str(e)))
    async def detect(self):
        """Run CNAME detection, then (if a web server responds) the
        application-level detection; logs the outcome either way."""
        self.logger.info("{} Trying to detect WAF presence in {}".format(COLORED_COMBOS.INFO, self.host))
        if self.cnames:
            self._detect_by_cname()
        try:
            self.web_server_validator.validate_target_webserver(self.host)
            await self._detect_by_application()
            if not self.waf_present:
                self.logger.info("{} Did not detect WAF presence in target".format(COLORED_COMBOS.GOOD))
        except WebServerValidatorException:
            self.logger.info(
                "{} Target does not seem to have an active web server on port {}. "
                "No WAF could be detected on an application level.".format(COLORED_COMBOS.NOTIFY, self.host.port))
aa56294af55ce958366f8a6d23c72925ad815957 | 7,338 | py | Python | Main.py | LiewMK/Pygame-SpriteGame-ProjectileMotion | 784675223d865764d7e1dc574efc3b5b53b33405 | [
"CC0-1.0"
] | null | null | null | Main.py | LiewMK/Pygame-SpriteGame-ProjectileMotion | 784675223d865764d7e1dc574efc3b5b53b33405 | [
"CC0-1.0"
] | null | null | null | Main.py | LiewMK/Pygame-SpriteGame-ProjectileMotion | 784675223d865764d7e1dc574efc3b5b53b33405 | [
"CC0-1.0"
] | null | null | null | import pygame
import os
from tkinter import *
from level1 import *
from level2 import *
from level3 import *
from level4 import *
from level5 import*
class mainmenu():
    """Main menu and game-flow controller.

    Note: the statements below run at class-definition time (module import):
    they initialise pygame, load all menu images and start the menu music.
    """
    pygame.init()
    pygame.mixer.init()
    # Pin the window position before the display is created.
    os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (70, 25)
    bg = pygame.image.load('MainMenu.png')
    pygame.display.set_caption('RISE OF GUARDIAN')
    insbg = pygame.image.load('Instruction.png')
    # Hover ("On") and idle ("Off") variants of each menu button.
    playOn = pygame.image.load('PlayOn.png')
    playOff = pygame.image.load('PlayOff.png')
    instructionOn = pygame.image.load('InstructionOn.png')
    instructionOff = pygame.image.load('InstructionOff.png')
    exitOn = pygame.image.load('ExitOn.png')
    exitOff = pygame.image.load('ExitOff.png')
    pygame.mixer.music.load('HeartOfCourage.ogg')
    pygame.mixer.music.play(-1)
    def __init__(self):
        # Game window and per-session state (health, score, level, flags).
        self.w = pygame.display.set_mode((1400, 800))
        self.MRun = True        # menu loop keeps running
        self.BMenu = False      # "back to menu" requested
        self.LL = []            # retry-level instances (see game())
        self.Game = True        # game loop keeps running
        self.level = 1
        self.mute = False
        self.hp = 5
        self.score = 0
        self.decision = 0       # level outcome: 1 win, -1 lose, 0 back to menu
    def instruc(self):
        """Show the instruction screen until ESC, the close button, or a
        click in the top-left 60x60 "back" corner."""
        paused = True
        while paused:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        paused = False
            mouse = pygame.mouse.get_pos()
            click = pygame.mouse.get_pressed()
            if 0 + 60 > mouse[0] > 0 and 0 + 60 > mouse[1] > 0:
                if click[0] == 1:
                    paused = False
            self.w.blit(self.insbg, (0, 0))
            pygame.display.update()
    def menu(self):
        """Main menu loop: draw hover states for the three buttons and
        dispatch Play / Instruction / Exit on click."""
        while self.MRun:
            self.w.blit(self.bg, (0, 0))
            mouse = pygame.mouse.get_pos()
            # Each button swaps to its highlighted image while hovered.
            if 576 + 248 > mouse[0] > 576 and 298 + 68 > mouse[1] > 298 and self.MRun:
                self.w.blit(self.playOn, (576, 298))
            else:
                self.w.blit(self.playOff, (576, 298))
            if 525 + 347 > mouse[0] > 525 and 442 + 68 > mouse[1] > 442 and self.MRun:
                self.w.blit(self.instructionOn, (525, 442))
            else:
                self.w.blit(self.instructionOff, (525, 442))
            if 576 + 248 > mouse[0] > 576 and 586 + 68 > mouse[1] > 586 and self.MRun:
                self.w.blit(self.exitOn, (576, 586))
            else:
                self.w.blit(self.exitOff, (576, 586))
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.MRun = False
                    self.BMenu = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    if 576 + 248 > mouse[0] > 576 and 298 + 68 > mouse[1] > 298 and self.MRun:
                        self.game()
                        self.MRun = False
                    if 525 + 347 > mouse[0] > 525 and 442 + 68 > mouse[1] > 442 and self.MRun:
                        self.instruc()
                    if 576 + 248 > mouse[0] > 576 and 586 + 68 > mouse[1] > 586 and self.MRun:
                        self.MRun = False
                        self.BMenu = False
            pygame.display.update()
            # Returning from game() with BMenu set re-enters the menu loop.
            if self.BMenu:
                self.MRun = True
    def game(self):
        """Run levels 1-5 in sequence; each level's RUN() returns
        (hp, score, mute, decision). decision: 1 advance, -1 retry from
        level 1, anything else returns to the menu."""
        self.level = 1
        pygame.mixer.music.load('Evergreen.ogg')
        pygame.mixer.music.play(-1)
        while self.Game:
            if not self.mute:
                pygame.mixer.music.unpause()
            else:
                pygame.mixer.music.pause()
            if self.level == 1:
                LEVEL1 = Level1(self.w, self.hp, self.score, self.mute)
                GET = LEVEL1.RUN()
                self.hp = GET[0]
                self.score = GET[1]
                self.mute = GET[2]
                self.decision = GET[3]
            elif self.level == 2:
                LEVEL2 = Level2(self.w, self.hp, self.score, self.mute)
                GET = LEVEL2.RUN()
                self.hp = GET[0]
                self.score = GET[1]
                self.mute = GET[2]
                self.decision = GET[3]
            elif self.level == 3:
                LEVEL3 = Level3(self.w, self.hp, self.score, self.mute)
                GET = LEVEL3.RUN()
                self.hp = GET[0]
                self.score = GET[1]
                self.mute = GET[2]
                self.decision = GET[3]
            elif self.level == 4:
                # Levels 4 and 5 switch to their own soundtracks.
                pygame.mixer.music.load('Aeterna.ogg')
                pygame.mixer.music.play(-1)
                LEVEL4 = Level4(self.w, self.hp, self.score, self.mute)
                GET = LEVEL4.RUN()
                self.hp = GET[0]
                self.score = GET[1]
                self.mute = GET[2]
                self.decision = GET[3]
            elif self.level == 5:
                pygame.mixer.music.load('StrengthOfaThousandMen.ogg')
                pygame.mixer.music.play(-1)
                LEVEL5 = Level5(self.w, self.hp, self.score, self.mute)
                GET = LEVEL5.RUN()
                self.hp = GET[0]
                self.score = GET[1]
                self.mute = GET[2]
                self.decision = GET[3]
            if self.decision==-1:
                # Player lost: reset stats and replay level 1 until they
                # either win it (advance) or quit back to the menu.
                lose = True
                while lose:
                    self.level = 1
                    self.hp = 5
                    self.score = 0
                    if not self.mute:
                        pygame.mixer.music.load('Evergreen.ogg')
                        pygame.mixer.music.play(-1)
                    self.LL.append(Level1(self.w, self.hp, self.score, self.mute))
                    GET = self.LL[-1].RUN()
                    self.hp = GET[0]
                    self.score = GET[1]
                    self.mute = GET[2]
                    self.decision = GET[3]
                    if self.decision==1:
                        self.level += 1
                        break
                    elif self.decision==0:
                        self.BMenu = True
                        self.Game = False
                        if not self.mute:
                            pygame.mixer.music.load('HeartOfCourage.ogg')
                            pygame.mixer.music.play(-1)
                        break
            elif self.decision == 1:
                self.level += 1
            else:
                # Back to menu: reset stats and restore the menu music.
                self.hp = 5
                self.score = 0
                self.BMenu = True
                if not self.mute:
                    pygame.mixer.music.load('HeartOfCourage.ogg')
                    pygame.mixer.music.play(-1)
                break
class exitwindow():
def __init__(self, top):
self.top = top
top.title("RISE OF GUARDIAN")
top.geometry("1000x600+280+100")
credit = PhotoImage(file="Credit.png")
self.label = Label(top, image=credit)
self.label.image = credit
self.label.pack(fill="both", expand=True)
M = mainmenu()
M.menu()
pygame.quit()
top = Tk()
my_gui = exitwindow(top)
top.mainloop()
quit() | 37.438776 | 95 | 0.455437 |
b11e1bdf3a67a2b1bd365ff70bba1df4bd7a7ecf | 4,038 | py | Python | 55_Life/python/life.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | null | null | null | 55_Life/python/life.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | null | null | null | 55_Life/python/life.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | null | null | null | """
LIFE
An implementation of John Conway's popular cellular automaton
Ported by Dave LeCompte
"""
PAGE_WIDTH = 64
MAX_WIDTH = 70
MAX_HEIGHT = 24
def print_centered(msg):
spaces = " " * ((PAGE_WIDTH - len(msg)) // 2)
print(spaces + msg)
def print_header(title):
print_centered(title)
print_centered("CREATIVE COMPUTING MORRISTOWN, NEW JERSEY")
print()
print()
print()
def get_pattern():
print("ENTER YOUR PATTERN:")
c = 0
pattern = {}
while True:
line = input()
if line == "DONE":
return pattern
# BASIC input would strip of leading whitespace.
# Python input does not. The following allows you to start a
# line with a dot to disable the whitespace stripping. This is
# unnecessary for Python, but for historical accuracy, it's
# staying in.
if line[0] == ".":
line = " " + line[1:]
pattern[c] = line
c += 1
def main() -> None:
print_header("LIFE")
pattern = get_pattern()
pattern_height = len(pattern)
pattern_width = 0
for _line_num, line in pattern.items():
pattern_width = max(pattern_width, len(line))
min_x = 11 - pattern_height // 2
min_y = 33 - pattern_width // 2
max_x = MAX_HEIGHT - 1
max_y = MAX_WIDTH - 1
a = [[0 for y in range(MAX_WIDTH)] for x in range(MAX_HEIGHT)]
p = 0
g = 0
invalid = False
# line 140
# transcribe the input pattern into the active array
for x in range(0, pattern_height):
for y in range(0, len(pattern[x])):
if pattern[x][y] != " ":
a[min_x + x][min_y + y] = 1
p += 1
print()
print()
print()
while True:
if invalid:
inv_str = "INVALID!"
else:
inv_str = ""
print(f"GENERATION: {g}\tPOPULATION: {p} {inv_str}")
next_min_x = MAX_HEIGHT - 1
next_min_y = MAX_WIDTH - 1
next_max_x = 0
next_max_y = 0
p = 0
g += 1
for _ in range(min_x):
print()
for x in range(min_x, max_x + 1):
print
line = [" "] * MAX_WIDTH
for y in range(min_y, max_y + 1):
if a[x][y] == 2:
a[x][y] = 0
continue
elif a[x][y] == 3:
a[x][y] = 1
elif a[x][y] != 1:
continue
# line 261
line[y] = "*"
next_min_x = min(x, next_min_x)
next_max_x = max(x, next_max_x)
next_min_y = min(y, next_min_y)
next_max_y = max(y, next_max_y)
print("".join(line))
# line 295
for _ in range(max_x + 1, MAX_HEIGHT):
print()
print()
min_x = next_min_x
max_x = next_max_x
min_y = next_min_y
max_y = next_max_y
if min_x < 3:
min_x = 3
invalid = True
if max_x > 22:
max_x = 22
invalid = True
if min_y < 3:
min_y = 3
invalid = True
if max_y > 68:
max_y = 68
invalid = True
# line 309
p = 0
for x in range(min_x - 1, max_x + 2):
for y in range(min_y - 1, max_y + 2):
count = 0
for i in range(x - 1, x + 2):
for j in range(y - 1, y + 2):
if a[i][j] == 1 or a[i][j] == 2:
count += 1
if a[x][y] == 0:
if count == 3:
a[x][y] = 3
p += 1
elif (count < 3) or (count > 4):
a[x][y] = 2
else:
p += 1
# line 635
min_x = min_x - 1
min_y = min_y - 1
max_x = max_x + 1
max_y = max_y + 1
if __name__ == "__main__":
main()
| 23.206897 | 70 | 0.454681 |
9c1416479cb1af0eb592546e05f90ba4dd3ff5fc | 5,430 | py | Python | homeassistant/components/surepetcare/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/surepetcare/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/surepetcare/sensor.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Support for Sure PetCare Flaps/Pets sensors."""
import logging
from typing import Any, Dict, Optional
from surepy import SureLockStateID, SureProductID
from homeassistant.const import (
ATTR_VOLTAGE,
CONF_ID,
CONF_TYPE,
DEVICE_CLASS_BATTERY,
UNIT_PERCENTAGE,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from . import SurePetcareAPI
from .const import (
DATA_SURE_PETCARE,
SPC,
SURE_BATT_VOLTAGE_DIFF,
SURE_BATT_VOLTAGE_LOW,
TOPIC_UPDATE,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up Sure PetCare Flaps sensors."""
if discovery_info is None:
return
entities = []
spc = hass.data[DATA_SURE_PETCARE][SPC]
for entity in spc.ids:
sure_type = entity[CONF_TYPE]
if sure_type in [
SureProductID.CAT_FLAP,
SureProductID.PET_FLAP,
SureProductID.FEEDER,
]:
entities.append(SureBattery(entity[CONF_ID], sure_type, spc))
if sure_type in [SureProductID.CAT_FLAP, SureProductID.PET_FLAP]:
entities.append(Flap(entity[CONF_ID], sure_type, spc))
async_add_entities(entities, True)
class SurePetcareSensor(Entity):
"""A binary sensor implementation for Sure Petcare Entities."""
def __init__(self, _id: int, sure_type: SureProductID, spc: SurePetcareAPI):
"""Initialize a Sure Petcare sensor."""
self._id = _id
self._sure_type = sure_type
self._spc = spc
self._spc_data: Dict[str, Any] = self._spc.states[self._sure_type].get(self._id)
self._state: Dict[str, Any] = {}
self._name = (
f"{self._sure_type.name.capitalize()} "
f"{self._spc_data['name'].capitalize()}"
)
self._async_unsub_dispatcher_connect = None
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self._name
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return f"{self._spc_data['household_id']}-{self._id}"
@property
def available(self) -> bool:
"""Return true if entity is available."""
return bool(self._state)
@property
def should_poll(self) -> bool:
"""Return true."""
return False
async def async_update(self) -> None:
"""Get the latest data and update the state."""
self._spc_data = self._spc.states[self._sure_type].get(self._id)
self._state = self._spc_data.get("status")
_LOGGER.debug("%s -> self._state: %s", self._name, self._state)
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def update() -> None:
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
class Flap(SurePetcareSensor):
"""Sure Petcare Flap."""
@property
def state(self) -> Optional[int]:
"""Return battery level in percent."""
return SureLockStateID(self._state["locking"]["mode"]).name.capitalize()
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the device."""
attributes = None
if self._state:
attributes = {"learn_mode": bool(self._state["learn_mode"])}
return attributes
class SureBattery(SurePetcareSensor):
"""Sure Petcare Flap."""
@property
def name(self) -> str:
"""Return the name of the device if any."""
return f"{self._name} Battery Level"
@property
def state(self) -> Optional[int]:
"""Return battery level in percent."""
battery_percent: Optional[int]
try:
per_battery_voltage = self._state["battery"] / 4
voltage_diff = per_battery_voltage - SURE_BATT_VOLTAGE_LOW
battery_percent = min(int(voltage_diff / SURE_BATT_VOLTAGE_DIFF * 100), 100)
except (KeyError, TypeError):
battery_percent = None
return battery_percent
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return f"{self._spc_data['household_id']}-{self._id}-battery"
@property
def device_class(self) -> str:
"""Return the device class."""
return DEVICE_CLASS_BATTERY
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return state attributes."""
attributes = None
if self._state:
voltage_per_battery = float(self._state["battery"]) / 4
attributes = {
ATTR_VOLTAGE: f"{float(self._state['battery']):.2f}",
f"{ATTR_VOLTAGE}_per_battery": f"{voltage_per_battery:.2f}",
}
return attributes
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement."""
return UNIT_PERCENTAGE
| 29.351351 | 88 | 0.635175 |
bb66d58dc3efe04180995063e28aad40e413ba0d | 291 | py | Python | examples/errors2/components/dnsproblem2/component.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | 34 | 2019-09-06T05:30:10.000Z | 2022-03-12T01:25:38.000Z | examples/errors2/components/dnsproblem2/component.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | 204 | 2019-09-05T14:41:12.000Z | 2022-03-10T12:14:37.000Z | examples/errors2/components/dnsproblem2/component.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | 25 | 2019-10-10T07:13:41.000Z | 2022-03-24T14:52:25.000Z | from batou.component import Component
from batou.utils import Address
class DNSProblem2(Component):
attribute_without_v6 = Address("localhost:22", require_v6=False)
def configure(self):
# Accessing `listen_v6` causes an error:
self.attribute_without_v6.listen_v6
| 24.25 | 68 | 0.749141 |
220d0531cd02708df98da8c5cd47aeec1402292e | 7,845 | py | Python | README/functions.py | cnickle/penquins | 3d09eeaa887f8ec56e65b7b6b87a51da824d3edf | [
"Apache-2.0"
] | null | null | null | README/functions.py | cnickle/penquins | 3d09eeaa887f8ec56e65b7b6b87a51da824d3edf | [
"Apache-2.0"
] | 3 | 2021-07-06T00:23:39.000Z | 2021-11-20T14:59:33.000Z | README/functions.py | cnickle/penquins | 3d09eeaa887f8ec56e65b7b6b87a51da824d3edf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 18:46:23 2019
Functions to be used in fitting and calculations
@author: Cameron
"""
import numpy as np
from scipy.integrate import quad
from scipy.integrate import dblquad
from numba import jit
import pandas as pd
import matplotlib.pyplot as plt
eV = 1 #Energy
K = 1 #Temperature Units
C = 1 #Coulombs
s = 1 #seconds
V = 1 #volts
kb = 8.6173324e-5*eV/K #Boltzmann Constant
q = 1.6e-19*C
h = 4.1356e-15*eV*s
# %% Some Basic Mathematical Functions
@jit
def linear(x,m,b):
return b+x*m
@jit
def sigmoid(x,pos,width):
return 1/(1+np.exp((x-pos)/width))
@jit
def fermi(E,T):
return 1/(np.exp((E)/(kb*T))+1)
@jit
def gaussian(x,A, mu,sigma):
return A*np.exp(-.5*((x-mu)/(sigma))**2)
@jit
def normalized_gaussian(x, mu,sigma):
A = 1
def gaus(ep):
return gaussian(ep,A,mu,sigma)
A = 1/quad(gaus,mu-3*sigma,mu+3*sigma)[0]
return gaussian(x, A, mu, sigma)
# %% Some Basic Physics Functions
@jit
def densityOfStates(E,ep,gamma):
numerator = gamma
denominator = (E-ep)**2+(gamma/2)**2
return numerator/denominator#/(2*np.pi)
@jit
def rateRatio(gammaL,gammaR):
return gammaL*gammaR/(gammaL+gammaR)
# %% The Following functions All Deal with the
# Single level tunnel Model
@jit
def single_level_tunnel_model_integrand_Alt(E,ep,c,vg,eta,vb,gammaC,gammaW,T):
return -gammaC*(fermi(E+vb/2,T)-fermi(E-vb/2,T))/\
((E-((ep+c*vg)+(eta-1/2)*vb))**2+(gammaW/2)**2)
# The following function is a 'catch all' function. If sigma != 0 then
# then the function returns the gaussian version.
def tunnelmodel_singleLevel(vb,n, gammaC,gammaW, deltaE1,eta,sigma,c,vg,T):
if sigma == 0:
limits = [-1*np.abs(vb),1*np.abs(vb)]
def integrand (E):
result = single_level_tunnel_model_integrand_Alt(E,deltaE1,c,vg,eta,vb,gammaC,gammaW,T)
return result
return n*q/h*quad(integrand,
limits[0],
limits[1])[0]
else:
A = 1
args = (A,deltaE1,sigma)
A = 1/quad(gaussian,deltaE1-3*sigma,deltaE1+3*sigma,args=args)[0]
limits = [min([deltaE1-3*sigma,-1*np.abs(vb)]),\
max([deltaE1+3*sigma,1*np.abs(vb)])]
def integrand (E,ep):
result = gaussian(ep,A,deltaE1,sigma)*\
single_level_tunnel_model_integrand_Alt(E,ep,c,vg,eta,vb,gammaC,gammaW,T)
return result
return n*q/h*dblquad(integrand,
limits[0],
limits[1],
lambda x: limits[0],
lambda x: limits[1])[0]
# This is the 2 level version of the Single Level tunnel model.
# This isn't really needed to be a seperate model as you simply need
# To add the contributions from each level.
def tunnelmodel_2level(vb, n, c,vg,T,
gammaC1, gammaW1, deltaE1, eta1, sigma1,
gammaC2, gammaW2, deltaE2, eta2, sigma2):
args1 = (n, gammaC1, gammaW1, deltaE1, eta1, sigma1,c,vg,T)
args2 = (n, gammaC2, gammaW2, deltaE2, eta2, sigma2,c,vg,T)
I1 = tunnelmodel_singleLevel(vb,*args1)
I2 = tunnelmodel_singleLevel(vb,*args2)
return I1+I2
# %%Below are all of the functions deal with Nitzan's Hysteresis Model
@jit
def averageBridgePopulation_integrand(E,ep,c,vg,eta,vb,gammaL,gammaR,T):
gammaW = gammaL+gammaR
return ((fermi(E+vb/2,T)*gammaL+gammaR*fermi(E-vb/2,T))/\
((E-((ep+c*vg)+(eta-1/2)*vb))**2+(gammaW/2)**2))
def averageBridgePopulation(vb, gammaL, gammaR, deltaE, eta, c, vg, T):
limits = [-10,10]
def integrand (E):
result = averageBridgePopulation_integrand(E, deltaE, c, vg, eta, vb, gammaL, gammaR, T)
return result
return quad(integrand,
limits[0],
limits[1])[0]/(2*np.pi)
@jit
def MarcusETRates(vb, gamma, lam, epsilon, T):
alpha = vb-epsilon
S = 2*np.sqrt(np.pi*kb*T/lam)
R_plus = (gamma/4)*S*np.exp(-(alpha+lam)**2/(4*lam*kb*T))
R_minus = (gamma/4)*S*np.exp(-(alpha-lam)**2/(4*lam*kb*T))
return R_plus,R_minus
def HysteresisModel_withP(vb, n, gammaL, gammaR, kappa, sigma, E_AB, E_AC, chi, eta,
gam, lam, P, u, c, vg, T):
volts = list(set(np.round(vb,2)))
#%% Calculate all currents:
calcDB = pd.DataFrame()
calcDB['V'] = sorted(volts)
eqSTL = np.vectorize(tunnelmodel_singleLevel)
calcDB['I_np'] = eqSTL(calcDB['V'], n, gammaL*gammaR, gammaL+gammaR, E_AB,
eta, sigma, c, vg, T)
calcDB['I_p'] = eqSTL(calcDB['V'], n, gammaL*gammaR*kappa**2,
(gammaL+gammaR)*kappa, E_AB+chi, eta, sigma, c, vg,
T)
eqETRates = np.vectorize(MarcusETRates)
calcDB['R_AC'], calcDB['R_CA'] = eqETRates(calcDB['V'], gam, lam, E_AC, T)
calcDB['R_BD'], calcDB['R_DB'] = eqETRates(calcDB['V'], gam*kappa, lam,
E_AC+chi, T)
eqBridge = np.vectorize(averageBridgePopulation)
calcDB['n_np'] = eqBridge(calcDB['V'], gammaL, gammaR, E_AB, eta, c, vg, T)
calcDB['n_p'] = eqBridge(calcDB['V'], gammaL*kappa, gammaR*kappa,
E_AB+chi, eta, c, vg, T)
calcDB['k_S0_S1'] = (1-calcDB['n_np'])*calcDB['R_AC'] + calcDB['n_np']*calcDB['R_BD']
calcDB['k_S1_S0'] = (1-calcDB['n_p'])*calcDB['R_CA'] + calcDB['n_p']*calcDB['R_DB']
delt = abs(vb[2]-vb[3])/u
I = []
Parray = []
delArray = []
for i,V in enumerate(vb):
V = np.round(V,2)
tempDf =calcDB[calcDB['V']==np.round(V,2)].reset_index()
calcs = dict(tempDf.iloc[0])
Parray += [P]
I += [((1-P)*calcs['I_np']+P*calcs['I_p'])]
dPdt = calcs['k_S0_S1']-P*(calcs['k_S0_S1']+calcs['k_S1_S0'])
delArray += [dPdt]
P = P+dPdt*delt
return I, Parray
def HysteresisModel(vb, n, gammaL, gammaR, kappa, sigma, E_AB, E_AC, chi, eta,
gam, lam, P, u, c, vg, T):
I, __ = HysteresisModel_withP(vb, n, gammaL, gammaR, kappa, sigma, E_AB,
E_AC, chi, eta, gam, lam, P, u, c, vg, T)
return np.array(I)
# %% This is another Nitzan Function that was applied to the BTTF
# molecule
def E_act_fixedtemp_gatevoltage(Vg,E,l):
T0=260
T1=330
def integrandOne(ep):
num=np.exp(-((E+Vg/2)+ep-l)**2/(4*kb*T0*l))
denom=1/(np.exp((ep-Vg/2)/(kb*T0))+1)
return num*denom
def integrandTwo(ep):
num=np.exp(-((E+Vg/2)+ep+l)**2/(4*kb*T0*l))
denom=1-1/(np.exp((ep-Vg/2)/(kb*T0))+1)
return num*denom
def integrandThree(ep):
num=np.exp(-((E+Vg/2)+ep-l)**2/(4*kb*T1*l))
denom=1/(np.exp((ep-Vg/2)/(kb*T1))+1)
return num*denom
def integrandFour(ep):
num=np.exp(-((E+Vg/2)+ep+l)**2/(4*kb*T1*l))
denom=1-1/(np.exp((ep-Vg/2)/(kb*T1))+1)
return num*denom
One = quad(integrandOne, -10, 10)
Two = quad(integrandTwo, -10, 10)
Three = quad(integrandThree, -10, 10)
Four = quad(integrandFour, -10, 10)
leftSide=np.log((One[0]+Two[0])*(1/(np.sqrt(4*np.pi*l*kb*T0))))
rightSide=np.log((Three[0]+Four[0])*(1/(np.sqrt(4*np.pi*l*kb*T1))))
FinalAns=-1000*kb*T0**2*(leftSide-rightSide)/(T1-T0)
return FinalAns
def E_act_fixedtemp_biasvoltage(V,E,l,cap,W,A):
Vg=cap*(1-1/(1+np.exp((V-A)/W)))
return E_act_fixedtemp_gatevoltage(Vg,E,l) | 33.101266 | 99 | 0.555895 |
e63abc84154942e7b4a739113172dd9f4df28c15 | 18,333 | py | Python | matrix/plugin.program.openwizard/resources/libs/common/config.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.program.openwizard/resources/libs/common/config.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.program.openwizard/resources/libs/common/config.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Copyright (C) 2019 drinfernoo #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import xbmc
import xbmcaddon
import xbmcvfs
import os
import uservar
class Config:
def __init__(self):
self.init_meta()
self.init_uservars()
self.init_paths()
self.init_settings()
def init_meta(self):
self.ADDON_ID = xbmcaddon.Addon().getAddonInfo('id')
self.ADDON = xbmcaddon.Addon(self.ADDON_ID)
self.ADDON_NAME = self.ADDON.getAddonInfo('name')
self.ADDON_VERSION = self.ADDON.getAddonInfo('version')
self.ADDON_PATH = self.ADDON.getAddonInfo('path')
self.ADDON_ICON = self.ADDON.getAddonInfo('icon')
self.ADDON_FANART = self.ADDON.getAddonInfo('fanart')
self.KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
self.RAM = int(xbmc.getInfoLabel("System.Memory(total)")[:-2])
def init_uservars(self):
# User Edit Variables
self.ADDONTITLE = uservar.ADDONTITLE
self.BUILDERNAME = uservar.BUILDERNAME
self.EXCLUDES = uservar.EXCLUDES
self.BUILDFILE = uservar.BUILDFILE
self.UPDATECHECK = uservar.UPDATECHECK
self.APKFILE = uservar.APKFILE
self.YOUTUBETITLE = uservar.YOUTUBETITLE
self.YOUTUBEFILE = uservar.YOUTUBEFILE
self.ADDONFILE = uservar.ADDONFILE
self.ADVANCEDFILE = uservar.ADVANCEDFILE
# Themeing Menu Items
self.ICONBUILDS = uservar.ICONBUILDS if not uservar.ICONBUILDS.endswith('://') else self.ADDON_ICON
self.ICONMAINT = uservar.ICONMAINT if not uservar.ICONMAINT.endswith('://') else self.ADDON_ICON
self.ICONSPEED = uservar.ICONSPEED if not uservar.ICONSPEED.endswith('://') else self.ADDON_ICON
self.ICONAPK = uservar.ICONAPK if not uservar.ICONAPK.endswith('://') else self.ADDON_ICON
self.ICONADDONS = uservar.ICONADDONS if not uservar.ICONADDONS.endswith('://') else self.ADDON_ICON
self.ICONYOUTUBE = uservar.ICONYOUTUBE if not uservar.ICONYOUTUBE.endswith('://') else self.ADDON_ICON
self.ICONSAVE = uservar.ICONSAVE if not uservar.ICONSAVE.endswith('://') else self.ADDON_ICON
self.ICONTRAKT = uservar.ICONTRAKT if not uservar.ICONTRAKT.endswith('://') else self.ADDON_ICON
self.ICONDEBRID = uservar.ICONREAL if not uservar.ICONREAL.endswith('://') else self.ADDON_ICON
self.ICONLOGIN = uservar.ICONLOGIN if not uservar.ICONLOGIN.endswith('://') else self.ADDON_ICON
self.ICONCONTACT = uservar.ICONCONTACT if not uservar.ICONCONTACT.endswith('://') else self.ADDON_ICON
self.ICONSETTINGS = uservar.ICONSETTINGS if not uservar.ICONSETTINGS.endswith('://') else self.ADDON_ICON
self.HIDESPACERS = uservar.HIDESPACERS
self.SPACER = uservar.SPACER
self.COLOR1 = uservar.COLOR1
self.COLOR2 = uservar.COLOR2
self.THEME1 = uservar.THEME1
self.THEME2 = uservar.THEME2
self.THEME3 = uservar.THEME3
self.THEME4 = uservar.THEME4
self.THEME5 = uservar.THEME5
self.HIDECONTACT = uservar.HIDECONTACT
self.CONTACT = uservar.CONTACT
self.CONTACTICON = uservar.CONTACTICON if not uservar.CONTACTICON.endswith('://') else self.ADDON_ICON
self.CONTACTFANART = uservar.CONTACTFANART if not uservar.CONTACTFANART.endswith('://') else self.ADDON_FANART
# Auto Update For Those With No Repo
self.AUTOUPDATE = uservar.AUTOUPDATE
# Auto Install Repo If Not Installed
self.AUTOINSTALL = uservar.AUTOINSTALL
self.REPOID = uservar.REPOID
self.REPOADDONXML = uservar.REPOADDONXML
self.REPOZIPURL = uservar.REPOZIPURL
# Notification Window
self.ENABLE_NOTIFICATION = uservar.ENABLE
self.NOTIFICATION = uservar.NOTIFICATION
self.HEADERTYPE = uservar.HEADERTYPE
self.FONTHEADER = uservar.FONTHEADER
self.HEADERMESSAGE = uservar.HEADERMESSAGE
self.HEADERIMAGE = uservar.HEADERIMAGE
self.FONTSETTINGS = uservar.FONTSETTINGS
self.BACKGROUND = uservar.BACKGROUND
self.BACKGROUND = self.BACKGROUND if not self.BACKGROUND == '' else self.ADDON_FANART
def init_paths(self):
# Static variables
self.CLEANFREQ = ['Every Startup', 'Every Day', 'Every Three Days',
'Weekly', 'Monthly']
self.LOGFILES = ['log', 'xbmc.old.log', 'kodi.log']
self.DEFAULTPLUGINS = ['metadata.album.universal',
'metadata.artists.universal',
'metadata.common.fanart.tv',
'metadata.common.imdb.com',
'metadata.common.musicbrainz.org',
'metadata.themoviedb.org',
'metadata.tvdb.com',
'service.xbmc.versioncheck']
self.USER_AGENT = ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/35.0.1916.153 Safari'
'/537.36 SE 2.X MetaSr 1.0')
self.DB_FILES = ['Addons', 'ADSP', 'Epg', 'MyMusic', 'MyVideos',
'Textures', 'TV', 'ViewModes']
self.EXCLUDE_FILES = ['onechannelcache.db', 'saltscache.db',
'saltscache.db-shm', 'saltscache.db-wal',
'saltshd.lite.db', 'saltshd.lite.db-shm',
'saltshd.lite.db-wal', 'queue.db',
'commoncache.db', 'access.log', 'trakt.db',
'video_cache.db', '.gitignore', '.DS_Store',
'Textures13.db', 'Thumbs.db']
self.XMLS = ['advancedsettings.xml', 'sources.xml', 'favourites.xml',
'profiles.xml', 'playercorefactory.xml', 'guisettings.xml']
self.MODURL = 'http://mirrors.kodi.tv/addons/matrix/'
self.MODURL2 = 'http://mirrors.kodi.tv/addons/jarvis/'
self.DEPENDENCIES = ['script.module.bottle', 'script.module.certifi',
'script.module.chardet', 'script.module.idna',
'script.module.requests', 'script.module.six',
'script.module.urllib3', 'script.module.web-pdb']
# Default special paths
self.XBMC = xbmcvfs.translatePath('special://xbmc/')
self.HOME = xbmcvfs.translatePath('special://home/')
self.TEMP = xbmcvfs.translatePath('special://temp/')
self.MASTERPROFILE = xbmcvfs.translatePath('special://masterprofile/')
self.PROFILE = xbmcvfs.translatePath('special://profile/')
self.SUBTITLES = xbmcvfs.translatePath('special://subtitles/')
self.USERDATA = xbmcvfs.translatePath('special://userdata/')
self.DATABASE = xbmcvfs.translatePath('special://database/')
self.THUMBNAILS = xbmcvfs.translatePath('special://thumbnails/')
self.RECORDINGS = xbmcvfs.translatePath('special://recordings/')
self.SCREENSHOTS = xbmcvfs.translatePath('special://screenshots/')
self.MUSICPLAYLISTS = xbmcvfs.translatePath('special://musicplaylists/')
self.VIDEOPLAYLISTS = xbmcvfs.translatePath('special://videoplaylists/')
self.CDRIPS = xbmcvfs.translatePath('special://cdrips/')
self.SKIN = xbmcvfs.translatePath('special://skin/')
self.LOGPATH = xbmcvfs.translatePath('special://logpath/')
# Constructed paths
self.ADDONS = os.path.join(self.HOME, 'addons')
self.KODIADDONS = os.path.join(self.XBMC, 'addons')
self.PLUGIN = os.path.join(self.ADDONS, self.ADDON_ID)
self.PACKAGES = os.path.join(self.ADDONS, 'packages')
self.ADDON_DATA = os.path.join(self.USERDATA, 'addon_data')
self.PLUGIN_DATA = os.path.join(self.ADDON_DATA, self.ADDON_ID)
self.QRCODES = os.path.join(self.PLUGIN_DATA, 'QRCodes')
self.SPEEDTEST = os.path.join(self.PLUGIN_DATA, 'SpeedTest')
self.ARCHIVE_CACHE = os.path.join(self.TEMP, 'archive_cache')
self.ART = os.path.join(self.PLUGIN, 'resources', 'art')
self.DEBRIDFOLD = os.path.join(self.PLUGIN_DATA, 'debrid')
self.TRAKTFOLD = os.path.join(self.PLUGIN_DATA, 'trakt')
self.LOGINFOLD = os.path.join(self.PLUGIN_DATA, 'login')
# File paths
self.ADVANCED = os.path.join(self.USERDATA, 'advancedsettings.xml')
self.SOURCES = os.path.join(self.USERDATA, 'sources.xml')
self.GUISETTINGS = os.path.join(self.USERDATA, 'guisettings.xml')
self.FAVOURITES = os.path.join(self.USERDATA, 'favourites.xml')
self.PROFILES = os.path.join(self.USERDATA, 'profiles.xml')
self.WIZLOG = os.path.join(self.PLUGIN_DATA, 'wizard.log')
self.WHITELIST = os.path.join(self.PLUGIN_DATA, 'whitelist.txt')
self.EXCLUDE_DIRS = [self.ADDON_PATH,
os.path.join(self.HOME, 'cache'),
os.path.join(self.HOME, 'system'),
os.path.join(self.HOME, 'temp'),
os.path.join(self.HOME, 'My_Builds'),
os.path.join(self.HOME, 'cdm'),
os.path.join(self.ADDONS, 'temp'),
os.path.join(self.ADDONS, 'packages'),
os.path.join(self.ADDONS, 'archive_cache'),
os.path.join(self.USERDATA, 'Thumbnails'),
os.path.join(self.USERDATA, 'peripheral_data'),
os.path.join(self.USERDATA, 'library')]
def init_settings(self):
self.FIRSTRUN = self.get_setting('first_install')
# Build variables
self.BUILDNAME = self.get_setting('buildname')
self.BUILDCHECK = self.get_setting('nextbuildcheck')
self.DEFAULTSKIN = self.get_setting('defaultskin')
self.DEFAULTNAME = self.get_setting('defaultskinname')
self.DEFAULTIGNORE = self.get_setting('defaultskinignore')
self.BUILDVERSION = self.get_setting('buildversion')
self.BUILDTHEME = self.get_setting('buildtheme')
self.BUILDLATEST = self.get_setting('latestversion')
self.DISABLEUPDATE = self.get_setting('disableupdate')
self.INSTALLED = self.get_setting('installed')
self.EXTRACT = self.get_setting('extract')
self.EXTERROR = self.get_setting('errors')
# View variables
self.SHOW19 = self.get_setting('show19')
self.SHOWADULT = self.get_setting('adult')
self.SEPARATE = self.get_setting('separate')
self.DEVELOPER = self.get_setting('developer')
# Auto-Clean variables
self.AUTOCLEANUP = self.get_setting('autoclean')
self.AUTOCACHE = self.get_setting('clearcache')
self.AUTOPACKAGES = self.get_setting('clearpackages')
self.AUTOTHUMBS = self.get_setting('clearthumbs')
self.AUTOFREQ = self.get_setting('autocleanfreq')
self.AUTOFREQ = int(float(self.AUTOFREQ)) if self.AUTOFREQ.isdigit() else 0
self.AUTONEXTRUN = self.get_setting('nextautocleanup')
# Video Cache variables
self.INCLUDEVIDEO = self.get_setting('includevideo')
self.INCLUDEALL = self.get_setting('includeall')
self.INCLUDEEXODUSREDUX = self.get_setting('includeexodusredux')
self.INCLUDEGAIA = self.get_setting('includegaia')
self.INCLUDESEREN = self.get_setting('includeseren')
self.INCLUDETHECREW = self.get_setting('includethecrew')
self.INCLUDEYODA = self.get_setting('includeyoda')
self.INCLUDEVENOM = self.get_setting('includevenom')
self.INCLUDENUMBERS = self.get_setting('includenumbers')
self.INCLUDESCRUBS = self.get_setting('includescrubs')
# Notification variables
self.NOTIFY = self.get_setting('notify')
self.NOTEID = self.get_setting('noteid')
self.NOTEDISMISS = self.get_setting('notedismiss')
# Save Data variables
self.TRAKTSAVE = self.get_setting('traktnextsave')
self.DEBRIDSAVE = self.get_setting('debridnextsave')
self.LOGINSAVE = self.get_setting('loginnextsave')
self.KEEPFAVS = self.get_setting('keepfavourites')
self.KEEPSOURCES = self.get_setting('keepsources')
self.KEEPPROFILES = self.get_setting('keepprofiles')
self.KEEPPLAYERCORE = self.get_setting('keepplayercore')
self.KEEPADVANCED = self.get_setting('keepadvanced')
self.KEEPGUISETTINGS = self.get_setting('keepguisettings')
self.KEEPREPOS = self.get_setting('keeprepos')
self.KEEPSUPER = self.get_setting('keepsuper')
self.KEEPWHITELIST = self.get_setting('keepwhitelist')
self.KEEPTRAKT = self.get_setting('keeptrakt')
self.KEEPDEBRID = self.get_setting('keepdebrid')
self.KEEPLOGIN = self.get_setting('keeplogin')
# Backup variables
self.BACKUPLOCATION = xbmcvfs.translatePath(self.get_setting('path') if not self.get_setting('path') == '' else self.HOME)
self.MYBUILDS = os.path.join(self.BACKUPLOCATION, 'My_Builds')
# Logging variables
self.DEBUGLEVEL = self.get_setting('debuglevel')
self.ENABLEWIZLOG = self.get_setting('wizardlog')
self.CLEANWIZLOG = self.get_setting('autocleanwiz')
self.CLEANWIZLOGBY = self.get_setting('wizlogcleanby')
self.CLEANDAYS = self.get_setting('wizlogcleandays')
self.CLEANSIZE = self.get_setting('wizlogcleansize')
self.CLEANLINES = self.get_setting('wizlogcleanlines')
self.MAXWIZSIZE = [100, 200, 300, 400, 500, 1000]
self.MAXWIZLINES = [100, 200, 300, 400, 500]
self.MAXWIZDATES = [1, 2, 3, 7]
self.KEEPOLDLOG = self.get_setting('oldlog') == 'true'
self.KEEPWIZLOG = self.get_setting('wizlog') == 'true'
self.KEEPCRASHLOG = self.get_setting('crashlog') == 'true'
self.LOGEMAIL = self.get_setting('email')
self.NEXTCLEANDATE = self.get_setting('nextwizcleandate')
def get_setting(self, key, id=xbmcaddon.Addon().getAddonInfo('id')):
try:
return xbmcaddon.Addon(id).getSetting(key)
except:
return False
def set_setting(self, key, value, id=xbmcaddon.Addon().getAddonInfo('id')):
try:
return xbmcaddon.Addon(id).setSetting(key, value)
except:
return False
def open_settings(self, id=None, cat=None, set=None, activate=False):
offset = [(100, 200), (-100, -80)]
if not id:
id = self.ADDON_ID
try:
xbmcaddon.Addon(id).openSettings()
except:
import logging
logging.log('Cannot open settings for {}'.format(id), level=xbmc.LOGERROR)
if int(self.KODIV) < 18:
use = 0
else:
use = 1
if cat is not None:
category_id = cat + offset[use][0]
xbmc.executebuiltin('SetFocus({})'.format(category_id))
if set is not None:
setting_id = set + offset[use][1]
xbmc.executebuiltin('SetFocus({})'.format(setting_id))
if activate:
xbmc.executebuiltin('SendClick({})'.format(setting_id))
def clear_setting(self, type):
build = {'buildname': '', 'buildversion': '', 'buildtheme': '',
'latestversion': '', 'nextbuildcheck': '2019-01-01 00:00:00'}
install = {'extract': '', 'errors': '', 'installed': ''}
default = {'defaultskinignore': 'false', 'defaultskin': '',
'defaultskinname': ''}
lookfeel = ['default.enablerssfeeds', 'default.font', 'default.rssedit',
'default.skincolors', 'default.skintheme',
'default.skinzoom', 'default.soundskin',
'default.startupwindow', 'default.stereostrength']
if type == 'build':
for element in build:
self.set_setting(element, build[element])
for element in install:
self.set_setting(element, install[element])
for element in default:
self.set_setting(element, default[element])
for element in lookfeel:
self.set_setting(element, '')
elif type == 'default':
for element in default:
self.set_setting(element, default[element])
for element in lookfeel:
self.set_setting(element, '')
elif type == 'install':
for element in install:
self.set_setting(element, install[element])
elif type == 'lookfeel':
for element in lookfeel:
self.set_setting(element, '')
else:
self.set_setting(type, '')
CONFIG = Config()
| 51.209497 | 130 | 0.598211 |
2d36cad24d564b91d0467bc3491891f3a3bfca9d | 569 | py | Python | todo/api.py | joyinsky/tododjango | a2449b08182646f00a5b54694ca8297d961f231e | [
"MIT"
] | 1 | 2015-12-26T21:20:02.000Z | 2015-12-26T21:20:02.000Z | todo/api.py | joyinsky/tododjango | a2449b08182646f00a5b54694ca8297d961f231e | [
"MIT"
] | null | null | null | todo/api.py | joyinsky/tododjango | a2449b08182646f00a5b54694ca8297d961f231e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from django.conf.urls import patterns, include, url
from rest_framework import viewsets, routers, serializers
from .models import Todo
class TodoSerializer(serializers.ModelSerializer):
    """DRF model serializer for Todo items."""
    class Meta:
        model = Todo
        # NOTE(review): no 'fields'/'exclude' declared; DRF 3.3+ requires one,
        # so this presumably targets an older DRF release -- confirm.
class TodoViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Todo items."""
    model = Todo  # 'model' shortcut is pre-DRF-3 style; newer DRF uses 'queryset'
    serializer_class = TodoSerializer
# Register the viewset so the router generates the standard list/detail routes.
router = routers.DefaultRouter()
router.register('todos', TodoViewSet)
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10; this
# module targets an older Django release.
urlpatterns = patterns('',
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
| 23.708333 | 82 | 0.738137 |
3b1ec2ca7bb8fa732580023379c58e88c6fd9f7c | 1,077 | py | Python | scripts/artifacts/pSettings.py | qubytelogic/abrignoni-ALEAPP | 53724d5b7cacf2fb57c4aa4d10a929b4260ffb71 | [
"MIT"
] | 2 | 2020-11-18T21:43:06.000Z | 2022-03-22T14:34:50.000Z | scripts/artifacts/pSettings.py | qubytelogic/abrignoni-ALEAPP | 53724d5b7cacf2fb57c4aa4d10a929b4260ffb71 | [
"MIT"
] | null | null | null | scripts/artifacts/pSettings.py | qubytelogic/abrignoni-ALEAPP | 53724d5b7cacf2fb57c4aa4d10a929b4260ffb71 | [
"MIT"
] | null | null | null | import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, is_platform_windows
def get_pSettings(files_found, report_folder, seeker):
    """Read the Android 'partner' settings table and emit an ALEAPP HTML report.

    files_found   -- paths matched for this artifact; only the first is used
                     and is expected to be a SQLite DB containing a 'partner'
                     table with (name, value) rows.
    report_folder -- destination directory for the generated HTML report.
    seeker        -- file seeker supplied by the framework (unused here, kept
                     for the artifact-plugin calling convention).
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    try:
        cursor = db.cursor()
        # Literal kept as-is; whitespace is irrelevant to SQLite.
        cursor.execute('''
    select
    name,
    value
    ''' + '''from partner
    ''')
        all_rows = cursor.fetchall()
        if all_rows:
            report = ArtifactHtmlReport('Partner Settings')
            report.start_artifact_report(report_folder, 'Partner Settings')
            report.add_script()
            data_headers = ('Name', 'Value')
            data_list = [(row[0], row[1]) for row in all_rows]
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()
        else:
            logfunc('No Partner Settings data available')
    finally:
        # Always release the SQLite handle, even if report generation raises
        # (previously db.close() was skipped on any exception).
        db.close()
| 28.342105 | 131 | 0.675952 |
7f754a69a374bf61babff7902c57e86c45f527ef | 17,993 | py | Python | odoo-13.0/addons/l10n_ch/models/account_invoice.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/l10n_ch/models/account_invoice.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/l10n_ch/models/account_invoice.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
from odoo.tools.float_utils import float_split_str
from odoo.tools.misc import mod10r
# Total length of an ISR/QRR structured reference (digits incl. check digit).
l10n_ch_ISR_NUMBER_LENGTH = 27
# Length the ISR-B customer-id prefix is zero-padded to.
l10n_ch_ISR_ID_NUM_LENGTH = 6
class AccountMove(models.Model):
    """Swiss localization (l10n_ch) extensions on invoices.

    Adds the fields and helpers needed to print Swiss ISR payment slips and
    QR-bills: the ISR subscription/reference numbers, the OCR optical line,
    validity flags, and the report actions triggered from the invoice form.
    """
    _inherit = 'account.move'
    # --- ISR / QR-bill related fields (all computed except l10n_ch_isr_sent) ---
    l10n_ch_isr_subscription = fields.Char(compute='_compute_l10n_ch_isr_subscription', help='ISR subscription number identifying your company or your bank to generate ISR.')
    l10n_ch_isr_subscription_formatted = fields.Char(compute='_compute_l10n_ch_isr_subscription', help="ISR subscription number your company or your bank, formated with '-' and without the padding zeros, to generate ISR report.")
    l10n_ch_isr_number = fields.Char(compute='_compute_l10n_ch_isr_number', store=True, help='The reference number associated with this invoice')
    l10n_ch_isr_number_spaced = fields.Char(compute='_compute_l10n_ch_isr_number_spaced', help="ISR number split in blocks of 5 characters (right-justified), to generate ISR report.")
    l10n_ch_isr_optical_line = fields.Char(compute="_compute_l10n_ch_isr_optical_line", help='Optical reading line, as it will be printed on ISR')
    l10n_ch_isr_valid = fields.Boolean(compute='_compute_l10n_ch_isr_valid', help='Boolean value. True iff all the data required to generate the ISR are present')
    l10n_ch_isr_sent = fields.Boolean(default=False, help="Boolean value telling whether or not the ISR corresponding to this invoice has already been printed or sent by mail.")
    l10n_ch_currency_name = fields.Char(related='currency_id.name', readonly=True, string="Currency Name", help="The name of this invoice's currency") #This field is used in the "invisible" condition field of the 'Print ISR' button.
    l10n_ch_isr_needs_fixing = fields.Boolean(compute="_compute_l10n_ch_isr_needs_fixing", help="Used to show a warning banner when the vendor bill needs a correct ISR payment reference. ")
    @api.depends('invoice_partner_bank_id.l10n_ch_isr_subscription_eur', 'invoice_partner_bank_id.l10n_ch_isr_subscription_chf')
    def _compute_l10n_ch_isr_subscription(self):
        """ Computes the ISR subscription identifying your company or the bank that allows to generate ISR. And formats it accordingly"""
        def _format_isr_subscription(isr_subscription):
            #format the isr as per specifications
            currency_code = isr_subscription[:2]
            middle_part = isr_subscription[2:-1]
            trailing_cipher = isr_subscription[-1]
            middle_part = re.sub('^0*', '', middle_part)
            return currency_code + '-' + middle_part + '-' + trailing_cipher
        def _format_isr_subscription_scanline(isr_subscription):
            # format the isr for scanline
            return isr_subscription[:2] + isr_subscription[2:-1].rjust(6, '0') + isr_subscription[-1:]
        for record in self:
            record.l10n_ch_isr_subscription = False
            record.l10n_ch_isr_subscription_formatted = False
            if record.invoice_partner_bank_id:
                if record.currency_id.name == 'EUR':
                    isr_subscription = record.invoice_partner_bank_id.l10n_ch_isr_subscription_eur
                elif record.currency_id.name == 'CHF':
                    isr_subscription = record.invoice_partner_bank_id.l10n_ch_isr_subscription_chf
                else:
                    #we don't format if in another currency as EUR or CHF
                    continue
                if isr_subscription:
                    isr_subscription = isr_subscription.replace("-", "") # In case the user put the -
                    record.l10n_ch_isr_subscription = _format_isr_subscription_scanline(isr_subscription)
                    record.l10n_ch_isr_subscription_formatted = _format_isr_subscription(isr_subscription)
    def _get_isrb_id_number(self):
        """Hook to fix the lack of proper field for ISR-B Customer ID"""
        # FIXME
        # replace l10n_ch_postal by an other field to not mix ISR-B
        # customer ID as it forbid the following validations on l10n_ch_postal
        # number for Vendor bank accounts:
        # - validation of format xx-yyyyy-c
        # - validation of checksum
        self.ensure_one()
        partner_bank = self.invoice_partner_bank_id
        return partner_bank.l10n_ch_postal or ''
    @api.depends('name', 'invoice_partner_bank_id.l10n_ch_postal')
    def _compute_l10n_ch_isr_number(self):
        """Generates the ISR or QRR reference
        An ISR references are 27 characters long.
        QRR is a recycling of ISR for QR-bills. Thus works the same.
        The invoice sequence number is used, removing each of its non-digit characters,
        and pad the unused spaces on the left of this number with zeros.
        The last digit is a checksum (mod10r).
        There are 2 types of references:
        * ISR (Postfinance)
          The reference is free but for the last
          digit which is a checksum.
          If shorter than 27 digits, it is filled with zeros on the left.
          e.g.
            120000000000234478943216899
            \________________________/|
                     1                2
            (1) 12000000000023447894321689 | reference
            (2) 9: control digit for identification number and reference
        * ISR-B (Indirect through a bank, requires a customer ID)
          In case of ISR-B The firsts digits (usually 6), contain the customer ID
          at the Bank of this ISR's issuer.
          The rest (usually 20 digits) is reserved for the reference plus the
          control digit.
          If the [customer ID] + [the reference] + [the control digit] is shorter
          than 27 digits, it is filled with zeros between the customer ID till
          the start of the reference.
          e.g.
            150001123456789012345678901
            \____/\__________________/|
               1           2          3
            (1) 150001 | id number of the customer (size may vary)
            (2) 12345678901234567890 | reference
            (3) 1: control digit for identification number and reference
        """
        for record in self:
            has_qriban = record.invoice_partner_bank_id and record.invoice_partner_bank_id._is_qr_iban() or False
            isr_subscription = record.l10n_ch_isr_subscription
            if (has_qriban or isr_subscription) and record.name:
                id_number = record._get_isrb_id_number()
                if id_number:
                    id_number = id_number.zfill(l10n_ch_ISR_ID_NUM_LENGTH)
                invoice_ref = re.sub('[^\d]', '', record.name)
                # keep only the last digits if it exceed boundaries
                full_len = len(id_number) + len(invoice_ref)
                ref_payload_len = l10n_ch_ISR_NUMBER_LENGTH - 1
                extra = full_len - ref_payload_len
                if extra > 0:
                    invoice_ref = invoice_ref[extra:]
                internal_ref = invoice_ref.zfill(ref_payload_len - len(id_number))
                record.l10n_ch_isr_number = mod10r(id_number + internal_ref)
            else:
                record.l10n_ch_isr_number = False
    @api.depends('l10n_ch_isr_number')
    def _compute_l10n_ch_isr_number_spaced(self):
        """Render the ISR number in right-justified blocks of 5 for the report."""
        def _space_isr_number(isr_number):
            to_treat = isr_number
            res = ''
            while to_treat:
                res = to_treat[-5:] + res
                to_treat = to_treat[:-5]
                if to_treat:
                    res = ' ' + res
            return res
        for record in self:
            if record.name and record.invoice_partner_bank_id and record.invoice_partner_bank_id.l10n_ch_postal:
                record.l10n_ch_isr_number_spaced = _space_isr_number(record.l10n_ch_isr_number)
            else:
                record.l10n_ch_isr_number_spaced = False
    def _get_l10n_ch_isr_optical_amount(self):
        """Prepare amount string for ISR optical line"""
        self.ensure_one()
        currency_code = None
        if self.currency_id.name == 'CHF':
            currency_code = '01'
        elif self.currency_id.name == 'EUR':
            currency_code = '03'
        units, cents = float_split_str(self.amount_residual, 2)
        amount_to_display = units + cents
        amount_ref = amount_to_display.zfill(10)
        optical_amount = currency_code + amount_ref
        optical_amount = mod10r(optical_amount)
        return optical_amount
    @api.depends(
        'currency_id.name', 'amount_residual', 'name',
        'invoice_partner_bank_id.l10n_ch_isr_subscription_eur',
        'invoice_partner_bank_id.l10n_ch_isr_subscription_chf')
    def _compute_l10n_ch_isr_optical_line(self):
        """ Compute the optical line to print on the bottom of the ISR.
        This line is read by an OCR.
        It's format is:
            amount>reference+ creditor>
        Where:
            - amount: currency and invoice amount
            - reference: ISR structured reference number
                - in case of ISR-B contains the Customer ID number
                - it can also contains a partner reference (of the debitor)
            - creditor: Subscription number of the creditor
        An optical line can have the 2 following formats:
        * ISR (Postfinance)
            0100003949753>120000000000234478943216899+ 010001628>
            |/\________/| \________________________/|  \_______/
            1     2    3            4               5      6
            (1) 01 | currency
            (2) 0000394975 | amount 3949.75
            (3) 4 | control digit for amount
            (5) 12000000000023447894321689 | reference
            (6) 9: control digit for identification number and reference
            (7) 010001628: subscription number (01-162-8)
        * ISR-B (Indirect through a bank, requires a customer ID)
            0100000494004>150001123456789012345678901+ 010234567>
            |/\________/| \____/\__________________/|  \_______/
            1     2    3    4           5           6      7
            (1) 01 | currency
            (2) 0000049400 | amount 494.00
            (3) 4 | control digit for amount
            (4) 150001 | id number of the customer (size may vary, usually 6 chars)
            (5) 12345678901234567890 | reference
            (6) 1: control digit for identification number and reference
            (7) 010234567: subscription number (01-23456-7)
        """
        for record in self:
            record.l10n_ch_isr_optical_line = ''
            if record.l10n_ch_isr_number and record.l10n_ch_isr_subscription and record.currency_id.name:
                # Final assembly (the space after the '+' is no typo, it stands in the specs.)
                record.l10n_ch_isr_optical_line = '{amount}>{reference}+ {creditor}>'.format(
                    amount=record._get_l10n_ch_isr_optical_amount(),
                    reference=record.l10n_ch_isr_number,
                    creditor=record.l10n_ch_isr_subscription,
                )
    @api.depends(
        'type', 'name', 'currency_id.name',
        'invoice_partner_bank_id.l10n_ch_isr_subscription_eur',
        'invoice_partner_bank_id.l10n_ch_isr_subscription_chf')
    def _compute_l10n_ch_isr_valid(self):
        """Returns True if all the data required to generate the ISR are present"""
        for record in self:
            record.l10n_ch_isr_valid = record.type == 'out_invoice' and\
                record.name and \
                record.l10n_ch_isr_subscription and \
                record.l10n_ch_currency_name in ['EUR', 'CHF']
    @api.depends('type', 'invoice_partner_bank_id', 'invoice_payment_ref')
    def _compute_l10n_ch_isr_needs_fixing(self):
        """Flag CH vendor bills whose bank requires an ISR/QR reference but
        whose payment reference is not a valid one (drives a warning banner)."""
        for inv in self:
            if inv.type == 'in_invoice' and inv.company_id.country_id.code == "CH":
                partner_bank = inv.invoice_partner_bank_id
                if partner_bank:
                    needs_isr_ref = partner_bank._is_qr_iban() or partner_bank._is_isr_issuer()
                else:
                    needs_isr_ref = False
                if needs_isr_ref and not inv._has_isr_ref():
                    inv.l10n_ch_isr_needs_fixing = True
                    continue
            inv.l10n_ch_isr_needs_fixing = False
    def _has_isr_ref(self):
        """Check if this invoice has a valid ISR reference (for Switzerland)
        e.g.
        12371
        000000000000000000000012371
        210000000003139471430009017
        21 00000 00003 13947 14300 09017
        """
        self.ensure_one()
        ref = self.invoice_payment_ref or self.ref
        if not ref:
            return False
        ref = ref.replace(' ', '')
        if re.match(r'^(\d{2,27})$', ref):
            # Valid iff the last digit is the recursive mod-10 checksum of the rest.
            return ref == mod10r(ref[:-1])
        return False
    def split_total_amount(self):
        """ Splits the total amount of this invoice in two parts, using the dot as
        a separator, and taking two precision digits (always displayed).
        These two parts are returned as the two elements of a tuple, as strings
        to print in the report.
        This function is needed on the model, as it must be called in the report
        template, which cannot reference static functions
        """
        return float_split_str(self.amount_residual, 2)
    def display_swiss_qr_code(self):
        """ DEPRECATED FUNCTION: not used anymore. QR-bills can now always
        be generated, with a dedicated report
        """
        self.ensure_one()
        qr_parameter = self.env['ir.config_parameter'].sudo().get_param('l10n_ch.print_qrcode')
        return self.partner_id.country_id.code == 'CH' and qr_parameter
    def isr_print(self):
        """ Triggered by the 'Print ISR' button.
        """
        self.ensure_one()
        if self.l10n_ch_isr_valid:
            self.l10n_ch_isr_sent = True
            return self.env.ref('l10n_ch.l10n_ch_isr_report').report_action(self)
        else:
            raise ValidationError(_("""You cannot generate an ISR yet.\n
                                   For this, you need to :\n
                                   - set a valid postal account number (or an IBAN referencing one) for your company\n
                                   - define its bank\n
                                   - associate this bank with a postal reference for the currency used in this invoice\n
                                   - fill the 'bank account' field of the invoice with the postal to be used to receive the related payment. A default account will be automatically set for all invoices created after you defined a postal account for your company."""))
    def can_generate_qr_bill(self):
        """ Returns True iff the invoice can be used to generate a QR-bill.
        """
        self.ensure_one()
        # First part of this condition is due to fix commit https://github.com/odoo/odoo/commit/719f087b1b5be5f1f276a0f87670830d073f6ef4
        # We do that to ensure not to try generating QR-bills for modules that haven't been
        # updated yet. Not doing that could crash when trying to send an invoice by mail,
        # as the QR report data haven't been loaded.
        # TODO: remove this in master
        return not self.env.ref('l10n_ch.l10n_ch_swissqr_template').inherit_id \
               and self.invoice_partner_bank_id.validate_swiss_code_arguments(self.invoice_partner_bank_id.currency_id, self.partner_id, self.invoice_payment_ref)
    def print_ch_qr_bill(self):
        """ Triggered by the 'Print QR-bill' button.
        """
        self.ensure_one()
        if not self.can_generate_qr_bill():
            raise UserError(_("Cannot generate the QR-bill. Please check you have configured the address of your company and debtor. If you are using a QR-IBAN, also check the invoice's payment reference is a QR reference."))
        self.l10n_ch_isr_sent = True
        return self.env.ref('l10n_ch.l10n_ch_qr_report').report_action(self)
    def action_invoice_sent(self):
        # OVERRIDE: ask message_post (via context) to mark the ISR as sent.
        rslt = super(AccountMove, self).action_invoice_sent()
        if self.l10n_ch_isr_valid:
            rslt['context']['l10n_ch_mark_isr_as_sent'] = True
        return rslt
    @api.returns('mail.message', lambda value: value.id)
    def message_post(self, **kwargs):
        # When the context flag set by action_invoice_sent() is present,
        # flag the not-yet-sent invoices' ISR as sent before posting.
        if self.env.context.get('l10n_ch_mark_isr_as_sent'):
            self.filtered(lambda inv: not inv.l10n_ch_isr_sent).write({'l10n_ch_isr_sent': True})
        return super(AccountMove, self.with_context(mail_post_autofollow=True)).message_post(**kwargs)
    def _get_invoice_reference_ch_invoice(self):
        """ This sets ISR reference number which is generated based on customer's `Bank Account` and set it as
        `Payment Reference` of the invoice when invoice's journal is using Switzerland's communication standard
        """
        self.ensure_one()
        return self.l10n_ch_isr_number
    def _get_invoice_reference_ch_partner(self):
        """ This sets ISR reference number which is generated based on customer's `Bank Account` and set it as
        `Payment Reference` of the invoice when invoice's journal is using Switzerland's communication standard
        """
        self.ensure_one()
        return self.l10n_ch_isr_number
    @api.model
    def space_qrr_reference(self, qrr_ref):
        """ Makes the provided QRR reference human-friendly, spacing its elements
        by blocks of 5 from right to left.
        """
        spaced_qrr_ref = ''
        i = len(qrr_ref) # i is the index after the last index to consider in substrings
        while i > 0:
            spaced_qrr_ref = qrr_ref[max(i-5, 0) : i] + ' ' + spaced_qrr_ref
            i -= 5
        return spaced_qrr_ref
| 47.981333 | 267 | 0.644473 |
f0a2241dd8ecada63ae43e0942dec04d3a306997 | 410 | py | Python | tests/test_database.py | ltalirz/mofchecker | ea0bc5a05cc951f40aeb42d54fd5e30af4db21d9 | [
"MIT"
] | 5 | 2020-11-03T19:26:14.000Z | 2021-11-11T12:20:18.000Z | tests/test_database.py | ltalirz/mofchecker | ea0bc5a05cc951f40aeb42d54fd5e30af4db21d9 | [
"MIT"
] | 114 | 2020-11-30T09:38:54.000Z | 2022-03-25T04:02:25.000Z | tests/test_database.py | ltalirz/mofchecker | ea0bc5a05cc951f40aeb42d54fd5e30af4db21d9 | [
"MIT"
] | 3 | 2020-12-08T10:44:31.000Z | 2021-08-23T15:00:25.000Z | # -*- coding: utf-8 -*-
"""Testing the database module"""
from mofchecker.database import MOFCheckerDB
def test_mofcheckerdb():
    """Check composition and graph-hash lookups against the packaged database."""
    db = MOFCheckerDB()
    # A known composition must match exactly one entry.
    matches = db.lookup_composition("H54 C90 N18 O12")
    assert len(matches) == 1
    # A known graph hash must match exactly one entry.
    matches = db.lookup_graph_hash("f75ab1f90320513bad85f2242e7c07ee")
    assert len(matches) == 1
    # A bogus graph hash must match nothing.
    matches = db.lookup_graph_hash("fdblkblabla")
    assert len(matches) == 0
| 34.166667 | 83 | 0.721951 |
a72e8dc1a12f9fba6c8b2f7176709a85531b074c | 1,128 | py | Python | adminmgr/media/code/A3/task2/BD_494_707_801_1130_Xc88Orx.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A3/task2/BD_494_707_801_1130_Xc88Orx.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A3/task2/BD_494_707_801_1130_Xc88Orx.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | import findspark
# Locate the local Spark installation before importing pyspark.
findspark.init()
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.types import StructType
from pyspark.sql.functions import col
spark = SparkSession\
        .builder\
        .appName("Task1_2")\
        .getOrCreate()
# Schema of the semicolon-separated tweet CSV files streamed from HDFS.
userSchema = StructType().add("ID", "string").add("Lang", "string").add("Date","string").add("Source","string").add("len","integer").add("likes","string").add("RTs","string").add("Hashtags",
"string").add("UserMentionNames","string").add("UserMentionID","string").add("Name","string").add("Place","string").add("Followers","string").add("Friends","string")
csvDF = spark.readStream.option("sep", ";").schema(userSchema).csv("hdfs://localhost:9000/stream")
# Followers/Friends ratio per user.
ratios = csvDF.select("name",(col("Followers")/col("Friends")).alias("FRRatio"))
new = ratios.groupBy("name","FRRatio").count()
# Top 5 users by ratio, printed to the console in complete mode.
sorted_new = new.orderBy("FRRatio",ascending=False).select("name","FRRatio")
var = sorted_new.limit(5)
query=var \
        .writeStream\
        .outputMode("complete")\
        .format("console")\
        .start()
query.awaitTermination(60)
query.stop() | 36.387097 | 190 | 0.712766 |
b7bfe1f80b25e96deab0a50edfd16895060b1d4c | 10,636 | py | Python | test/functional/wallet_backup.py | driyal/driyal | 7bcf75dc7e6df61243760071cff4b7bfd72f1535 | [
"MIT"
] | null | null | null | test/functional/wallet_backup.py | driyal/driyal | 7bcf75dc7e6df61243760071cff4b7bfd72f1535 | [
"MIT"
] | null | null | null | test/functional/wallet_backup.py | driyal/driyal | 7bcf75dc7e6df61243760071cff4b7bfd72f1535 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The DRiyal Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import DRiyalTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletBackupTest(DRiyalTestFramework):
    """Functional test exercising wallet backup/restore (see module docstring)."""
    def set_test_params(self):
        self.num_nodes = 4
        self.setup_clean_chain = True
        # nodes 1, 2,3 are spenders, let's give them a keypool=100
        # whitelist all peers to speed up tx relay / mempool sync
        self.extra_args = [
            ["-whitelist=noban@127.0.0.1", "-keypool=100"],
            ["-whitelist=noban@127.0.0.1", "-keypool=100"],
            ["-whitelist=noban@127.0.0.1", "-keypool=100"],
            ["-whitelist=noban@127.0.0.1"],
        ]
        self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self):
        # Star-ish topology: everyone connects to the miner (node 3).
        self.setup_nodes()
        self.connect_nodes(0, 3)
        self.connect_nodes(1, 3)
        self.connect_nodes(2, 3)
        self.connect_nodes(2, 0)
        self.sync_all()
    def one_send(self, from_node, to_address):
        # With probability 1/2, send a random 0.1-1.0 amount.
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)
    def do_one_round(self):
        # One round of random payments between the three spender nodes.
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()
        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)
        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        self.sync_mempools()
        self.generate(self.nodes[3], 1)
    # As above, this mirrors the original bash test.
    def start_three(self, args=()):
        # Restart the three spender nodes and re-establish the topology.
        self.start_node(0, self.extra_args[0] + list(args))
        self.start_node(1, self.extra_args[1] + list(args))
        self.start_node(2, self.extra_args[2] + list(args))
        self.connect_nodes(0, 3)
        self.connect_nodes(1, 3)
        self.connect_nodes(2, 3)
        self.connect_nodes(2, 0)
    def stop_three(self):
        self.stop_node(0)
        self.stop_node(1)
        self.stop_node(2)
    def erase_three(self):
        # Delete the three spenders' wallet databases.
        os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
        os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
        os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
    def restore_invalid_wallet(self):
        # restorewallet on a corrupt backup must fail and leave nothing behind.
        node = self.nodes[3]
        invalid_wallet_file = os.path.join(self.nodes[0].datadir, 'invalid_wallet_file.bak')
        open(invalid_wallet_file, 'a', encoding="utf8").write('invald wallet')
        wallet_name = "res0"
        not_created_wallet_file = os.path.join(node.datadir, self.chain, 'wallets', wallet_name)
        error_message = "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(not_created_wallet_file)
        assert_raises_rpc_error(-18, error_message, node.restorewallet, wallet_name, invalid_wallet_file)
        assert not os.path.exists(not_created_wallet_file)
    def restore_nonexistent_wallet(self):
        # restorewallet from a missing backup must fail and leave nothing behind.
        node = self.nodes[3]
        nonexistent_wallet_file = os.path.join(self.nodes[0].datadir, 'nonexistent_wallet.bak')
        wallet_name = "res0"
        assert_raises_rpc_error(-8, "Backup file does not exist", node.restorewallet, wallet_name, nonexistent_wallet_file)
        not_created_wallet_file = os.path.join(node.datadir, self.chain, 'wallets', wallet_name)
        assert not os.path.exists(not_created_wallet_file)
    def restore_wallet_existent_name(self):
        # restorewallet must refuse to overwrite an already-existing wallet dir.
        node = self.nodes[3]
        backup_file = os.path.join(self.nodes[0].datadir, 'wallet.bak')
        wallet_name = "res0"
        wallet_file = os.path.join(node.datadir, self.chain, 'wallets', wallet_name)
        error_message = "Failed to create database path '{}'. Database already exists.".format(wallet_file)
        assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file)
        assert os.path.exists(wallet_file)
    def init_three(self):
        self.init_wallet(node=0)
        self.init_wallet(node=1)
        self.init_wallet(node=2)
    def run_test(self):
        self.log.info("Generating initial blockchain")
        self.generate(self.nodes[0], 1)
        self.generate(self.nodes[1], 1)
        self.generate(self.nodes[2], 1)
        self.generate(self.nodes[3], COINBASE_MATURITY)
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)
        self.log.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for _ in range(5):
            self.do_one_round()
        self.log.info("Backing up")
        self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
        self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
        self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
        if not self.options.descriptors:
            self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
            self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
            self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
        self.log.info("More transactions")
        for _ in range(5):
            self.do_one_round()
        # Generate 101 more blocks, so any fees paid mature
        self.generate(self.nodes[3], COINBASE_MATURITY + 1)
        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3
        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)
        ##
        # Test restoring spender wallets from backups
        ##
        self.log.info("Restoring wallets on node 3 using backup files")
        self.restore_invalid_wallet()
        self.restore_nonexistent_wallet()
        backup_file_0 = os.path.join(self.nodes[0].datadir, 'wallet.bak')
        backup_file_1 = os.path.join(self.nodes[1].datadir, 'wallet.bak')
        backup_file_2 = os.path.join(self.nodes[2].datadir, 'wallet.bak')
        self.nodes[3].restorewallet("res0", backup_file_0)
        self.nodes[3].restorewallet("res1", backup_file_1)
        self.nodes[3].restorewallet("res2", backup_file_2)
        assert os.path.exists(os.path.join(self.nodes[3].datadir, self.chain, 'wallets', "res0"))
        assert os.path.exists(os.path.join(self.nodes[3].datadir, self.chain, 'wallets', "res1"))
        assert os.path.exists(os.path.join(self.nodes[3].datadir, self.chain, 'wallets', "res2"))
        res0_rpc = self.nodes[3].get_wallet_rpc("res0")
        res1_rpc = self.nodes[3].get_wallet_rpc("res1")
        res2_rpc = self.nodes[3].get_wallet_rpc("res2")
        assert_equal(res0_rpc.getbalance(), balance0)
        assert_equal(res1_rpc.getbalance(), balance1)
        assert_equal(res2_rpc.getbalance(), balance2)
        self.restore_wallet_existent_name()
        if not self.options.descriptors:
            self.log.info("Restoring using dumped wallet")
            self.stop_three()
            self.erase_three()
            #start node2 with no chain
            shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
            shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
            self.start_three(["-nowallet"])
            self.init_three()
            assert_equal(self.nodes[0].getbalance(), 0)
            assert_equal(self.nodes[1].getbalance(), 0)
            assert_equal(self.nodes[2].getbalance(), 0)
            self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
            self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
            self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
            self.sync_blocks()
            assert_equal(self.nodes[0].getbalance(), balance0)
            assert_equal(self.nodes[1].getbalance(), balance1)
            assert_equal(self.nodes[2].getbalance(), balance2)
        # Backup to source wallet file must fail
        sourcePaths = [
            os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
            os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', self.default_wallet_name, self.wallet_data_filename),
            os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name),
            os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
        for sourcePath in sourcePaths:
            assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
# Standard functional-test entry point.
if __name__ == '__main__':
    WalletBackupTest().main()
| 41.065637 | 159 | 0.662749 |
16d66178946c3f9076a98f20174b8e29879d4f0a | 8,128 | py | Python | Model/predictor-dl-model/predictor_dl_model/trainer/hparams.py | rangaswamymr/incubator-bluemarlin | 6cb60b2a41edc6509377f9eacb7660d199a9485b | [
"Apache-2.0"
] | null | null | null | Model/predictor-dl-model/predictor_dl_model/trainer/hparams.py | rangaswamymr/incubator-bluemarlin | 6cb60b2a41edc6509377f9eacb7660d199a9485b | [
"Apache-2.0"
] | null | null | null | Model/predictor-dl-model/predictor_dl_model/trainer/hparams.py | rangaswamymr/incubator-bluemarlin | 6cb60b2a41edc6509377f9eacb7660d199a9485b | [
"Apache-2.0"
] | null | null | null | # MIT License
# Copyright (c) 2018 Artur Suilin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow.contrib.training as training
import re
# Manually selected params
params_s32 = dict(
batch_size=300,
# train_window=380,
train_window=60,
train_skip_first=12,
rnn_depth=500,
use_attn=False,
attention_depth=60,
attention_heads=10,
encoder_readout_dropout=0.4768781146510798,
encoder_rnn_layers=1,
decoder_rnn_layers=1,
# decoder_state_dropout_type=['outside','outside'],
decoder_input_dropout=[1.0, 1.0, 1.0],
decoder_output_dropout=[0.975, 1.0, 1.0], # min 0.95
decoder_state_dropout=[0.99, 0.995, 0.995], # min 0.95
decoder_variational_dropout=[False, False, False],
# decoder_candidate_l2=[0.0, 0.0],
# decoder_gates_l2=[0.0, 0.0],
# decoder_state_dropout_type='outside',
# decoder_input_dropout=1.0,
# decoder_output_dropout=1.0,
# decoder_state_dropout=0.995, #0.98, # min 0.95
# decoder_variational_dropout=False,
decoder_candidate_l2=0.0,
decoder_gates_l2=0.0,
fingerprint_fc_dropout=0.8232342370695286,
gate_dropout=0.9967589439360334, # 0.9786,
gate_activation='none',
encoder_dropout=0.030490422531402273,
encoder_stability_loss=0.0, # max 100
encoder_activation_loss=1e-06, # max 0.001
decoder_stability_loss=0.0, # max 100
decoder_activation_loss=5e-06, # max 0.001
)
# Default incumbent on last smac3 search
params_definc = dict(
batch_size=10,
train_window=50,
train_skip_first=0,
rnn_depth=128,
use_attn=True,
attention_depth=64,
attention_heads=1,
encoder_readout_dropout=0.4768781146510798,
encoder_rnn_layers=1,
decoder_rnn_layers=1,
decoder_input_dropout=[1.0, 1.0, 1.0],
decoder_output_dropout=[1.0, 1.0, 1.0],
decoder_state_dropout=[0.995, 0.995, 0.995],
decoder_variational_dropout=[False, False, False],
decoder_candidate_l2=0.0,
decoder_gates_l2=0.0,
fingerprint_fc_dropout=0.8232342370695286,
gate_dropout=0.8961710392091516,
gate_activation='none',
encoder_dropout=0.030490422531402273,
encoder_stability_loss=0.0,
encoder_activation_loss=1e-05,
decoder_stability_loss=0.0,
decoder_activation_loss=5e-05,
)
# Found incumbent 0.35503610596060753
#"decoder_activation_loss='1e-05'", "decoder_output_dropout:0='1.0'", "decoder_rnn_layers='1'", "decoder_state_dropout:0='0.995'", "encoder_activation_loss='1e-05'", "encoder_rnn_layers='1'", "gate_dropout='0.7934826952854418'", "rnn_depth='243'", "train_window='135'", "use_attn='1'", "attention_depth='17'", "attention_heads='2'", "encoder_readout_dropout='0.7711751356092252'", "fingerprint_fc_dropout='0.9693950737901414'"
params_foundinc = dict(
batch_size=256,
train_window=135,
train_skip_first=0,
rnn_depth=243,
use_attn=True,
attention_depth=17,
attention_heads=2,
encoder_readout_dropout=0.7711751356092252,
encoder_rnn_layers=1,
decoder_rnn_layers=1,
decoder_input_dropout=[1.0, 1.0, 1.0],
decoder_output_dropout=[1.0, 1.0, 1.0],
decoder_state_dropout=[0.995, 0.995, 0.995],
decoder_variational_dropout=[False, False, False],
decoder_candidate_l2=0.0,
decoder_gates_l2=0.0,
fingerprint_fc_dropout=0.9693950737901414,
gate_dropout=0.7934826952854418,
gate_activation='none',
encoder_dropout=0.0,
encoder_stability_loss=0.0,
encoder_activation_loss=1e-05,
decoder_stability_loss=0.0,
decoder_activation_loss=1e-05,
)
# 81 on smac_run0 (0.3552077534247418 x 7)
# {'decoder_activation_loss': 0.0, 'decoder_output_dropout:0': 0.85, 'decoder_rnn_layers': 2, 'decoder_state_dropout:0': 0.995,
# 'encoder_activation_loss': 0.0, 'encoder_rnn_layers': 2, 'gate_dropout': 0.7665920904244501, 'rnn_depth': 201,
# 'train_window': 143, 'use_attn': 1, 'attention_depth': 17, 'attention_heads': 2, 'decoder_output_dropout:1': 0.975,
# 'decoder_state_dropout:1': 0.99, 'encoder_dropout': 0.0304904225, 'encoder_readout_dropout': 0.4444295965935664, 'fingerprint_fc_dropout': 0.26412480387331017}
params_inst81 = dict(
batch_size=256,
train_window=143,
train_skip_first=0,
rnn_depth=201,
use_attn=True,
attention_depth=17,
attention_heads=2,
encoder_readout_dropout=0.4444295965935664,
encoder_rnn_layers=2,
decoder_rnn_layers=2,
decoder_input_dropout=[1.0, 1.0, 1.0],
decoder_output_dropout=[0.85, 0.975, 1.0],
decoder_state_dropout=[0.995, 0.99, 0.995],
decoder_variational_dropout=[False, False, False],
decoder_candidate_l2=0.0,
decoder_gates_l2=0.0,
fingerprint_fc_dropout=0.26412480387331017,
gate_dropout=0.7665920904244501,
gate_activation='none',
encoder_dropout=0.0304904225,
encoder_stability_loss=0.0,
encoder_activation_loss=0.0,
decoder_stability_loss=0.0,
decoder_activation_loss=0.0,
)
# 121 on smac_run0 (0.3548671560628074 x 3)
# {'decoder_activation_loss': 1e-05, 'decoder_output_dropout:0': 0.975, 'decoder_rnn_layers': 2, 'decoder_state_dropout:0': 1.0,
# 'encoder_activation_loss': 1e-05, 'encoder_rnn_layers': 1, 'gate_dropout': 0.8631496699358483, 'rnn_depth': 122,
# 'train_window': 269, 'use_attn': 1, 'attention_depth': 29, 'attention_heads': 4, 'decoder_output_dropout:1': 0.975,
# 'decoder_state_dropout:1': 0.975, 'encoder_readout_dropout': 0.9835390239895767, 'fingerprint_fc_dropout': 0.7452161827064421}
# 83 on smac_run1 (0.355050330259362 x 7)
# {'decoder_activation_loss': 1e-06, 'decoder_output_dropout:0': 0.925, 'decoder_rnn_layers': 2, 'decoder_state_dropout:0': 0.98,
# 'encoder_activation_loss': 1e-06, 'encoder_rnn_layers': 1, 'gate_dropout': 0.9275441207192259, 'rnn_depth': 138,
# 'train_window': 84, 'use_attn': 1, 'attention_depth': 52, 'attention_heads': 2, 'decoder_output_dropout:1': 0.925,
# 'decoder_state_dropout:1': 0.98, 'encoder_readout_dropout': 0.6415488109353416, 'fingerprint_fc_dropout': 0.2581296623398802}
params_inst83 = dict(
batch_size=256,
train_window=84,
train_skip_first=0,
rnn_depth=138,
use_attn=True,
attention_depth=52,
attention_heads=2,
encoder_readout_dropout=0.6415488109353416,
encoder_rnn_layers=1,
decoder_rnn_layers=2,
decoder_input_dropout=[1.0, 1.0, 1.0],
decoder_output_dropout=[0.925, 0.925, 1.0],
decoder_state_dropout=[0.98, 0.98, 0.995],
decoder_variational_dropout=[False, False, False],
decoder_candidate_l2=0.0,
decoder_gates_l2=0.0,
fingerprint_fc_dropout=0.2581296623398802,
gate_dropout=0.9275441207192259,
gate_activation='none',
encoder_dropout=0.0,
encoder_stability_loss=0.0,
encoder_activation_loss=1e-06,
decoder_stability_loss=0.0,
decoder_activation_loss=1e-06,
)
def_params = params_s32
sets = {
's32': params_s32,
'definc': params_definc,
'foundinc': params_foundinc,
'inst81': params_inst81,
'inst83': params_inst83,
}
def build_hparams(params=def_params):
    """Wrap a parameter dict in a tf.contrib HParams object."""
    hparams = training.HParams(**params)
    return hparams
def build_from_set(set_name):
    """Look up a named parameter set in ``sets`` and build HParams from it."""
    chosen = sets[set_name]
    return build_hparams(chosen)
| 37.114155 | 426 | 0.735974 |
caebdb2f116cc17fa758e605bcf40081f3b8c9b1 | 4,361 | py | Python | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/ops/image_grad.py | JustinACoder/H22-GR3-UnrealAI | 361eb9ef1147f8a2991e5f98c4118cd823184adf | [
"MIT"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/python/ops/image_grad.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/tensorflow/python/ops/image_grad.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
  """The derivatives for nearest neighbor resizing.

  Args:
    op: The ResizeNearestNeighbor op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input and the output.
  """
  input_image = op.inputs[0]
  static_hw = input_image.get_shape()[1:3]
  # Prefer the statically known spatial dims; otherwise read them dynamically.
  if static_hw.is_fully_defined():
    spatial_shape = static_hw
  else:
    spatial_shape = array_ops.shape(input_image)[1:3]
  input_grad = gen_image_ops.resize_nearest_neighbor_grad(
      grad,
      spatial_shape,
      align_corners=op.get_attr("align_corners"))
  # The size input receives no gradient.
  return [input_grad, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
  """The derivatives for bilinear resizing.

  Args:
    op: The ResizeBilinear op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  align = op.get_attr("align_corners")
  image_grad = gen_image_ops.resize_bilinear_grad(grad, op.inputs[0],
                                                  align_corners=align)
  # The size input receives no gradient.
  return [image_grad, None]
@ops.RegisterGradient("ResizeBicubic")
def _ResizeBicubicGrad(op, grad):
  """The derivatives for bicubic resizing.

  Args:
    op: The ResizeBicubic op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # The gradient kernel only supports real floating point input images.
  differentiable = op.inputs[0].dtype in (dtypes.float32, dtypes.float64)
  if differentiable:
    image_grad = gen_image_ops.resize_bicubic_grad(
        grad, op.inputs[0], align_corners=op.get_attr("align_corners"))
  else:
    image_grad = None
  return [image_grad, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
  """The derivatives for crop_and_resize.

  We back-propagate to the image only when the input image tensor has floating
  point dtype but we always back-propagate to the input boxes tensor.

  Args:
    op: The CropAndResize op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input image, boxes, as well as the always-None
    gradients w.r.t. box_ind and crop_size.
  """
  image, boxes, box_ind = op.inputs[0], op.inputs[1], op.inputs[2]
  static_shape = image.get_shape()
  if static_shape.is_fully_defined():
    image_shape = static_shape.as_list()
  else:
    image_shape = array_ops.shape(image)

  image_grad = None
  if image.dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
    # pylint: disable=protected-access
    image_grad = gen_image_ops.crop_and_resize_grad_image(
        grad, boxes, box_ind, image_shape, T=op.get_attr("T"),
        method=op.get_attr("method"))
    # pylint: enable=protected-access

  # `image_grad` is the gradient to the input image pixels; it is implemented
  # for nearest neighbor and bilinear sampling. The gradient to the crop
  # boxes' coordinates is not well defined for nearest neighbor sampling, so
  # in practice the bilinear-derived gradient is used as an approximation.
  boxes_grad = gen_image_ops.crop_and_resize_grad_boxes(
      grad, image, boxes, box_ind)
  return [image_grad, boxes_grad, None, None]
| 34.338583 | 81 | 0.700069 |
7b7a62940f0b2a14bc7b02f4b89c1d68a324c753 | 1,439 | py | Python | dosagelib/helpers.py | Null000/dosage | 391313972cf6feda7db27c1a411e543af44581cd | [
"MIT"
] | 22 | 2015-01-16T23:58:44.000Z | 2022-02-02T03:32:19.000Z | dosagelib/helpers.py | Null000/dosage | 391313972cf6feda7db27c1a411e543af44581cd | [
"MIT"
] | 29 | 2015-01-03T10:07:38.000Z | 2020-03-12T13:33:10.000Z | dosagelib/helpers.py | Null000/dosage | 391313972cf6feda7db27c1a411e543af44581cd | [
"MIT"
] | 13 | 2015-01-26T09:18:56.000Z | 2022-03-17T09:40:42.000Z | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
from .util import getQueryParams
def queryNamer(paramName, usePageUrl=False):
    """Get name from URL query part."""
    @classmethod
    def _namer(cls, imageUrl, pageUrl):
        """Return the first value of paramName in the chosen URL's query."""
        if usePageUrl:
            url = pageUrl
        else:
            url = imageUrl
        params = getQueryParams(url)
        return params[paramName][0]
    return _namer
def regexNamer(regex, usePageUrl=False):
    """Get name from regular expression."""
    @classmethod
    def _namer(cls, imageUrl, pageUrl):
        """Return the first capture group of regex in the chosen URL."""
        target = pageUrl if usePageUrl else imageUrl
        match = regex.search(target)
        if match is not None:
            return match.group(1)
        # No match: fall through and return None, like the original.
    return _namer
def bounceStarter(url, nextSearch):
    """Get start URL by "bouncing" back and forth one time."""
    @classmethod
    def _starter(cls):
        """Follow cls.prevSearch once from url, then nextSearch once back."""
        firstPage = cls.getPage(url)
        prevUrl = cls.fetchUrl(url, firstPage, cls.prevSearch)
        prevPage = cls.getPage(prevUrl)
        return cls.fetchUrl(prevUrl, prevPage, nextSearch)
    return _starter
def indirectStarter(url, latestSearch):
    """Get start URL by indirection."""
    @classmethod
    def _starter(cls):
        """Fetch url and resolve latestSearch on it."""
        page = cls.getPage(url)
        return cls.fetchUrl(url, page, latestSearch)
    return _starter
0fec38e739a93c454566a7a7e829464435e594b4 | 317 | py | Python | ABC/abc001-abc050/abc029/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc001-abc050/abc029/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc001-abc050/abc029/c.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
from itertools import product
n = int(input())
# See:
# https://docs.python.jp/3/library/itertools.html#itertools.product
for j in sorted(list(product(['a', 'b', 'c'], repeat=n))):
print(''.join(map(str, j)))
if __name__ == '__main__':
main()
| 18.647059 | 71 | 0.567823 |
2e59f60d5c528e92ab0f5a083d203d76e67f8439 | 890 | py | Python | solver/core/function.py | robalar/A2_Project | 33413ea622963ad200077de9a45d061b044ef3f2 | [
"MIT"
] | null | null | null | solver/core/function.py | robalar/A2_Project | 33413ea622963ad200077de9a45d061b044ef3f2 | [
"MIT"
] | null | null | null | solver/core/function.py | robalar/A2_Project | 33413ea622963ad200077de9a45d061b044ef3f2 | [
"MIT"
] | null | null | null | from .expr import Expression
class Function(Expression):
    """Base class for named mathematical functions.

    Subclasses set ``name`` and ``nargs``, and may override ``derivative``
    and ``eval``.
    """

    # Printable name of the function; subclasses must override.
    name = None
    # Functions are not commutative in general.
    commutative = False
    # Number of arguments the function accepts.
    nargs = 1

    @property
    def derivative(self):
        """Derivative of the function; None means "not known"."""
        return None

    def __new__(cls, *args):
        """Create the function node, enforcing the declared arity.

        Raises:
            ValueError: if the number of args differs from ``cls.nargs``.
        """
        if len(args) > cls.nargs:
            raise ValueError('Too many args passed to {}'.format(cls.name))
        elif len(args) < cls.nargs:
            raise ValueError('Not enough args passed to {}'.format(cls.name))

        return super(Function, cls).__new__(cls, *args)

    def __hash__(self):
        return hash(self.name)

    @property
    def basic_string(self):
        """Plain-text rendering, e.g. ``sin(x)``."""
        return '{}({})'.format(self.name, ''.join([x.basic_string for x in self.args]))

    @property
    def latex(self):
        """LaTeX rendering, e.g. ``\\sin(x)``."""
        return '\\{}({})'.format(self.name, ''.join([x.latex for x in self.args]))

    @classmethod
    def eval(cls, args):
        """Try to evaluate/simplify the function applied to ``args``.

        The base implementation cannot simplify anything and returns None.
        (Bug fix: the original ``return self`` raised NameError, since
        ``self`` is undefined inside a classmethod.)
        """
        return None
041ccca3a14a02334dd99d15921cbaa9cbc39b3f | 857 | py | Python | opencensus/metrics_quickstart_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | opencensus/metrics_quickstart_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | opencensus/metrics_quickstart_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import metrics_quickstart
def test_quickstart_main(capsys):
    """Run the quickstart and verify it reports the fake latency."""
    metrics_quickstart.main()
    captured = capsys.readouterr()
    assert "Fake latency recorded" in captured.out
b04b2ce44ff529ecac2659dc04721691169f3bf7 | 1,342 | py | Python | examples/italic.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | null | null | null | examples/italic.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | 1 | 2020-08-07T13:01:29.000Z | 2020-08-07T13:01:29.000Z | examples/italic.py | tresoldi/malign | dad7f2585db3b12f2edbf587f591463aed7c98f5 | [
"MIT"
] | null | null | null | import os
import csv
from pathlib import Path
from collections import defaultdict
import catcoocc
RESOURCE_PATH = Path(os.path.realpath(__file__)).parent.parent / "resources"
def load_data(languages):
    """Load aligned cognate data for the given languages.

    Reads the NorthEuraLex Italic TSV and returns one entry per cognate set
    attested in *all* requested languages. Each entry is a list of alignments
    (lists of tokens) ordered like ``languages``.

    Args:
        languages: sequence of language names to keep, e.g. ["Italian", "Spanish"].

    Returns:
        List of lists of alignments, one inner list per cognate set.
    """
    filename = RESOURCE_PATH / "northeuralex_italic.tsv"

    # Map COGID -> {language: alignment tokens}.
    cogid = defaultdict(dict)
    with open(filename) as tsvfile:
        reader = csv.DictReader(tsvfile, delimiter="\t")
        for row in reader:
            cogid[row["COGID"]][row["LANGUAGE"]] = row["ALIGNMENT"].split(" ")

    # Keep only cognate sets attested in every requested language.
    # (Renamed from `filter`, which shadowed the builtin.)
    selected = {}
    for identifier, values in cogid.items():
        if all(lang in values for lang in languages):
            selected[identifier] = {lang: values[lang] for lang in languages}

    # Flatten to rows ordered like `languages`.
    return [[values[lang] for lang in languages]
            for values in selected.values()]
def main():
    """Score Italian/Spanish co-occurrences and print the sorted results."""
    data = load_data(["Italian", "Spanish"])

    # Collect co-occurrences and build the categorical scorer.
    cooccs = catcoocc.collect_cooccs(data)
    scorer = catcoocc.scorer.CatScorer(cooccs)

    # Theil's U, scaled to the (0, 10) range and then inverted.
    scores = scorer.theil_u()
    scores = catcoocc.scorer.scale_scorer(scores, nrange=(0, 10))
    scores = catcoocc.scorer.invert_scorer(scores)

    for pair in sorted(set(cooccs)):
        print(pair, scores[pair])
if __name__ == "__main__":
main()
| 24.851852 | 78 | 0.649031 |
bb9534a830f351f56cbb84e3496e9b2b288d8f1d | 6,361 | py | Python | pytorch_pretrained_bert/__main__.py | andrew4242/transformers | 93e9971c54e060e528adfdb0ebe149f2b284d660 | [
"Apache-2.0"
] | null | null | null | pytorch_pretrained_bert/__main__.py | andrew4242/transformers | 93e9971c54e060e528adfdb0ebe149f2b284d660 | [
"Apache-2.0"
] | null | null | null | pytorch_pretrained_bert/__main__.py | andrew4242/transformers | 93e9971c54e060e528adfdb0ebe149f2b284d660 | [
"Apache-2.0"
] | null | null | null | # coding: utf8
# Shared hint printed when importing the TensorFlow-based conversion code fails
# (the same text was previously duplicated in four branches).
_TF_IMPORT_ERROR = (
    "pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
    "In that case, it requires TensorFlow to be installed. Please see "
    "https://www.tensorflow.org/install/ for installation instructions.")


def _convert_bert(argv):
    """Convert a TF BERT checkpoint. argv is the full sys.argv list."""
    try:
        from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
    except ImportError:
        print(_TF_IMPORT_ERROR)
        raise
    if len(argv) != 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
    else:
        convert_tf_checkpoint_to_pytorch(argv[2], argv[3], argv[4])


def _convert_gpt(argv):
    """Convert an OpenAI GPT checkpoint folder."""
    from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
    if len(argv) < 4 or len(argv) > 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`")
    else:
        config = argv[4] if len(argv) == 5 else ""
        convert_openai_checkpoint_to_pytorch(argv[2], config, argv[3])


def _convert_transfo_xl(argv):
    """Convert a Transformer-XL checkpoint or dataset file."""
    try:
        from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
    except ImportError:
        print(_TF_IMPORT_ERROR)
        raise
    if len(argv) < 4 or len(argv) > 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert transfo_xl TF_CHECKPOINT/TF_DATASET_FILE PYTORCH_DUMP_OUTPUT [TF_CONFIG]`")
    else:
        # A path containing 'ckpt' is treated as a checkpoint, otherwise as a
        # dataset file; the unused slot is passed as an empty string.
        if 'ckpt' in argv[2].lower():
            checkpoint, dataset_file = argv[2], ""
        else:
            checkpoint, dataset_file = "", argv[2]
        config = argv[4] if len(argv) == 5 else ""
        convert_transfo_xl_checkpoint_to_pytorch(checkpoint, config, argv[3], dataset_file)


def _convert_gpt2(argv):
    """Convert a TF GPT-2 checkpoint."""
    try:
        from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
    except ImportError:
        print(_TF_IMPORT_ERROR)
        raise
    if len(argv) < 4 or len(argv) > 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [TF_CONFIG]`")
    else:
        config = argv[4] if len(argv) == 5 else ""
        convert_gpt2_checkpoint_to_pytorch(argv[2], config, argv[3])


def _convert_xlnet(argv):
    """Convert a TF XLNet checkpoint, optionally for a finetuning task."""
    try:
        from .convert_xlnet_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
    except ImportError:
        print(_TF_IMPORT_ERROR)
        raise
    if len(argv) < 5 or len(argv) > 6:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`")
    else:
        # Bug fix: the original only ran the conversion when a finetuning task
        # was given (the call was nested under `len(sys.argv) == 6`), so the
        # 5-argument form silently did nothing.
        finetuning_task = argv[5] if len(argv) == 6 else None
        convert_xlnet_checkpoint_to_pytorch(argv[2], argv[3], argv[4], finetuning_task)


def main():
    """Command-line entry point: convert TF/OpenAI checkpoints to PyTorch.

    Dispatches on sys.argv[1] (bert / gpt / transfo_xl / gpt2 / xlnet) and
    forwards the remaining positional arguments to the matching converter.
    """
    import sys
    _CONVERTERS = {
        "bert": _convert_bert,
        "gpt": _convert_gpt,
        "transfo_xl": _convert_transfo_xl,
        "gpt2": _convert_gpt2,
        "xlnet": _convert_xlnet,
    }
    if (len(sys.argv) < 4 or len(sys.argv) > 6) or sys.argv[1] not in _CONVERTERS:
        print(
            "Should be used as one of: \n"
            ">> `pytorch_pretrained_bert bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
            ">> `pytorch_pretrained_bert gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
            ">> `pytorch_pretrained_bert transfo_xl TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
            ">> `pytorch_pretrained_bert gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]` or \n"
            ">> `pytorch_pretrained_bert xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`")
    else:
        _CONVERTERS[sys.argv[1]](sys.argv)


if __name__ == '__main__':
    main()
| 55.313043 | 146 | 0.580412 |
2b2123bb33d6cc1124b1de4626c8d986a45acd86 | 20,306 | py | Python | Lander/Main_MoonLander.py | IrvKalb/pygwidgetsExamples | 0239edfc8553bf51a95d330107dfec48ff1581c1 | [
"BSD-3-Clause"
] | 1 | 2022-01-05T13:35:08.000Z | 2022-01-05T13:35:08.000Z | Lander/Main_MoonLander.py | IrvKalb/pygwidgetsExamples | 0239edfc8553bf51a95d330107dfec48ff1581c1 | [
"BSD-3-Clause"
] | null | null | null | Lander/Main_MoonLander.py | IrvKalb/pygwidgetsExamples | 0239edfc8553bf51a95d330107dfec48ff1581c1 | [
"BSD-3-Clause"
] | null | null | null | #Moon Lander, where we try to land in 1 of the 3 landing pads, and keep speed under 2 m/s
import pygame
from pygame.locals import *
import sys
import random
import pygwidgets
import pyghelpers
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
ORANGE = (255, 100, 0)
GREENISH_YELLOW = (150, 255, 0)
GREENISH = (10,200,125)
WINDOW_WIDTH = 1600
WINDOW_HEIGHT = 900
MAX_WIDTH = WINDOW_WIDTH - 100
MAX_HEIGHT = WINDOW_HEIGHT - 100
FRAMES_PER_SECOND = 30
MOON_START = WINDOW_HEIGHT - 50
LANDED_SAFE = 'landedSafe'
LANDED_WITH_INJURIES = 'landedWithInjuries'
LANDED_CRASHED = 'landedCrashed'
LANDED_NOT = 'landedNot'
GAME_FLYING = 'gameFlying'
GAME_JUST_LANDED = 'gameJustLanded'
GAME_SHOWING_DIALOG = 'gameShowingDialog'
# CONSTANTS
LANDER_MASS = 1
LANDER_FUEL = 100
SPEED_LIMIT = 2.00
PLANET_MASS = 7.347 * (10 ** 8)
PLANET_RADIUS = 1.079
GRAVITY_CONSTANT = 6.673 * (10 ** -11)
GRAVITY = (GRAVITY_CONSTANT * LANDER_MASS * PLANET_MASS)/ (PLANET_RADIUS ** 2)
class FuelGauge(object):
    """On-screen fuel gauge: a colored box, a text readout, and a level bar."""

    def __init__(self, window):
        self.window = window
        # Colored background box at the bottom of the window.
        self.outerRect = pygame.Rect(725, WINDOW_HEIGHT - 30, 150, 40)
        # Black border and white fill of the fuel level bar.
        self.thermometerRange = pygame.Rect(750 - 1, WINDOW_HEIGHT - 15, 102, 12)
        self.thermometer = pygame.Rect(750, WINDOW_HEIGHT - 14, 100, 10)
        self.fuelDisplay = pygwidgets.DisplayText(window, (750, WINDOW_HEIGHT - 28), '', \
                                fontSize=20, textColor=BLACK)

    def mDraw(self, fuelAmount):
        """Draw the gauge for the given fuel amount (0-100 scale)."""
        if fuelAmount < 0:
            fuelAmount = 0
        # Bar width is proportional to the remaining fuel.
        self.thermometer.width = int(fuelAmount)
        self.fuelDisplay.setValue('Fuel: ' + str(fuelAmount))

        # Bug fix: the color thresholds previously read the global
        # `landerFuel` instead of the fuelAmount parameter passed in.
        if fuelAmount >= 90:
            color = GREEN
        elif fuelAmount > 65:
            color = GREENISH_YELLOW
        elif fuelAmount > 45:
            color = YELLOW
        elif fuelAmount > 10:
            color = ORANGE
        else:
            color = RED
        pygame.draw.rect(self.window, color, self.outerRect, 0)
        self.fuelDisplay.draw()
        pygame.draw.rect(self.window, BLACK, self.thermometerRange, 0)
        if fuelAmount > 0:
            pygame.draw.rect(self.window, WHITE, self.thermometer, 0)
class LandingPad(object):
    """A single landing pad drawn on the moon surface."""

    HEIGHT = 10

    def __init__(self, window, landingBonus, color, minX, maxX, maxWidth, minWidth):
        self.window = window
        self.color = color
        self.landingBonus = landingBonus
        self.minX = minX
        self.maxX = maxX
        self.maxWidth = maxWidth
        self.minWidth = minWidth
        self.mReset()

    def mReset(self):
        """Pick a new random position and width for this pad."""
        newLeft = random.randrange(self.minX, self.maxX)
        newWidth = random.randrange(self.minWidth, self.maxWidth)
        self.rect = pygame.Rect(newLeft, MOON_START - 5, newWidth, LandingPad.HEIGHT)

    def mIntersects(self, landerRect):
        """Return (touchedPad, fullyInsidePad) for the given lander rect."""
        if not landerRect.colliderect(self.rect):
            # No contact at all, so the lander certainly did not land safely.
            return False, False
        landerRight = landerRect.left + landerRect.width
        padRight = self.rect.left + self.rect.width
        fullyInside = (landerRect.left >= self.rect.left) and (landerRight <= padRight)
        return True, fullyInside

    def mGetBonus(self):
        return self.landingBonus

    def mDraw(self):
        pygame.draw.ellipse(self.window, self.color, self.rect, 0)
class LandingPadMgr(object):
    """Owns the three landing pads and answers collision/bonus queries."""

    def __init__(self, window, minWidth):
        # Outer pads pay a 75-point bonus; the wide center pad pays 50
        # (its minimum width is padded for easier landings).
        self.landingPadList = [
            LandingPad(window, 75, YELLOW, 1, 300, 85, minWidth),
            LandingPad(window, 50, GREEN, 400, 1150, 160, minWidth + 15),
            LandingPad(window, 75, YELLOW, 1300, 1525, 85, minWidth),
        ]
        self.mReset()

    def mReset(self):
        """Re-randomize every pad's position and width."""
        for pad in self.landingPadList:
            pad.mReset()

    def mCheckForLanding(self, landerRect):
        """Return (bonus, insidePad); a bonus of 0 means no pad was touched."""
        for pad in self.landingPadList:
            touched, inside = pad.mIntersects(landerRect)
            if touched:
                return pad.mGetBonus(), inside
        return 0, False

    def mGetBonus(self, whichLandingPad):
        self.bonus = self.landingPadList[whichLandingPad].mGetBonus()
        return self.bonus

    def mDraw(self):
        for pad in self.landingPadList:
            pad.mDraw()
class StarField(object):
    """Random background stars plus an earth image, drawn above the moon."""

    def __init__(self, window):
        self.window = window
        self.mReset()
        self.earth = pygame.image.load("images/earth.jpg")

    def mReset(self):
        """Generate a new random set of stars and a new earth position."""
        self.starInfo = []
        nStars = random.randrange(25, 50)
        for i in range(nStars):
            x = random.randrange(0, WINDOW_WIDTH)
            y = random.randrange(0, MOON_START)  # keep stars above the ground
            radius = random.randrange(1, 4)
            self.starInfo.append(((x, y), radius))
        self.earthLeft = random.randrange(0, WINDOW_WIDTH)
        self.earthTop = random.randrange(0, MOON_START)

    def mDraw(self):
        for center, radius in self.starInfo:
            # Consistency fix: draw on self.window rather than relying on the
            # global `window` variable.
            pygame.draw.circle(self.window, WHITE, center, radius)
        self.window.blit(self.earth, (self.earthLeft, self.earthTop))
class Lander(object):
    """The player's lander: physics, fuel, engine sounds, and drawing."""

    STARTING_FUEL = 100
    MIN_X = 100
    MAX_X = WINDOW_WIDTH - 100
    START_Y = 2.0

    def __init__(self, window):
        self.window = window
        # One image per landing outcome.
        self.imageOK = pygame.image.load("images/lander.png")
        self.imageCrashed = pygame.image.load("images/landerCrashed.png")
        self.imageInjuries = pygame.image.load("images/landerInjuries.png")
        self.rect = self.imageOK.get_rect()

        # Jet flame overlays, drawn on top of the lander while thrusting.
        self.leftJet = pygame.image.load("images/jetLeft.png")
        self.rightJet = pygame.image.load("images/jetRight.png")
        self.mainJet = pygame.image.load("images/jetMain.png")

        # Arrows shown at the window edges when the lander is off-screen.
        self.leftArrow = pygame.image.load("images/arrowLeft.png")
        self.leftArrowLeft = 2
        self.leftArrowTop = WINDOW_HEIGHT / 2
        self.rightArrow = pygame.image.load("images/arrowRight.png")
        self.rightArrowLeft = WINDOW_WIDTH - 47  # so it shows on window
        self.rightArrowTop = WINDOW_HEIGHT / 2
        self.upArrow = pygame.image.load("images/arrowUp.png")
        self.upArrowLeft = WINDOW_WIDTH / 2
        self.upArrowTop = 2
        self.mReset()

    def mReset(self):
        """Start a new round: full fuel, random x position and sideways drift."""
        self.image = self.imageOK
        self.fuel = Lander.STARTING_FUEL
        # Need these as floating point, because speed increments are decimal values
        self.xSpeed = float(random.randrange(-5, 6))
        self.ySpeed = 0.0
        self.landerX = float(random.randrange(Lander.MIN_X, Lander.MAX_X))
        self.landerY = Lander.START_Y
        self.rect.left = int(self.landerX)
        self.rect.top = self.landerY
        self.landed = False
        self.leftEngineOn = False
        self.rightEngineOn = False
        self.mainEngineOn = False
        self.jetSoundPlaying = False
        # Bug fix: this was True, which triggered a spurious engineSound.stop()
        # and suppressed the engine sound if thrust was applied on the very
        # first frame of a round.
        self.engineSoundPlaying = False

    def mUpdate(self, moveLeftEngineOn, moveRightEngineOn, mainEngineOn):
        """Apply thrust/gravity for one frame; returns (rect, xSpeed, ySpeed)."""
        self.leftEngineOn = moveLeftEngineOn
        self.rightEngineOn = moveRightEngineOn
        self.mainEngineOn = mainEngineOn

        # Stop sounds whose engines were just released.
        if self.jetSoundPlaying and (not moveRightEngineOn) and (not moveLeftEngineOn):
            jetSound.stop()
            self.jetSoundPlaying = False
        if self.engineSoundPlaying and (not mainEngineOn):
            engineSound.stop()
            self.engineSoundPlaying = False

        if self.fuel > 0:
            # Side jets nudge horizontal speed; main engine fights gravity.
            if self.leftEngineOn:
                self.xSpeed = self.xSpeed - .1
                self.fuel = self.fuel - .25
                if not self.jetSoundPlaying:
                    jetSound.play(-1)  # loop continuously
                    self.jetSoundPlaying = True
            if self.rightEngineOn:
                self.xSpeed = self.xSpeed + .1
                self.fuel = self.fuel - .25
                if not self.jetSoundPlaying:
                    jetSound.play(-1)
                    self.jetSoundPlaying = True
            if self.mainEngineOn:
                self.ySpeed = self.ySpeed - .25
                self.fuel = self.fuel - 1
                if not self.engineSoundPlaying:
                    engineSound.play(-1)  # loop continuously
                    self.engineSoundPlaying = True
        else:
            # Out of fuel: all engines cut out.
            self.leftEngineOn = False
            self.rightEngineOn = False
            self.mainEngineOn = False

        self.landerX = self.landerX + self.xSpeed
        self.ySpeed = self.ySpeed + GRAVITY
        self.landerY = self.landerY + self.ySpeed
        self.rect.left = int(self.landerX)
        self.rect.top = int(self.landerY)

        return self.rect, self.xSpeed, self.ySpeed

    def mDown(self, landedState):
        """Called once on touchdown: set outcome image, stop engines and sounds."""
        if landedState == LANDED_CRASHED:
            self.image = self.imageCrashed
        elif landedState == LANDED_WITH_INJURIES:
            self.image = self.imageInjuries
        self.ySpeed = 0  # (was assigned twice; once is enough)
        self.leftEngineOn = False
        self.rightEngineOn = False
        self.mainEngineOn = False
        if self.jetSoundPlaying:
            jetSound.stop()
            self.jetSoundPlaying = False
        if self.engineSoundPlaying:
            engineSound.stop()
            self.engineSoundPlaying = False

    def mGetWidth(self):
        return self.rect.width

    def mDraw(self):
        """Draw off-screen arrows, the lander, and any active jet flames."""
        # Show arrows if off window
        if self.rect.left < 0:
            self.window.blit(self.leftArrow, (self.leftArrowLeft, self.leftArrowTop))
        if self.rect.left > WINDOW_WIDTH:
            self.window.blit(self.rightArrow, (self.rightArrowLeft, self.rightArrowTop))
        if self.rect.top < 0:
            self.window.blit(self.upArrow, (self.upArrowLeft, self.upArrowTop))

        # Draw the lander, and any jets that are on
        self.window.blit(self.image, self.rect)
        if self.leftEngineOn:
            self.window.blit(self.rightJet, (self.rect.left, self.rect.top))
        if self.rightEngineOn:
            self.window.blit(self.leftJet, (self.rect.left, self.rect.top))
        if self.mainEngineOn:
            self.window.blit(self.mainJet, (self.rect.left, self.rect.top))

    def mGetFuel(self):
        return self.fuel

    def mGetYSpeed(self):
        return self.ySpeed
#Initialize pygame
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
pygame.init()
window= pygame.display.set_mode([WINDOW_WIDTH, WINDOW_HEIGHT])
# NOTE(review): "monospaces" is likely a typo for "monospace"; pygame falls
# back to its default font for unknown names -- confirm the intended font.
gameFont = pygame.font.SysFont("monospaces", 30)
endFont = pygame.font.SysFont("monospaces", 60)
fuelFont = pygame.font.SysFont("monospaces", 20)
# Game objects: the lander, the landing pads (sized from the lander's
# width), the scrolling star field, and the fuel gauge.
oLander = Lander(window)
minLandingPadSize = oLander.mGetWidth() + 5
oLandingPadMgr = LandingPadMgr(window, minLandingPadSize)
oStarField = StarField(window)
oFuelGauge = FuelGauge(window)
gameState = GAME_FLYING
landedState = LANDED_NOT
# Score
score = 0
#The ground
moon = pygame.image.load("images/moon.png")
# NOTE(review): "austronaut" is a misspelling of "astronaut"; kept because
# the name is referenced later in the main loop.
austronaut = pygame.image.load("images/astronaut.png")
# HUD text fields along the ground line (MOON_START).
liveSpeedX = pygwidgets.DisplayText(window, (500, MOON_START + 20), '', \
fontSize=30, textColor=GREEN)
liveSpeedY = pygwidgets.DisplayText(window, (900, MOON_START + 20), '', \
fontSize=30, textColor=GREEN)
scoreText = pygwidgets.DisplayText(window, (10, MOON_START + 20), '', \
fontSize=30, textColor=GREEN)
countUpTimerField = pygwidgets.DisplayText(window, (WINDOW_WIDTH - 150, MOON_START + 20, ), '0', \
fontSize=30, textColor=GREEN)
# Stuff dealing with dialog box when one round of the game is done
messageDisplay = pygwidgets.DisplayText(window, (565, 290), '', \
fontSize=48, textColor=BLACK)
speedDisplay = pygwidgets.DisplayText(window, (565, 340), '', \
fontSize=48, textColor=BLACK)
newSoftestField = pygwidgets.DisplayText(window, (565, 390), '', \
fontSize=48, textColor=BLACK)
newFastestField = pygwidgets.DisplayText(window, (565, 440), '', \
fontSize=48, textColor=BLACK)
playAgainDisplay = pygwidgets.DisplayText(window, (690, 550), 'Play again?', \
fontSize=48, textColor=BLACK)
startButton = pygwidgets.TextButton(window, (750, 610), 'Start', width=60, height=30)
yesButton = pygwidgets.TextButton(window, (720, 610), 'Yes', width=60, height=30)
noButton = pygwidgets.TextButton(window, (820, 610), 'No', width=60, height=30)
DATA_FILE_PATH = 'LanderData.txt'
# Data file will be made of two entries - separated by a comma:
# <softestSoFar>,<fastestSoFar>
if pyghelpers.fileExists(DATA_FILE_PATH):
    savedDataString = pyghelpers.readFile(DATA_FILE_PATH)
    savedDataList = savedDataString.split(',')
    softestSoFar = float(savedDataList[0])
    fastestSoFar = float(savedDataList[1])
else: #first time, set some outrageous values
    softestSoFar = 10000.
    fastestSoFar = 10000.
oCountUpTimer = pyghelpers.CountUpTimer()
clock = pygame.time.Clock() # set the speed (frames per second)
introwindow = pygame.image.load("images/introscreen.png")
# Looping sounds (jet/engine) are started with play(-1) elsewhere;
# the three landing sounds are one-shots.
jetSound = pygame.mixer.Sound('sounds/jet.wav')
engineSound = pygame.mixer.Sound('sounds/engine.wav')
landedSafelySound = pygame.mixer.Sound('sounds/landedSafely.wav')
crashMinorSound = pygame.mixer.Sound('sounds/crashMinor.wav')
crashMajorSound = pygame.mixer.Sound('sounds/crashMajor.wav')
## Intro window:
waitingToPressStart = True
# Title screen: pump events until Start is clicked, redrawing the scene
# (star field, moon, pads, lander) behind the intro image every frame.
while waitingToPressStart:
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            sys.exit()
        if startButton.handleEvent(event):
            # Start clicked: reset all game objects and begin timing.
            oLander.mReset()
            oLandingPadMgr.mReset()
            oStarField.mReset()
            gameState = GAME_FLYING
            landedState = LANDED_NOT
            oCountUpTimer.start()
            waitingToPressStart = False # start up
    # Draw star field, moon, control text, landing pads, lander, and control text
    oStarField.mDraw()
    window.blit(moon, (0, MOON_START))
    landerFuel = oLander.mGetFuel()
    oFuelGauge.mDraw(landerFuel)
    scoreText.draw()
    oLandingPadMgr.mDraw()
    oLander.mDraw()
    window.blit(introwindow, (400, 200))
    startButton.draw()
    # update the window
    pygame.display.update()
    # slow things down a bit
    clock.tick(FRAMES_PER_SECOND) # make PyGame wait the correct amount
## MAIN PLAYING LOOP
# 4 - Loop forever
# Per frame: handle events, update physics, detect landings, update HUD,
# redraw everything, and throttle to FRAMES_PER_SECOND.
while True:
    # 5 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            sys.exit()
        if gameState == GAME_SHOWING_DIALOG:
            # End-of-round dialog: only the Yes/No buttons are live.
            if yesButton.handleEvent(event):
                oLander.mReset()
                oLandingPadMgr.mReset()
                oStarField.mReset()
                gameState = GAME_FLYING
                landedState = LANDED_NOT
                oCountUpTimer.start()
            if noButton.handleEvent(event):
                pygame.quit()
                sys.exit()
    else: # not landed
        # NOTE(review): this is a for/else -- since the event loop above has
        # no `break`, this block runs every frame regardless of gameState,
        # including while the dialog is showing. Confirm that updating the
        # physics during the dialog is intended.
        #Moving the lander
        keyPressedList = pygame.key.get_pressed()
        # Fix: use fully qualified pygame.K_* constants for all three keys
        # (the original mixed pygame.K_LEFT with bare K_RIGHT / K_UP).
        landerRect, landerXSpeed, landerYSpeed = oLander.mUpdate(keyPressedList[pygame.K_LEFT], keyPressedList[pygame.K_RIGHT], keyPressedList[pygame.K_UP])
        landerXSpeed = round(landerXSpeed, 4)
        landerYSpeed = round(landerYSpeed, 4)
        bonusForLandingOnPad, landedInsidePad = oLandingPadMgr.mCheckForLanding(landerRect) # 0 if not landed, otherwise bonus
        if bonusForLandingOnPad > 0:
            gameState = GAME_JUST_LANDED
            # Slow enough to survive; on-pad earns the bonus, off-pad injures.
            if landerYSpeed < SPEED_LIMIT:
                if landedInsidePad:
                    landedState = LANDED_SAFE
                    score = score + bonusForLandingOnPad
                else:
                    landedState = LANDED_WITH_INJURIES
            else:
                landedState = LANDED_CRASHED
                score = score - 10
            oLander.mDown(landedState)
            oCountUpTimer.stop()
        if gameState == GAME_FLYING: #check for collision on moon
            # Hitting the ground anywhere off a pad is always a crash.
            if (landerRect.top + landerRect.height) > MOON_START:
                gameState = GAME_JUST_LANDED
                score = score - 10
                landedState = LANDED_CRASHED
                oLander.mDown(landedState)
                oCountUpTimer.stop()
    if gameState == GAME_FLYING:
        liveSpeedX.setValue('Hor. Speed: ' + str(landerXSpeed) + ' m/s')
        liveSpeedY.setValue('Vert. Speed: ' + str(landerYSpeed) + 'm/s')
    if gameState == GAME_JUST_LANDED: # only runs once
        liveSpeedX.setValue('')
        liveSpeedY.setValue('')
        if landedState == LANDED_SAFE:
            messageDisplay.setValue('Safe landing!')
            landedSafelySound.play()
        elif landedState == LANDED_WITH_INJURIES:
            messageDisplay.setValue('Landed, but there are injuries')
            crashMinorSound.play()
        else: # LANDED_CRASHED
            messageDisplay.setValue('Crashed! No survivors')
            crashMajorSound.play()
        speedDisplay.setValue('Landing speed: ' + str(landerYSpeed))
        writeDataFile = False
        newSoftestField.setValue('')
        newFastestField.setValue('')
        # Records only count for on-pad, non-crash landings.
        if (bonusForLandingOnPad > 0) and (landedState != LANDED_CRASHED):
            if landerYSpeed < softestSoFar:
                softestSoFar = landerYSpeed
                newSoftestField.setValue('New softest landing: ' + str(softestSoFar))
                writeDataFile = True
            seconds = oCountUpTimer.getTime()
            if seconds < fastestSoFar:
                fastestSoFar = seconds
                newFastestField.setValue('New fastest landing: ' + str(fastestSoFar))
                writeDataFile = True
            if writeDataFile:
                dataList = [str(softestSoFar), str(fastestSoFar)]
                dataString = ','.join(dataList)
                print('Writing file')
                pyghelpers.writeFile(DATA_FILE_PATH, dataString)
        gameState = GAME_SHOWING_DIALOG
    scoreText.setValue("Score: " + str(score))
    sec = oCountUpTimer.getTime() # ask the clock object for the elapsed time
    countUpTimerField.setValue('Time: ' + str(sec)) # put that into a text field
    # 6 - clear the window before drawing it again
    window.fill(BLACK)
    # Draw star field, moon, control text, landing pads, lander, and control text
    oStarField.mDraw()
    window.blit(moon, (0, MOON_START))
    landerFuel = oLander.mGetFuel()
    oFuelGauge.mDraw(landerFuel)
    liveSpeedX.draw()
    liveSpeedY.draw()
    scoreText.draw()
    countUpTimerField.draw()
    oLandingPadMgr.mDraw()
    oLander.mDraw()
    if gameState == GAME_SHOWING_DIALOG:
        # White dialog panel; show the astronaut sprite only for safe landings.
        pygame.draw.rect(window, WHITE, (400, 200, 800, 500))
        if landedState == LANDED_SAFE:
            window.blit(austronaut, (oLander.rect.left + 50, MOON_START - 18))
            newSoftestField.draw()
            newFastestField.draw()
        elif landedState == LANDED_WITH_INJURIES:
            newSoftestField.draw()
            newFastestField.draw()
        else: # LANDED_CRASHED
            pass # nothing
        messageDisplay.draw()
        speedDisplay.draw()
        playAgainDisplay.draw()
        yesButton.draw()
        noButton.draw()
    # 8 - update the window
    pygame.display.update()
    # 9 slow things down a bit
    clock.tick(FRAMES_PER_SECOND) # make PyGame wait the correct amount
| 33.343186 | 142 | 0.616025 |
6c82451086b41e84ad4586d411ffc25c3e3a2486 | 5,557 | py | Python | bird/mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 11 | 2015-02-02T21:41:41.000Z | 2022-03-12T17:23:01.000Z | bird/mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 1 | 2021-01-03T20:45:36.000Z | 2021-01-04T16:02:49.000Z | bird/mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 5 | 2016-04-06T20:42:27.000Z | 2021-01-03T20:42:53.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Manuel Moussallam <manuel.moussallam@gmail.com>
#
# License: BSD (3-clause)
import math
import numpy as np
from scipy import linalg
from scipy.fftpack import fft, ifft
import six
def _framing(a, L):
shape = a.shape[:-1] + (a.shape[-1] - L + 1, L)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape,
strides=strides)[::L // 2].T.copy()
def mdct_waveform(scale, freq_bin):
L = float(scale)
K = L / 2.0
fact = math.sqrt(2.0 / K)
const_fact = (np.pi / K) * (float(freq_bin) + 0.5)
const_offset = (L + 1.0) / 2.0
f = np.pi / L
i = np.arange(scale, dtype=np.float)
wf = (fact * np.sin(f * (i + 0.5)) *
np.cos(const_fact * ((i - K / 2.0) + const_offset)))
return wf / linalg.norm(wf)
def mdct(x, L):
"""Modified Discrete Cosine Transform (MDCT)
Returns the Modified Discrete Cosine Transform with fixed
window size L of the signal x.
The window is based on a sine window.
Parameters
----------
x : ndarray, shape (N,)
The signal
L : int
The window length
Returns
-------
y : ndarray, shape (L/2, 2 * N / L)
The MDCT coefficients
See also
--------
imdct
"""
x = np.asarray(x, dtype=np.float)
N = x.size
# Number of frequency channels
K = L // 2
# Test length
if N % K != 0:
raise RuntimeError('Input length must be a multiple of the half of '
'the window size')
# Pad edges with zeros
xx = np.zeros(L // 4 + N + L // 4)
xx[L // 4:-L // 4] = x
x = xx
del xx
# Number of frames
P = N // K
if P < 2:
raise ValueError('Signal too short')
# Framing
x = _framing(x, L)
# Windowing
aL = np.arange(L, dtype=np.float)
w_long = np.sin((np.pi / L) * (aL + 0.5))
w_edge_L = w_long.copy()
w_edge_L[:L // 4] = 0.
w_edge_L[L // 4:L // 2] = 1.
w_edge_R = w_long.copy()
w_edge_R[L // 2:L // 2 + L // 4] = 1.
w_edge_R[L // 2 + L // 4:] = 0.
x[:, 0] *= w_edge_L
x[:, 1:-1] *= w_long[:, None]
x[:, -1] *= w_edge_R
# Pre-twiddle
x = x.astype(np.complex)
x *= np.exp((-1j * np.pi / L) * aL)[:, None]
# FFT
y = fft(x, axis=0)
# Post-twiddle
y = y[:L // 2, :]
y *= np.exp((-1j * np.pi * (L // 2 + 1.) / L)
* (0.5 + aL[:L // 2]))[:, None]
# Real part and scaling
y = math.sqrt(2. / K) * np.real(y)
return y
def imdct(y, L):
    """Inverse Modified Discrete Cosine Transform (MDCT)

    Returns the Inverse Modified Discrete Cosine Transform
    with fixed window size L of the vector of coefficients y.

    The window is based on a sine window.

    Parameters
    ----------
    y : ndarray, shape (L/2, 2 * N / L)
        The MDCT coefficients
    L : int
        The window length

    Returns
    -------
    x : ndarray, shape (N,)
        The reconstructed signal

    See also
    --------
    mdct
    """
    # Signal length
    N = y.size
    # Number of frequency channels
    K = L // 2
    # Test length
    if N % K != 0:
        raise ValueError('Input length must be a multiple of the half of '
                         'the window size')
    # Number of frames
    P = N // K
    if P < 2:
        raise ValueError('Signal too short')
    # Reshape: zero-pad the K coefficient bins up to the full window length.
    # NOTE: `np.float` was removed in NumPy 1.24; float64 is what the alias
    # always meant.
    temp = y
    y = np.zeros((L, P), dtype=np.float64)
    y[:K, :] = temp
    del temp
    # Pre-twiddle
    aL = np.arange(L, dtype=np.float64)
    y = y * np.exp((1j * np.pi * (L / 2. + 1.) / L) * aL)[:, None]
    # IFFT
    x = ifft(y, axis=0)
    # Post-twiddle
    x *= np.exp((1j * np.pi / L) * (aL + (L / 2. + 1.) / 2.))[:, None]
    # Windowing: sine window in the middle, flattened edge windows at the
    # signal boundaries (mirrors the analysis windows in `mdct`).
    w_long = np.sin((np.pi / L) * (aL + 0.5))
    w_edge_L = w_long.copy()
    w_edge_L[:L // 4] = 0.
    w_edge_L[L // 4:L // 2] = 1.
    w_edge_R = w_long.copy()
    w_edge_R[L // 2:L // 2 + L // 4] = 1.
    w_edge_R[L // 2 + L // 4:L] = 0.
    x[:, 0] *= w_edge_L
    x[:, 1:-1] *= w_long[:, None]
    x[:, -1] *= w_edge_R
    # Real part and scaling
    x = math.sqrt(2. / K) * L * np.real(x)
    # Overlap and add consecutive frames (hop = K samples).
    def overlap_add(y, x):
        z = np.concatenate((y, np.zeros((K,))))
        z[-2 * K:] += x
        return z
    # functools.reduce is what six.moves.reduce resolved to; available on
    # both Python 2.6+ and 3.x, so the six shim is unnecessary here.
    x = functools.reduce(overlap_add, [x[:, i] for i in range(x.shape[1])])
    # Cut edges (the padding added by `mdct`).
    x = x[K // 2:-K // 2].copy()
    return x
class MDCT(object):
    """Modified Discrete Cosine Transform (MDCT)

    Supports multiple MDCT dictionaries.

    Parameters
    ----------
    sizes : list of int
        The sizes of MDCT windows e.g. [256, 1024]
    """
    def __init__(self, sizes):
        self.sizes = sizes

    def _dot(self, y):
        """Synthesize one signal from concatenated per-window coefficients."""
        cnt = 0
        # Use floor division: under Python 3 the original `/` yielded a
        # float, which broke np.zeros(N) and the slice arithmetic below.
        N = y.size // len(self.sizes)
        x = np.zeros(N)
        for L in self.sizes:
            this_y = y[cnt:cnt + N]
            # Skip all-zero coefficient blocks: their imdct is all zeros.
            if (np.count_nonzero(this_y) > 0):
                this_x = imdct(np.reshape(this_y, (L // 2, -1)), L)
                x += this_x
            cnt += N
        return x

    def dot(self, y):
        """Inverse transform: coefficients -> signal(s).

        Accepts a single coefficient vector or a 2-D batch (one row each).
        """
        if y.ndim == 1:
            return self._dot(y)
        else:
            return np.array([self._dot(this_y) for this_y in y])

    def _doth(self, x):
        """Forward transform of one signal: concatenated MDCT coefficients."""
        return np.concatenate([mdct(x, L).ravel() for L in self.sizes])

    def doth(self, x):
        """Forward transform: signal(s) -> concatenated coefficients."""
        if x.ndim == 1:
            return self._doth(x)
        else:
            return np.array([self._doth(this_x) for this_x in x])
135d86aae6a0f39fefe90508b4c01d4489506412 | 1,129 | py | Python | pro1.py | griledchicken/VAMPY-2017-CS | 1bc71734751850b580b481eac51c5c235d0ca9e2 | [
"MIT"
] | null | null | null | pro1.py | griledchicken/VAMPY-2017-CS | 1bc71734751850b580b481eac51c5c235d0ca9e2 | [
"MIT"
] | null | null | null | pro1.py | griledchicken/VAMPY-2017-CS | 1bc71734751850b580b481eac51c5c235d0ca9e2 | [
"MIT"
] | null | null | null | import turtle as t
def triangle(size):
    """Draw a 14-segment triangular spiral (despite the name, not a triangle).

    Segment ``i`` (1-based) is ``size * i`` turtle steps long, with a
    120-degree right turn between segments. Replaces 28 copy-pasted
    forward/right calls with a loop; the original did not turn after the
    final segment, so neither does this version.
    """
    for segment in range(1, 15):
        t.forward(size * segment)
        if segment < 14:
            t.right(120)
def dog(pup):
    """Draw a simple dog outline scaled by `pup`.

    Traces exactly the same path as the original hand-written call
    sequence, expressed as a data-driven list of (operation, amount) steps.
    """
    t.speed(1)
    path = [
        ("fd", pup * 3), ("lt", 90), ("fd", pup), ("rt", 30),
        ("fd", pup / 3), ("rt", 120), ("fd", pup / 3), ("lt", 60),
        ("fd", pup), ("rt", 90), ("fd", pup / 2), ("rt", 90),
        ("fd", pup / 1.5), ("lt", 90), ("fd", 2 * pup), ("rt", 90),
        ("fd", pup / 3), ("rt", 90), ("fd", pup), ("lt", 90),
        ("fd", pup * 2), ("lt", 90), ("fd", pup), ("rt", 90),
        ("fd", pup / 3), ("rt", 90), ("fd", pup * 1.3), ("lt", 90),
        ("fd", pup), ("rt", 90), ("fd", pup / 5),
    ]
    moves = {"fd": t.forward, "lt": t.left, "rt": t.right}
    for op, amount in path:
        moves[op](amount)
def Circly(size):
    """Draw a spirograph: 100 circles, each rotated 3.6 degrees from the last.

    Each circle is approximated by 180 chords of `size` steps with a
    2-degree right turn between them (180 * 2 = 360 degrees), and
    100 * 3.6 = 360 degrees spreads the circles evenly around a full turn.
    """
    # Hoisted out of the loops: the original reset the speed on every chord.
    # Values > 10 all mean "fastest", so one call is equivalent.
    t.speed(1000)
    # The original reused `i` for both loops, shadowing the outer index.
    for _circle in range(100):
        for _chord in range(180):
            t.forward(size)
            t.right(2)
        t.right(3.6)
| 13.768293 | 23 | 0.647476 |
ee052f7e2f4cdb2b89ef700d4c610582802d401f | 1,868 | py | Python | ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py | mail2nsrajesh/ironic-inspector | 7fa31ac6be2c1a03a0b2303b01e363cab14794a5 | [
"Apache-2.0"
] | null | null | null | ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py | mail2nsrajesh/ironic-inspector | 7fa31ac6be2c1a03a0b2303b01e363cab14794a5 | [
"Apache-2.0"
] | null | null | null | ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py | mail2nsrajesh/ironic-inspector | 7fa31ac6be2c1a03a0b2303b01e363cab14794a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""inital_db_schema
Revision ID: 578f84f38d
Revises:
Create Date: 2015-09-15 14:52:22.448944
"""
# revision identifiers, used by Alembic.
revision = '578f84f38d'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial inspector schema: nodes, attributes, options.

    ``attributes`` and ``options`` both reference ``nodes.uuid``;
    ``attributes`` uses a composite (name, value) primary key and
    ``options`` a composite (uuid, name) primary key.
    """
    # NOTE(review): the mysql_* kwargs are passed through as MySQL table
    # options; the usual spelling is mysql_engine / mysql_default_charset --
    # confirm these uppercase variants take effect as intended.
    op.create_table(
        'nodes',
        sa.Column('uuid', sa.String(36), primary_key=True),
        # Start/finish timestamps are stored as epoch floats, not DateTime.
        sa.Column('started_at', sa.Float, nullable=True),
        sa.Column('finished_at', sa.Float, nullable=True),
        sa.Column('error', sa.Text, nullable=True),
        mysql_ENGINE='InnoDB',
        mysql_DEFAULT_CHARSET='UTF8'
    )
    op.create_table(
        'attributes',
        sa.Column('name', sa.String(255), primary_key=True),
        sa.Column('value', sa.String(255), primary_key=True),
        sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid')),
        mysql_ENGINE='InnoDB',
        mysql_DEFAULT_CHARSET='UTF8'
    )
    op.create_table(
        'options',
        sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid'),
                  primary_key=True),
        sa.Column('name', sa.String(255), primary_key=True),
        sa.Column('value', sa.Text),
        mysql_ENGINE='InnoDB',
        mysql_DEFAULT_CHARSET='UTF8'
    )
| 29.1875 | 75 | 0.670236 |
4ed9014eabe49126bd4bac0a2f19df702b3d2933 | 1,261 | py | Python | items.py | shawnantonucci/Zomb1e-Mud | 05f6eeeae7487d3f03cbc1aa1872bf557201746e | [
"MIT"
] | 1 | 2019-06-08T01:39:22.000Z | 2019-06-08T01:39:22.000Z | items.py | shawnantonucci/Zomb1e-Mud | 05f6eeeae7487d3f03cbc1aa1872bf557201746e | [
"MIT"
] | 1 | 2019-06-04T06:15:09.000Z | 2019-06-04T06:15:09.000Z | items.py | shawnantonucci/Zomb1e-Mud | 05f6eeeae7487d3f03cbc1aa1872bf557201746e | [
"MIT"
] | null | null | null |
class Weapon:
    """Abstract base for all weapons; only subclasses may be instantiated.

    Subclasses set ``name``, ``description``, ``damage`` and ``value``.
    """

    def __init__(self):
        # Creating a bare Weapon is a programming error.
        raise NotImplementedError("Do not create raw Weapon objects.")

    def __str__(self):
        # A weapon displays simply as its name.
        return self.name
def __init__(self):
self.name = "Rock"
self.description = "A rock the size of a baseball."
self.damage = 5
self.value = 1
class Dagger(Weapon):
def __init__(self):
self.name = "Dagger"
self.description = "A small dagger. It looks pretty sharp."
self.damage = 10
self.value = 20
class RustySword(Weapon):
def __init__(self):
self.name = "Rusty Sword"
self.description = "A rusty sword. Its pretty sharp still"
self.damage = 20
self.value = 100
# Consumables
class Consumable:
    """Abstract base for healing items; only subclasses may be instantiated.

    Subclasses set ``name``, ``healing_value`` and ``value``.
    """

    def __init__(self):
        # Creating a bare Consumable is a programming error.
        raise NotImplementedError("Do not create raw Consumable objects.")

    def __str__(self):
        # Rendered as e.g. "Plain slice of bread (+10 Hp)".
        return "{} (+{} Hp)".format(self.name, self.healing_value)
def __init__(self):
self.name = "Plain slice of bread"
self.healing_value = 10
self.value = 12
class HealingPotion(Consumable):
def __init__(self):
self.name = "Healing Patch"
self.healing_value = 50
self.value = 60
| 25.734694 | 74 | 0.618557 |
3fe8ebb16d842258bc5528008b78414254fb9acc | 47,111 | py | Python | keras-ResNet50/tensorflow/python/ops/gen_spectral_ops.py | wuh0007/severless_ML_live | 088b78b06434583b7443ab877a6cdd80121bb8d1 | [
"MIT"
] | 1 | 2020-07-06T14:18:59.000Z | 2020-07-06T14:18:59.000Z | keras-ResNet50/tensorflow/python/ops/gen_spectral_ops.py | wuh0007/severless_ML_live | 088b78b06434583b7443ab877a6cdd80121bb8d1 | [
"MIT"
] | null | null | null | keras-ResNet50/tensorflow/python/ops/gen_spectral_ops.py | wuh0007/severless_ML_live | 088b78b06434583b7443ab877a6cdd80121bb8d1 | [
"MIT"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: spectral_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util.tf_export import tf_export
def batch_fft(input, name=None):
  r"""Dispatches the TensorFlow `BatchFFT` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchFFT", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchFFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchFFT",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_fft_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_fft_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchFFT` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchFFT", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchFFT", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def batch_fft2d(input, name=None):
  r"""Dispatches the TensorFlow `BatchFFT2D` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchFFT2D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchFFT2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchFFT2D",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_fft2d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_fft2d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchFFT2D` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchFFT2D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchFFT2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def batch_fft3d(input, name=None):
  r"""Dispatches the TensorFlow `BatchFFT3D` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchFFT3D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchFFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchFFT3D",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_fft3d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_fft3d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchFFT3D` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchFFT3D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchFFT3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def batch_ifft(input, name=None):
  r"""Dispatches the TensorFlow `BatchIFFT` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchIFFT", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchIFFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchIFFT",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_ifft_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_ifft_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchIFFT` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchIFFT", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchIFFT", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def batch_ifft2d(input, name=None):
  r"""Dispatches the TensorFlow `BatchIFFT2D` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchIFFT2D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchIFFT2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchIFFT2D",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_ifft2d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_ifft2d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchIFFT2D` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchIFFT2D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchIFFT2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def batch_ifft3d(input, name=None):
  r"""Dispatches the TensorFlow `BatchIFFT3D` op.

  Args:
    input: A `Tensor` of type `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchIFFT3D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "BatchIFFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchIFFT3D",
        name, _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return batch_ifft3d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def batch_ifft3d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `BatchIFFT3D` (used when the fast path
  raises `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = None
  _result = _execute.execute(b"BatchIFFT3D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchIFFT3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
@tf_export('spectral.fft', 'fft')
@deprecated_endpoints('fft')
def fft(input, name=None):
  r"""Fast Fourier transform.

  Computes the 1-dimensional discrete Fourier transform over the inner-most
  dimension of `input`.

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      A complex64 tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context): build a graph op and record the
  # gradient; the `Tcomplex` attr carries the input's complex dtype.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FFT", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
    _execute.record_gradient(
      "FFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FFT", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return fft_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)

def fft_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of `FFT` (used when the fast path raises
  `_FallbackException`).
  """
  _ctx = ctx if ctx else _context.context()
  # Resolve the concrete complex dtype (defaults to complex64).
  _attr_Tcomplex, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = ("Tcomplex", _attr_Tcomplex)
  _result = _execute.execute(b"FFT", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "FFT", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
@tf_export('spectral.fft2d', 'fft2d')
def fft2d(input, name=None):
r"""2D fast Fourier transform.
Computes the 2-dimensional discrete Fourier transform over the inner-most
2 dimensions of `input`.
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
A complex64 tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"FFT2D", input=input, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
_execute.record_gradient(
"FFT2D", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "FFT2D", name,
_ctx._post_execution_callbacks, input)
return _result
except _core._FallbackException:
return fft2d_eager_fallback(
input, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def fft2d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of the FFT2D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the Tcomplex attr from the input's dtype, defaulting to complex64.
  tcomplex_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("Tcomplex", tcomplex_attr)
  outputs = _execute.execute(b"FFT2D", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("FFT2D", flat_inputs, op_attrs, outputs, name)
  # FFT2D produces exactly one output tensor.
  result, = outputs
  return result
@tf_export('spectral.fft3d', 'fft3d')
def fft3d(input, name=None):
  r"""3D fast Fourier transform.
  Computes the 3-dimensional discrete Fourier transform over the inner-most 3
  dimensions of `input`.
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      A complex64 tensor.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: emit an FFT3D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "FFT3D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
    _execute.record_gradient(
        "FFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FFT3D", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return fft3d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def fft3d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of the FFT3D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the Tcomplex attr from the input's dtype, defaulting to complex64.
  tcomplex_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("Tcomplex", tcomplex_attr)
  outputs = _execute.execute(b"FFT3D", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("FFT3D", flat_inputs, op_attrs, outputs, name)
  # FFT3D produces exactly one output tensor.
  result, = outputs
  return result
@tf_export('spectral.ifft', 'ifft')
@deprecated_endpoints('ifft')
def ifft(input, name=None):
  r"""Inverse fast Fourier transform.
  Computes the inverse 1-dimensional discrete Fourier transform over the
  inner-most dimension of `input`.
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      A complex64 tensor.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: emit an IFFT node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IFFT", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
    _execute.record_gradient(
        "IFFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IFFT", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return ifft_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def ifft_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of the IFFT op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the Tcomplex attr from the input's dtype, defaulting to complex64.
  tcomplex_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("Tcomplex", tcomplex_attr)
  outputs = _execute.execute(b"IFFT", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IFFT", flat_inputs, op_attrs, outputs, name)
  # IFFT produces exactly one output tensor.
  result, = outputs
  return result
@tf_export('spectral.ifft2d', 'ifft2d')
def ifft2d(input, name=None):
  r"""Inverse 2D fast Fourier transform.
  Computes the inverse 2-dimensional discrete Fourier transform over the
  inner-most 2 dimensions of `input`.
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      A complex64 tensor.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: emit an IFFT2D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IFFT2D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
    _execute.record_gradient(
        "IFFT2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IFFT2D", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return ifft2d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def ifft2d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of the IFFT2D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the Tcomplex attr from the input's dtype, defaulting to complex64.
  tcomplex_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("Tcomplex", tcomplex_attr)
  outputs = _execute.execute(b"IFFT2D", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IFFT2D", flat_inputs, op_attrs, outputs, name)
  # IFFT2D produces exactly one output tensor.
  result, = outputs
  return result
@tf_export('spectral.ifft3d', 'ifft3d')
def ifft3d(input, name=None):
  r"""Inverse 3D fast Fourier transform.
  Computes the inverse 3-dimensional discrete Fourier transform over the
  inner-most 3 dimensions of `input`.
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      A complex64 tensor.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: emit an IFFT3D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IFFT3D", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tcomplex", _op.get_attr("Tcomplex"))
    _execute.record_gradient(
        "IFFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IFFT3D", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      return ifft3d_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def ifft3d_eager_fallback(input, name=None, ctx=None):
  r"""Slow-path eager execution of the IFFT3D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Infer the Tcomplex attr from the input's dtype, defaulting to complex64.
  tcomplex_attr, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("Tcomplex", tcomplex_attr)
  outputs = _execute.execute(b"IFFT3D", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IFFT3D", flat_inputs, op_attrs, outputs, name)
  # IFFT3D produces exactly one output tensor.
  result, = outputs
  return result
def irfft(input, fft_length, name=None):
  r"""Inverse real-valued fast Fourier transform.
  Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
  signal over the inner-most dimension of `input`.
  The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
  `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
  `fft_length` is not provided, it is computed from the size of the inner-most
  dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
  compute `input` is odd, it should be provided since it cannot be inferred
  properly.
  Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
  than the corresponding dimension of `input`, the dimension is cropped. If it is
  larger, the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `complex64`. A complex64 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [1]. The FFT length.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: emit an IRFFT node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IRFFT", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "IRFFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IRFFT", name,
        _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return irfft_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def irfft_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the IRFFT op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # IRFFT has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"IRFFT", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IRFFT", flat_inputs, None, outputs, name)
  # IRFFT produces exactly one output tensor.
  result, = outputs
  return result
def irfft2d(input, fft_length, name=None):
  r"""Inverse 2D real-valued fast Fourier transform.
  Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
  signal over the inner-most 2 dimensions of `input`.
  The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
  The inner-most dimension contains the `fft_length / 2 + 1` unique components of
  the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
  from the size of the inner-most 2 dimensions of `input`. If the FFT length used
  to compute `input` is odd, it should be provided since it cannot be inferred
  properly.
  Along each axis `IRFFT2D` is computed on, if `fft_length` (or
  `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
  corresponding dimension of `input`, the dimension is cropped. If it is larger,
  the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `complex64`. A complex64 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [2]. The FFT length for each dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: emit an IRFFT2D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IRFFT2D", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "IRFFT2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IRFFT2D",
        name, _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return irfft2d_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def irfft2d_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the IRFFT2D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # IRFFT2D has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"IRFFT2D", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IRFFT2D", flat_inputs, None, outputs, name)
  # IRFFT2D produces exactly one output tensor.
  result, = outputs
  return result
def irfft3d(input, fft_length, name=None):
  r"""Inverse 3D real-valued fast Fourier transform.
  Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
  signal over the inner-most 3 dimensions of `input`.
  The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
  The inner-most dimension contains the `fft_length / 2 + 1` unique components of
  the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
  from the size of the inner-most 3 dimensions of `input`. If the FFT length used
  to compute `input` is odd, it should be provided since it cannot be inferred
  properly.
  Along each axis `IRFFT3D` is computed on, if `fft_length` (or
  `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
  corresponding dimension of `input`, the dimension is cropped. If it is larger,
  the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `complex64`. A complex64 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [3]. The FFT length for each dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: emit an IRFFT3D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "IRFFT3D", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "IRFFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IRFFT3D",
        name, _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return irfft3d_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def irfft3d_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the IRFFT3D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # IRFFT3D has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.complex64)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"IRFFT3D", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("IRFFT3D", flat_inputs, None, outputs, name)
  # IRFFT3D produces exactly one output tensor.
  result, = outputs
  return result
def rfft(input, fft_length, name=None):
  r"""Real-valued fast Fourier transform.
  Computes the 1-dimensional discrete Fourier transform of a real-valued signal
  over the inner-most dimension of `input`.
  Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
  `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
  followed by the `fft_length / 2` positive-frequency terms.
  Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
  corresponding dimension of `input`, the dimension is cropped. If it is larger,
  the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `float32`. A float32 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [1]. The FFT length.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode: emit an RFFT node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RFFT", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "RFFT", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RFFT", name,
        _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return rfft_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def rfft_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the RFFT op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # RFFT has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.float32)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"RFFT", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("RFFT", flat_inputs, None, outputs, name)
  # RFFT produces exactly one output tensor.
  result, = outputs
  return result
def rfft2d(input, fft_length, name=None):
  r"""2D real-valued fast Fourier transform.
  Computes the 2-dimensional discrete Fourier transform of a real-valued signal
  over the inner-most 2 dimensions of `input`.
  Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
  `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
  of `output`: the zero-frequency term, followed by the `fft_length / 2`
  positive-frequency terms.
  Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
  corresponding dimension of `input`, the dimension is cropped. If it is larger,
  the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `float32`. A float32 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [2]. The FFT length for each dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode: emit an RFFT2D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RFFT2D", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "RFFT2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RFFT2D", name,
        _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return rfft2d_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def rfft2d_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the RFFT2D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # RFFT2D has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.float32)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"RFFT2D", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("RFFT2D", flat_inputs, None, outputs, name)
  # RFFT2D produces exactly one output tensor.
  result, = outputs
  return result
def rfft3d(input, fft_length, name=None):
  r"""3D real-valued fast Fourier transform.
  Computes the 3-dimensional discrete Fourier transform of a real-valued signal
  over the inner-most 3 dimensions of `input`.
  Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
  `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
  of `output`: the zero-frequency term, followed by the `fft_length / 2`
  positive-frequency terms.
  Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
  corresponding dimension of `input`, the dimension is cropped. If it is larger,
  the dimension is padded with zeros.
  Args:
    input: A `Tensor` of type `float32`. A float32 tensor.
    fft_length: A `Tensor` of type `int32`.
      An int32 tensor of shape [3]. The FFT length for each dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `complex64`.
  """
  _ctx = _context._context
  # Graph mode: emit an RFFT3D node into the current graph and register its
  # gradient against the symbolic result.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RFFT3D", input=input, fft_length=fft_length, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "RFFT3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # Eager mode: try the C fast path first; on _FallbackException use the
    # Python slow path, and tag op failures with the user-supplied name.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RFFT3D", name,
        _ctx._post_execution_callbacks, input, fft_length)
      return _result
    except _core._FallbackException:
      return rfft3d_eager_fallback(
          input, fft_length, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def rfft3d_eager_fallback(input, fft_length, name=None, ctx=None):
  r"""Slow-path eager execution of the RFFT3D op.

  Invoked when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  # RFFT3D has fixed input dtypes, so convert directly rather than matching.
  input = _ops.convert_to_tensor(input, _dtypes.float32)
  fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
  flat_inputs = [input, fft_length]
  outputs = _execute.execute(b"RFFT3D", 1, inputs=flat_inputs, attrs=None,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("RFFT3D", flat_inputs, None, outputs, name)
  # RFFT3D produces exactly one output tensor.
  result, = outputs
  return result
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList, register its ops, and return an OpDefLibrary."""
  # Deserialize the OpList proto carried as raw bytes.
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  # Make the ops known to the global registry before handing out a library.
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "BatchFFT"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use FFT"
# }
# }
# op {
# name: "BatchFFT2D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use FFT2D"
# }
# }
# op {
# name: "BatchFFT3D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use FFT3D"
# }
# }
# op {
# name: "BatchIFFT"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use IFFT"
# }
# }
# op {
# name: "BatchIFFT2D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use IFFT2D"
# }
# }
# op {
# name: "BatchIFFT3D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# deprecation {
# version: 15
# explanation: "Use IFFT3D"
# }
# }
# op {
# name: "FFT"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "FFT2D"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "FFT3D"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IFFT"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IFFT2D"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IFFT3D"
# input_arg {
# name: "input"
# type_attr: "Tcomplex"
# }
# output_arg {
# name: "output"
# type_attr: "Tcomplex"
# }
# attr {
# name: "Tcomplex"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IRFFT"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_FLOAT
# }
# }
# op {
# name: "IRFFT2D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_FLOAT
# }
# }
# op {
# name: "IRFFT3D"
# input_arg {
# name: "input"
# type: DT_COMPLEX64
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_FLOAT
# }
# }
# op {
# name: "RFFT"
# input_arg {
# name: "input"
# type: DT_FLOAT
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# }
# op {
# name: "RFFT2D"
# input_arg {
# name: "input"
# type: DT_FLOAT
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# }
# op {
# name: "RFFT3D"
# input_arg {
# name: "input"
# type: DT_FLOAT
# }
# input_arg {
# name: "fft_length"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type: DT_COMPLEX64
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n.\n\010BatchFFT\022\t\n\005input\030\010\032\n\n\006output\030\010B\013\010\017\022\007Use FFT\n2\n\nBatchFFT2D\022\t\n\005input\030\010\032\n\n\006output\030\010B\r\010\017\022\tUse FFT2D\n2\n\nBatchFFT3D\022\t\n\005input\030\010\032\n\n\006output\030\010B\r\010\017\022\tUse FFT3D\n0\n\tBatchIFFT\022\t\n\005input\030\010\032\n\n\006output\030\010B\014\010\017\022\010Use IFFT\n4\n\013BatchIFFT2D\022\t\n\005input\030\010\032\n\n\006output\030\010B\016\010\017\022\nUse IFFT2D\n4\n\013BatchIFFT3D\022\t\n\005input\030\010\032\n\n\006output\030\010B\016\010\017\022\nUse IFFT3D\nJ\n\003FFT\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\nL\n\005FFT2D\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\nL\n\005FFT3D\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\nK\n\004IFFT\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\nM\n\006IFFT2D\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\nM\n\006IFFT3D\022\021\n\005input\"\010Tcomplex\032\022\n\006output\"\010Tcomplex\"\034\n\010Tcomplex\022\004type\032\0020\010:\006\n\0042\002\010\022\n.\n\005IRFFT\022\t\n\005input\030\010\022\016\n\nfft_length\030\003\032\n\n\006output\030\001\n0\n\007IRFFT2D\022\t\n\005input\030\010\022\016\n\nfft_length\030\003\032\n\n\006output\030\001\n0\n\007IRFFT3D\022\t\n\005input\030\010\022\016\n\nfft_length\030\003\032\n\n\006output\030\001\n-\n\004RFFT\022\t\n\005input\030\001\022\016\n\nfft_length\030\003\032\n\n\006output\030\010\n/\n\006RFFT2D\022\t\n\005input\030\001\022\016\n\nfft_length\030\003\032\n\n\006output\030\010\n/\n\00
6RFFT3D\022\t\n\005input\030\001\022\016\n\nfft_length\030\003\032\n\n\006output\030\010")
| 31.874831 | 2,090 | 0.675278 |
f8fbd355ee987eeac36dbe213f512a5ac5b67060 | 3,075 | py | Python | test/unit_tests/test_init_workspace.py | dseifert/catkin | f972729b3f99479e0844e304b575b6f2a96c5779 | [
"BSD-3-Clause"
] | 250 | 2015-01-02T09:29:09.000Z | 2022-03-28T08:48:28.000Z | test/unit_tests/test_init_workspace.py | dseifert/catkin | f972729b3f99479e0844e304b575b6f2a96c5779 | [
"BSD-3-Clause"
] | 456 | 2015-01-01T00:42:47.000Z | 2022-03-22T13:36:33.000Z | test/unit_tests/test_init_workspace.py | dseifert/catkin | f972729b3f99479e0844e304b575b6f2a96c5779 | [
"BSD-3-Clause"
] | 261 | 2015-01-10T14:07:49.000Z | 2022-03-26T13:29:58.000Z | import os
import shutil
import tempfile
import unittest
from os.path import join
try:
from catkin.init_workspace import init_workspace, _symlink_or_copy
except ImportError as impe:
raise ImportError(
'Please adjust your pythonpath before running this test: %s' % str(impe))
class InitWorkspaceTest(unittest.TestCase):
    """Tests for catkin.init_workspace helpers using throw-away temp dirs."""

    def test_symlink_or_copy(self):
        """_symlink_or_copy keeps the given source path verbatim in the link
        target (absolute stays absolute, relative stays relative)."""
        # Create the temp dir *before* the try block so the cleanup in
        # `finally` never references an unbound name if mkdtemp itself fails.
        root_dir = tempfile.mkdtemp()
        try:
            os.makedirs(join(root_dir, 'subdir'))
            os.makedirs(join(root_dir, 'subdir2'))
            source = join(root_dir, 'subdir', 'foo')
            with open(source, 'ab') as fhand:
                fhand.write('content'.encode('UTF-8'))
            _symlink_or_copy(source, join(root_dir, 'foolink'))
            _symlink_or_copy(source, join(root_dir, 'subdir', 'foolink'))
            rel_source = os.path.relpath(source, os.getcwd())
            _symlink_or_copy(rel_source, join(root_dir, 'foolinkrel'))
            # Absolute sources are preserved verbatim in the link target.
            self.assertEqual(source, os.readlink(join(root_dir, 'foolink')))
            self.assertEqual(source,
                             os.readlink(join(root_dir, 'subdir', 'foolink')))
            # A relative source must not be absolutized.
            self.assertEqual(rel_source,
                             os.readlink(join(root_dir, 'foolinkrel')))
        finally:
            shutil.rmtree(root_dir)

    def test_init_workspace(self):
        """init_workspace links CMakeLists.txt relatively when catkin lives in
        the workspace, and absolutely otherwise."""
        # mkdtemp outside the try block: see test_symlink_or_copy.
        root_dir = tempfile.mkdtemp()
        try:
            # ws1 carries its own catkin checkout with a toplevel.cmake.
            # makedirs creates the intermediate directories recursively.
            os.makedirs(join(root_dir, 'ws1', 'catkin', 'cmake'))
            with open(join(root_dir, 'ws1', 'catkin', 'cmake', 'toplevel.cmake'),
                      'ab') as fhand:
                fhand.write(''.encode('UTF-8'))
            with open(join(root_dir, 'ws1', '.catkin'), 'ab') as fhand:
                fhand.write(''.encode('UTF-8'))
            # ws2 is a bare workspace without a catkin checkout.
            os.makedirs(join(root_dir, 'ws2'))
            with open(join(root_dir, 'ws2', '.catkin'), 'ab') as fhand:
                fhand.write(''.encode('UTF-8'))
            init_workspace(join(root_dir, 'ws1'))
            init_workspace(join(root_dir, 'ws2'))
            # In the same workspace the symlink should be relative.
            self.assertEqual(
                join('catkin', 'cmake', 'toplevel.cmake'),
                os.readlink(join(root_dir, 'ws1', 'CMakeLists.txt')))
            # Outside the workspace the path should resolve to this checkout's
            # own cmake/toplevel.cmake.
            self.assertTrue(
                os.path.samefile(
                    join(os.path.dirname(__file__),
                         '..', '..', 'cmake', 'toplevel.cmake'),
                    os.readlink(join(root_dir, 'ws2', 'CMakeLists.txt'))))
        finally:
            shutil.rmtree(root_dir)
8e830a2f5c443ac94ea773ca1e7a56ade3172bce | 258 | py | Python | palindrome.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 2 | 2019-07-10T06:32:05.000Z | 2019-11-13T07:52:53.000Z | palindrome.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | null | null | null | palindrome.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 1 | 2019-10-12T06:56:13.000Z | 2019-10-12T06:56:13.000Z | #Python program to check whether the number is a palindrome or not
def palindrome(a):
    """Print whether the given string reads the same forwards and backwards."""
    reversed_form = a[::-1]
    if reversed_form == a:
        print(a, "is palindrome")
    else:
        print(a, "is not a palindrome")
def main():
    """Prompt the user for a value and report whether it is a palindrome."""
    user_value = input("Enter a number")
    palindrome(user_value)
# Standard entry-point guard: run the prompt only when executed
# as a script, not when imported as a module.
if __name__=='__main__':
    main()
| 19.846154 | 66 | 0.678295 |
6b90c5179ad41c3027104211c129a4081c39a60a | 584 | py | Python | Python/library_examples/sensors/optical/isl29125.py | treehopper-electronics/treehopper-sdk | 1abe8aacd9723470e1a4ffe734cdecfda2110ae3 | [
"MIT"
] | 3 | 2018-03-16T07:00:42.000Z | 2022-03-27T00:39:55.000Z | Python/library_examples/sensors/optical/isl29125.py | treehopper-electronics/treehopper-sdk | 1abe8aacd9723470e1a4ffe734cdecfda2110ae3 | [
"MIT"
] | 16 | 2016-08-12T18:51:04.000Z | 2021-04-16T16:14:07.000Z | Python/library_examples/sensors/optical/isl29125.py | treehopper-electronics/treehopper-sdk | 1abe8aacd9723470e1a4ffe734cdecfda2110ae3 | [
"MIT"
] | 6 | 2015-11-04T15:53:49.000Z | 2020-06-25T18:34:47.000Z | from time import sleep
from treehopper.api import *
from treehopper.libraries.sensors.optical import Isl29125
from colr import color
def data_received(sender, red, green, blue):
    """Print an RGB reading with the console background tinted to match.

    Each channel is scaled so the brightest channel maps to 255.
    ``sender`` (the emitting sensor) is unused here.
    Fix: guard against a ZeroDivisionError when all three channels
    read 0 (complete darkness) -- render on black in that case.
    """
    max_val = max(red, max(green, blue))
    if max_val <= 0:
        # No light on any channel: avoid dividing by zero, use black.
        r = g = b = 0
    else:
        r = 255 * red / max_val
        g = 255 * green / max_val
        b = 255 * blue / max_val
    print(color(f' red={red:4.3f}, green={green:4.3f}, blue={blue:4.3f} ', fore=(0, 0, 0), back=(r, g, b)))
# Hardware setup: take the first Treehopper board found over USB.
board = find_boards()[0]
board.connect()
# ISL29125 RGB sensor on the board's I2C bus; pin 15 services its interrupt.
sensor = Isl29125(board.i2c, board.pins[15])
# Each sensor interrupt delivers a reading to data_received.
sensor.interrupt_received += data_received
# Keep the process alive; all work happens in the interrupt callback.
# NOTE(review): sleep(1000) pauses 1000 seconds per iteration -- presumably
# just a keep-alive, but confirm 1 second was not intended.
while True:
    sleep(1000)
| 26.545455 | 115 | 0.669521 |
31d8692254e889f496103927aa5475a765caa49f | 4,564 | py | Python | src/lava/lib/dl/slayer/block/rf.py | PeaBrane/lava-dl | b205b4e0466788c5232ff20497ac0fc433cbccca | [
"BSD-3-Clause"
] | null | null | null | src/lava/lib/dl/slayer/block/rf.py | PeaBrane/lava-dl | b205b4e0466788c5232ff20497ac0fc433cbccca | [
"BSD-3-Clause"
] | null | null | null | src/lava/lib/dl/slayer/block/rf.py | PeaBrane/lava-dl | b205b4e0466788c5232ff20497ac0fc433cbccca | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Resonate and Fire layer blocks."""
import torch
from . import base
from ..neuron import rf
from ..synapse import complex as synapse
from ..axon import Delay
class AbstractRF(torch.nn.Module):
    """Abstract Resonate and Fire block class. Not meant to be
    instantiated directly."""
    def __init__(self, *args, **kwargs):
        super(AbstractRF, self).__init__(*args, **kwargs)
        # Build the RF neuron only when parameters were supplied.
        if self.neuron_params is not None:
            self.neuron = rf.Neuron(**self.neuron_params)
        # Attach an axonal delay (up to 62 steps) only when requested.
        wants_delay = kwargs.get('delay', False)
        self.delay = Delay(max_delay=62) if wants_delay is True else None
        del self.neuron_params
def _doc_from_base(base_doc):
return base_doc.__doc__.replace(
'Abstract', 'Resonate & Fire'
).replace(
'neuron parameter', 'RF parameter'
).replace(
'This should never be instantiated on its own.',
'The block is 8 bit quantization ready.'
)
class Input(AbstractRF, base.AbstractInput):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # When a neuron was built, quantize raw input with its 8-bit scheme.
        if self.neuron is not None:
            self.pre_hook_fx = self.neuron.quantize_8bit


Input.__doc__ = _doc_from_base(base.AbstractInput)
class Flatten(base.AbstractFlatten):
    def __init__(self, *args, **kwargs):
        # Pure pass-through; all behavior lives in the abstract base.
        super().__init__(*args, **kwargs)


Flatten.__doc__ = _doc_from_base(base.AbstractFlatten)
class Average(base.AbstractAverage):
    def __init__(self, *args, **kwargs):
        # Pure pass-through; all behavior lives in the abstract base.
        super().__init__(*args, **kwargs)


Average.__doc__ = _doc_from_base(base.AbstractAverage)
class Affine(AbstractRF, base.AbstractAffine):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.synapse = synapse.Dense(**self.synapse_params)
        # Default the synapse pre-hook to the neuron's 8-bit quantizer
        # unless the caller supplied an explicit pre_hook_fx.
        if 'pre_hook_fx' not in kwargs:
            self.synapse.pre_hook_fx = self.neuron.quantize_8bit
        # A threshold of None disables spike and reset in the dynamics,
        # leaving a purely affine transformation.
        self.neuron.threshold = None
        del self.synapse_params


Affine.__doc__ = _doc_from_base(base.AbstractAffine)
class TimeDecimation(base.AbstractTimeDecimation):
    def __init__(self, *args, **kwargs):
        # Pure pass-through; all behavior lives in the abstract base.
        super().__init__(*args, **kwargs)


TimeDecimation.__doc__ = _doc_from_base(base.AbstractTimeDecimation)
class Dense(AbstractRF, base.AbstractDense):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.synapse = synapse.Dense(**self.synapse_params)
        # Default the synapse pre-hook to the neuron's 8-bit quantizer
        # unless the caller supplied an explicit pre_hook_fx.
        if 'pre_hook_fx' not in kwargs:
            self.synapse.pre_hook_fx = self.neuron.quantize_8bit
        del self.synapse_params


Dense.__doc__ = _doc_from_base(base.AbstractDense)
class Conv(AbstractRF, base.AbstractConv):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.synapse = synapse.Conv(**self.synapse_params)
        # Default the synapse pre-hook to the neuron's 8-bit quantizer
        # unless the caller supplied an explicit pre_hook_fx.
        if 'pre_hook_fx' not in kwargs:
            self.synapse.pre_hook_fx = self.neuron.quantize_8bit
        del self.synapse_params


Conv.__doc__ = _doc_from_base(base.AbstractConv)
class Pool(AbstractRF, base.AbstractPool):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.synapse = synapse.Pool(**self.synapse_params)
        # Default the synapse pre-hook to the neuron's 8-bit quantizer
        # unless the caller supplied an explicit pre_hook_fx.
        if 'pre_hook_fx' not in kwargs:
            self.synapse.pre_hook_fx = self.neuron.quantize_8bit
        del self.synapse_params


Pool.__doc__ = _doc_from_base(base.AbstractPool)
class KWTA(AbstractRF, base.AbstractKWTA):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.synapse = synapse.Dense(**self.synapse_params)
        # Default the synapse pre-hook to the neuron's 8-bit quantizer
        # unless the caller supplied an explicit pre_hook_fx.
        if 'pre_hook_fx' not in kwargs:
            self.synapse.pre_hook_fx = self.neuron.quantize_8bit
        del self.synapse_params


KWTA.__doc__ = _doc_from_base(base.AbstractKWTA)
class Recurrent(AbstractRF, base.AbstractRecurrent):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input_synapse = synapse.Dense(**self.synapse_params)
        self.recurrent_synapse = synapse.Dense(**self.recurrent_params)
        # Both synapses always use the neuron's 8-bit quantization pre-hook.
        self.input_synapse.pre_hook_fx = self.neuron.quantize_8bit
        self.recurrent_synapse.pre_hook_fx = self.neuron.quantize_8bit
        del self.synapse_params
        del self.recurrent_params


Recurrent.__doc__ = _doc_from_base(base.AbstractRecurrent)
| 31.475862 | 71 | 0.690403 |
19a53dfcd71eba6ea39ed9e2edae01a1d834dc74 | 39,166 | py | Python | src/m1_Point.py | nandoltw/09-ImplementingClasses | 0f1791ba782a8b5a98a757a621085742c1cf73e0 | [
"MIT"
] | null | null | null | src/m1_Point.py | nandoltw/09-ImplementingClasses | 0f1791ba782a8b5a98a757a621085742c1cf73e0 | [
"MIT"
] | null | null | null | src/m1_Point.py | nandoltw/09-ImplementingClasses | 0f1791ba782a8b5a98a757a621085742c1cf73e0 | [
"MIT"
] | null | null | null | """
A simple Point class.
NOTE: This is NOT rosegraphics -- it is your OWN Point class.
Authors: David Mutchler, Vibha Alangar, Dave Fisher, Amanda Stouder,
their colleagues and Thomas Nandola.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """ Calls the TEST functions in this module. """
    # Run the tests in the same order the methods were implemented.
    tests = (
        run_test_init,
        run_test_repr,
        run_test_clone,
        run_test_move_to,
        run_test_move_by,
        run_test_get_number_of_moves_made,
        run_test_get_distance_from,
        run_test_get_distance_from_start,
        run_test_get_distance_traveled,
        run_test_closer_to,
        run_test_halfway_to,
    )
    for run_test in tests:
        run_test()
################################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
################################################################################
# ------------------------------------------------------------------------------
# DONE: 2. With your instructor, READ THE INSTRUCTIONS in
# DONE (continued) in file m0_INSTRUCTIONS.txt, asking questions as needed.
#
# DONE (continued): Then implement a class called Point that has NO METHODS
# DONE (continued) yet, just the lines that start the definition of any class:
#
# class NAME_OF_CLASS(object):
# """ Brief description of what objects of the class 'are'."""
#
# Run the program and correct any syntax (notational) errors.
# ------------------------------------------------------------------------------
################################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
################################################################################
import math
class Point(object):
    """
    A point on the x-y plane that remembers its starting position,
    how many times it has moved, and how far it has traveled.
    """

    def __init__(self, x, y):
        """
        Sets instance variables  x  and  y  to the given coordinates,
        which are the initial (and current) position of this Point.
        """
        self.x = x
        self.y = y
        # Bookkeeping for the get_...  query methods:
        # starting position, number of moves, and total distance moved.
        self.startx = x
        self.starty = y
        self.moves_made = 0
        self.distance_traveled = 0.0

    def __repr__(self):
        """ Returns a string that represents this Point: 'Point(x, y)'. """
        # BUG FIX: the spec (and the tests in this module) require a space
        # after the comma; the old format string produced 'Point(x,y)'.
        return 'Point({}, {})'.format(self.x, self.y)

    def clone(self):
        """ Returns a new Point at the same (x, y) position as this one. """
        return Point(self.x, self.y)

    def move_to(self, x, y):
        """ Moves this Point TO the given (x, y), updating its statistics. """
        # Accumulate the length of this move BEFORE changing position.
        self.distance_traveled += math.hypot(x - self.x, y - self.y)
        self.x = x
        self.y = y
        self.moves_made = self.moves_made + 1

    def move_by(self, dx, dy):
        """ Moves this Point BY the given (dx, dy), updating its statistics. """
        # Delegate so the move count and distance bookkeeping stay in one place.
        self.move_to(self.x + dx, self.y + dy)

    def get_number_of_moves_made(self):
        """ Returns how many times this Point has moved (move_to/move_by). """
        return self.moves_made

    def get_distance_from(self, point):
        """ Returns the distance from this Point to the given Point. """
        return math.hypot(self.x - point.x, self.y - point.y)

    def get_distance_from_start(self):
        """ Returns the distance from this Point's current position
        to the position it had when it was constructed. """
        return math.hypot(self.x - self.startx, self.y - self.starty)

    def get_distance_traveled(self):
        """ Returns the sum of the distances of all moves made. """
        # BUG FIX: the old code returned the sum of the net x and y
        # displacements, which is not a distance at all (e.g. it gave 11
        # where the module's own test expects about 10.414).  The total
        # is now accumulated move-by-move in move_to/move_by.
        return self.distance_traveled

    def closer_to(self, p1, p2):
        """ Returns whichever of p1 / p2 is closer to this Point;
        returns p1 when they are equally distant. """
        if self.get_distance_from(p1) <= self.get_distance_from(p2):
            return p1
        return p2

    def halfway_to(self, p2):
        """ Returns a new Point halfway between this Point and p2. """
        return Point((self.x + p2.x) / 2, (self.y + p2.y) / 2)
def run_test_init():
    """
    Tests the __init__ method of the Point class.
      -- IMPORTANT: There are TWO underscores on each side.
      -- Note: the __init__ method runs when one constructs a Point.

    Specification of __init__:
      What comes in:  self, plus an integer x and an integer y, where
        (x, y) is to be the initial position of this Point.
      What goes out:  Nothing (i.e., None).
      Side effects:  Sets instance variables  x  and  y  to the given
        coordinates.  Other methods keep  x  and  y  equal to the
        CURRENT position of the Point.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the __init__ method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: a human compares each printed 'Actual' line to the
    # 'Expected' line printed just above it.
    p1 = Point(30, 18)
    print()
    print('Expected for p1: 30 18')
    print('Actual for p1: ', p1.x, p1.y)
    p2 = Point(100, -40)
    print()
    print('Expected for p2: 100 -40')
    print('Actual for p2: ', p2.x, p2.y)
    print('Expected for p1: 30 18')
    print('Actual for p1: ', p1.x, p1.y)
    # Mutating one Point must not affect the other.
    p1.y = 999
    print()
    print('Expected for p1: 30 999')
    print('Actual for p1: ', p1.x, p1.y)
    print('Expected for p2: 100 -40')
    print('Actual for p2: ', p2.x, p2.y)
def run_test_repr():
    """
    Tests the __repr__ method of the Point class.
      -- IMPORTANT: There are TWO underscores on each side.
      -- Note: __repr__ is called by PRINT and other functions that
           DISPLAY a Point object.

    Specification of __repr__:
      What comes in:  self.
      What goes out:  A string of the form 'Point(x, y)', where x and y
        are replaced by this Point's coordinates.
      Side effects:  None.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the __repr__ method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    p1 = Point(30, 18)
    print()
    print('Expected for p1: Point(30, 18)')
    print('Actual for p1: ', p1)
    p2 = Point(100, -40)
    print()
    print('Expected for p2: Point(100, -40)')
    print('Actual for p2: ', p2)
    print('Expected for p1: Point(30, 18)')
    print('Actual for p1: ', p1)
    # The representation must track the CURRENT position.
    p1.y = 999
    print()
    print('Expected for p1: Point(30, 999)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(100, -40)')
    print('Actual for p2: ', p2)
def run_test_clone():
    """
    Tests the clone method of the Point class.

    Specification of clone:
      What comes in:  self.
      What goes out:  A NEW Point whose x and y coordinates are the same
        as the x and y coordinates of this Point.
      Side effects:  None.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the clone method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    p1 = Point(10, 8)
    print()
    print('Expected for p1: Point(10, 8)')
    print('Actual for p1: ', p1)
    p2 = p1.clone()
    p3 = p2.clone()
    print()
    print('Expected for p1: Point(10, 8)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(10, 8)')
    print('Actual for p2: ', p2)
    print('Expected for p3: Point(10, 8)')
    print('Actual for p3: ', p3)
    # Mutating the original must not affect its clones (and vice versa).
    p1.x = 999
    print()
    print('Expected for p1: Point(999, 8)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(10, 8)')
    print('Actual for p2: ', p2)
    print('Expected for p3: Point(10, 8)')
    print('Actual for p3: ', p3)
    p1.y = 333
    p2 = Point(11, 22)
    p3.x = 777
    p3.y = 555
    print()
    # BUG FIX: the expected-output string read 'Point(999. 333)'
    # (period instead of comma), which made the visual check misleading.
    print('Expected for p1: Point(999, 333)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(11, 22)')
    print('Actual for p2: ', p2)
    print('Expected for p3: Point(777, 555)')
    print('Actual for p3: ', p3)
def run_test_move_to():
    """
    Tests the move_to method of the Point class.

    Specification of move_to:
      What comes in:  self, plus an integer x and an integer y.
      What goes out:  Nothing (i.e., None).
      Side effects:  Sets the instance variables  x  and  y  to the
        given coordinates, i.e. "moves" this Point TO the given (x, y).
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the move_to method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    p1 = Point(10, 8)
    p2 = Point(50, 20)
    print()
    print('Expected for p1: Point(10, 8)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(50, 20)')
    print('Actual for p2: ', p2)
    p1.move_to(5, -1)
    p2.move_to(0, 0)
    print()
    print('Expected for p1: Point(5, -1)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(0, 0)')
    print('Actual for p2: ', p2)
    # Direct attribute assignment should also change the position.
    p2.y = 99
    print()
    print('Expected for p1: Point(5, -1)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(0, 99)')
    print('Actual for p2: ', p2)
    # move_to is a mutator: it must return None, not a value.
    check_has_no_return = p2.move_to(0, 222)
    print()
    print('Expected for p1: Point(5, -1)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(0, 222)')
    print('Actual for p2: ', p2)
    if check_has_no_return is not None:
        print('** FAILED: This method should NOT return an explicit value;')
        print('** in fact, it returned:', check_has_no_return)
def run_test_move_by():
    """
    Tests the move_by method of the Point class.

    Specification of move_by:
      What comes in:  self, plus an integer dx and an integer dy.
      What goes out:  Nothing (i.e., None).
      Side effects:  Adds dx and dy to the instance variables  x  and
        y , i.e. "moves" this Point BY the given (dx, dy).
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the move_by method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    p1 = Point(10, 8)
    p2 = Point(50, 20)
    print()
    print('Expected for p1: Point(10, 8)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(50, 20)')
    print('Actual for p2: ', p2)
    p1.move_by(5, -1)
    p2.move_by(0, 0)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(50, 20)')
    print('Actual for p2: ', p2)
    p2.move_by(200, 0)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(250, 20)')
    print('Actual for p2: ', p2)
    # move_by is a mutator: it must return None, not a value.
    check_has_no_return = p2.move_by(-100, 300)
    print()
    print('Expected for p1: Point(15, 7)')
    print('Actual for p1: ', p1)
    print('Expected for p2: Point(150, 320)')
    print('Actual for p2: ', p2)
    if check_has_no_return is not None:
        print('** FAILED: This method should NOT return an explicit value;')
        print('** in fact, it returned:', check_has_no_return)
def run_test_get_number_of_moves_made():
    """
    Tests the get_number_of_moves_made method of the Point class.

    Specification of get_number_of_moves_made:
      What comes in:  self.
      What goes out:  An integer: the number of times this Point has
        "moved" via calls to move_to and/or move_by.  Direct attribute
        assignment does NOT count as a move.
      Side effects:  Whatever bookkeeping the Point must maintain in
        __init__ / move_to / move_by to answer this query.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_number_of_moves_made method')
    print('of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    p1 = Point(10, 8)
    p2 = Point(50, 20)
    print()
    print('Expected for p1 moves made: 0')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 0')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
    p1.move_by(5, -1)
    p2.move_by(0, 0)
    print()
    print('Expected for p1 moves made: 1')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 1')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
    p2.move_by(200, 0)
    p2.move_by(-100, 300)
    p2.move_to(-100, 300)
    p1.move_to(3, 3)
    print()
    print('Expected for p1 moves made: 2')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 4')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
    p1.move_by(200, 0)
    p1.move_by(-100, 300)
    p1.move_to(-100, 300)
    p1.move_to(3, 3)
    print()
    print('Expected for p1 moves made: 6')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 4')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
    # Direct attribute assignment is NOT a move; the counts must not change.
    p1.x = 400
    print()
    print('Expected for p1 moves made: 6')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 4')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
    p1.move_to(3, 3)
    p2.move_by(0, 0)
    print()
    print('Expected for p1 moves made: 7')
    print('Actual for p1 moves made: ', p1.get_number_of_moves_made())
    print('Expected for p2 moves made: 5')
    print('Actual for p2 moves made: ', p2.get_number_of_moves_made())
def run_test_get_distance_from():
    """
    Tests the get_distance_from method of the Point class.

    Specification of get_distance_from:
      What comes in:  self, plus another Point object.
      What goes out:  The (Euclidean) distance from this Point to the
        given Point.
      Side effects:  None required.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_distance_from method of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: compare each 'Actual' line to the 'Expected' above it.
    # The distance must be symmetric and zero from a Point to itself.
    p1 = Point(1, 5)
    p2 = Point(10, 5)
    p3 = Point(13, 9)
    print()
    print('Expected p1 to p2: 9.0')
    print('Actual p1 to p2:', p1.get_distance_from(p2))
    print()
    print('Expected p2 to p3: 5.0')
    print('Actual p2 to p3:', p2.get_distance_from(p3))
    print('Expected p3 to p2: 5.0')
    print('Actual p3 to p2:', p3.get_distance_from(p2))
    print()
    print('Expected p1 to p3: about 12.65')
    print('Actual p1 to p3:', p1.get_distance_from(p3))
    print('Expected p3 to p1: about 12.65')
    print('Actual p3 to p1:', p3.get_distance_from(p1))
    print()
    print('Expected p1 to p1: 0.0')
    print('Actual p1 to p1:', p1.get_distance_from(p1))
    print('Expected p2 to p2: 0.0')
    print('Actual p2 to p2:', p2.get_distance_from(p2))
    print('Expected p3 to p3: 0.0')
    print('Actual p3 to p3:', p3.get_distance_from(p3))
    # A clone is at distance 0 from its original.
    p4 = p1.clone()
    print()
    print('Expected p1 to p4: 0.0')
    print('Actual p1 to p4:', p1.get_distance_from(p4))
    print('Expected p4 to p1: 0.0')
    print('Actual p4 to p1:', p4.get_distance_from(p1))
    print('Expected p4 to p2: 9.0')
    print('Actual p4 to p2:', p4.get_distance_from(p2))
    print('Expected p2 to p4: 9.0')
    print('Actual p2 to p4:', p2.get_distance_from(p4))
def run_test_get_distance_from_start():
    """
    Tests the get_distance_from_START method of the Point class.

    Specification of get_distance_from_start:
      What comes in:  self.
      What goes out:  The distance from this Point's CURRENT position
        to the position the Point had when it was constructed.
      Side effects:  Whatever bookkeeping the Point must maintain in
        __init__ to remember its starting position.
    """
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_distance_from_START method')
    print('of the Point class.')
    print('-----------------------------------------------------------')
    # VISUAL test: only the START and CURRENT positions matter;
    # intermediate moves must not affect the answer.
    p1 = Point(20, 30)
    p1.move_to(111, 222)
    p1.move_by(10, 20)
    p1.move_to(0, 0)
    p1.move_to(21, 31)
    print()
    print('p1 from start to (21, 31), should be about 1.414')
    print('Actually is:', p1.get_distance_from_start())
    p1.move_by(29, 39)
    print()
    print('p1 from start to (50, 70), should be about 50.0')
    print('Actually is:', p1.get_distance_from_start())
    p2 = Point(1, 1)
    print()
    print('p2 from start to (1, 1), should be about 0.0')
    print('Actually is:', p2.get_distance_from_start())
    p2.move_to(11, 1)
    print()
    print('p2 from start to (11, 1), should be about 10.0')
    print('Actually is:', p2.get_distance_from_start())
    # Returning to the start must give a distance of 0 again.
    p2.move_to(999, 999)
    p2.move_to(1, 1)
    print()
    print('p2 from start to (1, 1), should be about 0.0')
    print('Actually is:', p2.get_distance_from_start())
def run_test_get_distance_traveled():
    """
    Tests the get_distance_traveled method of the Point class.
    Here is the specification for the get_distance_traveled method:
    What comes in:
    -- self
    What goes out: Returns the sum of all the distances that
    this Point has "moved" via calls to move_to and/or move_by.
    Side effects:
    ** You figure out WHETHER OR NOT side effect(s) MUST happen! **
    EXAMPLE: The following shows get_distance_traveled in action.
    You may also use this example to test this method.
    p1 = Point(20, 30)
    p1.move_to(21, 30)
    p1.move_to(21, 38)
    print()
    print('Expected p1 has traveled 9.0')
    print('Actual:', p1.get_distance_traveled())
    p1.move_by(1, 1)
    print()
    print('Expected p1 has now traveled about 10.414')
    print('Actual:', p1.get_distance_traveled())
    p2 = Point(0, 0)
    p3 = Point(100, 22)
    p4 = Point(0, 555)
    for k in range(100):
        p2.move_by(0, k + 1)
        p3.move_by(k + 1, 0)
        p4.move_to(k + 1, 555)
    print()
    print('Expected p2 has now traveled', 101 * 50.0)
    print('Actual:', p2.get_distance_traveled())
    print('Expected p3 has now traveled', 101 * 50.0)
    print('Actual:', p3.get_distance_traveled())
    print('Expected p4 has now traveled 100.0')
    print('Actual:', p4.get_distance_traveled())
    """
    # --------------------------------------------------------------------------
    # DONE: 11. Follow the same instructions as in _TODO_ 3 above,
    # but for the get_distance_traveled method specified above.
    # --------------------------------------------------------------------------
    # Console banner so this test's output is easy to locate in the run log.
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_distance_traveled method')
    print('of the Point class.')
    print('-----------------------------------------------------------')
    # Two axis-aligned moves (1 + 8 units), then one diagonal move_by(1, 1)
    # adding sqrt(2) ~ 1.414 -- matching the expected values printed below.
    p1 = Point(20, 30)
    p1.move_to(21, 30)
    p1.move_to(21, 38)
    print()
    print('Expected p1 has traveled 9.0')
    print('Actual:', p1.get_distance_traveled())
    p1.move_by(1, 1)
    print()
    print('Expected p1 has now traveled about 10.414')
    print('Actual:', p1.get_distance_traveled())
    # Accumulation check: p2 and p3 each move 1 + 2 + ... + 100 = 5050 units
    # (printed as 101 * 50.0); p4 steps one unit right per iteration (100.0).
    p2 = Point(0, 0)
    p3 = Point(100, 22)
    p4 = Point(0, 555)
    for k in range(100):
        p2.move_by(0, k + 1)
        p3.move_by(k + 1, 0)
        p4.move_to(k + 1, 555)
    print()
    print('Expected p2 has now traveled', 101 * 50.0)
    print('Actual:', p2.get_distance_traveled())
    print('Expected p3 has now traveled', 101 * 50.0)
    print('Actual:', p3.get_distance_traveled())
    print('Expected p4 has now traveled 100.0')
    print('Actual:', p4.get_distance_traveled())
def run_test_closer_to():
    """
    Tests the closer_to method of the Point class.
    Here is the specification for the closer_to method:
    What comes in:
    -- self
    -- a Point object p2
    -- a Point object p3
    What goes out:
    Returns whichever of p2 and p3 this Point is closer to.
    (Just to be specific, it should return p2 in the case of a tie.)
    Side effects:
    ** You figure out WHETHER OR NOT side effect(s) MUST happen! **
    EXAMPLE: The following shows closer_to in action.
    You may also use this example to test this method.
    p1 = Point(10, 20)
    p2 = Point(15, 20)
    p3 = Point(14, 24)
    print()
    print('Expected:', p2)
    print('Actual: ', p1.closer_to(p2, p3))
    print('Expected:', p2)
    print('Actual: ', p1.closer_to(p3, p2))
    print()
    print('Expected:', p1)
    print('Actual: ', p1.closer_to(p1, p3))
    print('Expected:', p2)
    print('Actual: ', p2.closer_to(p3, p2))
    print('Expected:', p3)
    print('Actual: ', p3.closer_to(p3, p3))
    print()
    p4 = p1.clone()
    p5 = p1.clone()
    print('Expected:', p4)
    print('Actual: ', p1.closer_to(p4, p5))
    print('Expected: True')
    print('Actual: ', p1.closer_to(p4, p5) is p4)
    print('Expected: False')
    print('Actual: ', p1.closer_to(p4, p5) is p5)
    """
    # --------------------------------------------------------------------------
    # DONE: 12. Follow the same instructions as in TO-DO 3 above,
    # but for the closer_to method specified above.
    # --------------------------------------------------------------------------
    # Console banner so this test's output is easy to locate in the run log.
    print()
    print('-----------------------------------------------------------')
    print('Testing the closer_to method of the Point class.')
    print('-----------------------------------------------------------')
    p1 = Point(10, 20)
    p2 = Point(15, 20)
    p3 = Point(14, 24)
    print()
    print('Expected:', p2)
    print('Actual: ', p1.closer_to(p2, p3))
    print('Expected:', p2)
    print('Actual: ', p1.closer_to(p3, p2))
    print()
    print('Expected:', p1)
    print('Actual: ', p1.closer_to(p1, p3))
    print('Expected:', p2)
    print('Actual: ', p2.closer_to(p3, p2))
    print('Expected:', p3)
    print('Actual: ', p3.closer_to(p3, p3))
    print()
    # Identity checks: on a tie between two equal clones, closer_to must
    # return the FIRST argument object (p4), not the equal-but-distinct p5.
    p4 = p1.clone()
    p5 = p1.clone()
    print('Expected:', p4)
    print('Actual: ', p1.closer_to(p4, p5))
    print('Expected: True')
    print('Actual: ', p1.closer_to(p4, p5) is p4)
    print('Expected: False')
    print('Actual: ', p1.closer_to(p4, p5) is p5)
def run_test_halfway_to():
    """
    Tests the halfway_to method of the Point class.
    Here is the specification for the halfway_to method:
    What comes in:
    -- self
    -- a Point object p2
    What goes out:
    Returns a new Point that is halfway between this Point and p2.
    That is, the x coordinate of the new Point is the average
    of the x coordinate of this Point and the x coordinate of p2,
    and likewise for the new Point's y coordinate.
    Side effects:
    ** You figure out WHETHER OR NOT side effect(s) MUST happen! **
    EXAMPLE: The following shows halfway_to in action.
    You may also use this example to test this method.
    p1 = Point(10, 20)
    p2 = Point(30, 100)
    print()
    print('Should be: Point(20.0, 60.0)')
    print('Actual is:', p1.halfway_to(p2))
    print('Should be: Point(20.0, 60.0)')
    print('Actual is:', p2.halfway_to(p1))
    print()
    print('Should be: Point(10.0, 20.0)')
    print('Actual is:', p1.halfway_to(p1))
    p3 = Point(-10, 20)
    p4 = Point(30, -100)
    print()
    print('Should be: Point(10.0, -40.0)')
    print('Actual is:', p3.halfway_to(p4))
    print('Should be: Point(10.0, -40.0)')
    print('Actual is:', p3.halfway_to(p4))
    print()
    print('Should be: Point(-10.0, 20.0)')
    print('Actual is:', p3.halfway_to(p3))
    """
    # --------------------------------------------------------------------------
    # DONE: 13. Follow the same instructions as in TO-DO 3 above,
    # but for the halfway_to method specified above.
    # --------------------------------------------------------------------------
    # Console banner so this test's output is easy to locate in the run log.
    print()
    print('-----------------------------------------------------------')
    print('Testing the halfway_to method of the Point class.')
    print('-----------------------------------------------------------')
    # Symmetry check: a.halfway_to(b) and b.halfway_to(a) print the same point.
    p1 = Point(10, 20)
    p2 = Point(30, 100)
    print()
    print('Should be: Point(20.0, 60.0)')
    print('Actual is:', p1.halfway_to(p2))
    print('Should be: Point(20.0, 60.0)')
    print('Actual is:', p2.halfway_to(p1))
    print()
    print('Should be: Point(10.0, 20.0)')
    print('Actual is:', p1.halfway_to(p1))
    # Negative coordinates, plus the degenerate halfway-to-itself case.
    p3 = Point(-10, 20)
    p4 = Point(30, -100)
    print()
    print('Should be: Point(10.0, -40.0)')
    print('Actual is:', p3.halfway_to(p4))
    print('Should be: Point(10.0, -40.0)')
    print('Actual is:', p3.halfway_to(p4))
    print()
    print('Should be: Point(-10.0, 20.0)')
    print('Actual is:', p3.halfway_to(p3))
# ------------------------------------------------------------------------------
# Calls main to start the ball rolling.
# ------------------------------------------------------------------------------
main()
| 33.734711 | 80 | 0.529464 |
f354a78723caeaabf67be50a9816a92727419d2c | 5,969 | py | Python | 2_ConvolutionalGANs/scoring.py | PacktPublishing/Generative-Adversarial-Networks-with-PyTorch-1.0-Cookbook | c9b8dcf57e03a11683254ca2b662c3503b5ead36 | [
"MIT"
] | 9 | 2019-06-10T13:05:19.000Z | 2021-11-08T13:10:48.000Z | 2_ConvolutionalGANs/scoring.py | urantialife/Generative-Adversarial-Networks-with-PyTorch-1.0-Cookbook | a228d141b36b0a6276cf17d6ac5a8978b8df9a60 | [
"MIT"
] | null | null | null | 2_ConvolutionalGANs/scoring.py | urantialife/Generative-Adversarial-Networks-with-PyTorch-1.0-Cookbook | a228d141b36b0a6276cf17d6ac5a8978b8df9a60 | [
"MIT"
] | 7 | 2019-01-30T01:59:18.000Z | 2020-09-19T05:47:07.000Z | """
This function calculates the Frechet Inception Distance between two datasets.
"""
import argparse
import os
import numpy as np
import sys
from scipy import linalg
from scipy.misc import imread
from PIL import Image
import torch
import torch.nn as nn
import torchvision as tv
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import compute_features
pe = os.path.exists
pj = os.path.join
class ImageFolder(Dataset):
    """Dataset over all images with a given extension in one flat directory.

    Items are (tensor, -1) pairs; the constant -1 stands in for a label so
    the dataset can be fed to a standard DataLoader. File order is shuffled
    once at construction time.
    """

    def __init__(self, data_path, max_N=-1, ext=".png"):
        candidates = [pj(data_path, entry) for entry in os.listdir(data_path)
                      if entry.endswith(ext)]
        # Truncate BEFORE shuffling, mirroring the original selection order.
        if max_N > 0:
            candidates = candidates[:max_N]
        self._images = candidates
        np.random.shuffle(self._images)
        # Resize so the shorter side is 299 px (Inception v3 input size),
        # then convert to a float tensor in [0, 1].
        self._transform = tv.transforms.Compose([
            tv.transforms.Resize(299),
            tv.transforms.ToTensor()
        ])

    def __getitem__(self, index):
        image = Image.open(self._images[index])
        return self._transform(image), -1

    def __len__(self):
        return len(self._images)
class InceptionV3(nn.Module):
    """Pretrained Inception v3 truncated before its final classifier.

    The forward pass resizes inputs to 299x299, rescales them from [0, 1]
    to [-1, 1], and returns the pooled activations (N, 2048, 1, 1) used for
    FID / Inception-score feature extraction.
    """

    def __init__(self):
        super().__init__()
        inception = tv.models.inception_v3(pretrained=True)
        # Every layer up to and including the global average pool, i.e. the
        # whole network minus the fully-connected classification head.
        self._layers = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2),
            inception.Conv2d_3b_1x1,
            inception.Conv2d_4a_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2),
            inception.Mixed_5b,
            inception.Mixed_5c,
            inception.Mixed_5d,
            inception.Mixed_6a,
            inception.Mixed_6b,
            inception.Mixed_6c,
            inception.Mixed_6d,
            inception.Mixed_6e,
            inception.Mixed_7a,
            inception.Mixed_7b,
            inception.Mixed_7c,
            nn.AdaptiveAvgPool2d(output_size=(1, 1))]
        self._model = nn.Sequential(*self._layers)

    def forward(self, x):
        # Inception v3 expects 299x299 inputs in the [-1, 1] range.
        x = F.interpolate(x, size=(299, 299), mode="bilinear",
                          align_corners=False)
        x = 2.0 * x - 1.0
        # FIX: the original manually iterated self._layers even though
        # self._model already wraps the exact same sequence -- apply the
        # Sequential once instead of duplicating the layer loop.
        return self._model(x)

    def get_features(self, x):
        """Alias for forward(); kept for callers that use this name."""
        return self.forward(x)
def calculate_fid(cfg):
    """Frechet Inception Distance between cfg["dataset_1"] and cfg["dataset_2"]."""
    mean1, cov1 = get_mean_and_cov(cfg["dataset_1"], cfg)
    mean2, cov2 = get_mean_and_cov(cfg["dataset_2"], cfg)
    return calculate_frechet(mean1, cov1, mean2, cov2)
# Lucic et al. 2017
def calculate_frechet(mu1, cov1, mu2, cov2):
    """Frechet distance between the Gaussians (mu1, cov1) and (mu2, cov2).

    ||mu1 - mu2||^2 + Tr(cov1 + cov2 - 2 * sqrtm(cov1 @ cov2)).
    """
    mean_diff = mu1 - mu2
    # disp=False returns (sqrtm, error-estimate) instead of printing warnings.
    sqrt_cov, _ = linalg.sqrtm(cov1.dot(cov2), disp=False)
    return mean_diff.dot(mean_diff) + np.trace(cov1 + cov2 - 2 * sqrt_cov)
def calculate_inc_score(cfg):
    """Inception score (mean, std) over the images in cfg["dataset_1"].

    The first 1000 feature dimensions are treated as class logits and
    softmax-normalized before scoring.
    """
    logits = np.exp(get_feats(cfg["dataset_1"], cfg)[:, :1000])
    probs = logits / np.sum(logits, 1, keepdims=True)
    return feats_score(probs, cfg["inc_score_splits"])
def check_paths(cfg):
    """Normalize the dataset paths in cfg to absolute paths and verify them.

    dataset_2 is only required when computing the FID (which compares two
    datasets); the inception score uses dataset_1 alone.
    """
    cfg["dataset_1"] = os.path.abspath(cfg["dataset_1"])
    cfg["dataset_2"] = os.path.abspath(cfg["dataset_2"])
    missing_first = not pe(cfg["dataset_1"])
    missing_second = cfg["method"] == "fid" and not pe(cfg["dataset_2"])
    if missing_first or missing_second:
        raise RuntimeError("Invalid path supplied")
def feats_score(feats, splits):
    """Mean and std of exp(KL(p || mean(p))) over `splits` row-chunks of feats.

    This is the standard inception-score computation; each chunk's score is
    exp of the average KL divergence between its rows and its mean row.
    """
    chunk = feats.shape[0] // splits
    scores = []
    for s in range(splits):
        p = feats[s * chunk:(s + 1) * chunk, :]
        # keepdims keeps q broadcastable as a (1, dim) row against p.
        q = np.mean(p, axis=0, keepdims=True)
        kl = np.mean(np.sum(p * (np.log(p) - np.log(q)), 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
def get_feats(path, cfg):
    """Run every image under `path` through InceptionV3 and return features."""
    print("Getting Inception V3 model...")
    model = InceptionV3()
    cuda_device = cfg["cuda"]
    # A negative device index means "run on the CPU".
    if cuda_device >= 0:
        model.cuda(cuda_device)
    print("...Done")

    folder = ImageFolder(path, max_N=cfg["max_N"], ext=cfg["ext"])
    # shuffle=False keeps the DataLoader order deterministic.
    loader = DataLoader(folder, batch_size=cfg["batch_size"],
                        num_workers=cfg["num_workers"], shuffle=False)

    print("Generating features...")
    features = compute_features(model, loader, cfg["cuda"],
                                make_chip_list=False)
    print("...Done")
    return features
def get_mean_and_cov(path, cfg):
    """Mean vector and covariance matrix of the Inception features of `path`."""
    features = get_feats(path, cfg)
    # rowvar=False: rows are samples, columns are feature dimensions.
    return np.mean(features, axis=0), np.cov(features, rowvar=False)
def main(args):
    """Entry point: compute the FID or the inception score per args.method."""
    cfg = vars(args)
    check_paths(cfg)
    if cfg["method"] == "fid":
        print("FID: %0.4f" % calculate_fid(cfg))
    else:
        mean_score, std_score = calculate_inc_score(cfg)
        print("Inception score: %0.4f with std %0.4f" % (mean_score, std_score))
if __name__ == "__main__":
    # CLI for computing FID / inception score between two image folders.
    parser = argparse.ArgumentParser()
    parser.add_argument("--d1", "--dataset-1", dest="dataset_1", type=str,
                        default="./data/cifar10-real")
    # dataset_2 is only used by the FID method (see check_paths).
    parser.add_argument("--d2", "--dataset-2", dest="dataset_2", type=str,
                        default="./data/cifar10-fake")
    parser.add_argument("--cuda", type=int, default=0,
                        help="Cuda device number, select -1 for cpu")
    parser.add_argument("--num-workers", type=int, default=4,
                        help="Number of worker threads to use loading data")
    parser.add_argument("--batch-size", type=int, default=32)
    parser.add_argument("--ext", "--extension", dest="ext", type=str,
                        default=".png")
    parser.add_argument("--method", type=str, default="fid",
                        choices=["fid", "inc_score"])
    parser.add_argument("--incep-feat-dim", type=int, default=2048,
                        help="Size of Inception feature vectors to use")
    parser.add_argument("--inc-score-splits", type=int, default=10)
    # max-N limits how many images are sampled per dataset; -1 means "all".
    parser.add_argument("--max-N", type=int, default=-1)
    args = parser.parse_args()
    main(args)
| 33.723164 | 77 | 0.612665 |
1064edd9327c9874ba52d70b4fc96d607b186e8b | 3,556 | py | Python | option/option.py | dustinmaurer/options-strategy-backtester | 1974bfd672d163a39f928208f36a8470a99e1a48 | [
"MIT"
] | null | null | null | option/option.py | dustinmaurer/options-strategy-backtester | 1974bfd672d163a39f928208f36a8470a99e1a48 | [
"MIT"
] | null | null | null | option/option.py | dustinmaurer/options-strategy-backtester | 1974bfd672d163a39f928208f36a8470a99e1a48 | [
"MIT"
] | 1 | 2021-04-11T07:18:55.000Z | 2021-04-11T07:18:55.000Z | """The Option oobject stores (or solves for) all attributes of an option"""
import scipy.stats as sps
from scipy.special import ndtri
import numpy as np
from datetime import date, timedelta
class Option:
    """A European style Option contract priced with zero-rate Black-Scholes.

    Attributes
    ----------
    underlying : str
        the name of the underlying security for the option
    kind : str
        either 'put' or 'call'
    kind_value : int
        +1 for calls, -1 for puts (sign used in the pricing formulas;
        the original docstring had this backwards)
    strike : float
        the price at which the option would be exercised
    expiration : date
        the date that the option expires
    iv : float
        the implied volatility of the option
    value : float
        the current theoretic value of the option
    delta : float
        the current theoretic delta of the option (updated by getValue)
    """

    def __init__(self, underlying, kind, strike, expiration, iv=.2, value=None, delta=None):
        """Validate and store the contract parameters.

        Raises TypeError for malformed arguments (the original exception
        type is kept so existing callers continue to work).
        """
        if type(underlying) is not str:
            raise TypeError("The underlying must be a string.")
        self.underlying = underlying
        if kind.lower() not in ["put", "call"]:
            raise TypeError("The option must be either a put or a call.")
        self.kind = kind.lower()
        self.kind_value = 1 if self.kind == "call" else -1
        if strike <= 0:
            raise TypeError("The option strike must be positive.")
        self.strike = strike
        if type(expiration) is not date:
            try:
                expiration = date.fromisoformat(expiration)
            except Exception:
                raise TypeError("Expiration must be a date.")
        self.expiration = expiration
        self.iv = iv
        self.value = value
        self.delta = delta
        # The pricing formulas below assume a zero risk-free rate.
        self.interest_rate = 0

    def getValue(self, price, eval_date):
        """Return the option value at `price` on `eval_date`.

        Uses zero-rate Black-Scholes while the option is alive and the
        intrinsic value once it has expired. Side effect: updates
        self.delta when the option is still alive.
        """
        if type(eval_date) is not date:
            try:
                eval_date = date.fromisoformat(eval_date)
            except Exception:
                # FIX: the original message said "Expiration" for eval_date.
                raise TypeError("The evaluation date must be a date.")
        # One extra day so the option keeps time value on its expiry day.
        time_delta = self.expiration - eval_date + timedelta(days=1)
        if time_delta.days <= 0:
            # Expired: worth intrinsic value only.
            # BUGFIX: the original returned the NEGATIVE payoff for
            # in-the-money options (price - strike for puts and
            # strike - price for calls). Intrinsic value is the reverse.
            if self.kind == "put":
                return max(self.strike - price, 0)
            return max(price - self.strike, 0)
        time = time_delta.days / 365.0
        d1 = (np.log(price / self.strike) + .5 * self.iv ** 2 * time) / self.iv / np.sqrt(time)
        d2 = d1 - self.iv * np.sqrt(time)
        # kind_value folds the put/call sign convention into one formula.
        value = self.kind_value * price * sps.norm.cdf(self.kind_value * d1) \
            - self.kind_value * self.strike * sps.norm.cdf(self.kind_value * d2)
        self.delta = self.kind_value * sps.norm.cdf(self.kind_value * d1)
        return value

    def setStrikeFromDelta(self, delta, price, eval_date):
        """Set (and return) the strike that yields the requested delta.

        NOTE(review): the inversion uses the call-delta convention
        (delta = N(d1)); confirm intended behavior for puts.
        """
        if type(eval_date) is not date:
            try:
                eval_date = date.fromisoformat(eval_date)
            except Exception:
                raise TypeError("The evaluation date must be a date.")
        time_delta = self.expiration - eval_date + timedelta(days=1)
        time = time_delta.days / 365.0
        # Solve N(d1) = delta for the strike (zero-rate Black-Scholes).
        self.strike = np.exp(-(ndtri(delta) * self.iv * np.sqrt(time) - self.iv ** 2 * 0.5 * time)) * price
        return self.strike
| 30.655172 | 147 | 0.580427 |
a219df200161163a0252f1444a6322873e6709d1 | 84 | py | Python | inclearn/models/__init__.py | ZepengHuo/DER_growing_representation | d711034c550bcac40a6ec7dfa1c65a79589efe93 | [
"MIT"
] | 79 | 2021-03-29T07:50:31.000Z | 2022-03-30T04:13:27.000Z | inclearn/models/__init__.py | ZepengHuo/DER_growing_representation | d711034c550bcac40a6ec7dfa1c65a79589efe93 | [
"MIT"
] | 20 | 2021-04-07T01:42:24.000Z | 2022-03-18T08:59:30.000Z | inclearn/models/__init__.py | ZepengHuo/DER_growing_representation | d711034c550bcac40a6ec7dfa1c65a79589efe93 | [
"MIT"
] | 12 | 2021-07-02T02:33:54.000Z | 2022-02-21T11:23:20.000Z | from .incmodel import IncModel
from .align import Weight_Align
from .bic import BiC
| 21 | 31 | 0.821429 |
30b9c9018b5c9851dde0a3273527b9eb77558731 | 959 | py | Python | test_calculator.py | python-frederick/python-testing-101 | 5bbce304c76a73548612c6ab702f3ee43bf6b00d | [
"MIT"
] | 9 | 2019-04-19T01:20:26.000Z | 2022-03-17T00:17:53.000Z | test_calculator.py | SlikNik/python-testing-101 | 5bbce304c76a73548612c6ab702f3ee43bf6b00d | [
"MIT"
] | null | null | null | test_calculator.py | SlikNik/python-testing-101 | 5bbce304c76a73548612c6ab702f3ee43bf6b00d | [
"MIT"
] | 6 | 2020-06-13T21:58:48.000Z | 2022-03-01T11:08:56.000Z | import pytest
from calculator import Calculator, CalculatorError
def test_add():
    """2 + 3 should equal 5."""
    calc = Calculator()
    total = calc.add(2, 3)
    assert total == 5
def test_add_weird_stuff():
    """Adding a string operand must raise CalculatorError."""
    calculator = Calculator()
    with pytest.raises(CalculatorError):
        # FIX: dropped the unused `result =` assignment -- the value is
        # never reachable when the expected exception is raised.
        calculator.add("two", 3)
def test_add_weirder_stuff():
    """Adding two string operands must raise CalculatorError."""
    calculator = Calculator()
    with pytest.raises(CalculatorError):
        # FIX: dropped the unused `result =` assignment (dead store).
        calculator.add("two", "three")
def test_subtract():
    """9 - 3 should equal 6."""
    calc = Calculator()
    difference = calc.subtract(9, 3)
    assert difference == 6
def test_multiply():
    """9 * 3 should equal 27."""
    calc = Calculator()
    product = calc.multiply(9, 3)
    assert product == 27
def test_divide():
    """9 / 3 should equal 3."""
    calc = Calculator()
    quotient = calc.divide(9, 3)
    assert quotient == 3
def test_divide_by_zero():
    """Dividing by zero must raise CalculatorError."""
    calculator = Calculator()
    with pytest.raises(CalculatorError):
        # FIX: dropped the unused `result =` assignment (dead store).
        calculator.divide(9, 0)
| 16.824561 | 50 | 0.668405 |
8f07c8d8edf28b78cca982cdba8d73df3042ceaa | 1,057 | py | Python | app.py | DavidCastilloAlvarado/Movilnet_Object_detection_JS | 6bc85b24deb40b74cf08e9395d889c9e46b40346 | [
"MIT"
] | null | null | null | app.py | DavidCastilloAlvarado/Movilnet_Object_detection_JS | 6bc85b24deb40b74cf08e9395d889c9e46b40346 | [
"MIT"
] | null | null | null | app.py | DavidCastilloAlvarado/Movilnet_Object_detection_JS | 6bc85b24deb40b74cf08e9395d889c9e46b40346 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
# import camera driver
#if os.environ.get('CAMERA'):
# Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
# from camera import Camera
from camera_opencv import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
app = Flask(__name__)
@app.route('/')
def index():
    """Video streaming home page."""
    # Presumably the template embeds the /video_feed route in an <img> tag --
    # confirm against templates/index_v1.html.
    return render_template('index_v1.html')
def gen(camera):
    """Yield an endless multipart (MJPEG) stream of frames from `camera`.

    Each yielded chunk is one part of a multipart/x-mixed-replace response:
    boundary marker, JPEG content-type header, the frame bytes, and a
    trailing CRLF.
    """
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield part_header + camera.get_frame() + b'\r\n'
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    frame_stream = gen(Camera())
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # Listen on all interfaces; threaded so multiple clients can stream.
    app.run(host='0.0.0.0', threaded=True)
| 26.425 | 77 | 0.66982 |
8c0c5efec21c547f948c11930d67575e95ab01a5 | 569 | py | Python | model/group.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | model/group.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | model/group.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Group:
    """Value object for an address-book group used by the UI tests.

    Equality treats a missing id as a wildcard: two groups compare equal
    when their names match and at least one of them has no id yet.
    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable -- fine for list-based comparisons, but they cannot be
    put in sets/dicts.
    """

    # `id` shadows the builtin, but it is part of the public keyword
    # interface (callers pass id=...), so the name is kept.
    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.footer = footer
        self.header = header
        self.id = id

    def __repr__(self):
        return "%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)

    def __eq__(self, other):
        return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name

    def id_or_max(self):
        """Sort key: the numeric id, or maxsize for not-yet-persisted groups."""
        if self.id:
            return int(self.id)
        else:
            # FIX: the original line had dataset-metadata residue fused onto
            # it ("return maxsize | 25.86... |"), breaking the syntax.
            return maxsize
d7368e830bcf23126d257ec5df4b04eb8c54619b | 1,504 | py | Python | experiments/benchmarking/stress-ng/second/yaml2json.py | ljishen/kividry | 931e7248f4a3447cf7a0378adb51a5ad380bd96a | [
"Apache-2.0"
] | null | null | null | experiments/benchmarking/stress-ng/second/yaml2json.py | ljishen/kividry | 931e7248f4a3447cf7a0378adb51a5ad380bd96a | [
"Apache-2.0"
] | null | null | null | experiments/benchmarking/stress-ng/second/yaml2json.py | ljishen/kividry | 931e7248f4a3447cf7a0378adb51a5ad380bd96a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import yaml
import sys
import os
# usage: postprocess <stressor_class> <method>
#
# when <method> is given, we assume only one entry in the yaml output
out_file_path = os.path.dirname(os.path.realpath(__file__))+'/out.yml';
with open(out_file_path, 'r') as f:
y = yaml.load(f)
stressor_class = sys.argv[1]
if len(sys.argv) == 3:
stressor_name = sys.argv[2]
metric = y['metrics'][0]
print("{")
print("\"name\": \"stressng-"+stressor_class+"-"+stressor_name+"\",")
print("\"class\": \"" + stressor_class + "\",")
print("\"units\": \"bogo-ops-per-second\",")
print("\"lower_is_better\": false,")
print("\"result\": "+str(metric['bogo-ops-per-second-real-time']))
print("}")
else:
i = 0
for metrics in y['metrics']:
if metrics['stressor'] is None:
# handle special case of the 'null' stressor, which isn't
# correctly displayed in the YAML output of stress-ng
stressor_name = 'null'
else:
stressor_name = metrics['stressor']
print("{")
print("\"name\": \"stressng-"+stressor_class+"-"+stressor_name+"\",")
print("\"class\": \"" + stressor_class + "\",")
print("\"units\": \"bogo-ops-per-second\",")
print("\"lower_is_better\": false,")
print("\"result\": " + str(metrics['bogo-ops-per-second-real-time']))
if i < len(y['metrics'])-1:
print("},")
else:
print("}")
i += 1
| 30.08 | 77 | 0.563165 |
f4839ea12416affbdf97e2da9180f382b711554d | 10,034 | py | Python | mobie/utils.py | platybrowser/mobie-python | 43341cd92742016a3a0d602325bb93b94c3b4c36 | [
"MIT"
] | 1 | 2020-03-03T01:33:06.000Z | 2020-03-03T01:33:06.000Z | mobie/utils.py | platybrowser/mobie-python | 43341cd92742016a3a0d602325bb93b94c3b4c36 | [
"MIT"
] | 4 | 2020-05-15T09:27:59.000Z | 2020-05-29T19:15:00.000Z | mobie/utils.py | platybrowser/mobie-python | 43341cd92742016a3a0d602325bb93b94c3b4c36 | [
"MIT"
] | 2 | 2020-06-08T07:06:01.000Z | 2020-06-08T07:08:08.000Z | import argparse
import json
import multiprocessing
import os
from copy import deepcopy
import mobie.metadata as metadata
from cluster_tools.cluster_tasks import BaseClusterTask
from elf.io import open_file
from mobie.validation import validate_view_metadata
from pybdv.util import get_key
# All storage formats this module can address; the ".s3" entries appear to be
# the remote (bucket-hosted) counterparts of the local formats.
FILE_FORMATS = [
    "bdv.hdf5",
    "bdv.n5",
    "bdv.n5.s3",
    "ome.zarr",
    "ome.zarr.s3",
    "openOrganelle.s3"
]
def get_data_key(file_format, scale, path=None):
    """Return the internal dataset key for `scale` in the given file format.

    `path` is only required for the ome.zarr format, whose key is read from
    the container's multiscales metadata.
    """
    if file_format.startswith("bdv"):
        # BDV layouts address data by timepoint / setup / scale; only the
        # hdf5 flavor uses the h5-style key scheme.
        return get_key(file_format == "bdv.hdf5", timepoint=0, setup_id=0, scale=scale)
    if file_format == "ome.zarr":
        assert path is not None
        with open_file(path, "r") as f:
            datasets = f.attrs["multiscales"][0]["datasets"]
        # NOTE(review): `scale` is ignored here -- the first (scale-0)
        # dataset path is always returned; confirm that this is intended.
        return datasets[0]["path"]
    raise NotImplementedError(file_format)
def get_internal_paths(dataset_folder, file_format, name):
    """Return (data_path, xml_path) for a new source inside a dataset folder.

    For ome.zarr the metadata lives inside the data container itself, so
    both returned paths are identical.

    Raises ValueError for unknown formats or formats without local data
    creation support (the s3 variants).
    """
    if file_format not in FILE_FORMATS:
        raise ValueError(f"Unknown file format {file_format}.")
    format_folder = file_format.replace(".", "-")
    image_folder = os.path.join(dataset_folder, "images", format_folder)
    # FIX: the bdv.hdf5 and bdv.n5 branches were copy-pasted duplicates
    # differing only in the container extension -- fold them together.
    bdv_extensions = {"bdv.hdf5": ".h5", "bdv.n5": ".n5"}
    if file_format in bdv_extensions:
        data_path = os.path.join(image_folder, f"{name}{bdv_extensions[file_format]}")
        xml_path = os.path.join(image_folder, f"{name}.xml")
        return data_path, xml_path
    if file_format == "ome.zarr":
        data_path = os.path.join(image_folder, f"{name}.ome.zarr")
        return data_path, data_path
    raise ValueError(f"Data creation for the file format {file_format} is not supported.")
def require_dataset(root, dataset_name, file_format):
    """Ensure a project supporting `file_format` exists at `root`.

    Creates the project metadata if necessary. Returns whether the dataset
    `dataset_name` already exists in the project.
    """
    # check if we have the project and dataset already
    proj_exists = metadata.project_exists(root)
    if proj_exists:
        if not metadata.has_file_format(root, file_format):
            # FIX: the original raised ValueError("") with an empty,
            # undiagnosable message.
            raise ValueError(
                f"The project at {root} does not support the file format {file_format}."
            )
        ds_exists = metadata.dataset_exists(root, dataset_name)
    else:
        metadata.create_project_metadata(root, [file_format])
        ds_exists = False
    return ds_exists
def require_dataset_and_view(root, dataset_name, file_format,
                             source_type, source_name, menu_name,
                             view, is_default_dataset, contrast_limits=None):
    """Ensure project/dataset exist and return a validated view for a source.

    If `view` is None a default view is generated; otherwise the given view
    is updated with `menu_name` and (for images) `contrast_limits`. When the
    dataset is newly created, the view also becomes its default bookmark.
    """
    ds_exists = require_dataset(root, dataset_name, file_format)
    dataset_folder = os.path.join(root, dataset_name)

    if view is None:
        kwargs = {"contrastLimits": contrast_limits} if source_type == "image" else {}
        view = metadata.get_default_view(source_type, source_name, menu_name=menu_name, **kwargs)
    else:
        update_view = {}
        if menu_name is not None:
            update_view["uiSelectionGroup"] = menu_name
        # BUGFIX: this condition was `contrast_limits is None`, which
        # overwrote the view's contrastLimits with None and silently
        # dropped explicitly passed limits.
        if source_type == "image" and contrast_limits is not None:
            update_view["contrastLimits"] = contrast_limits
        if update_view:
            view.update(update_view)
    validate_view_metadata(view, sources=[source_name])

    if not ds_exists:
        metadata.create_dataset_structure(root, dataset_name, [file_format])
        # The first view of a new dataset doubles as its default bookmark.
        default_view = deepcopy(view)
        default_view.update({"uiSelectionGroup": "bookmark"})
        metadata.create_dataset_metadata(dataset_folder, views={"default": default_view})
        metadata.add_dataset(root, dataset_name, is_default_dataset)
    return view
# TODO default arguments for scale-factors and chunks
def get_base_parser(description, transformation_file=False):
    """Build the argparse parser shared by the data-addition CLI scripts.

    When transformation_file is True, --transformation is a required path to
    an elastix transformation file; otherwise it is an optional json-encoded
    affine parameter list. Spatial arguments (resolution, scale_factors,
    chunks, transformation) are json-encoded strings decoded later by
    parse_spatial_args.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--input_path", type=str,
                        help="path to the input data", required=True)
    parser.add_argument("--input_key", type=str,
                        help="key for the input data, e.g. internal path for h5/n5 data or patterns like *.tif",
                        required=True)
    parser.add_argument("--root", type=str,
                        help="root folder under which the MoBIE project is saved",
                        required=True)
    parser.add_argument("--dataset_name", type=str,
                        help="name of the dataset to which the image data is added",
                        required=True)
    parser.add_argument("--name", type=str,
                        help="name of the source to be added",
                        required=True)
    parser.add_argument("--resolution", type=str,
                        help="resolution of the data in micrometer, json-encoded",
                        required=True)
    parser.add_argument("--scale_factors", type=str,
                        help="factors used for downscaling the data, json-encoded",
                        required=True)
    parser.add_argument("--chunks", type=str,
                        help="chunks of the data that is added, json-encoded",
                        required=True)
    parser.add_argument("--menu_name", type=str, default=None,
                        help="the menu name which will be used when grouping this source in the UI")
    parser.add_argument("--view", type=str, default=None,
                        help="default view settings for this source, json encoded or path to a json file")
    if transformation_file:
        parser.add_argument("--transformation", type=str, required=True,
                            help="file defining elastix transformation to be applied")
    else:
        parser.add_argument("--transformation", type=str, default=None,
                            help="affine transformation parameters in bdv convention, json encoded")
    parser.add_argument("--unit", type=str, default="micrometer",
                        help="physical unit of the source data")
    parser.add_argument("--tmp_folder", type=str, default=None,
                        help="folder for temporary computation files")
    parser.add_argument("--target", type=str, default="local",
                        help="computation target")
    parser.add_argument("--max_jobs", type=int, default=multiprocessing.cpu_count(),
                        help="number of jobs")
    hlp = "whether to set new dataset as default dataset. Only applies if the dataset is being created."
    parser.add_argument("--is_default_dataset", type=int, default=0, help=hlp)
    return parser
def parse_spatial_args(args, parse_transformation=True):
    """Decode the json-encoded spatial CLI arguments.

    Returns (resolution, scale_factors, chunks) or, when
    parse_transformation is True, (resolution, scale_factors, chunks,
    transformation). Optional arguments that are None stay None.
    """
    resolution = json.loads(args.resolution)
    scale_factors = None if args.scale_factors is None else json.loads(args.scale_factors)
    chunks = None if args.chunks is None else json.loads(args.chunks)
    if not parse_transformation:
        return resolution, scale_factors, chunks
    transformation = None if args.transformation is None else json.loads(args.transformation)
    return resolution, scale_factors, chunks, transformation
def parse_view(args):
    """Load the --view argument: None, a path to a json file, or inline json."""
    view = args.view
    if view is None:
        return view
    if os.path.exists(view):
        # BUGFIX: the original called json.loads(f) on the file HANDLE,
        # which raises TypeError; json.load reads and decodes the file.
        with open(view) as f:
            return json.load(f)
    return json.loads(view)
def clone_dataset(root, src_dataset, dst_dataset, is_default=False, copy_misc=None):
    """ Initialize a MoBIE dataset by cloning an existing dataset.
    Arguments:
        root [str] - root folder of the MoBIE project
        src_dataset [str] - name of the MoBIE dataset to be cloned
        dst_dataset [str] - name of the MoBIE dataset to be added
        is_default [bool] - set this dataset as default dataset (default: False)
        copy_misc [callable] - function to copy additional misc data (default: None)
    """
    # check that we have the src dataset and don't have the dst dataset already
    if not metadata.dataset_exists(root, src_dataset):
        raise ValueError(f"Could not find dataset {src_dataset}")
    if metadata.dataset_exists(root, dst_dataset):
        raise ValueError(f"A dataset with name {dst_dataset} is already present.")
    if copy_misc is not None and not callable(copy_misc):
        raise ValueError("copy_misc must be callable")
    # Create the destination skeleton first, then copy contents into it and
    # finally register the new dataset in the project metadata.
    file_formats = metadata.get_file_formats(root)
    dst_folder = metadata.create_dataset_structure(root, dst_dataset, file_formats)
    src_folder = os.path.join(root, src_dataset)
    metadata.copy_dataset_folder(src_folder, dst_folder, copy_misc=copy_misc)
    metadata.add_dataset(root, dst_dataset, is_default)
def write_global_config(config_folder,
                        block_shape=None,
                        roi_begin=None,
                        roi_end=None,
                        qos=None,
                        require3d=True):
    """Create or update the cluster-tools global.config in config_folder.

    Existing settings are preserved; only the arguments that are not None
    are written. block_shape and the ROI bounds must be length-3 (for
    block_shape only when require3d is True).
    """
    def _checked(value, key_name):
        # Shared length validation; the message matches the original output.
        if len(value) != 3:
            raise ValueError(f"Invalid {key_name} given: {value}")
        return value

    os.makedirs(config_folder, exist_ok=True)
    conf_path = os.path.join(config_folder, "global.config")
    if os.path.exists(conf_path):
        with open(conf_path) as f:
            global_config = json.load(f)
    else:
        global_config = BaseClusterTask.default_global_config()

    if block_shape is not None:
        if require3d:
            _checked(block_shape, "block_shape")
        global_config["block_shape"] = block_shape
    # ROIs only apply to 3d data, hence the unconditional length-3 checks.
    if roi_begin is not None:
        global_config["roi_begin"] = _checked(roi_begin, "roi_begin")
    if roi_end is not None:
        global_config["roi_end"] = _checked(roi_end, "roi_end")
    if qos is not None:
        global_config["qos"] = qos

    with open(conf_path, "w") as f:
        json.dump(global_config, f)
| 40.297189 | 112 | 0.64999 |
09b7144e50e8de2fe90f37d811761c53dd3b297a | 2,127 | py | Python | kubernetes/test/test_com_coreos_monitoring_v1_alertmanager_spec_security_context1_windows_options.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_com_coreos_monitoring_v1_alertmanager_spec_security_context1_windows_options.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_com_coreos_monitoring_v1_alertmanager_spec_security_context1_windows_options.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_security_context1_windows_options import ComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions # noqa: E501
from kubernetes.client.rest import ApiException
class TestComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions(unittest.TestCase):
    """Unit-test stubs for ComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a model instance.

        When *include_optional* is False only the required parameters are
        set; when True both required and optional parameters are set.
        """
        # model = kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_security_context1_windows_options.ComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions()  # noqa: E501
        if include_optional:
            return ComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions(
                gmsa_credential_spec='0',
                gmsa_credential_spec_name='0',
                run_as_user_name='0',
            )
        return ComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions()

    def testComCoreosMonitoringV1AlertmanagerSpecSecurityContext1WindowsOptions(self):
        """Instantiate the model both without and with the optional fields."""
        self.make_instance(include_optional=False)
        self.make_instance(include_optional=True)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 38.672727 | 203 | 0.762106 |
5950cd1eacec191a06f75982c1b43d7e14c2ed52 | 1,396 | py | Python | review_heatmap/libaddon/_vendor/common/__init__.py | kb1900/Anki-Addons | 3b764af8657065c369d404025a3f11c964192a33 | [
"MIT"
] | 1 | 2019-06-23T04:46:24.000Z | 2019-06-23T04:46:24.000Z | review_heatmap/libaddon/_vendor/common/__init__.py | kb1900/Anki-Addons | 3b764af8657065c369d404025a3f11c964192a33 | [
"MIT"
] | null | null | null | review_heatmap/libaddon/_vendor/common/__init__.py | kb1900/Anki-Addons | 3b764af8657065c369d404025a3f11c964192a33 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Libaddon for Anki
#
# Copyright (C) 2018 Aristotelis P. <https//glutanimate.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version, with the additions
# listed at the end of the accompanied license file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# NOTE: This program is subject to certain additional terms pursuant to
# Section 7 of the GNU Affero General Public License. You should have
# received a copy of these additional terms immediately following the
# terms and conditions of the GNU Affero General Public License which
# accompanied this program.
#
# If not, please request a copy through one of the means of contact
# listed here: <https://glutanimate.com/contact/>.
#
# Any modifications to this file must keep this entire header intact.
"""
Packages common to both Anki 2.0 and 2.1
"""
| 39.885714 | 74 | 0.757163 |
e543c20c79fa89a4ab7fc69ec96b992dfd5a925c | 6,626 | py | Python | example/python2/py2_scan.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | [
"MIT"
] | 43 | 2016-04-24T15:20:16.000Z | 2022-03-19T21:01:29.000Z | example/python2/py2_scan.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | [
"MIT"
] | 11 | 2016-06-01T16:06:38.000Z | 2020-05-20T20:15:32.000Z | example/python2/py2_scan.py | rocky/python-spark | d3f966a4e8c191c51b1dcfa444026b4c6831984f | [
"MIT"
] | 12 | 2016-05-24T12:15:04.000Z | 2021-11-20T02:14:00.000Z | """
Simple SPARK-style scanner
Copyright (c) 2016 Rocky Bernstein
"""
# from __future__ import print_function
from spark_parser.scanner import GenericScanner
from py2_token import PythonToken
import re
# Python 2 reserved words; the scanner checks membership so that e.g. "if"
# is emitted as token IF rather than NAME.  str.split() splits on any run
# of whitespace, which is exactly what the old re.split("\s+", ...) did,
# but without the invalid "\s" escape inside a non-raw string literal.
RESERVED_WORDS = """and as assert break class continue def del eval exec else elif for from global
if in import lambda or pass print return while with yield None""".split()

# Bracket characters mapped to their token names.
BRACKET2NAME = {
    '(': 'LPAREN',   ')': 'RPAREN',
    '{': 'LBRACE',   '}': 'RBRACE',
    '[': 'LBRACKET', ']': 'RBRACKET',
    }

# Single-character symbols mapped to their token names.
SYMBOL2NAME = {
    '@': 'AT',    '`': 'BACKTICK',
    ':': 'COLON', ',': 'COMMA',
    '.': 'DOT',
    }

ENDMARKER = r'' # ctrl-d
# NOTE(review): the end-of-input sentinel above looks empty here -- confirm
# the original source contained a literal \x04 between the quotes.
class Python2Scanner(GenericScanner):
    # SPARK-style scanner.  GenericScanner introspects the t_* method names
    # and their docstrings -- the docstrings ARE the token regular
    # expressions -- so those docstrings are functional and must not change.

    def error(self, s, pos):
        """Show text and a carot under that. For example:
        x = 2y + z
                ^
        """
        print("Lexical error:")
        print("%s" % s[:pos+10])  # + 10 for trailing context
        print("%s^" % (" "*(pos-1)))
        for t in self.rv: print(t)
        raise SystemExit

    def __init__(self):
        # Scanner state: indentation stack plus current source position.
        self.is_newline = True   # True while we are at the start of a line
        self.indents = [0]       # stack of active indentation widths
        self.lineno = 1
        self.column = 0
        GenericScanner.__init__(self)

    def tokenize(self, string):
        # Run the generic scanner; tokens accumulate in self.rv via add_token.
        self.rv = []
        GenericScanner.tokenize(self, string)
        return self.rv

    def add_token(self, name, s, is_newline=False):
        # Record one token, tracking line/column, and emit any pending
        # DEDENTs when a new logical line starts with no indentation.
        self.column += len(s)
        t = PythonToken(name, s, self.lineno, self.column)
        if is_newline:
            self.lineno += 1
            self.column = 0
        if self.is_newline and name not in ['DEDENT', 'INDENT']:
            # Line began flush left: unwind the whole indent stack.
            while 0 < self.indents[-1]:
                self.indents = self.indents[0:-1]
                self.rv.append(PythonToken('DEDENT', '', self.lineno, self.column))
                pass
        self.is_newline = is_newline
        self.rv.append(t)

    # The function names below begin with 't_'.
    # This indicates to GenericScanner that these routines
    # form the tokens. GenericScanner introspects on the
    # method names of this class and the docstrings to come
    # up with both the names of the tokens and the regular expressions
    # that make up those tokens

    def t_paren(self, s):
        r'[(){}[\]]'
        self.add_token(BRACKET2NAME[s], s)

    def t_symbol(self, s):
        r'[@:,.`]'
        self.add_token(SYMBOL2NAME[s], s)

    def t_endmarker(self, s):
        """"""
        # NOTE(review): the docstring/regex above appears empty -- presumably
        # it originally held a literal \x04 (ctrl-d); confirm against upstream.
        self.add_token('ENDMARKER', s)

    # These can a appear as unary operators. Some are also binary operators
    UNOP2NAME = {'+': 'PLUS', '-': 'MINUS', '~': 'TILDE'}

    def t_op(self, s):
        r'\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|\*\*=|//=|//|==|<=|>=|<<|>>|[<>%^&+/=~-]'
        # Operators need to be further classified since the grammar requires this
        if s in ('<', '>', '==', '>=', '<=', '<>', '!='):
            self.add_token('COMP_OP', s)
        elif s in ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=',
                   '//='):
            self.add_token('AUGASSIGN', s)
        elif s in self.UNOP2NAME.keys():
            self.add_token(self.UNOP2NAME[s], s)
        elif s in ('|', '^', '&', '<<', '>>', '**', '/', '%', '//'):
            # These are *ONLY* binary operators. Operators which are exclusively or
            # can be unary operators were handled previously
            self.add_token('BINOP', s)
        elif s == '=':
            self.add_token('EQUAL', s)
        else:
            # Should be unreachable: the regex admits only the cases above.
            print("Internal error: Unknown operator %s" % s)
            raise SystemExit

    def t_linesep(self, s):
        r';'
        self.add_token('SEMICOLON', s)

    def t_nl(self, s):
        r'\n'
        self.add_token('NEWLINE', s, is_newline=True)

    def t_name(self, s):
        r'[A-Za-z_][A-Za-z_0-9]*'
        # Reserved words become their own upper-cased token names.
        if s in RESERVED_WORDS:
            self.add_token(s.upper(), s)
        else:
            self.add_token('NAME', s)

    # A way to handle the problem of having to match two different
    # tokens with a single regular expression.
    # We can't have two separate defs because then it would be indeterminate
    # whether we get two single stars or one double star.
    def t_star_star(self, s):
        r'\*\*?'
        token_name = "STARSTAR" if len(s) == 2 else 'STAR'
        self.add_token(token_name, s)

    # CONSTANTS
    # ---------

    def t_string(self, s):
        r"([\"]{3}(.|[\n])*[\"]{3})|('{3}(.|[\n])*'{3})|('[^']*')|(\"[^\"]*\")"
        self.add_token('STRING', s)

    # numbers; int, float, and complex
    # Note we have to put longer matches earlier. Specifically radix notation and
    # fixed-point notation
    def t_number(self, s):
        r'(0x[0-9a-f]+|0b[01]+|0o[0-7]+|\d+\.\d|\d+)j?'
        self.add_token('NUMBER', s)

    # Ugh. Handle Python's indent/dedent mess.
    def handle_indent_dedent(self, s):
        # Compare the leading whitespace *s* against the indent stack and
        # emit INDENT/DEDENT tokens as needed.
        indent = len(s)
        if indent > self.indents[-1]:
            self.add_token('INDENT', s)
            self.indents.append(indent)
        if indent == self.indents[-1]:
            self.is_newline = False
            pass
        else:
            # May need several levels of dedent
            while indent < self.indents[-1]:
                self.indents = self.indents[0:-1]
                self.add_token('DEDENT', s)
                pass
            pass
        return

    # Combine comment and whitespace because we want to
    # capture the space before a comment.
    def t_whitespace_or_comment(self, s):
        r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)'
        if '#' in s:
            # We have a comment
            # NOTE(review): "\s" below relies on Python passing unknown
            # escapes through unchanged; a raw string would be cleaner.
            matches = re.match('(\s+)(.*[\n]?)', s)
            if matches and self.is_newline:
                self.handle_indent_dedent(matches.group(1))
                s = matches.group(2)
            if s.endswith("\n"):
                # Split a trailing newline off into its own NEWLINE token.
                self.add_token('COMMENT', s[:-1])
                self.add_token('NEWLINE', "\n")
            else:
                self.add_token('COMMENT', s)
        elif self.is_newline:
            # Pure leading whitespace at line start drives INDENT/DEDENT.
            self.handle_indent_dedent(s)
            pass
        return
# Ad-hoc demo: tokenize a snippet and dump the resulting token stream.
if __name__ == "__main__":
    scan = Python2Scanner()
    def showit(expr):
        # Print the source, then one line per token produced for it.
        print(expr)
        tokens = scan.tokenize(expr + ENDMARKER)
        for t in tokens: print(t)
        print('-' * 30)
        return
    # showit("1 # hi")
    # NOTE(review): interior indentation of this demo string appears to have
    # been lost in transit -- confirm the body lines were originally indented.
    showit("""def foo():
# comment
return
""")
    # showit("(10.5 + 2 / 30) // 3 >> 1")
    # showit("1 + 2")
    # showit("""
    # () { } + - 'abc' \"abc\" 10 10j 0x10 # foo
    # # bar
    # """)
    # showit("""
    # for i in range(x):
    #     if True:
    #        pass
    #     pass
    #     pass""")
    # showit("""
    # for i in range(x):
    #   while True:
    #     break
    # """)
| 29.9819 | 87 | 0.518714 |
7a03ffda9e3d03301b9dabfad506172532a6931e | 718 | py | Python | examples/opencv_face_detect.py | aallan/picamera2 | d64fbe669e071402d11c043cf044f52f6b2edc57 | [
"BSD-2-Clause"
] | null | null | null | examples/opencv_face_detect.py | aallan/picamera2 | d64fbe669e071402d11c043cf044f52f6b2edc57 | [
"BSD-2-Clause"
] | null | null | null | examples/opencv_face_detect.py | aallan/picamera2 | d64fbe669e071402d11c043cf044f52f6b2edc57 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
import cv2
from null_preview import *
from picamera2 import *
# Grab images as numpy arrays and leave everything else to OpenCV.
# Continuously detects faces on the Pi camera feed and draws green boxes.
# Assumes the Haar cascade file exists at this hard-coded path.
face_detector = cv2.CascadeClassifier("/usr/local/lib/python3.9/dist-packages/cv2/data/haarcascade_frontalface_default.xml")
cv2.startWindowThread()
# Camera setup: 640x480 main stream, NullPreview keeps the pipeline running
# without drawing its own preview window.
picam2 = Picamera2()
preview = NullPreview(picam2)
picam2.configure(picam2.preview_configuration(main={"size": (640, 480)}))
picam2.start()
while True:  # runs until the process is killed
    im = picam2.capture_array()
    # Haar cascades operate on grayscale; scaleFactor=1.1, minNeighbors=5.
    grey = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(grey, 1.1, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0))
    cv2.imshow("Camera", im)
| 25.642857 | 124 | 0.707521 |
839af6b68107ff0bc06f0d5b8cf93cdebaf85231 | 730 | py | Python | venv/Classes/aula14a.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | venv/Classes/aula14a.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | venv/Classes/aula14a.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | '''ESTRUTURA DE REPETIÇÃO WHILE, LAÇOS DE REPETIÇÃO (PARTE 2)'''
# Teaching script (Curso em Video): three demonstrations of "while" loops.
# User-facing strings are intentionally Portuguese and left untouched.
print('*' * 40, '\nEquanto c < 10 faça:')
c = 0
while c < 10:  # while c < 10 do:
    print(c, end=' ')
    c += 1
print('END')
print('*' * 40, '\nEnquanto o valor digitado NÃO for 0 faça:')
n = 1
while n != 0:  # stop condition: loop until the user enters 0
    n = int(input('Digite um valor: '))
print('END')
print('*' * 40, '\n:Enquanto não for digitado ZERO(0), conte quantos números são pares e ímpares, e informe no final.')
n = 1
par = impar = 0  # counters: par = even numbers, impar = odd numbers
while n != 0:
    n = int(input('Digite um valor: '))
    if n != 0:  # the terminating zero itself is not counted
        if n % 2 == 0:
            par += 1
        else:
            impar += 1
print('Foram digitados {} números pares, e {} números ímpares.'.format(par, impar))
| 28.076923 | 119 | 0.571233 |
4f459f0a06b63bd9ba2588f050a27ac5718b7828 | 3,937 | py | Python | setup.py | agreenbaum/nrm_analysis | 4c7dfd9df6e6b14002266ecc984214e95f8a4ef8 | [
"BSD-3-Clause"
] | 2 | 2020-01-22T21:08:31.000Z | 2022-01-21T16:34:26.000Z | setup.py | agreenbaum/nrm_analysis | 4c7dfd9df6e6b14002266ecc984214e95f8a4ef8 | [
"BSD-3-Clause"
] | 2 | 2019-03-04T15:33:49.000Z | 2019-10-23T15:02:03.000Z | setup.py | agreenbaum/nrm_analysis | 4c7dfd9df6e6b14002266ecc984214e95f8a4ef8 | [
"BSD-3-Clause"
] | 7 | 2018-10-05T16:11:24.000Z | 2021-03-22T13:01:02.000Z | #!/usr/bin/env python
import os
import subprocess
import sys
from setuptools import setup, find_packages, Extension, Command
from setuptools.command.test import test as TestCommand
# Import shim: ConfigParser lived in different places across Python 2/3.
try:
    from distutils.config import ConfigParser
except ImportError:
    from configparser import ConfigParser

# Read package metadata from setup.cfg at import time (side effect).
conf = ConfigParser()
conf.read(['setup.cfg'])

# Get some config values
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', '')
AUTHOR = metadata.get('author', 'STScI')
AUTHOR_EMAIL = metadata.get('author_email', 'help@stsci.edu')
URL = metadata.get('url', 'https://www.stsci.edu/')
LICENSE = metadata.get('license', 'BSD')
# Bootstrap the "relic" release/versioning helper: prefer a local checkout,
# then an installed copy, and finally clone it from GitHub (network side
# effect at import time); any clone failure aborts setup with exit code 1.
if os.path.exists('relic'):
    sys.path.insert(1, 'relic')
    import relic.release
else:
    try:
        import relic.release
    except ImportError:
        try:
            subprocess.check_call(['git', 'clone',
                                   'https://github.com/jhunkeler/relic.git'])
            sys.path.insert(1, 'relic')
            import relic.release
        except subprocess.CalledProcessError as e:
            print(e)
            exit(1)

# Derive the version from git metadata and write the version template file.
version = relic.release.get_info()
relic.release.write_template(version, PACKAGENAME)

# make sure oifits.py is available -- same clone-on-demand pattern as relic.
try:
    import oifits
except ImportError:
    try:
        subprocess.check_call(['git', 'clone',
                               'https://github.com/pboley/oifits.git'])
        sys.path.insert(1, 'oifits')
        import oifits
    except subprocess.CalledProcessError as e:
        print(e)
        exit(1)
# Allows building the Sphinx docs from the package main directory with:
#   python setup.py build_sphinx
try:
    from sphinx.cmd.build import build_main
    from sphinx.setup_command import BuildDoc

    class BuildSphinx(BuildDoc):
        """Build Sphinx documentation after compiling C source files"""

        description = 'Build Sphinx documentation'

        def initialize_options(self):
            BuildDoc.initialize_options(self)

        def finalize_options(self):
            BuildDoc.finalize_options(self)

        def run(self):
            # Build extensions in place first so autodoc can import them.
            build_cmd = self.reinitialize_command('build_ext')
            build_cmd.inplace = 1
            self.run_command('build_ext')
            build_main(['-b', 'html', './docs', './docs/_build/html'])

except ImportError:
    # Sphinx is not installed: register a stub command that fails loudly.
    class BuildSphinx(Command):
        user_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            print('!\n! Sphinx is not installed!\n!', file=sys.stderr)
            exit(1)
class PyTest(TestCommand):
    """``python setup.py test`` hook that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Point pytest at the package's test directory.
        self.test_args = ['nrm_analysis/tests']
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        # SystemExit with pytest's status code -- same effect as sys.exit().
        raise SystemExit(pytest.main(self.test_args))
# Register the package with setuptools.
# FIX: 'logging' was removed from install_requires/tests_require -- it is
# part of the standard library, and the PyPI package of that name is an
# obsolete Python-2-era backport whose installation breaks on modern pips.
setup(
    name=PACKAGENAME,
    version=version.pep386,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    license=LICENSE,
    url=URL,
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=[
        'astropy', 'scipy', 'matplotlib', 'linearfit', 'poppy',
        'uncertainties', 'aplpy'
    ],
    tests_require=['pytest', 'scipy', 'matplotlib', 'linearfit', 'poppy',
                   'uncertainties', 'aplpy'],
    packages=find_packages(),
    package_data={PACKAGENAME: ['pars/*']},
    cmdclass={
        'test': PyTest,
        'build_sphinx': BuildSphinx
    },)
5c63000b55378dc9dccde06a2a5b1fa20cfb5257 | 6,346 | py | Python | Question_41_50/answers/answer_45.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | Question_41_50/answers/answer_45.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | Question_41_50/answers/answer_45.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
def BGR2GRAY(img):
    """Convert a BGR image of shape (H, W, 3) to 8-bit grayscale.

    Uses the BT.709 luma weights 0.2126 R + 0.7152 G + 0.0722 B and
    truncates the result to uint8.
    """
    blue = img[..., 0].copy()
    green = img[..., 1].copy()
    red = img[..., 2].copy()
    # Same arithmetic order as the weighted sum formula above.
    gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue
    return gray.astype(np.uint8)
def Canny(img):
    """Canny edge detector for a BGR image (H, W, 3).

    Pipeline: grayscale -> 5x5 Gaussian blur (sigma 1.4) -> 3x3 Sobel
    gradients -> magnitude/orientation -> orientation quantization to
    {0, 45, 90, 135} degrees -> non-maximum suppression -> hysteresis
    thresholding (LT=30, HT=100).  Returns a float array of 0/255 values.

    Fixes relative to the original:
    - vertical gradients are quantized to 90 (was 95, which matched no
      branch of the non-maximum suppression step),
    - sobel_filter pads with its own argument instead of accidentally
      reading the unblurred grayscale image through a closure,
    - the NumPy aliases np.float (removed in NumPy 1.24) were replaced
      with the builtin float.
    """

    # Gaussian filter for grayscale input.
    def gaussian_filter(img, K_size=3, sigma=1.3):
        if len(img.shape) == 3:
            H, W, C = img.shape
        else:
            img = np.expand_dims(img, axis=-1)
            H, W, C = img.shape

        # Zero padding.
        pad = K_size // 2
        out = np.zeros([H + pad * 2, W + pad * 2, C], dtype=float)
        out[pad: pad + H, pad: pad + W] = img.copy().astype(float)

        # Prepare the Gaussian kernel (normalized to sum to 1).
        K = np.zeros((K_size, K_size), dtype=float)
        for x in range(-pad, -pad + K_size):
            for y in range(-pad, -pad + K_size):
                K[y + pad, x + pad] = np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
        K /= (sigma * np.sqrt(2 * np.pi))
        K /= K.sum()

        tmp = out.copy()
        # Filtering (correlation) over every pixel/channel.
        for y in range(H):
            for x in range(W):
                for c in range(C):
                    out[pad + y, pad + x, c] = np.sum(K * tmp[y: y + K_size, x: x + K_size, c])
        out = out[pad: pad + H, pad: pad + W].astype(np.uint8)
        out = out[..., 0]
        return out

    # Sobel filter: returns (vertical response, horizontal response).
    def sobel_filter(img, K_size=3):
        H, W = img.shape

        # Zero padding.
        pad = K_size // 2
        out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)
        # BUGFIX: pad with the function argument, not the enclosing `gray`,
        # so the Gaussian-smoothed image is what actually gets filtered.
        out[pad: pad + H, pad: pad + W] = img.copy().astype(float)
        tmp = out.copy()
        out_v = out.copy()
        out_h = out.copy()

        # Sobel vertical kernel.
        Kv = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
        # Sobel horizontal kernel.
        Kh = [[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]]

        for y in range(H):
            for x in range(W):
                out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))
                out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))
        out_v = np.clip(out_v, 0, 255)
        out_h = np.clip(out_h, 0, 255)
        out_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)
        out_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)
        return out_v, out_h

    def get_edge_tan(fx, fy):
        # Gradient magnitude, clipped to [0, 255].
        edge = np.sqrt(np.power(fx.astype(np.float32), 2) + np.power(fy.astype(np.float32), 2))
        edge = np.clip(edge, 0, 255)
        # Avoid division by zero when computing the gradient slope.
        fx = np.maximum(fx, 1e-5)
        tan = np.arctan(fy / fx)
        return edge, tan

    def angle_quantization(tan):
        # Quantize orientations to 0 / 45 / 90 / 135 degrees.
        angle = np.zeros_like(tan, dtype=np.uint8)
        angle[np.where((tan > -0.4142) & (tan <= 0.4142))] = 0
        angle[np.where((tan > 0.4142) & (tan < 2.4142))] = 45
        # BUGFIX: was 95, which matched no branch in the suppression step.
        angle[np.where((tan >= 2.4142) | (tan <= -2.4142))] = 90
        angle[np.where((tan > -2.4142) & (tan <= -0.4142))] = 135
        return angle

    def non_maximum_suppression(angle, edge):
        # Keep a pixel only if it is the local maximum along its gradient
        # direction; otherwise zero it out (edge thinning).
        H, W = angle.shape

        for y in range(H):
            for x in range(W):
                if angle[y, x] == 0:
                    dx1, dy1, dx2, dy2 = -1, 0, 1, 0
                elif angle[y, x] == 45:
                    dx1, dy1, dx2, dy2 = -1, 1, 1, -1
                elif angle[y, x] == 90:
                    dx1, dy1, dx2, dy2 = 0, -1, 0, 1
                elif angle[y, x] == 135:
                    dx1, dy1, dx2, dy2 = -1, -1, 1, 1
                # Clamp the two neighbour offsets at the image borders.
                if x == 0:
                    dx1 = max(dx1, 0)
                    dx2 = max(dx2, 0)
                if x == W - 1:
                    dx1 = min(dx1, 0)
                    dx2 = min(dx2, 0)
                if y == 0:
                    dy1 = max(dy1, 0)
                    dy2 = max(dy2, 0)
                if y == H - 1:
                    dy1 = min(dy1, 0)
                    dy2 = min(dy2, 0)
                if max(max(edge[y, x], edge[y + dy1, x + dx1]), edge[y + dy2, x + dx2]) != edge[y, x]:
                    edge[y, x] = 0

        return edge

    def hysterisis(edge, HT=100, LT=30):
        H, W = edge.shape

        # Hysteresis threshold: strong edges on, weak edges off.
        edge[edge >= HT] = 255
        edge[edge <= LT] = 0

        _edge = np.zeros((H + 2, W + 2), dtype=np.float32)
        _edge[1:H + 1, 1:W + 1] = edge

        # 8-nearest-neighbour mask (centre excluded).
        nn = np.array(((1., 1., 1.), (1., 0., 1.), (1., 1., 1.)), dtype=np.float32)

        # Promote mid-strength pixels that touch a strong neighbour.
        for y in range(1, H + 2):
            for x in range(1, W + 2):
                if _edge[y, x] < LT or _edge[y, x] > HT:
                    continue
                if np.max(_edge[y - 1:y + 2, x - 1:x + 2] * nn) >= HT:
                    _edge[y, x] = 255
                else:
                    _edge[y, x] = 0

        edge = _edge[1:H + 1, 1:W + 1]
        return edge

    # Grayscale (inlined BT.709 conversion; same arithmetic as BGR2GRAY).
    gray = (0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]).astype(np.uint8)

    # Gaussian filtering.
    gaussian = gaussian_filter(gray, K_size=5, sigma=1.4)

    # Sobel filtering on the blurred image.
    fy, fx = sobel_filter(gaussian, K_size=3)

    # Edge strength and orientation.
    edge, tan = get_edge_tan(fx, fy)

    # Angle quantization.
    angle = angle_quantization(tan)

    # Non-maximum suppression.
    edge = non_maximum_suppression(angle, edge)

    # Hysteresis threshold.
    out = hysterisis(edge)

    return out
def Hough_Line_step2(edge):
    """Hough line voting for a binary edge map, keeping the top-10 cells.

    Votes every pixel equal to 255 into a (rho, theta) accumulator with
    1-pixel / 1-degree resolution, runs non-maximum suppression over the
    accumulator, and returns a (rho_max, 180) uint8 image with 255 at the
    ten strongest cells.

    FIX: the removed NumPy alias np.int (gone since NumPy 1.24) was
    replaced with the builtin int.  NOTE: negative rho values wrap around
    via negative indexing, as in the original tutorial code.
    """

    # Voting stage.
    def voting(edge):
        H, W = edge.shape

        drho = 1
        dtheta = 1

        # Maximum possible rho is the image diagonal length.
        rho_max = np.ceil(np.sqrt(H ** 2 + W ** 2)).astype(int)

        # Hough accumulator table.
        hough = np.zeros((rho_max, 180), dtype=int)

        # Indices of edge pixels.
        ind = np.where(edge == 255)

        # Hough transformation: one sinusoid per edge pixel.
        for y, x in zip(ind[0], ind[1]):
            for theta in range(0, 180, dtheta):
                # Polar coordinates.
                t = np.pi / 180 * theta
                rho = int(x * np.cos(t) + y * np.sin(t))

                # Vote.
                hough[rho, theta] += 1

        # NOTE: uint8 would saturate above 255 votes; fine for small inputs.
        out = hough.astype(np.uint8)
        return out

    # Non-maximum suppression over the accumulator.
    def non_maximum_suppression(hough):
        rho_max, _ = hough.shape

        for y in range(rho_max):
            for x in range(180):
                # 8-nearest-neighbour window, clamped at the borders.
                x1 = max(x - 1, 0)
                x2 = min(x + 2, 180)
                y1 = max(y - 1, 0)
                y2 = min(y + 2, rho_max - 1)
                if np.max(hough[y1:y2, x1:x2]) == hough[y, x] and hough[y, x] != 0:
                    pass
                    #hough[y,x] = 255
                else:
                    hough[y, x] = 0

        # For visualization: keep only the top-10 flat indices of the table.
        ind_x = np.argsort(hough.ravel())[::-1][:10]
        ind_y = ind_x.copy()
        thetas = ind_x % 180
        rhos = ind_y // 180
        _hough = np.zeros_like(hough, dtype=int)
        _hough[rhos, thetas] = 255
        return _hough

    # Voting, then suppression.
    hough = voting(edge)
    out = non_maximum_suppression(hough)
    return out
# Read image
# NOTE: cv2.imread returns None when the file is missing, in which case
# .astype raises AttributeError; assumes ../thorino.jpg exists.
img = cv2.imread("../thorino.jpg").astype(np.float32)
gray = BGR2GRAY(img)
# Canny
# The hand-rolled Canny above is kept for reference; OpenCV's built-in is
# used here (low=30, high=100, 3x3 Sobel aperture, L2 gradient norm).
#edge = Canny(img)
edge = cv2.Canny(gray, threshold1=30., threshold2=100., apertureSize=3, L2gradient=True)
# Hough
out = Hough_Line_step2(edge)
out = out.astype(np.uint8)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)  # block until a key is pressed, then close the window
cv2.destroyAllWindows()
| 22.827338 | 89 | 0.56508 |
308307e6843263f37c4c98801b28e64237ba2aac | 4,583 | py | Python | iperf_wrapper.py | AleksandrVin/SpeedtestService | 9385513e656a4935de0b29212ee9d62037db6ccc | [
"BSD-3-Clause"
] | null | null | null | iperf_wrapper.py | AleksandrVin/SpeedtestService | 9385513e656a4935de0b29212ee9d62037db6ccc | [
"BSD-3-Clause"
] | null | null | null | iperf_wrapper.py | AleksandrVin/SpeedtestService | 9385513e656a4935de0b29212ee9d62037db6ccc | [
"BSD-3-Clause"
] | null | null | null | import os
import shlex
import argparse
import datetime
import subprocess
from typing import IO
from io import TextIOWrapper
from threading import Thread
from balancer_routine import balancer_routine
from balancer_routine import env_data
class Iperf_wrapper():
    # Runs an iPerf server subprocess ("./iperf.elf") and tees its
    # stdout/stderr into timestamped log files via daemon threads.

    def __init__(self, parameters: str = "-s -u", verbose: bool = False) -> None:
        # parameters: extra iPerf CLI flags; verbose: echo output to stdout.
        self.threads: list = []                              # logger threads
        self.iperf_waiting_thread: "Thread | None" = None    # waits for process exit
        self.iperf_process: "subprocess.Popen | None" = None
        self.verbose: bool = verbose
        self.is_started: bool = False
        self.iperf_parameters: str = parameters

    def __logger_thread(self, stream: IO, file: TextIOWrapper):
        # Start a daemon thread that copies `stream` line by line into
        # `file` (and to stdout when verbose), closing both at EOF.
        def logger(stream: IO, file: TextIOWrapper):
            for stdout_line in iter(stream.readline, ""):
                file.writelines(stdout_line)
                file.flush()  # flush per line so logs survive a crash
                if self.verbose:
                    print(stdout_line.replace('\n', ""))
            stream.close()
            file.close()
        t = Thread(target=logger, args=(stream, file))
        t.daemon = True
        t.start()
        return t

    def __create_logs_stream(self):
        # Open timestamped stdout/stderr log files under iperf_logs/.
        logs_dir = "iperf_logs"
        if not os.path.exists(logs_dir):
            try:
                os.mkdir(logs_dir)
            except OSError:
                print(f"Creation of the directory {logs_dir} failed")
        curr_datetime = datetime.datetime.now().strftime("%Y-%m-%d_%I-%M-%S")
        output_file = open(f"{logs_dir}/iperf_log-{curr_datetime}.txt", 'w')
        error_file = open(f"{logs_dir}/iperf_errors-{curr_datetime}.txt", 'w')
        return output_file, error_file

    def __waiting_thread(self):
        # Block until iPerf exits, join the logger threads, then
        # re-register this service with the balancer.
        self.iperf_process.wait()
        return_code = self.iperf_process.poll()
        for t in self.threads:
            t.join()
        self.is_started = False
        balancer_routine.post_to_server(port=int(balancer_routine.env_data['SERVICE_PORT']), port_iperf=int(balancer_routine.env_data['IPERF_PORT']), ip=balancer_routine.env_data['SERVICE_IP_ADDRESS'])
        print(f"iPerf stopped with status {return_code}")

    def start(self, port_iperf):
        # Launch "./iperf.elf -p <port> <parameters>" with logging threads.
        # Returns True on launch, False if a server is already running.
        if not self.is_started:
            output_file, error_file = self.__create_logs_stream()
            cmd = shlex.split("./iperf.elf " + '-p ' + port_iperf + ' ' + self.iperf_parameters)
            self.iperf_process = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
            print("iPerf is started")
            self.is_started = True
            self.iperf_waiting_thread = Thread(target=self.__waiting_thread)
            self.iperf_waiting_thread.start()
            self.threads = []
            if self.iperf_process.stdout is not None:
                self.threads.append(self.__logger_thread(
                    self.iperf_process.stdout, output_file))
            if self.iperf_process.stderr is not None:
                self.threads.append(self.__logger_thread(
                    self.iperf_process.stderr, error_file))
            return True
        else:
            return False

    def stop(self):
        # Terminate iPerf, wait for the watcher thread, return exit code.
        # NOTE(review): calling stop() before start() raises AttributeError
        # because iperf_process is still None.
        self.iperf_process.terminate()
        self.iperf_waiting_thread.join()
        return_code = self.iperf_process.poll()
        return return_code
def read_env_data():
    """Collect service configuration from environment variables.

    Returns a dict of strings.  IPERF_PORT, SERVICE_PORT and
    CONNECTING_TIMEOUT fall back to '5001', '5000' and '120' when unset;
    the remaining keys are None when their variable is missing.
    """
    # Consistently use os.getenv (identical to os.environ.get, which the
    # original mixed in for some keys).
    env_data = {
        'SPEED_TEST_SERVICE_NAME': os.getenv('SPEED_TEST_SERVICE_NAME'),
        'SERVICE_IP_ADDRESS': os.getenv('SERVICE_IP_ADDRESS'),
        'SERVICE_LOCATION': os.getenv('SERVICE_LOCATION'),
        'BALANCER_ADDRESS': os.getenv('BALANCER_ADDRESS'),
        'IPERF_PORT': os.getenv('IPERF_PORT', '5001'),
        'SERVICE_PORT': os.getenv('SERVICE_PORT', '5000'),
        'CONNECTING_TIMEOUT': os.getenv('CONNECTING_TIMEOUT', '120'),
    }
    return env_data
def create_arg_parser():
    """Build the CLI parser: a -V/--verbose flag and a -p/--parameters string."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-V', '--verbose', action='store_true')
    parser.add_argument(
        '-p', '--parameters',
        help="parameters for iPerf",
        type=str,
        action="store",
        default='-s -u',
    )
    return parser
# Script entry point: print the environment config, start the iPerf
# wrapper, then idle until Ctrl-C stops the server.
if __name__ == "__main__":
    import time  # local import: only needed when run as a script

    arg_parser = create_arg_parser()
    namespace = arg_parser.parse_args()
    env_data = read_env_data()
    for key, value in env_data.items():
        print(f'{key}: {value}')
    iperf_wrapper = Iperf_wrapper(namespace.parameters, True)
    iperf_wrapper.start(env_data['IPERF_PORT'])
    try:
        # FIX: sleep instead of the original "while True: pass" busy-wait,
        # which pinned a CPU core doing nothing useful.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        iperf_wrapper.stop()
| 33.948148 | 201 | 0.63561 |
b9d5db6c80a1a68774629157919b04b22b96d589 | 17,600 | py | Python | detection/dpt_models/dpt.py | CASIA-IVA-Lab/DPT | 62e6322b42f8de35d215aa3cdd458088161b5949 | [
"Apache-2.0"
] | 85 | 2021-07-07T06:54:06.000Z | 2022-03-08T03:03:37.000Z | detection/dpt_models/dpt.py | CASIA-IVA-Lab/DPT | 62e6322b42f8de35d215aa3cdd458088161b5949 | [
"Apache-2.0"
] | 11 | 2021-08-10T12:14:21.000Z | 2022-03-03T01:39:20.000Z | detection/dpt_models/dpt.py | CASIA-IVA-Lab/DPT | 62e6322b42f8de35d215aa3cdd458088161b5949 | [
"Apache-2.0"
] | 12 | 2021-08-02T09:04:24.000Z | 2021-10-03T12:03:43.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
from mmdet.models.builder import BACKBONES
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
from .box_coder import *
from .depatch_embed import Simple_DePatch
#__all__ = [
# 'depvt_tiny'#, 'pvt_small', 'pvt_medium', 'pvt_large'
#]
class Mlp(nn.Module):
    """Two-layer feed-forward block (Linear -> activation -> Linear) with dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths default to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop; the same dropout module is reused.
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
class Attention(nn.Module):
    # Multi-head self-attention with optional spatial reduction (PVT-style):
    # when sr_ratio > 1 the key/value sequence is downsampled by a strided
    # conv before attention, shrinking the attention matrix.

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # 1/sqrt(head_dim) unless explicitly overridden by qk_scale.
        self.scale = qk_scale or head_dim ** -0.5

        # Queries come from the full sequence; keys/values may be reduced.
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # Strided conv downsamples the (H, W) token grid by sr_ratio.
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        # x: (B, N, C) with N == H * W (flattened token grid).
        B, N, C = x.shape
        # q: (B, num_heads, N, C // num_heads)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            # Reduce keys/values: (B, N, C) -> (B, C, H, W) -> conv -> (B, N', C).
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            # kv: (2, B, num_heads, N', C // num_heads)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        # Scaled dot-product attention.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # Weighted value sum, heads merged back into the channel dim.
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention and pre-norm MLP, each
    wrapped in a residual connection with optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, H, W):
        # Residual attention branch, then residual MLP branch.
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and embed them with a
    strided convolution, followed by LayerNorm."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
            f"img_size {img_size} should be divided by patch_size {patch_size}."
        # Patch-grid resolution and total patch count.
        self.H = img_size[0] // patch_size[0]
        self.W = img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # (B, C, H, W) -> (B, num_patches, embed_dim) plus the patch-grid size.
        B, C, H, W = x.shape
        tokens = self.proj(x).flatten(2).transpose(1, 2)
        tokens = self.norm(tokens)
        grid = (H // self.patch_size[0], W // self.patch_size[1])
        return tokens, grid
class DeformablePatchTransformer(nn.Module):
    """
        PVT-style pyramid vision transformer backbone whose four stages use
        externally-constructed patch embedding modules (plain PatchEmbed or
        deformable ones, see the dpt_* subclasses below) and which returns a
        feature pyramid for dense prediction.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
                 num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], F4=False, patch_embeds=None):
        # depths: number of Blocks per stage; sr_ratios: per-stage spatial
        # reduction of attention keys/values; F4: forward() returns only the
        # stage-4 feature map; patch_embeds: the four per-stage patch
        # embedding modules constructed by the caller (required).
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths
        self.F4 = F4
        # patch_embed (one module per stage, supplied by the subclass)
        self.patch_embed1, self.patch_embed2, self.patch_embed3, self.patch_embed4 = patch_embeds
        # pos_embed: learned absolute position embeddings, one per stage.
        # Stage 4 allocates one extra leading slot (presumably a legacy
        # cls-token position - confirm); it is skipped in forward_features.
        self.pos_embed1 = nn.Parameter(torch.zeros(1, self.patch_embed1.num_patches, embed_dims[0]))
        self.pos_drop1 = nn.Dropout(p=drop_rate)
        self.pos_embed2 = nn.Parameter(torch.zeros(1, self.patch_embed2.num_patches, embed_dims[1]))
        self.pos_drop2 = nn.Dropout(p=drop_rate)
        self.pos_embed3 = nn.Parameter(torch.zeros(1, self.patch_embed3.num_patches, embed_dims[2]))
        self.pos_drop3 = nn.Dropout(p=drop_rate)
        self.pos_embed4 = nn.Parameter(torch.zeros(1, self.patch_embed4.num_patches + 1, embed_dims[3]))
        self.pos_drop4 = nn.Dropout(p=drop_rate)
        # transformer encoder: drop-path rate grows linearly across all
        # blocks of all stages (stochastic depth decay rule)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
        cur = 0
        self.block1 = nn.ModuleList([Block(
            dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[0])
            for i in range(depths[0])])
        cur += depths[0]
        self.block2 = nn.ModuleList([Block(
            dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[1])
            for i in range(depths[1])])
        cur += depths[1]
        self.block3 = nn.ModuleList([Block(
            dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[2])
            for i in range(depths[2])])
        cur += depths[2]
        self.block4 = nn.ModuleList([Block(
            dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[3])
            for i in range(depths[3])])
        # init weights
        trunc_normal_(self.pos_embed1, std=.02)
        trunc_normal_(self.pos_embed2, std=.02)
        trunc_normal_(self.pos_embed3, std=.02)
        trunc_normal_(self.pos_embed4, std=.02)
        self.apply(self._init_weights)
        # new: re-initialize the learned offsets of the deformable patch
        # embeddings (stages 2-4) after the generic weight init above
        self.patch_embed2.reset_offset()
        self.patch_embed3.reset_offset()
        self.patch_embed4.reset_offset()
    def init_weights(self, pretrained=None):
        """Load pretrained weights from a checkpoint path, if one is given."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
    def reset_drop_path(self, drop_path_rate):
        """Recompute the per-block stochastic-depth rates for a new global rate."""
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0
        for i in range(self.depths[0]):
            self.block1[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[0]
        for i in range(self.depths[1]):
            self.block2[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[1]
        for i in range(self.depths[2]):
            self.block3[i].drop_path.drop_prob = dpr[cur + i]
        cur += self.depths[2]
        for i in range(self.depths[3]):
            self.block4[i].drop_path.drop_prob = dpr[cur + i]
    def _init_weights(self, m):
        """Default init: trunc-normal Linear weights, zero bias; LayerNorm to (1, 0)."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def _get_pos_embed(self, pos_embed, patch_embed, H, W):
        """Return pos_embed bilinearly resized to the H x W token grid.
        NOTE(review): the shortcut compares H*W against patch_embed1.num_patches
        for every stage (as in the upstream PVT code) - confirm this is
        intentional for stages 2-4."""
        if H * W == self.patch_embed1.num_patches:
            return pos_embed
        else:
            return F.interpolate(
                pos_embed.reshape(1, patch_embed.H, patch_embed.W, -1).permute(0, 3, 1, 2),
                size=(H, W), mode="bilinear").reshape(1, -1, H * W).permute(0, 2, 1)
    def forward_features(self, x):
        """Run all four stages on image batch x; return the list of four
        feature maps, each reshaped back to (B, C_stage, H_stage, W_stage)."""
        outs = []
        B = x.shape[0]
        # stage 1
        x, (H, W) = self.patch_embed1(x)
        pos_embed1 = self._get_pos_embed(self.pos_embed1, self.patch_embed1, H, W)
        x = x + pos_embed1
        x = self.pos_drop1(x)
        for blk in self.block1:
            x = blk(x, H, W)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        # stage 2
        x, (H, W) = self.patch_embed2(x)
        pos_embed2 = self._get_pos_embed(self.pos_embed2, self.patch_embed2, H, W)
        x = x + pos_embed2
        x = self.pos_drop2(x)
        for blk in self.block2:
            x = blk(x, H, W)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        # stage 3
        x, (H, W) = self.patch_embed3(x)
        pos_embed3 = self._get_pos_embed(self.pos_embed3, self.patch_embed3, H, W)
        x = x + pos_embed3
        x = self.pos_drop3(x)
        for blk in self.block3:
            x = blk(x, H, W)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        # stage 4 (skip the extra leading slot of pos_embed4)
        x, (H, W) = self.patch_embed4(x)
        pos_embed4 = self._get_pos_embed(self.pos_embed4[:, 1:], self.patch_embed4, H, W)
        x = x + pos_embed4
        x = self.pos_drop4(x)
        for blk in self.block4:
            x = blk(x, H, W)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        return outs
    def forward(self, x):
        """Return the list of stage feature maps, or only stage 4 if F4 is set."""
        x = self.forward_features(x)
        if self.F4:
            x = x[3:4]
        return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
@BACKBONES.register_module()
class dpt_tiny(DeformablePatchTransformer):
    """DPT-tiny: depths [2, 2, 2, 2]; deformable patch embedding from stage 2 on."""

    def __init__(self, **kwargs):
        dims = [64, 128, 320, 512]
        image_size = 224
        use_depatch = [False, True, True, True]
        embeds = []
        for stage in range(4):
            first = stage == 0
            chans = 3 if first else dims[stage - 1]
            size = image_size if first else image_size // 2 ** (stage + 1)
            p_size = 4 if first else 2
            if use_depatch[stage]:
                # Deformable embedding: a box coder predicts per-patch sampling regions.
                coder = pointwhCoder(input_size=size, patch_count=size // p_size,
                                     weights=(1., 1., 1., 1.), pts=3, tanh=True,
                                     wh_bias=torch.tensor(5. / 3.).sqrt().log())
                embeds.append(
                    Simple_DePatch(coder, img_size=size, patch_size=p_size, patch_pixel=3,
                                   patch_count=size // p_size, in_chans=chans,
                                   embed_dim=dims[stage], another_linear=True,
                                   use_GE=True, with_norm=True))
            else:
                embeds.append(
                    PatchEmbed(img_size=size, patch_size=p_size, in_chans=chans,
                               embed_dim=dims[stage]))
        super(dpt_tiny, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2],
            sr_ratios=[8, 4, 2, 1], drop_rate=0.0, drop_path_rate=0.1, patch_embeds=embeds)
@BACKBONES.register_module()
class dpt_small(DeformablePatchTransformer):
    """DPT-small: depths [3, 4, 6, 3]; deformable patch embedding from stage 2 on."""

    def __init__(self, **kwargs):
        dims = [64, 128, 320, 512]
        image_size = 224
        use_depatch = [False, True, True, True]
        embeds = []
        for stage in range(4):
            first = stage == 0
            chans = 3 if first else dims[stage - 1]
            size = image_size if first else image_size // 2 ** (stage + 1)
            p_size = 4 if first else 2
            if use_depatch[stage]:
                # Deformable embedding: a box coder predicts per-patch sampling regions.
                coder = pointwhCoder(input_size=size, patch_count=size // p_size,
                                     weights=(1., 1., 1., 1.), pts=3, tanh=True,
                                     wh_bias=torch.tensor(5. / 3.).sqrt().log())
                embeds.append(
                    Simple_DePatch(coder, img_size=size, patch_size=p_size, patch_pixel=3,
                                   patch_count=size // p_size, in_chans=chans,
                                   embed_dim=dims[stage], another_linear=True,
                                   use_GE=True, with_norm=True))
            else:
                embeds.append(
                    PatchEmbed(img_size=size, patch_size=p_size, in_chans=chans,
                               embed_dim=dims[stage]))
        super(dpt_small, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3],
            sr_ratios=[8, 4, 2, 1], drop_rate=0.0, drop_path_rate=0.1, patch_embeds=embeds)
@BACKBONES.register_module()
class dpt_medium(DeformablePatchTransformer):
    """DPT-medium: depths [3, 4, 18, 3]; deformable patch embedding from stage 2 on."""

    def __init__(self, **kwargs):
        dims = [64, 128, 320, 512]
        image_size = 224
        use_depatch = [False, True, True, True]
        embeds = []
        for stage in range(4):
            first = stage == 0
            chans = 3 if first else dims[stage - 1]
            size = image_size if first else image_size // 2 ** (stage + 1)
            p_size = 4 if first else 2
            if use_depatch[stage]:
                # Deformable embedding: a box coder predicts per-patch sampling regions.
                coder = pointwhCoder(input_size=size, patch_count=size // p_size,
                                     weights=(1., 1., 1., 1.), pts=3, tanh=True,
                                     wh_bias=torch.tensor(5. / 3.).sqrt().log())
                embeds.append(
                    Simple_DePatch(coder, img_size=size, patch_size=p_size, patch_pixel=3,
                                   patch_count=size // p_size, in_chans=chans,
                                   embed_dim=dims[stage], another_linear=True,
                                   use_GE=True, with_norm=True))
            else:
                embeds.append(
                    PatchEmbed(img_size=size, patch_size=p_size, in_chans=chans,
                               embed_dim=dims[stage]))
        super(dpt_medium, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3],
            sr_ratios=[8, 4, 2, 1], drop_rate=0.0, drop_path_rate=0.1, patch_embeds=embeds)
@BACKBONES.register_module()
class dpt_small_f4(DeformablePatchTransformer):
    """DPT-small (F4 variant): depths [3, 4, 6, 3], returns only the stage-4 feature map."""

    def __init__(self, **kwargs):
        dims = [64, 128, 320, 512]
        image_size = 224
        use_depatch = [False, True, True, True]
        embeds = []
        for stage in range(4):
            first = stage == 0
            chans = 3 if first else dims[stage - 1]
            size = image_size if first else image_size // 2 ** (stage + 1)
            p_size = 4 if first else 2
            if use_depatch[stage]:
                # Deformable embedding: a box coder predicts per-patch sampling regions.
                coder = pointwhCoder(input_size=size, patch_count=size // p_size,
                                     weights=(1., 1., 1., 1.), pts=3, tanh=True,
                                     wh_bias=torch.tensor(5. / 3.).sqrt().log())
                embeds.append(
                    Simple_DePatch(coder, img_size=size, patch_size=p_size, patch_pixel=3,
                                   patch_count=size // p_size, in_chans=chans,
                                   embed_dim=dims[stage], another_linear=True,
                                   use_GE=True, with_norm=True))
            else:
                embeds.append(
                    PatchEmbed(img_size=size, patch_size=p_size, in_chans=chans,
                               embed_dim=dims[stage]))
        super(dpt_small_f4, self).__init__(
            patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
            qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3],
            sr_ratios=[8, 4, 2, 1], drop_rate=0.0, drop_path_rate=0.1, F4=True, patch_embeds=embeds)
| 42.512077 | 176 | 0.603182 |
c8dcc67b62594216ed466662e5138a004682a292 | 107,232 | py | Python | modules/s3/s3data.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | modules/s3/s3data.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | modules/s3/s3data.py | arnavsharma93/eden | 2e559a277c4144ba4f4cdcd108460d025923671d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" S3 Data Views
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@group Data Views: S3DataTable,
S3DataList,
S3PivotTable
"""
import datetime
import sys
import time
from itertools import product, islice
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current
from gluon.dal import Expression, Field
from gluon.html import *
from gluon.languages import lazyT
from gluon.storage import Storage
from gluon.validators import IS_EMPTY_OR, IS_IN_SET
from s3utils import s3_flatlist, s3_has_foreign_key, s3_orderby_fields, s3_truncate, s3_unicode, S3MarkupStripper, s3_represent_value
from s3validators import IS_NUMBER
# Debug switch: when enabled, _debug() echoes messages to stderr (note the
# Python 2 print-chevron syntax); otherwise _debug() is a no-op.
DEBUG = False
if DEBUG:
    print >> sys.stderr, "S3 Data Representations: DEBUG MODE"
    def _debug(m):
        print >> sys.stderr, m
else:
    _debug = lambda m: None
# =============================================================================
class S3DataTable(object):
""" Class representing a data table """
# The dataTable id if no explicit value has been provided
id_counter = 1
# -------------------------------------------------------------------------
# Standard API
# -------------------------------------------------------------------------
def __init__(self,
rfields,
data,
start=0,
limit=None,
filterString=None,
orderby=None,
empty=False,
):
"""
S3DataTable constructor
@param rfields: A list of S3Resourcefield
@param data: A list of Storages the key is of the form table.field
The value is the data to be displayed in the dataTable
@param start: the first row to return from the data
@param limit: the (maximum) number of records to return
@param filterString: The string that was used in filtering the records
@param orderby: the DAL orderby construct
"""
self.data = data
self.rfields = rfields
self.empty = empty
colnames = []
heading = {}
append = colnames.append
for rfield in rfields:
colname = rfield.colname
heading[colname] = rfield.label
append(colname)
self.colnames = colnames
self.heading = heading
max = len(data)
if start < 0:
start == 0
if start > max:
start = max
if limit == None:
end = max
else:
end = start + limit
if end > max:
end = max
self.start = start
self.end = end
self.filterString = filterString
if orderby:
_orderby = []
INVERT = current.db._adapter.INVERT
for f in s3_orderby_fields(None, orderby, expr=True):
if type(f) is Expression:
colname = str(f.first)
direction = "desc" \
if f.op == INVERT else "asc"
else:
colname = str(f)
direction = "asc"
for idx, rfield in enumerate(rfields):
if rfield.colname == colname:
_orderby.append([idx, direction])
break
else:
_orderby = [[1, "asc"]]
self.orderby = _orderby
# -------------------------------------------------------------------------
def html(self,
totalrows,
filteredrows,
id = None,
sEcho = 1,
**attr
):
"""
Method to render the dataTable into html
@param totalrows: The total rows in the unfiltered query.
@param filteredrows: The total rows in the filtered query.
@param id: The id of the table these need to be unique if more
than one dataTable is to be rendered on the same page.
If this is not passed in then a unique id will be
generated. Regardless the id is stored in self.id
so it can be easily accessed after rendering.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param attr: dictionary of attributes which can be passed in
"""
flist = self.colnames
if not id:
id = "list_%s" % self.id_counter
self.id_counter += 1
self.id = id
bulkActions = attr.get("dt_bulk_actions", None)
bulkCol = attr.get("dt_bulk_col", 0)
if bulkCol > len(flist):
bulkCol = len(flist)
action_col = attr.get("dt_action_col", 0)
if action_col != 0:
if action_col == -1 or action_col >= len(flist):
action_col = len(flist) -1
attr["dt_action_col"] = action_col
flist = flist[1:action_col+1] + [flist[0]] + flist[action_col+1:]
# Get the details for any bulk actions. If we have at least one bulk
# action then a column will be added, either at the start or in the
# column identified by dt_bulk_col
if bulkActions:
flist.insert(bulkCol, "BULK")
if bulkCol <= action_col:
action_col += 1
pagination = attr.get("dt_pagination", "true") == "true"
if pagination:
real_end = self.end
self.end = self.start + 1
table = self.table(id, flist, action_col)
cache = None
if pagination:
self.end = real_end
aadata = self.aadata(totalrows,
filteredrows,
id,
sEcho,
flist,
action_col=action_col,
stringify=False,
**attr)
cache = {"iCacheLower": self.start,
"iCacheUpper": self.end if filteredrows > self.end else filteredrows,
"lastJson": aadata,
}
html = self.htmlConfig(table,
id,
self.orderby,
self.rfields,
cache,
filteredrows,
**attr
)
return html
# -------------------------------------------------------------------------
@staticmethod
def i18n():
"""
Return the i18n strings needed by dataTables
- called by views/dataTables.html
"""
T = current.T
scripts = ['''i18n.sSortAscending="%s"''' % T("activate to sort column ascending"),
'''i18n.sSortDescending="%s"''' % T("activate to sort column descending"),
'''i18n.sFirst="%s"''' % T("First"),
'''i18n.sLast="%s"''' % T("Last"),
'''i18n.sNext="%s"''' % T("Next"),
'''i18n.sPrevious="%s"''' % T("Previous"),
'''i18n.sEmptyTable="%s"''' % T("No records found"), #T("No data available in table"),
'''i18n.sInfo="%s"''' % T("Showing _START_ to _END_ of _TOTAL_ entries"),
'''i18n.sInfoEmpty="%s"''' % T("Showing 0 to 0 of 0 entries"),
'''i18n.sInfoFiltered="%s"''' % T("(filtered from _MAX_ total entries)"),
'''i18n.sInfoThousands="%s"''' % current.deployment_settings.get_L10n_thousands_separator(),
'''i18n.sLengthMenu="%s"''' % T("Show _MENU_ entries"),
'''i18n.sLoadingRecords="%s"''' % T("Loading"),
'''i18n.sProcessing="%s"''' % T("Processing"),
'''i18n.sSearch="%s"''' % T("Search"),
'''i18n.sZeroRecords="%s"''' % T("No matching records found"),
'''i18n.sSelectAll="%s"''' % T("Select All")
]
script = "\n".join(scripts)
return script
# -------------------------------------------------------------------------
def json(self,
totalrows,
displayrows,
id,
sEcho,
stringify=True,
**attr
):
"""
Method to render the data into a json object
@param totalrows: The total rows in the unfiltered query.
@param displayrows: The total rows in the filtered query.
@param id: The id of the table for which this ajax call will
respond to.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param attr: dictionary of attributes which can be passed in
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_group_totals: The number of record in each group.
This will be displayed in parenthesis
after the group title.
"""
flist = self.colnames
action_col = attr.get("dt_action_col", 0)
if action_col != 0:
if action_col == -1 or action_col >= len(flist):
action_col = len(flist) - 1
flist = flist[1:action_col+1] + [flist[0]] + flist[action_col+1:]
# Get the details for any bulk actions. If we have at least one bulk
# action then a column will be added, either at the start or in the
# column identified by dt_bulk_col
bulkActions = attr.get("dt_bulk_actions", None)
bulkCol = attr.get("dt_bulk_col", 0)
if bulkActions:
if bulkCol > len(flist):
bulkCol = len(flist)
flist.insert(bulkCol, "BULK")
if bulkCol <= action_col:
action_col += 1
return self.aadata(totalrows,
displayrows,
id,
sEcho,
flist,
action_col=action_col,
stringify=stringify,
**attr)
# -------------------------------------------------------------------------
# Extended API
# -------------------------------------------------------------------------
@staticmethod
def getConfigData():
"""
Method to extract the configuration data from S3 globals and
store them as an attr variable.
- used by Survey module
@return: dictionary of attributes which can be passed into html()
@param attr: dictionary of attributes which can be passed in
dt_displayLength : The default number of records that will be shown
dt_pagination: Enable pagination
dt_pagination_type: type of pagination, either:
(default) full_numbers
OR two_button
dt_bFilter: Enable or disable filtering of data.
dt_group: The colum that is used to group the data
dt_ajax_url: The URL to be used for the Ajax call
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_bulk_selected: A list of selected items
dt_actions: dictionary of actions
dt_styles: dictionary of styles to be applied to a list of ids
for example:
{"warning" : [1,3,6,7,9],
"alert" : [2,10,13]}
"""
s3 = current.response.s3
attr = Storage()
if s3.datatable_ajax_source:
attr.dt_ajax_url = s3.datatable_ajax_source
if s3.actions:
attr.dt_actions = s3.actions
if s3.dataTableBulkActions:
attr.dt_bulk_actions = s3.dataTableBulkActions
if s3.dataTable_iDisplayLength:
attr.dt_displayLength = s3.dataTable_iDisplayLength
attr.dt_pagination = "false" if s3.no_sspag else "true"
if s3.dataTable_sPaginationType:
attr.dt_pagination_type = s3.dataTable_sPaginationType
if s3.dataTable_group:
attr.dt_group = s3.dataTable_group
if s3.dataTable_NobFilter:
attr.dt_bFilter = not s3.dataTable_NobFilter
if s3.dataTable_sDom:
attr.dt_sDom = s3.dataTable_sDom
if s3.dataTableDisplay:
attr.dt_display = s3.dataTableDisplay
if s3.dataTableStyleDisabled or s3.dataTableStyleWarning or s3.dataTableStyleAlert:
attr.dt_styles = {}
if s3.dataTableStyleDisabled:
attr.dt_styles["dtdisable"] = s3.dataTableStyleDisabled
if s3.dataTableStyleWarning:
attr.dt_styles["dtwarning"] = s3.dataTableStyleWarning
if s3.dataTableStyleAlert:
attr.dt_styles["dtalert"] = s3.dataTableStyleAlert
return attr
# -------------------------------------------------------------------------
@staticmethod
def getControlData(rfields, vars):
"""
Method that will return the orderby and filter from the vars
returned by the browser, from an ajax call.
@param rfields: A list of S3Resourcefield
@param vars: A list of variables sent from the dataTable
"""
# @todo: does not sort properly in option fields nor FK references
if not vars.iSortingCols:
return (False, "")
sort_cols = int(vars.iSortingCols)
orderby = False
for x in range(sort_cols):
index = int(vars["iSortCol_%s" % x])
f = rfields[index].field
if vars["sSortDir_%s" % x] == "desc":
f = ~f
if not orderby:
orderby = f
else:
orderby |= f
# @todo: does not search properly in option fields nor FK references
words = vars.sSearch
if not words:
return (orderby, "")
words = words.split()
query = None
for rf in rfields:
if rf.ftype in ("string", "text") :
if not query:
query = rf.field.contains(words)
else:
query |= (rf.field.contains(words))
return (orderby, query)
# -------------------------------------------------------------------------
    @staticmethod
    def listFormats(id, rfields=None, permalink=None, base_url=None):
        """
            Calculate the export formats that can be added to the table

            @param id: the unique dataTable ID
            @param rfields: optional list of field selectors for exports
            @param permalink: search result URL
            @param base_url: the base URL of the datatable (without
                             method or query vars) to construct format URLs

            @return: a DIV of export icons (one per enabled format)
        """

        T = current.T
        s3 = current.response.s3
        request = current.request

        if base_url is None:
            base_url = request.url

        # @todo: this needs rework
        #        - other data formats could have other list_fields,
        #          hence applying the datatable sorting/filters is
        #          not transparent
        # Fallback URL for formats without a specific URL configured
        if s3.datatable_ajax_source:
            end = s3.datatable_ajax_source.find(".aadata")
            default_url = s3.datatable_ajax_source[:end] # strip '.aadata' extension
        else:
            default_url = base_url

        # Keep any URL filters
        get_vars = request.get_vars
        if get_vars:
            query = "&".join("%s=%s" % (k, v) for k, v in get_vars.items())
            default_url = "%s?%s" % (default_url, query)

        div = DIV(_id = "%s_list_formats" % id, # Used by s3.filter.js to update URLs
                  _class = "list_formats")
        if permalink is not None:
            link = A(T("Link to this result"),
                     _href=permalink,
                     _class="permalink")
            div.append(link)
            div.append(" | ")

        export_formats = current.deployment_settings.get_ui_export_formats()
        if export_formats:
            div.append("%s:" % current.T("Export as"))
        iconList = []
        formats = s3.formats
        EXPORT = T("Export in %(format)s format")
        # In reverse-order of appearance due to float-right
        if "map" in formats and "map" in export_formats:
            iconList.append(DIV(_class="export_map",
                                _onclick="S3.dataTables.formatRequest('map','%s','%s');" % (id, formats.map),
                                _title=T("Show on Map"),
                                ))
        # KML: offered when an explicit KML URL is configured, or
        # (heuristically) when the resource references locations/sites
        if "kml" in export_formats:
            if "kml" in formats:
                iconList.append(DIV(_class="export_kml",
                                    _onclick="S3.dataTables.formatRequest('kml','%s','%s');" % (id, formats.kml),
                                    _title=EXPORT % dict(format="KML"),
                                    ))
            elif rfields:
                kml_list = ["location_id",
                            "site_id",
                            ]
                for r in rfields:
                    if r.fname in kml_list:
                        iconList.append(DIV(_class="export_kml",
                                            _onclick="S3.dataTables.formatRequest('kml','%s','%s');" % (id, default_url),
                                            _title=EXPORT % dict(format="KML"),
                                            ))
                        break
        if "have" in formats and "have" in export_formats:
            iconList.append(DIV(_class="export_have",
                                _onclick="S3.dataTables.formatRequest('have','%s','%s');" % (id, formats.have),
                                _title=EXPORT % dict(format="HAVE"),
                                ))
        # The remaining formats fall back to default_url when no
        # format-specific URL is configured
        if "xml" in export_formats:
            url = formats.xml if formats.xml else default_url
            iconList.append(DIV(_class="export_xml",
                                _onclick="S3.dataTables.formatRequest('xml','%s','%s');" % (id, url),
                                _title=EXPORT % dict(format="XML"),
                                ))
        if "rss" in export_formats:
            url = formats.rss if formats.rss else default_url
            iconList.append(DIV(_class="export_rss",
                                _onclick="S3.dataTables.formatRequest('rss','%s','%s');" % (id, url),
                                _title=EXPORT % dict(format="RSS"),
                                ))
        if "xls" in export_formats:
            url = formats.xls if formats.xls else default_url
            iconList.append(DIV(_class="export_xls",
                                _onclick="S3.dataTables.formatRequest('xls','%s','%s');" % (id, url),
                                _title=EXPORT % dict(format="XLS"),
                                ))
        if "pdf" in export_formats:
            url = formats.pdf if formats.pdf else default_url
            iconList.append(DIV(_class="export_pdf",
                                _onclick="S3.dataTables.formatRequest('pdf','%s','%s');" % (id, url),
                                _title=EXPORT % dict(format="PDF"),
                                ))

        for icon in iconList:
            div.append(icon)

        return div
# -------------------------------------------------------------------------
@staticmethod
def defaultActionButtons(resource,
custom_actions=None,
r=None
):
"""
Configure default action buttons
@param resource: the resource
@param r: the request, if specified, all action buttons will
be linked to the controller/function of this request
rather than to prefix/name of the resource
@param custom_actions: custom actions as list of dicts like
{"label":label, "url":url, "_class":class},
will be appended to the default actions
@ToDo: DRY with S3CRUD.action_buttons()
"""
from s3crud import S3CRUD
s3 = current.response.s3
auth = current.auth
actions = s3.actions
table = resource.table
actions = None
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
labels = s3.crud_labels
args = ["[id]"]
# Choose controller/function to link to
if r is not None:
c = r.controller
f = r.function
else:
c = resource.prefix
f = resource.name
tablename = resource.tablename
get_config = current.s3db.get_config
# "Open" button
editable = get_config(tablename, "editable", True)
if editable and has_permission("update", table) and \
not ownership_required("update", table):
update_url = URL(c=c, f=f, args=args + ["update"])
S3CRUD.action_button(labels.UPDATE, update_url,
_class="action-btn edit")
else:
read_url = URL(c=c, f=f, args=args)
S3CRUD.action_button(labels.READ, read_url,
_class="action-btn read")
# Delete button
# @todo: does not apply selective action (renders DELETE for
# all items even if the user is only permitted to delete
# some of them) => should implement "restrict", see
# S3CRUD.action_buttons
deletable = get_config(tablename, "deletable", True)
if deletable and \
has_permission("delete", table) and \
not ownership_required("delete", table):
delete_url = URL(c=c, f=f, args=args + ["delete"])
S3CRUD.action_button(labels.DELETE, delete_url,
_class="delete-btn")
# Append custom actions
if custom_actions:
actions = actions + custom_actions if actions else custom_actions
# -------------------------------------------------------------------------
@staticmethod
def htmlConfig(html,
id,
orderby,
rfields = None,
cache = None,
filteredrows = None,
**attr
):
"""
Method to wrap the html for a dataTable in a form, the list of formats
used for data export and add the config details required by dataTables,
@param html: The html table
@param id: The id of the table
@param orderby: the sort details see aaSort at http://datatables.net/ref
@param rfields: The list of resource fields
@param attr: dictionary of attributes which can be passed in
dt_lengthMenu: The menu options for the number of records to be shown
dt_displayLength : The default number of records that will be shown
dt_sDom : The Datatable DOM initialisation variable, describing
the order in which elements are displayed.
See http://datatables.net/ref for more details.
dt_pagination : Is pagination enabled, dafault 'true'
dt_pagination_type : How the pagination buttons are displayed
dt_bFilter: Enable or disable filtering of data.
dt_ajax_url: The URL to be used for the Ajax call
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_group: The column(s) that is(are) used to group the data
dt_group_totals: The number of record in each group.
This will be displayed in parenthesis
after the group title.
dt_group_titles: The titles to be used for each group.
These are a list of lists with the inner list
consisting of two values, the repr from the
db and the label to display. This can be more than
the actual number of groups (giving an empty group).
dt_group_space: Insert a space between the group heading and the next group
dt_bulk_selected: A list of selected items
dt_actions: dictionary of actions
dt_styles: dictionary of styles to be applied to a list of ids
for example:
{"warning" : [1,3,6,7,9],
"alert" : [2,10,13]}
dt_text_maximum_len: The maximum length of text before it is condensed
dt_text_condense_len: The length displayed text is condensed down to
dt_shrink_groups: If set then the rows within a group will be hidden
two types are supported, 'individulal' and 'accordion'
dt_group_types: The type of indicator for groups that can be 'shrunk'
Permitted valies are: 'icon' (the default) 'text' and 'none'
dt_base_url: base URL to construct export format URLs, resource
default URL without any URL method or query part
@global current.response.s3.actions used to get the RowActions
"""
from gluon.serializers import json as jsons
request = current.request
s3 = current.response.s3
dataTableID = s3.dataTableID
if not dataTableID or not isinstance(dataTableID, list):
dataTableID = s3.dataTableID = [id]
elif id not in dataTableID:
dataTableID.append(id)
# The configuration parameter from the server to the client will be
# sent in a json object stored in an hidden input field. This object
# will then be parsed by s3.dataTable.js and the values used.
config = Storage()
config.id = id
config.lengthMenu = attr.get("dt_lengthMenu",
[[ 25, 50, -1], [ 25, 50, str(current.T("All"))]]
)
config.displayLength = attr.get("dt_displayLength", s3.ROWSPERPAGE)
config.sDom = attr.get("dt_sDom", 'fril<"dataTable_table"t>pi')
config.pagination = attr.get("dt_pagination", "true")
config.paginationType = attr.get("dt_pagination_type", "full_numbers")
config.bFilter = attr.get("dt_bFilter", "true")
config.ajaxUrl = attr.get("dt_ajax_url", URL(c=request.controller,
f=request.function,
extension="aadata",
args=request.args,
vars=request.get_vars,
))
config.rowStyles = attr.get("dt_styles", [])
rowActions = attr.get("dt_row_actions", s3.actions)
if rowActions:
config.rowActions = rowActions
else:
config.rowActions = []
bulkActions = attr.get("dt_bulk_actions", None)
if bulkActions and not isinstance(bulkActions, list):
bulkActions = [bulkActions]
config.bulkActions = bulkActions
config.bulkCol = bulkCol = attr.get("dt_bulk_col", 0)
action_col = attr.get("dt_action_col", 0)
if bulkActions and bulkCol <= action_col:
action_col += 1
config.actionCol = action_col
group_list = attr.get("dt_group", [])
if not isinstance(group_list, list):
group_list = [group_list]
dt_group = []
for group in group_list:
if bulkActions and bulkCol <= group:
group += 1
if action_col >= group:
group -= 1
dt_group.append([group, "asc"])
config.group = dt_group
config.groupTotals = attr.get("dt_group_totals", [])
config.groupTitles = attr.get("dt_group_titles", [])
config.groupSpacing = attr.get("dt_group_space", "false")
for order in orderby:
if bulkActions:
if bulkCol <= order[0]:
order[0] += 1
if action_col >= order[0]:
order[0] -= 1
config.aaSort = orderby
config.textMaxLength = attr.get("dt_text_maximum_len", 80)
config.textShrinkLength = attr.get("dt_text_condense_len", 75)
config.shrinkGroupedRows = attr.get("dt_shrink_groups", "false")
config.groupIcon = attr.get("dt_group_types", [])
# Wrap the table in a form and add some data in hidden fields
form = FORM(_class="dt-wrapper")
if not s3.no_formats and len(html) > 0:
permalink = attr.get("dt_permalink", None)
base_url = attr.get("dt_base_url", None)
form.append(S3DataTable.listFormats(id, rfields,
permalink=permalink,
base_url=base_url))
form.append(html)
# Add the configuration details for this dataTable
form.append(INPUT(_type="hidden",
_id="%s_configurations" % id,
_name="config",
_value=jsons(config)))
# If we have a cache set up then pass it in
if cache:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_cache" %id,
_name="cache",
_value=jsons(cache)))
# If we have bulk actions then add the hidden fields
if bulkActions:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkMode" % id,
_name="mode",
_value="Inclusive"))
bulk_selected = attr.get("dt_bulk_selected", "")
if isinstance(bulk_selected, list):
bulk_selected = ",".join(bulk_selected)
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkSelection" % id,
_name="selected",
_value="[%s]" % bulk_selected))
form.append(INPUT(_type="hidden",
_id="%s_dataTable_filterURL" % id,
_class="dataTable_filterURL",
_name="filterURL",
_value="%s" % config.ajaxUrl))
return form
# -------------------------------------------------------------------------
# Helper methods
# -------------------------------------------------------------------------
def table(self, id, flist=None, action_col=0):
"""
Method to render the data as an html table. This is of use if
and html table is required without the dataTable goodness. However
if you want html for a dataTable then use the html() method
@param id: The id of the table
@param flist: The list of fields
@param action_col: The column where action columns will be displayed
(this is required by dataTables)
"""
data = self.data
heading = self.heading
start = self.start
end = self.end
if not flist:
flist = self.colnames
# Build the header row
header = THEAD()
tr = TR()
for field in flist:
if field == "BULK":
tr.append(TH(""))
else:
tr.append(TH(heading[field]))
header.append(tr)
body = TBODY()
if data:
# Build the body rows (the actual data)
rc = 0
for i in xrange(start, end):
row = data[i]
if rc % 2 == 0:
_class = "even"
else:
_class = "odd"
rc += 1
tr = TR(_class=_class)
for field in flist:
# Insert a checkbox for bulk select
if field == "BULK":
tr.append(TD(INPUT(_id="select%s" % row[flist[action_col]],
_type="checkbox",
_class="bulkcheckbox",
)))
else:
tr.append(TD(row[field]))
body.append(tr)
table = TABLE([header, body], _id=id, _class="dataTable display")
return table
# -------------------------------------------------------------------------
def aadata(self,
totalrows,
displayrows,
id,
sEcho,
flist,
stringify=True,
action_col=None,
**attr
):
"""
Method to render the data into a json object
@param totalrows: The total rows in the unfiltered query.
@param displayrows: The total rows in the filtered query.
@param id: The id of the table for which this ajax call will
respond to.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param flist: The list of fields
@param attr: dictionary of attributes which can be passed in
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_group_totals: The number of record in each group.
This will be displayed in parenthesis
after the group title.
"""
data = self.data
if not flist:
flist = self.colnames
start = self.start
end = self.end
if action_col is None:
action_col = attr.get("dt_action_col", 0)
structure = {}
aadata = []
for i in xrange(start, end):
row = data[i]
details = []
for field in flist:
if field == "BULK":
details.append("<INPUT id='select%s' type='checkbox' class='bulkcheckbox'>" % \
row[flist[action_col]])
else:
details.append(s3_unicode(row[field]))
aadata.append(details)
structure["dataTable_id"] = id
structure["dataTable_filter"] = self.filterString
structure["dataTable_groupTotals"] = attr.get("dt_group_totals", [])
structure["dataTable_sort"] = self.orderby
structure["aaData"] = aadata
structure["iTotalRecords"] = totalrows
structure["iTotalDisplayRecords"] = displayrows
structure["sEcho"] = sEcho
if stringify:
from gluon.serializers import json
return json(structure)
else:
return structure
# =============================================================================
class S3DataList(object):
    """ Class representing a data list """

    # -------------------------------------------------------------------------
    # Standard API
    # -------------------------------------------------------------------------
    def __init__(self,
                 resource,
                 list_fields,
                 records,
                 start=None,
                 limit=None,
                 total=None,
                 list_id=None,
                 layout=None,
                 row_layout=None):
        """
        Constructor

        @param resource: the S3Resource
        @param list_fields: the list fields
                            (list of field selector strings)
        @param records: the records
        @param start: index of the first item
        @param limit: maximum number of items
        @param total: total number of available items
        @param list_id: the HTML ID for this list
        @param layout: item renderer (optional) as function
                       (list_id, item_id, resource, rfields, record)
        @param row_layout: row renderer (optional) as
                           function(list_id, resource, rowsize, items)
        """

        self.resource = resource
        self.list_fields = list_fields
        self.records = records

        if list_id is None:
            self.list_id = "datalist"
        else:
            self.list_id = list_id

        if layout is not None:
            self.layout = layout
        else:
            # Fall back to the default item layout
            self.layout = S3DataListLayout()
        self.row_layout = row_layout

        self.start = start if start else 0
        self.limit = limit if limit else 0
        self.total = total if total else 0

    # ---------------------------------------------------------------------
    def html(self,
             start=None,
             limit=None,
             pagesize=None,
             rowsize=None,
             ajaxurl=None,
             empty=None,
             popup_url=None,
             popup_title=None,
             ):
        """
        Render list data as HTML (nested DIVs)

        @param start: index of the first item (in this page)
        @param limit: (actual) number of items (in this page)
        @param pagesize: maximum number of items per page
        @param rowsize: number of items per row
        @param ajaxurl: the URL to Ajax-update the datalist
        @param empty: message to show when there are no matching records
        @param popup_url: the URL for the modal used for the 'more'
                          button (=> we deactivate InfiniteScroll)
        @param popup_title: the title for the modal
        """

        T = current.T
        resource = self.resource
        list_fields = self.list_fields
        rfields = resource.resolve_selectors(list_fields)[0]

        list_id = self.list_id
        render = self.layout
        render_row = self.row_layout

        if not rowsize:
            rowsize = 1

        pkey = str(resource._id)

        records = self.records
        if records is not None:
            items = [
                DIV(T("Total Records: %(numrows)s") % {"numrows": self.total},
                    _class="dl-header",
                    _id="%s-header" % list_id)
            ]
            if empty is None:
                empty = resource.crud.crud_string(resource.tablename,
                                                  "msg_no_match")
            empty = DIV(empty, _class="dl-empty")
            if self.total > 0:
                # Hidden whenever there are records to show
                empty.update(_style="display:none;")
            items.append(empty)
            row_idx = int(self.start / rowsize) + 1
            for group in self.groups(records, rowsize):
                row = []
                col_idx = 0
                for record in group:
                    if pkey in record:
                        item_id = "%s-%s" % (list_id, record[pkey])
                    else:
                        # template
                        item_id = "%s-[id]" % list_id
                    item = render(list_id,
                                  item_id,
                                  resource,
                                  rfields,
                                  record)
                    if hasattr(item, "add_class"):
                        _class = "dl-item dl-%s-cols dl-col-%s" % (rowsize, col_idx)
                        item.add_class(_class)
                    row.append(item)
                    col_idx += 1
                _class = "dl-row %s" % ((row_idx % 2) and "even" or "odd")
                if render_row:
                    row = render_row(list_id,
                                     resource,
                                     rowsize,
                                     row)
                    if hasattr(row, "add_class"):
                        row.add_class(_class)
                else:
                    row = DIV(row, _class=_class)
                items.append(row)
                row_idx += 1
        else:
            # template
            raise NotImplementedError

        dl = DIV(items,
                 _class="dl",
                 _id=list_id,
                 )

        # Pagination parameters for s3.dataLists.js, passed to the
        # client in a hidden input field
        dl_data = {"startindex": start,
                   "maxitems": limit,
                   "totalitems": self.total,
                   "pagesize": pagesize,
                   "rowsize": rowsize,
                   "ajaxurl": ajaxurl,
                   }
        if popup_url:
            input_class = "dl-pagination"
            a_class = "s3_modal"
            #dl_data["popup_url"] = popup_url
            #dl_data["popup_title"] = popup_title
        else:
            input_class = "dl-pagination dl-scroll"
            a_class = ""
        from gluon.serializers import json as jsons
        dl_data = jsons(dl_data)
        dl.append(DIV(FORM(INPUT(_type="hidden",
                                 _class=input_class,
                                 _value=dl_data)
                           ),
                      A(T("more..."),
                        _href=popup_url or ajaxurl,
                        _class=a_class,
                        _title=popup_title,
                        ),
                      _class="dl-navigation",
                      ))

        return dl

    # ---------------------------------------------------------------------
    @staticmethod
    def groups(iterable, length):
        """
        Iterator to group data list items into rows

        @param iterable: the items iterable
        @param length: the number of items per row
        """

        iterable = iter(iterable)
        group = list(islice(iterable, length))
        while group:
            yield group
            group = list(islice(iterable, length))
        # NOTE: intentionally no "raise StopIteration" here - returning
        # normally terminates the generator; raising StopIteration inside
        # a generator body is an error under PEP 479 (Python 3.7+)
# =============================================================================
class S3DataListLayout(object):
    """ Default item layout (renderer) for data lists """

    # CSS class for the item element
    item_class = "thumbnail"

    # ---------------------------------------------------------------------
    def __call__(self, list_id, item_id, resource, rfields, record):
        """
        Render one data list item (entry point, wraps render_header
        and render_body)

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        """

        item = DIV(_id=item_id, _class=self.item_class)

        header = self.render_header(list_id,
                                    item_id,
                                    resource,
                                    rfields,
                                    record)
        body = self.render_body(list_id,
                                item_id,
                                resource,
                                rfields,
                                record)
        # Append header first, then body - either may be None
        for component in (header, body):
            if component is not None:
                item.append(component)

        return item

    # ---------------------------------------------------------------------
    def render_header(self, list_id, item_id, resource, rfields, record):
        """
        Render the card header - not implemented in the default
        layout, override in subclasses to add e.g. an icon, a title
        and a toolbox

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        """

        # Example markup for subclasses:
        #   DIV(I(_class="icon"),
        #       SPAN(" %s" % title, _class="card-title"),
        #       toolbox,
        #       _class="card-header")
        return None

    # ---------------------------------------------------------------------
    def render_body(self, list_id, item_id, resource, rfields, record):
        """
        Render the card body: all visible columns except the record key

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        """

        pkey = str(resource._id)
        contents = DIV(_class="media-body")

        for rfield in rfields:
            # Skip hidden columns and the record key
            if not rfield.show or rfield.colname == pkey:
                continue
            rendered = self.render_column(item_id, rfield, record)
            if rendered is None:
                continue
            classes = "dl-field %s %s" % ("dl-table-%s" % rfield.tname,
                                          "dl-field-%s" % rfield.fname)
            contents.append(DIV(rendered, _class=classes))

        return DIV(contents, _class="media")

    # ---------------------------------------------------------------------
    def render_icon(self, list_id, resource):
        """
        Render a body icon - not implemented in the default layout,
        override in subclasses

        @param list_id: the HTML ID of the list
        @param resource: the S3Resource to render
        """

        return None

    # ---------------------------------------------------------------------
    def render_toolbox(self, list_id, resource, record):
        """
        Render the toolbox - not implemented in the default layout,
        override in subclasses

        @param list_id: the HTML ID of the list
        @param resource: the S3Resource to render
        @param record: the record as dict
        """

        return None

    # ---------------------------------------------------------------------
    def render_column(self, item_id, rfield, record):
        """
        Render a data column as label + value

        @param item_id: the HTML element ID of the item
        @param rfield: the S3ResourceField for the column
        @param record: the record (from S3Resource.select)
        """

        colname = rfield.colname
        if colname not in record:
            return None

        # Unique element ID for the value, referenced by the label
        value_id = "%s-%s" % (item_id, colname.replace(".", "_"))

        label = LABEL("%s:" % rfield.label,
                      _for = value_id,
                      _class = "dl-field-label")
        value = SPAN(record[colname],
                     _id = value_id,
                     _class = "dl-field-value")

        return TAG[""](label, value)
# =============================================================================
class S3PivotTable(object):
""" Class representing a pivot table of a resource """
#: Supported aggregation methods
METHODS = {"list": "List",
"count": "Count",
"min": "Minimum",
"max": "Maximum",
"sum": "Total",
"avg": "Average",
#"std": "Standard Deviation"
}
def __init__(self, resource, rows, cols, layers, strict=True):
"""
Constructor - extracts all unique records, generates a
pivot table from them with the given dimensions and
computes the aggregated values for each cell.
@param resource: the S3Resource
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param layers: list of tuples of (field selector, method)
for the value aggregation(s)
@param strict: filter out dimension values which don't match
the resource filter
"""
# Initialize ----------------------------------------------------------
#
if not rows and not cols:
raise SyntaxError("No rows or columns specified for pivot table")
self.resource = resource
self.lfields = None
self.dfields = None
self.rfields = None
self.rows = rows
self.cols = cols
self.layers = layers
# API variables -------------------------------------------------------
#
self.records = None
""" All records in the pivot table as a Storage like:
{
<record_id>: <Row>
}
"""
self.empty = False
""" Empty-flag (True if no records could be found) """
self.numrows = None
""" The number of rows in the pivot table """
self.numcols = None
""" The number of columns in the pivot table """
self.cell = None
""" Array of pivot table cells in [rows[columns]]-order, each
cell is a Storage like:
{
records: <list_of_record_ids>,
(<fact>, <method>): <aggregated_value>, ...per layer
}
"""
self.row = None
""" List of row headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.col = None
""" List of column headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.totals = Storage()
""" The grand total values for each layer, as a Storage like:
{
(<fact>, <method): <total value>, ...per layer
}
"""
# Get the fields ------------------------------------------------------
#
tablename = resource.tablename
# The "report_fields" table setting defines which additional
# fields shall be included in the report base layer. This is
# useful to provide easy access to the record data behind a
# pivot table cell.
fields = current.s3db.get_config(tablename, "report_fields", [])
self._get_fields(fields=fields)
rows = self.rows
cols = self.cols
if DEBUG:
_start = datetime.datetime.now()
_debug("S3PivotTable %s starting" % tablename)
# Retrieve the records ------------------------------------------------
#
data = resource.select(self.rfields.keys(), limit=None)
drows = data["rows"]
if drows:
key = str(resource.table._id)
records = Storage([(i[key], i) for i in drows])
# Generate the data frame -----------------------------------------
#
gfields = self.gfields
pkey_colname = gfields[self.pkey]
rows_colname = gfields[rows]
cols_colname = gfields[cols]
if strict:
rfields = self.rfields
axes = (rfield
for rfield in (rfields[rows], rfields[cols])
if rfield != None)
axisfilter = resource.axisfilter(axes)
else:
axisfilter = None
dataframe = []
insert = dataframe.append
expand = self._expand
for _id in records:
row = records[_id]
item = {key: _id}
if rows_colname:
item[rows_colname] = row[rows_colname]
if cols_colname:
item[cols_colname] = row[cols_colname]
dataframe.extend(expand(item, axisfilter=axisfilter))
self.records = records
#if DEBUG:
#duration = datetime.datetime.now() - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("Dataframe complete after %s seconds" % duration)
# Group the records -----------------------------------------------
#
matrix, rnames, cnames = self._pivot(dataframe,
pkey_colname,
rows_colname,
cols_colname)
#if DEBUG:
#duration = datetime.datetime.now() - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("Pivoting complete after %s seconds" % duration)
# Initialize columns and rows -------------------------------------
#
if cols:
self.col = [Storage({"value": v}) for v in cnames]
self.numcols = len(self.col)
else:
self.col = [Storage({"value": None})]
self.numcols = 1
if rows:
self.row = [Storage({"value": v}) for v in rnames]
self.numrows = len(self.row)
else:
self.row = [Storage({"value": None})]
self.numrows = 1
# Add the layers --------------------------------------------------
#
add_layer = self._add_layer
layers = list(self.layers)
for f, m in self.layers:
add_layer(matrix, f, m)
#if DEBUG:
#duration = datetime.datetime.now() - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("Layers complete after %s seconds" % duration)
else:
# No items to report on -------------------------------------------
#
self.empty = True
if DEBUG:
duration = datetime.datetime.now() - _start
duration = '{:.2f}'.format(duration.total_seconds())
_debug("S3PivotTable completed in %s seconds" % duration)
# -------------------------------------------------------------------------
# API methods
# -------------------------------------------------------------------------
def __len__(self):
""" Total number of records in the report """
items = self.records
if items is None:
return 0
else:
return len(self.records)
# -------------------------------------------------------------------------
    def html(self,
             show_totals=True,
             url=None,
             filter_query=None,
             **attributes):
        """
        Render this pivot table as HTML

        @param show_totals: show totals for rows and columns
        @param url: link cells to this base-URL
        @param filter_query: use this S3ResourceQuery with the base-URL
        @param attributes: the HTML attributes for the table

        @return: a TABLE instance

        Side effect: stores the row/col/layer labels and the chart
        JSON in self.report_data.
        """
        T = current.T
        TOTAL = T("Total")
        components = []
        layers = self.layers
        resource = self.resource
        # NOTE(review): tablename is not referenced again in this method
        tablename = resource.tablename
        cols = self.cols
        rows = self.rows
        numcols = self.numcols
        numrows = self.numrows
        rfields = self.rfields
        get_label = self._get_field_label
        get_total = self._totals
        # Representation methods
        represent_method = self._represent_method
        cols_repr = represent_method(cols)
        rows_repr = represent_method(rows)
        layers_repr = dict([(f, represent_method(f)) for f, m in layers])
        layer_label = None
        # Collected titles/totals (appended to while rendering)
        col_titles = []
        add_col_title = col_titles.append
        col_totals = []
        add_col_total = col_totals.append
        row_titles = []
        add_row_title = row_titles.append
        row_totals = []
        add_row_total = row_totals.append
        # Layer titles:
        # Get custom labels from report options
        layer_labels = Storage()
        report_options = resource.get_config("report_options", None)
        if report_options and "fact" in report_options:
            layer_opts = report_options["fact"]
            for item in layer_opts:
                if isinstance(item, (tuple, list)) and len(item) == 3:
                    # Qualify an unprefixed selector with the resource alias
                    if not "." in item[0].split("$")[0]:
                        item = ("%s.%s" % (resource.alias, item[0]),
                                item[1],
                                item[2])
                    layer_labels[(item[0], item[1])] = item[2]
        labels = []
        get_mname = self._get_method_label
        for layer in layers:
            if layer in layer_labels:
                # Custom label
                label = layer_labels[layer]
                if not labels:
                    layer_label = label
                labels.append(s3_unicode(label))
            else:
                # Construct label from field-label and method
                label = get_label(rfields, layer[0], resource, "fact")
                mname = get_mname(layer[1])
                if not labels:
                    # For "list" layers, the chart title uses the count label
                    m = layer[1] == "list" and get_mname("count") or mname
                    layer_label = "%s (%s)" % (label, m)
                labels.append("%s (%s)" % (label, mname))
        layers_title = TH(" / ".join(labels))
        # Columns field title
        if cols:
            col_label = get_label(rfields, cols, resource, "cols")
            # +1 for the row-totals column
            _colspan = numcols + 1
        else:
            col_label = ""
            _colspan = numcols
        cols_title = TH(col_label, _colspan=_colspan, _scope="col")
        titles = TR(layers_title, cols_title)
        # Sort dimensions:
        cells = self.cell
        # Sort rows (keeping each row header paired with its cell array)
        rvals = self.row
        rows_list = []
        for i in xrange(numrows):
            row = rvals[i]
            # Add representation value of the row header
            row["text"] = rows_repr(row.value)
            rows_list.append((row, cells[i]))
        self._sortdim(rows_list, rfields[rows], 0)
        # Sort columns (keeping each column header paired with its index)
        cvals = self.col
        cols_list = []
        for j in xrange(numcols):
            column = cvals[j]
            column["text"] = cols_repr(column.value)
            cols_list.append((column, j))
        self._sortdim(cols_list, rfields[cols], 0)
        # Build the column headers:
        # Header for the row-titles column
        row_label = get_label(rfields, rows, resource, "rows")
        rows_title = TH(row_label, _scope="col")
        headers = TR(rows_title)
        add_header = headers.append
        # Headers for the cell columns
        for j in xrange(numcols):
            v = cols_list[j][0].text
            add_col_title(s3_truncate(unicode(v)))
            colhdr = TH(v, _scope="col")
            add_header(colhdr)
        # Header for the row-totals column
        if show_totals and cols is not None:
            add_header(TH(TOTAL, _class="totals_header rtotal", _scope="col"))
        thead = THEAD(titles, headers)
        # Render the table body:
        tbody = TBODY()
        add_row = tbody.append
        # Lookup table for cell list values (per layer index), used by
        # the client-side cell-zoom to show the records behind a cell
        cell_lookup_table = {} # {{}, {}}
        cell_vals = Storage()
        for i in xrange(numrows):
            # Initialize row
            _class = i % 2 and "odd" or "even"
            tr = TR(_class=_class)
            add_cell = tr.append
            # Row header
            row = rows_list[i][0]
            v = row["text"]
            add_row_title(s3_truncate(unicode(v)))
            rowhdr = TD(v)
            add_cell(rowhdr)
            row_cells = rows_list[i][1]
            # Result cells
            for j in xrange(numcols):
                # Map back to the unsorted cell index
                cell_idx = cols_list[j][1]
                cell = row_cells[cell_idx]
                vals = []
                cell_ids = []
                add_value = vals.append
                for layer_idx, layer in enumerate(layers):
                    f, m = layer
                    represent = layers_repr[f]
                    value = cell[layer]
                    if m == "list":
                        # Render list-type layers as a bullet list
                        if isinstance(value, list):
                            l = [represent(v) for v in value]
                        elif value is None:
                            l = ["-"]
                        else:
                            if type(value) in (int, float):
                                l = IS_NUMBER.represent(value)
                            else:
                                l = unicode(value)
                        #add_value(", ".join(l))
                        add_value(UL([LI(v) for v in l]))
                    else:
                        if type(value) in (int, float):
                            add_value(IS_NUMBER.represent(value))
                        else:
                            add_value(unicode(value))
                    # Collect the fact field values behind this cell for
                    # the cell-zoom (only for "count" layers)
                    layer_ids = []
                    layer_values = cell_lookup_table.get(layer_idx, {})
                    if m == "count":
                        rfield = rfields[f]
                        field = rfield.field
                        colname = rfield.colname
                        has_fk = field is not None and s3_has_foreign_key(field)
                        # NOTE(review): "id" shadows the builtin here
                        for id in cell.records:
                            record = self.records[id]
                            try:
                                fvalue = record[colname]
                            except AttributeError:
                                fvalue = None
                            if fvalue is not None:
                                if has_fk:
                                    if type(fvalue) is not list:
                                        fvalue = [fvalue]
                                    # list of foreign keys
                                    for fk in fvalue:
                                        if fk is not None and fk not in layer_ids:
                                            layer_ids.append(int(fk))
                                            if fk not in layer_values:
                                                layer_values[fk] = s3_unicode(field.represent(fk))
                                else:
                                    if type(fvalue) is not list:
                                        fvalue = [fvalue]
                                    # Non-FK values get synthetic sequential IDs
                                    for val in fvalue:
                                        if val is not None:
                                            if val not in cell_vals:
                                                next_id = len(cell_vals)
                                                cell_vals[val] = next_id
                                                layer_ids.append(next_id)
                                                layer_values[next_id] = s3_unicode(represent(val))
                                            else:
                                                prev_id = cell_vals[val]
                                                if prev_id not in layer_ids:
                                                    layer_ids.append(prev_id)
                    # Sort the IDs by their represented values
                    layer_ids.sort(key=lambda i: layer_values[i])
                    cell_ids.append(layer_ids)
                    cell_lookup_table[layer_idx] = layer_values
                vals = [DIV(v, _class="report-cell-value") for v in vals]
                if any(cell_ids):
                    # Attach the record IDs for the client-side cell-zoom
                    cell_attr = {"_data-records": cell_ids}
                    vals.append(DIV(_class="report-cell-zoom"))
                else:
                    cell_attr = {}
                add_cell(TD(vals, **cell_attr))
            # Row total
            totals = get_total(row, layers, append=add_row_total)
            if show_totals and cols is not None:
                add_cell(TD(totals))
            add_row(tr)
        # Table footer:
        i = numrows
        _class = i % 2 and "odd" or "even"
        _class = "%s %s" % (_class, "totals_row")
        col_total = TR(_class=_class)
        add_total = col_total.append
        add_total(TH(TOTAL, _class="totals_header", _scope="row"))
        # Column totals
        for j in xrange(numcols):
            cell_idx = cols_list[j][1]
            col = self.col[cell_idx]
            totals = get_total(col, layers, append=add_col_total)
            add_total(TD(IS_NUMBER.represent(totals)))
        # Grand total
        if cols is not None:
            grand_totals = get_total(self.totals, layers)
            add_total(TD(grand_totals))
        tfoot = TFOOT(col_total)
        # Wrap up:
        append = components.append
        append(thead)
        append(tbody)
        if show_totals:
            append(tfoot)
        # Chart data: serialize labels, dimensions and a compacted
        # version of the pivot data for the client-side chart renderer
        layer_label = s3_unicode(layer_label)
        BY = s3_unicode(T("by"))
        row_label = "%s %s" % (BY, s3_unicode(row_label))
        if col_label:
            col_label = "%s %s" % (BY, s3_unicode(col_label))
        if filter_query and hasattr(filter_query, "serialize_url"):
            filter_vars = filter_query.serialize_url(resource=self.resource)
        else:
            filter_vars = {}
        hide_opts = current.deployment_settings.get_ui_hide_report_options()
        json_data = json.dumps(dict(t=layer_label,
                                    x=col_label,
                                    y=row_label,
                                    r=self.rows,
                                    c=self.cols,
                                    d=self.compact(maxrows=50,
                                                   maxcols=30,
                                                   represent=True),
                                    u=url,
                                    f=filter_vars,
                                    h=hide_opts,
                                    cell_lookup_table=cell_lookup_table))
        self.report_data = Storage(row_label=row_label,
                                   col_label=col_label,
                                   layer_label=layer_label,
                                   json_data=json_data)
        return TABLE(components, **attributes)
# -------------------------------------------------------------------------
    def json(self,
             layer=None,
             maxrows=None,
             maxcols=None,
             least=False,
             represent=True):
        """
        Render the pivot table data as JSON-serializable dict

        @param layer: the layer (defaults to the first layer)
        @param maxrows: maximum number of rows (None for all)
        @param maxcols: maximum number of columns (None for all)
        @param least: render the least n rows/columns rather than
                      the top n (with maxrows/maxcols)
        @param represent: represent values

        Rows/columns beyond maxrows/maxcols are aggregated under a
        synthetic "__other__" group.

        {
            labels: {
                layer:
                rows:
                cols:
                total:
            },
            cells: [rows[cols]],
            rows: [rows[index, value, label, total]],
            cols: [cols[index, value, label, total]],

            total: <grand total>,
            filter: [rows selector, cols selector]
        }
        """
        rfields = self.rfields
        resource = self.resource
        # NOTE(review): tablename is not referenced again in this method
        tablename = resource.tablename
        T = current.T
        OTHER = "__other__"
        # The layer
        if layer is None:
            layer = self.layers[0]
        field, method = layer
        rows_dim = self.rows
        cols_dim = self.cols
        # The data
        orows = []
        ocols = []
        ocells = []
        lookup = {}
        if not self.empty:
            if method == "min":
                # For "min", the least/top meaning is inverted
                least = not least
            numeric = lambda x: isinstance(x, (int, long, float))
            # Aggregation method for the "__other__" group totals
            hmethod = "sum" if method in ("list", "count") else method
            if represent:
                row_repr = self._represent_method(rows_dim)
                col_repr = self._represent_method(cols_dim)
            else:
                row_repr = col_repr = lambda v: s3_unicode(v)
            others = s3_unicode(current.T("Others"))
            irows = self.row
            icols = self.col
            rows = []
            cols = []
            rtail = (None, None)
            ctail = (None, None)
            # Group and sort the rows
            is_numeric = None
            for i in xrange(self.numrows):
                irow = irows[i]
                total = irow[layer]
                if is_numeric is None:
                    is_numeric = numeric(total)
                if not is_numeric:
                    # Fall back to the record count for non-numeric layers
                    total = len(irow.records)
                header = Storage(value = irow.value,
                                 text = irow.text if "text" in irow
                                                  else row_repr(irow.value))
                rows.append((i, total, header))
            if maxrows is not None:
                # Aggregate all rows below the cut-off into rtail
                rtail = self._tail(rows, maxrows, least=least, method=hmethod)
            self._sortdim(rows, rfields[rows_dim])
            if rtail[1] is not None:
                rows.append((OTHER, rtail[1], Storage(value=None, text=others)))
            # NOTE(review): row_indices is not referenced again here
            row_indices = [i[0] for i in rows]
            # Group and sort the cols
            is_numeric = None
            for i in xrange(self.numcols):
                icol = icols[i]
                total = icol[layer]
                if is_numeric is None:
                    is_numeric = numeric(total)
                if not is_numeric:
                    total = len(icol["records"])
                header = Storage(value = icol.value,
                                 text = icol.text if "text" in icol
                                                  else col_repr(icol.value))
                cols.append((i, total, header))
            if maxcols is not None:
                # Aggregate all columns below the cut-off into ctail
                ctail = self._tail(cols, maxcols, least=least, method=hmethod)
            self._sortdim(cols, rfields[cols_dim])
            if ctail[1] is not None:
                cols.append((OTHER, ctail[1], Storage(value=None, text=others)))
            # NOTE(review): col_indices is not referenced again here
            col_indices = [i[0] for i in cols]
            rothers = rtail[0] or []
            cothers = ctail[0] or []
            # Group and sort the cells: each source cell contributes to
            # its own position and, if applicable, to the "__other__" group
            icell = self.cell
            cells = {}
            for i in xrange(self.numrows):
                irow = icell[i]
                ridx = (i, OTHER) if rothers and i in rothers else (i,)
                for j in xrange(self.numcols):
                    cell = irow[j]
                    cidx = (j, OTHER) if cothers and j in cothers else (j,)
                    cell_records = cell["records"]
                    items = cell[layer]
                    value = items if is_numeric \
                                  else len(cell_records)
                    for ri in ridx:
                        if ri not in cells:
                            orow = cells[ri] = {}
                        else:
                            orow = cells[ri]
                        for ci in cidx:
                            if ci not in orow:
                                ocell = orow[ci] = {}
                                if OTHER in (ci, ri):
                                    # "__other__" cells collect lists to
                                    # be aggregated later
                                    ocell["value"] = [value]
                                    ocell["items"] = [items]
                                else:
                                    ocell["value"] = value
                                    ocell["items"] = items
                                ocell["records"] = cell_records
                            else:
                                ocell = orow[ci]
                                ocell["value"].append(value)
                                ocell["items"].append(items)
                                ocell["records"].extend(cell_records)
            # Aggregate the grouped values
            ctotals = True
            value_map = {}
            rappend = orows.append
            cappend = ocols.append
            rfield = rfields[field]
            f = rfield.field
            has_fk = f is not None and s3_has_foreign_key(f)
            if has_fk:
                _repr = lambda v: s3_unicode(f.represent(v))
            else:
                _repr = lambda v: s3_unicode(self._represent_method(field)(v))
            for rindex, rtotal, rtitle in rows:
                orow = []
                rval = s3_unicode(rtitle.value) \
                       if rtitle.value is not None and rindex != OTHER else None
                if represent:
                    rappend((rindex,
                             rindex in rothers,
                             rtotal,
                             rval,
                             rtitle.text))
                else:
                    rappend((rindex,
                             rindex in rothers,
                             rtotal,
                             rval))
                for cindex, ctotal, ctitle in cols:
                    cell = cells[rindex][cindex]
                    items = cell["items"]
                    value = cell["value"]
                    cell_records = cell["records"]
                    if type(value) is list:
                        # "__other__" group => aggregate the collected values
                        value = self._aggregate(value, hmethod)
                    if method == "list":
                        if type(items) is list:
                            items = [item for item in s3_flatlist(items)
                                     if item is not None]
                    else:
                        items = value
                    # Build a lookup table for field values if counting
                    if method == "count":
                        keys = []
                        for record_id in cell_records:
                            record = self.records[record_id]
                            try:
                                fvalue = record[rfield.colname]
                            except AttributeError:
                                continue
                            if fvalue is None:
                                continue
                            if type(fvalue) is not list:
                                fvalue = [fvalue]
                            for v in fvalue:
                                if v is None:
                                    continue
                                if has_fk:
                                    # Foreign keys are their own lookup keys
                                    if v not in keys:
                                        keys.append(v)
                                    if v not in lookup:
                                        lookup[v] = _repr(v)
                                else:
                                    # Non-FK values get synthetic IDs
                                    if v not in value_map:
                                        next_id = len(value_map)
                                        value_map[v] = next_id
                                        keys.append(next_id)
                                        lookup[next_id] = _repr(v)
                                    else:
                                        prev_id = value_map[v]
                                        if prev_id not in keys:
                                            keys.append(prev_id)
                        # Sort the keys by their represented values
                        keys.sort(key=lambda i: lookup[i])
                    else:
                        keys = None
                    orow.append({"keys": keys,
                                 "items": items,
                                 "value": value})
                    if ctotals:
                        # Column headers are emitted only once (first row)
                        cval = s3_unicode(ctitle.value) \
                               if ctitle.value is not None and cindex != OTHER else None
                        if represent:
                            cappend((cindex,
                                     cindex in cothers,
                                     ctotal,
                                     cval,
                                     ctitle.text))
                        else:
                            cappend((cindex,
                                     cindex in cothers,
                                     ctotal,
                                     cval))
                ctotals = False
                ocells.append(orow)
        output = {"rows": orows,
                  "cols": ocols,
                  "cells": ocells,
                  "lookup": lookup if lookup else None,
                  "total": self._totals(self.totals, [layer]),
                  "nodata": None if not self.empty else str(T("No data available"))}
        # Lookup labels
        get_label = self._get_field_label
        get_mname = self._get_method_label
        labels = {"total": str(T("Total")),
                  "none": str(current.messages["NONE"]),
                  "per": str(T("per")),
                  "breakdown": str(T("Breakdown")),
                  }
        # Layer label
        layer_label = None
        field_label = None
        report_options = resource.get_config("report_options", None)
        if report_options and "fact" in report_options:
            # Custom label from report options?
            import re
            # Matches e.g. "count(field$selector)" => method + selector
            layer_pattern = re.compile("([a-zA-Z]+)\((.*)\)\Z")
            prefix = resource.prefix_selector
            selector = prefix(field)
            for item in report_options["fact"]:
                if type(item) is tuple:
                    label, s = item
                    match = layer_pattern.match(s)
                    if match is not None:
                        s, m = match.group(2), match.group(1)
                    else:
                        m = None
                    if prefix(s) == selector:
                        if m == method:
                            # Specific layer label
                            layer_label = s3_unicode(label)
                            break
                        else:
                            # Field label
                            field_label = label
        if layer_label is None:
            # Construct label from field and method
            if field_label is None:
                field_label = get_label(rfields, field, resource, "fact")
            method_label = get_mname(method)
            layer_label = "%s (%s)" % (field_label, method_label)
        labels["layer"] = layer_label
        # Rows title
        if rows_dim:
            labels["rows"] = str(get_label(rfields,
                                           rows_dim,
                                           resource,
                                           "rows"))
        else:
            labels["rows"] = ""
        # Columns title
        if cols_dim:
            labels["cols"] = str(get_label(rfields,
                                           cols_dim,
                                           resource,
                                           "cols"))
        else:
            labels["cols"] = ""
        output["labels"] = labels
        # Filter-URL and axis selectors
        prefix = resource.prefix_selector
        output["filter"] = (prefix(rows_dim) if rows_dim else None,
                            prefix(cols_dim) if cols_dim else None)
        return output
# -------------------------------------------------------------------------
def compact(self,
            maxrows=50,
            maxcols=50,
            layer=None,
            least=False,
            represent=False):
    """
    Get the top/least numeric results for a layer, used to
    generate the input data for charts.

    @param maxrows: maximum number of rows; extracts the maxrows-1
                    top/least rows and aggregates the rest under
                    "__other__" (None to keep all rows)
    @param maxcols: maximum number of columns (same scheme as maxrows)
    @param layer: the layer (defaults to the first layer)
    @param least: use the least n instead of the top n results
    @param represent: represent the row/col dimension values as
                      strings using the respective field
                      representation
    @return: dict {"rows": [...], "cols": [...], "cells": [[...]]}
    """
    default = {"rows": [], "cols": [], "cells": []}
    if self.empty or layer and layer not in self.layers:
        return default
    elif not layer:
        layer = self.layers[0]
    method = layer[-1]
    if method == "min":
        # The "top" of minima are the least values
        least = not least
    numeric = lambda x: isinstance(x, (int, long, float))
    rfields = self.rfields
    OTHER = "__other__"
    if represent:
        row_repr = self._represent_method(self.rows)
        col_repr = self._represent_method(self.cols)
    else:
        row_repr = col_repr = lambda v: s3_unicode(v)
    others = s3_unicode(current.T("Others"))
    irows = self.row
    icols = self.col
    rows = []
    cols = []
    # Group and sort the rows
    is_numeric = None
    for i in xrange(self.numrows):
        r = irows[i]
        total = r[layer]
        if is_numeric is None:
            is_numeric = numeric(total)
        if not is_numeric:
            # Non-numeric layer => chart the number of records instead
            total = len(r["records"])
        header = Storage(value = r.value,
                         text = r.text
                                if "text" in r else row_repr(r.value))
        rows.append((i, total, header))
    if maxrows is not None:
        rows = self._top(rows, maxrows,
                         least=least, method=method, other=OTHER)
    # Keep the "__other__" item out of the sort, re-append it last
    last = rows.pop(-1) if rows[-1][0] == OTHER else None
    self._sortdim(rows, rfields[self.rows])
    if last:
        last = (last[0], last[1], Storage(value=None, text=others))
        rows.append(last)
    row_indices = [i[0] for i in rows]
    # Group and sort the cols (same scheme as the rows)
    is_numeric = None
    for i in xrange(self.numcols):
        c = icols[i]
        total = c[layer]
        if is_numeric is None:
            is_numeric = numeric(total)
        if not is_numeric:
            total = len(c["records"])
        header = Storage(value = c.value,
                         text = c.text
                                if "text" in c else col_repr(c.value))
        cols.append((i, total, header))
    if maxcols is not None:
        cols = self._top(cols, maxcols,
                         least=least, method=method, other=OTHER)
    last = cols.pop(-1) if cols[-1][0] == OTHER else None
    self._sortdim(cols, rfields[self.cols])
    if last:
        last = (last[0], last[1], Storage(value=None, text=others))
        cols.append(last)
    col_indices = [i[0] for i in cols]
    # Group and sort the cells
    icell = self.cell
    cells = {}
    for i in xrange(self.numrows):
        irow = icell[i]
        # Cells of pruned rows/cols are collected under "__other__"
        ridx = i if i in row_indices else OTHER
        if ridx not in cells:
            orow = cells[ridx] = {}
        else:
            orow = cells[ridx]
        for j in xrange(self.numcols):
            cell = irow[j]
            cidx = j if j in col_indices else OTHER
            value = cell[layer] if is_numeric else len(cell["records"])
            if cidx not in orow:
                # Grouped ("other") cells hold value lists for later aggregation
                orow[cidx] = [value] if cidx == OTHER or ridx == OTHER else value
            else:
                orow[cidx].append(value)
    # Aggregate the grouped values
    orows = []
    ocols = []
    ocells = []
    ctotals = True
    rappend = orows.append
    cappend = ocols.append
    for ri, rt, rh in rows:
        orow = []
        if represent:
            rappend((ri, s3_unicode(rh.value), rh.text, rt))
        else:
            rappend((ri, s3_unicode(rh.value), rt))
        for ci, ct, ch in cols:
            value = cells[ri][ci]
            if type(value) is list:
                value = self._aggregate(value, method)
            orow.append(value)
            if ctotals:
                if represent:
                    cappend((ci, s3_unicode(ch.value), ch.text, ct))
                else:
                    cappend((ci, s3_unicode(ch.value), ct))
        # Column headers are collected during the first row pass only
        ctotals = False
        ocells.append(orow)
    return {"rows": orows, "cols": ocols, "cells": ocells}
# -------------------------------------------------------------------------
# Internal methods
# -------------------------------------------------------------------------
def _pivot(self, items, pkey_colname, rows_colname, cols_colname):
    """
    2-dimensional pivoting of a list of unique items

    @param items: list of unique items as dicts
    @param pkey_colname: column name of the primary key
    @param rows_colname: column name of the row dimension
    @param cols_colname: column name of the column dimension

    @return: tuple of (cell matrix, row headers, column headers),
             where cell matrix is a 2-dimensional array [rows[columns]]
             and row headers and column headers each are lists (in the
             same order as the cell matrix); cells of row/column
             combinations that never occur are None
    """
    # Map each distinct row/column value to a stable index (in order
    # of first appearance), and collect the primary keys per cell
    row_index = {}
    col_index = {}
    cell_map = {}
    for item in items:
        rkey = item[rows_colname] if rows_colname else None
        ckey = item[cols_colname] if cols_colname else None
        if rkey in row_index:
            r = row_index[rkey]
        else:
            r = row_index[rkey] = len(row_index)
        if ckey in col_index:
            c = col_index[ckey]
        else:
            c = col_index[ckey] = len(col_index)
        cell_map.setdefault((r, c), []).append(item[pkey_colname])
    # Build the matrix; .get() yields None for empty combinations
    matrix = [[cell_map.get((r, c))
               for c in range(len(col_index))]
              for r in range(len(row_index))]
    # Invert the index maps into header lists
    rnames = [None] * len(row_index)
    for value, index in row_index.items():
        rnames[index] = value
    cnames = [None] * len(col_index)
    for value, index in col_index.items():
        cnames[index] = value
    return matrix, rnames, cnames
# -------------------------------------------------------------------------
def _add_layer(self, matrix, fact, method):
    """
    Compute an aggregation layer, updates:

        - self.cell: the aggregated values per cell
        - self.row: the totals per row
        - self.col: the totals per column
        - self.totals: the overall totals per layer

    @param matrix: the cell matrix
    @param fact: the fact field
    @param method: the aggregation method

    @raise SyntaxError: for unsupported aggregation methods
    """
    if method not in self.METHODS:
        raise SyntaxError("Unsupported aggregation method: %s" % method)
    # NOTE(review): "items" is assigned but never used below — confirm
    # it can be removed upstream
    items = self.records
    rfields = self.rfields
    rows = self.row
    cols = self.col
    records = self.records
    extract = self._extract
    aggregate = self._aggregate
    resource = self.resource
    RECORDS = "records"
    VALUES = "values"
    table = resource.table
    pkey = table._id.name
    if method is None:
        method = "list"
    layer = (fact, method)
    numcols = len(self.col)
    numrows = len(self.row)
    # Initialize cells
    if self.cell is None:
        self.cell = [[Storage()
                      for i in xrange(numcols)]
                     for j in xrange(numrows)]
    cells = self.cell
    all_values = []
    for r in xrange(numrows):
        # Initialize row header
        row = rows[r]
        row[RECORDS] = []
        row[VALUES] = []
        row_records = row[RECORDS]
        row_values = row[VALUES]
        for c in xrange(numcols):
            # Initialize column header (lazily, on first row pass)
            col = cols[c]
            if RECORDS not in col:
                col[RECORDS] = []
            col_records = col[RECORDS]
            if VALUES not in col:
                col[VALUES] = []
            col_values = col[VALUES]
            # Get the records for this cell (cached on the cell itself)
            cell = cells[r][c]
            if RECORDS in cell and cell[RECORDS] is not None:
                ids = cell[RECORDS]
            else:
                data = matrix[r][c]
                if data:
                    # Drop None-placeholders from the pivot matrix
                    remove = data.remove
                    while None in data:
                        remove(None)
                    ids = data
                else:
                    ids = []
                cell[RECORDS] = ids
            row_records.extend(ids)
            col_records.extend(ids)
            # Get the values
            if fact is None:
                # No fact field => aggregate over the record ids;
                # fact is rebound to pkey so subsequent cells take
                # the else-branch
                fact = pkey
                values = ids
                row_values = row_records
                # NOTE(review): looks like a typo — should presumably be
                # col_records; confirm against upstream before changing
                col_values = row_records
                # Py2: dict.keys() returns a list, so extend() below works
                all_values = records.keys()
            else:
                values = []
                append = values.append
                for i in ids:
                    value = extract(records[i], fact)
                    if value is None:
                        continue
                    append(value)
                values = list(s3_flatlist(values))
                if method in ("list", "count"):
                    # De-duplicate for list/count aggregation
                    values = list(set(values))
                row_values.extend(values)
                col_values.extend(values)
                all_values.extend(values)
            # Aggregate values
            value = aggregate(values, method)
            cell[layer] = value
        # Compute row total
        row[layer] = aggregate(row_values, method)
        del row[VALUES]
    # Compute column total
    for c in xrange(numcols):
        col = cols[c]
        col[layer] = aggregate(col[VALUES], method)
        del col[VALUES]
    # Compute overall total
    self.totals[layer] = aggregate(all_values, method)
    return
# -------------------------------------------------------------------------
@staticmethod
def _aggregate(values, method):
    """
    Compute an aggregation of a list of atomic values

    @param values: the values as list (or None)
    @param method: the aggregation method: "list", "count", "min",
                   "max", "sum" or "avg" (None defaults to "list")

    @return: the aggregated value, or None if values is None, if the
             values are not aggregatable with the given method, or if
             the method is unsupported
    """
    if values is None:
        return None
    if method is None or method == "list":
        # Empty list => no value
        return values if values else None
    elif method == "count":
        # Count only the non-None values
        return len([v for v in values if v is not None])
    elif method == "min":
        try:
            return min(values)
        except (TypeError, ValueError):
            return None
    elif method == "max":
        try:
            return max(values)
        except (TypeError, ValueError):
            return None
    elif method == "sum":
        try:
            return sum(values)
        except (TypeError, ValueError):
            return None
    elif method == "avg":
        # BUG FIX: was `method in ("avg")` — ("avg") is just the string
        # "avg", so substring codes like "a", "v" or "va" would also
        # match this branch
        try:
            if len(values):
                return sum(values) / float(len(values))
            else:
                return 0.0
        except (TypeError, ValueError):
            return None
    else:
        return None
# -------------------------------------------------------------------------
@staticmethod
def _sortdim(items, rfield, index=2):
    """
    Sort a dimension in-place

    @param items: the items as list of tuples
                  (index, total, {value: value, text: text})
    @param rfield: the dimension (S3ResourceField)
    @param index: alternative index of the value/text dict
                  within each item
    """
    if not rfield:
        return
    # Sort by the represented text for foreign keys and fixed option
    # sets, otherwise by the raw value
    ftype = rfield.ftype
    sortby = "value"
    if ftype[:9] == "reference":
        sortby = "text"
    elif ftype == "integer":
        validator = rfield.requires
        if isinstance(validator, (tuple, list)):
            validator = validator[0]
        if isinstance(validator, IS_EMPTY_OR):
            validator = validator.other
        if isinstance(validator, IS_IN_SET):
            sortby = "text"
    items.sort(key=lambda item: item[index][sortby])
    return
# -------------------------------------------------------------------------
def _top(self, items, length=10, least=False, method=None, other="__other__"):
    """
    Extract the top/least <length> items by total, aggregating all
    remaining items under a single "other" item.

    @param items: the items as list of tuples
                  (index, total, {value: value, text: text})
    @param length: the maximum number of items to return, including
                   the "other" item (the docstring previously referred
                   to a non-existent parameter "n")
    @param least: find the least rather than the top items
    @param method: the aggregation method for the "other" total
    @param other: the index to use for the "other" item

    @return: the reduced list (2-tuple for the "other" item), or the
             original items if no reduction is needed/possible
    """
    try:
        if len(items) > length:
            m = length - 1
            ranked = list(items)
            # FIX: replaced the Python-2-only cmp-style sort
            # (l.sort(lambda x, y: int(y[1]-x[1]))) with an equivalent
            # stable key-based descending sort, which also works in Py3
            ranked.sort(key=lambda item: item[1], reverse=True)
            if least:
                ranked.reverse()
            # Aggregate the totals of all pruned items
            ts = (other, self._aggregate([t[1] for t in ranked[m:]], method))
            return ranked[:m] + [ts]
    except (TypeError, ValueError):
        # Totals not sortable/aggregatable => return unchanged
        pass
    return items
# -------------------------------------------------------------------------
@classmethod
def _tail(cls, items, length=10, least=False, method=None):
    """
    Aggregate all items after the top/least <length>-1 (by total).

    @param items: the items as list of tuples
                  (index, total, {value: value, text: text})
    @param length: the number of items to keep
    @param least: find least rather than top

    @return: tuple (keys, aggregated total) of the pruned items, or
             (None, None) if no reduction is needed/possible
    """
    try:
        if len(items) > length:
            ranked = list(items)
            # FIX: replaced the Python-2-only cmp-style sort with an
            # equivalent stable key-based descending sort (works in Py3)
            ranked.sort(key=lambda item: item[1], reverse=True)
            if least:
                ranked.reverse()
            tail = dict((item[0], item[1]) for item in ranked[length - 1:])
            # FIX: materialize keys/values as lists (in Py3 these are
            # views; in Py2 this is an identical copy)
            return (list(tail.keys()),
                    cls._aggregate(list(tail.values()), method))
    except (TypeError, ValueError):
        pass
    return (None, None)
# -------------------------------------------------------------------------
def _get_fields(self, fields=None):
    """
    Determine the fields needed to generate the report, and update:

        - self.pkey/self.rows/self.cols: prefixed axis selectors
        - self.dfields: fields to generate the layers
        - self.rfields: resolved resource field map
        - self.gfields: fields to group the records by

    @param fields: fields to include in the report (all fields)
    """
    resource = self.resource
    table = resource.table
    # Lambda to prefix all field selectors
    alias = resource.alias
    def prefix(s):
        if isinstance(s, (tuple, list)):
            # (label, selector) tuples => prefix the selector
            return prefix(s[-1])
        if "." not in s.split("$", 1)[0]:
            return "%s.%s" % (alias, s)
        elif s[:2] == "~.":
            return "%s.%s" % (alias, s[2:])
        else:
            return s
    self.pkey = pkey = prefix(table._id.name)
    self.rows = rows = self.rows and prefix(self.rows) or None
    self.cols = cols = self.cols and prefix(self.cols) or None
    if not fields:
        fields = []
    # dfields (data-fields): fields to generate the layers
    dfields = [prefix(s) for s in fields]
    if rows and rows not in dfields:
        dfields.append(rows)
    if cols and cols not in dfields:
        dfields.append(cols)
    if pkey not in dfields:
        dfields.append(pkey)
    for i in xrange(len(self.layers)):
        f, m = self.layers[i]
        s = prefix(f)
        self.layers[i] = (s, m)
        if s not in dfields:
            # NOTE(review): checks the prefixed selector `s` but appends
            # the unprefixed `f` — looks like a typo for `s`; confirm
            # against upstream before changing
            dfields.append(f)
    self.dfields = dfields
    # rfields (resource-fields): dfields resolved into a ResourceFields map
    rfields, joins, left, distinct = resource.resolve_selectors(dfields)
    rfields = Storage([(f.selector.replace("~", alias), f) for f in rfields])
    self.rfields = rfields
    # gfields (grouping-fields): fields to group the records by
    self.gfields = {pkey: rfields[pkey].colname,
                    rows: rfields[rows].colname if rows else None,
                    cols: rfields[cols].colname if cols else None}
    return
# -------------------------------------------------------------------------
def _represent_method(self, field):
    """
    Get the representation method for a field in the report

    @param field: the field selector
    @return: a callable that renders a single field value as a string
             (falls back to a callable returning None for unknown fields)
    """
    fallback = lambda value: None
    rfields = self.rfields
    if not field or field not in rfields:
        return fallback
    rfield = rfields[field]
    if rfield.field:
        # Real database field => use the standard representation
        def repr_method(value):
            return s3_represent_value(rfield.field, value,
                                      strip_markup=True)
        return repr_method
    if rfield.virtual:
        # Virtual field => stringify, stripping any markup
        stripper = S3MarkupStripper()
        def repr_method(val):
            if val is None:
                return "-"
            text = s3_unicode(val)
            if "<" not in text:
                return text
            stripper.feed(text)
            return stripper.stripped()
        return repr_method
    return fallback
# -------------------------------------------------------------------------
@staticmethod
def _totals(values, layers, append=None):
    """
    Get the totals of a row/column/report

    @param values: the values dictionary
    @param layers: the layers
    @param append: callback to collect the totals for JSON data
                   (currently only collects the first layer)
    @return: the totals of all layers, joined by " / "
    """
    totals = []
    for layer in layers:
        f, m = layer
        value = values[layer]
        if m == "list":
            # For list-aggregations, the total is the number of values
            value = len(value) if value else 0
        if append is not None and not totals:
            # Only the first layer's total is passed to the callback
            append(value)
        totals.append(s3_unicode(IS_NUMBER.represent(value)))
    return " / ".join(totals)
# -------------------------------------------------------------------------
def _extract(self, row, field):
    """
    Extract a field value from a DAL row

    @param row: the row
    @param field: the fieldname (list_fields syntax)
    @return: the extracted value, or None if extraction fails
    @raise KeyError: for unknown field names
    """
    rfields = self.rfields
    if field not in rfields:
        raise KeyError("Invalid field name: %s" % field)
    try:
        return rfields[field].extract(row)
    except AttributeError:
        return None
# -------------------------------------------------------------------------
def _expand(self, row, axisfilter=None):
    """
    Expand a data frame row into a list of rows for list:type values

    @param row: the row
    @param axisfilter: dict of filtered field values by column names
    @return: list of dicts, one per combination of the list values
    @raise RuntimeError: if an axis filter eliminates all values of
                         a column
    """
    pairs = []
    for colname in self.gfields.values():
        if not colname:
            continue
        value = row[colname]
        if type(value) is not list:
            # Atomic value => single (column, value) pair
            pairs.append([(colname, value)])
        elif axisfilter and colname in axisfilter:
            # Keep only values that pass the axis filter
            matches = [(colname, v) for v in value
                       if v in axisfilter[colname]]
            if not matches:
                raise RuntimeError("record does not match query")
            pairs.append(matches)
        else:
            pairs.append([(colname, v) for v in value])
    # Cartesian product of all per-column value lists
    return [dict(i) for i in product(*pairs)]
# -------------------------------------------------------------------------
@staticmethod
def _get_field_label(rfields, field, resource, key):
    """
    Get the label for a field

    @param rfields: the resource field map
    @param field: the key for the resource field map
    @param resource: the S3Resource
    @param key: the key for the report_options
    @return: the label (empty string for unknown fields)
    """
    DEFAULT = ""
    if field not in rfields:
        return DEFAULT
    rfield = rfields[field]
    # Prefer a custom (label, selector) tuple from the report options,
    # falling back to the list_fields configuration
    get_config = resource.get_config
    fields = None
    report_options = get_config("report_options")
    if report_options and key in report_options:
        fields = report_options[key]
    if not fields:
        fields = get_config("list_fields")
    prefix = resource.prefix_selector
    selector = prefix(rfield.selector)
    if fields:
        for f in fields:
            if isinstance(f, (tuple, list)) and prefix(f[1]) == selector:
                return f[0]
    # Otherwise use the field's own label
    if not rfield:
        return DEFAULT
    if rfield.ftype == "id":
        return current.T("Records")
    return rfield.label
# -------------------------------------------------------------------------
@classmethod
def _get_method_label(cls, code):
    """
    Get a label for a method

    @param code: the method code (None defaults to "list")
    @return: the label (lazyT), or None for unsupported methods
    """
    if code is None:
        code = "list"
    methods = cls.METHODS
    if code not in methods:
        return None
    return current.T(methods[code])
# END =========================================================================
| 38.066028 | 133 | 0.457923 |
87d809e99021a385f9a913188cfe0f0e53382d26 | 8,187 | py | Python | src/deepnox/loggers/formatters/base_formatter.py | deepnox-io/python-wipbox | cd919ddb551b0b80f0c162c2d5e99a0ba6c81bba | [
"MIT"
] | 2 | 2022-01-19T11:31:35.000Z | 2022-01-19T11:50:07.000Z | src/deepnox/loggers/formatters/base_formatter.py | deepnox-io/python-wipbox | cd919ddb551b0b80f0c162c2d5e99a0ba6c81bba | [
"MIT"
] | 3 | 2022-01-20T04:26:44.000Z | 2022-03-22T02:03:49.000Z | src/deepnox/loggers/formatters/base_formatter.py | deepnox-io/python-wipbox | cd919ddb551b0b80f0c162c2d5e99a0ba6c81bba | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Module: deepnox.loggers.formatters.base_formatter
This file is a part of python-wipbox project.
(c) 2021, Deepnox SAS.
"""
import logging
import os
import socket
import traceback
from datetime import datetime
from typing import Any
from deepnox.serializers.base_serializer import BaseSerializer
def _value(record: logging.LogRecord, field_name_or_value: Any) -> Any:
    """
    Retrieve value from record if possible. Otherwise use value.

    :param record: The record to extract a field named as in field_name_or_value.
    :param field_name_or_value: The field name to extract from record or the
        default value to use if not present.
    :return: The record attribute, or the literal fallback value.
    """
    # IDIOM: the three-argument getattr replaces the original
    # try/except AttributeError with identical semantics.
    return getattr(record, field_name_or_value, field_name_or_value)
class BaseFormatter(logging.Formatter):
"""
The base class of extended logging formatter.
"""
def __init__(
    self,
    *args,
    fields: dict = None,
    serializer: BaseSerializer = None,
    **kwargs
):
    """
    :param fields: Mapping of output field names to record attribute names
        (or literal values); may contain a "tags" entry with a list of tags.
    :param serializer: Serializer used to dump the final message dict.
    """
    super().__init__(*args, **kwargs)
    self.fields: dict = fields or {}
    self._internal_serializer: BaseSerializer = serializer
    # logging.Formatter.usesTime(): True if any output field wants asctime
    self.usesTime = lambda: "asctime" in self.fields.values()
    self.hostname = socket.gethostname()
    # Optional list of tags, attached by add_tag()
    self.tags = self.fields.get("tags") or []
# def format(self, record: logging.LogRecord):
# # Let python set every additional record field
# super().format(record)
# print(record)
#
# message = {
# field_name: _value(record, field_value)
# for field_name, field_value in self.fields.items()
# }
# if isinstance(record.msg, collections.abc.Mapping):
# message.update(record.msg)
# else:
# message["msg"] = super().formatMessage(record)
#
# if record.exc_info:
# message["exception"] = {
# "type": record.exc_info[0].__name__,
# "message": str(record.exc_info[1]),
# "stack": self.formatException(record.exc_info),
# }
#
# return (
# super().formatMessage(record)
# if (len(message) == 1 and "msg" in message)
# else json.dumps(message)
# )
def formatMessage(self, record: logging.LogRecord) -> str:
    # Deliberately a no-op: message assembly happens in format()/get_message(),
    # so the base class' %-style merging is skipped for speed.
    return ""
def add_extra_fields(self, message: dict, record: logging.LogRecord):
    """Add extra fields.

    Stores the record's non-standard attributes under the "extra" key.

    :param message: Message dict to complete.
    :param record: The loggers record.
    :return: The formatted record.
    """
    message["extra"] = self.get_extra_fields(record)
    return message
def format(self, record: logging.LogRecord):
    """Format record.

    Shortens the record's path, builds the base message dict, merges in
    the extra fields and (for exceptions) the debug fields, then dumps
    it with the configured serializer.

    :param record: The loggers record.
    :return: The formatted record.
    """
    record = BaseFormatter.get_short_path(record)
    content = self.get_message(record)
    content = self.add_extra_fields(content, record)
    content = self.add_debug(content, record)
    return self._internal_serializer.dump(content)
@staticmethod
def get_short_path(record: logging.LogRecord):
    """Shorten the record's filename and store it on record.pathname.

    Basenames longer than 20 characters are abbreviated to the first
    three characters, a tilde, and the last sixteen.

    :param record: The loggers record.
    :return: The same record, with ``pathname`` rewritten.
    """
    name = os.path.basename(record.filename)
    if len(name) > 20:
        name = "{}~{}".format(name[:3], name[-16:])
    record.pathname = name
    return record
@classmethod
def format_timestamp(cls, time):
    """Format a UNIX timestamp as an ISO-8601 UTC string with
    millisecond precision, e.g. ``2021-01-02T03:04:05.678Z``.

    :param time: Seconds since the epoch (int or float).
    :return: The ISO formatted time.
    """
    ts = datetime.utcfromtimestamp(time)
    # FIX: use integer division for the millisecond part; the original
    # "%03d" % (ts.microsecond / 1000) relied on %d silently truncating
    # a float.
    return "%s.%03dZ" % (ts.strftime("%Y-%m-%dT%H:%M:%S"),
                         ts.microsecond // 1000)
@classmethod
def format_exception(cls, exc_info: Exception):
    """Render an exc_info triple as a list of stripped traceback lines.

    :param exc_info: An ``(type, value, traceback)`` triple as returned
        by ``sys.exc_info()``, or a falsy value.
    :return: The non-empty stripped traceback lines, or None when there
        is nothing to format.
    """
    if not exc_info:
        return
    trace = traceback.format_exception(*exc_info)
    if not (isinstance(trace, list) and trace[0] != "NoneType: None"):
        return
    # Join the multi-line traceback blocks, split into individual
    # lines, and drop all empty ones
    lines = (part.strip() for part in "".join(trace).split("\n"))
    return [line for line in lines if len(line) > 0]
def add_debug(self, message: dict, record: logging.LogRecord):
    """If the record carries exception info, attach debug fields.

    :param message: Message dict to complete (the original annotation
        said ``str``, but a dict is what callers pass and mutate).
    :param record: The loggers record.
    :return: The formatted record.
    """
    if record.exc_info:
        message["debug"] = self.get_debug_fields(record)
    return message
def add_tag(self, message, record: logging.LogRecord):
    # Attach the configured tags under message["metadata"]["tags"].
    # NOTE(review): assumes message already has a "metadata" dict, and
    # the `record` parameter is unused — confirm against callers.
    if len(self.tags) > 0:
        message["metadata"]["tags"] = self.tags
    return message
def get_extra_fields(self, record: logging.LogRecord):
    """Returns extra fields of the provided loggers record.

    The skip list contains the standard attributes listed in
    [Python loggers documentation](http://docs.python.org/library/logging.html#logrecord-attributes);
    everything else on the record is treated as a user-supplied extra.

    :param record: The record.
    :return: Dict of extra fields (values repr()-ed when not JSON-safe).
    """
    # Standard LogRecord attributes that must not be duplicated into
    # "extra" (NOTE: "msecs" is listed twice — harmless for a membership
    # check, but could be deduplicated upstream)
    skip_list = (
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "id",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "msecs",
        "message",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "thread",
        "threadName",
        "stack_info",
    )
    # Values of these types are serializer-safe and passed through as-is
    easy_types = (str, bool, dict, float, int, list, type(None))
    fields = {}
    for key, value in record.__dict__.items():
        if key not in skip_list:
            if isinstance(value, easy_types):
                fields[key] = value
            else:
                try:
                    fields[key] = repr(value)
                except TypeError as e:
                    # repr() itself failed => placeholder text
                    fields[key] = "Unavailable representation: __repr__(self)"
    return fields
def get_debug_fields(self, record: logging.LogRecord):
    """Returns debug fields of the provided loggers record.

    :record: The loggers record.
    :returns: debug fields of the provided loggers record.
    """
    fields = {
        "stack_trace": self.format_exception(record.exc_info),
        "lineno": record.lineno,
        "process": record.process,
        "thread_name": record.threadName,
    }
    # funcName was added in 2.5
    # NOTE(review): this guard looks inverted — it copies the attribute
    # only when it is missing/falsy, which would then raise; presumably
    # the intent was `if getattr(...)`. Confirm before changing.
    if not getattr(record, "funcName", None):
        fields["funcName"] = record.funcName
    # processName was added in 2.6
    # NOTE(review): same inverted-guard concern as above.
    if not getattr(record, "processName", None):
        fields["processName"] = record.processName
    return fields
def get_message(self, record: logging.LogRecord):
    """Build the base message dict for a record.

    :param record: The loggers record.
    :return: Dict with timestamp, message text, host and logger metadata.
    """
    # Create app message dict ("date" and "@timestamp" intentionally
    # carry the same formatted value)
    return {
        "date": self.format_timestamp(record.created),
        "message": record.getMessage(),
        "@timestamp": self.format_timestamp(record.created),
        "hostname": self.hostname,
        "logger_name": record.name,
        "level": record.levelname,
        "pathname": record.pathname,
    }
def dump(self, message: object = None):
    """
    Serialize a message using the `dump` method of the selected
    serializer (a plain call, not a coroutine).

    :param message: The record as dict.
    :type message: dict
    :return: The serialized log.
    :rtype: str
    """
    return self._internal_serializer.dump(message)
| 30.662921 | 152 | 0.562844 |
b4586f8e70b068082992bbcda3c20bf2a25e8ac0 | 1,473 | py | Python | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_ML.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_ML.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/phonenumbers/shortdata/region_ML.py | exdeam/opencrm | dfdcfdf99f0b42eb3959171927cb6574583f5ee0 | [
"MIT"
] | null | null | null | """Auto-generated file, do not edit by hand. ML metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_ML = PhoneMetadata(id='ML', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[136-8]\\d{1,4}', possible_length=(2, 3, 4, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='1[578]|35200|67(?:00|7\\d)|74(?:02|4\\d)|8000\\d', example_number='15', possible_length=(2, 4, 5)),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:12|800)2\\d|3(?:52(?:11|2[02]|3[04-6]|99)|7574)', example_number='1220', possible_length=(4, 5)),
emergency=PhoneNumberDesc(national_number_pattern='1[578]', example_number='15', possible_length=(2,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:[013-9]\\d|2)|2(?:1[02-469]|2[13])|[578])|3(?:5(?:0(?:35|57)|2\\d\\d)|[67]\\d{3})|67(?:0[09]|[59]9|77|8[89])|74(?:0[02]|44|55)|800[0-2][12]', example_number='15', possible_length=(2, 3, 4, 5)),
standard_rate=PhoneNumberDesc(national_number_pattern='37(?:433|575)|7400|8001\\d', example_number='7400', possible_length=(4, 5)),
carrier_specific=PhoneNumberDesc(national_number_pattern='(?:3(?:503|[67]\\d\\d)|800\\d)\\d', example_number='35030', possible_length=(5,)),
sms_services=PhoneNumberDesc(national_number_pattern='3(?:6\\d{3}|7(?:4(?:0[24-9]|[1-9]\\d)|5\\d\\d))|7400', example_number='7400', possible_length=(4, 5)),
short_data=True)
| 105.214286 | 257 | 0.696538 |
f37e6bf5da7e1bbc794d70863a687876d8d7a512 | 1,629 | py | Python | ansible/lib/ansible/modules/core/utilities/logic/assert.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/core/utilities/logic/assert.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/core/utilities/logic/assert.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: assert
short_description: Asserts given expressions are true
description:
- This module asserts that given expressions are true with an optional custom message.
version_added: "1.5"
options:
that:
description:
- "A string expression of the same form that can be passed to the 'when' statement"
- "Alternatively, a list of string expressions"
required: true
msg:
description:
- "The customized message used for a failing assertion"
required: false
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
- assert: { that: "ansible_os_family != 'RedHat'" }
- assert:
that:
- "'foo' in some_command_result.stdout"
- "number_of_the_counting == 3"
- assert:
that:
- "my_param <= 100"
- "my_param >= 0"
msg: "'my_param' must be between 0 and 100"
'''
| 28.578947 | 91 | 0.691222 |
23b23910d540b57e25c1a4730a0263a90ebf3465 | 3,228 | py | Python | packages/arb-compiler-evm/tests/sol-syscall/truffle_runner.py | pangxieshousi/arbitrum | 5a5d2c26970cb0495cd6772a7a895b6a0a90c413 | [
"Apache-2.0"
] | 1 | 2019-09-07T00:12:06.000Z | 2019-09-07T00:12:06.000Z | packages/arb-compiler-evm/tests/sol-syscall/truffle_runner.py | pangxieshousi/arbitrum | 5a5d2c26970cb0495cd6772a7a895b6a0a90c413 | [
"Apache-2.0"
] | null | null | null | packages/arb-compiler-evm/tests/sol-syscall/truffle_runner.py | pangxieshousi/arbitrum | 5a5d2c26970cb0495cd6772a7a895b6a0a90c413 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, Offchain Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eth_utils
import json
import sys
import arbitrum as arb
from arbitrum.evm.contract import create_evm_vm
from arbitrum.evm.contract_abi import ContractABI, create_output_handler
def run_until_halt(vm):
    """
    Step the Arbitrum VM until it halts or blocks, printing diagnostics.

    :param vm: the VM to run
    :return: the log entries produced by the run (vm.logs is reset)
    """
    i = 0
    while True:
        try:
            run = arb.run_vm_once(vm)
            if not run:
                print("Hit blocked insn")
                break
            i += 1
        except Exception as err:
            # Dump the failing instruction and its neighbourhood for debugging
            print("Error at", vm.pc.pc - 1, vm.code[vm.pc.pc - 1])
            print("Context", vm.code[vm.pc.pc - 6 : vm.pc.pc + 4])
            raise err
        if vm.halted:
            break
    # Hand the collected logs to the caller and reset the VM's buffer
    logs = vm.logs
    vm.logs = []
    print("Ran VM for {} steps".format(i))
    return logs
def make_msg_val(calldata):
    # Wrap calldata in the 4-tuple message value expected by the VM;
    # the trailing fields are zeroed here — presumably block metadata,
    # TODO confirm their meaning against the arbitrum package.
    return arb.value.Tuple([calldata, 0, 0, 0])
def main():
    """
    Load compiled Truffle contracts, build an EVM VM, feed it a sequence
    of ERC20/ERC721 deposit and transfer messages, and print the logs.
    """
    if len(sys.argv) != 2:
        raise Exception("Call as truffle_runner.py [compiled.json]")
    with open(sys.argv[1]) as json_file:
        raw_contracts = json.load(json_file)
    contracts = [ContractABI(contract) for contract in raw_contracts]
    vm = create_evm_vm(contracts)
    output_handler = create_output_handler(contracts)
    # Dump the generated instruction stream for debugging
    with open("code.txt", "w") as f:
        for instr in vm.code:
            f.write("{} {}".format(instr, instr.path))
            f.write("\n")
    contract = contracts[0]
    # NOTE(review): person_b/person_b_int are defined but never used below
    person_a = "0x1111111122222222000000000000000000000000"
    person_b = "0x2222222222222222222222222222222222222222"
    person_a_int = eth_utils.to_int(hexstr=person_a)
    person_b_int = eth_utils.to_int(hexstr=person_b)
    print("person_a_int", person_a_int)
    print("person_b_int", person_b_int)
    erc20_address = "0x89d24A6b4CcB1B6fAA2625fE562bDD9a23260359"
    erc721_address = "0x06012c8cf97BEaD5deAe237070F9587f8E7A266d"
    # ERC20: deposit (token type suffix "00"), then send
    vm.env.send_message(
        [
            make_msg_val(contract.deposit(10)),
            person_a_int,
            10000000,
            eth_utils.to_int(hexstr=erc20_address + "00"),
        ]
    )
    vm.env.send_message(
        [make_msg_val(contract.sendERC20(12, erc20_address, 5432)), person_a_int, 0, 0]
    )
    # ERC721: deposit (token type suffix "01"), then send
    vm.env.send_message(
        [
            make_msg_val(contract.deposit(10)),
            person_a_int,
            10000000,
            eth_utils.to_int(hexstr=erc721_address + "01"),
        ]
    )
    vm.env.send_message(
        [
            make_msg_val(contract.sendERC721(12, erc721_address, 10000000)),
            person_a_int,
            0,
            0,
        ]
    )
    vm.env.deliver_pending()
    logs = run_until_halt(vm)
    for log in logs:
        print(output_handler(log))
main()
| 28.566372 | 87 | 0.635068 |
caa4ae7f3f2df81b353e8c96f0da934c63006eb8 | 47,702 | py | Python | resources/python/flink/plan/DataSet.py | ATCP/flink-1.1.1 | bf9aedcd16239409aa864b207f47739d3bf542ce | [
"BSD-3-Clause"
] | null | null | null | resources/python/flink/plan/DataSet.py | ATCP/flink-1.1.1 | bf9aedcd16239409aa864b207f47739d3bf542ce | [
"BSD-3-Clause"
] | null | null | null | resources/python/flink/plan/DataSet.py | ATCP/flink-1.1.1 | bf9aedcd16239409aa864b207f47739d3bf542ce | [
"BSD-3-Clause"
] | null | null | null | # ###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import types as TYPES
from flink.functions.Aggregation import AggregationFunction, Min, Max, Sum
from flink.plan.Constants import _Identifier, WriteMode, _createKeyValueTypeInfo, _createArrayTypeInfo
from flink.plan.OperationInfo import OperationInfo
from flink.functions.CoGroupFunction import CoGroupFunction
from flink.functions.FilterFunction import FilterFunction
from flink.functions.FlatMapFunction import FlatMapFunction
from flink.functions.CrossFunction import CrossFunction
from flink.functions.GroupReduceFunction import GroupReduceFunction
from flink.functions.JoinFunction import JoinFunction
from flink.functions.MapFunction import MapFunction
from flink.functions.MapPartitionFunction import MapPartitionFunction
from flink.functions.ReduceFunction import ReduceFunction
from flink.functions.KeySelectorFunction import KeySelectorFunction
class Stringify(MapFunction):
    """Renders any value as a string; tuples/lists become "(a, b, ...)" recursively."""
    def map(self, value):
        if not isinstance(value, (tuple, list)):
            return str(value)
        rendered = ", ".join([self.map(element) for element in value])
        return "(" + rendered + ")"
class CsvStringify(MapFunction):
    """
    Turns each record (a tuple/list of fields) into one CSV line.

    Top-level fields are joined with the configured field delimiter; nested
    tuples/lists inside a field are rendered as "(a, b, ...)".
    """
    def __init__(self, f_delim):
        super(CsvStringify, self).__init__()
        # CSV field delimiter used between top-level fields of a record.
        self.delim = f_delim
    def map(self, value):
        """Render one record: join its fields with the field delimiter."""
        return self.delim.join([self._map(field) for field in value])
    def _map(self, value):
        # Render a single field. Fix: recurse via _map (not map) so nested
        # containers are rendered "(a, b, ...)" like Stringify, instead of
        # re-joining their elements with the CSV delimiter and crashing on
        # scalar elements; also drop the stray bytes literal b", " which
        # cannot be joined with str pieces on Python 3.
        if isinstance(value, (tuple, list)):
            return "(" + ", ".join([self._map(x) for x in value]) + ")"
        else:
            return str(value)
class DataSink(object):
    """Handle for a terminal (sink) node of the plan; offers fluent configuration."""
    def __init__(self, env, info):
        # Claim the next unique operator id from the environment's counter.
        info.id = env._counter
        env._counter += 1
        self._env = env
        self._info = info
    def name(self, name):
        """Sets a custom name for this sink; returns self for chaining."""
        self._info.name = name
        return self
    def set_parallelism(self, parallelism):
        """Sets the parallelism of this sink; returns self for chaining."""
        self._info.parallelism.value = parallelism
        return self
class DataSet(object):
    """
    Represents one node of the Flink execution plan and exposes the Python
    DataSet API transformations. Each transformation appends a child
    OperationInfo to the plan graph held by the environment and returns a new
    set wrapping it; nothing is executed until the plan is run.
    """
    def __init__(self, env, info):
        self._env = env
        self._info = info
        # Claim the next unique operator id from the environment's counter.
        info.id = env._counter
        env._counter += 1
    def output(self, to_error=False):
        """
        Writes a DataSet to the standard output stream (stdout).

        :param to_error: If True, writes to stderr instead of stdout.
        """
        return self.map(Stringify())._output(to_error)
    def _output(self, to_error):
        # Internal: append a print sink node to the plan.
        child = OperationInfo()
        child_set = DataSink(self._env, child)
        child.identifier = _Identifier.SINK_PRINT
        child.parent = self._info
        child.to_err = to_error
        self._info.parallelism = child.parallelism
        self._info.sinks.append(child)
        self._env._sinks.append(child)
        return child_set
    def write_text(self, path, write_mode=WriteMode.NO_OVERWRITE):
        """
        Writes a DataSet as a text file to the specified location.

        :param path: The path pointing to the location the text file is written to.
        :param write_mode: OutputFormat.WriteMode value, indicating whether files should be overwritten
        """
        return self.map(Stringify())._write_text(path, write_mode)
    def _write_text(self, path, write_mode):
        # Internal: append a text-file sink node to the plan.
        child = OperationInfo()
        child_set = DataSink(self._env, child)
        child.identifier = _Identifier.SINK_TEXT
        child.parent = self._info
        child.path = path
        child.write_mode = write_mode
        self._info.parallelism = child.parallelism
        self._info.sinks.append(child)
        self._env._sinks.append(child)
        return child_set
    def write_csv(self, path, line_delimiter="\n", field_delimiter=',', write_mode=WriteMode.NO_OVERWRITE):
        """
        Writes a Tuple DataSet as a CSV file to the specified location.

        Note: Only a Tuple DataSet can be written as a CSV file.
        :param path: The path pointing to the location the CSV file is written to.
        :param write_mode: OutputFormat.WriteMode value, indicating whether files should be overwritten
        """
        return self.map(CsvStringify(field_delimiter))._write_csv(path, line_delimiter, field_delimiter, write_mode)
    def _write_csv(self, path, line_delimiter, field_delimiter, write_mode):
        # Internal: append a CSV sink node to the plan.
        child = OperationInfo()
        child_set = DataSink(self._env, child)
        child.identifier = _Identifier.SINK_CSV
        child.path = path
        child.parent = self._info
        child.delimiter_field = field_delimiter
        child.delimiter_line = line_delimiter
        child.write_mode = write_mode
        self._info.parallelism = child.parallelism
        self._info.sinks.append(child)
        self._env._sinks.append(child)
        return child_set
    def reduce_group(self, operator, combinable=False):
        """
        Applies a GroupReduce transformation.

        The transformation calls a GroupReduceFunction once for each group of the DataSet, or once when applied on a
        non-grouped DataSet.
        The GroupReduceFunction can iterate over all elements of the DataSet and
        emit any number of output elements including none.
        :param operator: The GroupReduceFunction that is applied on the DataSet.
        :return: A GroupReduceOperator that represents the reduced DataSet.
        """
        child = self._reduce_group(operator, combinable)
        child_set = OperatorSet(self._env, child)
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def _reduce_group(self, operator, combinable=False):
        # Internal: build (but do not register) a group-reduce node.
        # Plain functions are wrapped into a GroupReduceFunction instance.
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = GroupReduceFunction()
            operator.reduce = f
        child = OperationInfo()
        child.identifier = _Identifier.GROUPREDUCE
        child.parent = self._info
        child.operator = operator
        child.types = _createArrayTypeInfo()
        child.name = "PythonGroupReduce"
        return child
    def reduce(self, operator):
        """
        Applies a Reduce transformation on a non-grouped DataSet.

        The transformation consecutively calls a ReduceFunction until only a single element remains which is the result
        of the transformation. A ReduceFunction combines two elements into one new element of the same type.
        :param operator: The ReduceFunction that is applied on the DataSet.
        :return: A ReduceOperator that represents the reduced DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = ReduceFunction()
            operator.reduce = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.REDUCE
        child.parent = self._info
        child.operator = operator
        child.name = "PythonReduce"
        child.types = _createArrayTypeInfo()
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def aggregate(self, aggregation, field):
        """
        Applies an Aggregate transformation (using a GroupReduceFunction) on a non-grouped Tuple DataSet.

        :param aggregation: The built-in aggregation function to apply on the DataSet.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated DataSet.
        """
        child = self._reduce_group(AggregationFunction(aggregation, field), combinable=True)
        child.name = "PythonAggregate" + aggregation.__name__ # include aggregation type in name
        child_set = AggregateOperator(self._env, child)
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def min(self, field):
        """
        Syntactic sugar for the minimum aggregation.

        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated DataSet.
        """
        return self.aggregate(Min, field)
    def max(self, field):
        """
        Syntactic sugar for the maximum aggregation.

        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated DataSet.
        """
        return self.aggregate(Max, field)
    def sum(self, field):
        """
        Syntactic sugar for the sum aggregation.

        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated DataSet.
        """
        return self.aggregate(Sum, field)
    def project(self, *fields):
        """
        Applies a Project transformation on a Tuple DataSet.

        Note: Only Tuple DataSets can be projected. The transformation projects each Tuple of the DataSet onto a
        (sub)set of fields.
        :param fields: The field indexes of the input tuples that are retained.
                       The order of fields in the output tuple corresponds to the order of field indexes.
        :return: The projected DataSet.
        """
        return self.map(lambda x: tuple([x[key] for key in fields]))
    def group_by(self, *keys):
        """
        Groups a Tuple DataSet using field position keys.

        Note: Field position keys can only be specified for Tuple DataSets.
        The field position keys specify the fields of Tuples on which the DataSet is grouped.
        This method returns an UnsortedGrouping on which one of the following grouping transformation can be applied.
        sort_group() to get a SortedGrouping.
        reduce() to apply a Reduce transformation.
        group_reduce() to apply a GroupReduce transformation.
        :param keys: One or more field positions on which the DataSet will be grouped.
        :return: A Grouping on which a transformation needs to be applied to obtain a transformed DataSet.
        """
        # The identity map is a placeholder; the grouping's _finalize() later
        # rewrites its map function to a (key, value) extractor.
        return self.map(lambda x: x)._group_by(keys)
    def _group_by(self, keys):
        # Internal: append a grouping node; the chain collects GROUP/SORT nodes
        # until a terminal transformation finalizes them.
        child = OperationInfo()
        child_chain = []
        child_set = UnsortedGrouping(self._env, child, child_chain)
        child.identifier = _Identifier.GROUP
        child.parent = self._info
        child.keys = keys
        child_chain.append(child)
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def co_group(self, other_set):
        """
        Initiates a CoGroup transformation which combines the elements of two DataSets into one DataSet.

        It groups each DataSet individually on a key and gives groups of both DataSets with equal keys together into a
        CoGroupFunction. If a DataSet has a group with no matching key in the other DataSet,
        the CoGroupFunction is called with an empty group for the non-existing group.
        The CoGroupFunction can iterate over the elements of both groups and return any number of elements
        including none.
        :param other_set: The other DataSet of the CoGroup transformation.
        :return: A CoGroupOperator to continue the definition of the CoGroup transformation.
        """
        child = OperationInfo()
        other_set._info.children.append(child)
        child_set = CoGroupOperatorWhere(self._env, child)
        child.identifier = _Identifier.COGROUP
        child.parent_set = self
        child.other_set = other_set
        return child_set
    def cross(self, other_set):
        """
        Initiates a Cross transformation which combines the elements of two DataSets into one DataSet.

        It builds all pair combinations of elements of both DataSets, i.e., it builds a Cartesian product.
        :param other_set: The other DataSet with which this DataSet is crossed.
        :return: A CrossOperator to continue the definition of the Cross transformation.
        """
        return self._cross(other_set, _Identifier.CROSS)
    def cross_with_huge(self, other_set):
        """
        Initiates a Cross transformation which combines the elements of two DataSets into one DataSet.

        It builds all pair combinations of elements of both DataSets, i.e., it builds a Cartesian product.
        This method also gives the hint to the optimizer that
        the second DataSet to cross is much larger than the first one.
        :param other_set: The other DataSet with which this DataSet is crossed.
        :return: A CrossOperator to continue the definition of the Cross transformation.
        """
        return self._cross(other_set, _Identifier.CROSSH)
    def cross_with_tiny(self, other_set):
        """
        Initiates a Cross transformation which combines the elements of two DataSets into one DataSet.

        It builds all pair combinations of elements of both DataSets, i.e., it builds a Cartesian product.
        This method also gives the hint to the optimizer that
        the second DataSet to cross is much smaller than the first one.
        :param other_set: The other DataSet with which this DataSet is crossed.
        :return: A CrossOperator to continue the definition of the Cross transformation.
        """
        return self._cross(other_set, _Identifier.CROSST)
    def _cross(self, other_set, identifier):
        # Internal: append a cross node linked to both parent sets.
        child = OperationInfo()
        child_set = CrossOperator(self._env, child)
        child.identifier = identifier
        child.parent = self._info
        child.other = other_set._info
        self._info.children.append(child)
        other_set._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def distinct(self, *fields):
        """
        Returns a distinct set of a tuple DataSet using field position keys.

        :param fields: One or more field positions on which the distinction of the DataSet is decided.
        :return: The distinct DataSet.
        """
        f = None
        if len(fields) == 0:
            # No keys given: use the whole value as the key.
            f = lambda x: (x,)
            fields = (0,)
        if isinstance(fields[0], TYPES.FunctionType):
            f = lambda x: (fields[0](x),)
        if isinstance(fields[0], KeySelectorFunction):
            f = lambda x: (fields[0].get_key(x),)
        if f is None:
            f = lambda x: tuple([x[key] for key in fields])
        return self.map(lambda x: (f(x), x)).name("DistinctPreStep")._distinct(tuple([x for x in range(len(fields))]))
    def _distinct(self, fields):
        # Internal: append a distinct node on the (key, value) pre-step output.
        self._info.types = _createKeyValueTypeInfo(len(fields))
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.DISTINCT
        child.parent = self._info
        child.keys = fields
        self._info.parallelism = child.parallelism
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def filter(self, operator):
        """
        Applies a Filter transformation on a DataSet.

        The transformation calls a FilterFunction for each element of the DataSet and retains only those element
        for which the function returns true. Elements for which the function returns false are filtered.
        :param operator: The FilterFunction that is called for each element of the DataSet.
        :return: A FilterOperator that represents the filtered DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = FilterFunction()
            operator.filter = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.FILTER
        child.parent = self._info
        child.operator = operator
        child.name = "PythonFilter"
        child.types = _createArrayTypeInfo()
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def first(self, count):
        """
        Returns a new set containing the first n elements in this DataSet.

        :param count: The desired number of elements.
        :return: A DataSet containing the elements.
        """
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.FIRST
        child.parent = self._info
        child.count = count
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def flat_map(self, operator):
        """
        Applies a FlatMap transformation on a DataSet.

        The transformation calls a FlatMapFunction for each element of the DataSet.
        Each FlatMapFunction call can return any number of elements including none.
        :param operator: The FlatMapFunction that is called for each element of the DataSet.
        :return: A FlatMapOperator that represents the transformed DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = FlatMapFunction()
            operator.flat_map = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.FLATMAP
        child.parent = self._info
        child.operator = operator
        child.types = _createArrayTypeInfo()
        child.name = "PythonFlatMap"
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def join(self, other_set):
        """
        Initiates a Join transformation.

        A Join transformation joins the elements of two DataSets on key equality.
        :param other_set: The other DataSet with which this DataSet is joined
        :return: A JoinOperator to continue the definition of the Join transformation.
        """
        return self._join(other_set, _Identifier.JOIN)
    def join_with_huge(self, other_set):
        """
        Initiates a Join transformation.

        A Join transformation joins the elements of two DataSets on key equality.
        This method also gives the hint to the optimizer that
        the second DataSet to join is much larger than the first one.
        :param other_set: The other DataSet with which this DataSet is joined
        :return: A JoinOperator to continue the definition of the Join transformation.
        """
        return self._join(other_set, _Identifier.JOINH)
    def join_with_tiny(self, other_set):
        """
        Initiates a Join transformation.

        A Join transformation joins the elements of two DataSets on key equality.
        This method also gives the hint to the optimizer that
        the second DataSet to join is much smaller than the first one.
        :param other_set: The other DataSet with which this DataSet is joined
        :return: A JoinOperator to continue the definition of the Join transformation.
        """
        return self._join(other_set, _Identifier.JOINT)
    def _join(self, other_set, identifier):
        # Internal: create a join node; it is linked into the graph later by
        # JoinOperatorWhere/JoinOperatorTo once the keys are known.
        child = OperationInfo()
        child_set = JoinOperatorWhere(self._env, child)
        child.identifier = identifier
        child.parent_set = self
        child.other_set = other_set
        return child_set
    def map(self, operator):
        """
        Applies a Map transformation on a DataSet.

        The transformation calls a MapFunction for each element of the DataSet.
        Each MapFunction call returns exactly one element.
        :param operator: The MapFunction that is called for each element of the DataSet.
        :return: A MapOperator that represents the transformed DataSet
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = MapFunction()
            operator.map = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.MAP
        child.parent = self._info
        child.operator = operator
        child.types = _createArrayTypeInfo()
        child.name = "PythonMap"
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def map_partition(self, operator):
        """
        Applies a MapPartition transformation on a DataSet.

        The transformation calls a MapPartitionFunction once per parallel partition of the DataSet.
        The entire partition is available through the given Iterator.
        Each MapPartitionFunction may return an arbitrary number of results.
        The number of elements that each instance of the MapPartition function
        sees is non deterministic and depends on the degree of parallelism of the operation.
        :param operator: The MapFunction that is called for each element of the DataSet.
        :return: A MapOperator that represents the transformed DataSet
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = MapPartitionFunction()
            operator.map_partition = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.MAPPARTITION
        child.parent = self._info
        child.operator = operator
        child.types = _createArrayTypeInfo()
        child.name = "PythonMapPartition"
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def partition_by_hash(self, *fields):
        """
        Hash-partitions a DataSet on the specified key fields.

        Keys may be field positions, a plain function, or a KeySelectorFunction.
        :param fields: The fields on which the DataSet is hash-partitioned.
        :return: The partitioned DataSet.
        """
        f = None
        if len(fields) == 0:
            # No keys given: use the whole value as the key. Fix: also set a
            # placeholder field index, mirroring distinct(); previously the
            # fields[0] checks below raised IndexError for an empty call.
            f = lambda x: (x,)
            fields = (0,)
        if isinstance(fields[0], TYPES.FunctionType):
            f = lambda x: (fields[0](x),)
        if isinstance(fields[0], KeySelectorFunction):
            f = lambda x: (fields[0].get_key(x),)
        if f is None:
            f = lambda x: tuple([x[key] for key in fields])
        return self.map(lambda x: (f(x), x)).name("HashPartitionPreStep")._partition_by_hash(tuple([x for x in range(len(fields))]))
    def _partition_by_hash(self, fields):
        """
        Hash-partitions a DataSet on the specified key fields.

        Important: This operation shuffles the whole DataSet over the network and can take significant amount of time.
        :param fields: The field indexes on which the DataSet is hash-partitioned.
        :return: The partitioned DataSet.
        """
        self._info.types = _createKeyValueTypeInfo(len(fields))
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.PARTITION_HASH
        child.parent = self._info
        child.keys = fields
        self._info.parallelism = child.parallelism
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def rebalance(self):
        """
        Enforces a re-balancing of the DataSet, i.e., the DataSet is evenly distributed over all parallel instances of the
        following task. This can help to improve performance in case of heavy data skew and compute intensive operations.

        Important: This operation shuffles the whole DataSet over the network and can take significant amount of time.
        :return: The re-balanced DataSet.
        """
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.REBALANCE
        child.parent = self._info
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def union(self, other_set):
        """
        Creates a union of this DataSet with an other DataSet.

        The other DataSet must be of the same data type.
        :param other_set: The other DataSet which is unioned with the current DataSet.
        :return: The resulting DataSet.
        """
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.UNION
        child.parent = self._info
        child.other = other_set._info
        self._info.children.append(child)
        other_set._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def name(self, name):
        """Sets a custom name for this operator; returns self for chaining."""
        self._info.name = name
        return self
    def set_parallelism(self, parallelism):
        """Sets the parallelism of this operator; returns self for chaining."""
        self._info.parallelism.value = parallelism
        return self
    def count_elements_per_partition(self):
        """
        Method that goes over all the elements in each partition in order to retrieve the total number of elements.

        :return: A DataSet containing Tuples of subtask index, number of elements mappings.
        """
        class CountElementsPerPartitionMapper(MapPartitionFunction):
            def map_partition(self, iterator, collector):
                counter = 0
                for x in iterator:
                    counter += 1
                collector.collect((self.context.get_index_of_this_subtask(), counter))
        return self.map_partition(CountElementsPerPartitionMapper())
    def zip_with_index(self):
        """
        Method that assigns a unique Long value to all elements of the DataSet. The generated values are consecutive.

        :return: A DataSet of Tuples consisting of consecutive ids and initial values.
        """
        element_count = self.count_elements_per_partition()
        class ZipWithIndexMapper(MapPartitionFunction):
            start = -1
            def _run(self):
                # Each subtask starts numbering after the total element count of
                # all subtasks with a lower index (broadcast as "counts").
                offsets = self.context.get_broadcast_variable("counts")
                offsets = sorted(offsets, key=lambda t: t[0]) # sort by task ID
                offsets = collections.deque(offsets)
                # compute the offset for each partition
                for i in range(self.context.get_index_of_this_subtask()):
                    self.start += offsets[i][1]
                super(ZipWithIndexMapper, self)._run()
            def map_partition(self, iterator, collector):
                for value in iterator:
                    self.start += 1
                    collector.collect((self.start, value))
        return self\
            .map_partition(ZipWithIndexMapper())\
            .with_broadcast_set("counts", element_count)
class OperatorSet(DataSet):
    """A DataSet produced by a user-defined-function operator; supports broadcast variables."""
    def __init__(self, env, info):
        super(OperatorSet, self).__init__(env, info)
    def with_broadcast_set(self, name, set):
        """Registers *set* as a broadcast variable reachable under *name* inside this operator."""
        link = OperationInfo()
        link.identifier = _Identifier.BROADCAST
        link.name = name
        link.parent = self._info
        link.other = set._info
        self._info.bcvars.append(link)
        set._info.children.append(link)
        self._env._broadcast.append(link)
        return self
class Grouping(object):
    """
    Base class for grouped DataSets. Collects the chain of GROUP/SORT nodes in
    _child_chain; a terminal transformation calls _finalize() (overridden by
    subclasses) to rewrite the chain into concrete key-extraction operators.
    """
    def __init__(self, env, info, child_chain):
        self._env = env
        self._child_chain = child_chain
        self._info = info
        # Claim the next unique operator id from the environment's counter.
        info.id = env._counter
        env._counter += 1
    def _finalize(self):
        # Hook for subclasses: rewrite grouping/sorting keys before a terminal
        # transformation is appended. Base implementation does nothing.
        pass
    def first(self, count):
        """
        Returns a new set containing the first n elements in this DataSet.
        :param count: The desired number of elements.
        :return: A DataSet containing the elements.
        """
        self._finalize()
        child = OperationInfo()
        child_set = DataSet(self._env, child)
        child.identifier = _Identifier.FIRST
        child.parent = self._info
        child.count = count
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def reduce_group(self, operator, combinable=False):
        """
        Applies a GroupReduce transformation.
        The transformation calls a GroupReduceFunction once for each group of the DataSet, or one when applied on a
        non-grouped DataSet.
        The GroupReduceFunction can iterate over all elements of the DataSet and
        emit any number of output elements including none.
        :param operator: The GroupReduceFunction that is applied on the DataSet.
        :return:A GroupReduceOperator that represents the reduced DataSet.
        """
        child = self._reduce_group(operator, combinable)
        child_set = OperatorSet(self._env, child)
        self._info.parallelism = child.parallelism
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def _reduce_group(self, operator, combinable=False):
        # Internal: finalize the grouping, then build a group-reduce node keyed
        # on the (rewritten) grouping keys. Plain functions are wrapped.
        self._finalize()
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = GroupReduceFunction()
            operator.reduce = f
        child = OperationInfo()
        child.identifier = _Identifier.GROUPREDUCE
        child.parent = self._info
        child.operator = operator
        child.types = _createArrayTypeInfo()
        child.name = "PythonGroupReduce"
        child.key1 = self._child_chain[0].keys
        return child
    def sort_group(self, field, order):
        """
        Sorts Tuple elements within a group on the specified field in the specified Order.
        Note: Only groups of Tuple elements can be sorted.
        Groups can be sorted by multiple fields by chaining sort_group() calls.
        :param field:The Tuple field on which the group is sorted.
        :param order: The Order in which the specified Tuple field is sorted. See DataSet.Order.
        :return:A SortedGrouping with specified order of group element.
        """
        child = OperationInfo()
        child_set = SortedGrouping(self._env, child, self._child_chain)
        child.identifier = _Identifier.SORT
        child.parent = self._info
        child.field = field
        child.order = order
        self._info.children.append(child)
        self._child_chain.append(child)
        self._env._sets.append(child)
        return child_set
class UnsortedGrouping(Grouping):
    """A grouped DataSet without intra-group sorting; produced by group_by()."""
    def __init__(self, env, info, child_chain):
        super(UnsortedGrouping, self).__init__(env, info, child_chain)
    def reduce(self, operator):
        """
        Applies a Reduce transformation on a grouped DataSet.
        The transformation consecutively calls a ReduceFunction until only a single element remains which is the result
        of the transformation. A ReduceFunction combines two elements into one new element of the same type.
        :param operator:The ReduceFunction that is applied on the DataSet.
        :return:A ReduceOperator that represents the reduced DataSet.
        """
        self._finalize()
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = ReduceFunction()
            operator.reduce = f
        child = OperationInfo()
        child_set = OperatorSet(self._env, child)
        child.identifier = _Identifier.REDUCE
        child.parent = self._info
        child.operator = operator
        child.name = "PythonReduce"
        child.types = _createArrayTypeInfo()
        child.key1 = self._child_chain[0].keys
        self._info.parallelism = child.parallelism
        self._info.children.append(child)
        self._env._sets.append(child)
        return child_set
    def aggregate(self, aggregation, field):
        """
        Applies an Aggregate transformation (using a GroupReduceFunction) on a Tuple UnsortedGrouping.
        :param aggregation: The built-in aggregation function to apply on the UnsortedGrouping.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated UnsortedGrouping.
        """
        child = self._reduce_group(AggregationFunction(aggregation, field), combinable=True)
        child.name = "PythonAggregate" + aggregation.__name__ # include aggregation type in name
        child_set = AggregateOperator(self._env, child)
        self._env._sets.append(child)
        self._info.children.append(child)
        return child_set
    def min(self, field):
        """
        Syntactic sugar for the minimum aggregation.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated UnsortedGrouping.
        """
        return self.aggregate(Min, field)
    def max(self, field):
        """
        Syntactic sugar for the maximum aggregation.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated UnsortedGrouping.
        """
        return self.aggregate(Max, field)
    def sum(self, field):
        """
        Syntactic sugar for the sum aggregation.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated UnsortedGrouping.
        """
        return self.aggregate(Sum, field)
    def _finalize(self):
        # Rewrite the placeholder identity map (added by group_by) into a
        # (key, value) extractor, then renumber the grouping keys to point at
        # the extracted key tuple's positions.
        grouping = self._child_chain[0]
        keys = grouping.keys
        f = None
        if isinstance(keys[0], TYPES.FunctionType):
            f = lambda x: (keys[0](x),)
        if isinstance(keys[0], KeySelectorFunction):
            f = lambda x: (keys[0].get_key(x),)
        if f is None:
            f = lambda x: tuple([x[key] for key in keys])
        grouping.parent.operator.map = lambda x: (f(x), x)
        grouping.parent.types = _createKeyValueTypeInfo(len(keys))
        grouping.keys = tuple([i for i in range(len(grouping.keys))])
class SortedGrouping(Grouping):
    """A grouped DataSet with one or more intra-group sort orders; produced by sort_group()."""
    def __init__(self, env, info, child_chain):
        super(SortedGrouping, self).__init__(env, info, child_chain)
    def _finalize(self):
        # Rewrite the placeholder identity map into a key-tuple extractor that
        # covers both the grouping keys and all sort keys, then renumber every
        # key/field to its position inside the extracted tuple.
        # Extracted tuple layout: index-based keys first (sorted, deduplicated),
        # then a key-selector grouping key (if any), then key-selector sort keys.
        grouping = self._child_chain[0]
        sortings = self._child_chain[1:]
        #list of used index keys to prevent duplicates and determine final index
        index_keys = set()
        if not isinstance(grouping.keys[0], (TYPES.FunctionType, KeySelectorFunction)):
            index_keys = index_keys.union(set(grouping.keys))
        #list of sorts using indices
        index_sorts = []
        #list of sorts using functions
        ksl_sorts = []
        for s in sortings:
            if not isinstance(s.field, (TYPES.FunctionType, KeySelectorFunction)):
                index_keys.add(s.field)
                index_sorts.append(s)
            else:
                ksl_sorts.append(s)
        used_keys = sorted(index_keys)
        #all data gathered
        #construct list of extractor lambdas
        lambdas = []
        i = 0
        for key in used_keys:
            # k=key binds the current value; a plain closure would late-bind.
            lambdas.append(lambda x, k=key: x[k])
            i += 1
        if isinstance(grouping.keys[0], (TYPES.FunctionType, KeySelectorFunction)):
            lambdas.append(grouping.keys[0])
        for ksl_op in ksl_sorts:
            lambdas.append(ksl_op.field)
        grouping.parent.operator.map = lambda x: (tuple([l(x) for l in lambdas]), x)
        grouping.parent.types = _createKeyValueTypeInfo(len(lambdas))
        #modify keys
        ksl_offset = len(used_keys)
        if not isinstance(grouping.keys[0], (TYPES.FunctionType, KeySelectorFunction)):
            grouping.keys = tuple([used_keys.index(key) for key in grouping.keys])
        else:
            grouping.keys = (ksl_offset,)
            ksl_offset += 1
        for iop in index_sorts:
            iop.field = used_keys.index(iop.field)
        for kop in ksl_sorts:
            kop.field = ksl_offset
            ksl_offset += 1
class CoGroupOperatorWhere(object):
    """Intermediate CoGroup builder: awaits the key definition of the first input."""
    def __init__(self, env, info):
        self._env = env
        self._info = info
    def where(self, *fields):
        """
        Continues a CoGroup transformation.
        Defines the Tuple fields of the first co-grouped DataSet that should be used as grouping keys.
        Note: Fields can only be selected as grouping keys on Tuple DataSets.
        :param fields: The indexes of the Tuple fields of the first co-grouped DataSets that should be used as keys.
        :return: An incomplete CoGroup transformation.
        """
        selector = fields[0]
        if isinstance(selector, TYPES.FunctionType):
            extract = lambda x: (selector(x),)
        elif isinstance(selector, KeySelectorFunction):
            extract = lambda x: (selector.get_key(x),)
        else:
            extract = lambda x: tuple([x[key] for key in fields])
        # Prepend a map that turns each record into a (key, value) pair.
        keyed = self._info.parent_set.map(lambda x: (extract(x), x))
        keyed._info.types = _createKeyValueTypeInfo(len(fields))
        self._info.parent = keyed._info
        self._info.parent.children.append(self._info)
        self._info.key1 = fields
        return CoGroupOperatorTo(self._env, self._info)
class CoGroupOperatorTo(object):
    """Intermediate CoGroup builder: awaits the key definition of the second input."""
    def __init__(self, env, info):
        self._env = env
        self._info = info
    def equal_to(self, *fields):
        """
        Continues a CoGroup transformation.
        Defines the Tuple fields of the second co-grouped DataSet that should be used as grouping keys.
        Note: Fields can only be selected as grouping keys on Tuple DataSets.
        :param fields: The indexes of the Tuple fields of the second co-grouped DataSet that should be used as keys.
        :return: An incomplete CoGroup transformation.
        """
        selector = fields[0]
        if isinstance(selector, TYPES.FunctionType):
            extract = lambda x: (selector(x),)
        elif isinstance(selector, KeySelectorFunction):
            extract = lambda x: (selector.get_key(x),)
        else:
            extract = lambda x: tuple([x[key] for key in fields])
        # Prepend a map that turns each record of the second input into a (key, value) pair.
        keyed = self._info.other_set.map(lambda x: (extract(x), x))
        keyed._info.types = _createKeyValueTypeInfo(len(fields))
        self._info.other = keyed._info
        self._info.other.children.append(self._info)
        self._info.key2 = fields
        return CoGroupOperatorUsing(self._env, self._info)
class CoGroupOperatorUsing(object):
    """Final CoGroup builder: accepts the CoGroupFunction and registers the operator."""
    def __init__(self, env, info):
        self._env = env
        self._info = info
    def using(self, operator):
        """
        Finalizes a CoGroup transformation.
        Applies a CoGroupFunction to groups of elements with identical keys.
        Each CoGroupFunction call returns an arbitrary number of keys.
        :param operator: The CoGroupFunction that is called for all groups of elements with identical keys.
        :return:An CoGroupOperator that represents the co-grouped result DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            f = operator
            operator = CoGroupFunction()
            operator.co_group = f
        new_set = OperatorSet(self._env, self._info)
        # Renumber the user-given keys to positions in the extracted key tuples
        # (built by where()/equal_to()) BEFORE copying them onto the operator.
        self._info.key1 = tuple([x for x in range(len(self._info.key1))])
        self._info.key2 = tuple([x for x in range(len(self._info.key2))])
        operator._keys1 = self._info.key1
        operator._keys2 = self._info.key2
        self._info.parent.parallelism = self._info.parallelism
        self._info.other.parallelism = self._info.parallelism
        self._info.operator = operator
        self._info.types = _createArrayTypeInfo()
        self._info.name = "PythonCoGroup"
        self._env._sets.append(self._info)
        return new_set
class JoinOperatorWhere(object):
    """First stage of a Join: choose the key fields of the left input."""

    def __init__(self, env, info):
        self._env = env
        self._info = info

    def where(self, *fields):
        """
        Continues a Join transformation by selecting the join keys of the
        first DataSet.

        Note: positional fields can only be selected as join keys on Tuple
        DataSets; alternatively a key-selector function may be passed.

        :param fields: Tuple field indexes (or a single key-extraction
            function / KeySelectorFunction) of the first join DataSet.
        :return: An incomplete Join transformation.
        """
        selector = fields[0]
        if isinstance(selector, TYPES.FunctionType):
            extract_key = lambda value: (selector(value),)
        elif isinstance(selector, KeySelectorFunction):
            extract_key = lambda value: (selector.get_key(value),)
        else:
            extract_key = lambda value: tuple(value[index] for index in fields)
        # Re-key the left-hand input as (key, value) pairs.
        keyed_parent = self._info.parent_set.map(lambda value: (extract_key(value), value))
        keyed_parent._info.types = _createKeyValueTypeInfo(len(fields))
        self._info.parent = keyed_parent._info
        self._info.parent.parallelism = self._info.parallelism
        self._info.parent.children.append(self._info)
        self._info.key1 = tuple(range(len(fields)))
        return JoinOperatorTo(self._env, self._info)
class JoinOperatorTo(object):
    """Second stage of a Join: choose the key fields of the right input."""

    def __init__(self, env, info):
        self._env = env
        self._info = info

    def equal_to(self, *fields):
        """
        Continues a Join transformation by selecting the join keys of the
        second DataSet.

        Note: positional fields can only be selected as join keys on Tuple
        DataSets; alternatively a key-selector function may be passed.

        :param fields: Tuple field indexes (or a single key-extraction
            function / KeySelectorFunction) of the second join DataSet.
        :return: An incomplete Join transformation.
        """
        selector = fields[0]
        if isinstance(selector, TYPES.FunctionType):
            extract_key = lambda value: (selector(value),)
        elif isinstance(selector, KeySelectorFunction):
            extract_key = lambda value: (selector.get_key(value),)
        else:
            extract_key = lambda value: tuple(value[index] for index in fields)
        # Re-key the right-hand input as (key, value) pairs.
        keyed_other = self._info.other_set.map(lambda value: (extract_key(value), value))
        keyed_other._info.types = _createKeyValueTypeInfo(len(fields))
        self._info.other = keyed_other._info
        self._info.other.parallelism = self._info.parallelism
        self._info.other.children.append(self._info)
        self._info.key2 = tuple(range(len(fields)))
        self._env._sets.append(self._info)
        return JoinOperator(self._env, self._info)
class Projector(DataSet):
    """DataSet produced by project_first/project_second.

    Accumulates the selected (input, field) pairs and keeps the projection
    map function in sync with the pairs gathered so far.
    """

    def __init__(self, env, info):
        super(Projector, self).__init__(env, info)

    def project_first(self, *fields):
        """
        Initiates a Project transformation.
        Projects the first input.
        If the first input is a Tuple DataSet, fields can be selected by their index.
        If the first input is not a Tuple DataSet, no parameters should be passed.
        :param fields: The indexes of the selected fields.
        :return: An incomplete Projection.
        """
        return self._project(0, fields)

    def project_second(self, *fields):
        """
        Initiates a Project transformation.
        Projects the second input.
        If the second input is a Tuple DataSet, fields can be selected by their index.
        If the second input is not a Tuple DataSet, no parameters should be passed.
        :param fields: The indexes of the selected fields.
        :return: An incomplete Projection.
        """
        return self._project(1, fields)

    def _project(self, input_index, fields):
        """Shared implementation of project_first/project_second.

        :param input_index: 0 for the first input, 1 for the second.
        :param fields: field indexes selected from that input.
        :return: self, so projections can be chained.
        """
        for field in fields:
            self._info.projections.append((input_index, field))
        # Rebind the map function so it reflects all projections gathered so far.
        self._info.operator.map = lambda x: tuple([x[side][index] for side, index in self._info.projections])
        return self
class Projectable:
    """Mixin that adds project_first/project_second to two-input operators."""

    def __init__(self):
        pass

    def project_first(self, *fields):
        """
        Initiates a Project transformation on the first input.
        If the first input is a Tuple DataSet, fields are selected by index;
        otherwise no parameters should be passed.

        :param fields: The indexes of the selected fields.
        :return: An incomplete Projection.
        """
        projector = Projectable._createProjector(self._env, self._info)
        return projector.project_first(*fields)

    def project_second(self, *fields):
        """
        Initiates a Project transformation on the second input.
        If the second input is a Tuple DataSet, fields are selected by index;
        otherwise no parameters should be passed.

        :param fields: The indexes of the selected fields.
        :return: An incomplete Projection.
        """
        projector = Projectable._createProjector(self._env, self._info)
        return projector.project_second(*fields)

    @staticmethod
    def _createProjector(env, info):
        """Create a map-based Projector node as a child of *info*."""
        projection = OperationInfo()
        projector = Projector(env, projection)
        projection.identifier = _Identifier.MAP
        projection.operator = MapFunction()
        projection.parent = info
        projection.types = _createArrayTypeInfo()
        projection.name = "Projector"
        projection.parallelism = info.parallelism
        info.children.append(projection)
        env._sets.append(projection)
        return projector
class JoinOperator(DataSet, Projectable):
    """Result of a fully-keyed Join; finished with using() or a projection."""

    def __init__(self, env, info):
        super(JoinOperator, self).__init__(env, info)

    def using(self, operator):
        """
        Finalizes a Join transformation by applying a JoinFunction to each
        pair of joined elements; each call returns exactly one element.

        :param operator: JoinFunction instance, or a plain function used as
            its join implementation.
        :return: An OperatorSet representing the joined result DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            wrapped = JoinFunction()
            wrapped.join = operator
            operator = wrapped
        info = self._info
        info.operator = operator
        info.types = _createArrayTypeInfo()
        info.name = "PythonJoin"
        info.uses_udf = True
        return OperatorSet(self._env, info)
class CrossOperator(DataSet, Projectable):
    """Result of a Cross; finished with using() or a projection."""

    def __init__(self, env, info):
        super(CrossOperator, self).__init__(env, info)

    def using(self, operator):
        """
        Finalizes a Cross transformation by applying a CrossFunction to each
        pair of crossed elements; each call returns exactly one element.

        :param operator: CrossFunction instance, or a plain function used as
            its cross implementation.
        :return: An OperatorSet representing the crossed result DataSet.
        """
        if isinstance(operator, TYPES.FunctionType):
            wrapped = CrossFunction()
            wrapped.cross = operator
            operator = wrapped
        info = self._info
        info.operator = operator
        info.types = _createArrayTypeInfo()
        info.name = "PythonCross"
        info.uses_udf = True
        return OperatorSet(self._env, info)
class AggregateOperator(OperatorSet):
    """OperatorSet returned by an aggregate transformation.

    Allows chaining further built-in aggregations on the same operator
    via and_agg().
    """
    def __init__(self, env, info):
        super(AggregateOperator, self).__init__(env, info)
    def and_agg(self, aggregation, field):
        """
        Applies an additional Aggregate transformation.
        :param aggregation: The built-in aggregation operation to apply on the DataSet.
        :param field: The index of the Tuple field on which to perform the function.
        :return: An AggregateOperator that represents the aggregated DataSet.
        """
        # Chained aggregations accumulate on the shared operator object, so
        # they all execute within the single aggregate operator.
        self._info.operator.add_aggregation(aggregation, field)
        return self
| 39.390586 | 132 | 0.652908 |
3f2a2494c459b49f53d1469b92e3f1d6d5c7fcac | 2,668 | py | Python | flowtorch/bijectors/ops/affine.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 29 | 2020-12-19T00:29:42.000Z | 2021-08-12T19:11:47.000Z | flowtorch/bijectors/ops/affine.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 30 | 2020-12-29T04:42:38.000Z | 2021-02-19T22:29:38.000Z | flowtorch/bijectors/ops/affine.py | sankethvedula/flowtorch | 44a0f0eff842dd33ca17b01f4e02d8cdda005aa8 | [
"MIT"
] | 1 | 2021-05-06T21:25:45.000Z | 2021-05-06T21:25:45.000Z | # Copyright (c) Meta Platforms, Inc
from typing import Optional, Tuple
import flowtorch
import torch
from flowtorch.bijectors.base import Bijector
from flowtorch.ops import clamp_preserve_gradients
from torch.distributions.utils import _sum_rightmost
class Affine(Bijector):
    r"""
    Affine mapping :math:`\mathbf{y} = \mu + \sigma \otimes \mathbf{x}` where
    $\mu$ and $\sigma$ are learnable parameters.
    """

    def __init__(
        self,
        params: Optional[flowtorch.Lazy] = None,
        *,
        shape: torch.Size,
        context_shape: Optional[torch.Size] = None,
        log_scale_min_clip: float = -5.0,
        log_scale_max_clip: float = 3.0,
        sigmoid_bias: float = 2.0,
    ) -> None:
        super().__init__(params, shape=shape, context_shape=context_shape)
        # Bounds applied to the raw log-scale output of the conditioner.
        self.log_scale_min_clip = log_scale_min_clip
        self.log_scale_max_clip = log_scale_max_clip
        self.sigmoid_bias = sigmoid_bias

    def _clamped_params(
        self,
        x: Optional[torch.Tensor],
        context: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the conditioner and clamp its log-scale output.

        Shared by _forward, _inverse and _log_abs_det_jacobian, which
        previously triplicated this logic.
        """
        params = self.params
        assert params is not None
        # Note: params will take care of caching "mean, log_scale = params(x)"
        mean, log_scale = params(x, context=context)
        log_scale = clamp_preserve_gradients(
            log_scale, self.log_scale_min_clip, self.log_scale_max_clip
        )
        return mean, log_scale

    def _forward(
        self,
        x: torch.Tensor,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Apply y = exp(log_scale) * x + mean."""
        mean, log_scale = self._clamped_params(x, context)
        return torch.exp(log_scale) * x + mean

    def _inverse(
        self,
        y: torch.Tensor,
        x: Optional[torch.Tensor] = None,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Apply x = (y - mean) * exp(-log_scale).

        NOTE: the conditioner is evaluated on *x* (the cached forward input),
        not on *y*; callers are expected to supply it.
        """
        mean, log_scale = self._clamped_params(x, context)
        return (y - mean) * torch.exp(-log_scale)

    def _log_abs_det_jacobian(
        self,
        x: torch.Tensor,
        y: torch.Tensor,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """The log|det J| of an affine map is the sum of the log-scales."""
        _, log_scale = self._clamped_params(x, context)
        return _sum_rightmost(log_scale, self.domain.event_dim)

    def param_shapes(self, shape: torch.Size) -> Tuple[torch.Size, torch.Size]:
        # A mean and log variance for every dimension of the event shape
        return shape, shape
| 31.023256 | 79 | 0.634183 |
3b9f6bc0e32695973532260e323778e5f0eb10d2 | 10,981 | py | Python | rl3/ddpg.py | viclen/machine_learning_examples_v2 | d47d572629899efe23bb26edffedfd464c0f77e4 | [
"Apache-2.0"
] | 1 | 2020-08-04T23:07:31.000Z | 2020-08-04T23:07:31.000Z | rl3/ddpg.py | viclen/machine_learning_examples_v2 | d47d572629899efe23bb26edffedfd464c0f77e4 | [
"Apache-2.0"
] | null | null | null | rl3/ddpg.py | viclen/machine_learning_examples_v2 | d47d572629899efe23bb26edffedfd464c0f77e4 | [
"Apache-2.0"
] | null | null | null | # https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence
import numpy as np
import tensorflow as tf
import gym
import matplotlib.pyplot as plt
from datetime import datetime
### avoid crashing on Mac
# doesn't seem to work
from sys import platform as sys_pf
# On macOS, pick the TkAgg backend before pyplot is used, because the default
# backend can crash there (the author notes above that this workaround may
# not actually help).
if sys_pf == 'darwin':
    import matplotlib
    matplotlib.use("TkAgg")
# simple feedforward neural net
def ANN(x, layer_sizes, hidden_activation=tf.nn.relu, output_activation=None):
    """Build a feedforward stack of dense layers and return the output tensor.

    All sizes but the last use *hidden_activation*; the final layer of size
    layer_sizes[-1] uses *output_activation*.
    """
    *hidden_sizes, final_size = layer_sizes
    out = x
    for size in hidden_sizes:
        out = tf.compat.v1.layers.dense(out, units=size, activation=hidden_activation)
    return tf.compat.v1.layers.dense(out, units=final_size, activation=output_activation)
# get all variables within a scope
def get_vars(scope):
    """Return every global TF variable whose name contains *scope*."""
    matches = []
    for variable in tf.compat.v1.global_variables():
        if scope in variable.name:
            matches.append(variable)
    return matches
### Create both the actor and critic networks at once ###
### Q(s, mu(s)) returns the maximum Q for a given state s ###
def CreateNetworks(
    s, a,
    num_actions,
    action_max,
    hidden_sizes=(300,),
    hidden_activation=tf.nn.relu,
    output_activation=tf.tanh):
    """Build the actor (mu) and critic (Q) graphs for DDPG.

    :param s: placeholder for the state batch.
    :param a: placeholder for the action batch.
    :param num_actions: dimensionality of the action space.
    :param action_max: scalar scaling the tanh actor output into the
        environment's action range.
    :param hidden_sizes: hidden-layer sizes shared by both networks.
    :param hidden_activation: activation used in the hidden layers.
    :param output_activation: activation of the actor's output layer.
    :return: (mu, q, q_mu) where q_mu = Q(s, mu(s)) shares weights with q
        through variable-scope reuse.
    """
    with tf.compat.v1.variable_scope('mu'):
        mu = action_max * ANN(s, list(hidden_sizes)+[num_actions], hidden_activation, output_activation)
    with tf.compat.v1.variable_scope('q'):
        input_ = tf.concat([s, a], axis=-1) # (state, action)
        q = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1)
    with tf.compat.v1.variable_scope('q', reuse=True):
        # reuse is True, so it reuses the weights from the previously defined Q network
        input_ = tf.concat([s, mu], axis=-1) # (state, mu(state))
        q_mu = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1)
    return mu, q, q_mu
### The experience replay memory ###
class ReplayBuffer:
    """Fixed-capacity circular buffer of (s, a, r, s', done) transitions.

    Once full, the oldest transition is overwritten first.
    """
    def __init__(self, obs_dim, act_dim, size):
        self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.rews_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr = 0        # next slot to write
        self.size = 0       # number of valid transitions stored
        self.max_size = size

    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest slot when full."""
        slot = self.ptr
        self.obs1_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.acts_buf[slot] = act
        self.rews_buf[slot] = rew
        self.done_buf[slot] = done
        self.ptr = (slot + 1) % self.max_size
        if self.size < self.max_size:
            self.size += 1

    def sample_batch(self, batch_size=32):
        """Return a dict of arrays for *batch_size* uniformly sampled transitions."""
        picks = np.random.randint(0, self.size, size=batch_size)
        return {
            's': self.obs1_buf[picks],
            's2': self.obs2_buf[picks],
            'a': self.acts_buf[picks],
            'r': self.rews_buf[picks],
            'd': self.done_buf[picks],
        }
### Implement the DDPG algorithm ###
def ddpg(
    env_fn,
    ac_kwargs=dict(),
    seed=0,
    save_folder=None,
    num_train_episodes=100,
    test_agent_every=25,
    replay_size=int(1e6),
    gamma=0.99,
    decay=0.995,
    mu_lr=1e-3,
    q_lr=1e-3,
    batch_size=100,
    start_steps=10000,
    action_noise=0.1,
    max_episode_length=1000):
    """Train a DDPG agent on the environment produced by *env_fn*.

    :param env_fn: zero-argument callable returning a fresh gym environment.
    :param ac_kwargs: keyword args forwarded to CreateNetworks (e.g. hidden_sizes).
    :param seed: random seed for tensorflow and numpy.
    :param save_folder: if not None, the test env is wrapped in a Monitor
        that records videos to this folder.
    :param num_train_episodes: number of training episodes.
    :param test_agent_every: evaluate the agent every this many episodes.
    :param replay_size: capacity of the experience replay buffer.
    :param gamma: discount factor.
    :param decay: polyak coefficient for the soft target-network updates.
    :param mu_lr: Adam learning rate of the actor.
    :param q_lr: Adam learning rate of the critic.
    :param batch_size: minibatch size for the updates.
    :param start_steps: number of initial steps taken with uniformly random
        actions before switching to the (noisy) policy.
    :param action_noise: stddev of the Gaussian exploration noise.
    :param max_episode_length: hard cap on episode length.

    Training/test returns and losses are saved to 'ddpg_results.npz'.
    """
    tf.compat.v1.set_random_seed(seed)
    np.random.seed(seed)

    env, test_env = env_fn(), env_fn()

    # comment out this line if you don't want to record a video of the agent
    if save_folder is not None:
        test_env = gym.wrappers.Monitor(test_env, save_folder)

    # get size of state space and action space
    num_states = env.observation_space.shape[0]
    num_actions = env.action_space.shape[0]

    # Maximum value of action
    # Assumes both low and high values are the same
    # Assumes all actions have the same bounds
    # May NOT be the case for all environments
    action_max = env.action_space.high[0]

    # Create Tensorflow placeholders (neural network inputs)
    X = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, num_states)) # state
    A = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, num_actions)) # action
    X2 = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, num_states)) # next state
    R = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,)) # reward
    D = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,)) # done

    # Main network outputs
    with tf.compat.v1.variable_scope('main'):
        mu, q, q_mu = CreateNetworks(X, A, num_actions, action_max, **ac_kwargs)

    # Target networks
    with tf.compat.v1.variable_scope('target'):
        # We don't need the Q network output with arbitrary input action A
        # because that's not actually used in our loss functions
        # NOTE 1: The state input is X2, NOT X
        # We only care about max_a{ Q(s', a) }
        # Where this is equal to Q(s', mu(s'))
        # This is because it's used in the target calculation: r + gamma * max_a{ Q(s',a) }
        # Where s' = X2
        # NOTE 2: We ignore the first 2 networks for the same reason
        _, _, q_mu_targ = CreateNetworks(X2, A, num_actions, action_max, **ac_kwargs)

    # Experience replay memory
    replay_buffer = ReplayBuffer(obs_dim=num_states, act_dim=num_actions, size=replay_size)

    # Target value for the Q-network loss
    # We use stop_gradient to tell Tensorflow not to differentiate
    # q_mu_targ wrt any params
    # i.e. consider q_mu_targ values constant
    q_target = tf.stop_gradient(R + gamma * (1 - D) * q_mu_targ)

    # DDPG losses
    mu_loss = -tf.reduce_mean(input_tensor=q_mu)
    q_loss = tf.reduce_mean(input_tensor=(q - q_target)**2)

    # Train each network separately
    mu_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=mu_lr)
    q_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=q_lr)
    mu_train_op = mu_optimizer.minimize(mu_loss, var_list=get_vars('main/mu'))
    q_train_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q'))

    # Use soft updates to update the target networks
    target_update = tf.group(
        [tf.compat.v1.assign(v_targ, decay*v_targ + (1 - decay)*v_main)
         for v_main, v_targ in zip(get_vars('main'), get_vars('target'))
        ]
    )

    # Copy main network params to target networks
    target_init = tf.group(
        [tf.compat.v1.assign(v_targ, v_main)
         for v_main, v_targ in zip(get_vars('main'), get_vars('target'))
        ]
    )

    # boilerplate (and copy to the target networks!)
    sess = tf.compat.v1.Session()
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(target_init)

    def get_action(s, noise_scale):
        # Deterministic policy output plus Gaussian exploration noise,
        # clipped back into the valid action range.
        a = sess.run(mu, feed_dict={X: s.reshape(1,-1)})[0]
        a += noise_scale * np.random.randn(num_actions)
        return np.clip(a, -action_max, action_max)

    test_returns = []
    def test_agent(num_episodes=5):
        # Roll out the current policy without exploration noise and record
        # each episode's return in test_returns.
        t0 = datetime.now()
        n_steps = 0
        for j in range(num_episodes):
            s, episode_return, episode_length, d = test_env.reset(), 0, 0, False
            while not (d or (episode_length == max_episode_length)):
                # Take deterministic actions at test time (noise_scale=0)
                test_env.render()
                s, r, d, _ = test_env.step(get_action(s, 0))
                episode_return += r
                episode_length += 1
                n_steps += 1
            print('test return:', episode_return, 'episode_length:', episode_length)
            test_returns.append(episode_return)
        # print("test steps per sec:", n_steps / (datetime.now() - t0).total_seconds())

    # Main loop: play episode and train
    returns = []
    q_losses = []
    mu_losses = []
    num_steps = 0
    for i_episode in range(num_train_episodes):

        # reset env
        s, episode_return, episode_length, d = env.reset(), 0, 0, False

        while not (d or (episode_length == max_episode_length)):
            # For the first `start_steps` steps, use randomly sampled actions
            # in order to encourage exploration.
            if num_steps > start_steps:
                a = get_action(s, action_noise)
            else:
                a = env.action_space.sample()

            # Keep track of the number of steps done
            num_steps += 1
            if num_steps == start_steps:
                print("USING AGENT ACTIONS NOW")

            # Step the env
            s2, r, d, _ = env.step(a)
            episode_return += r
            episode_length += 1

            # Ignore the "done" signal if it comes from hitting the time
            # horizon (that is, when it's an artificial terminal signal
            # that isn't based on the agent's state)
            d_store = False if episode_length == max_episode_length else d

            # Store experience to replay buffer
            replay_buffer.store(s, a, r, s2, d_store)

            # Assign next state to be the current state on the next round
            s = s2

        # Perform the updates
        # (one gradient step per environment step taken this episode)
        for _ in range(episode_length):
            batch = replay_buffer.sample_batch(batch_size)
            feed_dict = {
                X: batch['s'],
                X2: batch['s2'],
                A: batch['a'],
                R: batch['r'],
                D: batch['d']
            }

            # Q network update
            # Note: plot the Q loss if you want
            ql, _, _ = sess.run([q_loss, q, q_train_op], feed_dict)
            q_losses.append(ql)

            # Policy update
            # (And target networks update)
            # Note: plot the mu loss if you want
            mul, _, _ = sess.run([mu_loss, mu_train_op, target_update], feed_dict)
            mu_losses.append(mul)

        print("Episode:", i_episode + 1, "Return:", episode_return, 'episode_length:', episode_length)
        returns.append(episode_return)

        # Test the agent
        if i_episode > 0 and i_episode % test_agent_every == 0:
            test_agent()

    # on Mac, plotting results in an error, so just save the results for later
    # if you're not on Mac, feel free to uncomment the below lines
    np.savez('ddpg_results.npz', train=returns, test=test_returns, q_losses=q_losses, mu_losses=mu_losses)

    # plt.plot(returns)
    # plt.plot(smooth(np.array(returns)))
    # plt.title("Train returns")
    # plt.show()

    # plt.plot(test_returns)
    # plt.plot(smooth(np.array(test_returns)))
    # plt.title("Test returns")
    # plt.show()

    # plt.plot(q_losses)
    # plt.title('q_losses')
    # plt.show()

    # plt.plot(mu_losses)
    # plt.title('mu_losses')
    # plt.show()
def smooth(x):
    """Return the trailing moving average of *x* over a window of up to 100.

    y[i] is the mean of x[max(0, i - 99) : i + 1], i.e. the average of the
    last (up to) 100 values seen so far.

    :param x: 1-D array-like of values (e.g. episode returns).
    :return: numpy array of the same length holding the running averages.
    """
    n = len(x)
    # Cumulative sums make every window mean O(1):
    # sum(x[a:b]) == csum[b] - csum[a].  (The original re-summed each
    # window, costing O(n * window).)
    csum = np.concatenate(([0.0], np.cumsum(np.asarray(x, dtype=float))))
    y = np.zeros(n)
    for i in range(n):
        start = max(0, i - 99)
        y[i] = (csum[i + 1] - csum[start]) / (i - start + 1)
    return y
# Command-line entry point: parse hyperparameters and launch DDPG training.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # parser.add_argument('--env', type=str, default='HalfCheetah-v2')
    parser.add_argument('--env', type=str, default='Pendulum-v0')
    # width and depth of the hidden layers used by both actor and critic
    parser.add_argument('--hidden_layer_sizes', type=int, default=300)
    parser.add_argument('--num_layers', type=int, default=1)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--num_train_episodes', type=int, default=200)
    # folder where the Monitor wrapper records evaluation videos
    parser.add_argument('--save_folder', type=str, default='ddpg_monitor')
    args = parser.parse_args()

    ddpg(
        lambda : gym.make(args.env),
        ac_kwargs=dict(hidden_sizes=[args.hidden_layer_sizes]*args.num_layers),
        gamma=args.gamma,
        seed=args.seed,
        save_folder=args.save_folder,
        num_train_episodes=args.num_train_episodes,
    )
| 33.891975 | 104 | 0.674164 |
e57bba9ed47eee5c0ff4bb57dd0b9812dc3b8e71 | 7,784 | py | Python | ionoscloud/models/network_load_balancer_forwarding_rule_target.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/network_load_balancer_forwarding_rule_target.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/network_load_balancer_forwarding_rule_target.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class NetworkLoadBalancerForwardingRuleTarget(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each model attribute.
    openapi_types = {
        'ip': 'str',
        'port': 'int',
        'weight': 'int',
        'health_check': 'NetworkLoadBalancerForwardingRuleTargetHealthCheck',
    }

    # Mapping from python attribute name to the JSON key in the API payload.
    attribute_map = {
        'ip': 'ip',
        'port': 'port',
        'weight': 'weight',
        'health_check': 'healthCheck',
    }

    def __init__(self, ip=None, port=None, weight=None, health_check=None, local_vars_configuration=None):  # noqa: E501
        """NetworkLoadBalancerForwardingRuleTarget - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ip = None
        self._port = None
        self._weight = None
        self._health_check = None
        self.discriminator = None

        # Assigning through the properties runs the client-side validation;
        # the ip/port/weight setters raise ValueError when left None.
        self.ip = ip
        self.port = port
        self.weight = weight
        if health_check is not None:
            self.health_check = health_check

    @property
    def ip(self):
        """Gets the ip of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501

        The IP of the balanced target VM.  # noqa: E501

        :return: The ip of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this NetworkLoadBalancerForwardingRuleTarget.

        The IP of the balanced target VM.  # noqa: E501

        :param ip: The ip of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :type ip: str
        """
        if self.local_vars_configuration.client_side_validation and ip is None:  # noqa: E501
            raise ValueError("Invalid value for `ip`, must not be `None`")  # noqa: E501

        self._ip = ip

    @property
    def port(self):
        """Gets the port of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501

        The port of the balanced target service; valid range is 1 to 65535.  # noqa: E501

        :return: The port of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :rtype: int
        """
        return self._port

    @port.setter
    def port(self, port):
        """Sets the port of this NetworkLoadBalancerForwardingRuleTarget.

        The port of the balanced target service; valid range is 1 to 65535.  # noqa: E501

        :param port: The port of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :type port: int
        """
        if self.local_vars_configuration.client_side_validation and port is None:  # noqa: E501
            raise ValueError("Invalid value for `port`, must not be `None`")  # noqa: E501

        self._port = port

    @property
    def weight(self):
        """Gets the weight of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501

        Traffic is distributed in proportion to target weight, relative to the combined weight of all targets. A target with higher weight receives a greater share of traffic. Valid range is 0 to 256 and default is 1. Targets with weight of 0 do not participate in load balancing but still accept persistent connections. It is best to assign weights in the middle of the range to leave room for later adjustments.  # noqa: E501

        :return: The weight of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :rtype: int
        """
        return self._weight

    @weight.setter
    def weight(self, weight):
        """Sets the weight of this NetworkLoadBalancerForwardingRuleTarget.

        Traffic is distributed in proportion to target weight, relative to the combined weight of all targets. A target with higher weight receives a greater share of traffic. Valid range is 0 to 256 and default is 1. Targets with weight of 0 do not participate in load balancing but still accept persistent connections. It is best to assign weights in the middle of the range to leave room for later adjustments.  # noqa: E501

        :param weight: The weight of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :type weight: int
        """
        if self.local_vars_configuration.client_side_validation and weight is None:  # noqa: E501
            raise ValueError("Invalid value for `weight`, must not be `None`")  # noqa: E501

        self._weight = weight

    @property
    def health_check(self):
        """Gets the health_check of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501


        :return: The health_check of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :rtype: NetworkLoadBalancerForwardingRuleTargetHealthCheck
        """
        return self._health_check

    @health_check.setter
    def health_check(self, health_check):
        """Sets the health_check of this NetworkLoadBalancerForwardingRuleTarget.


        :param health_check: The health_check of this NetworkLoadBalancerForwardingRuleTarget.  # noqa: E501
        :type health_check: NetworkLoadBalancerForwardingRuleTargetHealthCheck
        """
        # Optional attribute: no client-side validation is applied.
        self._health_check = health_check

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NetworkLoadBalancerForwardingRuleTarget):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NetworkLoadBalancerForwardingRuleTarget):
            return True

        return self.to_dict() != other.to_dict()
| 36.037037 | 438 | 0.648381 |
3f3a62db02f2cf9d4f940f932f2cc0066ebaf7bb | 685 | py | Python | setup.py | gregology/spot-memair | 301c95e98ebca8c97b5e52dbe115b4b5c5f87b1a | [
"MIT"
] | null | null | null | setup.py | gregology/spot-memair | 301c95e98ebca8c97b5e52dbe115b4b5c5f87b1a | [
"MIT"
] | null | null | null | setup.py | gregology/spot-memair | 301c95e98ebca8c97b5e52dbe115b4b5c5f87b1a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Packaging metadata; installable with `pip install .`.
setup(
    name='spot-memair',
    version='2018.5.28.0',  # date-based version scheme (year.month.day.patch)
    description='updates memair with spot data',
    long_description=open('README.rst').read(),
    url='https://github.com/gregology/spot-memair',
    author='Greg Clarke',
    author_email='greg@gho.st',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python'
    ],
    keywords='spot memair location',
    packages=find_packages(),
    package_data={
        'spot_memair': []  # no extra data files shipped with the package
    }
)
| 27.4 | 51 | 0.643796 |
da9c8131c05e52fef593d5d6cedd57a9836fed95 | 281 | py | Python | examples/hotel_ratings/hotel_ratings.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | null | null | null | examples/hotel_ratings/hotel_ratings.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | null | null | null | examples/hotel_ratings/hotel_ratings.py | akshitsingla/amadeus-python | d8f3595e556b674998156f98d8a318045bb4c21c | [
"MIT"
] | null | null | null | from amadeus import Client, ResponseError
# Client() with no arguments presumably reads the API credentials from the
# environment (AMADEUS_CLIENT_ID / AMADEUS_CLIENT_SECRET) -- TODO confirm.
amadeus = Client()
try:
    '''
    What travelers think about this hotel?
    '''
    # Fetch sentiment ratings for a single hotel identified by its Amadeus id.
    response = amadeus.e_reputation.hotel_sentiments.get(hotelIds = 'ADNYCCTB')
    # print(response.data)
except ResponseError as error:
    # Re-raise so API failures are visible when running the example.
    raise error
| 21.615385 | 79 | 0.708185 |
6018ceb1118cc8a46cae97e5a041ed5350696beb | 2,516 | py | Python | nets/deeplabv3_training.py | andy1747369004/deeplabv3-plus-pytorch | 296d3420c9940263fe598b7df8064ef6e8f5e495 | [
"MIT"
] | 1 | 2021-09-10T01:16:37.000Z | 2021-09-10T01:16:37.000Z | nets/deeplabv3_training.py | andy1747369004/deeplabv3-plus-pytorch | 296d3420c9940263fe598b7df8064ef6e8f5e495 | [
"MIT"
] | null | null | null | nets/deeplabv3_training.py | andy1747369004/deeplabv3-plus-pytorch | 296d3420c9940263fe598b7df8064ef6e8f5e495 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import nn
def CE_Loss(inputs, target, num_classes=21):
    """Pixel-wise cross-entropy loss for semantic segmentation.

    :param inputs: raw logits of shape (n, c, h, w).
    :param target: integer label map of shape (n, ht, wt); pixels labelled
        ``num_classes`` are ignored.
    :param num_classes: number of real classes (also used as ignore index).
    :return: scalar loss tensor.
    """
    n, c, h, w = inputs.size()
    nt, ht, wt = target.size()
    # Resize whenever EITHER spatial dimension differs from the target.
    # (The original used `and`, which skipped interpolation -- and then
    # crashed on a shape mismatch -- when only one dimension differed.)
    if h != ht or w != wt:
        inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True)

    # Flatten to (n*ht*wt, c) so the loss is applied per pixel.
    temp_inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    temp_target = target.view(-1)

    CE_loss = nn.NLLLoss(ignore_index=num_classes)(F.log_softmax(temp_inputs, dim=-1), temp_target)
    return CE_loss
def Dice_loss(inputs, target, beta=1, smooth = 1e-5):
    """Soft Dice (F-beta) loss for semantic segmentation.

    :param inputs: raw logits of shape (n, c, h, w).
    :param target: one-hot label tensor of shape (n, ht, wt, c+1); the last
        channel (background/ignore) is dropped via ``[..., :-1]``.
    :param beta: F-beta weighting between precision and recall.
    :param smooth: additive smoothing to avoid division by zero.
    :return: scalar loss tensor (1 - mean per-class F-beta score).
    """
    n, c, h, w = inputs.size()
    nt, ht, wt, ct = target.size()
    # Resize whenever EITHER spatial dimension differs from the target.
    # (The original used `and`, which skipped interpolation -- and then
    # crashed on a shape mismatch -- when only one dimension differed.)
    if h != ht or w != wt:
        inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True)

    # (n, h*w, c) class probabilities and matching one-hot targets.
    temp_inputs = torch.softmax(inputs.transpose(1, 2).transpose(2, 3).contiguous().view(n, -1, c), -1)
    temp_target = target.view(n, -1, ct)

    #--------------------------------------------#
    #   计算dice loss
    #--------------------------------------------#
    tp = torch.sum(temp_target[..., :-1] * temp_inputs, axis=[0, 1])
    fp = torch.sum(temp_inputs, axis=[0, 1]) - tp
    fn = torch.sum(temp_target[..., :-1], axis=[0, 1]) - tp

    score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth)
    dice_loss = 1 - torch.mean(score)
    return dice_loss
def weights_init(net, init_type='normal', init_gain=0.02):
    """Initialize the weights of *net* in place.

    Convolution weights are initialized according to *init_type* ('normal',
    'xavier', 'kaiming' or 'orthogonal'); BatchNorm2d layers get weights
    drawn from N(1, 0.02) and zero biases.
    """
    conv_initializers = {
        'normal': lambda w: torch.nn.init.normal_(w, 0.0, init_gain),
        'xavier': lambda w: torch.nn.init.xavier_normal_(w, gain=init_gain),
        'kaiming': lambda w: torch.nn.init.kaiming_normal_(w, a=0, mode='fan_in'),
        'orthogonal': lambda w: torch.nn.init.orthogonal_(w, gain=init_gain),
    }

    def init_func(m):
        layer_name = m.__class__.__name__
        if hasattr(m, 'weight') and 'Conv' in layer_name:
            try:
                initialize = conv_initializers[init_type]
            except KeyError:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            initialize(m.weight.data)
        elif 'BatchNorm2d' in layer_name:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s type' % init_type)
    net.apply(init_func)
| 44.140351 | 103 | 0.560413 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.