hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72881b56cfdfd0a1cd37017b902f6c5a5e701b5 | 1,047 | py | Python | src/operators/test_bitwise.py | ClarkChiu/learn-python-tw | 5ee1d84437d55c052999f9edc182dad737b7fbd8 | [
"MIT"
] | 3 | 2021-12-21T03:24:53.000Z | 2022-02-07T00:58:52.000Z | src/operators/test_bitwise.py | ClarkChiu/learn-python-tw | 5ee1d84437d55c052999f9edc182dad737b7fbd8 | [
"MIT"
] | null | null | null | src/operators/test_bitwise.py | ClarkChiu/learn-python-tw | 5ee1d84437d55c052999f9edc182dad737b7fbd8 | [
"MIT"
] | null | null | null | """位元運算子
@詳見:https://www.w3schools.com/python/python_operators.asp
我們可以透過位元運算子在位元層級執行數學運算
"""
def test_bitwise_operators():
    """Exercise Python's bitwise operators on small, readable operands.

    Every assertion uses 5 (0b0101) and 3 (0b0011) so the bit patterns in
    the comments can be verified by eye.
    See: https://www.w3schools.com/python/python_operators.asp
    """
    # AND gate: a result bit is set only when it is set in both inputs.
    #   5 = 0b0101
    #   3 = 0b0011
    assert 5 & 3 == 1  # 0b0001
    # OR gate: a result bit is set when it is set in either input.
    assert 5 | 3 == 7  # 0b0111
    # NOT gate: two's-complement inversion; in decimal ~x == -x - 1.
    #   ~5 = -(5 + 1) = -6
    assert ~5 == -6
    # XOR gate: a result bit is set when the two input bits differ.
    number = 5   # 0b0101
    number ^= 3  # 0b0011
    # BUG FIX: the in-place XOR result was computed but never verified.
    assert number == 6  # 0b0110
    assert 5 ^ 3 == 6   # 0b0110
    # Right shift: drops low bits, i.e. floor-divides by 2 per shifted place.
    assert 5 >> 1 == 2  # 0b0010
    assert 5 >> 2 == 1  # 0b0001
    # Left shift: appends zero bits, i.e. multiplies by 2 per shifted place.
    assert 5 << 1 == 10  # 0b1010
    assert 5 << 2 == 20  # 0b10100
| 17.163934 | 57 | 0.47469 |
def test_bitwise_operators():
    """Sanity-check Python's bitwise operators using 5 (0b0101) and 3 (0b0011)."""
    assert 5 & 3 == 1    # AND keeps only the shared set bit
    assert 5 | 3 == 7    # OR merges the set bits
    assert ~5 == -6      # NOT is two's complement: ~x == -x - 1
    acc = 5
    acc ^= 3             # in-place XOR, mirroring the expression form below
    assert 5 ^ 3 == 6    # XOR keeps the differing bits
    assert 5 >> 1 == 2   # each right shift floor-halves
    assert 5 >> 2 == 1
    assert 5 << 1 == 10  # each left shift doubles
    assert 5 << 2 == 20
| true | true |
f72882a4af107a0fc89d7b9667acadc487be9627 | 1,157 | py | Python | portal/__init__.py | alexarirok/county-portal | 9e17d83ea825e451bbe59c267204662f05289a25 | [
"MIT"
] | null | null | null | portal/__init__.py | alexarirok/county-portal | 9e17d83ea825e451bbe59c267204662f05289a25 | [
"MIT"
] | null | null | null | portal/__init__.py | alexarirok/county-portal | 9e17d83ea825e451bbe59c267204662f05289a25 | [
"MIT"
] | null | null | null | import os
from flask import Flask
def create_app(test_config=None):
    """Application factory for the portal Flask app.

    Parameters
    ----------
    test_config : dict, optional
        When given (e.g. by the test suite), it overrides the on-disk
        instance configuration so tests never read ``config.py``.
    """
    app = Flask(__name__, instance_relative_config=True)
    # Baseline configuration; values may be overridden below.
    # NOTE(review): SECRET_KEY is a development placeholder — it must be
    # overridden via the instance config before deploying.
    app.config.from_mapping(
        SECRET_KEY = "DEVELOPMENT",
        DATABASE=os.path.join(app.instance_path, "portal.sqlite3"),
    )
    if test_config is None:
        # Not testing: load the instance config, if it exists.
        app.config.from_pyfile("config.py", silent=True)
    else:
        # Testing: apply the mapping that was passed in.
        app.config.update(test_config)
    # Ensure the instance folder exists (ignore "already exists").
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    # Simple smoke-test page.
    @app.route('/hello')
    def hello():
        return "Hello you!"
    # Register the database CLI commands.
    from portal import db
    db.init_app(app)
    # Register the auth and blog blueprints.
    from portal import auth
    app.register_blueprint(auth.bp)
    from portal import blog
    app.register_blueprint(blog.bp)
    # Map '/' to the 'index' endpoint.
    app.add_url_rule('/', endpoint='index')
return app | 26.906977 | 67 | 0.654278 | import os
from flask import Flask
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY = "DEVELOPMENT",
DATABASE=os.path.join(app.instance_path, "portal.sqlite3"),
)
if test_config is None:
app.config.from_pyfile("config.py", silent=True)
else:
app.config.update(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/hello')
def hello():
return "Hello you!"
from portal import db
db.init_app(app)
from portal import auth
app.register_blueprint(auth.bp)
from portal import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
return app | true | true |
f728837796729d9ab7ed0e9f6244dfbd6431c072 | 1,884 | py | Python | utils/config.py | michaelnation26/skateboard_trick_classification | 452476f38250eafc295ba474d1eb0ec971a7cca7 | [
"MIT"
] | 6 | 2020-12-27T20:39:01.000Z | 2022-02-28T06:58:44.000Z | utils/config.py | michaelnation26/skateboard_trick_classification | 452476f38250eafc295ba474d1eb0ec971a7cca7 | [
"MIT"
] | null | null | null | utils/config.py | michaelnation26/skateboard_trick_classification | 452476f38250eafc295ba474d1eb0ec971a7cca7 | [
"MIT"
] | 2 | 2019-08-16T07:52:56.000Z | 2021-12-24T04:11:19.000Z | RGB_CLASS_NAMES = [
'kickflip',
'360_kickflip',
'50-50',
'nosegrind',
'boardslide',
'tailslide',
'fail'
]
# Lookup table from class name to its integer index in RGB_CLASS_NAMES.
RGB_CLASS_NAME_TO_IDX = {class_name: idx
                         for idx, class_name in enumerate(RGB_CLASS_NAMES)}
# NOTE(review): kept in sync with RGB_CLASS_NAMES by hand — could instead be
# len(RGB_CLASS_NAMES).
RGB_N_CLASSES = 7
# Input frame geometry for the RGB (video) model.
RGB_FRAME_HEIGHT = 224
RGB_FRAME_WIDTH = 224
CHANNELS = 3
# Number of frames sampled per video clip.
RGB_N_FRAMES = 64
# Batch sizes for the RGB model.
RGB_TRAINING_BATCH_SIZE = 6
RGB_VALIDATION_BATCH_SIZE = 4
RGB_TEST_BATCH_SIZE = 16
# Audio model class labels.
# NOTE(review): AUDIO_CLASS_NAMES is re-assigned (with identical values)
# further down in this module; one of the two definitions is redundant.
AUDIO_CLASS_NAMES = ['air', 'fail', 'grind', 'slide']
N_AUDIO_CLASSES = len(AUDIO_CLASS_NAMES)
# Spectrogram image geometry for the audio model.
SPECTROGRAM_HEIGHT = 224
SPECTROGRAM_WIDTH = 224
# Batch sizes for the audio model.
AUDIO_TRAINING_BATCH_SIZE = 32
AUDIO_VALIDATION_BATCH_SIZE = 8
AUDIO_TEST_BATCH_SIZE = 32
# Saved model locations.
MODELS_DIR = 'models'
RGB_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_model.h5'
AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/audio_model.h5'
RGB_FROZEN_AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_frozen_audio_model.h5'
RGB_AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_audio_model.h5'
# Fraction of the training data held out for validation.
VALIDATION_SPLIT = 0.2
# Dataset directory layout.
VIDEO_TRAINING_VALIDATION_DIR = 'data/training_validation/video'
VIDEO_TRAINING_DIR = 'data/training/video'
VIDEO_VALIDATION_DIR = 'data/validation/video'
VIDEO_TEST_DIR = 'data/test/video'
WAV_TRAINING_DIR = 'data/training/audio/wav'
WAV_VALIDATION_DIR = 'data/validation/audio/wav'
WAV_TEST_DIR = 'data/test/audio/wav'
SPECTROGRAM_TRAINING_DIR = 'data/training/audio/spectrogram'
SPECTROGRAM_VALIDATION_DIR = 'data/validation/audio/spectrogram'
SPECTROGRAM_TEST_DIR = 'data/test/audio/spectrogram'
# Duplicate definition (see NOTE above) — same values as the earlier list.
AUDIO_CLASS_NAMES = [
    'air',
    'fail',
    'grind',
    'slide'
]
# Maps each fine-grained video label onto the coarser audio label set.
VIDEO_TO_AUDIO_LABEL_MAPPING = {
    '360_kickflip': 'air',
    'heelflip': 'air',
    'kickflip': 'air',
    'nollie_fakie_heelflip': 'air',
    'nollie_fakie_kickflip': 'air',
    'bs_kickflip': 'air',
    'fs_kickflip': 'air',
    '50-50': 'grind',
    'nosegrind': 'grind',
    'boardslide': 'slide',
    'tailslide': 'slide',
    'fail': 'fail'
}
| 26.914286 | 75 | 0.724522 | RGB_CLASS_NAMES = [
'kickflip',
'360_kickflip',
'50-50',
'nosegrind',
'boardslide',
'tailslide',
'fail'
]
RGB_CLASS_NAME_TO_IDX = {class_name: idx
for idx, class_name in enumerate(RGB_CLASS_NAMES)}
RGB_N_CLASSES = 7
RGB_FRAME_HEIGHT = 224
RGB_FRAME_WIDTH = 224
CHANNELS = 3
RGB_N_FRAMES = 64
RGB_TRAINING_BATCH_SIZE = 6
RGB_VALIDATION_BATCH_SIZE = 4
RGB_TEST_BATCH_SIZE = 16
AUDIO_CLASS_NAMES = ['air', 'fail', 'grind', 'slide']
N_AUDIO_CLASSES = len(AUDIO_CLASS_NAMES)
SPECTROGRAM_HEIGHT = 224
SPECTROGRAM_WIDTH = 224
AUDIO_TRAINING_BATCH_SIZE = 32
AUDIO_VALIDATION_BATCH_SIZE = 8
AUDIO_TEST_BATCH_SIZE = 32
MODELS_DIR = 'models'
RGB_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_model.h5'
AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/audio_model.h5'
RGB_FROZEN_AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_frozen_audio_model.h5'
RGB_AUDIO_MODEL_FILEPATH = f'{MODELS_DIR}/rgb_audio_model.h5'
VALIDATION_SPLIT = 0.2
VIDEO_TRAINING_VALIDATION_DIR = 'data/training_validation/video'
VIDEO_TRAINING_DIR = 'data/training/video'
VIDEO_VALIDATION_DIR = 'data/validation/video'
VIDEO_TEST_DIR = 'data/test/video'
WAV_TRAINING_DIR = 'data/training/audio/wav'
WAV_VALIDATION_DIR = 'data/validation/audio/wav'
WAV_TEST_DIR = 'data/test/audio/wav'
SPECTROGRAM_TRAINING_DIR = 'data/training/audio/spectrogram'
SPECTROGRAM_VALIDATION_DIR = 'data/validation/audio/spectrogram'
SPECTROGRAM_TEST_DIR = 'data/test/audio/spectrogram'
AUDIO_CLASS_NAMES = [
'air',
'fail',
'grind',
'slide'
]
VIDEO_TO_AUDIO_LABEL_MAPPING = {
'360_kickflip': 'air',
'heelflip': 'air',
'kickflip': 'air',
'nollie_fakie_heelflip': 'air',
'nollie_fakie_kickflip': 'air',
'bs_kickflip': 'air',
'fs_kickflip': 'air',
'50-50': 'grind',
'nosegrind': 'grind',
'boardslide': 'slide',
'tailslide': 'slide',
'fail': 'fail'
}
| true | true |
f728857a6e0c14f66707d36a44fb12b12e721504 | 1,039 | py | Python | pydra/tasks/fsl/utils/slice.py | htwangtw/pydra-fsl | 84b18e32eb181f61780bff75240be7fa05efa637 | [
"Apache-2.0"
] | 1 | 2021-06-17T09:58:06.000Z | 2021-06-17T09:58:06.000Z | pydra/tasks/fsl/utils/slice.py | htwangtw/pydra-fsl | 84b18e32eb181f61780bff75240be7fa05efa637 | [
"Apache-2.0"
] | 16 | 2020-11-03T13:56:12.000Z | 2022-01-31T17:07:13.000Z | pydra/tasks/fsl/utils/slice.py | htwangtw/pydra-fsl | 84b18e32eb181f61780bff75240be7fa05efa637 | [
"Apache-2.0"
] | 4 | 2020-06-16T17:40:37.000Z | 2021-02-18T09:42:48.000Z | from pydra.engine import specs
from pydra import ShellCommandTask
import typing as ty
# Pydra input specification for the ``fslslice`` command line:
# positional argument 0 is the input image, argument 1 the output prefix.
input_fields = [
    (
        "in_file",
        specs.File,
        {
            "help_string": "input filename",
            "argstr": "{in_file}",
            # Input is used read-only, so no working-directory copy is made.
            "copyfile": False,
            "mandatory": True,
            "position": 0,
        },
    ),
    (
        "out_base_name",
        str,
        {"help_string": "outputs prefix", "argstr": "{out_base_name}", "position": 1},
    ),
]
Slice_input_spec = specs.SpecInfo(name="Input", fields=input_fields, bases=(specs.ShellSpec,))
# No extra output fields are declared beyond the ShellOutSpec defaults.
output_fields = []
Slice_output_spec = specs.SpecInfo(
    name="Output", fields=output_fields, bases=(specs.ShellOutSpec,)
)
class Slice(ShellCommandTask):
    """Pydra shell task wrapping FSL's ``fslslice`` command.

    Example
    -------
    >>> task = Slice()
    >>> task.inputs.in_file = "test.nii.gz"
    >>> task.inputs.out_base_name = "sl"
    >>> task.cmdline
    'fslslice test.nii.gz sl'
    """

    # Input/output contracts defined at module level above.
    input_spec = Slice_input_spec
    output_spec = Slice_output_spec
    # Executable name, resolved on PATH at run time.
    executable = "fslslice"
| 23.088889 | 94 | 0.582291 | from pydra.engine import specs
from pydra import ShellCommandTask
import typing as ty
input_fields = [
(
"in_file",
specs.File,
{
"help_string": "input filename",
"argstr": "{in_file}",
"copyfile": False,
"mandatory": True,
"position": 0,
},
),
(
"out_base_name",
str,
{"help_string": "outputs prefix", "argstr": "{out_base_name}", "position": 1},
),
]
Slice_input_spec = specs.SpecInfo(name="Input", fields=input_fields, bases=(specs.ShellSpec,))
output_fields = []
Slice_output_spec = specs.SpecInfo(
name="Output", fields=output_fields, bases=(specs.ShellOutSpec,)
)
class Slice(ShellCommandTask):
input_spec = Slice_input_spec
output_spec = Slice_output_spec
executable = "fslslice"
| true | true |
f7288602befa32a9b16151fefb7ee2dd0c78f067 | 377 | py | Python | python/high-scores/high_scores.py | parkerbxyz/exercism | 2648a2654f067b0f44450ac0663ac49ee270565d | [
"MIT"
] | null | null | null | python/high-scores/high_scores.py | parkerbxyz/exercism | 2648a2654f067b0f44450ac0663ac49ee270565d | [
"MIT"
] | null | null | null | python/high-scores/high_scores.py | parkerbxyz/exercism | 2648a2654f067b0f44450ac0663ac49ee270565d | [
"MIT"
] | null | null | null | from heapq import nlargest
from typing import List
Scores = List[int]
def latest(scores: Scores) -> int:
    """Return the most recent score, i.e. the final entry of *scores*."""
    most_recent = scores[-1]
    return most_recent
def personal_best(scores: Scores) -> int:
    """Return the single highest score ever recorded."""
    best = max(scores)
    return best
def personal_top_three(scores: Scores) -> Scores:
    """Return the three highest scores, best first (fewer if not enough)."""
    top_scores = nlargest(3, scores)
    return top_scores
| 18.85 | 49 | 0.668435 | from heapq import nlargest
from typing import List
Scores = List[int]
def latest(scores: Scores) -> int:
return scores[-1]
def personal_best(scores: Scores) -> int:
return max(scores)
def personal_top_three(scores: Scores) -> Scores:
return nlargest(3, scores)
| true | true |
f72886901723e0cfbb1b95af1f0b53bf4c3d3541 | 9,730 | py | Python | setup.py | skyw/NeMo | c51685e03f52d3428d19b7edccc1bbd0da5d8edb | [
"Apache-2.0"
] | 1 | 2021-06-19T19:27:19.000Z | 2021-06-19T19:27:19.000Z | setup.py | AbdullahMu/NeMo | 3886aa251f7be7c2e43aeb7315afc6b8924228aa | [
"Apache-2.0"
] | null | null | null | setup.py | AbdullahMu/NeMo | 3886aa251f7be7c2e43aeb7315afc6b8924228aa | [
"Apache-2.0"
] | null | null | null | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import codecs
import os
import subprocess
import sys
from distutils import cmd as distutils_cmd
from distutils import log as distutils_log
from itertools import chain
import setuptools
def is_build_action():
    """Return True when the current command line invokes a build-style action.

    Looks at ``sys.argv[1]`` and checks whether it starts with one of the
    known setuptools/distutils command prefixes (so e.g. ``bdist_wheel``
    matches ``bdist``). Returns False when no command argument was given.
    """
    if len(sys.argv) <= 1:
        return False
    build_tokens = ("egg_info", "dist", "bdist", "sdist", "install", "build", "develop", "style", "clean")
    # str.startswith accepts a tuple of prefixes, so no explicit loop or
    # materialized list inside any() is needed; the boolean is returned
    # directly instead of via a redundant if/else.
    return sys.argv[1].startswith(build_tokens)
if is_build_action():
os.environ['NEMO_PACKAGE_BUILDING'] = 'True'
from nemo.package_info import (
__contact_emails__,
__contact_names__,
__description__,
__download_url__,
__homepage__,
__keywords__,
__license__,
__package_name__,
__repository_url__,
__version__,
)
if os.path.exists('nemo/README.md'):
with open("nemo/README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
elif os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', 'utf-8',
).read()
long_description_content_type = "text/x-rst"
else:
long_description = 'See ' + __homepage__
###############################################################################
# Dependency Loading #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
def req_file(filename, folder="requirements"):
    """Read a requirements file and return its lines with surrounding
    whitespace (including trailing newlines) stripped."""
    path = os.path.join(folder, filename)
    with open(path) as requirements:
        # Iterating the handle yields one line at a time, like readlines().
        return [line.strip() for line in requirements]
install_requires = req_file("requirements.txt")
extras_require = {
# User packages
'test': req_file("requirements_test.txt"),
# Collections Packages
'asr': req_file("requirements_asr.txt"),
'cv': req_file("requirements_cv.txt"),
'nlp': req_file("requirements_nlp.txt"),
'tts': req_file("requirements_tts.txt"),
}
extras_require['all'] = list(chain(extras_require.values()))
# TTS depends on ASR
extras_require['tts'] = list(chain([extras_require['tts'], extras_require['asr']]))
tests_requirements = extras_require["test"]
########################## VERSION MISMATCH PATCH #############################
# REMOVE AFTER 21.03 Container is released !
try:
    import torch

    version = torch.__version__
    # Pin torch to the exact version already present in the environment.
    SUPPORTED_TORCH_VERSION = f"torch=={version}"
    if 'a' in version or 'b' in version:
        # Alpha/beta (git-hash) torch build: force the matching
        # pytorch-lightning release known to work with it.
        SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning==1.2.2"
    else:
        SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning>=1.2.3"
except (ImportError, ModuleNotFoundError):
    # No torch installed yet: let pip resolve the latest torch and lightning.
    SUPPORTED_TORCH_VERSION = "torch"
    SUPPORTED_PYTORCH_LIGHTNING = "pytorch-lightning>=1.2.3"
# Rewrite the requirements list, swapping in the pinned torch/lightning specs.
# NOTE(review): `ix` is unused; enumerate() could be dropped.
install_requires_buffer = []
for ix, line in enumerate(install_requires):
    if 'lightning' in line:
        install_requires_buffer.append(SUPPORTED_PYTORCH_LIGHTNING)
    elif 'torch' in line:
        install_requires_buffer.append(SUPPORTED_TORCH_VERSION)
        # Pytorch 1.7.1 must use torchtext==0.8.0, torchaudio==0.7.2 and torchvision==0.8.2.
        # NOTE(review): this branch looks unreachable — SUPPORTED_TORCH_VERSION
        # is set above to either f"torch=={version}" or "torch", never the
        # literal "torch<=1.7.1". Confirm intent before relying on these pins.
        if SUPPORTED_TORCH_VERSION == "torch<=1.7.1":
            install_requires_buffer.append("torchvision==0.8.2")
            install_requires_buffer.append("torchaudio==0.7.2")
            install_requires_buffer.append("torchtext==0.8.0")
    else:
        install_requires_buffer.append(line)
# Override install requires with the patched list.
install_requires = install_requires_buffer
###############################################################################
# Code style checkers #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class StyleCommand(distutils_cmd.Command):
    """``python setup.py style`` command: check (or fix) formatting with isort and black."""

    __LINE_WIDTH = 119
    __ISORT_BASE = (
        'isort '
        # These flags make isort's output compatible with black.
        '--multi-line=3 --trailing-comma --force-grid-wrap=0 '
        f'--use-parentheses --line-width={__LINE_WIDTH} -rc -ws'
    )
    __BLACK_BASE = f'black --skip-string-normalization --line-length={__LINE_WIDTH}'
    description = 'Checks overall project code style.'
    user_options = [
        ('scope=', None, 'Folder of file to operate within.'),
        ('fix', None, 'True if tries to fix issues in-place.'),
    ]

    def __call_checker(self, base_command, scope, check):
        """Run one formatter over *scope* and return its process exit code."""
        command = list(base_command)
        command.append(scope)
        if check:
            # --check/--diff report issues without modifying files.
            command.extend(['--check', '--diff'])
        self.announce(
            msg='Running command: %s' % str(' '.join(command)), level=distutils_log.INFO,
        )
        return_code = subprocess.call(command)
        return return_code

    def _isort(self, scope, check):
        """Run isort; returns its exit code."""
        return self.__call_checker(base_command=self.__ISORT_BASE.split(), scope=scope, check=check,)

    def _black(self, scope, check):
        """Run black; returns its exit code."""
        return self.__call_checker(base_command=self.__BLACK_BASE.split(), scope=scope, check=check,)

    def _pass(self):
        # Green "PASS" via ANSI escape codes.
        self.announce(msg='\033[32mPASS\x1b[0m', level=distutils_log.INFO)

    def _fail(self):
        # Red "FAIL" via ANSI escape codes.
        self.announce(msg='\033[31mFAIL\x1b[0m', level=distutils_log.INFO)

    # noinspection PyAttributeOutsideInit
    def initialize_options(self):
        """Default option values: whole tree, check-only (no in-place fixes)."""
        self.scope = '.'
        self.fix = ''

    def run(self):
        """Run both formatters; exit non-zero if either reports problems."""
        scope, check = self.scope, not self.fix
        isort_return = self._isort(scope=scope, check=check)
        black_return = self._black(scope=scope, check=check)
        if isort_return == 0 and black_return == 0:
            self._pass()
        else:
            self._fail()
            # Propagate the first non-zero formatter status as the exit code.
            exit(isort_return if isort_return != 0 else black_return)

    def finalize_options(self):
        """No post-processing of options is required."""
        pass
###############################################################################
setuptools.setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Supported python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Additional Setting
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
packages=setuptools.find_packages(),
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_requirements,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# $ pip install -e ".[all]"
# $ pip install nemo_toolkit[all]
extras_require=extras_require,
# Add in any packaged data.
include_package_data=True,
zip_safe=False,
# PyPI package information.
keywords=__keywords__,
# Custom commands.
cmdclass={'style': StyleCommand},
)
| 33.902439 | 107 | 0.633505 |
import codecs
import os
import subprocess
import sys
from distutils import cmd as distutils_cmd
from distutils import log as distutils_log
from itertools import chain
import setuptools
def is_build_action():
if len(sys.argv) <= 1:
return False
BUILD_TOKENS = ["egg_info", "dist", "bdist", "sdist", "install", "build", "develop", "style", "clean"]
if any([sys.argv[1].startswith(x) for x in BUILD_TOKENS]):
return True
else:
return False
if is_build_action():
os.environ['NEMO_PACKAGE_BUILDING'] = 'True'
from nemo.package_info import (
__contact_emails__,
__contact_names__,
__description__,
__download_url__,
__homepage__,
__keywords__,
__license__,
__package_name__,
__repository_url__,
__version__,
)
if os.path.exists('nemo/README.md'):
with open("nemo/README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
elif os.path.exists('README.rst'):
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', 'utf-8',
).read()
long_description_content_type = "text/x-rst"
else:
long_description = 'See ' + __homepage__
| true | true |
f72888e274e6c29826609f05c71de716386bfb11 | 233 | py | Python | tenth/tenth/apps/gathering/serializers.py | TanDeemo/Tenth | 52f721d4433edfa336e989e6eeedd288d4e38674 | [
"MIT"
] | null | null | null | tenth/tenth/apps/gathering/serializers.py | TanDeemo/Tenth | 52f721d4433edfa336e989e6eeedd288d4e38674 | [
"MIT"
] | null | null | null | tenth/tenth/apps/gathering/serializers.py | TanDeemo/Tenth | 52f721d4433edfa336e989e6eeedd288d4e38674 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from gathering.models import Gathering
class GatheringSerializer(serializers.ModelSerializer):
    """Serializer for the Gathering (activity/event) model."""

    class Meta:
        # Expose every field of the Gathering model.
        model = Gathering
        fields = '__all__'
| 21.181818 | 56 | 0.690987 | from rest_framework import serializers
from gathering.models import Gathering
class GatheringSerializer(serializers.ModelSerializer):
class Meta:
model = Gathering
fields = '__all__'
| true | true |
f72888e863e4c1d7f5e86f2635a47aaa2c4221cf | 332 | py | Python | src/graph_generator/typeparsing/__init__.py | carolemieux/typilus-action | 0e8627cf6db38d2ec153b927ae82c156a865b64f | [
"MIT"
] | 41 | 2020-05-18T21:00:44.000Z | 2022-01-26T23:06:58.000Z | src/graph_generator/typeparsing/__init__.py | carolemieux/typilus-action | 0e8627cf6db38d2ec153b927ae82c156a865b64f | [
"MIT"
] | 7 | 2020-05-18T10:07:12.000Z | 2021-09-28T12:17:37.000Z | src/graph_generator/typeparsing/__init__.py | carolemieux/typilus-action | 0e8627cf6db38d2ec153b927ae82c156a865b64f | [
"MIT"
] | 2 | 2020-06-10T11:15:04.000Z | 2020-06-20T11:17:48.000Z | from .visitor import TypeAnnotationVisitor
from .nodes import *
from .aliasreplacement import AliasReplacementVisitor
from .erasure import EraseOnceTypeRemoval
from .inheritancerewrite import DirectInheritanceRewriting
from .pruneannotations import PruneAnnotationVisitor
from .rewriterulevisitor import RewriteRuleVisitor
| 36.888889 | 59 | 0.864458 | from .visitor import TypeAnnotationVisitor
from .nodes import *
from .aliasreplacement import AliasReplacementVisitor
from .erasure import EraseOnceTypeRemoval
from .inheritancerewrite import DirectInheritanceRewriting
from .pruneannotations import PruneAnnotationVisitor
from .rewriterulevisitor import RewriteRuleVisitor
| true | true |
f728890a0d42d2f6f6382489586517b38e0aa9d2 | 6,731 | py | Python | test/util/ogfuncoin-util-test.py | ogfuncoin/ogfuncoin | 18d00bc1d93335c86ae6f2971321e93e627ae570 | [
"MIT"
] | null | null | null | test/util/ogfuncoin-util-test.py | ogfuncoin/ogfuncoin | 18d00bc1d93335c86ae6f2971321e93e627ae570 | [
"MIT"
] | null | null | null | test/util/ogfuncoin-util-test.py | ogfuncoin/ogfuncoin | 18d00bc1d93335c86ae6f2971321e93e627ae570 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for ogfuncoin utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
    """Entry point: load config.ini, configure logging and run the test vectors."""
    config = configparser.ConfigParser()
    # Preserve key case (optionxform lower-cases keys by default).
    config.optionxform = str
    # NOTE(review): readfp() is deprecated on py3 in favour of read_file(),
    # but is presumably kept for the py2 ConfigParser fallback — confirm.
    config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
    env_conf = dict(config.items('environment'))
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    verbose = args.verbose
    # Verbose shows every PASS/FAIL line; otherwise only errors are printed.
    if verbose:
        level = logging.DEBUG
    else:
        level = logging.ERROR
    formatter = '%(asctime)s - %(levelname)s - %(message)s'
    # Add the format/level to the logger.
    logging.basicConfig(format=formatter, level=level)
    # Run every test case found in the JSON test-vector file.
    bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "ogfuncoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
    """Load the JSON test-vector file, run every test case, report results.

    Exits the process with status 0 when all cases pass, 1 otherwise.
    """
    input_filename = os.path.join(testDir, input_basename)
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle from a bare open().read()).
    with open(input_filename, encoding="utf8") as input_file:
        input_data = json.loads(input_file.read())
    failed_testcases = []
    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        # Catch Exception instead of a bare `except:` so KeyboardInterrupt /
        # SystemExit still abort the run rather than being recorded as
        # ordinary test failures.
        except Exception:
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])
    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.
    """
    # Build the command line: executable under BUILDDIR/src plus its arguments.
    execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
    execargs = testObj['args']
    execrun = [execprog] + execargs
    # Read the stdin data (if there is any).
    # NOTE(review): file handles opened here and below are never closed
    # explicitly; consider `with open(...)` blocks.
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = os.path.join(testDir, testObj["input"])
        inputData = open(filename, encoding="utf8").read()
        stdinCfg = subprocess.PIPE
    # Read the expected output data (if there is any).
    outputFn = None
    outputData = None
    outputType = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][1:]  # output type from file extension (determines how to compare)
        try:
            outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
        # NOTE(review): bare except also swallows KeyboardInterrupt; an
        # `except OSError` would be more precise here.
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception
        if not outputType:
            logging.error("Output file %s does not have a file extension" % outputFn)
            raise Exception
    # Run the program under test, capturing stdout/stderr as text.
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise
    if outputData:
        data_mismatch, formatting_mismatch = False, False
        # Parse command output and expected output with the same parser.
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Compare the parsed (semantic) data.
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Compare the raw text (formatting); log a context diff on mismatch.
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True
        assert not data_mismatch and not formatting_mismatch
    # Compare the return code to the expected return code (default 0).
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception
    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, ogfuncoin-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception
def parse_output(a, fmt):
    """Parse the output according to specified format.

    Raise an error if the output can't be parsed.
    """
    # Dispatch table keeps the supported comparison formats in one place.
    parsers = {
        'json': json.loads,                                  # compare parsed data
        'hex': lambda text: binascii.a2b_hex(text.strip()),  # compare binary data
    }
    parser = parsers.get(fmt)
    if parser is None:
        raise NotImplementedError("Don't know how to compare %s" % fmt)
    return parser(a)
if __name__ == '__main__':
main()
| 37.187845 | 125 | 0.63735 |
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "ogfuncoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
    """Run every test case described in the JSON vector file, then exit.

    Args:
        testDir: directory holding the JSON test vector and its data files.
        input_basename: name of the JSON file listing the test cases.
        buildenv: mapping with build settings (BUILDDIR, EXEEXT, ...).

    Exits the process with status 1 if any test case failed, else 0.
    """
    input_filename = os.path.join(testDir, input_basename)
    # Close the vector file promptly instead of relying on GC.
    with open(input_filename, encoding="utf8") as f:
        input_data = json.load(f)
    failed_testcases = []
    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        # Catch Exception, not everything: a bare except would also swallow
        # KeyboardInterrupt/SystemExit and record them as test failures.
        except Exception:
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])
    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Run a single test case: execute the binary described by testObj and
    check its stdout, return code, and stderr against the expectations.

    testObj keys used: "exec", "args", optional "input" (stdin data file),
    "output_cmp" (expected-output file whose extension selects the
    comparison format), "return_code" and "error_txt".
    Raises on any mismatch; a normal return means the test passed.
    """
    # Build the command line: binary from the build tree plus the test's args.
    execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
    execargs = testObj['args']
    execrun = [execprog] + execargs
    # Optional stdin payload read from a data file.
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = os.path.join(testDir, testObj["input"])
        inputData = open(filename, encoding="utf8").read()
        stdinCfg = subprocess.PIPE
    # Optional expected-output file; its extension (json/hex) selects the
    # comparison format used by parse_output().
    outputFn = None
    outputData = None
    outputType = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][1:]
        try:
            outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception
        if not outputType:
            logging.error("Output file %s does not have a file extension" % outputFn)
            raise Exception
    # Run the binary and capture stdout/stderr as text.
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise
    if outputData:
        data_mismatch, formatting_mismatch = False, False
        # Parse both actual and expected output into comparable objects.
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Semantic comparison of the parsed data.
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Byte-for-byte comparison catches formatting-only differences.
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True
        assert not data_mismatch and not formatting_mismatch
    # Check the process exit status (defaults to expecting success).
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception
    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Ideally we would compare stderr exactly and assert it is empty
        # when no errors are expected.  However, ogfuncoin-tx emits DISPLAY
        # errors when running as a windows application on linux through
        # wine, so we only assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception
def parse_output(a, fmt):
    """Decode tool output *a* for comparison: 'json' yields Python
    objects, 'hex' yields bytes; any other format is rejected."""
    decoders = {
        'json': json.loads,                             # structural comparison
        'hex': lambda s: binascii.a2b_hex(s.strip()),   # raw byte comparison
    }
    if fmt not in decoders:
        raise NotImplementedError("Don't know how to compare %s" % fmt)
    return decoders[fmt](a)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| true | true |
f72889b91d938b6d23210c99b7478a6dee176746 | 35,328 | py | Python | gui/kivy/main_window.py | gpdionisio/electrum-pivx | d17e7f3f295da745ec4c9839624ec03cf458d524 | [
"MIT"
] | 1 | 2019-01-11T01:05:47.000Z | 2019-01-11T01:05:47.000Z | gui/kivy/main_window.py | gpdionisio/electrum-pivx | d17e7f3f295da745ec4c9839624ec03cf458d524 | [
"MIT"
] | null | null | null | gui/kivy/main_window.py | gpdionisio/electrum-pivx | d17e7f3f295da745ec4c9839624ec03cf458d524 | [
"MIT"
] | 6 | 2018-08-30T18:32:58.000Z | 2019-10-20T02:38:31.000Z | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'PIV')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updation a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show a error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show a Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show a Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
| 37.663113 | 127 | 0.615914 | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
notification = app = ref = None
util = False
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'PIV')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updation a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
run_hook('init_kivy', self)
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
self.switch_to('history')
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
if keycode in (24, 25):
self.stop()
elif keycode == 27:
self.update_wallet()
elif keycode == 112:
pass
elif keycode == 117:
pass
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
if key in (319, 282):
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
self.funds_error = False
self.screens = {}
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
| true | true |
f72889e6429a67c26e47ede38f20bf0484fc1a19 | 4,695 | py | Python | slackups/emoji.py | davr/slackups | 21a44f00f2b337716204df2acd8365f5480e13e7 | [
"MIT"
] | 1 | 2016-07-29T17:50:16.000Z | 2016-07-29T17:50:16.000Z | slackups/emoji.py | davr/slackups | 21a44f00f2b337716204df2acd8365f5480e13e7 | [
"MIT"
] | null | null | null | slackups/emoji.py | davr/slackups | 21a44f00f2b337716204df2acd8365f5480e13e7 | [
"MIT"
] | null | null | null | import json
import re
import unicodedata
import string
import hashlib
def smileys_to_ascii(s):
    """Convert emoji in ``s`` into plain ASCII.

    Characters found in ``SMILEYS`` become their ASCII-art form (a space is
    inserted between two consecutive smileys so they stay readable).  Any
    other non-ASCII symbol character becomes a ``:name:`` shortcode derived
    from its Unicode name; characters without a name pass through unchanged.
    """
    res = []
    for i, c in enumerate(s):
        if c in SMILEYS:
            res.append(SMILEYS[c])
            if i < len(s) - 1 and s[i + 1] in SMILEYS:  # separate adjacent smileys
                res.append(' ')
        elif ord(c) > 128 and unicodedata.category(c)[0] == 'S':
            # unicodedata.name raises ValueError for unnamed characters;
            # catch only that rather than a bare except that hides real bugs.
            try:
                res.append(':' + unicodedata.name(c).lower().replace(' ', '-') + ':')
            except ValueError:
                res.append(c)
        else:
            res.append(c)
    return ''.join(res)
def ascii_to_smileys(s):
    """Convert ASCII smileys and ``:name:`` shortcodes in ``s`` into emoji.

    The string is processed word by word: words listed in ``ASCIIS`` become
    their emoji, ``:name:`` words are resolved through the Unicode name
    table, and everything else is kept as-is.
    """
    res = []
    for word in s.split(' '):
        if word in ASCIIS:
            res.append(ASCIIS[word])
        # The len >= 2 guard fixes an IndexError on empty words (produced by
        # consecutive spaces or an empty input) and keeps a lone ':' from
        # being treated as a shortcode.
        elif len(word) >= 2 and word[0] == ':' and word[-1] == ':':
            try:
                res.append(unicodedata.lookup(word[1:-1].upper().replace('-', ' ')))
            except KeyError:  # unknown Unicode name: keep the literal text
                res.append(word)
        else:
            res.append(word)
    # Re-join with spaces: the previous ''.join silently glued every word
    # together ("hello world" -> "helloworld").
    return ' '.join(res)
def emoji_to_shortcode(message):
    """Replace every non-ASCII symbol character in ``message`` with a
    ``:unicode-name:`` shortcode (lower-cased, spaces turned into dashes).

    Characters without a Unicode name are passed through unchanged instead
    of raising ValueError.
    """
    res = []
    for c in message:  # index was unused; iterate characters directly
        if ord(c) > 128 and unicodedata.category(c)[0] == 'S':
            try:
                res.append(':' + unicodedata.name(c).lower().replace(' ', '-') + ':')
            except ValueError:  # character has no name in the UCD
                res.append(c)
        else:
            res.append(c)
    return ''.join(res)
def shortcode_to_emoji(message):
    """Replace ``:name:`` shortcodes (per the gemoji table) with emoji.

    The message is split on ':'; segments that are known gemoji names are
    emitted as their emoji and the delimiting colons around them are
    consumed, while colons between two ordinary segments are re-emitted.
    """
    pieces = []
    colon_pending = False
    for segment in message.split(":"):
        if segment in name_to_emoji:
            pieces.append(name_to_emoji[segment])
            colon_pending = False
        else:
            if colon_pending:
                pieces.append(':')
            else:
                colon_pending = True
            pieces.append(segment)
    return ''.join(pieces)
# Load the gemoji table shipped with the project and build a reverse
# lookup from shortcode name to emoji character.
with open('emoji/gemoji.js', encoding='utf-8') as fp:
    gemoji = json.load(fp)

name_to_emoji = {
    name: emoji
    for emoji, data in gemoji.items()
    for name in data['names']
}
# Emoji character -> ASCII-art replacement, used by smileys_to_ascii.
# Fixes vs. the previous literal:
#  * 0x1f62d mapped to ':''(' which is implicit string concatenation of
#    ':' and '(' and silently evaluated to ':(' -- the intended value is
#    ":''(" (the form ASCIIS maps back to 0x1f62d).
#  * 0x1f613 and 0x1f626 were listed twice; the earlier entries ('o_o' and
#    'D:') were silently shadowed by the later ones, so only the effective
#    (later) values are kept here.
SMILEYS = {chr(k): v for k, v in {
    0x263a: ':)',
    0x1f494: '</3',
    0x1f49c: '<3',
    0x2764: '<3',
    0x1f60a: '=D',
    0x1f600: ':D',
    0x1f601: '^_^',
    0x1f602: ':\'D',
    0x1f603: ':D',
    0x1f604: ':D',
    0x1f605: ':D',
    0x1f606: ':D',
    0x1f607: '0:)',
    0x1f608: '}:)',
    0x1f609: ';)',
    0x1f60e: '8)',
    0x1f610: ':|',
    0x1f611: '-_-',
    0x1f614: 'u_u',
    0x1f615: ':/',
    0x1f616: ':s',
    0x1f617: ':*',
    0x1f618: ';*',
    0x1f61B: ':P',
    0x1f61C: ';P',
    0x1f61E: ':(',
    0x1f621: '>:(',
    0x1f622: ';_;',
    0x1f623: '>_<',
    0x1f62E: ':o',
    0x1f632: ':O',
    0x1f635: 'x_x',
    0x1f638: ':3',
    0x1f620: '>:(',
    0x1f62c: '>:(',
    0x1f62a: '(-_-)zzz',
    0x1f634: '(-_-).zZ',
    0x1f4a4: '.zZ',
    0x1f624: '>:(',
    0x1f625: 'D:',
    0x1f627: 'D:',
    0x1f619: ':*',
    0x1f61a: ':*',
    0x1f612: ':|',
    0x1f636: ':|',
    0x1f613: ':O',
    0x1f630: ':O',
    0x1f628: 'o_o',
    0x1f631: 'O_O',
    0x1f62d: ":''(",
    0x1f61d: ';P',
    0x1f64d: '>:|',
    0x1f626: '>:O',
    0x1f61f: ':/',
    0x2639: ':(',
    0x1f60b: ';P',
    0x1f60d: '<3<3<3',
    0x1f642: ':)',
    0x1f917: ':hug:',
    0x1f914: ':/ hmm',
    0x1f644: '(e_e)',
    0x1f62f: ':-o',
    0x1f62b: "'>_<",
    0x1f913: 'B-)',
    0x1f641: ':(',
    0x1f629: '>_<',
    }.items()}
# ASCII smiley / shortcode -> emoji, used by ascii_to_smileys.
# Written directly (ascii -> chr(codepoint)) instead of inverting a
# codepoint-keyed dict: the old inverted form used the codepoint as the
# literal's key, so aliases sharing a codepoint collided and all but the
# last were silently dropped (';_;' and ';(' for U+1F622, '>:O' for
# U+1F626 never made it into the table).  Every alias listed in the
# original literal is preserved here.
ASCIIS = {
    '(-_-)zzz': chr(0x1f62a),
    '(-_-).zZ': chr(0x1f634),
    '.zZ': chr(0x1f4a4),
    'O_O': chr(0x1f631),
    ":''(": chr(0x1f62d),
    '>:|': chr(0x1f64d),
    '>:O': chr(0x1f626),
    ':heart:': chr(0x2764),
    ':)': chr(0x263a),
    '</3': chr(0x1f494),
    '<3': chr(0x1f49c),
    '=D': chr(0x1f60a),
    ':D': chr(0x1f600),
    '^_^': chr(0x1f601),
    ":'D": chr(0x1f602),
    '0:)': chr(0x1f607),
    '}:)': chr(0x1f608),
    ';)': chr(0x1f609),
    '8)': chr(0x1f60e),
    ':|': chr(0x1f610),
    '-_-': chr(0x1f611),
    'o_o': chr(0x1f613),
    'u_u': chr(0x1f614),
    ':/': chr(0x1f615),
    ':s': chr(0x1f616),
    ':*': chr(0x1f617),
    ';*': chr(0x1f618),
    ':P': chr(0x1f61b),
    ';P': chr(0x1f61c),
    ':(': chr(0x1f61e),
    '>:(': chr(0x1f621),
    ';_;': chr(0x1f622),
    ';(': chr(0x1f622),
    ":'(": chr(0x1f622),
    '>_<': chr(0x1f623),
    'D:': chr(0x1f626),
    ':o': chr(0x1f62e),
    ':O': chr(0x1f632),
    'x_x': chr(0x1f635),
    ':3': chr(0x1f638),
    ':hug:': chr(0x1f917),
    '(e_e)': chr(0x1f644),
}
| 23.712121 | 79 | 0.405112 | import json
import re
import unicodedata
import string
import hashlib
def smileys_to_ascii(s):
res = []
for i, c in enumerate(s):
if c in SMILEYS:
res.append(SMILEYS[c])
if i < len(s) - 1 and s[i + 1] in SMILEYS:
res.append(' ')
elif ord(c) > 128 and unicodedata.category(c)[0] == 'S':
try:
name = ':'+unicodedata.name(c).lower().replace(' ','-')+':'
res.append(name)
except:
res.append(c)
else:
res.append(c)
return ''.join(res)
def ascii_to_smileys(s):
res = []
words = s.split(' ')
for word in words:
if word in ASCIIS:
res.append(ASCIIS[word])
elif word[0]==':' and word[-1]==':':
try:
emoji = unicodedata.lookup(word[1:-1].upper().replace('-',' '))
res.append(emoji)
except:
res.append(word)
else:
res.append(word)
return ''.join(res)
def emoji_to_shortcode(message):
res = []
for i, c in enumerate(message):
if ord(c) > 128 and unicodedata.category(c)[0] == 'S':
name = ':'+unicodedata.name(c).lower().replace(' ','-')+':'
res.append(name)
else:
res.append(c)
return ''.join(res)
def shortcode_to_emoji(message):
parts = message.split(":")
out = ""
c = False
for part in parts:
if part in name_to_emoji:
out += name_to_emoji[part]
c = False
else:
if c:
out += ':'
else:
c = True
out += part
return out
with open('emoji/gemoji.js', 'rb') as fp:
data = fp.read()
data = data.decode('utf-8')
gemoji = json.loads(data)
name_to_emoji = {}
for emoji, data in gemoji.items():
for name in data['names']:
name_to_emoji[name] = emoji
SMILEYS = {chr(k): v for k, v in {
0x263a: ':)',
0x1f494: '</3',
0x1f49c: '<3',
0x2764: '<3',
0x1f60a: '=D',
0x1f600: ':D',
0x1f601: '^_^',
0x1f602: ':\'D',
0x1f603: ':D',
0x1f604: ':D',
0x1f605: ':D',
0x1f606: ':D',
0x1f607: '0:)',
0x1f608: '}:)',
0x1f609: ';)',
0x1f60e: '8)',
0x1f610: ':|',
0x1f611: '-_-',
0x1f613: 'o_o',
0x1f614: 'u_u',
0x1f615: ':/',
0x1f616: ':s',
0x1f617: ':*',
0x1f618: ';*',
0x1f61B: ':P',
0x1f61C: ';P',
0x1f61E: ':(',
0x1f621: '>:(',
0x1f622: ';_;',
0x1f623: '>_<',
0x1f626: 'D:',
0x1f62E: ':o',
0x1f632: ':O',
0x1f635: 'x_x',
0x1f638: ':3',
0x1f620: '>:(',
0x1f62c: '>:(',
0x1f62a: '(-_-)zzz',
0x1f634: '(-_-).zZ',
0x1f4a4: '.zZ',
0x1f624: '>:(',
0x1f625: 'D:',
0x1f627: 'D:',
0x1f619: ':*',
0x1f61a: ':*',
0x1f612: ':|',
0x1f636: ':|',
0x1f613: ':O',
0x1f630: ':O',
0x1f628: 'o_o',
0x1f631: 'O_O',
0x1f62d: ':''(',
0x1f61d: ';P',
0x1f64d: '>:|',
0x1f626: '>:O',
0x1f61f: ':/',
0x2639: ':(',
0x1f60b: ';P',
0x1f60d: '<3<3<3',
0x1f642: ':)',
0x1f917: ':hug:',
0x1f914: ':/ hmm',
0x1f644: '(e_e)',
0x1f62f: ':-o',
0x1f62b: "'>_<",
0x1f913: 'B-)',
0x1f641: ':(',
0x1f629: '>_<',
}.items()}
ASCIIS = {v: chr(k) for k, v in {
0x1f62a: '(-_-)zzz',
0x1f634: '(-_-).zZ',
0x1f4a4: '.zZ',
0x1f631: 'O_O',
0x1f62d: ":''(",
0x1f64d: '>:|',
0x1f626: '>:O',
0x2764: ':heart:',
0x263a: ':)',
0x1f494: '</3',
0x1f49c: '<3',
0x1f60a: '=D',
0x1f600: ':D',
0x1f601: '^_^',
0x1f602: ':\'D',
0x1f607: '0:)',
0x1f608: '}:)',
0x1f609: ';)',
0x1f60e: '8)',
0x1f610: ':|',
0x1f611: '-_-',
0x1f613: 'o_o',
0x1f614: 'u_u',
0x1f615: ':/',
0x1f616: ':s',
0x1f617: ':*',
0x1f618: ';*',
0x1f61B: ':P',
0x1f61C: ';P',
0x1f61e: ':(',
0x1f621: '>:(',
0x1f622: ';_;',
0x1f622: ';(',
0x1f622: ":'(",
0x1f623: '>_<',
0x1f626: 'D:',
0x1f62E: ':o',
0x1f632: ':O',
0x1f635: 'x_x',
0x1f638: ':3',
0x1f917: ':hug:',
0x1f644: '(e_e)',
}.items()}
| true | true |
f7288b6c97be42cbd408fd69733e079523f6e671 | 10,446 | py | Python | tests/MyGame/MonsterExtra.py | tsturm/flatbuffers | c1daa6ba0cda58f53e1cda35e0be26c55f5fbcbd | [
"Apache-2.0"
] | null | null | null | tests/MyGame/MonsterExtra.py | tsturm/flatbuffers | c1daa6ba0cda58f53e1cda35e0be26c55f5fbcbd | [
"Apache-2.0"
] | null | null | null | tests/MyGame/MonsterExtra.py | tsturm/flatbuffers | c1daa6ba0cda58f53e1cda35e0be26c55f5fbcbd | [
"Apache-2.0"
] | 2 | 2020-09-14T08:16:47.000Z | 2021-01-15T10:26:43.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: MyGame
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class MonsterExtra(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MonsterExtra()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMonsterExtra(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def MonsterExtraBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x45", size_prefixed=size_prefixed)
# MonsterExtra
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# MonsterExtra
def D0(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('nan')
# MonsterExtra
def D1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('nan')
# MonsterExtra
def D2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('inf')
# MonsterExtra
def D3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('-inf')
# MonsterExtra
def F0(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('nan')
# MonsterExtra
def F1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('nan')
# MonsterExtra
def F2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
# MonsterExtra
def F3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('-inf')
# MonsterExtra
def Dvec(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# MonsterExtra
def DvecAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# MonsterExtra
def DvecLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
# MonsterExtra
def DvecIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
return o == 0
# MonsterExtra
def Fvec(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# MonsterExtra
def FvecAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# MonsterExtra
def FvecLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# MonsterExtra
def FvecIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
def MonsterExtraStart(builder): builder.StartObject(11)
def Start(builder):
return MonsterExtraStart(builder)
def MonsterExtraAddD0(builder, d0): builder.PrependFloat64Slot(0, d0, float('nan'))
def AddD0(builder, d0):
return MonsterExtraAddD0(builder, d0)
def MonsterExtraAddD1(builder, d1): builder.PrependFloat64Slot(1, d1, float('nan'))
def AddD1(builder, d1):
return MonsterExtraAddD1(builder, d1)
def MonsterExtraAddD2(builder, d2): builder.PrependFloat64Slot(2, d2, float('inf'))
def AddD2(builder, d2):
return MonsterExtraAddD2(builder, d2)
def MonsterExtraAddD3(builder, d3): builder.PrependFloat64Slot(3, d3, float('-inf'))
def AddD3(builder, d3):
return MonsterExtraAddD3(builder, d3)
def MonsterExtraAddF0(builder, f0): builder.PrependFloat32Slot(4, f0, float('nan'))
def AddF0(builder, f0):
return MonsterExtraAddF0(builder, f0)
def MonsterExtraAddF1(builder, f1): builder.PrependFloat32Slot(5, f1, float('nan'))
def AddF1(builder, f1):
return MonsterExtraAddF1(builder, f1)
def MonsterExtraAddF2(builder, f2): builder.PrependFloat32Slot(6, f2, float('inf'))
def AddF2(builder, f2):
return MonsterExtraAddF2(builder, f2)
def MonsterExtraAddF3(builder, f3): builder.PrependFloat32Slot(7, f3, float('-inf'))
def AddF3(builder, f3):
return MonsterExtraAddF3(builder, f3)
def MonsterExtraAddDvec(builder, dvec): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(dvec), 0)
def AddDvec(builder, dvec):
return MonsterExtraAddDvec(builder, dvec)
def MonsterExtraStartDvecVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def StartDvecVector(builder, numElems):
return MonsterExtraStartDvecVector(builder, numElems)
def MonsterExtraAddFvec(builder, fvec): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(fvec), 0)
def AddFvec(builder, fvec):
return MonsterExtraAddFvec(builder, fvec)
def MonsterExtraStartFvecVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def StartFvecVector(builder, numElems):
return MonsterExtraStartFvecVector(builder, numElems)
def MonsterExtraEnd(builder): return builder.EndObject()
def End(builder):
return MonsterExtraEnd(builder)
try:
from typing import List
except:
pass
class MonsterExtraT(object):
# MonsterExtraT
def __init__(self):
self.d0 = float('nan') # type: float
self.d1 = float('nan') # type: float
self.d2 = float('inf') # type: float
self.d3 = float('-inf') # type: float
self.f0 = float('nan') # type: float
self.f1 = float('nan') # type: float
self.f2 = float('inf') # type: float
self.f3 = float('-inf') # type: float
self.dvec = None # type: List[float]
self.fvec = None # type: List[float]
@classmethod
def InitFromBuf(cls, buf, pos):
monsterExtra = MonsterExtra()
monsterExtra.Init(buf, pos)
return cls.InitFromObj(monsterExtra)
@classmethod
def InitFromObj(cls, monsterExtra):
x = MonsterExtraT()
x._UnPack(monsterExtra)
return x
# MonsterExtraT
def _UnPack(self, monsterExtra):
if monsterExtra is None:
return
self.d0 = monsterExtra.D0()
self.d1 = monsterExtra.D1()
self.d2 = monsterExtra.D2()
self.d3 = monsterExtra.D3()
self.f0 = monsterExtra.F0()
self.f1 = monsterExtra.F1()
self.f2 = monsterExtra.F2()
self.f3 = monsterExtra.F3()
if not monsterExtra.DvecIsNone():
if np is None:
self.dvec = []
for i in range(monsterExtra.DvecLength()):
self.dvec.append(monsterExtra.Dvec(i))
else:
self.dvec = monsterExtra.DvecAsNumpy()
if not monsterExtra.FvecIsNone():
if np is None:
self.fvec = []
for i in range(monsterExtra.FvecLength()):
self.fvec.append(monsterExtra.Fvec(i))
else:
self.fvec = monsterExtra.FvecAsNumpy()
# MonsterExtraT
def Pack(self, builder):
if self.dvec is not None:
if np is not None and type(self.dvec) is np.ndarray:
dvec = builder.CreateNumpyVector(self.dvec)
else:
MonsterExtraStartDvecVector(builder, len(self.dvec))
for i in reversed(range(len(self.dvec))):
builder.PrependFloat64(self.dvec[i])
dvec = builder.EndVector()
if self.fvec is not None:
if np is not None and type(self.fvec) is np.ndarray:
fvec = builder.CreateNumpyVector(self.fvec)
else:
MonsterExtraStartFvecVector(builder, len(self.fvec))
for i in reversed(range(len(self.fvec))):
builder.PrependFloat32(self.fvec[i])
fvec = builder.EndVector()
MonsterExtraStart(builder)
MonsterExtraAddD0(builder, self.d0)
MonsterExtraAddD1(builder, self.d1)
MonsterExtraAddD2(builder, self.d2)
MonsterExtraAddD3(builder, self.d3)
MonsterExtraAddF0(builder, self.f0)
MonsterExtraAddF1(builder, self.f1)
MonsterExtraAddF2(builder, self.f2)
MonsterExtraAddF3(builder, self.f3)
if self.dvec is not None:
MonsterExtraAddDvec(builder, dvec)
if self.fvec is not None:
MonsterExtraAddFvec(builder, fvec)
monsterExtra = MonsterExtraEnd(builder)
return monsterExtra
| 37.985455 | 135 | 0.652786 |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class MonsterExtra(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MonsterExtra()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMonsterExtra(cls, buf, offset=0):
return cls.GetRootAs(buf, offset)
@classmethod
def MonsterExtraBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x45", size_prefixed=size_prefixed)
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def D0(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('nan')
def D1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('nan')
def D2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('inf')
def D3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return float('-inf')
def F0(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('nan')
def F1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('nan')
def F2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('inf')
def F3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return float('-inf')
def Dvec(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
def DvecAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
def DvecLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
def DvecIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
return o == 0
def Fvec(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
def FvecAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
def FvecLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
def FvecIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
def MonsterExtraStart(builder): builder.StartObject(11)
def Start(builder):
return MonsterExtraStart(builder)
def MonsterExtraAddD0(builder, d0): builder.PrependFloat64Slot(0, d0, float('nan'))
def AddD0(builder, d0):
return MonsterExtraAddD0(builder, d0)
def MonsterExtraAddD1(builder, d1): builder.PrependFloat64Slot(1, d1, float('nan'))
def AddD1(builder, d1):
return MonsterExtraAddD1(builder, d1)
def MonsterExtraAddD2(builder, d2): builder.PrependFloat64Slot(2, d2, float('inf'))
def AddD2(builder, d2):
return MonsterExtraAddD2(builder, d2)
def MonsterExtraAddD3(builder, d3): builder.PrependFloat64Slot(3, d3, float('-inf'))
def AddD3(builder, d3):
return MonsterExtraAddD3(builder, d3)
def MonsterExtraAddF0(builder, f0): builder.PrependFloat32Slot(4, f0, float('nan'))
def AddF0(builder, f0):
return MonsterExtraAddF0(builder, f0)
def MonsterExtraAddF1(builder, f1): builder.PrependFloat32Slot(5, f1, float('nan'))
def AddF1(builder, f1):
return MonsterExtraAddF1(builder, f1)
def MonsterExtraAddF2(builder, f2): builder.PrependFloat32Slot(6, f2, float('inf'))
def AddF2(builder, f2):
return MonsterExtraAddF2(builder, f2)
def MonsterExtraAddF3(builder, f3): builder.PrependFloat32Slot(7, f3, float('-inf'))
def AddF3(builder, f3):
return MonsterExtraAddF3(builder, f3)
def MonsterExtraAddDvec(builder, dvec): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(dvec), 0)
def AddDvec(builder, dvec):
return MonsterExtraAddDvec(builder, dvec)
def MonsterExtraStartDvecVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def StartDvecVector(builder, numElems):
return MonsterExtraStartDvecVector(builder, numElems)
def MonsterExtraAddFvec(builder, fvec): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(fvec), 0)
def AddFvec(builder, fvec):
return MonsterExtraAddFvec(builder, fvec)
def MonsterExtraStartFvecVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def StartFvecVector(builder, numElems):
return MonsterExtraStartFvecVector(builder, numElems)
def MonsterExtraEnd(builder): return builder.EndObject()
def End(builder):
return MonsterExtraEnd(builder)
try:
from typing import List
except:
pass
class MonsterExtraT(object):
def __init__(self):
self.d0 = float('nan')
self.d1 = float('nan')
self.d2 = float('inf')
self.d3 = float('-inf')
self.f0 = float('nan')
self.f1 = float('nan')
self.f2 = float('inf')
self.f3 = float('-inf')
self.dvec = None
self.fvec = None
@classmethod
def InitFromBuf(cls, buf, pos):
monsterExtra = MonsterExtra()
monsterExtra.Init(buf, pos)
return cls.InitFromObj(monsterExtra)
@classmethod
def InitFromObj(cls, monsterExtra):
x = MonsterExtraT()
x._UnPack(monsterExtra)
return x
def _UnPack(self, monsterExtra):
if monsterExtra is None:
return
self.d0 = monsterExtra.D0()
self.d1 = monsterExtra.D1()
self.d2 = monsterExtra.D2()
self.d3 = monsterExtra.D3()
self.f0 = monsterExtra.F0()
self.f1 = monsterExtra.F1()
self.f2 = monsterExtra.F2()
self.f3 = monsterExtra.F3()
if not monsterExtra.DvecIsNone():
if np is None:
self.dvec = []
for i in range(monsterExtra.DvecLength()):
self.dvec.append(monsterExtra.Dvec(i))
else:
self.dvec = monsterExtra.DvecAsNumpy()
if not monsterExtra.FvecIsNone():
if np is None:
self.fvec = []
for i in range(monsterExtra.FvecLength()):
self.fvec.append(monsterExtra.Fvec(i))
else:
self.fvec = monsterExtra.FvecAsNumpy()
def Pack(self, builder):
if self.dvec is not None:
if np is not None and type(self.dvec) is np.ndarray:
dvec = builder.CreateNumpyVector(self.dvec)
else:
MonsterExtraStartDvecVector(builder, len(self.dvec))
for i in reversed(range(len(self.dvec))):
builder.PrependFloat64(self.dvec[i])
dvec = builder.EndVector()
if self.fvec is not None:
if np is not None and type(self.fvec) is np.ndarray:
fvec = builder.CreateNumpyVector(self.fvec)
else:
MonsterExtraStartFvecVector(builder, len(self.fvec))
for i in reversed(range(len(self.fvec))):
builder.PrependFloat32(self.fvec[i])
fvec = builder.EndVector()
MonsterExtraStart(builder)
MonsterExtraAddD0(builder, self.d0)
MonsterExtraAddD1(builder, self.d1)
MonsterExtraAddD2(builder, self.d2)
MonsterExtraAddD3(builder, self.d3)
MonsterExtraAddF0(builder, self.f0)
MonsterExtraAddF1(builder, self.f1)
MonsterExtraAddF2(builder, self.f2)
MonsterExtraAddF3(builder, self.f3)
if self.dvec is not None:
MonsterExtraAddDvec(builder, dvec)
if self.fvec is not None:
MonsterExtraAddFvec(builder, fvec)
monsterExtra = MonsterExtraEnd(builder)
return monsterExtra
| true | true |
f7288b94c92a855c127eb8f1e957ee46cddd8033 | 597 | py | Python | blog_server_django/blog/urls.py | kfrime/yonder_old | f086baba25bed0959ee91ca1b63865bd1fd9cf33 | [
"MIT"
] | null | null | null | blog_server_django/blog/urls.py | kfrime/yonder_old | f086baba25bed0959ee91ca1b63865bd1fd9cf33 | [
"MIT"
] | 4 | 2021-03-09T08:37:20.000Z | 2021-06-10T22:02:22.000Z | blog_server_django/blog/urls.py | kfrime/yonder_old | f086baba25bed0959ee91ca1b63865bd1fd9cf33 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from django.urls import path, include
from rest_framework import routers
from . import views
api_router = routers.DefaultRouter()
api_router.register(r'topics', views.TopicAPIView, base_name='api_topics')
api_router.register(r'tags', views.TagAPIView, base_name='api_tags')
api_router.register(r'articles', views.ArticleAPIView, base_name='api_articles')
api_router.register(r'archives', views.ArchiveAPIView, base_name='api_archives')
api_router.register(r'about', views.AboutAPIView, base_name='api_about')
urlpatterns = [
path('api/', include(api_router.urls)),
]
| 35.117647 | 80 | 0.778894 |
from django.urls import path, include
from rest_framework import routers
from . import views
api_router = routers.DefaultRouter()
api_router.register(r'topics', views.TopicAPIView, base_name='api_topics')
api_router.register(r'tags', views.TagAPIView, base_name='api_tags')
api_router.register(r'articles', views.ArticleAPIView, base_name='api_articles')
api_router.register(r'archives', views.ArchiveAPIView, base_name='api_archives')
api_router.register(r'about', views.AboutAPIView, base_name='api_about')
urlpatterns = [
path('api/', include(api_router.urls)),
]
| true | true |
f7288c24c41fa9ec41287c08c1264f4516abf764 | 21,323 | py | Python | rusty_green_kernel/test/test_rusty_green_kernel.py | rusty-fast-solvers/rusty-green-kernel | 9317f88e873550270c482473005250a9d2df2950 | [
"BSD-3-Clause"
] | 7 | 2021-04-26T14:28:44.000Z | 2021-06-15T05:09:12.000Z | rusty_green_kernel/test/test_rusty_green_kernel.py | rusty-fast-solvers/rusty-green-kernel | 9317f88e873550270c482473005250a9d2df2950 | [
"BSD-3-Clause"
] | null | null | null | rusty_green_kernel/test/test_rusty_green_kernel.py | rusty-fast-solvers/rusty-green-kernel | 9317f88e873550270c482473005250a9d2df2950 | [
"BSD-3-Clause"
] | null | null | null | """Unit tests for direct assembly and evaluation of kernels."""
import numpy as np
import pytest
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_assemble(dtype, rtol, parallel):
"""Test the Laplace kernel."""
from rusty_green_kernel import assemble_laplace_kernel
nsources = 10
ntargets = 20
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
actual = assemble_laplace_kernel(sources, targets, dtype=dtype, parallel=parallel)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
expected[index, :] = 1.0 / (
4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)
)
# Reset the warnings
np.seterr(divide=old_param)
expected[0, 0] = 0 # First source and target are identical.
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_evaluate_only_values(dtype, rtol, parallel):
"""Test the Laplace kernel."""
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_laplace_kernel(
sources, targets, charges, dtype=dtype, parallel=parallel
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
expected[:, index] = 1.0 / (
4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)
)
# Reset the warnings
np.seterr(divide=old_param)
expected[0, 0] = 0 # First source and target are identical.
expected = np.expand_dims(charges @ expected, -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_evaluate_values_and_deriv(dtype, rtol, parallel):
"""Test the Laplace kernel."""
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_laplace_kernel(
sources, targets, charges, dtype=dtype, return_gradients=True, parallel=parallel
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = sources - target.reshape(3, 1)
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = 1.0 / (4 * np.pi * dist)
expected[:, index, 1:] = diff.T / (4 * np.pi * dist.reshape(nsources, 1) ** 3)
expected[dist == 0, index, :] = 0
# Reset the warnings
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_assemble(dtype, rtol, parallel):
"""Test the Laplace kernel."""
from rusty_green_kernel import assemble_helmholtz_kernel
wavenumber = 2.5
nsources = 10
ntargets = 20
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
actual = assemble_helmholtz_kernel(
sources, targets, wavenumber, dtype=dtype, parallel=parallel
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[index, :] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[index, dist == 0] = 0
# Reset the warnings
np.seterr(**old_params)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_evaluate_only_values(dtype, rtol):
"""Test the Laplace kernel."""
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 2.5 + 1.3j
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=real_type
)
actual = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, dtype=dtype, parallel=False
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_param = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[:, index] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[dist == 0, index] = 0
# Reset the warnings
np.seterr(**old_param)
expected = np.expand_dims(np.tensordot(charges, expected, 1), -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):
"""Test the Laplace kernel."""
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 2.5 + 1.3j
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=real_type
)
actual = evaluate_helmholtz_kernel(
sources,
targets,
charges,
wavenumber,
dtype=dtype,
return_gradients=True,
parallel=parallel,
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = target.reshape(3, 1) - sources
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[:, index, 1:] = (
diff.T
* expected[:, index, 0].reshape(nsources, 1)
/ dist.reshape(nsources, 1) ** 2
* (1j * wavenumber * dist.reshape(nsources, 1) - 1)
)
expected[dist == 0, index, :] = 0
# Reset the warnings
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_assemble(dtype, rtol, parallel):
"""Test the modified Helmholtz kernel."""
from rusty_green_kernel import assemble_modified_helmholtz_kernel
nsources = 10
ntargets = 20
omega = 2.5
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
actual = assemble_modified_helmholtz_kernel(
sources, targets, omega, dtype=dtype, parallel=parallel
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[index, :] = np.exp(-omega * dist) / (4 * np.pi * dist)
# Reset the warnings
np.seterr(divide=old_param)
expected[0, 0] = 0 # First source and target are identical.
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_evaluate_only_values(dtype, rtol, parallel):
"""Test the modified Helmholtz kernel."""
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
omega = 2.5
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_modified_helmholtz_kernel(
sources, targets, charges, omega, dtype=dtype, parallel=parallel
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[:, index] = np.exp(-omega * dist) / (4 * np.pi * dist)
# Reset the warnings
np.seterr(divide=old_param)
expected[0, 0] = 0 # First source and target are identical.
expected = np.expand_dims(charges @ expected, -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):
"""Test the modified Helmholtz kernel."""
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
omega = 2.5
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_modified_helmholtz_kernel(
sources,
targets,
charges,
omega,
dtype=dtype,
return_gradients=True,
parallel=parallel,
)
# Calculate expected result
# A divide by zero error is expected to happen here.
# So just ignore the warning.
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = target.reshape(3, 1) - sources
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = np.exp(-omega * dist) / (4 * np.pi * dist)
expected[:, index, 1:] = (
diff.T
/ (4 * np.pi * dist.reshape(nsources, 1) ** 3)
* np.exp(-omega * dist.reshape(nsources, 1))
* (-omega * dist.reshape(nsources, 1) - 1)
)
expected[dist == 0, index, :] = 0
# Reset the warnings
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
def test_laplace_derivative_is_correct():
"""Test that the Gradient of the Laplace kernel is correct."""
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
# Evalute derivative approximately.
values = evaluate_laplace_kernel(sources, targets, charges)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_laplace_kernel(sources, targets, charges, return_gradients=True)[
0, 0, 1:
]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_helmholtz_derivative_is_correct():
"""Test that the Gradient of the Helmholtz kernel is correct."""
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
wavenumber = 2.5 + 1.3j
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
# Evalute derivative approximately.
values = evaluate_helmholtz_kernel(sources, targets, charges, wavenumber)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)[0, 0, 1:]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_modified_helmholtz_derivative_is_correct():
"""Test that the Gradient of the Helmholtz kernel is correct."""
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
omega = 1.3
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
# Evalute derivative approximately.
values = evaluate_modified_helmholtz_kernel(sources, targets, charges, omega)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_modified_helmholtz_kernel(
sources, targets, charges, omega, return_gradients=True
)[0, 0, 1:]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_helmholtz_at_zero_agrees_with_laplace():
"""Test if Helmholtz with wavenumber 0 agrees with Laplace."""
from rusty_green_kernel import evaluate_helmholtz_kernel
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 0
dtype = np.float64
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=dtype
)
values_helmholtz = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)
values_laplace = evaluate_laplace_kernel(
sources, targets, np.real(charges), return_gradients=True
) + 1j * evaluate_laplace_kernel(
sources, targets, np.imag(charges), return_gradients=True
)
np.testing.assert_allclose(values_helmholtz, values_laplace, rtol=1E-14)
def test_helmholtz_imaginary_wavenumber_agrees_with_modified_helmholtz():
"""Test if Helmholtz with wavenumber 0 agrees with Laplace."""
from rusty_green_kernel import evaluate_helmholtz_kernel
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 1.3j
dtype = np.float64
rng = np.random.default_rng(seed=0)
# Construct target and sources so that they do not overlap
# apart from the first point.
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0] # Test what happens if source = target
charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=dtype
)
values_helmholtz = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)
values_modified_helmholtz = evaluate_modified_helmholtz_kernel(
sources, targets, np.real(charges), np.imag(wavenumber), return_gradients=True
) + 1j * evaluate_modified_helmholtz_kernel(
sources, targets, np.imag(charges), np.imag(wavenumber), return_gradients=True
)
np.testing.assert_allclose(values_helmholtz, values_modified_helmholtz, rtol=1E-14) | 31.777943 | 88 | 0.645406 | import numpy as np
import pytest
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_assemble(dtype, rtol, parallel):
from rusty_green_kernel import assemble_laplace_kernel
nsources = 10
ntargets = 20
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
actual = assemble_laplace_kernel(sources, targets, dtype=dtype, parallel=parallel)
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
expected[index, :] = 1.0 / (
4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)
)
np.seterr(divide=old_param)
expected[0, 0] = 0
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_evaluate_only_values(dtype, rtol, parallel):
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_laplace_kernel(
sources, targets, charges, dtype=dtype, parallel=parallel
)
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
expected[:, index] = 1.0 / (
4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)
)
np.seterr(divide=old_param)
expected[0, 0] = 0
expected = np.expand_dims(charges @ expected, -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_laplace_evaluate_values_and_deriv(dtype, rtol, parallel):
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_laplace_kernel(
sources, targets, charges, dtype=dtype, return_gradients=True, parallel=parallel
)
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = sources - target.reshape(3, 1)
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = 1.0 / (4 * np.pi * dist)
expected[:, index, 1:] = diff.T / (4 * np.pi * dist.reshape(nsources, 1) ** 3)
expected[dist == 0, index, :] = 0
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_assemble(dtype, rtol, parallel):
from rusty_green_kernel import assemble_helmholtz_kernel
wavenumber = 2.5
nsources = 10
ntargets = 20
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0]
actual = assemble_helmholtz_kernel(
sources, targets, wavenumber, dtype=dtype, parallel=parallel
)
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[index, :] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[index, dist == 0] = 0
np.seterr(**old_params)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_evaluate_only_values(dtype, rtol):
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 2.5 + 1.3j
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=real_type
)
actual = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, dtype=dtype, parallel=False
)
old_param = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[:, index] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[dist == 0, index] = 0
np.seterr(**old_param)
expected = np.expand_dims(np.tensordot(charges, expected, 1), -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.complex128, 1e-14), (np.complex64, 5e-6)])
def test_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 2.5 + 1.3j
if dtype == np.complex128:
real_type = np.float64
elif dtype == np.complex64:
real_type = np.float32
else:
raise ValueError(f"Unsupported type: {dtype}.")
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
sources = rng.random((3, nsources), dtype=real_type)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=real_type) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=real_type
)
actual = evaluate_helmholtz_kernel(
sources,
targets,
charges,
wavenumber,
dtype=dtype,
return_gradients=True,
parallel=parallel,
)
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = target.reshape(3, 1) - sources
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
expected[:, index, 1:] = (
diff.T
* expected[:, index, 0].reshape(nsources, 1)
/ dist.reshape(nsources, 1) ** 2
* (1j * wavenumber * dist.reshape(nsources, 1) - 1)
)
expected[dist == 0, index, :] = 0
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_assemble(dtype, rtol, parallel):
from rusty_green_kernel import assemble_modified_helmholtz_kernel
nsources = 10
ntargets = 20
omega = 2.5
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
actual = assemble_modified_helmholtz_kernel(
sources, targets, omega, dtype=dtype, parallel=parallel
)
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((ntargets, nsources), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[index, :] = np.exp(-omega * dist) / (4 * np.pi * dist)
np.seterr(divide=old_param)
expected[0, 0] = 0
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_evaluate_only_values(dtype, rtol, parallel):
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
omega = 2.5
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_modified_helmholtz_kernel(
sources, targets, charges, omega, dtype=dtype, parallel=parallel
)
old_param = np.geterr()["divide"]
np.seterr(divide="ignore")
expected = np.empty((nsources, ntargets), dtype=dtype)
for index, target in enumerate(targets.T):
dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
expected[:, index] = np.exp(-omega * dist) / (4 * np.pi * dist)
np.seterr(divide=old_param)
expected[0, 0] = 0
expected = np.expand_dims(charges @ expected, -1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
@pytest.mark.parametrize("parallel", [True, False])
@pytest.mark.parametrize("dtype,rtol", [(np.float64, 1e-14), (np.float32, 5e-6)])
def test_modified_helmholtz_evaluate_values_and_deriv(dtype, rtol, parallel):
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
omega = 2.5
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype)
actual = evaluate_modified_helmholtz_kernel(
sources,
targets,
charges,
omega,
dtype=dtype,
return_gradients=True,
parallel=parallel,
)
old_params = np.geterr()
np.seterr(all="ignore")
expected = np.empty((nsources, ntargets, 4), dtype=dtype)
for index, target in enumerate(targets.T):
diff = target.reshape(3, 1) - sources
dist = np.linalg.norm(diff, axis=0)
expected[:, index, 0] = np.exp(-omega * dist) / (4 * np.pi * dist)
expected[:, index, 1:] = (
diff.T
/ (4 * np.pi * dist.reshape(nsources, 1) ** 3)
* np.exp(-omega * dist.reshape(nsources, 1))
* (-omega * dist.reshape(nsources, 1) - 1)
)
expected[dist == 0, index, :] = 0
np.seterr(**old_params)
expected = np.tensordot(charges, expected, 1)
np.testing.assert_allclose(actual, expected, rtol=rtol)
def test_laplace_derivative_is_correct():
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
values = evaluate_laplace_kernel(sources, targets, charges)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_laplace_kernel(sources, targets, charges, return_gradients=True)[
0, 0, 1:
]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_helmholtz_derivative_is_correct():
from rusty_green_kernel import evaluate_helmholtz_kernel
nsources = 10
wavenumber = 2.5 + 1.3j
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
values = evaluate_helmholtz_kernel(sources, targets, charges, wavenumber)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)[0, 0, 1:]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_modified_helmholtz_derivative_is_correct():
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
omega = 1.3
eps = 1e-10
dtype = np.float64
targets = np.array(
[
[1.1, 1.5, 2.3],
[1.1 + eps, 1.5, 2.3],
[1.1 - eps, 1.5, 2.3],
[1.1, 1.5 + eps, 2.3],
[1.1, 1.5 - eps, 2.3],
[1.1, 1.5, 2.3 + eps],
[1.1, 1.5, 2.3 - eps],
]
).T
rng = np.random.default_rng(seed=0)
sources = rng.random((3, nsources), dtype=dtype)
charges = rng.random((1, nsources), dtype=dtype)
values = evaluate_modified_helmholtz_kernel(sources, targets, charges, omega)
x_deriv = (values[0, 1, 0] - values[0, 2, 0]) / (2 * eps)
y_deriv = (values[0, 3, 0] - values[0, 4, 0]) / (2 * eps)
z_deriv = (values[0, 5, 0] - values[0, 6, 0]) / (2 * eps)
expected = np.array([x_deriv, y_deriv, z_deriv])
actual = evaluate_modified_helmholtz_kernel(
sources, targets, charges, omega, return_gradients=True
)[0, 0, 1:]
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_helmholtz_at_zero_agrees_with_laplace():
from rusty_green_kernel import evaluate_helmholtz_kernel
from rusty_green_kernel import evaluate_laplace_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 0
dtype = np.float64
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=dtype
)
values_helmholtz = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)
values_laplace = evaluate_laplace_kernel(
sources, targets, np.real(charges), return_gradients=True
) + 1j * evaluate_laplace_kernel(
sources, targets, np.imag(charges), return_gradients=True
)
np.testing.assert_allclose(values_helmholtz, values_laplace, rtol=1E-14)
def test_helmholtz_imaginary_wavenumber_agrees_with_modified_helmholtz():
from rusty_green_kernel import evaluate_helmholtz_kernel
from rusty_green_kernel import evaluate_modified_helmholtz_kernel
nsources = 10
ntargets = 20
ncharge_vecs = 2
wavenumber = 1.3j
dtype = np.float64
rng = np.random.default_rng(seed=0)
targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
sources = rng.random((3, nsources), dtype=dtype)
sources[:, 0] = targets[:, 0]
charges = rng.random((ncharge_vecs, nsources), dtype=dtype) + 1j * rng.random(
(ncharge_vecs, nsources), dtype=dtype
)
values_helmholtz = evaluate_helmholtz_kernel(
sources, targets, charges, wavenumber, return_gradients=True
)
values_modified_helmholtz = evaluate_modified_helmholtz_kernel(
sources, targets, np.real(charges), np.imag(wavenumber), return_gradients=True
) + 1j * evaluate_modified_helmholtz_kernel(
sources, targets, np.imag(charges), np.imag(wavenumber), return_gradients=True
)
np.testing.assert_allclose(values_helmholtz, values_modified_helmholtz, rtol=1E-14) | true | true |
f7288c510b89bb28931dff9a779183a4991756e6 | 4,237 | py | Python | tools/third_party/pywebsocket3/test/test_memorizingfile.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 2,479 | 2018-05-28T14:51:29.000Z | 2022-03-30T14:41:18.000Z | tools/third_party/pywebsocket3/test/test_memorizingfile.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | tools/third_party/pywebsocket3/test/test_memorizingfile.py | meyerweb/wpt | f04261533819893c71289614c03434c06856c13e | [
"BSD-3-Clause"
] | 1,303 | 2018-05-29T14:50:02.000Z | 2022-03-30T17:30:42.000Z | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for memorizingfile module."""
from __future__ import absolute_import
import unittest
import six
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
"""A unittest for memorizingfile module."""
def check(self, memorizing_file, num_read, expected_list):
for unused in range(num_read):
memorizing_file.readline()
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
self.assertEqual(expected, actual)
def check_with_size(self, memorizing_file, read_size, expected_list):
read_list = []
read_line = ''
while True:
line = memorizing_file.readline(read_size)
line_length = len(line)
self.assertTrue(line_length <= read_size)
if line_length == 0:
if read_line != '':
read_list.append(read_line)
break
read_line += line
if line[line_length - 1] == '\n':
read_list.append(read_line)
read_line = ''
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
self.assertEqual(len(expected_list), len(read_list))
for expected, actual, read in zip(expected_list, actual_list,
read_list):
self.assertEqual(expected, actual)
self.assertEqual(expected, read)
def test_get_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'))
self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
def test_get_memorized_lines_limit_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'), 2)
self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
def test_get_memorized_lines_empty_file(self):
memorizing_file = memorizingfile.MemorizingFile(six.StringIO(''))
self.check(memorizing_file, 10, [])
def test_get_memorized_lines_with_size(self):
for size in range(1, 10):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'))
self.check_with_size(memorizing_file, size,
['Hello\n', 'World\n', 'Welcome'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| 41.950495 | 73 | 0.689403 |
from __future__ import absolute_import
import unittest
import six
import set_sys_path
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
def check(self, memorizing_file, num_read, expected_list):
for unused in range(num_read):
memorizing_file.readline()
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
self.assertEqual(expected, actual)
def check_with_size(self, memorizing_file, read_size, expected_list):
read_list = []
read_line = ''
while True:
line = memorizing_file.readline(read_size)
line_length = len(line)
self.assertTrue(line_length <= read_size)
if line_length == 0:
if read_line != '':
read_list.append(read_line)
break
read_line += line
if line[line_length - 1] == '\n':
read_list.append(read_line)
read_line = ''
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
self.assertEqual(len(expected_list), len(read_list))
for expected, actual, read in zip(expected_list, actual_list,
read_list):
self.assertEqual(expected, actual)
self.assertEqual(expected, read)
def test_get_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'))
self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
def test_get_memorized_lines_limit_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'), 2)
self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
def test_get_memorized_lines_empty_file(self):
memorizing_file = memorizingfile.MemorizingFile(six.StringIO(''))
self.check(memorizing_file, 10, [])
def test_get_memorized_lines_with_size(self):
for size in range(1, 10):
memorizing_file = memorizingfile.MemorizingFile(
six.StringIO('Hello\nWorld\nWelcome'))
self.check_with_size(memorizing_file, size,
['Hello\n', 'World\n', 'Welcome'])
if __name__ == '__main__':
unittest.main()
| true | true |
f7288fd249c2a48ff3791e7fc1c7fb3e4f094bd1 | 15,189 | py | Python | Alg2_ADMM_MNIST_model_1.py | Ialkhouri/Adv_attacks_big_picture_classification | 53edffc3b5bb313e476dcdbaf97ec776884cad50 | [
"MIT"
] | null | null | null | Alg2_ADMM_MNIST_model_1.py | Ialkhouri/Adv_attacks_big_picture_classification | 53edffc3b5bb313e476dcdbaf97ec776884cad50 | [
"MIT"
] | null | null | null | Alg2_ADMM_MNIST_model_1.py | Ialkhouri/Adv_attacks_big_picture_classification | 53edffc3b5bb313e476dcdbaf97ec776884cad50 | [
"MIT"
] | null | null | null | # Importing Libraries
from foolbox.criteria import TargetClass
from foolbox.criteria import Misclassification
from numpy import linalg as LA
import matplotlib.pyplot as plt
from foolbox.attacks import CarliniWagnerL2Attack
from foolbox.attacks import SaliencyMapAttack
from foolbox.attacks import GradientSignAttack
from foolbox.v1.attacks import FGSM
from foolbox.v1.attacks import MomentumIterativeAttack
#from foolbox.v1.attacks import GradientSignAttack
from skimage.measure import compare_ssim
from keras import layers, models
import numpy as np
from keras.utils import np_utils
from keras import backend as K
from keras.applications import vgg16
import tensorflow as tf
import pickle
import foolbox
import json
import timeit
start = timeit.default_timer()
import cvxpy as cp
from numpy import linalg as LA
from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl,get_S_T_S_T_comp_from_lbl,Imperceptibility,ADMM_,Attack_performance,cvxPy_pert_gen
########################################################################
############################################### Fashion MNIST dataset import
############################################################################
#tf.keras.backend.set_learning_phase(False)
# Keras Parameters
batch_size = 28
nb_classes = 10
nb_epoch = 2
img_rows, img_col = 28, 28
img_channels = 1
# download mnist data and split into train and test sets
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_train_1d = X_train.reshape(60000,784,1)
X_test_1d = X_test.reshape(10000,784,1)
################################################################################
############## Loading the model and preprocessing #####################
######################################################################################
########### load the propoer model here
model1 = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')
model1.summary()
####################################################################
####################################################################################
############RE-LABEL TRAIN_LABELS AND TEST_LABELS (Using a dictonary) #########################
######################################################################################
dic5 = {2:0, 4:0, 6:0, 5:2, 7:2, 9:2, 8:4}
train_labels_5 = [dic5[x] if x in dic5.keys() else x for x in train_labels]
test_labels_5 = [dic5[x] if x in dic5.keys() else x for x in test_labels]
'''
your mapping is different than mine. Here is the mapping from the paper you gave me.
0 ==> {0,2,4,6} top
1 ==> {1} bottom
2 ==> {5,7,9} shoes
3 ==> {3} dress
4 ==> {8}
'''
######################################################################################
# #####################################################################
################### loading Grads and testing the vectorization
#####################################################################
Grad_MNIST_model1 = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/Grad_MNIST_model1_1d_before_SM.p","rb"))
disc_values = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p","rb"))
################################################################################
##################################### BUILDING THE ALG - 1 PROBLEM WITH CVXPY ######
################################################################################
######## to save eta, ceate a vectorized empty np array of size 10000,28*28,1
number_of_observations = 10000
### tensors to save and to calc CApert, CApert_sup, ELA, RLA, and sigmas
eta_vec = np.zeros(shape=(number_of_observations,28*28,1))
imperceptibility_rho_2_save = np.nan*np.ones(shape=(number_of_observations,1))
imperceptibility_rho_i_save = np.nan*np.ones(shape=(number_of_observations,1))
imperceptibility_sssim_save = np.nan*np.ones(shape=(number_of_observations,1))
pred_pert_lbls = np.zeros(shape=(number_of_observations))
pred_pert_sup_lbls = np.zeros(shape=(number_of_observations))
pred_lbls = np.zeros(shape=(number_of_observations))
cnt = 0
Q = 3
epsilon_D = 0.18
######################### loading perturbations from MIFGSM
MIFGSM_perturbed_images = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbed_images.p","rb"))
MIFGSM_perturbations = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_perturbations.p","rb"))
MIFGSM_pred_label_w_pert = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_label_w_pert.p","rb"))
MIFGSM_pred_label_w_pert_super_label = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/MIFGSM_pred_super_label_w_pert.p","rb"))
for id in range(number_of_observations):
######## LET THE INPUT IMAGE be:
id = id
input_image = X_test_1d[id]
input_image_reshaped = input_image.reshape(784)
######## get tru_lbl
tru_lbl = test_labels[id]
######## get tru_sup_lbl
tru_sup_lbl = sup_lbl_from_lbl(tru_lbl)
######## get pred_lbl
pred_lbl = np.argmax(model1(input_image.reshape(1, 784, 1)))
pred_lbls[id] = pred_lbl
######## get_pred_sup_lbl
pred_sup_lbl = sup_lbl_from_lbl(pred_lbl)
######## get S_T and S_T_comp: this is based on the tru lbl not the predicted lbl
[S_T,S_T_comp] = get_S_T_S_T_comp_from_lbl(tru_lbl)
######## get vectozied gradients and disc values of of the disgnated lbl
Grad_MNIST_model1_vec_disgnated = Grad_MNIST_model1[id,:,:]
#print('Grad_MNIST_model1_vec_disgnated = ' , Grad_MNIST_model1_vec_disgnated.shape)
disc_values_disgnated = disc_values[id,:]
####### get S_T_comp_star as the reduced/sorted set with cardinality = Q
# get the indicies of the highest Q values from the f(input image), where f is the discriminant vector before the softmax
# vector before softmax is:
disc_values = pickle.load(
open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p", "rb"))
disc_values_disgnated = disc_values[id, :]
# remove S_T values and place them with -100.0
temp = disc_values[id, :]
disc_values_disgnated_excluding_S_T = temp
disc_values_disgnated_excluding_S_T[S_T] = -100.0
S_T_comp_star = (-disc_values_disgnated_excluding_S_T).argsort()[0:Q]
# # keep this to restart above variables in the case of using j_star from the NOC methid
disc_values = pickle.load(
open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p", "rb"))
disc_values_disgnated = disc_values[id, :]
###### SAVE eta[id] of each j \in S_T_comp
# initial
eta_vec_j = np.zeros(shape=(10,28*28,1))
# distance initial
D_j = 1000000*np.ones(shape=(10, 1))
####################################### Alg .II
## try MIFGSM; if good, then exit the program and we found eta^*
if MIFGSM_pred_label_w_pert_super_label[id] != tru_sup_lbl:
eta_cvx = MIFGSM_perturbations[id,:,:,:].reshape(784,1)
eta_vec[id, :, :] = eta_cvx.reshape(n, 1)
eta_source = 'MIFGSM'
cnt = cnt + 1
rho_2 = Imperceptibility(input_image, eta_cvx)[0]
rho_inf = Imperceptibility(input_image, eta_cvx)[1]
D_ssim = Imperceptibility(input_image, eta_cvx)[2]
imperceptibility_rho_2_save[id] = rho_2
imperceptibility_rho_i_save[id] = rho_inf
imperceptibility_sssim_save[id] = D_ssim
image_pert = eta_cvx + input_image
#pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))
pred_pert_lbls[id] = MIFGSM_pred_label_w_pert[id]
pred_pert_sup_lbls[id] = MIFGSM_pred_label_w_pert_super_label[id]
print('id = ', id, "eta_source = " , 'MIFGSM' , ' ; winning_label = ', 'Nadaaaaaa', 'pred_sup_lbl = ', pred_sup_lbl, 'predecited_perturbed_super_lbl = ',
MIFGSM_pred_label_w_pert_super_label[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image,eta_cvx)[0:2], ' ; count = ', cnt)
## ELSE
else:
flag = 0
eta_source = 'not MIFGSM'
for jj in S_T_comp_star:
j_star = jj
# find eta_jj
########
epsilon = 10
####### get matrix G \in N \times |S_T| and b \in |S_T|, where G_columns = [grad_j_star - grad_l], for all l \in S_T
n = 28*28
card_S_T = len(S_T) # cardinality of the set S_T
mat_G = np.zeros(shape=(n,card_S_T)) # init mat_G
vec_b_wout = np.zeros(shape=(card_S_T,1) )
temp_jstar = Grad_MNIST_model1_vec_disgnated[j_star , : ,:]
temp_jstar = temp_jstar.reshape(n,)
b_jstar = disc_values_disgnated[j_star]
#b_jstar = b_jstar.reshape(1,)
for i in range(card_S_T):
temp1 = Grad_MNIST_model1_vec_disgnated[S_T[i] , : ,:]
temp1 = temp1.reshape(n,)
b_l = disc_values_disgnated[S_T[i]]
# b_l = b_l.reshape(1,)
mat_G[:,i] = temp_jstar - temp1
vec_b_wout[ i] = b_l - b_jstar
vec_b = vec_b_wout + epsilon
###############################################################################################
##### ADMM
#### algorithm parameters
r_penalty_factor = 0.0075
number_of_iterations_tau = 10
# eADMM stopping criteria
epsilon_A = 0.15
admm_type = "ADMM"
eta_cvx = ADMM_(input_image,model1,pred_sup_lbl,r_penalty_factor,number_of_iterations_tau,epsilon_A,mat_G, vec_b,admm_type)
################################################################################################
################# calculate the distance
image_pert_temp = input_image + eta_cvx
#D_j[jj] = LA.norm(eta_cvx, 2)
D_j[jj] = Imperceptibility(input_image,eta_cvx)[0]
if sup_lbl_from_lbl(np.argmax(model1(image_pert_temp.reshape(1, 784, 1)))) != pred_sup_lbl and D_j[jj] <= epsilon_D:
#print('break for is used')
flag = 1
eta_cvx = eta_cvx
eta_vec[id, :, :] = eta_cvx.reshape(n, 1)
cnt = cnt + 1
rho_2 = Imperceptibility(input_image, eta_cvx)[0]
rho_inf = Imperceptibility(input_image, eta_cvx)[1]
D_ssim = Imperceptibility(input_image, eta_cvx)[2]
imperceptibility_rho_2_save[id] = rho_2
imperceptibility_rho_i_save[id] = rho_inf
imperceptibility_sssim_save[id] = D_ssim
image_pert = eta_cvx + input_image
pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))
pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))
print('id = ', id, "eta_source = ", 'not MIFGSM and break is used', ' ; winning_label = ', jj, 'pred_sup_lbl = ',
pred_sup_lbl, 'predecited_perturbed_super_lbl = ',
pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],
' ; count = ', cnt)
break
else:
# save the mother fucking eta_cvx to choose from in the future
# save eta for each j \in S_T_comp
eta_vec_j[jj,:,:] = eta_cvx.reshape(n,1)
if flag != 1:
winning_label = np.argmin(D_j)
eta_cvx = eta_vec_j[winning_label, :, :]
eta_cvx = eta_cvx
rho_2 = Imperceptibility(input_image, eta_cvx)[0]
rho_inf = Imperceptibility(input_image, eta_cvx)[1]
D_ssim = Imperceptibility(input_image, eta_cvx)[2]
# cnt is increased iff T(k(x+eta)) != T(k(x))
if sup_lbl_from_lbl(np.argmax(model1((input_image+eta_cvx).reshape(1, 784, 1)))) != pred_sup_lbl:
cnt = cnt + 1
imperceptibility_rho_2_save[id] = rho_2
imperceptibility_rho_i_save[id] = rho_inf
imperceptibility_sssim_save[id] = D_ssim
image_pert = eta_cvx + input_image
pred_pert_lbls[id] = np.argmax(model1(image_pert.reshape(1, 784, 1)))
pred_pert_sup_lbls[id] = sup_lbl_from_lbl(np.argmax(model1(image_pert.reshape(1, 784, 1))))
print('id = ', id, "eta_source = ", 'not MIFGSM and no break', ' ; winning_label = ', winning_label,
'pred_sup_lbl = ',
pred_sup_lbl, 'predecited_perturbed_super_lbl = ',
pred_pert_sup_lbls[id], ' (rho_2,rho_inf, ssim) = ', Imperceptibility(input_image, eta_cvx)[0:2],
' ; count = ', cnt)
attack_success = cnt / number_of_observations
print('ATTACK SUCCESS = ' , attack_success*100 , '%')
CA_pert, CA_pert_sup, RLA, ELA,RLA_sup, ELA_sup , sigma_2, sigma_inf, sigma_s = \
Attack_performance(test_labels[0:number_of_observations] ,
pred_lbls,
pred_pert_lbls ,
imperceptibility_rho_2_save,
imperceptibility_rho_i_save,
imperceptibility_sssim_save)
# attack performace
print('Number of observations = ', number_of_observations ,
'\n CA_pert = ' , CA_pert,
"\n CA_pert_sup = " , CA_pert_sup ,
"\n RLA = " , RLA ,
"\n ELA = " , ELA,
'\n RLA_sup = ' , RLA_sup,
'\n ELA_sup = ' , ELA_sup,
"\n sigma_2 = " , sigma_2 ,
"\n sigma_inf = " , sigma_inf ,
'\n ssim = ' , sigma_s)
# # #####################################################################
# # ################### Plotting images
# # #####################################################################
# print("")
#
# plt.figure()
# plt.subplot(1,3,1)
# plt.title('Original')
# plt.imshow(input_image.reshape(28,28))
# plt.axis('off')
#
#
# plt.subplot(1,3,2)
# plt.title('pertubations')
# plt.imshow(eta_cvx.reshape(28,28))
# plt.axis('off')
#
#
# plt.subplot(1,3,3)
# plt.title('perturbed image')
# plt.imshow(image_pert.reshape(28,28))
# plt.axis('off')
#
#
# plt.show()
# # ########################################################################
stop = timeit.default_timer()
print('Time: ', stop - start)
#pickle.dump(eta_vec, open("eta_vec_alg2_samples.p", "wb"))
print('break here')
| 36.42446 | 165 | 0.582724 |
from foolbox.criteria import TargetClass
from foolbox.criteria import Misclassification
from numpy import linalg as LA
import matplotlib.pyplot as plt
from foolbox.attacks import CarliniWagnerL2Attack
from foolbox.attacks import SaliencyMapAttack
from foolbox.attacks import GradientSignAttack
from foolbox.v1.attacks import FGSM
from foolbox.v1.attacks import MomentumIterativeAttack
from skimage.measure import compare_ssim
from keras import layers, models
import numpy as np
from keras.utils import np_utils
from keras import backend as K
from keras.applications import vgg16
import tensorflow as tf
import pickle
import foolbox
import json
import timeit
start = timeit.default_timer()
import cvxpy as cp
from numpy import linalg as LA
from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl,get_S_T_S_T_comp_from_lbl,Imperceptibility,ADMM_,Attack_performance,cvxPy_pert_gen
| true | true |
f7289065c4d52fe80d6531156b36dfd941d57e04 | 2,152 | py | Python | migrations/versions/0004_notification_stats_date.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0004_notification_stats_date.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0004_notification_stats_date.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """empty message
Revision ID: 0004_notification_stats_date
Revises: 0003_add_service_history
Create Date: 2016-04-20 13:59:01.132535
"""
# revision identifiers, used by Alembic.
revision = "0004_notification_stats_date"
down_revision = "0003_add_service_history"
import sqlalchemy as sa
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("uix_service_to_day", "notification_statistics")
op.alter_column("notification_statistics", "day", new_column_name="day_string")
op.add_column("notification_statistics", sa.Column("day", sa.Date(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_date(day_string, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.create_index(
op.f("ix_notification_statistics_day"),
"notification_statistics",
["day"],
unique=False,
)
op.drop_column("notification_statistics", "day_string")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_notification_statistics_day"), table_name="notification_statistics")
op.drop_constraint("uix_service_to_day", "notification_statistics")
op.alter_column("notification_statistics", "day", new_column_name="day_date")
op.add_column("notification_statistics", sa.Column("day", sa.String(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_char(day_date, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.drop_column("notification_statistics", "day_date")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
### end Alembic commands ###
| 36.474576 | 152 | 0.72816 |
revision = "0004_notification_stats_date"
down_revision = "0003_add_service_history"
import sqlalchemy as sa
from alembic import op
def upgrade():
day", new_column_name="day_string")
op.add_column("notification_statistics", sa.Column("day", sa.Date(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_date(day_string, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.create_index(
op.f("ix_notification_statistics_day"),
"notification_statistics",
["day"],
unique=False,
)
op.drop_column("notification_statistics", "day_string")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
statistics")
op.alter_column("notification_statistics", "day", new_column_name="day_date")
op.add_column("notification_statistics", sa.Column("day", sa.String(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_char(day_date, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.drop_column("notification_statistics", "day_date")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
| true | true |
f72890be66b9eb5defdbca1703a26076d1df08f2 | 517 | py | Python | env/lib/python3.8/site-packages/plotly/validators/waterfall/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_visible.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="waterfall", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
| 36.928571 | 81 | 0.646035 | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="waterfall", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
| true | true |
f72892821a3d308dd415a1fa6bb4038b413968a2 | 5,197 | py | Python | cogs/tags.py | MiningMark48/Tidal-Bot | 8db6ecb220fd35930ffe1df5653af7a1ca03c8e9 | [
"MIT"
] | 6 | 2020-08-09T15:43:07.000Z | 2022-03-11T15:12:21.000Z | cogs/tags.py | MiningMark48/Tidal-Bot | 8db6ecb220fd35930ffe1df5653af7a1ca03c8e9 | [
"MIT"
] | 6 | 2020-10-29T02:32:40.000Z | 2022-01-13T03:12:45.000Z | cogs/tags.py | MiningMark48/Tidal-Bot | 8db6ecb220fd35930ffe1df5653af7a1ca03c8e9 | [
"MIT"
] | 1 | 2021-06-09T08:06:31.000Z | 2021-06-09T08:06:31.000Z | from discord.ext import commands
from discord.utils import escape_markdown
from fuzzywuzzy import process as fwp
from util.data.guild_data import GuildData
class Tags(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="settag", aliases=["edittag", "newtag", "addtag"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_set(self, ctx, tag_name: str, *, message: str):
"""
Create a new bot tag.
"""
tag_name = tag_name.lower()
message = message[:1900]
GuildData(str(ctx.guild.id)).tags.set(tag_name, message)
await ctx.send(f"Set tag `{tag_name}` to `{escape_markdown(message)}`.")
@commands.command(name="deletetag", aliases=["deltag", "tagdelete"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_delete(self, ctx, *, tag_name: str):
"""
Delete a bot tag.
"""
tag_name = tag_name.lower()
result = GuildData(str(ctx.guild.id)).tags.delete(tag_name)
if result:
await ctx.send(f"Deleted tag `{tag_name}`.")
else:
await ctx.send("Invalid tag!")
@commands.command(name="taglist", aliases=["listtags", "tags"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_list(self, ctx):
"""
List available tags for the server.
"""
guild_tags = GuildData(str(ctx.guild.id)).tags.fetch_all()
if not len(guild_tags) > 0:
await ctx.send("No tags available!")
return
tags = f"{ctx.guild.name} Server Tags\n\n"
for t in sorted(guild_tags):
value = t[2]
value = value.replace("\n", "")
tags += f"[{t[1]}] {escape_markdown(value[:100])}{'...' if len(value) > 100 else ''}\n"
parts = [(tags[i:i + 750]) for i in range(0, len(tags), 750)]
for part in parts:
part = part.replace("```", "")
await ctx.send(f"```{part}```")
@commands.command(name="tagsearch", aliases=["searchtag"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_search(self, ctx, *, tag_name: str):
"""
Search for a tag.
"""
search_results = self.handle_search(ctx, tag_name)
if len(search_results) <= 0:
await ctx.send("No search results found!")
return
results_txt = f"Tag Search Results ({tag_name})\n\n"
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"```{results_txt}```")
@commands.command()
@commands.cooldown(1, 2)
@commands.guild_only()
async def tag(self, ctx, *, tag_name: str):
"""
Call a bot tag.
"""
tag_name = tag_name.lower()
tags = GuildData(str(ctx.guild.id)).tags
if len(tags.fetch_all()) <= 0:
await ctx.send("No tags available!")
return
# response = self.tags[str(ctx.guild.id)][tag_name]
response = tags.fetch_by_name(tag_name)
if response:
response = self.handle_variables(response, ctx)
await ctx.send(response)
else:
search_results = self.handle_search(ctx, tag_name)[:3]
results_txt = ""
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"Couldn't find that tag. Did you mean one of the following?\n```\n{results_txt}\n```")
@commands.command(name="tagvariables", aliases=["tagvars", "variables", "vars"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_variables(self, ctx):
"""
Get the list of supported tag variables.
Tag variables are parts of a string that get replace by specific data.
"""
variables = self.get_variables(ctx)
vs = f"Tag Variables\n\n"
for v in sorted(variables):
vs += f"[{v}] Ex: {variables[str(v)]}\n"
parts = [(vs[i:i + 750]) for i in range(0, len(vs), 750)]
for part in parts:
await ctx.send(f"```{part}```")
@staticmethod
def get_variables(ctx):
variables = {
"author": ctx.author.display_name,
"author_id": ctx.author.id,
"channel": ctx.channel.name,
"command_key": ctx.prefix,
"server_id": ctx.guild.id,
"server_name": ctx.guild.name
}
return variables
def handle_variables(self, message, ctx):
variables = self.get_variables(ctx)
def to_key(v_):
return f"${{{v_}}}"
for v in variables:
message = message.replace(to_key(v), str(variables[v]))
return message
@staticmethod
def handle_search(ctx, tag_name):
options = []
for tag in GuildData(str(ctx.guild.id)).tags.fetch_all():
options.append(tag[1])
search_results = fwp.extract(tag_name, options)
return search_results
def setup(bot):
bot.add_cog(Tags(bot))
| 29.697143 | 114 | 0.570714 | from discord.ext import commands
from discord.utils import escape_markdown
from fuzzywuzzy import process as fwp
from util.data.guild_data import GuildData
class Tags(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="settag", aliases=["edittag", "newtag", "addtag"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_set(self, ctx, tag_name: str, *, message: str):
tag_name = tag_name.lower()
message = message[:1900]
GuildData(str(ctx.guild.id)).tags.set(tag_name, message)
await ctx.send(f"Set tag `{tag_name}` to `{escape_markdown(message)}`.")
@commands.command(name="deletetag", aliases=["deltag", "tagdelete"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_delete(self, ctx, *, tag_name: str):
tag_name = tag_name.lower()
result = GuildData(str(ctx.guild.id)).tags.delete(tag_name)
if result:
await ctx.send(f"Deleted tag `{tag_name}`.")
else:
await ctx.send("Invalid tag!")
@commands.command(name="taglist", aliases=["listtags", "tags"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_list(self, ctx):
guild_tags = GuildData(str(ctx.guild.id)).tags.fetch_all()
if not len(guild_tags) > 0:
await ctx.send("No tags available!")
return
tags = f"{ctx.guild.name} Server Tags\n\n"
for t in sorted(guild_tags):
value = t[2]
value = value.replace("\n", "")
tags += f"[{t[1]}] {escape_markdown(value[:100])}{'...' if len(value) > 100 else ''}\n"
parts = [(tags[i:i + 750]) for i in range(0, len(tags), 750)]
for part in parts:
part = part.replace("```", "")
await ctx.send(f"```{part}```")
@commands.command(name="tagsearch", aliases=["searchtag"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_search(self, ctx, *, tag_name: str):
search_results = self.handle_search(ctx, tag_name)
if len(search_results) <= 0:
await ctx.send("No search results found!")
return
results_txt = f"Tag Search Results ({tag_name})\n\n"
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"```{results_txt}```")
@commands.command()
@commands.cooldown(1, 2)
@commands.guild_only()
async def tag(self, ctx, *, tag_name: str):
tag_name = tag_name.lower()
tags = GuildData(str(ctx.guild.id)).tags
if len(tags.fetch_all()) <= 0:
await ctx.send("No tags available!")
return
response = tags.fetch_by_name(tag_name)
if response:
response = self.handle_variables(response, ctx)
await ctx.send(response)
else:
search_results = self.handle_search(ctx, tag_name)[:3]
results_txt = ""
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"Couldn't find that tag. Did you mean one of the following?\n```\n{results_txt}\n```")
@commands.command(name="tagvariables", aliases=["tagvars", "variables", "vars"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_variables(self, ctx):
variables = self.get_variables(ctx)
vs = f"Tag Variables\n\n"
for v in sorted(variables):
vs += f"[{v}] Ex: {variables[str(v)]}\n"
parts = [(vs[i:i + 750]) for i in range(0, len(vs), 750)]
for part in parts:
await ctx.send(f"```{part}```")
@staticmethod
def get_variables(ctx):
variables = {
"author": ctx.author.display_name,
"author_id": ctx.author.id,
"channel": ctx.channel.name,
"command_key": ctx.prefix,
"server_id": ctx.guild.id,
"server_name": ctx.guild.name
}
return variables
def handle_variables(self, message, ctx):
variables = self.get_variables(ctx)
def to_key(v_):
return f"${{{v_}}}"
for v in variables:
message = message.replace(to_key(v), str(variables[v]))
return message
@staticmethod
def handle_search(ctx, tag_name):
options = []
for tag in GuildData(str(ctx.guild.id)).tags.fetch_all():
options.append(tag[1])
search_results = fwp.extract(tag_name, options)
return search_results
def setup(bot):
bot.add_cog(Tags(bot))
| true | true |
f728933e12b6cec90425b8c9b4184172c1867bfe | 2,547 | py | Python | epycom/univariate/approximate_entropy.py | ICRC-BME/epycom | 5bfa3fb9020f04536b7a08382533c8abf56ca85f | [
"Apache-2.0"
] | null | null | null | epycom/univariate/approximate_entropy.py | ICRC-BME/epycom | 5bfa3fb9020f04536b7a08382533c8abf56ca85f | [
"Apache-2.0"
] | 1 | 2020-10-22T19:10:57.000Z | 2020-10-22T21:09:02.000Z | epycom/univariate/approximate_entropy.py | ICRC-BME/epycom | 5bfa3fb9020f04536b7a08382533c8abf56ca85f | [
"Apache-2.0"
] | 1 | 2021-02-24T10:07:32.000Z | 2021-02-24T10:07:32.000Z | # -*- coding: utf-8 -*-
# Copyright (c) St. Anne's University Hospital in Brno. International Clinical
# Research Center, Biomedical Engineering. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Third pary imports
import numpy as np
from numba import njit
# Local imports
from ..utils.method import Method
@njit('f8(f8[:], f8[:])', cache=True)
def _maxdist(x_i, x_j):
dist = 0
leni = len(x_i)
lenj = len(x_j)
if leni < lenj:
n = len(x_i)
else:
n = len(x_j)
for ua in range(n):
if abs(x_i[ua] - x_j[ua]) > dist:
dist = abs(x_i[ua] - x_j[ua])
return dist
@njit('f8(i8, i8, f8, f8[:])', cache=True)
def _phi_jitted(m, N, r, sig):
z = N - m + 1
xlen = N - m + 1
x = np.full((xlen, m), np.inf, dtype='float64')
# Sampling the signal
for i in range(xlen):
x[i] = sig[i: i + m]
C = np.full(len(sig), np.inf, dtype='float64')
iterator = cnt = 0
for x_i in x:
for x_j in x:
if _maxdist(x_i, x_j) <= r:
cnt += 1
C[iterator] = cnt / (N - m + 1.0)
cnt = 0
iterator += 1
C = C[:iterator]
phi = 0
for c in C:
phi = phi+np.log(c)
return phi/z
@njit('f8(f8[:], f8, i8)', cache=True)
def compute_approximate_entropy(sig, r, m):
"""
Function computes approximate entropy of given signal
Parameters
----------
sig: np.ndarray
1D signal
r: np.float64
filtering treshold, recommended values: (0.1-0.25)*np.nanstd(sig)
m: int
window length of compared run of data, recommended (2-8)
Returns
-------
entro: numpy.float64
approximate entropy
Example
-------
signal_entropy = approximate_entropy(data, 0.1*np.nanstd(data))
"""
N = sig.shape[0]
return abs(_phi_jitted(m + 1, N, r, sig) - _phi_jitted(m, N, r, sig))
class ApproximateEntropy(Method):
algorithm = 'APPROXIMATE_ENTROPY'
algorithm_type = 'univariate'
version = '1.0.0'
dtype = [('apen', 'float32')]
def __init__(self, **kwargs):
"""
Approximate entropy
Parameters
----------
sig: np.ndarray
1D signal
m: int
window length of compared run of data, recommended (2-8)
r: float64
filtering treshold, recommended values: (0.1-0.25)*std
"""
super().__init__(compute_approximate_entropy, **kwargs)
self._event_flag = False
| 22.342105 | 78 | 0.56066 |
# Research Center, Biomedical Engineering. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Third pary imports
import numpy as np
from numba import njit
# Local imports
from ..utils.method import Method
@njit('f8(f8[:], f8[:])', cache=True)
def _maxdist(x_i, x_j):
dist = 0
leni = len(x_i)
lenj = len(x_j)
if leni < lenj:
n = len(x_i)
else:
n = len(x_j)
for ua in range(n):
if abs(x_i[ua] - x_j[ua]) > dist:
dist = abs(x_i[ua] - x_j[ua])
return dist
@njit('f8(i8, i8, f8, f8[:])', cache=True)
def _phi_jitted(m, N, r, sig):
z = N - m + 1
xlen = N - m + 1
x = np.full((xlen, m), np.inf, dtype='float64')
# Sampling the signal
for i in range(xlen):
x[i] = sig[i: i + m]
C = np.full(len(sig), np.inf, dtype='float64')
iterator = cnt = 0
for x_i in x:
for x_j in x:
if _maxdist(x_i, x_j) <= r:
cnt += 1
C[iterator] = cnt / (N - m + 1.0)
cnt = 0
iterator += 1
C = C[:iterator]
phi = 0
for c in C:
phi = phi+np.log(c)
return phi/z
@njit('f8(f8[:], f8, i8)', cache=True)
def compute_approximate_entropy(sig, r, m):
N = sig.shape[0]
return abs(_phi_jitted(m + 1, N, r, sig) - _phi_jitted(m, N, r, sig))
class ApproximateEntropy(Method):
algorithm = 'APPROXIMATE_ENTROPY'
algorithm_type = 'univariate'
version = '1.0.0'
dtype = [('apen', 'float32')]
def __init__(self, **kwargs):
super().__init__(compute_approximate_entropy, **kwargs)
self._event_flag = False
| true | true |
f7289370a5f8c41fbb9f0232b513bcd3c912330a | 42 | py | Python | tests/test_inputs/fail.py | bdice/flake8-force | 5536c01c09ff202a3a3545a466f39ff08ec1af99 | [
"MIT"
] | 4 | 2021-12-04T10:12:46.000Z | 2022-02-15T06:35:18.000Z | tests/test_inputs/fail.py | bdice/flake8-force | 5536c01c09ff202a3a3545a466f39ff08ec1af99 | [
"MIT"
] | null | null | null | tests/test_inputs/fail.py | bdice/flake8-force | 5536c01c09ff202a3a3545a466f39ff08ec1af99 | [
"MIT"
] | 2 | 2022-02-11T10:51:43.000Z | 2022-02-15T23:35:20.000Z | import sys
import os
print(sys.platform)
| 8.4 | 19 | 0.785714 | import sys
import os
print(sys.platform)
| true | true |
f728937dbe44547fdf4bac17a2c89b6b24065e31 | 84,164 | py | Python | awswrangler/s3.py | JPFrancoia/aws-data-wrangler | 5b08087d79b42683b03be91ba5ebc12ad4bd2d3d | [
"Apache-2.0"
] | null | null | null | awswrangler/s3.py | JPFrancoia/aws-data-wrangler | 5b08087d79b42683b03be91ba5ebc12ad4bd2d3d | [
"Apache-2.0"
] | null | null | null | awswrangler/s3.py | JPFrancoia/aws-data-wrangler | 5b08087d79b42683b03be91ba5ebc12ad4bd2d3d | [
"Apache-2.0"
] | null | null | null | """Amazon S3 Module."""
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import botocore.exceptions # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from boto3.s3.transfer import TransferConfig # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
"""Get bucket region name.
Parameters
----------
bucket : str
Bucket name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
str
Region code (e.g. 'us-east-1').
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name')
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
_logger.debug(f"bucket: {bucket}")
region: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
region = "us-east-1" if region is None else region
_logger.debug(f"region: {region}")
return region
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
"""Check if object exists on S3.
Parameters
----------
path: str
S3 path (e.g. s3://bucket/key).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
bool
True if exists, False otherwise.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real')
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal')
False
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())
False
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
bucket: str
key: str
bucket, key = path.replace("s3://", "").split("/", 1)
try:
client_s3.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return False
raise ex # pragma: no cover
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
"""List Amazon S3 objects from a prefix.
Parameters
----------
path : str
S3 path (e.g. s3://bucket/prefix).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
List[str]
List of objects paths.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix')
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
paginator = client_s3.get_paginator("list_objects_v2")
bucket: str
prefix: str
bucket, prefix = _utils.parse_path(path=path)
response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000})
paths: List[str] = []
for page in response_iterator:
contents: Optional[List] = page.get("Contents")
if contents is not None:
for content in contents:
if (content is not None) and ("Key" in content):
key: str = content["Key"]
paths.append(f"s3://{bucket}/{key}")
return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
    """Normalize a prefix or an explicit list of paths into a list of S3 object paths."""
    if isinstance(path, str):
        # A string is treated as a prefix and expanded to the objects below it.
        return list_objects(path=path, boto3_session=boto3_session)
    if isinstance(path, list):
        return path
    raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
def delete_objects(
    path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
    """Delete Amazon S3 objects given either a prefix or an explicit list of object paths.
    Note
    ----
    When `use_threads=True` the number of spawned threads is taken from os.cpu_count().
    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    None
        None.
    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1'])  # Delete both objects
    >>> wr.s3.delete_objects('s3://bucket/prefix')  # Delete all objects under the received prefix
    """
    object_paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if not object_paths:
        return
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    # The DeleteObjects API works per bucket, so keys are grouped first and then
    # chunked to respect the 1000-keys-per-request limit.
    for bucket, keys in _split_paths_by_bucket(paths=object_paths).items():
        chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
        if use_threads is False:
            for chunk in chunks:
                _delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)
        else:
            cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
            with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
                executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
    """Group object keys by their bucket name."""
    grouped: Dict[str, List[str]] = {}
    for path in paths:
        bucket, key = _utils.parse_path(path=path)
        grouped.setdefault(bucket, []).append(key)
    return grouped
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
    """Issue one batch DeleteObjects call for the given keys of a single bucket."""
    _logger.debug(f"len(keys): {len(keys)}")
    payload: Dict[str, Any] = {"Objects": [{"Key": key} for key in keys]}
    client_s3.delete_objects(Bucket=bucket, Delete=payload)
def describe_objects(
    path: Union[str, List[str]],
    wait_time: Optional[Union[int, float]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
    """Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
    Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc
    The full list of attributes can be explored under the boto3 head_object documentation:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
    Note
    ----
    When `use_threads=True` the number of spawned threads is taken from os.cpu_count().
    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    wait_time : Union[int,float], optional
        How much time (seconds) should Wrangler try to reach this objects.
        Very useful to overcome eventual consistence issues.
        `None` means only a single try will be done.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    Dict[str, Dict[str, Any]]
        Dictionary mapping each object path to its head_object response.
        The response object can be explored here:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
    Examples
    --------
    >>> import awswrangler as wr
    >>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1'])  # Describe both objects
    >>> descs1 = wr.s3.describe_objects('s3://bucket/prefix')  # Describe all objects under the prefix
    >>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30)  # Overcoming eventual consistence issues
    """
    object_paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if not object_paths:
        return {}
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    pairs: List[Tuple[str, Dict[str, Any]]]
    if use_threads is False:
        pairs = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in object_paths]
    else:
        cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
        with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
            pairs = list(executor.map(_describe_object, object_paths, repeat(wait_time), repeat(client_s3)))
    return dict(pairs)
def _describe_object(
    path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
    """Head a single object, retrying once per second while it is not yet visible.

    Returns the pair (path, head_object response); the response is an empty dict
    when the object is still missing after all tries.
    """
    if isinstance(wait_time, float):
        wait_time = int(wait_time)
    # One try per second of wait_time; at least one try overall.
    remaining: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
    bucket, key = _utils.parse_path(path=path)
    description: Dict[str, Any] = {}
    while remaining > 0:
        try:
            description = client_s3.head_object(Bucket=bucket, Key=key)
            break
        except botocore.exceptions.ClientError as error:  # pragma: no cover
            if error.response["ResponseMetadata"]["HTTPStatusCode"] != 404:  # Not Found
                raise error
            _logger.debug(f"Object not found. {remaining} seconds remaining to wait.")
            if remaining == 1:  # Last try, there is no more need to sleep
                break
            time.sleep(1)
        remaining -= 1
    return path, description
def size_objects(
    path: Union[str, List[str]],
    wait_time: Optional[Union[int, float]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
    """Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
    Note
    ----
    When `use_threads=True` the number of spawned threads is taken from os.cpu_count().
    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    wait_time : Union[int,float], optional
        How much time (seconds) should Wrangler try to reach this objects.
        Very useful to overcome eventual consistence issues.
        `None` means only a single try will be done.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    Returns
    -------
    Dict[str, Optional[int]]
        Dictionary where the key is the object path and the value is the object size.
    Examples
    --------
    >>> import awswrangler as wr
    >>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1'])  # Get the sizes of both objects
    >>> sizes1 = wr.s3.size_objects('s3://bucket/prefix')  # Get the sizes of all objects under the received prefix
    >>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30)  # Overcoming eventual consistence issues
    """
    descriptions: Dict[str, Dict[str, Any]] = describe_objects(
        path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
    )
    sizes: Dict[str, Optional[int]] = {}
    for object_path, description in descriptions.items():
        sizes[object_path] = description.get("ContentLength", None)
    return sizes
def to_csv(  # pylint: disable=too-many-arguments
    df: pd.DataFrame,
    path: str,
    sep: str = ",",
    index: bool = True,
    columns: Optional[List[str]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write CSV file or dataset on Amazon S3.
    The concept of Dataset goes beyond the simple idea of files and enable more
    complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
    Note
    ----
    The table name and all column names will be automatically sanitize using
    `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().
    Parameters
    ----------
    df: pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    path : str
        Amazon S3 path (e.g. s3://bucket/filename.csv).
    sep : str
        String of length 1. Field delimiter for the output file.
    index : bool
        Write row names (index).
    columns : List[str], optional
        Columns to write.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 Session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forward to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    dataset: bool
        If True store a parquet dataset instead of a single file.
        If True, enable all follow arguments:
        partition_cols, mode, database, table, description, parameters, columns_comments, .
    partition_cols: List[str], optional
        List of column names that will be used to create partitions. Only takes effect if dataset=True.
    mode: str, optional
        ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
    database : str, optional
        Glue/Athena catalog: Database name.
    table : str, optional
        Glue/Athena catalog: Table name.
    dtype: Dict[str, str], optional
        Dictionary of columns names and Athena/Glue types to be casted.
        Useful when you have columns with undetermined or mixed data types.
        Only takes effect if dataset=True.
        (e.g. {'col name': 'bigint', 'col2 name': 'int'})
    description: str, optional
        Glue/Athena catalog: Table description
    parameters: Dict[str, str], optional
        Glue/Athena catalog: Key/value pairs to tag the table.
    columns_comments: Dict[str, str], optional
        Glue/Athena catalog:
        Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    pandas_kwargs:
        keyword arguments forwarded to pandas.DataFrame.to_csv()
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
    Returns
    -------
    Dict[str, Union[List[str], Dict[str, List[str]]]]
        Dictionary with:
        'paths': List of all stored files paths on S3.
        'partitions_values': Dictionary of partitions added with keys as S3 path locations
        and values as a list of partitions values as str.
    Examples
    --------
    Writing single file
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.csv',
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.csv'],
        'partitions_values': {}
    }
    Writing single file encrypted with a KMS key
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.csv',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.csv'],
        'partitions_values': {}
    }
    Writing partitioned dataset
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2']
    ... )
    {
        'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values: {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }
    Writing dataset to S3 with metadata on Athena/Glue Catalog.
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2'],
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ... )
    {
        'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values: {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }
    Writing dataset casting empty column data type
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B'],
    ...         'col3': [None, None, None]
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ...     dtype={'col3': 'date'}
    ... )
    {
        'paths': ['s3://.../x.csv'],
        'partitions_values: {}
    }
    """
    # XOR: database and table must be given together (or both omitted).
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    columns_comments = columns_comments if columns_comments else {}
    partitions_values: Dict[str, List[str]] = {}
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    if dataset is False:
        # Single-file mode: dataset-only arguments are rejected explicitly.
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        pandas_kwargs["sep"] = sep
        pandas_kwargs["index"] = index
        pandas_kwargs["columns"] = columns
        _to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
        paths = [path]
    else:
        mode = "append" if mode is None else mode
        exist: bool = False
        if columns:
            df = df[columns]
        if (database is not None) and (table is not None):  # Normalize table to respect Athena's standards
            df = catalog.sanitize_dataframe_columns_names(df=df)
            partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
            dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
            columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
            exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
            # When appending to an existing table, reuse its column types so new
            # files stay compatible with the catalog schema.
            if (exist is True) and (mode in ("append", "overwrite_partitions")):
                for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
                    dtype[k] = v
            df = catalog.drop_duplicated_columns(df=df)
        paths, partitions_values = _to_csv_dataset(
            df=df,
            path=path,
            index=index,
            sep=sep,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
            )
            if (exist is False) or (mode == "overwrite"):
                catalog.create_csv_table(
                    database=database,
                    table=table,
                    path=path,
                    columns_types=columns_types,
                    partitions_types=partitions_types,
                    description=description,
                    parameters=parameters,
                    columns_comments=columns_comments,
                    boto3_session=session,
                    mode="overwrite",
                    sep=sep,
                )
            if partitions_values:
                _logger.debug(f"partitions_values:\n{partitions_values}")
                catalog.add_csv_partitions(
                    database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
                )
    return {"paths": paths, "partitions_values": partitions_values}
def _to_csv_dataset(
    df: pd.DataFrame,
    path: str,
    index: bool,
    sep: str,
    fs: s3fs.S3FileSystem,
    use_threads: bool,
    mode: str,
    dtype: Dict[str, str],
    partition_cols: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write *df* under *path* as one or more CSV files, optionally partitioned.

    Returns the list of written file paths plus the mapping of partition
    prefixes to partition values.
    """
    path = path if path[-1] == "/" else f"{path}/"
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
        )
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
    df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
    _logger.debug(f"dtypes: {df.dtypes}")

    def _write(frame: pd.DataFrame, file_path: str) -> None:
        # Every file of the dataset is written with the same fixed CSV layout.
        _to_text(
            file_format="csv",
            df=frame,
            path=file_path,
            fs=fs,
            quoting=csv.QUOTE_NONE,
            escapechar="\\",
            header=False,
            date_format="%Y-%m-%d %H:%M:%S.%f",
            index=index,
            sep=sep,
        )

    paths: List[str] = []
    partitions_values: Dict[str, List[str]] = {}
    if not partition_cols:
        target: str = f"{path}{uuid.uuid4().hex}.csv"
        _write(df, target)
        paths.append(target)
    else:
        for keys, subgroup in df.groupby(by=partition_cols, observed=True):
            subgroup = subgroup.drop(partition_cols, axis="columns")
            keys = (keys,) if not isinstance(keys, tuple) else keys
            subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
            prefix: str = f"{path}{subdir}/"
            if mode == "overwrite_partitions":
                delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
            target = f"{prefix}{uuid.uuid4().hex}.csv"
            _write(subgroup, target)
            paths.append(target)
            partitions_values[prefix] = [str(k) for k in keys]
    return paths, partitions_values
def to_json(
    df: pd.DataFrame,
    path: str,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> None:
    """Write JSON file on Amazon S3.
    Parameters
    ----------
    df: pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    path : str
        Amazon S3 path (e.g. s3://bucket/filename.json).
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 Session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forward to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    pandas_kwargs:
        keyword arguments forwarded to pandas.DataFrame.to_json()
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html
    Returns
    -------
    None
        None.
    Examples
    --------
    Writing JSON file
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_json(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/filename.json',
    ... )
    Writing JSON file encrypted with a KMS key
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_json(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/filename.json',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )
    """
    return _to_text(
        file_format="json",
        df=df,
        path=path,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        **pandas_kwargs,
    )
def _to_text(
    file_format: str,
    df: pd.DataFrame,
    path: str,
    fs: Optional[s3fs.S3FileSystem] = None,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> None:
    """Serialize *df* to a single text object (CSV or JSON) on S3."""
    if df.empty is True:  # pragma: no cover
        raise exceptions.EmptyDataFrame()
    if fs is not None:
        file_system = fs
    else:
        # Build a filesystem handle on demand when the caller did not supply one.
        file_system = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    with file_system.open(path, "w") as handle:
        if file_format == "csv":
            df.to_csv(handle, **pandas_kwargs)
        elif file_format == "json":
            df.to_json(handle, **pandas_kwargs)
def to_parquet(  # pylint: disable=too-many-arguments
    df: pd.DataFrame,
    path: str,
    index: bool = False,
    compression: Optional[str] = "snappy",
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write Parquet file or dataset on Amazon S3.
    The concept of Dataset goes beyond the simple idea of files and enable more
    complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
    Note
    ----
    The table name and all column names will be automatically sanitize using
    `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().
    Parameters
    ----------
    df: pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    path : str
        S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
    index : bool
        True to store the DataFrame index in file, otherwise False to ignore it.
    compression: str, optional
        Compression style (``None``, ``snappy``, ``gzip``).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forward to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    dataset: bool
        If True store a parquet dataset instead of a single file.
        If True, enable all follow arguments:
        partition_cols, mode, database, table, description, parameters, columns_comments, .
    partition_cols: List[str], optional
        List of column names that will be used to create partitions. Only takes effect if dataset=True.
    mode: str, optional
        ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
    database : str, optional
        Glue/Athena catalog: Database name.
    table : str, optional
        Glue/Athena catalog: Table name.
    dtype: Dict[str, str], optional
        Dictionary of columns names and Athena/Glue types to be casted.
        Useful when you have columns with undetermined or mixed data types.
        Only takes effect if dataset=True.
        (e.g. {'col name': 'bigint', 'col2 name': 'int'})
    description: str, optional
        Glue/Athena catalog: Table description
    parameters: Dict[str, str], optional
        Glue/Athena catalog: Key/value pairs to tag the table.
    columns_comments: Dict[str, str], optional
        Glue/Athena catalog:
        Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    Returns
    -------
    Dict[str, Union[List[str], Dict[str, List[str]]]]
        Dictionary with:
        'paths': List of all stored files paths on S3.
        'partitions_values': Dictionary of partitions added with keys as S3 path locations
        and values as a list of partitions values as str.
    Examples
    --------
    Writing single file
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.parquet',
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.parquet'],
        'partitions_values': {}
    }
    Writing single file encrypted with a KMS key
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.parquet',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.parquet'],
        'partitions_values': {}
    }
    Writing partitioned dataset
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2']
    ... )
    {
        'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values: {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }
    Writing dataset to S3 with metadata on Athena/Glue Catalog.
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2'],
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ... )
    {
        'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values: {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }
    Writing dataset casting empty column data type
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B'],
    ...         'col3': [None, None, None]
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ...     dtype={'col3': 'date'}
    ... )
    {
        'paths': ['s3://.../x.parquet'],
        'partitions_values: {}
    }
    """
    # XOR: database and table must be given together (or both omitted).
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    columns_comments = columns_comments if columns_comments else {}
    partitions_values: Dict[str, List[str]] = {}
    cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    # Resolving the file-name extension also validates the compression value:
    # unknown compressions have no entry in the mapping.
    compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
    if compression_ext is None:
        raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
    if dataset is False:
        # Single-file mode: dataset-only arguments are rejected explicitly.
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        paths = [
            _to_parquet_file(
                df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
            )
        ]
    else:
        mode = "append" if mode is None else mode
        exist: bool = False
        if (database is not None) and (table is not None):  # Normalize table to respect Athena's standards
            df = catalog.sanitize_dataframe_columns_names(df=df)
            partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
            dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
            columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
            exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
            # When appending to an existing table, reuse its column types so new
            # files stay compatible with the catalog schema.
            if (exist is True) and (mode in ("append", "overwrite_partitions")):
                for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
                    dtype[k] = v
            df = catalog.drop_duplicated_columns(df=df)
        paths, partitions_values = _to_parquet_dataset(
            df=df,
            path=path,
            index=index,
            compression=compression,
            compression_ext=compression_ext,
            cpus=cpus,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype
            )
            if (exist is False) or (mode == "overwrite"):
                catalog.create_parquet_table(
                    database=database,
                    table=table,
                    path=path,
                    columns_types=columns_types,
                    partitions_types=partitions_types,
                    compression=compression,
                    description=description,
                    parameters=parameters,
                    columns_comments=columns_comments,
                    boto3_session=session,
                    mode="overwrite",
                )
            if partitions_values:
                _logger.debug(f"partitions_values:\n{partitions_values}")
                catalog.add_parquet_partitions(
                    database=database,
                    table=table,
                    partitions_values=partitions_values,
                    compression=compression,
                    boto3_session=session,
                )
    return {"paths": paths, "partitions_values": partitions_values}
def _to_parquet_dataset(
    df: pd.DataFrame,
    path: str,
    index: bool,
    compression: Optional[str],
    compression_ext: str,
    cpus: int,
    fs: s3fs.S3FileSystem,
    use_threads: bool,
    mode: str,
    dtype: Dict[str, str],
    partition_cols: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write *df* under *path* as one or more Parquet files, optionally partitioned.

    Returns the list of written file paths plus the mapping of partition
    prefixes to partition values.
    """
    path = path if path[-1] == "/" else f"{path}/"
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
        )
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
    df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
    schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
        df=df, index=index, ignore_cols=partition_cols, dtype=dtype
    )
    _logger.debug(f"schema: {schema}")

    def _write(frame: pd.DataFrame, file_path: str) -> None:
        # Every file of the dataset shares the same schema and compression.
        _to_parquet_file(
            df=frame,
            schema=schema,
            path=file_path,
            index=index,
            compression=compression,
            cpus=cpus,
            fs=fs,
            dtype=dtype,
        )

    paths: List[str] = []
    partitions_values: Dict[str, List[str]] = {}
    if not partition_cols:
        target: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
        _write(df, target)
        paths.append(target)
    else:
        for keys, subgroup in df.groupby(by=partition_cols, observed=True):
            subgroup = subgroup.drop(partition_cols, axis="columns")
            keys = (keys,) if not isinstance(keys, tuple) else keys
            subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
            prefix: str = f"{path}{subdir}/"
            if mode == "overwrite_partitions":
                delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
            target = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
            _write(subgroup, target)
            paths.append(target)
            partitions_values[prefix] = [str(k) for k in keys]
    return paths, partitions_values
def _to_parquet_file(
    df: pd.DataFrame,
    path: str,
    schema: pa.Schema,
    index: bool,
    compression: Optional[str],
    cpus: int,
    fs: s3fs.S3FileSystem,
    dtype: Dict[str, str],
) -> str:
    """Convert *df* to an Arrow table, apply the Athena dtype overrides and write one Parquet file.

    Returns the written file path.
    """
    table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
    for col_name, col_type in dtype.items():
        if col_name not in table.column_names:
            continue
        position = table.column_names.index(col_name)
        arrow_type = _data_types.athena2pyarrow(col_type)
        new_field = pa.field(name=col_name, type=arrow_type)
        table = table.set_column(position, new_field, table.column(col_name).cast(arrow_type))
        _logger.debug(f"Casting column {col_name} ({position}) to {col_type} ({arrow_type})")
    pyarrow.parquet.write_table(
        table=table,
        where=path,
        filesystem=fs,
        compression=compression,
        coerce_timestamps="ms",
        write_statistics=True,
        use_dictionary=True,
        flavor="spark",
    )
    return path
def read_csv(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load CSV object(s) from a received S3 prefix or list of S3 object paths.

    Note
    ----
    For partial and gradual reading pass ``chunksize``; the pandas ``iterator``
    argument is not supported here.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
    pandas_kwargs:
        Keyword arguments forwarded to pandas.read_csv().
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html

    Returns
    -------
    Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
        Pandas DataFrame or a Generator in case of `chunksize != None`.

    Examples
    --------
    Reading all CSV files under a prefix

    >>> import awswrangler as wr
    >>> df = wr.s3.read_csv(path='s3://bucket/prefix/')

    Reading an explicit list of objects in chunks of 100 lines

    >>> import awswrangler as wr
    >>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)
    >>> for df in dfs:
    >>>     print(df)  # 100 lines Pandas DataFrame

    Reading with server-side encryption (KMS)

    >>> import awswrangler as wr
    >>> df = wr.s3.read_csv(
    ...     path='s3://bucket/prefix/',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )

    """
    # Thin wrapper: all the S3 listing/threading logic lives in _read_text.
    return _read_text(
        pd.read_csv,
        path=path,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize,
        **pandas_kwargs,
    )
def read_fwf(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load fixed-width formatted object(s) from a received S3 prefix or list of S3 object paths.

    Note
    ----
    For partial and gradual reading pass ``chunksize``; the pandas ``iterator``
    argument is not supported here.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
    pandas_kwargs:
        Keyword arguments forwarded to pandas.read_fwf().
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html

    Returns
    -------
    Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
        Pandas DataFrame or a Generator in case of `chunksize != None`.

    Examples
    --------
    Reading all fixed-width formatted (FWF) files under a prefix

    >>> import awswrangler as wr
    >>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')

    Reading an explicit list of objects in chunks of 100 lines

    >>> import awswrangler as wr
    >>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)
    >>> for df in dfs:
    >>>     print(df)  # 100 lines Pandas DataFrame

    Reading with server-side encryption (KMS)

    >>> import awswrangler as wr
    >>> df = wr.s3.read_fwf(
    ...     path='s3://bucket/prefix/',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )

    """
    # Thin wrapper: all the S3 listing/threading logic lives in _read_text.
    return _read_text(
        pd.read_fwf,
        path=path,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize,
        **pandas_kwargs,
    )
def read_json(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load JSON object(s) from a received S3 prefix or list of S3 object paths.

    Note
    ----
    For partial and gradual reading pass ``chunksize``; the pandas ``iterator``
    argument is not supported here.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
    pandas_kwargs:
        Keyword arguments forwarded to pandas.read_json().
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html

    Returns
    -------
    Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
        Pandas DataFrame or a Generator in case of `chunksize != None`.

    Examples
    --------
    Reading all JSON files under a prefix

    >>> import awswrangler as wr
    >>> df = wr.s3.read_json(path='s3://bucket/prefix/')

    Reading an explicit list of objects in chunks of 100 lines

    >>> import awswrangler as wr
    >>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)
    >>> for df in dfs:
    >>>     print(df)  # 100 lines Pandas DataFrame

    Reading with server-side encryption (KMS)

    >>> import awswrangler as wr
    >>> df = wr.s3.read_json(
    ...     path='s3://bucket/prefix/',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )

    """
    # Thin wrapper: all the S3 listing/threading logic lives in _read_text.
    return _read_text(
        pd.read_json,
        path=path,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize,
        **pandas_kwargs,
    )
def _read_text(
    parser_func: Callable,
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Dispatch text-format reads (CSV/FWF/JSON) over one or many S3 objects.

    Returns a generator of DataFrames when ``chunksize`` is given, otherwise a
    single concatenated DataFrame (read sequentially or via a thread pool).
    """
    if "iterator" in pandas_kwargs:
        raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
    paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if chunksize is not None:
        # Lazy path: hand back a generator that streams every object.
        return _read_text_chunksize(
            parser_func=parser_func,
            paths=paths,
            boto3_session=boto3_session,
            chunksize=chunksize,
            pandas_args=pandas_kwargs,
            s3_additional_kwargs=s3_additional_kwargs,
        )
    if use_threads is False:
        frames = [
            _read_text_full(
                parser_func=parser_func,
                path=p,
                boto3_session=boto3_session,
                pandas_args=pandas_kwargs,
                s3_additional_kwargs=s3_additional_kwargs,
            )
            for p in paths
        ]
        return pd.concat(objs=frames, ignore_index=True, sort=False)
    cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
    with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
        # executor.map keeps the input order, so the concat order is deterministic.
        frames = executor.map(
            _read_text_full,
            repeat(parser_func),
            paths,
            repeat(boto3_session),
            repeat(pandas_kwargs),
            repeat(s3_additional_kwargs),
        )
        return pd.concat(objs=frames, ignore_index=True, sort=False)
def _read_text_chunksize(
    parser_func: Callable,
    paths: List[str],
    boto3_session: boto3.Session,
    chunksize: int,
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
    """Yield DataFrames of at most ``chunksize`` rows from each S3 object in ``paths``.

    Parameters
    ----------
    parser_func : Callable
        pandas reader (e.g. pd.read_csv) accepting a file object and ``chunksize``.
    paths : List[str]
        Fully-qualified S3 object paths to read, in order.
    boto3_session : boto3.Session
        Session used to build the s3fs filesystem.
    chunksize : int
        Number of rows per yielded DataFrame.
    pandas_args : Dict[str, Any]
        Extra keyword arguments forwarded to ``parser_func``. Not mutated.
    s3_additional_kwargs : Dict[str, str], optional
        Forwarded to s3fs (e.g. server side encryption).
    """
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    for path in paths:
        _logger.debug(f"path: {path}")
        # BUG FIX: work on a per-path copy. The original mutated the shared
        # ``pandas_args`` dict, so the compression inferred from the first
        # file was pinned onto every subsequent file (wrong when a listing
        # mixes, e.g., .csv and .csv.gz objects).
        args: Dict[str, Any] = dict(pandas_args)
        if args.get("compression", "infer") == "infer":
            args["compression"] = infer_compression(path, compression="infer")
        with fs.open(path, "rb") as f:
            reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **args)
            for df in reader:
                yield df
def _read_text_full(
    parser_func: Callable,
    path: str,
    boto3_session: boto3.Session,
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
    """Read one whole S3 object into a DataFrame with ``parser_func``.

    Parameters
    ----------
    parser_func : Callable
        pandas reader (e.g. pd.read_csv) accepting a binary file object.
    path : str
        Fully-qualified S3 object path.
    boto3_session : boto3.Session
        Session used to build the s3fs filesystem.
    pandas_args : Dict[str, Any]
        Extra keyword arguments forwarded to ``parser_func``. Not mutated.
    s3_additional_kwargs : Dict[str, str], optional
        Forwarded to s3fs (e.g. server side encryption).
    """
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    # BUG FIX: copy before resolving "infer". The caller (_read_text) shares a
    # single dict across every path — both in the sequential loop and across
    # threads via repeat(pandas_kwargs) — so in-place mutation pinned the
    # compression inferred from the first file onto all later files and raced
    # between worker threads.
    args: Dict[str, Any] = dict(pandas_args)
    if args.get("compression", "infer") == "infer":
        args["compression"] = infer_compression(path, compression="infer")
    with fs.open(path, "rb") as f:
        return parser_func(f, **args)
def _read_parquet_init(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    categories: List[str] = None,
    validate_schema: bool = True,
    dataset: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
    """Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset."""
    # dataset=False -> expand the prefix into explicit object paths;
    # dataset=True  -> hand pyarrow the prefix itself (sans trailing slash).
    if dataset is False:
        source: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
    elif isinstance(path, str):
        source = path[:-1] if path.endswith("/") else path
    else:
        source = path
    _logger.debug(f"path_or_paths: {source}")
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    return pyarrow.parquet.ParquetDataset(
        path_or_paths=source,
        filesystem=fs,
        metadata_nthreads=_utils.ensure_cpu_count(use_threads=use_threads),
        filters=filters,
        read_dictionary=categories,
        validate_schema=validate_schema,
    )
def read_parquet(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    columns: Optional[List[str]] = None,
    validate_schema: bool = True,
    chunked: bool = False,
    dataset: bool = False,
    categories: List[str] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load Apache Parquet object(s) from a received S3 prefix or list of S3 object paths.

    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning and catalog integration (AWS Glue Catalog).

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    filters: Union[List[Tuple], List[List[Tuple]]], optional
        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
    columns : List[str], optional
        Names of columns to read from the file(s).
    validate_schema:
        Check that individual file schemas are all the same / compatible. Schemas within a
        folder prefix should all be the same. Disable if you have schemas that are different
        and want to disable this check.
    chunked : bool
        If True will break the data in smaller DataFrames (Non deterministic number of lines).
        Otherwise return a single DataFrame with the whole data.
    dataset: bool
        If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
    categories: List[str], optional
        List of columns names that should be returned as pandas.Categorical.
        Recommended for memory restricted environments.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption

    Returns
    -------
    Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
        Pandas DataFrame or a Generator in case of `chunked=True`.

    Examples
    --------
    Reading all Parquet files under a prefix

    >>> import awswrangler as wr
    >>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')

    Reading an explicit list of objects, in chunks

    >>> import awswrangler as wr
    >>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'], chunked=True)
    >>> for df in dfs:
    >>>     print(df)  # Smaller Pandas DataFrame

    Reading with server-side encryption (KMS)

    >>> import awswrangler as wr
    >>> df = wr.s3.read_parquet(
    ...     path='s3://bucket/prefix/',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )

    """
    data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path,
        filters=filters,
        dataset=dataset,
        categories=categories,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        validate_schema=validate_schema,
    )
    if chunked:
        return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)
    return _read_parquet(
        data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema
    )
def _read_parquet(
    data: pyarrow.parquet.ParquetDataset,
    columns: Optional[List[str]] = None,
    categories: List[str] = None,
    use_threads: bool = True,
    validate_schema: bool = True,
) -> pd.DataFrame:
    """Materialize every piece of the dataset into a single Pandas DataFrame."""
    pieces: List[pa.Table] = [
        piece.read(columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False)
        for piece in data.pieces
    ]
    # When schema validation is disabled, promote so differing schemas can still be concatenated.
    merged: pa.Table = pa.lib.concat_tables(pieces, promote=(not validate_schema))
    return merged.to_pandas(
        use_threads=use_threads,
        split_blocks=True,
        self_destruct=True,
        integer_object_nulls=False,
        date_as_object=True,
        ignore_metadata=True,
        categories=categories,
        types_mapper=_data_types.pyarrow2pandas_extension,
    )
def _read_parquet_chunked(
    data: pyarrow.parquet.ParquetDataset,
    columns: Optional[List[str]] = None,
    categories: List[str] = None,
    use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
    """Yield one Pandas DataFrame per piece of the dataset (lazy, piece-sized chunks)."""
    for piece in data.pieces:
        chunk: pa.Table = piece.read(
            columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
        )
        yield chunk.to_pandas(
            use_threads=use_threads,
            split_blocks=True,
            self_destruct=True,
            integer_object_nulls=False,
            date_as_object=True,
            ignore_metadata=True,
            categories=categories,
            types_mapper=_data_types.pyarrow2pandas_extension,
        )
def read_parquet_metadata(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    dataset: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
    """Read Apache Parquet file(s) metadata from a received S3 prefix or list of S3 object paths.

    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning and catalog integration (AWS Glue Catalog).

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    filters: Union[List[Tuple], List[List[Tuple]]], optional
        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
    dataset: bool
        If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    Tuple[Dict[str, str], Optional[Dict[str, str]]]
        columns_types: Dictionary with keys as column names and values as
        data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
        partitions_types: Dictionary with keys as partition names
        and values as data types (e.g. {'col2': 'date'}).

    Examples
    --------
    Reading all Parquet files (with partitions) metadata under a prefix

    >>> import awswrangler as wr
    >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)

    Reading all Parquet files metadata from a list

    >>> import awswrangler as wr
    >>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[
    ...     's3://bucket/filename0.parquet',
    ...     's3://bucket/filename1.parquet'
    ... ])

    """
    data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
    )
    # Translate the pyarrow schema (and partition keys) into Athena type names.
    arrow_schema = data.schema.to_arrow_schema()
    return _data_types.athena_types_from_pyarrow_schema(schema=arrow_schema, partitions=data.partitions)
def store_parquet_metadata(
    path: str,
    database: str,
    table: str,
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    dataset: bool = False,
    use_threads: bool = True,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    compression: Optional[str] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
    """Infer Parquet metadata on Amazon S3 and store it on the AWS Glue Catalog.

    Scans Apache Parquet file(s) under a received S3 prefix, infers the schema
    and every partition, and registers all of it on the Glue Catalog
    (no need of 'MSCK REPAIR TABLE' afterwards).

    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning and catalog integration (AWS Glue Catalog).

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    path : Union[str, List[str]]
        S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    database : str
        Glue/Athena catalog: Database name.
    table : str
        Glue/Athena catalog: Table name.
    filters: Union[List[Tuple], List[List[Tuple]]], optional
        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
    dataset: bool
        If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    description: str, optional
        Glue/Athena catalog: Table description
    parameters: Dict[str, str], optional
        Glue/Athena catalog: Key/value pairs to tag the table.
    columns_comments: Dict[str, str], optional
        Glue/Athena catalog:
        Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    compression: str, optional
        Compression style (``None``, ``snappy``, ``gzip``, etc).
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]
        The metadata used to create the Glue Table.
        columns_types: Dictionary with keys as column names and values as
        data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
        partitions_types: Dictionary with keys as partition names
        and values as data types (e.g. {'col2': 'date'}). /
        partitions_values: Dictionary with keys as S3 path locations and values as a
        list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).

    Examples
    --------
    Reading all Parquet files metadata under a prefix

    >>> import awswrangler as wr
    >>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
    ...     path='s3://bucket/prefix/',
    ...     database='...',
    ...     table='...',
    ...     dataset=True
    ... )

    """
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    # Open the dataset once; its pyarrow schema drives both the table and the partitions.
    data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
    )
    partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
    columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
        schema=data.schema.to_arrow_schema(), partitions=partitions
    )
    # The table must be created before its partitions can be attached below.
    catalog.create_parquet_table(
        database=database,
        table=table,
        path=path,
        columns_types=columns_types,
        partitions_types=partitions_types,
        description=description,
        parameters=parameters,
        columns_comments=columns_comments,
        boto3_session=session,
    )
    partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
        path=path, partitions=partitions
    )
    catalog.add_parquet_partitions(
        database=database,
        table=table,
        partitions_values=partitions_values,
        compression=compression,
        boto3_session=session,
    )
    return columns_types, partitions_types, partitions_values
def wait_objects_exist(
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Block until every listed Amazon S3 object exists.

    Polls S3.Client.head_object() every 5 seconds (default) until a successful
    state is reached. An error is returned after 20 (default) failed checks.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    paths : List[str]
        List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    delay : Union[int,float], optional
        The amount of time in seconds to wait between attempts. Default: 5
    max_attempts : int, optional
        The maximum number of attempts to be made. Default: 20
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    None
        None.

    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1'])  # wait both objects

    """
    # Delegate to the generic waiter driver with the "exists" waiter.
    return _wait_objects(
        "object_exists",
        paths,
        delay=delay,
        max_attempts=max_attempts,
        use_threads=use_threads,
        boto3_session=boto3_session,
    )
def wait_objects_not_exist(
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Block until every listed Amazon S3 object is gone.

    Polls S3.Client.head_object() every 5 seconds (default) until a successful
    state is reached. An error is returned after 20 (default) failed checks.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    paths : List[str]
        List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    delay : Union[int,float], optional
        The amount of time in seconds to wait between attempts. Default: 5
    max_attempts : int, optional
        The maximum number of attempts to be made. Default: 20
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    None
        None.

    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1'])  # wait both objects not exist

    """
    # Delegate to the generic waiter driver with the "not exists" waiter.
    return _wait_objects(
        "object_not_exists",
        paths,
        delay=delay,
        max_attempts=max_attempts,
        use_threads=use_threads,
        boto3_session=boto3_session,
    )
def _wait_objects(
    waiter_name: str,
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Run the given S3 waiter over every path, sequentially or via a thread pool.

    Parameters
    ----------
    waiter_name : str
        Name of a boto3 S3 waiter ("object_exists" or "object_not_exists").
    paths : List[str]
        List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
    delay : Union[int,float], optional
        Seconds between attempts. Default: 5.
    max_attempts : int, optional
        Maximum number of attempts per object. Default: 20.
    use_threads : bool
        True to wait on all objects concurrently (os.cpu_count() threads).
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    """
    delay = 5 if delay is None else delay
    max_attempts = 20 if max_attempts is None else max_attempts
    _delay: int = int(delay) if isinstance(delay, float) else delay
    if len(paths) < 1:
        return None
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    waiter = client_s3.get_waiter(waiter_name)
    _paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
    if use_threads is False:
        for bucket, key in _paths:
            waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
    else:
        cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
        with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
            # BUG FIX: the callable must be passed positionally. The previous
            # ``executor.submit(fn=waiter.wait, ...)`` raises TypeError on
            # Python >= 3.9, where Executor.submit's first parameter became
            # positional-only (the ``fn=`` keyword form was removed).
            futures: List[concurrent.futures.Future] = [
                executor.submit(
                    waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
                )
                for bucket, key in _paths
            ]
            # Propagate any waiter failure (e.g. WaiterError) to the caller.
            for future in futures:
                future.result()
    return None
def read_parquet_table(
    table: str,
    database: str,
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    columns: Optional[List[str]] = None,
    categories: List[str] = None,
    chunked: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load an Apache Parquet table registered on the AWS Glue Catalog.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    table : str
        AWS Glue Catalog table name.
    database : str
        AWS Glue Catalog database name.
    filters: Union[List[Tuple], List[List[Tuple]]], optional
        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
    columns : List[str], optional
        Names of columns to read from the file(s).
    categories: List[str], optional
        List of columns names that should be returned as pandas.Categorical.
        Recommended for memory restricted environments.
    chunked : bool
        If True will break the data in smaller DataFrames (Non deterministic number of lines).
        Otherwise return a single DataFrame with the whole data.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.
    s3_additional_kwargs:
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption

    Returns
    -------
    Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
        Pandas DataFrame or a Generator in case of `chunked=True`.

    Examples
    --------
    Reading a Parquet Table

    >>> import awswrangler as wr
    >>> df = wr.s3.read_parquet_table(database='...', table='...')

    Reading a Parquet Table in chunks

    >>> import awswrangler as wr
    >>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)
    >>> for df in dfs:
    >>>     print(df)  # Smaller Pandas DataFrame

    Reading an encrypted Parquet Table

    >>> import awswrangler as wr
    >>> df = wr.s3.read_parquet_table(
    ...     database='...',
    ...     table='...'
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
    ...     }
    ... )

    """
    # Resolve the table's S3 location from the catalog, then read it as a dataset.
    location: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
    return read_parquet(
        path=location,
        filters=filters,
        columns=columns,
        categories=categories,
        chunked=chunked,
        dataset=True,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
    )
def merge_datasets(
    source_path: str,
    target_path: str,
    mode: str = "append",
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
    """Merge a source dataset into a target dataset.

    Note
    ----
    If you are merging tables (S3 datasets + Glue Catalog metadata),
    remember that you will also need to update your partitions metadata in some cases.
    (e.g. wr.athena.repair_table(table='...', database='...'))

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be get from os.cpu_count().

    Parameters
    ----------
    source_path : str,
        S3 Path for the source directory.
    target_path : str,
        S3 Path for the target directory.
    mode: str, optional
        ``append`` (Default), ``overwrite``, ``overwrite_partitions``.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    List[str]
        List of new objects paths.

    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.s3.merge_datasets(
    ...     source_path="s3://bucket0/dir0/",
    ...     target_path="s3://bucket1/dir1/",
    ...     mode="append"
    ... )
    ["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]

    """
    # FIX: use endswith() instead of indexing [-1], which raised IndexError
    # on an empty path string.
    source_path = source_path[:-1] if source_path.endswith("/") else source_path
    target_path = target_path[:-1] if target_path.endswith("/") else target_path
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
    _logger.debug(f"len(paths): {len(paths)}")
    if len(paths) < 1:
        return []
    if mode == "overwrite":
        _logger.debug(f"Deleting to overwrite: {target_path}/")
        delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
    elif mode == "overwrite_partitions":
        # Map each source object to its partition directory, then wipe only
        # the matching partition directories under the target.
        paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
        paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
        partitions_paths: List[str] = list(set(paths_wo_filename))
        target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
        for path in target_partitions_paths:
            _logger.debug(f"Deleting to overwrite_partitions: {path}")
            delete_objects(path=path, use_threads=use_threads, boto3_session=session)
    elif mode != "append":
        raise exceptions.InvalidArgumentValue(f"{mode} is an invalid mode option.")
    new_objects: List[str] = copy_objects(
        paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
    )
    _logger.debug(f"len(new_objects): {len(new_objects)}")
    return new_objects
def copy_objects(
    paths: List[str],
    source_path: str,
    target_path: str,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
    """Copy a list of S3 objects to another S3 directory.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().

    Parameters
    ----------
    paths : List[str]
        List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).
    source_path : str
        S3 Path for the source directory.
    target_path : str
        S3 Path for the target directory.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    List[str]
        List of new objects paths.

    Examples
    --------
    >>> import awswrangler as wr
    >>> wr.s3.copy_objects(
    ...     paths=["s3://bucket0/dir0/key0", "s3://bucket0/dir0/key1"],
    ...     source_path="s3://bucket0/dir0/",
    ...     target_path="s3://bucket1/dir1/",
    ... )
    ["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
    """
    _logger.debug(f"len(paths): {len(paths)}")
    if len(paths) < 1:
        return []
    # Strip a single trailing slash so the prefix replacement below is exact.
    source_path = source_path[:-1] if source_path[-1] == "/" else source_path
    target_path = target_path[:-1] if target_path[-1] == "/" else target_path
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    batch: List[Tuple[str, str]] = []
    new_objects: List[str] = []
    for path in paths:
        # Re-root each source key under target_path, preserving the relative key.
        path_wo_prefix: str = path.replace(f"{source_path}/", "")
        path_final: str = f"{target_path}/{path_wo_prefix}"
        new_objects.append(path_final)
        batch.append((path, path_final))
    _logger.debug(f"len(new_objects): {len(new_objects)}")
    _copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
    return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
    """Copy each (source, target) S3 object pair in *batch* using boto3 managed transfers."""
    _logger.debug(f"len(batch): {len(batch)}")
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
    # The transfer configuration is immutable here, so one instance serves every copy.
    transfer_config = TransferConfig(num_download_attempts=15, use_threads=use_threads)
    for source_path, target_path in batch:
        src_bucket, src_key = _utils.parse_path(path=source_path)
        dst_bucket, dst_key = _utils.parse_path(path=target_path)
        resource_s3.meta.client.copy(
            CopySource={"Bucket": src_bucket, "Key": src_key},
            Bucket=dst_bucket,
            Key=dst_key,
            SourceClient=client_s3,
            Config=transfer_config,
        )
| 38.048825 | 120 | 0.632907 |
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3
import botocore.exceptions
import pandas as pd
import pandas.io.parsers
import pyarrow as pa
import pyarrow.lib
import pyarrow.parquet
import s3fs
from boto3.s3.transfer import TransferConfig
from pandas.io.common import infer_compression
from awswrangler import _data_types, _utils, catalog, exceptions
# Maps a compression name to the filename suffix inserted before ".parquet"/".csv".
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
# Module-level logger for this file.
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
    """Return the AWS region of *bucket* (the API reports None for us-east-1)."""
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    _logger.debug(f"bucket: {bucket}")
    location: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
    if location is None:
        location = "us-east-1"
    _logger.debug(f"region: {location}")
    return location
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
    """Check whether the S3 object at *path* exists via a HEAD request."""
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    bucket, key = path.replace("s3://", "").split("/", 1)
    try:
        client_s3.head_object(Bucket=bucket, Key=key)
    except botocore.exceptions.ClientError as ex:
        # 404 means "not found"; anything else is a real error and propagates.
        if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
            return False
        raise ex
    return True
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
    """List every object path under the given S3 prefix (paginated, 1000 keys per page)."""
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    bucket, prefix = _utils.parse_path(path=path)
    pages = client_s3.get_paginator("list_objects_v2").paginate(
        Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000}
    )
    paths: List[str] = []
    for page in pages:
        # "Contents" is absent on empty pages.
        for content in page.get("Contents") or []:
            if (content is not None) and ("Key" in content):
                paths.append(f"s3://{bucket}/{content['Key']}")
    return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
    """Normalize *path* (prefix string or explicit list) into a list of object paths."""
    if isinstance(path, list):
        return path
    if isinstance(path, str):
        return list_objects(path=path, boto3_session=boto3_session)
    raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
def delete_objects(
    path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
    """Delete all S3 objects under *path*, batching up to 1000 keys per DeleteObjects call."""
    paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if not paths:
        return
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    for bucket, keys in _split_paths_by_bucket(paths=paths).items():
        key_chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
        if use_threads is False:
            for key_chunk in key_chunks:
                _delete_objects(bucket=bucket, keys=key_chunk, client_s3=client_s3)
        else:
            max_workers: int = _utils.ensure_cpu_count(use_threads=use_threads)
            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
                executor.map(_delete_objects, repeat(bucket), key_chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
    """Group object keys by their bucket name."""
    buckets: Dict[str, List[str]] = {}
    for p in paths:
        bucket, key = _utils.parse_path(path=p)
        buckets.setdefault(bucket, []).append(key)
    return buckets
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
    """Issue one DeleteObjects request for up to 1000 keys in a single bucket."""
    _logger.debug(f"len(keys): {len(keys)}")
    client_s3.delete_objects(Bucket=bucket, Delete={"Objects": [{"Key": k} for k in keys]})
def describe_objects(
    path: Union[str, List[str]],
    wait_time: Optional[Union[int, float]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
    """Return head_object metadata for every object under *path*, keyed by object path."""
    paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if not paths:
        return {}
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    if use_threads is False:
        resp_list: List[Tuple[str, Dict[str, Any]]] = [
            _describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths
        ]
    else:
        cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
        with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
            resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))
    return dict(resp_list)
def _describe_object(
    path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
    """HEAD one object, retrying once per second for up to *wait_time* seconds while it 404s."""
    wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time
    attempts: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
    bucket, key = _utils.parse_path(path=path)
    desc: Dict[str, Any] = {}
    for remaining in range(attempts, 0, -1):
        try:
            desc = client_s3.head_object(Bucket=bucket, Key=key)
            break
        except botocore.exceptions.ClientError as e:
            # Only 404 is retryable here; other client errors propagate.
            if e.response["ResponseMetadata"]["HTTPStatusCode"] != 404:
                raise e
            _logger.debug(f"Object not found. {remaining} seconds remaining to wait.")
            if remaining == 1:
                break
            time.sleep(1)
    # On timeout an empty dict is returned for this path.
    return path, desc
def size_objects(
    path: Union[str, List[str]],
    wait_time: Optional[Union[int, float]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
    """Return each object's ContentLength (bytes) keyed by object path; None when missing."""
    descriptions: Dict[str, Dict[str, Any]] = describe_objects(
        path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
    )
    return {object_path: desc.get("ContentLength", None) for object_path, desc in descriptions.items()}
def to_csv(
    df: pd.DataFrame,
    path: str,
    sep: str = ",",
    index: bool = True,
    columns: Optional[List[str]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write a DataFrame to S3 as CSV: a single file (dataset=False) or a
    partitionable dataset (dataset=True), optionally registering the result
    in the Glue/Athena catalog when both ``database`` and ``table`` are given.

    Returns a dict with ``paths`` (written objects) and ``partitions_values``
    (prefix -> partition values; empty for single-file writes).
    Raises InvalidArgumentCombination for inconsistent arguments and
    EmptyDataFrame for an empty input.
    """
    # database and table must be provided together (XOR check).
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    columns_comments = columns_comments if columns_comments else {}
    partitions_values: Dict[str, List[str]] = {}
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    if dataset is False:
        # Single-file mode: dataset-only arguments are rejected up front.
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        pandas_kwargs["sep"] = sep
        pandas_kwargs["index"] = index
        pandas_kwargs["columns"] = columns
        _to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
        paths = [path]
    else:
        mode = "append" if mode is None else mode
        exist: bool = False
        if columns:
            df = df[columns]
        if (database is not None) and (table is not None):
            # Sanitize names to Athena-compatible forms before writing/cataloging.
            df = catalog.sanitize_dataframe_columns_names(df=df)
            partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
            dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
            columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
            exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
            # When appending to an existing table, reuse its declared column types.
            if (exist is True) and (mode in ("append", "overwrite_partitions")):
                for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
                    dtype[k] = v
            df = catalog.drop_duplicated_columns(df=df)
        paths, partitions_values = _to_csv_dataset(
            df=df,
            path=path,
            index=index,
            sep=sep,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
            )
            # (Re)create the catalog table only for new tables or full overwrites.
            if (exist is False) or (mode == "overwrite"):
                catalog.create_csv_table(
                    database=database,
                    table=table,
                    path=path,
                    columns_types=columns_types,
                    partitions_types=partitions_types,
                    description=description,
                    parameters=parameters,
                    columns_comments=columns_comments,
                    boto3_session=session,
                    mode="overwrite",
                    sep=sep,
                )
            if partitions_values:
                _logger.debug(f"partitions_values:\n{partitions_values}")
                catalog.add_csv_partitions(
                    database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
                )
    return {"paths": paths, "partitions_values": partitions_values}
def _to_csv_dataset(
    df: pd.DataFrame,
    path: str,
    index: bool,
    sep: str,
    fs: s3fs.S3FileSystem,
    use_threads: bool,
    mode: str,
    dtype: Dict[str, str],
    partition_cols: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write *df* under prefix *path* as CSV files, one per partition when
    ``partition_cols`` is set, honoring append/overwrite/overwrite_partitions.

    Returns (written paths, {partition prefix: partition values}).
    """
    paths: List[str] = []
    partitions_values: Dict[str, List[str]] = {}
    path = path if path[-1] == "/" else f"{path}/"
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
        )
    # Full overwrite (or partition overwrite without partition columns) clears the prefix first.
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
    df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
    _logger.debug(f"dtypes: {df.dtypes}")
    if not partition_cols:
        # Unpartitioned: single file with a random name under the prefix.
        file_path: str = f"{path}{uuid.uuid4().hex}.csv"
        _to_text(
            file_format="csv",
            df=df,
            path=file_path,
            fs=fs,
            quoting=csv.QUOTE_NONE,
            escapechar="\\",
            header=False,
            date_format="%Y-%m-%d %H:%M:%S.%f",
            index=index,
            sep=sep,
        )
        paths.append(file_path)
    else:
        # One Hive-style subdirectory (col=value/...) per partition value combination.
        for keys, subgroup in df.groupby(by=partition_cols, observed=True):
            subgroup = subgroup.drop(partition_cols, axis="columns")
            keys = (keys,) if not isinstance(keys, tuple) else keys
            subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
            prefix: str = f"{path}{subdir}/"
            if mode == "overwrite_partitions":
                delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
            file_path = f"{prefix}{uuid.uuid4().hex}.csv"
            _to_text(
                file_format="csv",
                df=subgroup,
                path=file_path,
                fs=fs,
                quoting=csv.QUOTE_NONE,
                escapechar="\\",
                header=False,
                date_format="%Y-%m-%d %H:%M:%S.%f",
                index=index,
                sep=sep,
            )
            paths.append(file_path)
            partitions_values[prefix] = [str(k) for k in keys]
    return paths, partitions_values
def to_json(
    df: pd.DataFrame,
    path: str,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> None:
    """Write *df* to a single JSON object on S3 (thin wrapper over pandas ``to_json``)."""
    return _to_text(
        file_format="json", df=df, path=path, boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs, **pandas_kwargs,
    )
def _to_text(
    file_format: str,
    df: pd.DataFrame,
    path: str,
    fs: Optional[s3fs.S3FileSystem] = None,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> None:
    """Serialize *df* to *path* as CSV or JSON through an s3fs file handle."""
    if df.empty is True:  # pragma: no cover
        raise exceptions.EmptyDataFrame()
    if fs is None:
        fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    # Dispatch table instead of an if/elif chain; unknown formats write nothing.
    writers = {"csv": df.to_csv, "json": df.to_json}
    with fs.open(path, "w") as f:
        writer = writers.get(file_format)
        if writer is not None:
            writer(f, **pandas_kwargs)
def to_parquet(  # pylint: disable=too-many-arguments
    df: pd.DataFrame,
    path: str,
    index: bool = False,
    compression: Optional[str] = "snappy",
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write a DataFrame to S3 as Parquet: a single file (dataset=False) or a
    partitionable dataset (dataset=True), optionally registering the result in
    the Glue/Athena catalog when both ``database`` and ``table`` are given.

    Returns a dict with ``paths`` (written objects) and ``partitions_values``
    (prefix -> partition values; empty for single-file writes).
    Raises InvalidArgumentCombination, EmptyDataFrame or InvalidCompression
    for inconsistent arguments.
    """
    # database and table must be provided together (XOR check).
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    columns_comments = columns_comments if columns_comments else {}
    partitions_values: Dict[str, List[str]] = {}
    cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    # Only None/gzip/snappy are valid compressions (see _COMPRESSION_2_EXT).
    compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
    if compression_ext is None:
        raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
    if dataset is False:
        # Single-file mode: dataset-only arguments are rejected up front.
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        paths = [
            _to_parquet_file(
                df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
            )
        ]
    else:
        mode = "append" if mode is None else mode
        exist: bool = False
        if (database is not None) and (table is not None):  # Normalize table to respect Athena's standards
            df = catalog.sanitize_dataframe_columns_names(df=df)
            partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
            dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
            columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
            exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
            # When appending to an existing table, reuse its declared column types.
            if (exist is True) and (mode in ("append", "overwrite_partitions")):
                for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
                    dtype[k] = v
            df = catalog.drop_duplicated_columns(df=df)
        paths, partitions_values = _to_parquet_dataset(
            df=df,
            path=path,
            index=index,
            compression=compression,
            compression_ext=compression_ext,
            cpus=cpus,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype
            )
            # (Re)create the catalog table only for new tables or full overwrites.
            if (exist is False) or (mode == "overwrite"):
                catalog.create_parquet_table(
                    database=database,
                    table=table,
                    path=path,
                    columns_types=columns_types,
                    partitions_types=partitions_types,
                    compression=compression,
                    description=description,
                    parameters=parameters,
                    columns_comments=columns_comments,
                    boto3_session=session,
                    mode="overwrite",
                )
            if partitions_values:
                _logger.debug(f"partitions_values:\n{partitions_values}")
                catalog.add_parquet_partitions(
                    database=database,
                    table=table,
                    partitions_values=partitions_values,
                    compression=compression,
                    boto3_session=session,
                )
    return {"paths": paths, "partitions_values": partitions_values}
def _to_parquet_dataset(
    df: pd.DataFrame,
    path: str,
    index: bool,
    compression: Optional[str],
    compression_ext: str,
    cpus: int,
    fs: s3fs.S3FileSystem,
    use_threads: bool,
    mode: str,
    dtype: Dict[str, str],
    partition_cols: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write *df* under prefix *path* as Parquet files, one per partition when
    ``partition_cols`` is set, honoring append/overwrite/overwrite_partitions.

    Returns (written paths, {partition prefix: partition values}).
    """
    paths: List[str] = []
    partitions_values: Dict[str, List[str]] = {}
    path = path if path[-1] == "/" else f"{path}/"
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
        )
    # Full overwrite (or partition overwrite without partition columns) clears the prefix first.
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
    df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
    # Partition columns are excluded from the file schema (they live in the path).
    schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
        df=df, index=index, ignore_cols=partition_cols, dtype=dtype
    )
    _logger.debug(f"schema: {schema}")
    if not partition_cols:
        file_path: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
        _to_parquet_file(
            df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
        )
        paths.append(file_path)
    else:
        # One Hive-style subdirectory (col=value/...) per partition value combination.
        for keys, subgroup in df.groupby(by=partition_cols, observed=True):
            subgroup = subgroup.drop(partition_cols, axis="columns")
            keys = (keys,) if not isinstance(keys, tuple) else keys
            subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
            prefix: str = f"{path}{subdir}/"
            if mode == "overwrite_partitions":
                delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
            file_path = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
            _to_parquet_file(
                df=subgroup,
                schema=schema,
                path=file_path,
                index=index,
                compression=compression,
                cpus=cpus,
                fs=fs,
                dtype=dtype,
            )
            paths.append(file_path)
            partitions_values[prefix] = [str(k) for k in keys]
    return paths, partitions_values
def _to_parquet_file(
    df: pd.DataFrame,
    path: str,
    schema: pa.Schema,
    index: bool,
    compression: Optional[str],
    cpus: int,
    fs: s3fs.S3FileSystem,
    dtype: Dict[str, str],
) -> str:
    """Write one Parquet file to *path*, casting columns listed in *dtype* to their Athena types."""
    table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
    for col_name, col_type in dtype.items():
        if col_name not in table.column_names:
            continue
        col_index = table.column_names.index(col_name)
        pyarrow_dtype = _data_types.athena2pyarrow(col_type)
        field = pa.field(name=col_name, type=pyarrow_dtype)
        table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))
        _logger.debug(f"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})")
    pyarrow.parquet.write_table(
        table=table,
        where=path,
        write_statistics=True,
        use_dictionary=True,
        filesystem=fs,
        coerce_timestamps="ms",
        compression=compression,
        flavor="spark",
    )
    return path
def read_csv(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Read CSV object(s) from S3 into a DataFrame (an iterator of frames when chunksize is set)."""
    return _read_text(
        parser_func=pd.read_csv, path=path, use_threads=use_threads,
        boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize, **pandas_kwargs,
    )
def read_fwf(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Read fixed-width object(s) from S3 into a DataFrame (an iterator of frames when chunksize is set)."""
    return _read_text(
        parser_func=pd.read_fwf, path=path, use_threads=use_threads,
        boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize, **pandas_kwargs,
    )
def read_json(
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Read JSON object(s) from S3 into a DataFrame (an iterator of frames when chunksize is set)."""
    return _read_text(
        parser_func=pd.read_json, path=path, use_threads=use_threads,
        boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs,
        chunksize=chunksize, **pandas_kwargs,
    )
def _read_text(
    parser_func: Callable,
    path: Union[str, List[str]],
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    chunksize: Optional[int] = None,
    **pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Shared text-read dispatcher: chunked iterator when *chunksize* is set,
    otherwise all objects parsed (serially or via threads) and concatenated."""
    if "iterator" in pandas_kwargs:
        raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
    paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
    if chunksize is not None:
        return _read_text_chunksize(
            parser_func=parser_func,
            paths=paths,
            boto3_session=boto3_session,
            chunksize=chunksize,
            pandas_args=pandas_kwargs,
            s3_additional_kwargs=s3_additional_kwargs,
        )
    if use_threads is False:
        frames = [
            _read_text_full(
                parser_func=parser_func,
                path=p,
                boto3_session=boto3_session,
                pandas_args=pandas_kwargs,
                s3_additional_kwargs=s3_additional_kwargs,
            )
            for p in paths
        ]
        return pd.concat(objs=frames, ignore_index=True, sort=False)
    cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
    with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
        df = pd.concat(
            objs=executor.map(
                _read_text_full,
                repeat(parser_func),
                paths,
                repeat(boto3_session),
                repeat(pandas_kwargs),
                repeat(s3_additional_kwargs),
            ),
            ignore_index=True,
            sort=False,
        )
    return df
def _read_text_chunksize(
    parser_func: Callable,
    paths: List[str],
    boto3_session: boto3.Session,
    chunksize: int,
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
    """Yield DataFrame chunks of *chunksize* rows from each object in *paths*, in order."""
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    for obj_path in paths:
        _logger.debug(f"path: {obj_path}")
        if pandas_args.get("compression", "infer") == "infer":
            # Resolve compression from the object's filename extension.
            pandas_args["compression"] = infer_compression(obj_path, compression="infer")
        with fs.open(obj_path, "rb") as f:
            reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)
            yield from reader
def _read_text_full(
    parser_func: Callable,
    path: str,
    boto3_session: boto3.Session,
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
    """Parse one whole S3 object into a DataFrame with *parser_func*."""
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    if pandas_args.get("compression", "infer") == "infer":
        # Resolve compression from the object's filename extension.
        pandas_args["compression"] = infer_compression(path, compression="infer")
    with fs.open(path, "rb") as f:
        df = parser_func(f, **pandas_args)
    return df
def _read_parquet_init(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    categories: List[str] = None,
    validate_schema: bool = True,
    dataset: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
    """Build a pyarrow ParquetDataset over the given S3 path(s)."""
    if dataset is False:
        # Expand the prefix into explicit object paths.
        path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
    elif isinstance(path, str):
        # Dataset mode expects a directory path without a trailing slash.
        path_or_paths = path[:-1] if path.endswith("/") else path
    else:
        path_or_paths = path
    _logger.debug(f"path_or_paths: {path_or_paths}")
    fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    return pyarrow.parquet.ParquetDataset(
        path_or_paths=path_or_paths,
        filesystem=fs,
        metadata_nthreads=_utils.ensure_cpu_count(use_threads=use_threads),
        filters=filters,
        read_dictionary=categories,
        validate_schema=validate_schema,
    )
def read_parquet(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    columns: Optional[List[str]] = None,
    validate_schema: bool = True,
    chunked: bool = False,
    dataset: bool = False,
    categories: List[str] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Read Parquet data from S3 into one DataFrame, or an iterator of
    per-piece DataFrames when ``chunked`` is requested."""
    parquet_dataset: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path,
        filters=filters,
        dataset=dataset,
        categories=categories,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
        validate_schema=validate_schema,
    )
    if chunked is False:
        return _read_parquet(
            data=parquet_dataset,
            columns=columns,
            categories=categories,
            use_threads=use_threads,
            validate_schema=validate_schema,
        )
    return _read_parquet_chunked(
        data=parquet_dataset, columns=columns, categories=categories, use_threads=use_threads
    )
def _read_parquet(
    data: pyarrow.parquet.ParquetDataset,
    columns: Optional[List[str]] = None,
    categories: List[str] = None,
    use_threads: bool = True,
    validate_schema: bool = True,
) -> pd.DataFrame:
    """Read every piece of the dataset, concatenate the Arrow tables, and convert to pandas."""
    piece_tables: List[pa.Table] = [
        piece.read(columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False)
        for piece in data.pieces
    ]
    # promote=True lets concat_tables unify differing piece schemas when validation is off.
    table = pa.lib.concat_tables(piece_tables, promote=(not validate_schema))
    return table.to_pandas(
        use_threads=use_threads,
        split_blocks=True,
        self_destruct=True,
        integer_object_nulls=False,
        date_as_object=True,
        ignore_metadata=True,
        categories=categories,
        types_mapper=_data_types.pyarrow2pandas_extension,
    )
def _read_parquet_chunked(
    data: pyarrow.parquet.ParquetDataset,
    columns: Optional[List[str]] = None,
    categories: List[str] = None,
    use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
    """Yield one pandas DataFrame per dataset piece."""
    for piece in data.pieces:
        piece_table: pa.Table = piece.read(
            columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
        )
        yield piece_table.to_pandas(
            use_threads=use_threads,
            split_blocks=True,
            self_destruct=True,
            integer_object_nulls=False,
            date_as_object=True,
            ignore_metadata=True,
            categories=categories,
            types_mapper=_data_types.pyarrow2pandas_extension,
        )
def read_parquet_metadata(
    path: Union[str, List[str]],
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    dataset: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
    """Extract Athena column types (and partition types) from Parquet metadata on S3."""
    parquet_dataset: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
    )
    arrow_schema = parquet_dataset.schema.to_arrow_schema()
    return _data_types.athena_types_from_pyarrow_schema(schema=arrow_schema, partitions=parquet_dataset.partitions)
def store_parquet_metadata(
    path: str,
    database: str,
    table: str,
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    dataset: bool = False,
    use_threads: bool = True,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    compression: Optional[str] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
    """Infer schema from existing Parquet data under *path* and register a
    matching table (plus its partitions) in the Glue/Athena catalog.

    Returns (column types, partition types, partition values) as inferred from
    the Parquet metadata.
    """
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
        path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
    )
    partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
    # Translate the pyarrow schema into Athena column/partition types.
    columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
        schema=data.schema.to_arrow_schema(), partitions=partitions
    )
    catalog.create_parquet_table(
        database=database,
        table=table,
        path=path,
        columns_types=columns_types,
        partitions_types=partitions_types,
        description=description,
        parameters=parameters,
        columns_comments=columns_comments,
        boto3_session=session,
    )
    # Register every partition discovered under the path.
    partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
        path=path, partitions=partitions
    )
    catalog.add_parquet_partitions(
        database=database,
        table=table,
        partitions_values=partitions_values,
        compression=compression,
        boto3_session=session,
    )
    return columns_types, partitions_types, partitions_values
def wait_objects_exist(
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Block until every object in *paths* exists (S3 'object_exists' waiter)."""
    return _wait_objects(
        waiter_name="object_exists", paths=paths, delay=delay,
        max_attempts=max_attempts, use_threads=use_threads, boto3_session=boto3_session,
    )
def wait_objects_not_exist(
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Block until no object in *paths* exists (S3 'object_not_exists' waiter)."""
    return _wait_objects(
        waiter_name="object_not_exists", paths=paths, delay=delay,
        max_attempts=max_attempts, use_threads=use_threads, boto3_session=boto3_session,
    )
def _wait_objects(
    waiter_name: str,
    paths: List[str],
    delay: Optional[Union[int, float]] = None,
    max_attempts: Optional[int] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Run the named S3 waiter for every path, serially or in a thread pool.

    :param waiter_name: boto3 S3 waiter name ("object_exists" / "object_not_exists").
    :param paths: S3 object paths to wait on; no-op when empty.
    :param delay: Seconds between polls (default 5); floats are truncated to int
        because WaiterConfig requires integer seconds.
    :param max_attempts: Maximum polls per object (default 20).
    :param use_threads: When True, wait on all objects concurrently.
    :param boto3_session: Existing boto3 session to reuse, if any.
    :raises botocore.exceptions.WaiterError: propagated via ``future.result()``
        (threaded) or directly (serial) when a waiter gives up.
    """
    delay = 5 if delay is None else delay
    max_attempts = 20 if max_attempts is None else max_attempts
    _delay: int = int(delay) if isinstance(delay, float) else delay
    if len(paths) < 1:
        return None
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    waiter = client_s3.get_waiter(waiter_name)
    _paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
    if use_threads is False:
        for bucket, key in _paths:
            waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
    else:
        cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
        with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
            futures: List[concurrent.futures.Future] = []
            for bucket, key in _paths:
                # BUG FIX: the callable argument of Executor.submit() is
                # positional-only; passing it as the keyword ``fn=`` raises
                # TypeError on modern Python instead of scheduling the wait.
                future: concurrent.futures.Future = executor.submit(
                    waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
                )
                futures.append(future)
            for future in futures:
                future.result()  # re-raise any exception captured by the waiter thread
    return None
def read_parquet_table(
    table: str,
    database: str,
    filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
    columns: Optional[List[str]] = None,
    categories: Optional[List[str]] = None,
    chunked: bool = False,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Read a Glue-catalog Parquet table from S3 into pandas.

    Resolves the table's S3 location through the catalog and delegates to
    :func:`read_parquet` with ``dataset=True``.

    FIX: ``categories`` defaults to ``None`` but was annotated as a bare
    ``List[str]``; it is now ``Optional[List[str]]`` to match the other
    optional parameters (runtime behavior unchanged).

    :param table: Glue table name.
    :param database: Glue database name.
    :param filters: PyArrow partition/row-group filters, passed through.
    :param columns: Subset of columns to read, or ``None`` for all.
    :param categories: Columns to load as pandas categoricals, if any.
    :param chunked: When True, return an iterator of DataFrames.
    :param use_threads: Enable multi-threaded reads.
    :param boto3_session: Existing boto3 session to reuse, if any.
    :param s3_additional_kwargs: Extra botocore arguments for S3 requests.
    :return: A DataFrame, or an iterator of DataFrames when ``chunked=True``.
    """
    path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
    return read_parquet(
        path=path,
        filters=filters,
        columns=columns,
        categories=categories,
        chunked=chunked,
        dataset=True,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
    )
def merge_datasets(
    source_path: str,
    target_path: str,
    mode: str = "append",
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
    """Merge (copy) a source S3 dataset prefix into a target prefix.

    Modes:
      - ``"append"``: copy source objects on top of the target.
      - ``"overwrite"``: delete the entire target prefix first.
      - ``"overwrite_partitions"``: delete only the target partition prefixes
        that are also present in the source, then copy.

    :param source_path: S3 prefix to copy from (trailing slash optional).
    :param target_path: S3 prefix to copy into (trailing slash optional).
    :param mode: One of "append", "overwrite", "overwrite_partitions".
    :param use_threads: Parallelize deletes/copies.
    :param boto3_session: Existing boto3 session to reuse, if any.
    :return: Paths of the newly created objects (empty list if the source is empty).
    :raises exceptions.InvalidArgumentValue: for an unknown ``mode``.
    """
    # Normalize away a single trailing slash so prefixes compose cleanly below.
    source_path = source_path[:-1] if source_path[-1] == "/" else source_path
    target_path = target_path[:-1] if target_path[-1] == "/" else target_path
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
    _logger.debug(f"len(paths): {len(paths)}")
    if len(paths) < 1:
        return []
    if mode == "overwrite":
        _logger.debug(f"Deleting to overwrite: {target_path}/")
        delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
    elif mode == "overwrite_partitions":
        # Derive the set of partition prefixes present in the source and
        # delete only the matching prefixes under the target.
        paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
        paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
        partitions_paths: List[str] = list(set(paths_wo_filename))
        target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
        for path in target_partitions_paths:
            _logger.debug(f"Deleting to overwrite_partitions: {path}")
            delete_objects(path=path, use_threads=use_threads, boto3_session=session)
    elif mode != "append":
        # FIX: corrected grammar in the user-facing error message
        # ("a invalid" -> "an invalid").
        raise exceptions.InvalidArgumentValue(f"{mode} is an invalid mode option.")
    new_objects: List[str] = copy_objects(
        paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
    )
    _logger.debug(f"len(new_objects): {len(new_objects)}")
    return new_objects
def copy_objects(
    paths: List[str],
    source_path: str,
    target_path: str,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
    """Copy the given S3 objects from under ``source_path`` to ``target_path``.

    Each object keeps its path suffix relative to ``source_path``.

    :param paths: Full S3 paths of the objects to copy.
    :param source_path: Common source prefix (trailing slash optional).
    :param target_path: Destination prefix (trailing slash optional).
    :param use_threads: Forwarded to the underlying managed transfer.
    :param boto3_session: Existing boto3 session to reuse, if any.
    :return: Full S3 paths of the copied objects (empty when ``paths`` is empty).
    """
    _logger.debug(f"len(paths): {len(paths)}")
    if len(paths) < 1:
        return []
    # Normalize away a single trailing slash on both prefixes.
    source_path = source_path[:-1] if source_path[-1] == "/" else source_path
    target_path = target_path[:-1] if target_path[-1] == "/" else target_path
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    prefix: str = f"{source_path}/"
    new_objects: List[str] = [f"{target_path}/{p.replace(prefix, '')}" for p in paths]
    batch: List[Tuple[str, str]] = list(zip(paths, new_objects))
    _logger.debug(f"len(new_objects): {len(new_objects)}")
    _copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
    return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
    """Execute S3 managed copies for every ``(source, target)`` pair in ``batch``.

    Uses the S3 resource's managed ``copy`` (multipart-capable) with the
    source-side client passed explicitly.

    :param batch: Pairs of full S3 paths ``(source, target)``.
    :param use_threads: Forwarded to the transfer configuration.
    :param boto3_session: Session used for both source and target clients.
    """
    _logger.debug(f"len(batch): {len(batch)}")
    client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
    resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
    # The transfer configuration is identical for every pair, so build it once.
    transfer_config = TransferConfig(num_download_attempts=15, use_threads=use_threads)
    for src, dst in batch:
        src_bucket, src_key = _utils.parse_path(path=src)
        dst_bucket, dst_key = _utils.parse_path(path=dst)
        resource_s3.meta.client.copy(
            CopySource={"Bucket": src_bucket, "Key": src_key},
            Bucket=dst_bucket,
            Key=dst_key,
            SourceClient=client_s3,
            Config=transfer_config,
        )
| true | true |
f728938c6b7c6c80232da33a114ac0511acc90c7 | 24,657 | py | Python | test/functional/rpc_rawtransaction.py | minblock/motherofweeddaycoin | eeb0625c0f2f35412b3a69da50bc55f6acd6806d | [
"MIT"
] | null | null | null | test/functional/rpc_rawtransaction.py | minblock/motherofweeddaycoin | eeb0625c0f2f35412b3a69da50bc55f6acd6806d | [
"MIT"
] | null | null | null | test/functional/rpc_rawtransaction.py | minblock/motherofweeddaycoin | eeb0625c0f2f35412b3a69da50bc55f6acd6806d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
    """Dictionary that allows duplicate keys.

    Constructed from a list of (key, value) tuples; ``items()`` returns that
    list verbatim, so when dumped by the json module the output contains
    repeated keys, eg:

    >>> json.dumps(multidict([(1,2),(1,2)])
    '{"1": 2, "1": 2}'

    Used to test calls to rpc methods with repeated keys in the json object."""

    def __init__(self, x):
        super().__init__(x)
        self.x = x

    def items(self):
        # Return the raw pair list (duplicates included) instead of the
        # de-duplicated dict view.
        return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
    """Functional test covering the raw-transaction RPC family.

    Exercises createrawtransaction, signrawtransactionwithwallet,
    sendrawtransaction, combinerawtransaction, decoderawtransaction and
    getrawtransaction, including parameter validation, multisig spending
    flows, and transaction version-number edge cases.
    """

    def set_test_params(self):
        # Fresh chain, three wallet nodes.  -txindex is required so
        # getrawtransaction can look up arbitrary (non-wallet) transactions.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"]]

    def skip_test_if_missing_module(self):
        # Every sub-test uses wallet RPCs.
        self.skip_if_no_wallet()

    def setup_network(self):
        super().setup_network()
        # Add a direct node0 <-> node2 connection on top of the default topology.
        connect_nodes_bi(self.nodes, 0, 2)

    def run_test(self):
        """Run all raw-transaction sub-tests sequentially (order matters:
        later sections reuse coins and variables set up by earlier ones)."""
        self.log.info('prepare some coins for multiple *rawtransaction commands')
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(101)
        self.sync_all()
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
        self.sync_all()
        self.nodes[0].generate(5)
        self.sync_all()

        self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
        block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
        assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])

        self.log.info('Check parameter types and required parameters of createrawtransaction')
        # Test `createrawtransaction` required parameters
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])

        # Test `createrawtransaction` invalid extra parameters
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')

        # Test `createrawtransaction` invalid `inputs`
        txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
        assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
        assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})

        # Test `createrawtransaction` invalid `outputs`
        address = self.nodes[0].getnewaddress()
        address2 = self.nodes[0].getnewaddress()
        assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
        self.nodes[0].createrawtransaction(inputs=[], outputs={})  # Should not throw for backwards compatibility
        self.nodes[0].createrawtransaction(inputs=[], outputs=[])
        assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
        assert_raises_rpc_error(-5, "Invalid Motherofweeddaycoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
        assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
        assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
        assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
        assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
        assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
        assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
        assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
        assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])

        # Test `createrawtransaction` invalid `locktime`
        assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)

        # Test `createrawtransaction` invalid `replaceable`
        assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')

        self.log.info('Check that createrawtransaction accepts an array and object as outputs')
        tx = CTransaction()
        # One output
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
        assert_equal(len(tx.vout), 1)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
        )
        # Two outputs
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
        assert_equal(len(tx.vout), 2)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
        )
        # Multiple mixed outputs
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
        assert_equal(len(tx.vout), 3)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
        )

        # Signing with incomplete prevtx info for each address type.
        for type in ["bech32", "p2sh-segwit", "legacy"]:
            addr = self.nodes[0].getnewaddress("", type)
            addrinfo = self.nodes[0].getaddressinfo(addr)
            pubkey = addrinfo["scriptPubKey"]

            self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))

            # Test `signrawtransactionwithwallet` invalid `prevtxs`
            inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
            outputs = { self.nodes[0].getnewaddress() : 1 }
            rawtx = self.nodes[0].createrawtransaction(inputs, outputs)

            prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
            succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
            assert succ["complete"]
            if type == "legacy":
                # Legacy inputs can be signed without the amount (no BIP143 digest).
                del prevtx["amount"]
                succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
                assert succ["complete"]

            if type != "legacy":
                # Segwit signing requires the amount to compute the signature hash.
                assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                    {
                        "txid": txid,
                        "scriptPubKey": pubkey,
                        "vout": 3,
                    }
                ])

            assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "txid": txid,
                    "scriptPubKey": pubkey,
                    "amount": 1,
                }
            ])
            assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "scriptPubKey": pubkey,
                    "vout": 3,
                    "amount": 1,
                }
            ])
            assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "txid": txid,
                    "vout": 3,
                    "amount": 1
                }
            ])

        #########################################
        # sendrawtransaction with missing input #
        #########################################

        self.log.info('sendrawtransaction with missing input')
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
        outputs = { self.nodes[0].getnewaddress() : 4.998 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)

        # This will raise an exception since there are missing inputs
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])

        #####################################
        # getrawtransaction with block hash #
        #####################################

        # make a tx by sending then generate 2 blocks; block1 has the tx in it
        tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        block1, block2 = self.nodes[2].generate(2)
        self.sync_all()
        # We should be able to get the raw transaction by providing the correct block
        gottx = self.nodes[0].getrawtransaction(tx, True, block1)
        assert_equal(gottx['txid'], tx)
        assert_equal(gottx['in_active_chain'], True)
        # We should not have the 'in_active_chain' flag when we don't provide a block
        gottx = self.nodes[0].getrawtransaction(tx, True)
        assert_equal(gottx['txid'], tx)
        assert 'in_active_chain' not in gottx
        # We should not get the tx if we provide an unrelated block
        assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
        # An invalid block hash should raise the correct errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
        assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
        assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
        # Undo the blocks and check in_active_chain
        self.nodes[0].invalidateblock(block1)
        gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['in_active_chain'], False)
        self.nodes[0].reconsiderblock(block1)
        assert_equal(self.nodes[0].getbestblockhash(), block2)

        #########################
        # RAW TX MULTISIG TESTS #
        #########################
        # 2of2 test
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[2].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)

        # Tests for createmultisig and addmultisigaddress
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
        self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']

        #use balance deltas instead of absolute values
        bal = self.nodes[2].getbalance()

        # send 1.2 BTC to msig adr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance

        # 2of3 test from different nodes
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr3 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)
        addr3Obj = self.nodes[2].getaddressinfo(addr3)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']

        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        #THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable

        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
        # Find the 2.2-BTC output that funds the multisig address.
        vout = False
        for outpoint in rawTx['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break

        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx

        rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx

        # 2of2 test for combining transactions
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)

        self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)

        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable

        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
        # Find the 2.2-BTC output that funds the 2of2 multisig address.
        vout = False
        for outpoint in rawTx2['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break

        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
        self.log.debug(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx

        rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
        self.log.debug(rawTxPartialSigned2)
        assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
        # Merge the two partial signatures into a fully signed transaction.
        rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
        self.log.debug(rawTxComb)
        self.nodes[2].sendrawtransaction(rawTxComb)
        rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx

        # decoderawtransaction tests
        # witness transaction
        encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
        decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
        assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
        assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
        # non-witness transaction
        encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
        decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
        assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))

        # getrawtransaction tests
        # 1. valid parameters - only supply txid
        txHash = rawTx["hash"]
        assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])

        # 2. valid parameters - supply txid and 0 for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])

        # 3. valid parameters - supply txid and False for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])

        # 4. valid parameters - supply txid and 1 for verbose.
        # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
        assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])

        # 5. valid parameters - supply txid and True for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])

        # 6. invalid parameters - supply txid and string "Flase"
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")

        # 7. invalid parameters - supply txid and empty array
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])

        # 8. invalid parameters - supply txid and empty dict
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})

        # Sequence number handling in createrawtransaction/decoderawtransaction.
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 1000)

        # 9. invalid parameters - sequence number out of range
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)

        # 10. invalid parameters - sequence number out of range
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)

        # 4294967294 is the maximum valid sequence number (0xfffffffe).
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)

        ####################################
        # TRANSACTION VERSION NUMBER TESTS #
        ####################################

        # Test the minimum transaction version number that fits in a signed 32-bit integer.
        tx = CTransaction()
        tx.nVersion = -0x80000000
        rawtx = ToHex(tx)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['version'], -0x80000000)

        # Test the maximum transaction version number that fits in a signed 32-bit integer.
        tx = CTransaction()
        tx.nVersion = 0x7fffffff
        rawtx = ToHex(tx)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['version'], 0x7fffffff)
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 56.166287 | 263 | 0.652634 |
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
    """Dict subclass that preserves duplicate keys.

    Built from a list of (key, value) tuples; ``items()`` returns that list
    verbatim, so json serialization of an instance can emit repeated keys.
    Used to exercise RPC calls whose JSON payload repeats a key.
    """

    def __init__(self, x):
        super().__init__(x)
        self.x = x

    def items(self):
        # Hand back the raw pair list, duplicates and all.
        return self.x
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={})
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Motherofweeddaycoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
awtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
(2, [addr1Obj['pubkey'], addr1])['address']
bal = self.nodes[2].getbalance()
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000'))
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal)
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False)
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
| true | true |
f72893cc483b92a6b0e156ad62e82c1b9d3307f1 | 1,638 | py | Python | IndexerQuery/model/QueryAnalizer.py | Llambi/Web_Semantica | 16f98a7d78ba08366a67caf2bd44f3f45af6ee21 | [
"MIT"
] | null | null | null | IndexerQuery/model/QueryAnalizer.py | Llambi/Web_Semantica | 16f98a7d78ba08366a67caf2bd44f3f45af6ee21 | [
"MIT"
] | null | null | null | IndexerQuery/model/QueryAnalizer.py | Llambi/Web_Semantica | 16f98a7d78ba08366a67caf2bd44f3f45af6ee21 | [
"MIT"
] | null | null | null | import numpy as np
from model.indexer_v1 import Indexer
class QueryAnalizer:
    """Rank indexed documents against a single query, term by term.

    Both the query and the document list are run through ``Indexer`` (which
    builds tf/idf statistics per term); ``cosine_similarity()`` then scores
    each indexed document against every query term.
    """

    def __init__(self, query, document_list, enable_stemming=True, filter_stopwords=True):
        """Index the query and the documents with identical preprocessing.

        :param query: query string (indexed as a one-document collection)
        :param document_list: iterable of document strings to rank
        :param enable_stemming: forward stemming flag to both indexers
        :param filter_stopwords: forward stop-word filtering to both indexers
        """
        self.__query = Indexer([query], enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)
        self.__indexer = Indexer(document_list, enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)
        # Memoized result of cosine_similarity(); None until first call.
        self.result = None

    def cosine_similarity(self):
        """Return ``{term: [(document_text, score), ...]}`` per query term.

        Each term's list is sorted by descending score.  Raises ``KeyError``
        (as before) when a query term is absent from the document index.

        :return: dict mapping query term -> sorted (text, score) pairs
        """
        if self.result is not None:
            return self.result
        result = {}
        for query_term in self.__query.words_index:
            indexer_term = self.__indexer.words_index[query_term]
            # tf-idf weight of the term in the (single-document) query.
            tf_idf_query_term = (self.__query.words_index[query_term]["idf"]
                                 * self.__query.words_index[query_term]["documents"][0]["tf"])
            tf_documents = [doc["tf"] for doc in indexer_term["documents"]]
            dot_product = np.dot(tf_idf_query_term, tf_documents)
            # BUG FIX: cosine similarity normalizes by the *product* of the
            # vector norms, not their sum.  Guard against a zero denominator
            # (all-zero tf values) instead of dividing by zero.
            denominator = np.linalg.norm(tf_idf_query_term) * np.linalg.norm(tf_documents)
            scores = [elem / denominator if denominator else 0.0 for elem in dot_product]
            texts = [doc["document"].text for doc in indexer_term["documents"]]
            result[query_term] = sorted(zip(texts, scores), key=lambda tup: tup[1], reverse=True)
        self.result = result
        return self.result
| 38.093023 | 115 | 0.581807 | import numpy as np
from model.indexer_v1 import Indexer
class QueryAnalizer:
def __init__(self, query, document_list, enable_stemming=True, filter_stopwords=True):
self.__query = Indexer([query], enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)
self.__indexer = Indexer(document_list, enable_stemming=enable_stemming, filter_stopwords=filter_stopwords)
self.result = None
def cosine_similarity(self):
if self.result is not None:
return self.result
result = {}
for query_term, value in self.__query.words_index.items():
indexer_term = self.__indexer.words_index[query_term]
tf_idf_query_term = self.__query.words_index[query_term]["idf"] * \
self.__query.words_index[query_term]["documents"][0]["tf"]
tf_documents = list(map(lambda doc: doc["tf"], indexer_term["documents"]))
dot_product = np.dot(tf_idf_query_term, tf_documents)
result[query_term] = list(zip(
list(
map(
lambda doc: doc["document"].text,
indexer_term["documents"]))
,
list(
map(
lambda elem: elem / (np.linalg.norm(tf_idf_query_term) + np.linalg.norm(tf_documents)),
dot_product
))
))
self.result = result
for key, elm in self.result.items():
self.result[key] = sorted(elm, key=lambda tup: tup[1], reverse=True)
return self.result
| true | true |
f728946dae6ce406dd84a940b4c0b218d3e0a20f | 1,750 | py | Python | extraPackages/matplotlib-3.0.3/examples/images_contours_and_fields/contourf_log.py | dolboBobo/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 130 | 2018-02-03T10:25:54.000Z | 2022-03-25T22:27:22.000Z | extraPackages/matplotlib-3.0.2/examples/images_contours_and_fields/contourf_log.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 9 | 2018-12-14T07:31:42.000Z | 2020-12-09T20:29:28.000Z | extraPackages/matplotlib-3.0.2/examples/images_contours_and_fields/contourf_log.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | """
============================
Contourf and log color scale
============================
Demonstrate use of a log color scale in contourf
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import ticker, cm
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
Z1 = np.exp(-(X)**2 - (Y)**2)
Z2 = np.exp(-(X * 10)**2 - (Y * 10)**2)
z = Z1 + 50 * Z2
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
fig, ax = plt.subplots()
cs = ax.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
# Alternatively, you can manually set the levels
# and the norm:
# lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
# levs = np.power(10, lev_exp)
# cs = ax.contourf(X, Y, z, levs, norm=colors.LogNorm())
cbar = fig.colorbar(cs)
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods and classes is shown
# in this example:
import matplotlib
matplotlib.axes.Axes.contourf
matplotlib.pyplot.contourf
matplotlib.figure.Figure.colorbar
matplotlib.pyplot.colorbar
matplotlib.axes.Axes.legend
matplotlib.pyplot.legend
matplotlib.ticker.LogLocator
| 25.362319 | 77 | 0.632571 |
# Contourf demo with a logarithmic colour scale.
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import ticker, cm

# Sample an N x N grid over [-3, 3] x [-2, 2].
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)

# Broad Gaussian hump (Z1) plus a much narrower, taller spike (Z2).
Z1 = np.exp(-(X) ** 2 - (Y) ** 2)
Z2 = np.exp(-(X * 10) ** 2 - (Y * 10) ** 2)
z = Z1 + 50 * Z2

# Negative corner values would break the log scale; mask all non-positives.
z[:5, :5] = -1
z = ma.masked_where(z <= 0, z)

# LogLocator makes contourf pick logarithmically spaced levels.
fig, ax = plt.subplots()
cs = ax.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
cbar = fig.colorbar(cs)
plt.show()
| true | true |
f728948f490c9d17a2da7ca3ac106a84c306235f | 1,262 | py | Python | Streaming Tweets from Twitter to Database.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | Streaming Tweets from Twitter to Database.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | Streaming Tweets from Twitter to Database.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | import json
import time
import MySQLdb
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
# replace mysql.server with "localhost" if you are running via your own server!
# server MySQL username MySQL pass Database name.
conn = MySQLdb.connect("mysql.server", "beginneraccount", "cookies", "beginneraccount$tutorial")
c = conn.cursor()
# consumer key, consumer secret, access token, access secret.
ckey = "asdfsafsafsaf"
csecret = "asdfasdfsadfsa"
atoken = "asdfsadfsafsaf-asdfsaf"
asecret = "asdfsadfsadfsadfsadfsad"
class listener(StreamListener):
    """Tweepy stream listener that writes incoming tweets to MySQL."""

    def on_data(self, data):
        """Handle one raw stream message.

        Parses the JSON payload, inserts (timestamp, username, tweet text)
        into the ``taula`` table and returns True to keep the stream open.

        :param data: raw JSON string delivered by the Twitter stream
        :return: True to continue streaming
        """
        all_data = json.loads(data)
        # Robustness fix: the stream also delivers non-tweet messages
        # (e.g. delete/limit notices) without "text"/"user" keys; skip
        # them instead of crashing the whole stream with a KeyError.
        if 'text' not in all_data or 'user' not in all_data:
            return True
        tweet = all_data["text"]
        username = all_data["user"]["screen_name"]
        # Parameterized query: avoids SQL injection from tweet content.
        c.execute("INSERT INTO taula (time, username, tweet) VALUES (%s,%s,%s)",
                  (time.time(), username, tweet))
        conn.commit()
        print((username, tweet))
        return True

    def on_error(self, status):
        """Print the Twitter API error status (e.g. 420 = rate limited)."""
        print(status)
# Authenticate against the Twitter API and start a filtered stream.
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

twitterStream = Stream(auth, listener())
# Track every tweet containing "car"; blocks until interrupted.
twitterStream.filter(track=["car"])
| 25.755102 | 97 | 0.652932 | import json
import time
import MySQLdb
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
conn = MySQLdb.connect("mysql.server", "beginneraccount", "cookies", "beginneraccount$tutorial")
c = conn.cursor()
ckey = "asdfsafsafsaf"
csecret = "asdfasdfsadfsa"
atoken = "asdfsadfsafsaf-asdfsaf"
asecret = "asdfsadfsadfsadfsadfsad"
class listener(StreamListener):
    """Stream listener that persists every incoming tweet into MySQL."""

    def on_data(self, data):
        """Insert one tweet (timestamp, author, text) and keep streaming."""
        parsed = json.loads(data)
        text = parsed["text"]
        author = parsed["user"]["screen_name"]
        row = (time.time(), author, text)
        c.execute("INSERT INTO taula (time, username, tweet) VALUES (%s,%s,%s)", row)
        conn.commit()
        print((author, text))
        return True

    def on_error(self, status):
        """Report stream errors by printing the status code."""
        print(status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["car"])
| true | true |
f728962f01068aac485157eee59d8b8eb5b48694 | 2,034 | py | Python | src/arknights/resource/dev/grab_pos.py | WaterHyacinthInNANHU/ArkOS | 1919b7a2f22bc407d0a5503a9c1db8e30bbbc092 | [
"MIT"
] | null | null | null | src/arknights/resource/dev/grab_pos.py | WaterHyacinthInNANHU/ArkOS | 1919b7a2f22bc407d0a5503a9c1db8e30bbbc092 | [
"MIT"
] | null | null | null | src/arknights/resource/dev/grab_pos.py | WaterHyacinthInNANHU/ArkOS | 1919b7a2f22bc407d0a5503a9c1db8e30bbbc092 | [
"MIT"
] | null | null | null | # used to grab template from screen
import sys
import signal
from arknights.player import Player
from arknights.resource import save_position
import cv2
from arknights.imgops import pil2cv
from .common import Bcolors
def log(s: str):
    """Print *s* highlighted with the terminal's OK-green colour codes."""
    print(f"{Bcolors.OKGREEN}{s}{Bcolors.ENDC}")
def signal_handler(sig, frame=None):
    """Clean-shutdown handler for SIGINT/SIGTERM: log, release PLAYER, exit.

    Bug fixes vs. the original:
    * Python invokes signal handlers as handler(signum, frame); the old
      one-argument signature raised TypeError when a signal actually fired.
      The extra parameter defaults to None so direct calls still work.
    * ``del PLAYER`` without a ``global`` declaration made PLAYER a local
      name and raised UnboundLocalError; declaring it global actually
      releases the module-level Player instance before exiting.

    :param sig: signal number delivered by the OS
    :param frame: current stack frame (supplied by the signal machinery)
    """
    global PLAYER
    log('Caught ' + str(sig))
    log('Exit')
    del PLAYER
    sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
global display_img
global point
PLAYER = Player()
PLAYER.connect_device()
DISPLAY_WINDOW = 'screenshot'
EXTENSION = '.png'
def on_mouse(event, x, y, flags, param):
    """OpenCV mouse callback: record a left-click position on the screenshot.

    Stores the clicked (x, y) in the module-global ``point`` and redraws the
    screenshot window with a red marker at that spot.
    """
    global display_img, point
    # Draw on a copy so repeated clicks don't accumulate markers.
    img2 = display_img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:
        point = (x, y)
        cv2.circle(img2, point, 5, (0, 0, 255), 10)
        cv2.imshow(DISPLAY_WINDOW, img2)
def grab(save=True):
    """Interactively pick a screen position from a device screenshot.

    Takes a screenshot via PLAYER, shows it in an OpenCV window, lets the
    user left-click a position (handled by ``on_mouse``), and — when *save*
    is True — prompts for a name and persists the position together with the
    current viewport resolution via ``save_position``.

    :param save: when True, prompt for a name and save the picked position
    """
    global display_img, point
    img = PLAYER.screenshot()
    display_img = pil2cv(img)
    cv2.namedWindow(DISPLAY_WINDOW, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(DISPLAY_WINDOW, on_mouse)
    cv2.imshow(DISPLAY_WINDOW, display_img)
    # Block until a key is pressed; clicks are handled by the callback.
    cv2.waitKey(0)
    cv2.destroyWindow(DISPLAY_WINDOW)
    resolution = PLAYER.viewport
    # NOTE(review): if the window is closed without any left-click, ``point``
    # is never assigned and the next line raises NameError — confirm.
    log('position {}'.format(point))
    if save:
        while True:
            path = input('please input the path to save this position\n')
            try:
                save_path = save_position(point, resolution, path)
            except KeyError:
                # save_position signals a name collision with KeyError;
                # ask whether to overwrite the existing entry.
                log('position name has already exist, do you want to overwrite it?[y/n]')
                ans = None
                while ans not in ['y', 'n']:
                    ans = input()
                if ans == 'y':
                    save_path = save_position(point, resolution, path, force=True)
                    log('position successfully saved to ' + save_path)
                    break
                else:
                    break
            else:
                log('position successfully saved to ' + save_path)
                break
import sys
import signal
from arknights.player import Player
from arknights.resource import save_position
import cv2
from arknights.imgops import pil2cv
from .common import Bcolors
def log(s: str):
    """Echo *s* in green so the tool's output stands out in the terminal."""
    colored = Bcolors.OKGREEN + s + Bcolors.ENDC
    print(colored)
def signal_handler(sig):
    """Intended SIGINT/SIGTERM handler: log the signal, release PLAYER, exit.

    NOTE(review): Python calls signal handlers as handler(signum, frame);
    this one-argument signature raises TypeError when a signal fires.
    Also, ``del PLAYER`` without a ``global PLAYER`` declaration makes
    PLAYER a local name and raises UnboundLocalError.  Both need fixing
    before this handler can actually run.
    """
    log('Caught ' + str(sig))
    log('Exit')
    del PLAYER
    sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
global display_img
global point
PLAYER = Player()
PLAYER.connect_device()
DISPLAY_WINDOW = 'screenshot'
EXTENSION = '.png'
def on_mouse(event, x, y, flags, param):
    """Mouse callback: on left click, remember the position and mark it."""
    global display_img, point
    preview = display_img.copy()
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    point = (x, y)
    # Red dot at the clicked location, shown on a throwaway copy.
    cv2.circle(preview, point, 5, (0, 0, 255), 10)
    cv2.imshow(DISPLAY_WINDOW, preview)
def grab(save=True):
    """Show a device screenshot, let the user click a position, optionally
    persist it (with the viewport resolution) under a user-supplied name."""
    global display_img, point
    img = PLAYER.screenshot()
    display_img = pil2cv(img)
    cv2.namedWindow(DISPLAY_WINDOW, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(DISPLAY_WINDOW, on_mouse)
    cv2.imshow(DISPLAY_WINDOW, display_img)
    # Blocks until a key press; left clicks are handled by on_mouse.
    cv2.waitKey(0)
    cv2.destroyWindow(DISPLAY_WINDOW)
    resolution = PLAYER.viewport
    # NOTE(review): ``point`` is unassigned if the user never clicked — verify.
    log('position {}'.format(point))
    if save:
        while True:
            path = input('please input the path to save this position\n')
            try:
                save_path = save_position(point, resolution, path)
            except KeyError:
                # KeyError from save_position means the name already exists.
                log('position name has already exist, do you want to overwrite it?[y/n]')
                ans = None
                while ans not in ['y', 'n']:
                    ans = input()
                if ans == 'y':
                    save_path = save_position(point, resolution, path, force=True)
                    log('position successfully saved to ' + save_path)
                    break
                else:
                    break
            else:
                log('position successfully saved to ' + save_path)
                break
| true | true |
f728967beaa99aaa7a2879d5cca95a5810880667 | 4,965 | py | Python | kecpkg/create.py | jberends/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | [
"Apache-2.0"
] | null | null | null | kecpkg/create.py | jberends/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | [
"Apache-2.0"
] | 7 | 2017-12-07T11:16:07.000Z | 2019-12-11T15:25:07.000Z | kecpkg/create.py | KE-works/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import subprocess
import sys
import six
from kecpkg.files.rendering import render_to_file
from kecpkg.utils import (ensure_dir_exists, get_proper_python, NEED_SUBPROCESS_SHELL, venv,
echo_success, echo_failure, echo_info)
def create_package(package_dir, settings):
    """Scaffold a new kecpkg package directory.

    Creates README.md, requirements.txt, package_info.json, .gitignore,
    .env, the PyCharm run configurations and the entrypoint script inside
    ``package_dir``.

    :param package_dir: the full path to the package dir
    :param settings: settings dict used to render the templates
    """
    ensure_dir_exists(package_dir)

    # Settings-driven templates at the package root.
    for template_name in ('README.md', 'requirements.txt'):
        render_to_file(template_name, content=settings, target_dir=package_dir)

    package_info = dict(
        requirements_txt='requirements.txt',
        entrypoint_script=settings.get('entrypoint_script'),
        entrypoint_func=settings.get('entrypoint_func'),
    )
    render_to_file('package_info.json', content=package_info, target_dir=package_dir)

    for dotfile in ('.gitignore', '.env'):
        render_to_file(dotfile, content=dict(), target_dir=package_dir)

    # PyCharm run configurations for building and uploading the kecpkg.
    run_config_dir = os.path.join(package_dir, '.idea', 'runConfigurations')
    ensure_dir_exists(run_config_dir)
    for run_config in ('Upload_the_kecpkg.xml', 'Build_the_kecpkg.xml'):
        render_to_file(run_config, content=dict(), target_dir=run_config_dir)

    entry_script = '{}.py'.format(settings.get('entrypoint_script'))
    render_to_file(entry_script, content=settings, template='script.py.template', target_dir=package_dir)
def create_venv(package_dir, settings, pypath=None, use_global=False, verbose=False):
    """Create the package's virtual environment in ``settings['venv_dir']``.

    :param package_dir: the full path to the package directory
    :param settings: the settings dict (provides the ``venv_dir`` name)
    :param pypath: absolute path to the python interpreter to build the venv
        with; defaults to the interpreter found via ``get_proper_python()``
    :param use_global: use the global system site-packages (default False)
    :param verbose: keep virtualenv output (default False -> ``-qqq``)
    :return: 0 on success, non-zero on failure
    """
    venv_dir = os.path.join(package_dir, settings.get('venv_dir'))

    if not pypath:
        from distutils.spawn import find_executable
        pypath = find_executable(get_proper_python())

    command = [sys.executable, '-m', 'virtualenv', venv_dir, '-p', pypath]
    if use_global:  # no cov
        command.append('--system-site-packages')
    if not verbose:  # no cov
        command.append('-qqq')

    if six.PY3:
        result = subprocess.run(command, shell=NEED_SUBPROCESS_SHELL)
        return result.returncode
    elif six.PY2:
        # BUG FIX: check_output() raises CalledProcessError on a non-zero
        # exit and returns the captured stdout (typically empty because of
        # -qqq) on success, so the old ``result and 0 or -1`` reported
        # failure (-1) for a *successful* quiet run.
        subprocess.check_output(command, shell=NEED_SUBPROCESS_SHELL)
        return 0
def pip_install_venv(package_dir, settings, verbose=False):
    """Install the package requirements into its virtual environment.

    :param package_dir: the full path to the package directory
    :param settings: the settings dict (provides ``venv_dir`` and
        ``requirements_filename``)
    :param verbose: be more verbose if True (default False -> pip ``-qqq``)
    :return: pip's exit code (0 on success); exits the process when the venv
        or the requirements file is missing
    """
    venv_dir = os.path.join(package_dir, settings.get('venv_dir'))
    if not os.path.exists(venv_dir):
        echo_failure('virtual environment directory `{}` does not exists, nothing to install'.format(venv_dir))
        sys.exit(1)

    if not os.path.exists(os.path.join(package_dir, settings.get('requirements_filename'))):
        echo_failure('could not find requirements.txt to install, check if `{}` exists or update settings'.format(
            settings.get('requirements_filename')))
        sys.exit(1)

    install_command = [sys.executable, '-m', 'pip', 'install', '-r',
                       os.path.join(package_dir, settings.get('requirements_filename'))]
    if not verbose:  # no cov
        install_command.append('-qqq')

    with venv(venv_dir):
        echo_info('Installing requirements from `{}` into the virtual environment `{}`'.
                  format(settings.get('requirements_filename'), settings.get('venv_dir')))
        if six.PY3:
            result = subprocess.run(install_command, shell=NEED_SUBPROCESS_SHELL)
            return result.returncode
        elif six.PY2:
            # Same fix as create_venv(): check_output raises on failure and
            # returns (possibly empty) stdout on success, so success is 0.
            subprocess.check_output(install_command, shell=NEED_SUBPROCESS_SHELL)
            return 0
        # Dead code removed: the original fell through to an unreachable
        # ``if result: echo_success(...); return result.returncode`` block
        # located after both return statements.
| 40.696721 | 114 | 0.69144 | from __future__ import print_function
import os
import subprocess
import sys
import six
from kecpkg.files.rendering import render_to_file
from kecpkg.utils import (ensure_dir_exists, get_proper_python, NEED_SUBPROCESS_SHELL, venv,
echo_success, echo_failure, echo_info)
def create_package(package_dir, settings):
    """Scaffold a new kecpkg package directory from the bundled templates.

    Renders the README, requirements, ``package_info.json``, dotfiles,
    PyCharm run configurations and the entrypoint script into ``package_dir``.

    :param package_dir: full path of the package directory to create
    :param settings: settings dict; must provide ``entrypoint_script`` and
        ``entrypoint_func``
    """
    ensure_dir_exists(package_dir)
    render_to_file('README.md', content=settings, target_dir=package_dir)
    render_to_file('requirements.txt', content=settings, target_dir=package_dir)
    # package_info.json records where the uploader finds the requirements and
    # which script/function is the package entrypoint.
    render_to_file('package_info.json', content=dict(requirements_txt='requirements.txt',
                                                     entrypoint_script=settings.get('entrypoint_script'),
                                                     entrypoint_func=settings.get('entrypoint_func')),
                   target_dir=package_dir)
    render_to_file('.gitignore', content=dict(), target_dir=package_dir)
    render_to_file('.env', content=dict(), target_dir=package_dir)
    # PyCharm convenience: pre-baked "Build"/"Upload" run configurations.
    run_configurations_path = os.path.join(package_dir, '.idea', 'runConfigurations')
    ensure_dir_exists(run_configurations_path)
    render_to_file('Upload_the_kecpkg.xml', content=dict(), target_dir=run_configurations_path)
    render_to_file('Build_the_kecpkg.xml', content=dict(), target_dir=run_configurations_path)
    script_filename = '{}.py'.format(settings.get('entrypoint_script'))
    render_to_file(script_filename, content=settings, template='script.py.template', target_dir=package_dir)
def create_venv(package_dir, settings, pypath=None, use_global=False, verbose=False):
    """Create a virtualenv inside the package directory.

    :param package_dir: full path to the package directory
    :param settings: settings dict (provides ``venv_dir``)
    :param pypath: (optional) python interpreter to seed the venv with;
        auto-detected when omitted
    :param use_global: (optional) expose system site-packages in the venv
    :param verbose: (optional) suppress virtualenv output when False
    :return: virtualenv's return code on Python 3; on Python 2, 0 when the
        command produced output and -1 otherwise (legacy behaviour)
    """
    venv_dir = os.path.join(package_dir, settings.get('venv_dir'))
    if not pypath:
        # NOTE(review): distutils is deprecated (removed in Python 3.12);
        # kept because this code still supports Python 2 via six.
        from distutils.spawn import find_executable
        pypath = find_executable(get_proper_python())
    command = [sys.executable, '-m', 'virtualenv', venv_dir, '-p', pypath]
    if use_global:
        command.append('--system-site-packages')
    if not verbose:
        command.append('-qqq')
    if six.PY3:
        result = subprocess.run(command, shell=NEED_SUBPROCESS_SHELL)
        return result.returncode
    elif six.PY2:
        # check_output returns the captured stdout (bytes) on success.
        result = subprocess.check_output(command, shell=NEED_SUBPROCESS_SHELL)
        return result and 0 or -1
def pip_install_venv(package_dir, settings, verbose=False):
    """Install the package requirements into the virtual environment.

    Exits the process (status 1) when the venv or the requirements file is
    missing; otherwise returns pip's status (see branch comments below).

    :param package_dir: the full path to the package directory
    :param settings: the settings dict (including the venv_dir name)
    :param verbose: (optional) be more verbose if set to True, defaults to False
    """
    venv_dir = os.path.join(package_dir, settings.get('venv_dir'))
    if not os.path.exists(venv_dir):
        echo_failure('virtual environment directory `{}` does not exists, nothing to install'.format(venv_dir))
        sys.exit(1)
    if not os.path.exists(os.path.join(package_dir, settings.get('requirements_filename'))):
        echo_failure('could not find requirements.txt to install, check if `{}` exists or update settings'.format(
            settings.get('requirements_filename')))
        sys.exit(1)
    install_command = [sys.executable, '-m', 'pip', 'install', '-r',
                       os.path.join(package_dir, settings.get('requirements_filename'))]
    if not verbose:
        install_command.append('-qqq')
    with venv(venv_dir):
        echo_info('Installing requirements from `{}` into the virtual environment `{}`'.
                  format(settings.get('requirements_filename'), settings.get('venv_dir')))
        result = None
        if six.PY3:
            result = subprocess.run(install_command, shell=NEED_SUBPROCESS_SHELL)
            return result.returncode
        elif six.PY2:
            # 0 when pip produced output, -1 otherwise (legacy behaviour).
            result = subprocess.check_output(install_command, shell=NEED_SUBPROCESS_SHELL)
            return result and 0 or -1
    # NOTE(review): unreachable — both branches above return inside the
    # `with` block; candidate for removal.
    if result:
        echo_success(str(result))
        return result.returncode
| true | true |
f728969984ccf88bea20ee9c61cec9a023d696fb | 1,805 | py | Python | app.py | saurabdongre/Covid-19_Assistant | 17f2ac4aabe5f5dedda8239cbeafdf1b4da866cd | [
"MIT"
] | null | null | null | app.py | saurabdongre/Covid-19_Assistant | 17f2ac4aabe5f5dedda8239cbeafdf1b4da866cd | [
"MIT"
] | null | null | null | app.py | saurabdongre/Covid-19_Assistant | 17f2ac4aabe5f5dedda8239cbeafdf1b4da866cd | [
"MIT"
] | null | null | null | from chatbot import chatbot
from flask import Flask, render_template, request
import random
import re
import webbrowser
import smtplib
import os
trainer_dict = []
app = Flask(__name__)
app.static_folder = 'static'
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
if userText != 'exit':
trainer_dict.append(userText)
reply_text = str(chatbot.get_response(userText))
trainer_dict.append(reply_text)
return reply_text
else:
writeFile()
return "Goodbye"
os.exit(0)
def sendEmail(body):
    """Send a notification e-mail via Gmail SMTP-over-SSL.

    NOTE(review): the ``body`` parameter is currently ignored -- the message
    always carries the placeholder subject/text below; confirm intent.
    NOTE(review): login credentials and addresses are hard-coded placeholders;
    real secrets must never be committed here.
    """
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    server.login("user", "pass")  # placeholder credentials
    SUBJECT = "Incident Creation"
    TEXT = "Dummy Text"
    # Minimal RFC-822 style payload: subject header, blank line, body text.
    msg = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
    server.sendmail("user@gmail.com", "user@gmail.com", msg)
    server.quit()
def writeFile():
    """Dump the recorded conversation to ``training_data/<timestamp>.txt``.

    The file is created relative to this script's directory, one transcript
    entry from ``trainer_dict`` per line.
    """
    from datetime import datetime

    # `base_dir` instead of the original `dir`, which shadowed the builtin.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    filename = datetime.now().strftime("%d%m%Y%I%M%S%p") + ".txt"
    # os.path.join keeps the path portable; the original hard-coded a Windows
    # backslash ("training_data\\"), producing a wrong filename on POSIX.
    # The blocks of commented-out experimental path code were removed.
    path = os.path.join(base_dir, "training_data", filename)
    with open(path, 'w+') as f:
        for item in trainer_dict:
            f.write("%s\n" % item)
if __name__ == "__main__":
webbrowser.open('http://localhost:5000')
app.run()
| 26.544118 | 87 | 0.614404 | from chatbot import chatbot
from flask import Flask, render_template, request
import random
import re
import webbrowser
import smtplib
import os
trainer_dict = []
app = Flask(__name__)
app.static_folder = 'static'
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
if userText != 'exit':
trainer_dict.append(userText)
reply_text = str(chatbot.get_response(userText))
trainer_dict.append(reply_text)
return reply_text
else:
writeFile()
return "Goodbye"
os.exit(0)
def sendEmail(body):
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login("user", "pass")
SUBJECT = "Incident Creation"
TEXT = "Dummy Text"
msg = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
server.sendmail("user@gmail.com", "user@gmail.com", msg)
server.quit()
def writeFile():
from datetime import datetime
dir = os.path.dirname(os.path.abspath(__file__))
filename = datetime.now().strftime("%d%m%Y%I%M%S%p")+".txt"
rel_path = "training_data\\"+filename
path = os.path.join(dir, rel_path)
with open(path, 'w+') as f:
for item in trainer_dict:
f.write("%s\n" % item)
if __name__ == "__main__":
webbrowser.open('http://localhost:5000')
app.run()
| true | true |
f72896cff2b507417aa89c6dab562cd14c7684c4 | 8,522 | py | Python | frappe-bench/env/lib/python2.7/site-packages/faker/providers/address/en_CA/__init__.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/faker/providers/address/en_CA/__init__.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/faker/providers/address/en_CA/__init__.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import re
from ..en import Provider as AddressProvider
class Provider(AddressProvider):
# Source: https://www.canadapost.ca/tools/pg/manual/PGaddress-e.asp#1449294
#
# 'W' and 'Z' are valid in non-initial position (easily verified in the
# wild), but online official documentation is hard to find, so just ignore
# them for now.
postal_code_letters = (
'A', 'B', 'C', 'E', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S',
'T', 'V', 'X', 'Y',
)
city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port')
city_suffixes = (
'town',
'ton',
'land',
'ville',
'berg',
'burgh',
'borough',
'bury',
'view',
'port',
'mouth',
'stad',
'furt',
'chester',
'mouth',
'fort',
'haven',
'side',
'shire')
building_number_formats = ('#####', '####', '###')
street_suffixes = (
'Alley',
'Avenue',
'Branch',
'Bridge',
'Brook',
'Brooks',
'Burg',
'Burgs',
'Bypass',
'Camp',
'Canyon',
'Cape',
'Causeway',
'Center',
'Centers',
'Circle',
'Circles',
'Cliff',
'Cliffs',
'Club',
'Common',
'Corner',
'Corners',
'Course',
'Court',
'Courts',
'Cove',
'Coves',
'Creek',
'Crescent',
'Crest',
'Crossing',
'Crossroad',
'Curve',
'Dale',
'Dam',
'Divide',
'Drive',
'Drive',
'Drives',
'Estate',
'Estates',
'Expressway',
'Extension',
'Extensions',
'Fall',
'Falls',
'Ferry',
'Field',
'Fields',
'Flat',
'Flats',
'Ford',
'Fords',
'Forest',
'Forge',
'Forges',
'Fork',
'Forks',
'Fort',
'Freeway',
'Garden',
'Gardens',
'Gateway',
'Glen',
'Glens',
'Green',
'Greens',
'Grove',
'Groves',
'Harbor',
'Harbors',
'Haven',
'Heights',
'Highway',
'Hill',
'Hills',
'Hollow',
'Inlet',
'Inlet',
'Island',
'Island',
'Islands',
'Islands',
'Isle',
'Isle',
'Junction',
'Junctions',
'Key',
'Keys',
'Knoll',
'Knolls',
'Lake',
'Lakes',
'Land',
'Landing',
'Lane',
'Light',
'Lights',
'Loaf',
'Lock',
'Locks',
'Locks',
'Lodge',
'Lodge',
'Loop',
'Mall',
'Manor',
'Manors',
'Meadow',
'Meadows',
'Mews',
'Mill',
'Mills',
'Mission',
'Mission',
'Motorway',
'Mount',
'Mountain',
'Mountain',
'Mountains',
'Mountains',
'Neck',
'Orchard',
'Oval',
'Overpass',
'Park',
'Parks',
'Parkway',
'Parkways',
'Pass',
'Passage',
'Path',
'Pike',
'Pine',
'Pines',
'Place',
'Plain',
'Plains',
'Plains',
'Plaza',
'Plaza',
'Point',
'Points',
'Port',
'Port',
'Ports',
'Ports',
'Prairie',
'Prairie',
'Radial',
'Ramp',
'Ranch',
'Rapid',
'Rapids',
'Rest',
'Ridge',
'Ridges',
'River',
'Road',
'Road',
'Roads',
'Roads',
'Route',
'Row',
'Rue',
'Run',
'Shoal',
'Shoals',
'Shore',
'Shores',
'Skyway',
'Spring',
'Springs',
'Springs',
'Spur',
'Spurs',
'Square',
'Square',
'Squares',
'Squares',
'Station',
'Station',
'Stravenue',
'Stravenue',
'Stream',
'Stream',
'Street',
'Street',
'Streets',
'Summit',
'Summit',
'Terrace',
'Throughway',
'Trace',
'Track',
'Trafficway',
'Trail',
'Trail',
'Tunnel',
'Tunnel',
'Turnpike',
'Turnpike',
'Underpass',
'Union',
'Unions',
'Valley',
'Valleys',
'Via',
'Viaduct',
'View',
'Views',
'Village',
'Village',
'Villages',
'Ville',
'Vista',
'Vista',
'Walk',
'Walks',
'Wall',
'Way',
'Ways',
'Well',
'Wells')
postal_code_formats = ('?%? %?%', '?%?%?%')
provinces = (
'Alberta', 'British Columbia', 'Manitoba', 'New Brunswick',
'Newfoundland and Labrador', 'Northwest Territories',
'Nova Scotia', 'Nunavut', 'Ontario',
'Prince Edward Island', 'Quebec', 'Saskatchewan', 'Yukon Territory')
provinces_abbr = (
'AB', 'BC', 'MB', 'NB', 'NL', 'NT', 'NS',
'NU', 'ON', 'PE', 'QC', 'SK', 'YT')
provinces_postcode_prefixes = {
'NL': ['A'], 'NS': ['B'], 'PE': ['C'], 'NB': ['E'],
'QC': ['G', 'H', 'J'], 'ON': ['K', 'L', 'M', 'N', 'P'],
'MB': ['R'], 'SK': ['S'], 'AB': ['T'], 'BC': ['V'],
'NU': ['X'], 'NT': ['X'], 'YT': ['Y'],
}
city_formats = (
'{{city_prefix}} {{first_name}}{{city_suffix}}',
'{{city_prefix}} {{first_name}}',
'{{first_name}}{{city_suffix}}',
'{{last_name}}{{city_suffix}}',
)
street_name_formats = (
'{{first_name}} {{street_suffix}}',
'{{last_name}} {{street_suffix}}',
)
street_address_formats = (
'{{building_number}} {{street_name}}',
'{{building_number}} {{street_name}} {{secondary_address}}',
)
address_formats = (
"{{street_address}}\n{{city}}, {{province_abbr}} {{postalcode}}",
)
secondary_address_formats = ('Apt. ###', 'Suite ###')
    def province(self):
        """Return a random Canadian province or territory name."""
        return self.random_element(self.provinces)
    def province_abbr(self):
        """Return a random two-letter province/territory abbreviation."""
        return self.random_element(self.provinces_abbr)
    def city_prefix(self):
        """Return a random city-name prefix (e.g. ``North``, ``Lake``)."""
        return self.random_element(self.city_prefixes)
    def secondary_address(self):
        """Return a random secondary address (``Apt. ###`` / ``Suite ###``)."""
        # numerify() replaces each '#' placeholder with a random digit.
        return self.numerify(
            self.random_element(
                self.secondary_address_formats))
    def postal_code_letter(self):
        """
        Return a random letter from the list of letters allowable
        in a Canadian postal code.
        """
        return self.random_element(self.postal_code_letters)
    def _postcode_replace(self, postal_code_format):
        """
        Replaces all question mark ('?') occurrences with a random letter
        from given postal_code_format, then passes result to numerify to insert
        numbers
        """
        # The lambda is evaluated per match, so every '?' gets an
        # independently drawn letter.
        temp = re.sub(r'\?',
                      lambda x: self.postal_code_letter(),
                      postal_code_format)
        return self.numerify(temp)
    def postcode(self):
        """
        Return a random Canadian postcode (e.g. ``A1B 2C3``).
        """
        return self._postcode_replace(
            self.random_element(self.postal_code_formats))
def postcode_in_province(self, province_abbr=None):
"""
Returns a random postcode within the provided province abbreviation
"""
if province_abbr is None:
province_abbr = self.random_element(self.provinces_abbr)
if province_abbr in self.provinces_abbr:
postal_code_format = self.random_element(self.postal_code_formats)
postal_code_format = postal_code_format.replace(
'?',
self.generator.random_element(
self.provinces_postcode_prefixes[province_abbr]),
1)
return self._postcode_replace(postal_code_format)
else:
raise Exception('Province Abbreviation not found in list')
    def postalcode_in_province(self, province_abbr=None):
        """Alias of :meth:`postcode_in_province`."""
        return self.postcode_in_province(province_abbr)
    def postalcode(self):
        """Alias of :meth:`postcode`."""
        return self.postcode()
| 22.786096 | 80 | 0.443558 | from __future__ import unicode_literals
import re
from ..en import Provider as AddressProvider
class Provider(AddressProvider):
postal_code_letters = (
'A', 'B', 'C', 'E', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S',
'T', 'V', 'X', 'Y',
)
city_prefixes = ('North', 'East', 'West', 'South', 'New', 'Lake', 'Port')
city_suffixes = (
'town',
'ton',
'land',
'ville',
'berg',
'burgh',
'borough',
'bury',
'view',
'port',
'mouth',
'stad',
'furt',
'chester',
'mouth',
'fort',
'haven',
'side',
'shire')
building_number_formats = ('#####', '####', '###')
street_suffixes = (
'Alley',
'Avenue',
'Branch',
'Bridge',
'Brook',
'Brooks',
'Burg',
'Burgs',
'Bypass',
'Camp',
'Canyon',
'Cape',
'Causeway',
'Center',
'Centers',
'Circle',
'Circles',
'Cliff',
'Cliffs',
'Club',
'Common',
'Corner',
'Corners',
'Course',
'Court',
'Courts',
'Cove',
'Coves',
'Creek',
'Crescent',
'Crest',
'Crossing',
'Crossroad',
'Curve',
'Dale',
'Dam',
'Divide',
'Drive',
'Drive',
'Drives',
'Estate',
'Estates',
'Expressway',
'Extension',
'Extensions',
'Fall',
'Falls',
'Ferry',
'Field',
'Fields',
'Flat',
'Flats',
'Ford',
'Fords',
'Forest',
'Forge',
'Forges',
'Fork',
'Forks',
'Fort',
'Freeway',
'Garden',
'Gardens',
'Gateway',
'Glen',
'Glens',
'Green',
'Greens',
'Grove',
'Groves',
'Harbor',
'Harbors',
'Haven',
'Heights',
'Highway',
'Hill',
'Hills',
'Hollow',
'Inlet',
'Inlet',
'Island',
'Island',
'Islands',
'Islands',
'Isle',
'Isle',
'Junction',
'Junctions',
'Key',
'Keys',
'Knoll',
'Knolls',
'Lake',
'Lakes',
'Land',
'Landing',
'Lane',
'Light',
'Lights',
'Loaf',
'Lock',
'Locks',
'Locks',
'Lodge',
'Lodge',
'Loop',
'Mall',
'Manor',
'Manors',
'Meadow',
'Meadows',
'Mews',
'Mill',
'Mills',
'Mission',
'Mission',
'Motorway',
'Mount',
'Mountain',
'Mountain',
'Mountains',
'Mountains',
'Neck',
'Orchard',
'Oval',
'Overpass',
'Park',
'Parks',
'Parkway',
'Parkways',
'Pass',
'Passage',
'Path',
'Pike',
'Pine',
'Pines',
'Place',
'Plain',
'Plains',
'Plains',
'Plaza',
'Plaza',
'Point',
'Points',
'Port',
'Port',
'Ports',
'Ports',
'Prairie',
'Prairie',
'Radial',
'Ramp',
'Ranch',
'Rapid',
'Rapids',
'Rest',
'Ridge',
'Ridges',
'River',
'Road',
'Road',
'Roads',
'Roads',
'Route',
'Row',
'Rue',
'Run',
'Shoal',
'Shoals',
'Shore',
'Shores',
'Skyway',
'Spring',
'Springs',
'Springs',
'Spur',
'Spurs',
'Square',
'Square',
'Squares',
'Squares',
'Station',
'Station',
'Stravenue',
'Stravenue',
'Stream',
'Stream',
'Street',
'Street',
'Streets',
'Summit',
'Summit',
'Terrace',
'Throughway',
'Trace',
'Track',
'Trafficway',
'Trail',
'Trail',
'Tunnel',
'Tunnel',
'Turnpike',
'Turnpike',
'Underpass',
'Union',
'Unions',
'Valley',
'Valleys',
'Via',
'Viaduct',
'View',
'Views',
'Village',
'Village',
'Villages',
'Ville',
'Vista',
'Vista',
'Walk',
'Walks',
'Wall',
'Way',
'Ways',
'Well',
'Wells')
postal_code_formats = ('?%? %?%', '?%?%?%')
provinces = (
'Alberta', 'British Columbia', 'Manitoba', 'New Brunswick',
'Newfoundland and Labrador', 'Northwest Territories',
'Nova Scotia', 'Nunavut', 'Ontario',
'Prince Edward Island', 'Quebec', 'Saskatchewan', 'Yukon Territory')
provinces_abbr = (
'AB', 'BC', 'MB', 'NB', 'NL', 'NT', 'NS',
'NU', 'ON', 'PE', 'QC', 'SK', 'YT')
provinces_postcode_prefixes = {
'NL': ['A'], 'NS': ['B'], 'PE': ['C'], 'NB': ['E'],
'QC': ['G', 'H', 'J'], 'ON': ['K', 'L', 'M', 'N', 'P'],
'MB': ['R'], 'SK': ['S'], 'AB': ['T'], 'BC': ['V'],
'NU': ['X'], 'NT': ['X'], 'YT': ['Y'],
}
city_formats = (
'{{city_prefix}} {{first_name}}{{city_suffix}}',
'{{city_prefix}} {{first_name}}',
'{{first_name}}{{city_suffix}}',
'{{last_name}}{{city_suffix}}',
)
street_name_formats = (
'{{first_name}} {{street_suffix}}',
'{{last_name}} {{street_suffix}}',
)
street_address_formats = (
'{{building_number}} {{street_name}}',
'{{building_number}} {{street_name}} {{secondary_address}}',
)
address_formats = (
"{{street_address}}\n{{city}}, {{province_abbr}} {{postalcode}}",
)
secondary_address_formats = ('Apt. ###', 'Suite ###')
def province(self):
return self.random_element(self.provinces)
def province_abbr(self):
return self.random_element(self.provinces_abbr)
def city_prefix(self):
return self.random_element(self.city_prefixes)
def secondary_address(self):
return self.numerify(
self.random_element(
self.secondary_address_formats))
def postal_code_letter(self):
return self.random_element(self.postal_code_letters)
def _postcode_replace(self, postal_code_format):
temp = re.sub(r'\?',
lambda x: self.postal_code_letter(),
postal_code_format)
return self.numerify(temp)
def postcode(self):
return self._postcode_replace(
self.random_element(self.postal_code_formats))
def postcode_in_province(self, province_abbr=None):
if province_abbr is None:
province_abbr = self.random_element(self.provinces_abbr)
if province_abbr in self.provinces_abbr:
postal_code_format = self.random_element(self.postal_code_formats)
postal_code_format = postal_code_format.replace(
'?',
self.generator.random_element(
self.provinces_postcode_prefixes[province_abbr]),
1)
return self._postcode_replace(postal_code_format)
else:
raise Exception('Province Abbreviation not found in list')
def postalcode_in_province(self, province_abbr=None):
return self.postcode_in_province(province_abbr)
def postalcode(self):
return self.postcode()
| true | true |
f72897ac35d6d93b6020380f7e88be2a60683e88 | 3,957 | py | Python | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | null | null | null | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | 6 | 2020-03-13T08:17:49.000Z | 2021-05-04T14:43:01.000Z | katsdpdisp/test/test_data.py | ska-sa/katsdpdisp | 3fd2f5878c0bd3ae56815568446593b876881e3f | [
"BSD-3-Clause"
] | null | null | null | """Tests for :py:mod:`katsdpdisp.data`."""
import numpy as np
from numpy.testing import assert_array_equal
from katsdpdisp.data import SparseArray
def test_sparsearray(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6,islot_new_bls=6):
    """Simulates the assignment and retrieval of data as it happens in the signal displays when
    it receives different sets of baseline data at different timestamps, with some time continuity.
    (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset
    (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)
    where maxbaselines<fullbls
    islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence"""
    mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)
    # Fixed seed keeps the generated "full" dataset and baseline choices deterministic.
    rs = np.random.RandomState(seed=0)
    # NOTE(review): random_integers is deprecated in modern numpy (use randint) — confirm target numpy version.
    fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])
    histbaselines=[]
    for it in range(fullslots):
        if it%islot_new_bls==0:#add a new baseline, remove old, every so often
            # Draw until the new baseline is not already in the current selection.
            while True:
                newbaseline=rs.random_integers(0,fullbls-1,[1])
                if len(histbaselines)==0 or (newbaseline not in histbaselines[-1]):
                    break
            # Grow the selection up to islot_new_bls entries, then rotate (drop oldest).
            if (len(histbaselines)==0):
                newbaselines=np.r_[newbaseline]
            elif (len(histbaselines[-1])<islot_new_bls):
                newbaselines=np.r_[histbaselines[-1],newbaseline]
            else:
                newbaselines=np.r_[histbaselines[-1][1:],newbaseline]
            histbaselines.append(newbaselines)
        # Write this timestamp's data for the currently selected baselines.
        mx[it%nslots,histbaselines[-1],:]=fulldata[it,histbaselines[-1],:]
        # Check retrieval of recent history: still-selected baselines must round-trip,
        # deselected ones must read back as zeros.
        for cit in range(islot_new_bls):
            if (cit>=len(histbaselines)):
                break
            hasthesebaselines=list(set(histbaselines[-1-cit]) & set(histbaselines[-1]))
            missingbaselines=list(set(histbaselines[-1-cit]) - set(histbaselines[-1]))
            retrieved=mx[(it-cit)%nslots,hasthesebaselines,:]
            assert_array_equal(retrieved, fulldata[it-cit,hasthesebaselines,:], 'SparseArray getitem test failed')
            missingretrieved=mx[(it-cit)%nslots,missingbaselines,:]
            assert_array_equal(missingretrieved,np.zeros(missingretrieved.shape,dtype=np.int32), 'SparseArray missing baseline test failed')
def test_sparsearray_indexing(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6):
    """Exercises SparseArray __setitem__/__getitem__ for every combination of
    scalar and slice indices on the (slot, baseline, channel) axes."""
    mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)
    rs = np.random.RandomState(seed=0)  # deterministic reference data
    fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])
    mx[0,0,0]=fulldata[0,0,0]
    assert_array_equal(mx[0,0,0], fulldata[0,0,0], 'SparseArray [scalar,scalar,scalar] index test failed')
    mx[1,1,:]=fulldata[1,1,:]
    assert_array_equal(mx[1,1,:], fulldata[1,1,:], 'SparseArray [scalar,scalar,slice] index test 2 failed')#baseline change so previous assignment purged (in future may retain until running out of memory and necessary to purge)
    mx[2,1,:]=fulldata[2,1,:]
    assert_array_equal(mx[1:3,1,:], fulldata[1:3,1,:], 'SparseArray retain old value test failed')#assign to same baseline so previous slot value remain
    mx[3,:maxbaselines,0]=fulldata[3,:maxbaselines,0]
    assert_array_equal(mx[3,:maxbaselines,0], fulldata[3,:maxbaselines,0], 'SparseArray [scalar,slice,scalar] index test failed')
    mx[:,1,3]=fulldata[:nslots,1,3]
    assert_array_equal(mx[:,1,3], fulldata[:nslots,1,3], 'SparseArray [slice,scalar,scalar] index test failed')
    mx[:,1,:]=fulldata[:nslots,1,:]
    assert_array_equal(mx[:,1,:], fulldata[:nslots,1,:], 'SparseArray [slice,scalar,slice] index test failed')
    mx[:,1:maxbaselines,:]=fulldata[2:nslots+2,1:maxbaselines,:]
    assert_array_equal(mx[:,1:maxbaselines,:], fulldata[2:nslots+2,1:maxbaselines,:], 'SparseArray [slice,slice,slice] index test failed')
| 56.528571 | 228 | 0.700531 |
import numpy as np
from numpy.testing import assert_array_equal
from katsdpdisp.data import SparseArray
def test_sparsearray(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6,islot_new_bls=6):
mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)
rs = np.random.RandomState(seed=0)
fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])
histbaselines=[]
for it in range(fullslots):
if it%islot_new_bls==0:
while True:
newbaseline=rs.random_integers(0,fullbls-1,[1])
if len(histbaselines)==0 or (newbaseline not in histbaselines[-1]):
break
if (len(histbaselines)==0):
newbaselines=np.r_[newbaseline]
elif (len(histbaselines[-1])<islot_new_bls):
newbaselines=np.r_[histbaselines[-1],newbaseline]
else:
newbaselines=np.r_[histbaselines[-1][1:],newbaseline]
histbaselines.append(newbaselines)
mx[it%nslots,histbaselines[-1],:]=fulldata[it,histbaselines[-1],:]
for cit in range(islot_new_bls):
if (cit>=len(histbaselines)):
break
hasthesebaselines=list(set(histbaselines[-1-cit]) & set(histbaselines[-1]))
missingbaselines=list(set(histbaselines[-1-cit]) - set(histbaselines[-1]))
retrieved=mx[(it-cit)%nslots,hasthesebaselines,:]
assert_array_equal(retrieved, fulldata[it-cit,hasthesebaselines,:], 'SparseArray getitem test failed')
missingretrieved=mx[(it-cit)%nslots,missingbaselines,:]
assert_array_equal(missingretrieved,np.zeros(missingretrieved.shape,dtype=np.int32), 'SparseArray missing baseline test failed')
def test_sparsearray_indexing(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6):
mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)
rs = np.random.RandomState(seed=0)
fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])
mx[0,0,0]=fulldata[0,0,0]
assert_array_equal(mx[0,0,0], fulldata[0,0,0], 'SparseArray [scalar,scalar,scalar] index test failed')
mx[1,1,:]=fulldata[1,1,:]
assert_array_equal(mx[1,1,:], fulldata[1,1,:], 'SparseArray [scalar,scalar,slice] index test 2 failed')
mx[2,1,:]=fulldata[2,1,:]
assert_array_equal(mx[1:3,1,:], fulldata[1:3,1,:], 'SparseArray retain old value test failed')
mx[3,:maxbaselines,0]=fulldata[3,:maxbaselines,0]
assert_array_equal(mx[3,:maxbaselines,0], fulldata[3,:maxbaselines,0], 'SparseArray [scalar,slice,scalar] index test failed')
mx[:,1,3]=fulldata[:nslots,1,3]
assert_array_equal(mx[:,1,3], fulldata[:nslots,1,3], 'SparseArray [slice,scalar,scalar] index test failed')
mx[:,1,:]=fulldata[:nslots,1,:]
assert_array_equal(mx[:,1,:], fulldata[:nslots,1,:], 'SparseArray [slice,scalar,slice] index test failed')
mx[:,1:maxbaselines,:]=fulldata[2:nslots+2,1:maxbaselines,:]
assert_array_equal(mx[:,1:maxbaselines,:], fulldata[2:nslots+2,1:maxbaselines,:], 'SparseArray [slice,slice,slice] index test failed')
| true | true |
f72897ce8776833e34cd278e916224124f6b7c16 | 4,321 | py | Python | graalpython/lib-graalpython/property.py | muellren/graalpython | 9104425805f1d38ad7a521c75e53798a3b79b4f0 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | graalpython/lib-graalpython/property.py | muellren/graalpython | 9104425805f1d38ad7a521c75e53798a3b79b4f0 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | graalpython/lib-graalpython/property.py | muellren/graalpython | 9104425805f1d38ad7a521c75e53798a3b79b4f0 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | # Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class property(object):
    """
    property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
    fget is a function to be used for getting an attribute value, and likewise
    fset is a function for setting, and fdel a function for del'ing, an
    attribute. Typical use is to define a managed attribute x:
    class C(object):
        def getx(self): return self._x
        def setx(self, value): self._x = value
        def delx(self): del self._x
        x = property(getx, setx, delx, "I'm the 'x' property.")
    Decorators make defining new properties or modifying existing ones easy:
    class C(object):
        @property
        def x(self):
            "I am the 'x' property."
            return self._x
        @x.setter
        def x(self, value):
            self._x = value
        @x.deleter
        def x(self):
            del self._x
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None, name=None):
        # Accessors are stored name-mangled (_property__get etc.) so
        # subclasses cannot accidentally clobber them.
        self.__get = fget
        self.__set = fset
        self.__delete = fdel
        self.doc = doc
        self.name = name
        self._owner = None  # set lazily on first descriptor access

    def __get__(self, instance, owner):
        # Remember the owning class for error/repr messages.
        if self._owner is None:
            self._owner = owner
        # Class-level access (instance is None) returns the descriptor itself,
        # matching CPython behaviour.
        if instance is None:
            return self
        if self.__get is None:
            raise AttributeError("unreadable attribute")
        return self.__get(instance)

    def __set__(self, instance, value):
        if self.__set is None:
            raise AttributeError("attribute '{}' of '{}' objects is not writable".format(
                self.name, getattr(self._owner, "__name__", str(self._owner))))
        return self.__set(instance, value)

    def __delete__(self, instance):
        if self.__delete is None:
            raise AttributeError("can't delete attribute")
        return self.__delete(instance)

    # NOTE(review): CPython's setter/deleter/getter return a *new* property;
    # this shim mutates in place and returns self — confirm that the
    # divergence is intended for the runtime.
    def setter(self, func):
        self.__set = func
        return self

    def deleter(self, func):
        self.__delete = func
        return self

    def getter(self, func):
        self.__get = func
        return self

    def __repr__(self):
        # Joining on "'" wraps name and owner in single quotes:
        # <property 'name' of 'Owner' objects>
        return "'".join([
            "<property ",
            str(self.name),
            " of ",
            getattr(self._owner, "__name__", str(self._owner)),
            " objects>"
        ])
| 36.618644 | 89 | 0.660495 |
class property(object):
def __init__(self, fget=None, fset=None, fdel=None, doc=None, name=None):
self.__get = fget
self.__set = fset
self.__delete = fdel
self.doc = doc
self.name = name
self._owner = None
def __get__(self, instance, owner):
if self._owner is None:
self._owner = owner
if instance is None:
return self
if self.__get is None:
raise AttributeError("unreadable attribute")
return self.__get(instance)
def __set__(self, instance, value):
if self.__set is None:
raise AttributeError("attribute '{}' of '{}' objects is not writable".format(
self.name, getattr(self._owner, "__name__", str(self._owner))))
return self.__set(instance, value)
def __delete__(self, instance):
if self.__delete is None:
raise AttributeError("can't delete attribute")
return self.__delete(instance)
def setter(self, func):
self.__set = func
return self
def deleter(self, func):
self.__delete = func
return self
def getter(self, func):
self.__get = func
return self
def __repr__(self):
return "'".join([
"<property ",
str(self.name),
" of ",
getattr(self._owner, "__name__", str(self._owner)),
" objects>"
])
| true | true |
f7289806ab4063c4ecf5b399d38eaefb24559333 | 500 | py | Python | sameproject/ops/functions/options.py | SAME-Project/same-project | 6fb6fdab73d98e1ba8f622c4033dbd8cd351b0f6 | [
"Apache-2.0"
] | 8 | 2021-12-17T18:26:24.000Z | 2022-03-16T18:21:04.000Z | sameproject/ops/functions/options.py | SAME-Project/same-project | 6fb6fdab73d98e1ba8f622c4033dbd8cd351b0f6 | [
"Apache-2.0"
] | 45 | 2021-12-18T08:28:56.000Z | 2022-03-31T21:24:45.000Z | sameproject/ops/functions/options.py | SAME-Project/same-project | 6fb6fdab73d98e1ba8f622c4033dbd8cd351b0f6 | [
"Apache-2.0"
] | 5 | 2021-12-17T20:08:38.000Z | 2022-03-21T13:51:06.000Z | from sameproject.ops.runtime_options import register_option
register_option(
"functions_subscription_id",
"Azure subscription ID in which to provision backend functions.",
backend="functions",
schema={
"nullable": True,
"type": "string",
"regex": r"^[\d\w-]+",
},
)
register_option(
"functions_skip_provisioning",
"Skip provisioning of azure functions resources, to be used only if they already exist.",
backend="functions",
type=bool,
)
| 25 | 93 | 0.672 | from sameproject.ops.runtime_options import register_option
register_option(
"functions_subscription_id",
"Azure subscription ID in which to provision backend functions.",
backend="functions",
schema={
"nullable": True,
"type": "string",
"regex": r"^[\d\w-]+",
},
)
register_option(
"functions_skip_provisioning",
"Skip provisioning of azure functions resources, to be used only if they already exist.",
backend="functions",
type=bool,
)
| true | true |
f7289866210609234c7d88389a2b7096438ef21c | 1,522 | py | Python | metaworld/policies/sawyer_coffee_pull_v2_policy.py | rmrafailov/metaworld | 463f1afb1bffbe1fa6b50715ee4a1eeff7c4f463 | [
"MIT"
] | 3 | 2021-06-25T03:35:59.000Z | 2022-03-02T00:08:57.000Z | metaworld/policies/sawyer_coffee_pull_v2_policy.py | zchuning/metaworld | b2cd055e5f2413ec6d66ef29e45d05af989dca3b | [
"MIT"
] | null | null | null | metaworld/policies/sawyer_coffee_pull_v2_policy.py | zchuning/metaworld | b2cd055e5f2413ec6d66ef29e45d05af989dca3b | [
"MIT"
] | 1 | 2021-11-25T14:55:37.000Z | 2021-11-25T14:55:37.000Z | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerCoffeePullV2Policy(Policy):
    """Scripted policy for the coffee-pull task: grasp the mug, then pull it away.

    Coordinates appear to be XYZ positions in the environment's world frame --
    TODO confirm against the metaworld environment definition.
    """
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Split the flat observation vector into named components.
        return {
            'hand_pos': obs[:3],
            'mug_pos': obs[3:6],
            'unused_info': obs[6:],
        }
    def get_action(self, obs):
        """Return the action array: a position delta plus a gripper effort."""
        o_d = self._parse_obs(obs)
        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        # p=10. is the gain passed to `move` when chasing the desired position.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array
    @staticmethod
    def _desired_pos(o_d):
        # Target-position state machine: align above the mug, descend onto it,
        # then return pull targets with y=.6 once the hand is past y=.65.
        pos_curr = o_d['hand_pos']
        # Offset from the raw mug position to the grasp point -- presumably
        # tuned for this object; verify against the environment.
        pos_mug = o_d['mug_pos'] + np.array([-.005, .0, .05])
        if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06:
            # Not yet aligned in XY: hover 0.15 above the grasp point.
            return pos_mug + np.array([.0, .0, .15])
        elif abs(pos_curr[2] - pos_mug[2]) > 0.02:
            # Aligned in XY: descend to the grasp point.
            return pos_mug
        elif pos_curr[1] > .65:
            return np.array([.5, .6, .1])
        else:
            return np.array([pos_curr[0] - .1, .6, .1])
    @staticmethod
    def _grab_effort(o_d):
        # -1. (presumably "gripper open") while far from the grasp point,
        # a fixed .7 closing effort once within tolerance.
        pos_curr = o_d['hand_pos']
        pos_mug = o_d['mug_pos'] + np.array([.01, .0, .05])
        if np.linalg.norm(pos_curr[:2] - pos_mug[:2]) > 0.06 or \
                abs(pos_curr[2] - pos_mug[2]) > 0.1:
            return -1.
        else:
            return .7
| 27.672727 | 89 | 0.543364 | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerCoffeePullV2Policy(Policy):
    """Scripted policy for the coffee-pull task: grasp the mug, then pull it away."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Split the flat observation vector into named components.
        return {
            'hand_pos': obs[:3],
            'mug_pos': obs[3:6],
            'unused_info': obs[6:],
        }

    def get_action(self, obs):
        """Return the action array: a position delta plus a gripper effort."""
        parsed = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        target = self._desired_pos(parsed)
        action['delta_pos'] = move(parsed['hand_pos'], to_xyz=target, p=10.)
        action['grab_effort'] = self._grab_effort(parsed)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        # Align above the mug, descend onto it, then pull toward y = .6.
        hand = o_d['hand_pos']
        grasp_point = o_d['mug_pos'] + np.array([-.005, .0, .05])
        if np.linalg.norm(hand[:2] - grasp_point[:2]) > 0.06:
            # Not yet aligned in XY: hover above the grasp point.
            return grasp_point + np.array([.0, .0, .15])
        if abs(hand[2] - grasp_point[2]) > 0.02:
            # Aligned in XY: descend to the grasp point.
            return grasp_point
        if hand[1] > .65:
            return np.array([.5, .6, .1])
        return np.array([hand[0] - .1, .6, .1])

    @staticmethod
    def _grab_effort(o_d):
        # Keep the gripper at -1. until close to the grasp point, then close at .7.
        hand = o_d['hand_pos']
        grasp_point = o_d['mug_pos'] + np.array([.01, .0, .05])
        misaligned = np.linalg.norm(hand[:2] - grasp_point[:2]) > 0.06
        too_far_vertically = abs(hand[2] - grasp_point[2]) > 0.1
        return -1. if (misaligned or too_far_vertically) else .7
| true | true |
f728989c89ad4ab3040253e2ff03267c79b8da4a | 4,131 | py | Python | setup.py | dfm/celeritelib | c6874e23367d47743c27ae2ea432bee1dbe864f1 | [
"MIT"
] | null | null | null | setup.py | dfm/celeritelib | c6874e23367d47743c27ae2ea432bee1dbe864f1 | [
"MIT"
] | null | null | null | setup.py | dfm/celeritelib | c6874e23367d47743c27ae2ea432bee1dbe864f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Inspired by:
# https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
import codecs
import os
import re
import sys
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import find_packages, setup
# PROJECT SPECIFIC
NAME = "celerite2"
PACKAGES = find_packages(where="python")
# The package __init__ carries the dunder metadata (author, uri, ...) that
# find_meta() extracts further down.
META_PATH = os.path.join("python", "celerite2", "__init__.py")
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
]
INSTALL_REQUIRES = ["numpy>=1.13.0"]
SETUP_REQUIRES = INSTALL_REQUIRES + [
    "pybind11>=2.4",
    "setuptools>=40.6.0",
    "setuptools_scm",
    "wheel",
]
# Optional dependency groups, exposed as pip extras (e.g. celerite2[test]).
EXTRA_REQUIRE = {
    "style": ["isort", "black", "black_nbconvert"],
    "test": [
        "coverage[toml]",
        "pytest",
        "pytest-cov",
        "scipy",
        "celerite>=0.3.1",
    ],
    "pymc3": [
        "pymc3>=3.9, <3.12",
        "aesara-theano-fallback>=0.0.2",
    ],
    "jax": ["jax", "jaxlib"],
    "release": ["pep517", "twine"],
    "docs": [
        "sphinx",
        "sphinx-material",
        "sphinx_copybutton",
        "rtds_action",
        "nbsphinx",
        "breathe",
        "ipython",
    ],
    "tutorials": [
        "jupytext",
        "jupyter",
        "nbconvert",
        "matplotlib",
        "scipy",
        "emcee",
        "pymc3>=3.9, <3.12",
        "aesara-theano-fallback>=0.0.2",
        "tqdm",
        "numpyro",
    ],
}
# "theano" is kept as an alias of the "pymc3" extra.
EXTRA_REQUIRE["theano"] = EXTRA_REQUIRE["pymc3"]
EXTRA_REQUIRE["dev"] = (
    EXTRA_REQUIRE["style"]
    + EXTRA_REQUIRE["test"]
    + EXTRA_REQUIRE["release"]
    + ["pre-commit", "nbstripout", "flake8"]
)
include_dirs = [
    "c++/include",
    "c++/vendor/eigen",
    "python/celerite2",
]
# Read the Docs only needs the documentation build, so the C++ extension
# modules are skipped there.
if "READTHEDOCS" in os.environ:
    ext_modules = []
else:
    ext_modules = [
        Pybind11Extension(
            "celerite2.driver",
            ["python/celerite2/driver.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
        Pybind11Extension(
            "celerite2.backprop",
            ["python/celerite2/backprop.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
        Pybind11Extension(
            "celerite2.jax.xla_ops",
            ["python/celerite2/jax/xla_ops.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
    ]
# END PROJECT SPECIFIC
HERE = os.path.dirname(os.path.realpath(__file__))
def read(*parts):
    """Return the UTF-8 decoded contents of the file at HERE/<parts...>."""
    target = os.path.join(HERE, *parts)
    with codecs.open(target, "rb", "utf-8") as handle:
        return handle.read()
def find_meta(meta, meta_file=read(META_PATH)):
    """Extract a dunder metadata value (e.g. ``__version__``) from *meta_file*.

    The default argument is evaluated once at import time, caching the
    contents of the package ``__init__``.  Raises RuntimeError when the
    requested dunder is absent.
    """
    pattern = r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta)
    match = re.search(pattern, meta_file, re.M)
    if match is None:
        raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
    return match.group(1)
if __name__ == "__main__":
    # All metadata except the version is pulled out of the package __init__
    # via find_meta(); the version itself is managed by setuptools_scm, which
    # also writes a celerite2_version.py file into the package.
    setup(
        name=NAME,
        use_scm_version={
            "write_to": os.path.join(
                "python", NAME, "{0}_version.py".format(NAME)
            ),
            "write_to_template": '__version__ = "{version}"\n',
        },
        author=find_meta("author"),
        author_email=find_meta("email"),
        maintainer=find_meta("author"),
        maintainer_email=find_meta("email"),
        url=find_meta("uri"),
        license=find_meta("license"),
        description=find_meta("description"),
        long_description=read("README.md"),
        long_description_content_type="text/markdown",
        packages=PACKAGES,
        package_dir={"": "python"},
        include_package_data=True,
        python_requires=">=3.6",
        install_requires=INSTALL_REQUIRES,
        setup_requires=SETUP_REQUIRES,
        extras_require=EXTRA_REQUIRE,
        classifiers=CLASSIFIERS,
        zip_safe=False,
        ext_modules=ext_modules,
        cmdclass={"build_ext": build_ext},
    )
| 26.14557 | 79 | 0.574195 |
import codecs
import os
import re
import sys
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import find_packages, setup
# Packaging configuration for the celerite2 distribution: pip extras,
# pybind11 C++ extensions, and setuptools_scm-managed versioning.
NAME = "celerite2"
PACKAGES = find_packages(where="python")
# The package __init__ carries the dunder metadata read by find_meta() below.
META_PATH = os.path.join("python", "celerite2", "__init__.py")
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
]
INSTALL_REQUIRES = ["numpy>=1.13.0"]
SETUP_REQUIRES = INSTALL_REQUIRES + [
    "pybind11>=2.4",
    "setuptools>=40.6.0",
    "setuptools_scm",
    "wheel",
]
# Optional dependency groups, exposed as pip extras (e.g. celerite2[test]).
EXTRA_REQUIRE = {
    "style": ["isort", "black", "black_nbconvert"],
    "test": [
        "coverage[toml]",
        "pytest",
        "pytest-cov",
        "scipy",
        "celerite>=0.3.1",
    ],
    "pymc3": [
        "pymc3>=3.9, <3.12",
        "aesara-theano-fallback>=0.0.2",
    ],
    "jax": ["jax", "jaxlib"],
    "release": ["pep517", "twine"],
    "docs": [
        "sphinx",
        "sphinx-material",
        "sphinx_copybutton",
        "rtds_action",
        "nbsphinx",
        "breathe",
        "ipython",
    ],
    "tutorials": [
        "jupytext",
        "jupyter",
        "nbconvert",
        "matplotlib",
        "scipy",
        "emcee",
        "pymc3>=3.9, <3.12",
        "aesara-theano-fallback>=0.0.2",
        "tqdm",
        "numpyro",
    ],
}
# "theano" is kept as an alias of the "pymc3" extra.
EXTRA_REQUIRE["theano"] = EXTRA_REQUIRE["pymc3"]
EXTRA_REQUIRE["dev"] = (
    EXTRA_REQUIRE["style"]
    + EXTRA_REQUIRE["test"]
    + EXTRA_REQUIRE["release"]
    + ["pre-commit", "nbstripout", "flake8"]
)
include_dirs = [
    "c++/include",
    "c++/vendor/eigen",
    "python/celerite2",
]
# The C++ extensions are skipped on Read the Docs (docs-only build).
if "READTHEDOCS" in os.environ:
    ext_modules = []
else:
    ext_modules = [
        Pybind11Extension(
            "celerite2.driver",
            ["python/celerite2/driver.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
        Pybind11Extension(
            "celerite2.backprop",
            ["python/celerite2/backprop.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
        Pybind11Extension(
            "celerite2.jax.xla_ops",
            ["python/celerite2/jax/xla_ops.cpp"],
            include_dirs=include_dirs,
            language="c++",
        ),
    ]
HERE = os.path.dirname(os.path.realpath(__file__))
def read(*parts):
    # Return the UTF-8 decoded contents of the file at HERE/<parts...>.
    with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
        return f.read()
def find_meta(meta, meta_file=read(META_PATH)):
    # Extract a dunder value such as __version__ from meta_file.  Note the
    # default argument is evaluated once at import time, caching the
    # package __init__ contents.
    meta_match = re.search(
        r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), meta_file, re.M
    )
    if meta_match:
        return meta_match.group(1)
    raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
    # Version handled by setuptools_scm; remaining metadata read from __init__.
    setup(
        name=NAME,
        use_scm_version={
            "write_to": os.path.join(
                "python", NAME, "{0}_version.py".format(NAME)
            ),
            "write_to_template": '__version__ = "{version}"\n',
        },
        author=find_meta("author"),
        author_email=find_meta("email"),
        maintainer=find_meta("author"),
        maintainer_email=find_meta("email"),
        url=find_meta("uri"),
        license=find_meta("license"),
        description=find_meta("description"),
        long_description=read("README.md"),
        long_description_content_type="text/markdown",
        packages=PACKAGES,
        package_dir={"": "python"},
        include_package_data=True,
        python_requires=">=3.6",
        install_requires=INSTALL_REQUIRES,
        setup_requires=SETUP_REQUIRES,
        extras_require=EXTRA_REQUIRE,
        classifiers=CLASSIFIERS,
        zip_safe=False,
        ext_modules=ext_modules,
        cmdclass={"build_ext": build_ext},
    )
| true | true |
f7289931e85f5002dcdb59e6ca982e243c9c3105 | 43 | py | Python | first_digit_after_dot.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | first_digit_after_dot.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | first_digit_after_dot.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | n = float(input())
print(int(n * 10) % 10) | 14.333333 | 23 | 0.55814 | n = float(input())
print(int(n * 10) % 10) | true | true |
f7289b84c8a95d21008027ff7a1614f1bb727a13 | 717 | py | Python | stats/data.py | AndreeaMutu/Python-Baseball | 6ca5e5006fd01ffa5b55c4859ebad7251a1f35a6 | [
"MIT"
] | null | null | null | stats/data.py | AndreeaMutu/Python-Baseball | 6ca5e5006fd01ffa5b55c4859ebad7251a1f35a6 | [
"MIT"
] | null | null | null | stats/data.py | AndreeaMutu/Python-Baseball | 6ca5e5006fd01ffa5b55c4859ebad7251a1f35a6 | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
# Collect every *.EVE event file under ./games (presumably Retrosheet-style
# baseball event logs -- TODO confirm), in sorted order for determinism.
game_files = glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
    game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])
    game_frames.append(game_frame)
games = pd.concat(game_frames)
# '??' marks an unknown value in multi5; blank it out.
games.loc[games['multi5']=='??',['multi5']]=''
# multi2 values matching ".LS<4-digit year><5 digits>" carry the game id:
# group 1 is the full id, group 2 the year; other rows yield NaN.
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
# Forward-fill so every event row inherits the id of its game header row.
identifiers = identifiers.fillna(method='ffill')
identifiers.columns=['game_id', 'year']
games = pd.concat([games, identifiers], sort=False, axis=1)
games = games.fillna(' ')
# Categorical dtype saves memory for the small set of record types.
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| 31.173913 | 108 | 0.687587 | import os
import glob
import pandas as pd
# Load all *.EVE event files under ./games into one DataFrame, attach the
# forward-filled game id/year parsed from multi2, and preview the result.
game_files = glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
    game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])
    game_frames.append(game_frame)
games = pd.concat(game_frames)
# '??' marks an unknown value in multi5; blank it out.
games.loc[games['multi5']=='??',['multi5']]=''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns=['game_id', 'year']
games = pd.concat([games, identifiers], sort=False, axis=1)
games = games.fillna(' ')
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| true | true |
f7289c3ade2665a6c088dfd09ebba24c802d3820 | 136 | py | Python | src/pkg1/__main__.py | p--q/PkgExample | 07726905f963bc710f357414e449001b83f01707 | [
"Apache-2.0"
] | null | null | null | src/pkg1/__main__.py | p--q/PkgExample | 07726905f963bc710f357414e449001b83f01707 | [
"Apache-2.0"
] | null | null | null | src/pkg1/__main__.py | p--q/PkgExample | 07726905f963bc710f357414e449001b83f01707 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
from . import main  # NOTE: when running under PyDev this must instead be `from pkg1 import main`.
# Run the package entry point and propagate its return value as the exit code.
sys.exit(main())
| 22.666667 | 64 | 0.698529 |
import sys
from . import main
# Run the package entry point and propagate its return value as the exit code.
sys.exit(main())
| true | true |
f7289ced641d513a82cb061fb442261cebeeebdc | 3,949 | py | Python | main_EUROC.py | KleistvonLiu/denoise-imu-gyro | 76e75e194a3804c473be077663b4a668fc0b7c28 | [
"MIT"
] | 154 | 2020-02-24T13:45:05.000Z | 2022-03-30T15:01:00.000Z | main_EUROC.py | KleistvonLiu/denoise-imu-gyro | 76e75e194a3804c473be077663b4a668fc0b7c28 | [
"MIT"
] | 11 | 2020-05-07T15:59:51.000Z | 2022-03-16T12:46:50.000Z | main_EUROC.py | KleistvonLiu/denoise-imu-gyro | 76e75e194a3804c473be077663b4a668fc0b7c28 | [
"MIT"
] | 50 | 2020-02-26T16:10:21.000Z | 2022-03-21T06:25:39.000Z | import os
import torch
import src.learning as lr
import src.networks as sn
import src.losses as sl
import src.dataset as ds
import numpy as np
base_dir = os.path.dirname(os.path.realpath(__file__))
# Root of the raw EuRoC dataset -- edit to your local path.
data_dir = '/path/to/EUROC/dataset'
# test a given network:
# address = os.path.join(base_dir, 'results/EUROC/2020_02_18_16_52_55/')
# or test the last trained network:
address = "last"
################################################################################
# Network parameters
################################################################################
net_class = sn.GyroNet
net_params = {
    'in_dim': 6,
    'out_dim': 3,
    'c0': 16,
    'dropout': 0.1,
    'ks': [7, 7, 7, 7],
    'ds': [4, 4, 4],
    'momentum': 0.1,
    # per-axis gyro standard deviations in rad/s, presumably used for
    # normalization inside GyroNet -- TODO confirm
    'gyro_std': [1*np.pi/180, 2*np.pi/180, 5*np.pi/180],
}
################################################################################
# Dataset parameters
################################################################################
dataset_class = ds.EUROCDataset
dataset_params = {
    # where the raw data lives
    'data_dir': data_dir,
    # where to record preloaded data
    'predata_dir': os.path.join(base_dir, 'data/EUROC'),
    # train, val and test sequences (val reuses the training sequences)
    'train_seqs': [
        'MH_01_easy',
        'MH_03_medium',
        'MH_05_difficult',
        'V1_02_medium',
        'V2_01_easy',
        'V2_03_difficult'
    ],
    'val_seqs': [
        'MH_01_easy',
        'MH_03_medium',
        'MH_05_difficult',
        'V1_02_medium',
        'V2_01_easy',
        'V2_03_difficult',
    ],
    'test_seqs': [
        'MH_02_easy',
        'MH_04_difficult',
        'V2_02_medium',
        'V1_03_difficult',
        'V1_01_easy',
    ],
    # size of trajectory during training
    'N': 32 * 500, # should be integer * 'max_train_freq'
    'min_train_freq': 16,
    'max_train_freq': 32,
}
################################################################################
# Training parameters
################################################################################
train_params = {
    'optimizer_class': torch.optim.Adam,
    'optimizer': {
        'lr': 0.01,
        'weight_decay': 1e-1,
        'amsgrad': False,
    },
    'loss_class': sl.GyroLoss,
    'loss': {
        # min_N / max_N are the log2 of the train-frequency bounds above
        'min_N': int(np.log2(dataset_params['min_train_freq'])),
        'max_N': int(np.log2(dataset_params['max_train_freq'])),
        'w': 1e6,
        'target': 'rotation matrix',
        'huber': 0.005,
        # sampling period in seconds (0.005 s = 200 Hz), presumably the IMU
        # rate -- TODO confirm
        'dt': 0.005,
    },
    'scheduler_class': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
    'scheduler': {
        'T_0': 600,
        'T_mult': 2,
        'eta_min': 1e-3,
    },
    'dataloader': {
        'batch_size': 10,
        'pin_memory': False,
        'num_workers': 0,
        'shuffle': False,
    },
    # frequency of validation step
    'freq_val': 600,
    # total number of epochs
    'n_epochs': 1800,
    # where to record results
    'res_dir': os.path.join(base_dir, "results/EUROC"),
    # where to record the Tensorboard log
    'tb_dir': os.path.join(base_dir, "results/runs/EUROC"),
}
################################################################################
# Train on training data set
################################################################################
# learning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],
#     train_params['tb_dir'], net_class, net_params, None,
#     train_params['loss']['dt'])
# learning_process.train(dataset_class, dataset_params, train_params)
################################################################################
# Test on full data set
################################################################################
# Load the network selected by `address` ("last" = most recently trained run)
# and evaluate it.
learning_process = lr.GyroLearningBasedProcessing(train_params['res_dir'],
    train_params['tb_dir'], net_class, net_params, address=address,
    dt=train_params['loss']['dt'])
learning_process.test(dataset_class, dataset_params, ['test']) | 32.908333 | 80 | 0.488985 | import os
import torch
import src.learning as lr
import src.networks as sn
import src.losses as sl
import src.dataset as ds
import numpy as np
base_dir = os.path.dirname(os.path.realpath(__file__))
# Root of the raw EuRoC dataset -- edit to your local path.
data_dir = '/path/to/EUROC/dataset'
# "last" selects the most recently trained network for evaluation.
address = "last"
| true | true |
f7289e4a6b181dac115dae05e072db607fbbafe4 | 10,051 | py | Python | tests/ignite/contrib/handlers/test_polyaxon_logger.py | nzare/ignite | 002b595daa8a8345286c5e096c33e278948686a7 | [
"BSD-3-Clause"
] | 1 | 2020-08-29T16:49:36.000Z | 2020-08-29T16:49:36.000Z | tests/ignite/contrib/handlers/test_polyaxon_logger.py | alxlampe/ignite | b53c6aeef87754b3cd3638c91172b386dc73af12 | [
"BSD-3-Clause"
] | 5 | 2020-08-29T16:49:48.000Z | 2020-08-29T17:05:54.000Z | tests/ignite/contrib/handlers/test_polyaxon_logger.py | alxlampe/ignite | b53c6aeef87754b3cd3638c91172b386dc73af12 | [
"BSD-3-Clause"
] | 1 | 2020-10-15T06:21:01.000Z | 2020-10-15T06:21:01.000Z | import os
from unittest.mock import MagicMock, call
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import *
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
    """OutputHandler must reject loggers that are not PolyaxonLogger instances."""
    handler = OutputHandler("tag", output_transform=lambda x: x)
    engine, bad_logger = MagicMock(), MagicMock()
    with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
        handler(engine, bad_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
    """The transformed engine output is logged as "<tag>/<key>" at the current step."""
    engine = MagicMock()
    engine.state = State()
    engine.state.output = 12345
    engine.state.iteration = 123

    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OutputHandler("tag", output_transform=lambda x: x)
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})

    # A dict-producing transform uses its keys as metric names.
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
    """Exercise the metric_names modes: explicit list, tensor expansion,
    unsupported metric types, and the "all" shortcut."""
    # Case 1: explicit list -- each metric is logged as "<tag>/<name>";
    # a 0-d tensor is logged as its scalar value.
    wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
    # Case 2: a 1-D tensor metric is expanded element-wise into
    # "<tag>/<name>/<index>" entries.
    wrapper = OutputHandler("tag", metric_names=["a",])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
    mock_engine.state.iteration = 5
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True
    )
    # Case 3: a non-numeric metric triggers a UserWarning and is dropped,
    # while the numeric one is still logged.
    wrapper = OutputHandler("tag", metric_names=["a", "c"])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
    mock_engine.state.iteration = 7
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    with pytest.warns(UserWarning):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
    # Case 4: metric_names="all" logs every metric in engine.state.metrics.
    wrapper = OutputHandler("tag", metric_names="all")
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
    """metric_names and output_transform can be combined in one handler call."""
    engine = MagicMock()
    engine.state = State(metrics={"a": 12.23, "b": 23.45})
    engine.state.epoch = 5
    engine.state.output = 12345
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
    handler(engine, logger, Events.EPOCH_STARTED)
    assert logger.log_metrics.call_count == 1
    logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
    """A global_step_transform returning a non-int must raise TypeError."""
    engine = MagicMock()
    engine.state = State()
    engine.state.epoch = 5
    engine.state.output = 12345
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OutputHandler(
        "tag",
        output_transform=lambda x: {"loss": x},
        global_step_transform=lambda *args, **kwargs: "a",
    )
    with pytest.raises(TypeError, match="global_step must be int"):
        handler(engine, logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
    """The handler logs at the step value produced by global_step_transform."""
    engine = MagicMock()
    engine.state = State()
    engine.state.epoch = 5
    engine.state.output = 12345
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OutputHandler(
        "tag",
        output_transform=lambda x: {"loss": x},
        global_step_transform=lambda *args, **kwargs: 10,
    )
    handler(engine, logger, Events.EPOCH_STARTED)
    logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
    """global_step_from_engine(other) makes the handler log at *another*
    engine's epoch while values come from the handled engine."""
    mock_another_engine = MagicMock()
    mock_another_engine.state = State()
    mock_another_engine.state.epoch = 10
    mock_another_engine.state.output = 12.345
    wrapper = OutputHandler(
        "tag",
        output_transform=lambda x: {"loss": x},
        global_step_transform=global_step_from_engine(mock_another_engine),
    )
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 1
    mock_engine.state.output = 0.123
    # First fire: step is taken from the other engine (10), value from mock_engine.
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
    # Second fire: both the external step and the output have advanced.
    mock_another_engine.state.epoch = 11
    mock_engine.state.output = 1.123
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 2
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
def test_optimizer_params_handler_wrong_setup():
    """OptimizerParamsHandler rejects a non-optimizer and a non-Polyaxon logger."""
    with pytest.raises(TypeError):
        OptimizerParamsHandler(optimizer=None)
    fake_optimizer = MagicMock(spec=torch.optim.Optimizer)
    handler = OptimizerParamsHandler(optimizer=fake_optimizer)
    engine, wrong_logger = MagicMock(), MagicMock()
    with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"):
        handler(engine, wrong_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
    """The optimizer's lr is logged per param group, optionally under a tag prefix."""
    optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
    engine = MagicMock()
    engine.state = State()
    engine.state.iteration = 123

    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})

    # With a tag, the metric name gains a "<tag>/" prefix.
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
    """Smoke test: a no-op PolyaxonLogger attached to a running Engine must not fail."""
    num_epochs = 5
    batches = list(range(50))
    loss_stream = iter(torch.rand(num_epochs * len(batches)))

    def update_fn(engine, batch):
        return next(loss_stream)

    def dummy_handler(engine, logger, event_name):
        step = engine.state.get_event_attrib_value(event_name)
        logger.log_metrics(step=step, **{"{}".format("test_value"): step})

    trainer = Engine(update_fn)
    plx_logger = PolyaxonLogger()
    plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
    trainer.run(batches, max_epochs=num_epochs)
def test_integration_as_context_manager():
    """Smoke test: driving the no-op PolyaxonLogger through its context manager."""
    num_epochs = 5
    batches = list(range(50))
    loss_stream = iter(torch.rand(num_epochs * len(batches)))

    def update_fn(engine, batch):
        return next(loss_stream)

    def dummy_handler(engine, logger, event_name):
        step = engine.state.get_event_attrib_value(event_name)
        logger.log_metrics(step=step, **{"{}".format("test_value"): step})

    with PolyaxonLogger() as plx_logger:
        trainer = Engine(update_fn)
        plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
        trainer.run(batches, max_epochs=num_epochs)
@pytest.fixture
def no_site_packages():
    """Temporarily hide site-packages and any already-imported polyaxon modules."""
    import sys

    saved_modules = {
        name: module for name, module in sys.modules.items() if "polyaxon" in name
    }
    for name in saved_modules:
        del sys.modules[name]
    saved_path = list(sys.path)
    sys.path = [entry for entry in sys.path if "site-packages" not in entry]
    yield "no_site_packages"
    # Restore interpreter state for the remaining tests.
    sys.path = saved_path
    sys.modules.update(saved_modules)
def test_no_polyaxon_client(no_site_packages):
    """With polyaxon-client unimportable, constructing the logger must raise."""
    expected = r"This contrib module requires polyaxon-client to be installed"
    with pytest.raises(RuntimeError, match=expected):
        PolyaxonLogger()
| 33.392027 | 119 | 0.718535 | import os
from unittest.mock import MagicMock, call
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import *
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
    # OutputHandler must reject loggers that are not PolyaxonLogger instances.
    wrapper = OutputHandler("tag", output_transform=lambda x: x)
    mock_logger = MagicMock()
    mock_engine = MagicMock()
    with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
    # The transformed engine output is logged as "<tag>/<key>" at the current step.
    wrapper = OutputHandler("tag", output_transform=lambda x: x)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.output = 12345
    mock_engine.state.iteration = 123
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})
    # A dict-producing transform uses its keys as metric names.
    wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
    # Case 1: explicit metric list, each logged as "<tag>/<name>".
    wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
    # Case 2: a 1-D tensor metric is expanded into "<tag>/<name>/<index>".
    wrapper = OutputHandler("tag", metric_names=["a",])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
    mock_engine.state.iteration = 5
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True
    )
    # Case 3: a non-numeric metric warns and is dropped; the numeric one is kept.
    wrapper = OutputHandler("tag", metric_names=["a", "c"])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
    mock_engine.state.iteration = 7
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    with pytest.warns(UserWarning):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
    # Case 4: metric_names="all" logs every metric in engine.state.metrics.
    wrapper = OutputHandler("tag", metric_names="all")
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
    # metric_names and output_transform can be combined in one handler call.
    wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
    mock_engine.state.epoch = 5
    mock_engine.state.output = 12345
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
    # A global_step_transform returning a non-int must raise TypeError.
    def global_step_transform(*args, **kwargs):
        return "a"
    wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5
    mock_engine.state.output = 12345
    with pytest.raises(TypeError, match="global_step must be int"):
        wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
    # The handler logs at the step value produced by global_step_transform.
    def global_step_transform(*args, **kwargs):
        return 10
    wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5
    mock_engine.state.output = 12345
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
    """``global_step_from_engine`` makes the handler read the step from a
    *different* engine than the one firing the event."""
    mock_another_engine = MagicMock()
    mock_another_engine.state = State()
    mock_another_engine.state.epoch = 10
    mock_another_engine.state.output = 12.345
    wrapper = OutputHandler(
        "tag",
        output_transform=lambda x: {"loss": x},
        global_step_transform=global_step_from_engine(mock_another_engine),
    )
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 1
    mock_engine.state.output = 0.123
    # First event: step comes from mock_another_engine (10), value from mock_engine.
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
    # Second event: the other engine's epoch advanced, so the step follows it.
    mock_another_engine.state.epoch = 11
    mock_engine.state.output = 1.123
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 2
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
def test_optimizer_params_handler_wrong_setup():
    """OptimizerParamsHandler rejects a non-optimizer argument (TypeError)
    and a logger that is not a PolyaxonLogger (RuntimeError)."""
    with pytest.raises(TypeError):
        OptimizerParamsHandler(optimizer=None)
    optimizer = MagicMock(spec=torch.optim.Optimizer)
    handler = OptimizerParamsHandler(optimizer=optimizer)
    mock_logger = MagicMock()  # deliberately NOT spec'd as PolyaxonLogger
    mock_engine = MagicMock()
    with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"):
        handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
    """Optimizer parameter values are logged as ``<param>/group_<i>``; a
    handler tag, when given, is prepended to the key."""
    optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
    wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.iteration = 123
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})
    # Same handler with a tag: the tag is prepended to the parameter key.
    wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
    """End-to-end smoke test: a real Engine run with a PolyaxonLogger and a
    custom handler attached must complete without errors."""
    n_epochs = 5
    data = list(range(50))
    losses = torch.rand(n_epochs * len(data))
    losses_iter = iter(losses)
    def update_fn(engine, batch):
        # One pre-generated loss value per iteration.
        return next(losses_iter)
    trainer = Engine(update_fn)
    plx_logger = PolyaxonLogger()
    def dummy_handler(engine, logger, event_name):
        global_step = engine.state.get_event_attrib_value(event_name)
        logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step})
    plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
    trainer.run(data, max_epochs=n_epochs)
def test_integration_as_context_manager():
    """Same end-to-end smoke test, but using PolyaxonLogger as a context
    manager (which closes the logger on exit)."""
    n_epochs = 5
    data = list(range(50))
    losses = torch.rand(n_epochs * len(data))
    losses_iter = iter(losses)
    def update_fn(engine, batch):
        # One pre-generated loss value per iteration.
        return next(losses_iter)
    with PolyaxonLogger() as plx_logger:
        trainer = Engine(update_fn)
        def dummy_handler(engine, logger, event_name):
            global_step = engine.state.get_event_attrib_value(event_name)
            logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step})
        plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
        trainer.run(data, max_epochs=n_epochs)
@pytest.fixture
def no_site_packages():
    """Temporarily hide installed polyaxon modules from the test.

    All cached ``*polyaxon*`` entries are removed from ``sys.modules`` and
    every ``site-packages`` directory is dropped from ``sys.path`` for the
    duration of the test; both are restored on teardown.
    """
    import sys
    saved_modules = {name: mod for name, mod in sys.modules.items() if "polyaxon" in name}
    for name in saved_modules:
        del sys.modules[name]
    saved_path = list(sys.path)
    sys.path = [entry for entry in sys.path if "site-packages" not in entry]
    yield "no_site_packages"
    # Teardown: restore the interpreter state exactly as it was.
    sys.path = saved_path
    sys.modules.update(saved_modules)
def test_no_polyaxon_client(no_site_packages):
    """Without polyaxon-client importable (see the ``no_site_packages``
    fixture), constructing the logger raises a helpful RuntimeError."""
    with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon-client to be installed"):
        PolyaxonLogger()
| true | true |
f7289e72bef01664b5299fbb7682aeb177fca247 | 29,390 | py | Python | Tests/varLib/varLib_test.py | benkiel/fonttools | d4cd8acf44fdff2f9dec3279810ac5db9ec705c2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Tests/varLib/varLib_test.py | benkiel/fonttools | d4cd8acf44fdff2f9dec3279810ac5db9ec705c2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Tests/varLib/varLib_test.py | benkiel/fonttools | d4cd8acf44fdff2f9dec3279810ac5db9ec705c2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont, newTable
from fontTools.varLib import build
from fontTools.varLib.mutator import instantiateVariableFont
from fontTools.varLib import main as varLib_main, load_masters
from fontTools.varLib import set_default_weight_width_slant
from fontTools.designspaceLib import (
DesignSpaceDocumentError, DesignSpaceDocument, SourceDescriptor,
)
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
import difflib
import os
import shutil
import sys
import tempfile
import unittest
import pytest
def reload_font(font):
    """Round-trip *font* through an in-memory stream and return the reload.

    Saving and re-parsing forces the final binary table layout, so tests
    compare what would actually be written to disk.
    """
    stream = BytesIO()
    font.save(stream)
    stream.seek(0)
    return TTFont(stream)
class BuildTest(unittest.TestCase):
    def __init__(self, methodName):
        """Initialize the TestCase and alias assertRaisesRegex on old Pythons."""
        unittest.TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
@staticmethod
def get_test_input(test_file_or_folder):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", test_file_or_folder)
@staticmethod
def get_test_output(test_file_or_folder):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", "test_results", test_file_or_folder)
@staticmethod
def get_file_list(folder, suffix, prefix=''):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
if p.startswith(prefix) and p.endswith(suffix):
file_list.append(os.path.abspath(os.path.join(folder, p)))
return file_list
    def temp_path(self, suffix):
        """Return a fresh, unique file path ending in *suffix* inside the
        shared temp dir (created on first use)."""
        self.temp_dir()  # ensure self.tempdir exists
        self.num_tempfiles += 1
        return os.path.join(self.tempdir,
                            "tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
def read_ttx(self, path):
lines = []
with open(path, "r", encoding="utf-8") as ttx:
for line in ttx.readlines():
# Elide ttFont attributes because ttLibVersion may change,
# and use os-native line separators so we can run difflib.
if line.startswith("<ttFont "):
lines.append("<ttFont>" + os.linesep)
else:
lines.append(line.rstrip() + os.linesep)
return lines
    def expect_ttx(self, font, expected_ttx, tables):
        """Dump *tables* of *font* to TTX and compare against *expected_ttx*.

        On mismatch, a unified diff is printed to stdout and the test fails.
        """
        path = self.temp_path(suffix=".ttx")
        font.saveXML(path, tables=tables)
        actual = self.read_ttx(path)
        expected = self.read_ttx(expected_ttx)
        if actual != expected:
            for line in difflib.unified_diff(
                    expected, actual, fromfile=expected_ttx, tofile=path):
                sys.stdout.write(line)
            self.fail("TTX output is different from expected")
    def check_ttx_dump(self, font, expected_ttx, tables, suffix):
        """Ensure the TTX dump is the same after saving and reloading the font."""
        path = self.temp_path(suffix=suffix)
        font.save(path)
        # Reload from disk so any save-time recalculation is reflected.
        self.expect_ttx(TTFont(path), expected_ttx, tables)
    def compile_font(self, path, suffix, temp_dir):
        """Compile the TTX file at *path* into a binary font in *temp_dir*.

        Returns the (font, savepath) pair.  BBox and timestamp recalculation
        are disabled so the output is deterministic across runs.
        """
        ttx_filename = os.path.basename(path)
        savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
        font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
        font.importXML(path)
        font.save(savepath, reorderTables=None)
        return font, savepath
    def _run_varlib_build_test(self, designspace_name, font_name, tables,
                               expected_ttx_name, save_before_dump=False,
                               post_process_master=None):
        """Compile the masters for *designspace_name*, build the variable
        font, and compare the requested *tables* against the expected TTX.

        *post_process_master*, if given, is called as (font, savepath) on
        each compiled master before the variable font is built.
        """
        suffix = '.ttf'
        ds_path = self.get_test_input(designspace_name + '.designspace')
        ufo_dir = self.get_test_input('master_ufo')
        ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
        self.temp_dir()
        ttx_paths = self.get_file_list(ttx_dir, '.ttx', font_name + '-')
        for path in ttx_paths:
            font, savepath = self.compile_font(path, suffix, self.tempdir)
            if post_process_master is not None:
                post_process_master(font, savepath)
        # The designspace references .ufo masters; redirect lookups to the
        # binary fonts just compiled into the temp dir.
        finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
        varfont, model, _ = build(ds_path, finder)
        if save_before_dump:
            # some data (e.g. counts printed in TTX inline comments) is only
            # calculated at compile time, so before we can compare the TTX
            # dumps we need to save to a temporary stream, and reload the font
            varfont = reload_font(varfont)
        expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
        self.expect_ttx(varfont, expected_ttx_path, tables)
        self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
# -----
# Tests
# -----
    def test_varlib_build_ttf(self):
        """Designspace file contains <axes> element."""
        # Compares the variation-related tables of the built VF.
        self._run_varlib_build_test(
            designspace_name='Build',
            font_name='TestFamily',
            tables=['GDEF', 'HVAR', 'MVAR', 'fvar', 'gvar'],
            expected_ttx_name='Build'
        )
    def test_varlib_build_no_axes_ttf(self):
        """Designspace file does not contain an <axes> element."""
        ds_path = self.get_test_input('InterpolateLayout3.designspace')
        # Building without axes is an error, not a silent fallback.
        with self.assertRaisesRegex(DesignSpaceDocumentError, "No axes defined"):
            build(ds_path)
def test_varlib_avar_single_axis(self):
"""Designspace file contains a 'weight' axis with <map> elements
modifying the normalization mapping. An 'avar' table is generated.
"""
test_name = 'BuildAvarSingleAxis'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_avar_with_identity_maps(self):
"""Designspace file contains two 'weight' and 'width' axes both with
<map> elements.
The 'width' axis only contains identity mappings, however the resulting
avar segment will not be empty but will contain the default axis value
maps: {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}.
This is to work around an issue with some rasterizers:
https://github.com/googlei18n/fontmake/issues/295
https://github.com/fonttools/fonttools/issues/1011
"""
test_name = 'BuildAvarIdentityMaps'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_avar_empty_axis(self):
"""Designspace file contains two 'weight' and 'width' axes, but
only one axis ('weight') has some <map> elements.
Even if no <map> elements are defined for the 'width' axis, the
resulting avar segment still contains the default axis value maps:
{-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}.
This is again to work around an issue with some rasterizers:
https://github.com/googlei18n/fontmake/issues/295
https://github.com/fonttools/fonttools/issues/1011
"""
test_name = 'BuildAvarEmptyAxis'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_build_feature_variations(self):
"""Designspace file contains <rules> element, used to build
GSUB FeatureVariations table.
"""
self._run_varlib_build_test(
designspace_name="FeatureVars",
font_name="TestFamily",
tables=["fvar", "GSUB"],
expected_ttx_name="FeatureVars",
save_before_dump=True,
)
def test_varlib_build_feature_variations_with_existing_rclt(self):
"""Designspace file contains <rules> element, used to build GSUB
FeatureVariations table. <rules> is specified to do its OT processing
"last", so a 'rclt' feature will be used or created. This test covers
the case when a 'rclt' already exists in the masters.
We dynamically add a 'rclt' feature to an existing set of test
masters, to avoid adding more test data.
The multiple languages are done to verify whether multiple existing
'rclt' features are updated correctly.
"""
def add_rclt(font, savepath):
features = """
languagesystem DFLT dflt;
languagesystem latn dflt;
languagesystem latn NLD;
feature rclt {
script latn;
language NLD;
lookup A {
sub uni0041 by uni0061;
} A;
language dflt;
lookup B {
sub uni0041 by uni0061;
} B;
} rclt;
"""
addOpenTypeFeaturesFromString(font, features)
font.save(savepath)
self._run_varlib_build_test(
designspace_name="FeatureVars",
font_name="TestFamily",
tables=["fvar", "GSUB"],
expected_ttx_name="FeatureVars_rclt",
save_before_dump=True,
post_process_master=add_rclt,
)
def test_varlib_gvar_explicit_delta(self):
"""The variable font contains a composite glyph odieresis which does not
need a gvar entry, because all its deltas are 0, but it must be added
anyway to work around an issue with macOS 10.14.
https://github.com/fonttools/fonttools/issues/1381
"""
test_name = 'BuildGvarCompositeExplicitDelta'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily4',
tables=['gvar'],
expected_ttx_name=test_name
)
def test_varlib_nonmarking_CFF2(self):
ds_path = self.get_test_input('TestNonMarkingCFF2.designspace')
ttx_dir = self.get_test_input("master_non_marking_cff2")
expected_ttx_path = self.get_test_output("TestNonMarkingCFF2.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestNonMarkingCFF2_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_CFF2(self):
ds_path = self.get_test_input('TestCFF2.designspace')
ttx_dir = self.get_test_input("master_cff2")
expected_ttx_path = self.get_test_output("BuildTestCFF2.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestCFF2_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["fvar", "CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_sparse_CFF2(self):
ds_path = self.get_test_input('TestSparseCFF2VF.designspace')
ttx_dir = self.get_test_input("master_sparse_cff2")
expected_ttx_path = self.get_test_output("TestSparseCFF2VF.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'MasterSet_Kanji-'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["fvar", "CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_vpal(self):
ds_path = self.get_test_input('test_vpal.designspace')
ttx_dir = self.get_test_input("master_vpal_test")
expected_ttx_path = self.get_test_output("test_vpal.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'master_vpal_test_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["GPOS"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_main_ttf(self):
"""Mostly for testing varLib.main()
"""
suffix = '.ttf'
ds_path = self.get_test_input('Build.designspace')
ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
self.temp_dir()
ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable')
os.makedirs(ttf_dir)
ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-')
for path in ttx_paths:
self.compile_font(path, suffix, ttf_dir)
ds_copy = os.path.join(self.tempdir, 'BuildMain.designspace')
shutil.copy2(ds_path, ds_copy)
# by default, varLib.main finds master TTFs inside a
# 'master_ttf_interpolatable' subfolder in current working dir
cwd = os.getcwd()
os.chdir(self.tempdir)
try:
varLib_main([ds_copy])
finally:
os.chdir(cwd)
varfont_path = os.path.splitext(ds_copy)[0] + '-VF' + suffix
self.assertTrue(os.path.exists(varfont_path))
# try again passing an explicit --master-finder
os.remove(varfont_path)
finder = "%s/master_ttf_interpolatable/{stem}.ttf" % self.tempdir
varLib_main([ds_copy, "--master-finder", finder])
self.assertTrue(os.path.exists(varfont_path))
# and also with explicit -o output option
os.remove(varfont_path)
varfont_path = os.path.splitext(varfont_path)[0] + "-o" + suffix
varLib_main([ds_copy, "-o", varfont_path, "--master-finder", finder])
self.assertTrue(os.path.exists(varfont_path))
varfont = TTFont(varfont_path)
tables = [table_tag for table_tag in varfont.keys() if table_tag != 'head']
expected_ttx_path = self.get_test_output('BuildMain.ttx')
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_from_ds_object_in_memory_ttfonts(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
filename = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".ttf")
)
source.font = TTFont(
filename, recalcBBoxes=False, recalcTimestamp=False, lazy=True
)
source.filename = None # Make sure no file path gets into build()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_from_ttf_paths(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".ttf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
    def test_varlib_build_from_ttx_paths(self):
        """build() accepts masters given as .ttx file paths directly,
        without compiling them to binary fonts first."""
        ds_path = self.get_test_input("Build.designspace")
        ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
        expected_ttx_path = self.get_test_output("BuildMain.ttx")
        ds = DesignSpaceDocument.fromfile(ds_path)
        for source in ds.sources:
            # Point each source at its TTX dump instead of the .ufo master.
            source.path = os.path.join(
                ttx_dir, os.path.basename(source.filename).replace(".ufo", ".ttx")
            )
        ds.updatePaths()
        varfont, _, _ = build(ds)
        varfont = reload_font(varfont)
        # 'head' is excluded because it contains timestamps.
        tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
        self.expect_ttx(varfont, expected_ttx_path, tables)
    def test_varlib_build_sparse_masters(self):
        """A designspace with sparse masters (masters that do not define
        every glyph) still builds a correct variable font."""
        ds_path = self.get_test_input("SparseMasters.designspace")
        expected_ttx_path = self.get_test_output("SparseMasters.ttx")
        varfont, _, _ = build(ds_path)
        varfont = reload_font(varfont)
        # 'head' is excluded because it contains timestamps.
        tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
        self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_sparse_masters_MVAR(self):
import fontTools.varLib.mvar
ds_path = self.get_test_input("SparseMasters.designspace")
ds = DesignSpaceDocument.fromfile(ds_path)
load_masters(ds)
# Trigger MVAR generation so varLib is forced to create deltas with a
# sparse master inbetween.
font_0_os2 = ds.sources[0].font["OS/2"]
font_0_os2.sTypoAscender = 1
font_0_os2.sTypoDescender = 1
font_0_os2.sTypoLineGap = 1
font_0_os2.usWinAscent = 1
font_0_os2.usWinDescent = 1
font_0_os2.sxHeight = 1
font_0_os2.sCapHeight = 1
font_0_os2.ySubscriptXSize = 1
font_0_os2.ySubscriptYSize = 1
font_0_os2.ySubscriptXOffset = 1
font_0_os2.ySubscriptYOffset = 1
font_0_os2.ySuperscriptXSize = 1
font_0_os2.ySuperscriptYSize = 1
font_0_os2.ySuperscriptXOffset = 1
font_0_os2.ySuperscriptYOffset = 1
font_0_os2.yStrikeoutSize = 1
font_0_os2.yStrikeoutPosition = 1
font_0_vhea = newTable("vhea")
font_0_vhea.ascent = 1
font_0_vhea.descent = 1
font_0_vhea.lineGap = 1
font_0_vhea.caretSlopeRise = 1
font_0_vhea.caretSlopeRun = 1
font_0_vhea.caretOffset = 1
ds.sources[0].font["vhea"] = font_0_vhea
font_0_hhea = ds.sources[0].font["hhea"]
font_0_hhea.caretSlopeRise = 1
font_0_hhea.caretSlopeRun = 1
font_0_hhea.caretOffset = 1
font_0_post = ds.sources[0].font["post"]
font_0_post.underlineThickness = 1
font_0_post.underlinePosition = 1
font_2_os2 = ds.sources[2].font["OS/2"]
font_2_os2.sTypoAscender = 800
font_2_os2.sTypoDescender = 800
font_2_os2.sTypoLineGap = 800
font_2_os2.usWinAscent = 800
font_2_os2.usWinDescent = 800
font_2_os2.sxHeight = 800
font_2_os2.sCapHeight = 800
font_2_os2.ySubscriptXSize = 800
font_2_os2.ySubscriptYSize = 800
font_2_os2.ySubscriptXOffset = 800
font_2_os2.ySubscriptYOffset = 800
font_2_os2.ySuperscriptXSize = 800
font_2_os2.ySuperscriptYSize = 800
font_2_os2.ySuperscriptXOffset = 800
font_2_os2.ySuperscriptYOffset = 800
font_2_os2.yStrikeoutSize = 800
font_2_os2.yStrikeoutPosition = 800
font_2_vhea = newTable("vhea")
font_2_vhea.ascent = 800
font_2_vhea.descent = 800
font_2_vhea.lineGap = 800
font_2_vhea.caretSlopeRise = 800
font_2_vhea.caretSlopeRun = 800
font_2_vhea.caretOffset = 800
ds.sources[2].font["vhea"] = font_2_vhea
font_2_hhea = ds.sources[2].font["hhea"]
font_2_hhea.caretSlopeRise = 800
font_2_hhea.caretSlopeRun = 800
font_2_hhea.caretOffset = 800
font_2_post = ds.sources[2].font["post"]
font_2_post.underlineThickness = 800
font_2_post.underlinePosition = 800
varfont, _, _ = build(ds)
mvar_tags = [vr.ValueTag for vr in varfont["MVAR"].table.ValueRecord]
assert all(tag in mvar_tags for tag in fontTools.varLib.mvar.MVAR_ENTRIES)
def test_varlib_build_VVAR_CFF2(self):
ds_path = self.get_test_input('TestVVAR.designspace')
ttx_dir = self.get_test_input("master_vvar_cff2")
expected_ttx_name = 'TestVVAR'
suffix = '.otf'
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestVVAR'):
font, savepath = self.compile_font(path, suffix, self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", suffix)
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
tables = ["VVAR"]
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
def test_kerning_merging(self):
"""Test the correct merging of class-based pair kerning.
Problem description at https://github.com/fonttools/fonttools/pull/1638.
Test font and Designspace generated by
https://gist.github.com/madig/183d0440c9f7d05f04bd1280b9664bd1.
"""
ds_path = self.get_test_input("KerningMerging.designspace")
ttx_dir = self.get_test_input("master_kerning_merging")
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
ttx_dump = TTFont()
ttx_dump.importXML(
os.path.join(
ttx_dir, os.path.basename(source.filename).replace(".ttf", ".ttx")
)
)
source.font = reload_font(ttx_dump)
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
class_kerning_tables = [
t
for l in varfont["GPOS"].table.LookupList.Lookup
for t in l.SubTable
if t.Format == 2
]
assert len(class_kerning_tables) == 1
class_kerning_table = class_kerning_tables[0]
# Test that no class kerned against class zero (containing all glyphs not
# classed) has a `XAdvDevice` table attached, which in the variable font
# context is a "VariationIndex" table and points to kerning deltas in the GDEF
# table. Variation deltas of any kerning class against class zero should
# probably never exist.
for class1_record in class_kerning_table.Class1Record:
class2_zero = class1_record.Class2Record[0]
assert getattr(class2_zero.Value1, "XAdvDevice", None) is None
# Assert the variable font's kerning table (without deltas) is equal to the
# default font's kerning table. The bug fixed in
# https://github.com/fonttools/fonttools/pull/1638 caused rogue kerning
# values to be written to the variable font.
assert _extract_flat_kerning(varfont, class_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): -20,
("A", "C"): 0,
("A", "D"): -20,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 0,
("B", "D"): 0,
}
instance_thin = instantiateVariableFont(varfont, {"wght": 100})
instance_thin_kerning_table = (
instance_thin["GPOS"].table.LookupList.Lookup[0].SubTable[0]
)
assert _extract_flat_kerning(instance_thin, instance_thin_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): 0,
("A", "C"): 10,
("A", "D"): 0,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 10,
("B", "D"): 0,
}
instance_black = instantiateVariableFont(varfont, {"wght": 900})
instance_black_kerning_table = (
instance_black["GPOS"].table.LookupList.Lookup[0].SubTable[0]
)
assert _extract_flat_kerning(instance_black, instance_black_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): 0,
("A", "C"): 0,
("A", "D"): 40,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 0,
("B", "D"): 40,
}
def test_load_masters_layerName_without_required_font():
    """A source with a layerName but no in-memory font object must raise:
    a layer can only be resolved against an already-loaded TTFont."""
    ds = DesignSpaceDocument()
    s = SourceDescriptor()
    s.font = None
    s.layerName = "Medium"
    ds.addSource(s)
    with pytest.raises(
        AttributeError,
        match="specified a layer name but lacks the required TTFont object",
    ):
        load_masters(ds)
def _extract_flat_kerning(font, pairpos_table):
    """Flatten a class-based (format 2) PairPos table into per-glyph kerning.

    Returns {(glyph1, glyph2): x_advance} for every glyph in the table's
    coverage paired with every glyph in the font.  Glyphs missing from a
    ClassDef fall into class 0, matching OpenType semantics.
    """
    extracted_kerning = {}
    for glyph_name_1 in pairpos_table.Coverage.glyphs:
        class_def_1 = pairpos_table.ClassDef1.classDefs.get(glyph_name_1, 0)
        for glyph_name_2 in font.getGlyphOrder():
            class_def_2 = pairpos_table.ClassDef2.classDefs.get(glyph_name_2, 0)
            kern_value = (
                pairpos_table.Class1Record[class_def_1]
                .Class2Record[class_def_2]
                .Value1.XAdvance
            )
            extracted_kerning[(glyph_name_1, glyph_name_2)] = kern_value
    return extracted_kerning
@pytest.fixture
def ttFont():
    """Minimal TTFont with default OS/2 weight/width and upright post table,
    as needed by set_default_weight_width_slant."""
    f = TTFont()
    f["OS/2"] = newTable("OS/2")
    f["OS/2"].usWeightClass = 400
    f["OS/2"].usWidthClass = 100
    f["post"] = newTable("post")
    f["post"].italicAngle = 0
    return f
class SetDefaultWeightWidthSlantTest(object):
    """Tests for set_default_weight_width_slant: the default axis location
    must be reflected in OS/2 usWeightClass/usWidthClass and post.italicAngle,
    with out-of-range values clamped."""

    @pytest.mark.parametrize(
        "location, expected",
        [
            # usWeightClass is clamped to the valid 1..1000 range.
            ({"wght": 0}, 1),
            ({"wght": 1}, 1),
            ({"wght": 100}, 100),
            ({"wght": 1000}, 1000),
            ({"wght": 1001}, 1000),
        ],
    )
    def test_wght(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["OS/2"].usWeightClass == expected

    @pytest.mark.parametrize(
        "location, expected",
        [
            # Percent-of-normal widths map to the nearest usWidthClass 1..9.
            ({"wdth": 0}, 1),
            ({"wdth": 56}, 1),
            ({"wdth": 57}, 2),
            ({"wdth": 62.5}, 2),
            ({"wdth": 75}, 3),
            ({"wdth": 87.5}, 4),
            ({"wdth": 100}, 5),
            ({"wdth": 112.5}, 6),
            ({"wdth": 125}, 7),
            ({"wdth": 150}, 8),
            ({"wdth": 200}, 9),
            ({"wdth": 201}, 9),
            ({"wdth": 1000}, 9),
        ],
    )
    def test_wdth(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["OS/2"].usWidthClass == expected

    @pytest.mark.parametrize(
        "location, expected",
        [
            # italicAngle is clamped to [-90, 90].
            ({"slnt": -91}, -90),
            ({"slnt": -90}, -90),
            ({"slnt": 0}, 0),
            ({"slnt": 11.5}, 11.5),
            ({"slnt": 90}, 90),
            ({"slnt": 91}, 90),
        ],
    )
    def test_slnt(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["post"].italicAngle == expected

    def test_all(self, ttFont):
        # All three axes applied in a single call.
        set_default_weight_width_slant(
            ttFont, {"wght": 500, "wdth": 150, "slnt": -12.0}
        )
        assert ttFont["OS/2"].usWeightClass == 500
        assert ttFont["OS/2"].usWidthClass == 8
        assert ttFont["post"].italicAngle == -12.0
if __name__ == "__main__":
sys.exit(unittest.main())
| 37.344346 | 87 | 0.611194 | from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont, newTable
from fontTools.varLib import build
from fontTools.varLib.mutator import instantiateVariableFont
from fontTools.varLib import main as varLib_main, load_masters
from fontTools.varLib import set_default_weight_width_slant
from fontTools.designspaceLib import (
DesignSpaceDocumentError, DesignSpaceDocument, SourceDescriptor,
)
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
import difflib
import os
import shutil
import sys
import tempfile
import unittest
import pytest
def reload_font(font):
buf = BytesIO()
font.save(buf)
buf.seek(0)
return TTFont(buf)
class BuildTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
@staticmethod
def get_test_input(test_file_or_folder):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", test_file_or_folder)
@staticmethod
def get_test_output(test_file_or_folder):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", "test_results", test_file_or_folder)
@staticmethod
def get_file_list(folder, suffix, prefix=''):
all_files = os.listdir(folder)
file_list = []
for p in all_files:
if p.startswith(prefix) and p.endswith(suffix):
file_list.append(os.path.abspath(os.path.join(folder, p)))
return file_list
def temp_path(self, suffix):
self.temp_dir()
self.num_tempfiles += 1
return os.path.join(self.tempdir,
"tmp%d%s" % (self.num_tempfiles, suffix))
def temp_dir(self):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
def read_ttx(self, path):
lines = []
with open(path, "r", encoding="utf-8") as ttx:
for line in ttx.readlines():
if line.startswith("<ttFont "):
lines.append("<ttFont>" + os.linesep)
else:
lines.append(line.rstrip() + os.linesep)
return lines
def expect_ttx(self, font, expected_ttx, tables):
path = self.temp_path(suffix=".ttx")
font.saveXML(path, tables=tables)
actual = self.read_ttx(path)
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
expected, actual, fromfile=expected_ttx, tofile=path):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
def check_ttx_dump(self, font, expected_ttx, tables, suffix):
path = self.temp_path(suffix=suffix)
font.save(path)
self.expect_ttx(TTFont(path), expected_ttx, tables)
def compile_font(self, path, suffix, temp_dir):
ttx_filename = os.path.basename(path)
savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix))
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
def _run_varlib_build_test(self, designspace_name, font_name, tables,
expected_ttx_name, save_before_dump=False,
post_process_master=None):
suffix = '.ttf'
ds_path = self.get_test_input(designspace_name + '.designspace')
ufo_dir = self.get_test_input('master_ufo')
ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
self.temp_dir()
ttx_paths = self.get_file_list(ttx_dir, '.ttx', font_name + '-')
for path in ttx_paths:
font, savepath = self.compile_font(path, suffix, self.tempdir)
if post_process_master is not None:
post_process_master(font, savepath)
finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix)
varfont, model, _ = build(ds_path, finder)
if save_before_dump:
varfont = reload_font(varfont)
expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
def test_varlib_build_ttf(self):
self._run_varlib_build_test(
designspace_name='Build',
font_name='TestFamily',
tables=['GDEF', 'HVAR', 'MVAR', 'fvar', 'gvar'],
expected_ttx_name='Build'
)
def test_varlib_build_no_axes_ttf(self):
ds_path = self.get_test_input('InterpolateLayout3.designspace')
with self.assertRaisesRegex(DesignSpaceDocumentError, "No axes defined"):
build(ds_path)
def test_varlib_avar_single_axis(self):
test_name = 'BuildAvarSingleAxis'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_avar_with_identity_maps(self):
test_name = 'BuildAvarIdentityMaps'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_avar_empty_axis(self):
test_name = 'BuildAvarEmptyAxis'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily3',
tables=['avar'],
expected_ttx_name=test_name
)
def test_varlib_build_feature_variations(self):
self._run_varlib_build_test(
designspace_name="FeatureVars",
font_name="TestFamily",
tables=["fvar", "GSUB"],
expected_ttx_name="FeatureVars",
save_before_dump=True,
)
    def test_varlib_build_feature_variations_with_existing_rclt(self):
        """Feature variations must merge into a pre-existing `rclt` feature
        in the masters instead of clobbering it."""
        def add_rclt(font, savepath):
            # Inject a hand-written rclt feature into each compiled master
            # before the variable font is built.
            features = """
    languagesystem DFLT dflt;
    languagesystem latn dflt;
    languagesystem latn NLD;
    feature rclt {
        script latn;
        language NLD;
        lookup A {
            sub uni0041 by uni0061;
        } A;
        language dflt;
        lookup B {
            sub uni0041 by uni0061;
        } B;
    } rclt;
    """
            addOpenTypeFeaturesFromString(font, features)
            font.save(savepath)
        self._run_varlib_build_test(
            designspace_name="FeatureVars",
            font_name="TestFamily",
            tables=["fvar", "GSUB"],
            expected_ttx_name="FeatureVars_rclt",
            save_before_dump=True,
            post_process_master=add_rclt,
        )
def test_varlib_gvar_explicit_delta(self):
test_name = 'BuildGvarCompositeExplicitDelta'
self._run_varlib_build_test(
designspace_name=test_name,
font_name='TestFamily4',
tables=['gvar'],
expected_ttx_name=test_name
)
def test_varlib_nonmarking_CFF2(self):
ds_path = self.get_test_input('TestNonMarkingCFF2.designspace')
ttx_dir = self.get_test_input("master_non_marking_cff2")
expected_ttx_path = self.get_test_output("TestNonMarkingCFF2.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestNonMarkingCFF2_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_CFF2(self):
ds_path = self.get_test_input('TestCFF2.designspace')
ttx_dir = self.get_test_input("master_cff2")
expected_ttx_path = self.get_test_output("BuildTestCFF2.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestCFF2_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["fvar", "CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_sparse_CFF2(self):
ds_path = self.get_test_input('TestSparseCFF2VF.designspace')
ttx_dir = self.get_test_input("master_sparse_cff2")
expected_ttx_path = self.get_test_output("TestSparseCFF2VF.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'MasterSet_Kanji-'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["fvar", "CFF2"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_vpal(self):
ds_path = self.get_test_input('test_vpal.designspace')
ttx_dir = self.get_test_input("master_vpal_test")
expected_ttx_path = self.get_test_output("test_vpal.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'master_vpal_test_'):
self.compile_font(path, ".otf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".otf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = ["GPOS"]
self.expect_ttx(varfont, expected_ttx_path, tables)
    def test_varlib_main_ttf(self):
        """Exercise the varLib command-line entry point (varLib_main) in its
        three invocation modes: implicit master discovery, an explicit
        --master-finder template, and an explicit -o output path."""
        suffix = '.ttf'
        ds_path = self.get_test_input('Build.designspace')
        ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf')
        self.temp_dir()
        ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable')
        os.makedirs(ttf_dir)
        ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-')
        for path in ttx_paths:
            self.compile_font(path, suffix, ttf_dir)
        ds_copy = os.path.join(self.tempdir, 'BuildMain.designspace')
        shutil.copy2(ds_path, ds_copy)
        # Run from inside the temp dir so the CLI can locate the
        # 'master_ttf_interpolatable' folder created above relative to cwd.
        cwd = os.getcwd()
        os.chdir(self.tempdir)
        try:
            varLib_main([ds_copy])
        finally:
            # Always restore the working directory, even if the CLI raised.
            os.chdir(cwd)
        varfont_path = os.path.splitext(ds_copy)[0] + '-VF' + suffix
        self.assertTrue(os.path.exists(varfont_path))
        os.remove(varfont_path)
        # Second run: locate masters via an explicit --master-finder template.
        finder = "%s/master_ttf_interpolatable/{stem}.ttf" % self.tempdir
        varLib_main([ds_copy, "--master-finder", finder])
        self.assertTrue(os.path.exists(varfont_path))
        os.remove(varfont_path)
        # Third run: -o overrides the default '<designspace>-VF' output path.
        varfont_path = os.path.splitext(varfont_path)[0] + "-o" + suffix
        varLib_main([ds_copy, "-o", varfont_path, "--master-finder", finder])
        self.assertTrue(os.path.exists(varfont_path))
        varfont = TTFont(varfont_path)
        tables = [table_tag for table_tag in varfont.keys() if table_tag != 'head']
        expected_ttx_path = self.get_test_output('BuildMain.ttx')
        self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_from_ds_object_in_memory_ttfonts(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
filename = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".ttf")
)
source.font = TTFont(
filename, recalcBBoxes=False, recalcTimestamp=False, lazy=True
)
source.filename = None
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_from_ttf_paths(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestFamily-'):
self.compile_font(path, ".ttf", self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", ".ttf")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_from_ttx_paths(self):
ds_path = self.get_test_input("Build.designspace")
ttx_dir = self.get_test_input("master_ttx_interpolatable_ttf")
expected_ttx_path = self.get_test_output("BuildMain.ttx")
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
ttx_dir, os.path.basename(source.filename).replace(".ufo", ".ttx")
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_sparse_masters(self):
ds_path = self.get_test_input("SparseMasters.designspace")
expected_ttx_path = self.get_test_output("SparseMasters.ttx")
varfont, _, _ = build(ds_path)
varfont = reload_font(varfont)
tables = [table_tag for table_tag in varfont.keys() if table_tag != "head"]
self.expect_ttx(varfont, expected_ttx_path, tables)
def test_varlib_build_sparse_masters_MVAR(self):
import fontTools.varLib.mvar
ds_path = self.get_test_input("SparseMasters.designspace")
ds = DesignSpaceDocument.fromfile(ds_path)
load_masters(ds)
font_0_os2 = ds.sources[0].font["OS/2"]
font_0_os2.sTypoAscender = 1
font_0_os2.sTypoDescender = 1
font_0_os2.sTypoLineGap = 1
font_0_os2.usWinAscent = 1
font_0_os2.usWinDescent = 1
font_0_os2.sxHeight = 1
font_0_os2.sCapHeight = 1
font_0_os2.ySubscriptXSize = 1
font_0_os2.ySubscriptYSize = 1
font_0_os2.ySubscriptXOffset = 1
font_0_os2.ySubscriptYOffset = 1
font_0_os2.ySuperscriptXSize = 1
font_0_os2.ySuperscriptYSize = 1
font_0_os2.ySuperscriptXOffset = 1
font_0_os2.ySuperscriptYOffset = 1
font_0_os2.yStrikeoutSize = 1
font_0_os2.yStrikeoutPosition = 1
font_0_vhea = newTable("vhea")
font_0_vhea.ascent = 1
font_0_vhea.descent = 1
font_0_vhea.lineGap = 1
font_0_vhea.caretSlopeRise = 1
font_0_vhea.caretSlopeRun = 1
font_0_vhea.caretOffset = 1
ds.sources[0].font["vhea"] = font_0_vhea
font_0_hhea = ds.sources[0].font["hhea"]
font_0_hhea.caretSlopeRise = 1
font_0_hhea.caretSlopeRun = 1
font_0_hhea.caretOffset = 1
font_0_post = ds.sources[0].font["post"]
font_0_post.underlineThickness = 1
font_0_post.underlinePosition = 1
font_2_os2 = ds.sources[2].font["OS/2"]
font_2_os2.sTypoAscender = 800
font_2_os2.sTypoDescender = 800
font_2_os2.sTypoLineGap = 800
font_2_os2.usWinAscent = 800
font_2_os2.usWinDescent = 800
font_2_os2.sxHeight = 800
font_2_os2.sCapHeight = 800
font_2_os2.ySubscriptXSize = 800
font_2_os2.ySubscriptYSize = 800
font_2_os2.ySubscriptXOffset = 800
font_2_os2.ySubscriptYOffset = 800
font_2_os2.ySuperscriptXSize = 800
font_2_os2.ySuperscriptYSize = 800
font_2_os2.ySuperscriptXOffset = 800
font_2_os2.ySuperscriptYOffset = 800
font_2_os2.yStrikeoutSize = 800
font_2_os2.yStrikeoutPosition = 800
font_2_vhea = newTable("vhea")
font_2_vhea.ascent = 800
font_2_vhea.descent = 800
font_2_vhea.lineGap = 800
font_2_vhea.caretSlopeRise = 800
font_2_vhea.caretSlopeRun = 800
font_2_vhea.caretOffset = 800
ds.sources[2].font["vhea"] = font_2_vhea
font_2_hhea = ds.sources[2].font["hhea"]
font_2_hhea.caretSlopeRise = 800
font_2_hhea.caretSlopeRun = 800
font_2_hhea.caretOffset = 800
font_2_post = ds.sources[2].font["post"]
font_2_post.underlineThickness = 800
font_2_post.underlinePosition = 800
varfont, _, _ = build(ds)
mvar_tags = [vr.ValueTag for vr in varfont["MVAR"].table.ValueRecord]
assert all(tag in mvar_tags for tag in fontTools.varLib.mvar.MVAR_ENTRIES)
def test_varlib_build_VVAR_CFF2(self):
ds_path = self.get_test_input('TestVVAR.designspace')
ttx_dir = self.get_test_input("master_vvar_cff2")
expected_ttx_name = 'TestVVAR'
suffix = '.otf'
self.temp_dir()
for path in self.get_file_list(ttx_dir, '.ttx', 'TestVVAR'):
font, savepath = self.compile_font(path, suffix, self.tempdir)
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
source.path = os.path.join(
self.tempdir, os.path.basename(source.filename).replace(".ufo", suffix)
)
ds.updatePaths()
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx')
tables = ["VVAR"]
self.expect_ttx(varfont, expected_ttx_path, tables)
self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix)
def test_kerning_merging(self):
ds_path = self.get_test_input("KerningMerging.designspace")
ttx_dir = self.get_test_input("master_kerning_merging")
ds = DesignSpaceDocument.fromfile(ds_path)
for source in ds.sources:
ttx_dump = TTFont()
ttx_dump.importXML(
os.path.join(
ttx_dir, os.path.basename(source.filename).replace(".ttf", ".ttx")
)
)
source.font = reload_font(ttx_dump)
varfont, _, _ = build(ds)
varfont = reload_font(varfont)
class_kerning_tables = [
t
for l in varfont["GPOS"].table.LookupList.Lookup
for t in l.SubTable
if t.Format == 2
]
assert len(class_kerning_tables) == 1
class_kerning_table = class_kerning_tables[0]
for class1_record in class_kerning_table.Class1Record:
class2_zero = class1_record.Class2Record[0]
assert getattr(class2_zero.Value1, "XAdvDevice", None) is None
# default font's kerning table. The bug fixed in
assert _extract_flat_kerning(varfont, class_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): -20,
("A", "C"): 0,
("A", "D"): -20,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 0,
("B", "D"): 0,
}
instance_thin = instantiateVariableFont(varfont, {"wght": 100})
instance_thin_kerning_table = (
instance_thin["GPOS"].table.LookupList.Lookup[0].SubTable[0]
)
assert _extract_flat_kerning(instance_thin, instance_thin_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): 0,
("A", "C"): 10,
("A", "D"): 0,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 10,
("B", "D"): 0,
}
instance_black = instantiateVariableFont(varfont, {"wght": 900})
instance_black_kerning_table = (
instance_black["GPOS"].table.LookupList.Lookup[0].SubTable[0]
)
assert _extract_flat_kerning(instance_black, instance_black_kerning_table) == {
("A", ".notdef"): 0,
("A", "A"): 0,
("A", "B"): 0,
("A", "C"): 0,
("A", "D"): 40,
("B", ".notdef"): 0,
("B", "A"): 0,
("B", "B"): 0,
("B", "C"): 0,
("B", "D"): 40,
}
def test_load_masters_layerName_without_required_font():
    """load_masters() must raise when a source names a layer but carries no
    in-memory font object to take that layer from."""
    doc = DesignSpaceDocument()
    source = SourceDescriptor()
    source.font = None
    source.layerName = "Medium"
    doc.addSource(source)
    with pytest.raises(
        AttributeError,
        match="specified a layer name but lacks the required TTFont object",
    ):
        load_masters(doc)
def _extract_flat_kerning(font, pairpos_table):
extracted_kerning = {}
for glyph_name_1 in pairpos_table.Coverage.glyphs:
class_def_1 = pairpos_table.ClassDef1.classDefs.get(glyph_name_1, 0)
for glyph_name_2 in font.getGlyphOrder():
class_def_2 = pairpos_table.ClassDef2.classDefs.get(glyph_name_2, 0)
kern_value = (
pairpos_table.Class1Record[class_def_1]
.Class2Record[class_def_2]
.Value1.XAdvance
)
extracted_kerning[(glyph_name_1, glyph_name_2)] = kern_value
return extracted_kerning
@pytest.fixture
def ttFont():
    """Minimal TTFont fixture carrying default OS/2 and post tables."""
    font = TTFont()
    os2 = newTable("OS/2")
    os2.usWeightClass = 400
    os2.usWidthClass = 100
    font["OS/2"] = os2
    post = newTable("post")
    post.italicAngle = 0
    font["post"] = post
    return font
class SetDefaultWeightWidthSlantTest(object):
    """Tests for set_default_weight_width_slant(): the default location on
    the wght/wdth/slnt axes should drive OS/2 usWeightClass, OS/2
    usWidthClass and post.italicAngle, clamped to each field's legal
    range."""
    @pytest.mark.parametrize(
        "location, expected",
        [
            # usWeightClass is clamped to [1, 1000].
            ({"wght": 0}, 1),
            ({"wght": 1}, 1),
            ({"wght": 100}, 100),
            ({"wght": 1000}, 1000),
            ({"wght": 1001}, 1000),
        ],
    )
    def test_wght(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["OS/2"].usWeightClass == expected
    @pytest.mark.parametrize(
        "location, expected",
        [
            # wdth percentages snap to the nine usWidthClass buckets.
            ({"wdth": 0}, 1),
            ({"wdth": 56}, 1),
            ({"wdth": 57}, 2),
            ({"wdth": 62.5}, 2),
            ({"wdth": 75}, 3),
            ({"wdth": 87.5}, 4),
            ({"wdth": 100}, 5),
            ({"wdth": 112.5}, 6),
            ({"wdth": 125}, 7),
            ({"wdth": 150}, 8),
            ({"wdth": 200}, 9),
            ({"wdth": 201}, 9),
            ({"wdth": 1000}, 9),
        ],
    )
    def test_wdth(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["OS/2"].usWidthClass == expected
    @pytest.mark.parametrize(
        "location, expected",
        [
            # italicAngle is clamped to [-90, 90].
            ({"slnt": -91}, -90),
            ({"slnt": -90}, -90),
            ({"slnt": 0}, 0),
            ({"slnt": 11.5}, 11.5),
            ({"slnt": 90}, 90),
            ({"slnt": 91}, 90),
        ],
    )
    def test_slnt(self, ttFont, location, expected):
        set_default_weight_width_slant(ttFont, location)
        assert ttFont["post"].italicAngle == expected
    def test_all(self, ttFont):
        # All three axes applied at once.
        set_default_weight_width_slant(
            ttFont, {"wght": 500, "wdth": 150, "slnt": -12.0}
        )
        assert ttFont["OS/2"].usWeightClass == 500
        assert ttFont["OS/2"].usWidthClass == 8
        assert ttFont["post"].italicAngle == -12.0
if __name__ == "__main__":
    # Allow running this test module directly via unittest's CLI.
    sys.exit(unittest.main())
| true | true |
f7289ecc3ee4be9cb40b74492c2671d44bde5c3d | 7,164 | py | Python | mamonsu/lib/config.py | hisahin/mamonsu | 93524317f8961256b193dc13d13f2d0b679d3352 | [
"BSD-3-Clause"
] | null | null | null | mamonsu/lib/config.py | hisahin/mamonsu | 93524317f8961256b193dc13d13f2d0b679d3352 | [
"BSD-3-Clause"
] | null | null | null | mamonsu/lib/config.py | hisahin/mamonsu | 93524317f8961256b193dc13d13f2d0b679d3352 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import socket
import os
import logging
import sys
import glob
import mamonsu.lib.platform as platform
from mamonsu.lib.plugin import Plugin
from mamonsu.plugins.pgsql.driver.checks import is_conn_to_db
from mamonsu.lib.default_config import DefaultConfig
if platform.PY2:
import ConfigParser as configparser
else:
import configparser
class Config(DefaultConfig):
    """Runtime configuration for the mamonsu agent.

    Populates a ``configparser`` with built-in defaults, optionally overlays
    a user-supplied config file, imports external plugin modules, configures
    logging, and exports the PostgreSQL connection settings to the standard
    libpq environment variables (PGUSER, PGHOST, ...).
    """
    def __init__(self, cfg_file=None, plugin_directories=None):
        # NOTE: the default for plugin_directories used to be a shared
        # mutable list ([]); None avoids the mutable-default pitfall and is
        # handled identically by _load_external_plugins().
        config = configparser.ConfigParser()
        config.add_section('postgres')
        config.set('postgres', 'enabled', str(True))
        config.set('postgres', 'user', Config.default_user())
        config.set('postgres', 'password', str(Config.default_pgpassword()))
        config.set('postgres', 'database', Config.default_db())
        config.set('postgres', 'host', Config.default_host())
        config.set('postgres', 'port', str(Config.default_port()))
        config.set('postgres', 'application_name', str(Config.default_app()))
        config.set('postgres', 'query_timeout', '10')
        config.add_section('system')
        config.set('system', 'enabled', str(True))
        config.add_section('sender')
        config.set('sender', 'queue', str(2048))
        config.add_section('agent')
        config.set('agent', 'enabled', str(True))
        config.set('agent', 'host', '127.0.0.1')
        config.set('agent', 'port', str(10052))
        config.add_section('plugins')
        config.set('plugins', 'enabled', str(False))
        config.set('plugins', 'directory', '/etc/mamonsu/plugins')
        config.add_section('zabbix')
        config.set('zabbix', 'enabled', str(True))
        config.set('zabbix', 'client', socket.gethostname())
        config.set('zabbix', 'address', '127.0.0.1')
        config.set('zabbix', 'port', str(10051))
        config.add_section('metric_log')
        config.set('metric_log', 'enabled', str(False))
        config.set('metric_log', 'directory', '/var/log/mamonsu')
        config.set('metric_log', 'max_size_mb', '1024')
        config.add_section('log')
        config.set('log', 'file', str(None))
        config.set('log', 'level', 'INFO')
        config.set(
            'log', 'format',
            '[%(levelname)s] %(asctime)s - %(name)s\t-\t%(message)s')
        self.config = config
        self._load_external_plugins(plugin_directories)
        self._apply_default_config()
        if cfg_file and not os.path.isfile(cfg_file):
            # BUG FIX: corrected error-message grammar ("Can't found file").
            sys.stderr.write('Can\'t find file: {0}'.format(cfg_file))
            sys.exit(1)
        else:
            if cfg_file is not None:
                # readfp() is the only spelling available on the python2
                # ConfigParser module, so keep it for compatibility; use a
                # context manager so the handle is not leaked (the previous
                # code passed open(cfg_file) and never closed it).
                with open(cfg_file) as config_fd:
                    self.config.readfp(config_fd)
                plugins = self.fetch('plugins', 'directory', str)
                if plugins is not None:
                    self._load_external_plugins_from_directory(plugins)
                self._apply_default_config()
        self._check_interval()
        self._apply_log_setting()
        self._apply_environ()
        self._override_auto_variables()
    def has_plugin_config(self, name):
        """True when the loaded config defines a section for plugin *name*."""
        return self.config.has_section(name)
    def plugin_options(self, name):
        """Option names defined in the section for plugin *name*."""
        return self.config.options(name)
    def fetch(self, sec, key, klass=None, raw=False):
        """Read option *key* from section *sec*, coerced to *klass*.

        Returns None when the option is missing or is literally 'None'.
        """
        try:
            if klass == float:
                return self.config.getfloat(sec, key)
            if klass == int:
                return self.config.getint(sec, key)
            if klass == bool:
                return self.config.getboolean(sec, key)
            if self.config.get(sec, key, raw=raw) == 'None':
                return None
            return self.config.get(sec, key, raw=raw)
        except (configparser.NoSectionError, configparser.NoOptionError):
            # BUG FIX: configparser raises its own error classes, not
            # KeyError, so the previous `except KeyError` never matched and
            # missing options escaped as unhandled exceptions.
            return None
    def _apply_environ(self):
        """Export connection settings to the standard libpq env variables."""
        os.environ['PGUSER'] = self.fetch('postgres', 'user')
        if self.fetch('postgres', 'password'):
            os.environ['PGPASSWORD'] = self.fetch('postgres', 'password')
        os.environ['PGHOST'] = self.fetch('postgres', 'host')
        os.environ['PGPORT'] = str(self.fetch('postgres', 'port'))
        os.environ['PGDATABASE'] = self.fetch('postgres', 'database')
        os.environ['PGTIMEOUT'] = self.fetch('postgres', 'query_timeout')
        os.environ['PGAPPNAME'] = self.fetch('postgres', 'application_name')
    def _apply_log_setting(self):
        """Configure the root logger from the [log] section."""
        logging.basicConfig(
            format=self.fetch('log', 'format', raw=True),
            filename=self.fetch('log', 'file'),
            level=self.get_logger_level(self.fetch('log', 'level')))
    def _load_external_plugins(self, directories):
        """Import plugin modules from every directory in *directories*."""
        if directories is None:
            return
        for directory in directories:
            self._load_external_plugins_from_directory(directory)
    def _load_external_plugins_from_directory(self, directory):
        """Import every non-underscore *.py file in *directory* as a module.

        Exits the process with status 3 if any module fails to import.
        """
        sys.path.append(directory)
        try:
            for filename in glob.glob(os.path.join(directory, '*.py')):
                if not os.path.isfile(filename):
                    continue
                # /dir/filename.py => filename.py
                filename = os.path.basename(filename)
                if filename.startswith('_'):
                    continue
                # filename.py => filename
                filename, _ = os.path.splitext(filename)
                __import__(filename)
        except Exception as e:
            sys.stderr.write('Can\'t load module: {0}'.format(e))
            sys.exit(3)
    def _override_auto_variables(self):
        self._override_auto_host()
    def _override_auto_host(self):
        """Resolve host='auto' by probing unix sockets, then 127.0.0.1."""
        def test_db(self, host_pre):
            # Try the candidate host; on success persist it and re-export
            # the libpq environment.
            if is_conn_to_db(
                    host=host_pre,
                    db=self.fetch('postgres', 'database'),
                    port=str(self.fetch('postgres', 'port')),
                    user=self.fetch('postgres', 'user'),
                    paswd=self.fetch('postgres', 'password')):
                self.config.set('postgres', 'host', host_pre)
                self._apply_environ()
                return True
            return False
        host = self.fetch('postgres', 'host')
        port = str(self.fetch('postgres', 'port'))
        if host == 'auto' and platform.UNIX:
            logging.debug('Host set to auto, test variables')
            if test_db(self, '/tmp/.s.PGSQL.{0}'.format(port)):
                return
            if test_db(self, '/var/run/postgresql/.s.PGSQL.{0}'.format(port)):
                return
            if test_db(self, '127.0.0.1'):
                return
            # do not exit here: fall back to 'localhost' and let a later
            # connection attempt succeed (or fail) there.
            self.config.set('postgres', 'host', 'localhost')
            self._apply_environ()
    def _apply_default_config(self):
        """Let every plugin class register its default config options."""
        if self.config.has_option('postgres', 'interval'):
            interval = self.fetch('postgres', 'interval')
        else:
            interval = None
        for plugin in Plugin.only_child_subclasses():
            plugin.set_default_config(self.config, interval)
    def _check_interval(self):
        """Ensure every plugin section carries an 'interval' option."""
        for plugin in Plugin.only_child_subclasses():
            if not self.config.has_option(plugin.__name__.lower(), 'interval'):
                self.config.set(plugin.__name__.lower(), 'interval', '{0}'.format(plugin.Interval))
| 37.507853 | 99 | 0.590313 |
import socket
import os
import logging
import sys
import glob
import mamonsu.lib.platform as platform
from mamonsu.lib.plugin import Plugin
from mamonsu.plugins.pgsql.driver.checks import is_conn_to_db
from mamonsu.lib.default_config import DefaultConfig
if platform.PY2:
import ConfigParser as configparser
else:
import configparser
class Config(DefaultConfig):
    """Runtime configuration for the mamonsu agent: defaults + optional
    config file + external plugins + logging + libpq environment export.

    NOTE(review): this is a second, comment-stripped definition of the
    Config class already present earlier in this file; at import time it
    shadows the earlier one. Likely a concatenation artifact — confirm.
    """
    def __init__(self, cfg_file=None, plugin_directories=[]):
        # Build the in-memory defaults first.
        config = configparser.ConfigParser()
        config.add_section('postgres')
        config.set('postgres', 'enabled', str(True))
        config.set('postgres', 'user', Config.default_user())
        config.set('postgres', 'password', str(Config.default_pgpassword()))
        config.set('postgres', 'database', Config.default_db())
        config.set('postgres', 'host', Config.default_host())
        config.set('postgres', 'port', str(Config.default_port()))
        config.set('postgres', 'application_name', str(Config.default_app()))
        config.set('postgres', 'query_timeout', '10')
        config.add_section('system')
        config.set('system', 'enabled', str(True))
        config.add_section('sender')
        config.set('sender', 'queue', str(2048))
        config.add_section('agent')
        config.set('agent', 'enabled', str(True))
        config.set('agent', 'host', '127.0.0.1')
        config.set('agent', 'port', str(10052))
        config.add_section('plugins')
        config.set('plugins', 'enabled', str(False))
        config.set('plugins', 'directory', '/etc/mamonsu/plugins')
        config.add_section('zabbix')
        config.set('zabbix', 'enabled', str(True))
        config.set('zabbix', 'client', socket.gethostname())
        config.set('zabbix', 'address', '127.0.0.1')
        config.set('zabbix', 'port', str(10051))
        config.add_section('metric_log')
        config.set('metric_log', 'enabled', str(False))
        config.set('metric_log', 'directory', '/var/log/mamonsu')
        config.set('metric_log', 'max_size_mb', '1024')
        config.add_section('log')
        config.set('log', 'file', str(None))
        config.set('log', 'level', 'INFO')
        config.set(
            'log', 'format',
            '[%(levelname)s] %(asctime)s - %(name)s\t-\t%(message)s')
        self.config = config
        self._load_external_plugins(plugin_directories)
        self._apply_default_config()
        if cfg_file and not os.path.isfile(cfg_file):
            sys.stderr.write('Can\'t found file: {0}'.format(cfg_file))
            sys.exit(1)
        else:
            if cfg_file is not None:
                self.config.readfp(open(cfg_file))
                plugins = self.fetch('plugins', 'directory', str)
                if plugins is not None:
                    self._load_external_plugins_from_directory(plugins)
                self._apply_default_config()
        self._check_interval()
        self._apply_log_setting()
        self._apply_environ()
        self._override_auto_variables()
    def has_plugin_config(self, name):
        """True when the loaded config defines a section for plugin *name*."""
        return self.config.has_section(name)
    def plugin_options(self, name):
        """Option names defined in the section for plugin *name*."""
        return self.config.options(name)
    def fetch(self, sec, key, klass=None, raw=False):
        """Read option *key* from section *sec*, coerced to *klass*;
        None when the value is literally 'None'."""
        try:
            if klass == float:
                return self.config.getfloat(sec, key)
            if klass == int:
                return self.config.getint(sec, key)
            if klass == bool:
                return self.config.getboolean(sec, key)
            if self.config.get(sec, key, raw=raw) == 'None':
                return None
            return self.config.get(sec, key, raw=raw)
        except KeyError:
            # NOTE(review): configparser raises NoSectionError/NoOptionError,
            # which are not KeyError subclasses — this handler likely never
            # fires and missing options propagate. Confirm intended behavior.
            return None
    def _apply_environ(self):
        """Export connection settings to the standard libpq env variables."""
        os.environ['PGUSER'] = self.fetch('postgres', 'user')
        if self.fetch('postgres', 'password'):
            os.environ['PGPASSWORD'] = self.fetch('postgres', 'password')
        os.environ['PGHOST'] = self.fetch('postgres', 'host')
        os.environ['PGPORT'] = str(self.fetch('postgres', 'port'))
        os.environ['PGDATABASE'] = self.fetch('postgres', 'database')
        os.environ['PGTIMEOUT'] = self.fetch('postgres', 'query_timeout')
        os.environ['PGAPPNAME'] = self.fetch('postgres', 'application_name')
    def _apply_log_setting(self):
        """Configure the root logger from the [log] section."""
        logging.basicConfig(
            format=self.fetch('log', 'format', raw=True),
            filename=self.fetch('log', 'file'),
            level=self.get_logger_level(self.fetch('log', 'level')))
    def _load_external_plugins(self, directories):
        """Import plugin modules from every directory in *directories*."""
        if directories is None:
            return
        for dir in directories:
            self._load_external_plugins_from_directory(dir)
    def _load_external_plugins_from_directory(self, directory):
        """Import every non-underscore *.py file in *directory* as a module;
        exits the process with status 3 on import failure."""
        sys.path.append(directory)
        try:
            for filename in glob.glob(os.path.join(directory, '*.py')):
                if not os.path.isfile(filename):
                    continue
                # /dir/filename.py => filename.py
                filename = os.path.basename(filename)
                if filename.startswith('_'):
                    continue
                # filename.py => filename
                filename, _ = os.path.splitext(filename)
                __import__(filename)
        except Exception as e:
            sys.stderr.write('Can\'t load module: {0}'.format(e))
            sys.exit(3)
    def _override_auto_variables(self):
        self._override_auto_host()
    def _override_auto_host(self):
        """Resolve host='auto' by probing unix sockets, then 127.0.0.1,
        finally falling back to 'localhost'."""
        def test_db(self, host_pre):
            # Try the candidate host; on success persist it and re-export
            # the environment.
            if is_conn_to_db(
                    host=host_pre,
                    db=self.fetch('postgres', 'database'),
                    port=str(self.fetch('postgres', 'port')),
                    user=self.fetch('postgres', 'user'),
                    paswd=self.fetch('postgres', 'password')):
                self.config.set('postgres', 'host', host_pre)
                self._apply_environ()
                return True
            return False
        host = self.fetch('postgres', 'host')
        port = str(self.fetch('postgres', 'port'))
        if host == 'auto' and platform.UNIX:
            logging.debug('Host set to auto, test variables')
            if test_db(self, '/tmp/.s.PGSQL.{0}'.format(port)):
                return
            if test_db(self, '/var/run/postgresql/.s.PGSQL.{0}'.format(port)):
                return
            if test_db(self, '127.0.0.1'):
                return
            # No probe succeeded: fall back to 'localhost'.
            self.config.set('postgres', 'host', 'localhost')
            self._apply_environ()
    def _apply_default_config(self):
        """Let every plugin class register its default config options."""
        if self.config.has_option('postgres', 'interval'):
            interval = self.fetch('postgres', 'interval')
        else:
            interval = None
        for plugin in Plugin.only_child_subclasses():
            plugin.set_default_config(self.config, interval)
    def _check_interval(self):
        """Ensure every plugin section carries an 'interval' option."""
        for plugin in Plugin.only_child_subclasses():
            if not self.config.has_option(plugin.__name__.lower(), 'interval'):
                self.config.set(plugin.__name__.lower(), 'interval', '{0}'.format(plugin.Interval))
| true | true |
f7289ff2340b0efc55a36056c729a00c428fcde0 | 42,279 | py | Python | tensorflow/python/ops/math_ops.py | Monnoroch/tensorflow | e4af1c4023826c815135ed330576f7bfeb74f052 | [
"Apache-2.0"
] | 1 | 2015-11-12T06:52:22.000Z | 2015-11-12T06:52:22.000Z | tensorflow/python/ops/math_ops.py | danilodorgam/tensorflow | 1d76583411038767f673a0c96174c80eaf9ff42f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/math_ops.py | danilodorgam/tensorflow | 1d76583411038767f673a0c96174c80eaf9ff42f | [
"Apache-2.0"
] | 2 | 2016-05-18T12:48:06.000Z | 2019-03-30T03:56:31.000Z | """## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@mod
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
## Matrix Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions for matrices to your graph.
@@diag
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
its first dimension: the `segment_ids` tensor maps each index along the first
dimension to a segment. `segment_ids` should have length equal to
the first dimension, `d0`, and contain consecutive segment IDs in the range `0`
to `k`, where `k < d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six.moves
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import gen_state_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_math_ops import *
# Aliases for some automatically-generated names: expose the generated
# arg_max / arg_min / lin_space ops under their public API names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
  """Computes the element-wise absolute value of a tensor.

  For a real-valued tensor `x` this returns a tensor `y` where
  \\\\(y = |x|\\\\) for every element. A `complex64` input is dispatched to
  `tf.complex_abs()`, which returns the `float` magnitude of each complex
  element.

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, `int64`, or
      `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same size as `x` containing absolute values.
  """
  with ops.op_scope([x], name, "Abs") as name:
    x = ops.convert_to_tensor(x, name="x")
    # complex64 has no elementwise |x| kernel; use the complex magnitude op.
    op = (gen_math_ops.complex_abs if x.dtype == types.complex64
          else gen_math_ops._abs)
    return op(x, name=name)
def pow(x, y, name=None):
  """Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2]], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  # Pass both operands to op_scope so graph/name-scope inference sees `y` as
  # well as `x`, consistent with the other binary ops in this file
  # (e.g. `complex`, `truediv`, `floordiv`).
  with ops.op_scope([x, y], name, "Pow") as name:
    return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
  """Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation computes complex numbers elementwise of the form \\\\(a + bj\\\\),
  where *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must be the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor` of type `float`.
    imag: A `Tensor` of type `float`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64`.
  """
  with ops.op_scope([real, imag], name, "Complex") as name:
    return gen_math_ops._complex(real, imag, name=name)
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, -4.4]
  tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  tensor = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already whole numbers; return them untouched.
  if tensor.dtype.is_integer:
    return tensor
  # For floating-point inputs: shift by one half, then take the floor.
  return floor(tensor + 0.5, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  with ops.op_scope([x], name, "Cast") as name:
    if isinstance(x, ops.SparseTensor):
      # For sparse input only the values are cast; the indices and dense
      # shape are reused unchanged.
      values_cast = cast(x.values, dtype, name=name)
      return ops.SparseTensor(x.indices, values_cast, x.shape)
    # TODO(touts): Handle what Josh said.
    #
    # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
    # allows some conversions that cast() can't do, e.g. casting numbers to
    # strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype != dtype:
      return gen_math_ops.cast(x, dtype, name=name)
    # Already the requested dtype: casting would be a no-op.
    return x
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to `float32`.
  """
  return cast(x, types.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to `float64`.
  """
  return cast(x, types.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to `int32`.
  """
  return cast(x, types.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to `int64`.
  """
  return cast(x, types.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to `bfloat16`.
  """
  return cast(x, types.bfloat16, name=name)
# Hook Python's unary `-t` and `abs(t)` syntax up to the elementwise ops.
ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", logical_not)
def _OverrideBinaryOperatorHelper(func, op_name):
  """Register operators with different tensor and scalar versions.

  Installs both `__<op_name>__` and the reflected `__r<op_name>__` on
  `ops.Tensor`, so `tensor <op> value` and `value <op> tensor` both route to
  `func` after converting the non-Tensor operand to a Tensor of the same
  base dtype as the Tensor operand.

  Args:
    func: the operator
    op_name: name of the operator being overridden
  """
  def forward(x, y):
    # Tensor on the left: coerce the right operand to x's base dtype.
    with ops.op_scope([x, y], None, op_name) as name:
      assert isinstance(x, ops.Tensor)
      y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__%s__" % op_name, forward)
  del forward
  def reflected(y, x):
    # Tensor on the right (reflected form): coerce the left operand instead.
    with ops.op_scope([x, y], None, op_name) as name:
      assert isinstance(y, ops.Tensor)
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__r%s__" % op_name, reflected)
  del reflected
# Conversion table for __truediv__. None entries mean no conversion required.
# Small integer types are promoted to float32; 32/64-bit integers need float64
# to represent every value exactly (matching numpy's true-division behavior).
_TRUEDIV_TABLE = {
    types.uint8: types.float32,
    types.int8: types.float32,
    types.int16: types.float32,
    types.int32: types.float64,
    types.int64: types.float64,
    types.float32: None,
    types.float64: None,
    types.complex64: None,
}
def truediv(x, y, name=None):
  """Divides x / y elementwise, always producing floating point results.

  The same as `tf.div` for floating point arguments, but casts integer arguments
  to floating point before dividing so that the result is always floating point.
  This op is generated by normal `x / y` division in Python 3 and in Python 2.7
  with `from __future__ import division`. If you want integer division that
  rounds down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  with ops.op_scope([x, y], name, "truediv") as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    numerator_dtype = x.dtype.base_dtype
    denominator_dtype = y.dtype.base_dtype
    if numerator_dtype != denominator_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (numerator_dtype, denominator_dtype))
    if numerator_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % numerator_dtype)
    # A None table entry means the inputs are already floating point (or
    # complex) and need no promotion before dividing.
    target_dtype = _TRUEDIV_TABLE[numerator_dtype]
    if target_dtype is not None:
      x = cast(x, target_dtype)
      y = cast(y, target_dtype)
    return div(x, y, name=name)
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding down for floating point.

  The same as `tf.div(x,y)`, but uses `tf.floor(tf.div(x,y))` for floating
  point arguments so that the result is always an integer (though possibly an
  integer represented as floating point). This op is generated by `x // y`
  floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, __floordiv__ uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` numerator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly for integers in C).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.op_scope([x, y], name, "floordiv") as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    if dtype.is_integer:
      # Integer division truncates already (C semantics for negatives).
      return div(x, y, name=name)
    if dtype.is_floating:
      # Floating point: divide, then round toward negative infinity.
      return floor(div(x, y), name=name)
    raise TypeError("Expected floating point or integer, got %r" % dtype)
# Hook Python's binary arithmetic operators (+, -, *, /, //, %) on Tensor up
# to the corresponding elementwise ops (both forward and reflected forms).
_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(mod, "mod")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = logical_or(x, y)
  both = logical_and(x, y)
  return logical_and(either, logical_not(both), name=name)
# Boolean operators: `&`, `|`, and `^` on boolean tensors.
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# Comparison operators build elementwise comparison ops.
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
# NOTE: shadows the Python builtin `range` within this module; later helpers
# (e.g. _ReductionDims, _as_indexed_slices) rely on this op, not the builtin.
def range(start, limit, delta=1, name="range"):
  """Creates a sequence of integers.

  This operation creates a sequence of integers that begins at `start` and
  extends by increments of `delta` up to but not including `limit`.

  For example:

  ```
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  ```

  Args:
    start: A 0-D (scalar) of type `int32`. First entry in sequence.
    limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,
      exclusive.
    delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1.
      Number that increments `start`.
    name: A name for the operation (optional).

  Returns:
    An 1-D `int32` `Tensor`.
  """
  return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
  """Shape function for Range: a vector of ceil((limit - start) / delta)."""
  start_value = tensor_util.ConstantValue(op.inputs[0])
  limit_value = tensor_util.ConstantValue(op.inputs[1])
  delta_value = tensor_util.ConstantValue(op.inputs[2])
  # Unless all three inputs are constants the length is unknown.
  if start_value is None or limit_value is None or delta_value is None:
    return [tensor_shape.vector(None)]
  length = (limit_value - start_value + delta_value - 1) // delta_value
  return [tensor_shape.vector(length)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
  """Returns range(0, rank(x)) if reduction_indices is None."""
  if reduction_indices is None:
    # Reduce over every dimension: 0, 1, ..., rank - 1.  This is the
    # module-level `range` op, not the Python builtin.
    return range(0, array_ops.rank(x))
  return reduction_indices
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
                                                        reduction_indices),
                           keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1., 1. ]]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
                                                         reduction_indices),
                            keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
                                                         reduction_indices),
                            keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
                                                        reduction_indices),
                           keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
                                                        reduction_indices),
                           keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
                                                        reduction_indices),
                           keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `reduction_indices` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
                                                        reduction_indices),
                           keep_dims, name=name)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner dimensions,
  possibly after transposition.

  Both matrices must be of the same type. The supported types are:
  `float`, `double`, `int32`, `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding flag
  to `True`. This is `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  ```

  Args:
    a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    # The sparse kernel only supports float32; otherwise fall back to the
    # dense MatMul op even when a sparsity hint was given.
    use_sparse_kernel = (a_is_sparse or b_is_sparse) and a.dtype == types.float32
    if not use_sparse_kernel:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)
    return sparse_matmul(a, b,
                         transpose_a=transpose_a,
                         transpose_b=transpose_b,
                         a_is_sparse=a_is_sparse,
                         b_is_sparse=b_is_sparse,
                         name=name)
# Aliases for the generated sparse/batched matmul kernels.
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
# Both MatMul variants share the common 2-D matmul shape inference.
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
def _as_indexed_slices(x):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  # Dense tensor: represent it as slices covering every row (index 0..n-1).
  # Note `range` is the module-level op, not the Python builtin.
  x_shape = array_ops.shape(x)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  converted = [_as_indexed_slices(entry) for entry in inputs]
  int32_indices = [s.indices for s in converted
                   if s.indices.dtype == types.int32]
  # All indices already int32, or none are: nothing to homogenize.
  if not int32_indices or len(int32_indices) == len(converted):
    return converted
  # Mixed index dtypes: promote every int32 index tensor to int64.
  def _promote(slices):
    if slices.indices.dtype != types.int32:
      return slices
    return ops.IndexedSlices(slices.values,
                             cast(slices.indices, types.int64),
                             slices.dense_shape)
  return [_promote(s) for s in converted]
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if tensor_dtype is None:
    # No dtype given: validate the inputs and infer the dtype from them.
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    # No explicit shape: merge the static shapes of all Tensor inputs.
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    # Accumulate into a zero-initialized temporary variable, one locked
    # assign_add per input, then destroy the temporary to yield the sum.
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    # All updates must have run before the temporary is read and destroyed.
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
  """Shape function for BatchMatMul op.

  The leading dimensions of both inputs are batch dimensions and must merge;
  the trailing two dimensions behave like a 2-D matmul, honoring the
  adj_x/adj_y attributes.
  """
  x_shape = op.inputs[0].get_shape()
  y_shape = op.inputs[1].get_shape()
  adj_x = op.get_attr("adj_x")
  adj_y = op.get_attr("adj_y")
  if not (x_shape.is_fully_defined() and y_shape.is_fully_defined()):
    return [tensor_shape.unknown_shape()]
  batch_dims = x_shape[:-2].merge_with(y_shape[:-2])
  rows = x_shape[-1] if adj_x else x_shape[-2]
  cols = y_shape[-2] if adj_y else y_shape[-1]
  # The contracted dimensions must agree.
  inner_x = x_shape[-2] if adj_x else x_shape[-1]
  inner_y = y_shape[-1] if adj_y else y_shape[-2]
  inner_x.assert_is_compatible_with(inner_y)
  return [batch_dims.concatenate([rows, cols])]
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Sigmoid") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
      the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(tensor, name=name)
# Elementwise unary ops (and casts) produce output with the same shape as
# their input.
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs.

  Applies numpy-style broadcasting to the two input shapes, dimension by
  dimension from the right: a dimension of 1 broadcasts to the other input's
  dimension, equal dimensions pass through, and anything else is an error.
  Unknown dimensions are resolved optimistically (see inline comments).

  Returns:
    A one-element list with the broadcast `TensorShape`, or an unknown shape
    if either input's rank is unknown.

  Raises:
    ValueError: If the two shapes cannot be broadcast together.
  """
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  if shape_x.ndims is None or shape_y.ndims is None:
    return [tensor_shape.unknown_shape()]
  # To compute the broadcasted dimensions, we zip together shape_x and shape_y,
  # and pad with 1 to make them the same length.
  broadcasted_dims = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims),
      reversed(shape_y.dims),
      fillvalue=tensor_shape.Dimension(1))))
  # Next we combine the dimensions according to the numpy broadcasting rules.
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  return_dims = []
  for (dim_x, dim_y) in broadcasted_dims:
    if dim_x.value is None or dim_y.value is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if dim_x.value is not None and dim_x.value > 1:
        return_dims.append(dim_x)
      elif dim_y.value is not None and dim_y.value > 1:
        return_dims.append(dim_y)
      else:
        return_dims.append(None)
    elif dim_x.value == 1:
      # We will broadcast dim_x to dim_y.
      return_dims.append(dim_y)
    elif dim_y.value == 1:
      # We will broadcast dim_y to dim_x.
      return_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # The dimensions are compatible, so output is the same size in that
      # dimension.
      return_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return [tensor_shape.TensorShape(return_dims)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
  """Shape function for AddN: all inputs must have mergeable shapes."""
  result = tensor_shape.unknown_shape()
  for addend in op.inputs:
    result = result.merge_with(addend.get_shape())
  return [result]
@ops.RegisterShape("Select")
def _SelectShape(op):
  """Shape function for Select: condition, then, and else must all agree."""
  merged = op.inputs[0].get_shape()
  merged = merged.merge_with(op.inputs[1].get_shape())
  merged = merged.merge_with(op.inputs[2].get_shape())
  return [merged]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Common shape function for arg-reduction ops.

  The output shape is the input shape with the reduced dimension removed.
  """
  dimension_shape = op.inputs[1].get_shape()
  dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  if input_shape.ndims <= 1:
    # Scalars and vectors reduce to a scalar regardless of the dimension.
    return [tensor_shape.scalar()]
  dimension = tensor_util.ConstantValue(op.inputs[1])
  if dimension is None:
    # Unknown reduction axis: rank drops by one but dims are unknown.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  if not 0 <= dimension < input_shape.ndims:
    raise ValueError(
        "dimension (%d) must be in the range [0, %d), where %d is the number "
        "of dimensions in the input"
        % (dimension, input_shape.ndims, input_shape.ndims))
  remaining = [dim for i, dim in enumerate(input_shape.dims)
               if i != dimension]
  return [tensor_shape.TensorShape(remaining)]
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Common shape function for reduction ops.

  Reduced dimensions are dropped, or kept with length 1 when the op's
  `keep_dims` attribute is set.
  """
  input_shape = op.inputs[0].get_shape()
  reduction_indices = tensor_util.ConstantValue(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    # Without constant indices (or a known input rank) only the rank is
    # predictable, and only when the reduced dimensions are kept.
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    return [tensor_shape.unknown_shape()]
  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)
  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))
  if keep_dims:
    returned_dims = [1 if i in reduction_indices else dim
                     for i, dim in enumerate(input_shape.dims)]
  else:
    returned_dims = [dim for i, dim in enumerate(input_shape.dims)
                     if i not in reduction_indices]
  return [tensor_shape.TensorShape(returned_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops.

  The output has an unknown leading (segment) dimension followed by the
  non-leading dimensions of the data input.
  """
  data = op.inputs[0]
  segment_ids = op.inputs[1]
  # Segment ids must form a vector aligned with the first data dimension.
  segment_ids.get_shape().assert_has_rank(1)
  unknown_segments = tensor_shape.TensorShape([None])
  return [unknown_segments.concatenate(data.get_shape()[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
  """Common shape function for sparse segment reduction ops.

  Inputs are (data, indices, segment_ids); both index inputs must be
  rank-1 vectors of the same length.
  """
  data_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape()
  indices_shape.assert_has_rank(1)
  segment_ids_shape = op.inputs[2].get_shape()
  segment_ids_shape.assert_has_rank(1)
  indices_shape.assert_is_compatible_with(segment_ids_shape)
  # The number of segments is not statically known.
  leading = tensor_shape.TensorShape([None])
  return [leading.concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
def _SparseSegmentMeanGradShape(op):
  """Shape function for the SparseSegmentMeanGrad op."""
  grad_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  # Validation only: segment_ids must match indices, output_dim0 must be
  # a scalar.  The merged results themselves are not needed.
  op.inputs[2].get_shape().merge_with(indices_shape)
  op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
  output_dim0 = tensor_util.ConstantValue(op.inputs[3])
  dim0 = None if output_dim0 is None else output_dim0[0]
  return [tensor_shape.TensorShape([dim0]).concatenate(grad_shape[1:])]
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum."""
  data_shape = op.inputs[0].get_shape()
  id_rank = op.inputs[1].get_shape().ndims
  if id_rank is None:
    # Without the rank of segment_ids we cannot tell how many leading
    # data dimensions are replaced by the segment dimension.
    return [tensor_shape.unknown_shape()]
  num_segments = tensor_util.ConstantValue(op.inputs[2])
  return [tensor_shape.TensorShape([num_segments]).concatenate(
      data_shape[id_rank:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
  """Shape function for LinSpace: a vector of length ``num`` (input 2)."""
  return [tensor_shape.vector(tensor_util.ConstantValue(op.inputs[2]))]
| 32.49731 | 86 | 0.682419 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six.moves
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops.gen_math_ops import *
# Public aliases for the generated arg-max / arg-min / lin-space kernels.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
def abs(x, name=None):
  """Computes the absolute value of a tensor, element-wise.

  For complex64 input this is the complex magnitude.  Note: shadows the
  Python built-in ``abs`` inside this module.
  """
  with ops.op_scope([x], name, "Abs") as name:
    x = ops.convert_to_tensor(x, name="x")
    # Complex tensors require the dedicated magnitude kernel.
    is_complex = x.dtype == types.complex64
    kernel = gen_math_ops.complex_abs if is_complex else gen_math_ops._abs
    return kernel(x, name=name)
def pow(x, y, name=None):
  """Computes ``x`` raised to the power ``y``, element-wise.

  Args:
    x: base tensor.
    y: exponent tensor.
    name: optional name for the op.

  Returns:
    A Tensor holding x ** y.
  """
  # Pass BOTH operands to op_scope (the original passed only [x]), so
  # conversion/scoping sees every input — consistent with `complex`,
  # `matmul` and the binary-operator wrappers in this module.
  with ops.op_scope([x, y], name, "Pow") as name:
    return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
  """Builds a complex tensor from real and imaginary parts.

  Note: shadows the Python built-in ``complex`` inside this module.
  """
  with ops.op_scope([real, imag], name, "Complex") as scope_name:
    return gen_math_ops._complex(real, imag, name=scope_name)
def round(x, name=None):
  """Rounds to the nearest integer as floor(x + 0.5), halves rounding up.

  Integer tensors are returned unchanged.  Note this differs from
  Python's built-in banker's rounding, and shadows built-in ``round``.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  return floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
  """Casts ``x`` (Tensor or SparseTensor) to ``dtype``.

  A SparseTensor has only its values cast — indices and shape are
  structural.  A dense tensor already of the requested base dtype is
  returned unchanged.
  """
  with ops.op_scope([x], name, "Cast") as name:
    if isinstance(x, ops.SparseTensor):
      return ops.SparseTensor(x.indices,
                              cast(x.values, dtype, name=name),
                              x.shape)
    # strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == dtype:
      return x
    return gen_math_ops.cast(x, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts ``x`` to float32."""
  return cast(x, types.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts ``x`` to float64."""
  return cast(x, types.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts ``x`` to int32."""
  return cast(x, types.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts ``x`` to int64."""
  return cast(x, types.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts ``x`` to bfloat16."""
  return cast(x, types.bfloat16, name=name)
# Unary operator overloads on Tensor: -t and abs(t).
ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", logical_not)
def _OverrideBinaryOperatorHelper(func, op_name):
  """Installs ``__<op_name>__`` and ``__r<op_name>__`` on Tensor.

  Args:
    func: binary function taking (x, y, name=...) and returning a Tensor.
    op_name: operator suffix, e.g. "add" installs __add__ / __radd__.
  """
  def binary_op_wrapper(x, y):
    with ops.op_scope([x, y], None, op_name) as name:
      # The left operand is always a Tensor here; coerce the right
      # operand to the same base dtype.
      assert isinstance(x, ops.Tensor)
      y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
  del binary_op_wrapper
  def r_binary_op_wrapper(y, x):
    # Reflected form: the Tensor arrives first (as `y`), the foreign
    # left operand second, but `func` is still called as func(x, y).
    with ops.op_scope([x, y], None, op_name) as name:
      assert isinstance(y, ops.Tensor)
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
  del r_binary_op_wrapper
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer operands are promoted to a float type wide enough to hold them, so
# that division never truncates.
_TRUEDIV_TABLE = {
    types.uint8: types.float32,
    types.int8: types.float32,
    types.int16: types.float32,
    types.int32: types.float64,
    types.int64: types.float64,
    types.float32: None,
    types.float64: None,
    types.complex64: None,
}
def truediv(x, y, name=None):
  """Python-3-style (true) division of ``x`` by ``y``.

  Integer operands are first promoted per ``_TRUEDIV_TABLE`` so the
  result is always floating point (or complex).

  Raises:
    TypeError: if the operand dtypes differ or are not in the table.
  """
  with ops.op_scope([x, y], name, "truediv") as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    promoted = _TRUEDIV_TABLE[x_dtype]
    if promoted is not None:
      x = cast(x, promoted)
      y = cast(y, promoted)
    return div(x, y, name=name)
def floordiv(x, y, name=None):
  """Division rounding toward negative infinity for floats.

  Integer inputs use the kernel's native division; any other dtype is
  rejected.
  """
  with ops.op_scope([x, y], name, "floordiv") as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    # Guard-clause form: floats take the floor of a true division.
    if dtype.is_floating:
      return floor(div(x, y), name=name)
    if not dtype.is_integer:
      raise TypeError("Expected floating point or integer, got %r" % dtype)
    return div(x, y, name=name)
# Arithmetic operator overloads: t + u, t - u, t * u, t / u, t // u, t % u.
_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(mod, "mod")
def logical_xor(x, y, name="LogicalXor"):
  """Element-wise logical XOR, composed as (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = logical_or(x, y)
  both = logical_and(x, y)
  return logical_and(either, logical_not(both), name=name)
# Boolean operator overloads (t & u, t | u, t ^ u) ...
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# ... and comparison overloads (t < u, t <= u, t > u, t >= u).
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
def range(start, limit, delta=1, name="range"):
  """Creates a sequence of integers [start, limit) stepping by `delta`.

  Note: shadows the Python built-in ``range`` inside this module.
  """
  return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
  """Shape function for Range: a vector of ceil((limit - start) / delta)."""
  values = [tensor_util.ConstantValue(op.inputs[i]) for i in (0, 1, 2)]
  if any(v is None for v in values):
    # Length cannot be computed without all three constants.
    return [tensor_shape.vector(None)]
  start_value, limit_value, delta_value = values
  length = (limit_value - start_value + delta_value - 1) // delta_value
  return [tensor_shape.vector(length)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
  """Returns the explicit reduction axes, defaulting to all axes of x."""
  if reduction_indices is not None:
    return reduction_indices
  # Reduce over every dimension: [0, rank).
  return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Sums elements across `reduction_indices` (all dimensions if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Averages elements across `reduction_indices` (all dimensions if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Multiplies elements across `reduction_indices` (all dims if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Takes the minimum across `reduction_indices` (all dims if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Takes the maximum across `reduction_indices` (all dims if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Logical AND across `reduction_indices` (all dims if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Logical OR across `reduction_indices` (all dims if None)."""
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix ``a`` by matrix ``b``.

  Routes to the sparse kernel when either operand is flagged sparse and
  the dtype is float32; otherwise uses the dense MatMul kernel.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    use_sparse_kernel = (a_is_sparse or b_is_sparse) and a.dtype == types.float32
    if use_sparse_kernel:
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    return gen_math_ops._mat_mul(a, b,
                                 transpose_a=transpose_a,
                                 transpose_b=transpose_b,
                                 name=name)
# Aliases for the generated sparse/batch matmul kernels, plus their
# shape functions (both dense and sparse matmul share matmul_shape).
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
def _as_indexed_slices(x):
  """Wraps a Tensor as IndexedSlices covering every row.

  IndexedSlices inputs pass through unchanged; anything else raises.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape(x)
  # A dense tensor is equivalent to slices at indices [0, num_rows).
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
  """Converts each input to IndexedSlices, unifying index dtypes.

  If the converted slices mix int32 and int64 indices, the int32 ones
  are upcast to int64 so every output agrees.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i) for i in inputs]
  num_int32 = sum(1 for o in outputs if o.indices.dtype == types.int32)
  if num_int32 == 0 or num_int32 == len(outputs):
    # Index dtypes are already homogeneous; nothing to reconcile.
    return outputs
  def _to_int64(o):
    if o.indices.dtype != types.int32:
      return o
    return ops.IndexedSlices(o.values, cast(o.indices, types.int64),
                             o.dense_shape)
  return [_to_int64(o) for o in outputs]
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  The sum is built up in a temporary variable via locked assign_add ops,
  then the variable is destroyed to yield the result.  The accumulator
  shape must therefore be fully defined, either via `shape` or inferable
  from the inputs.

  Args:
    inputs: list of Tensors, all with the same shape and dtype.
    shape: expected shape of the inputs (optional when inferable).
    tensor_dtype: expected dtype of the inputs; inferred (and validated)
      from the inputs when None.
    name: optional op name.

  Returns:
    A Tensor holding the element-wise sum of `inputs`.

  Raises:
    ValueError: if `inputs` is empty or not a list/tuple, elements are
      not Tensors of one dtype, or the accumulator shape cannot be
      fully inferred.
  """
  # Validate the inputs and infer tensor_dtype only when not supplied.
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  # Merge the caller-provided shape with the static shapes of all inputs.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    # Zero-initialize a temporary variable, add each input into it under
    # a lock, and destroy the variable to produce the final sum.
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
  """Shape function for BatchMatMul: batch dims plus the matmul result.

  The adjoint attrs swap which of the last two dimensions plays the
  row/column role for each operand.
  """
  a_shape = op.inputs[0].get_shape()
  adj_a = op.get_attr("adj_x")
  b_shape = op.inputs[1].get_shape()
  adj_b = op.get_attr("adj_y")
  if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():
    return [tensor_shape.unknown_shape()]
  # Leading (batch) dimensions of both operands must agree.
  batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
  output_rows = a_shape[-1] if adj_a else a_shape[-2]
  output_cols = b_shape[-2] if adj_b else b_shape[-1]
  # The contracted (inner) dimensions must be compatible.
  inner_a = a_shape[-2] if adj_a else a_shape[-1]
  inner_b = b_shape[-1] if adj_b else b_shape[-2]
  inner_a.assert_is_compatible_with(inner_b)
  return [batch_dims.concatenate([output_rows, output_cols])]
def sigmoid(x, name=None):
  """Computes the sigmoid of ``x`` element-wise."""
  with ops.op_scope([x], name, "Sigmoid") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=name)
def tanh(x, name=None):
  """Computes the hyperbolic tangent of ``x`` element-wise."""
  with ops.op_scope([x], name, "Tanh") as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(tensor, name=name)
# Unary cwise ops produce an output with the same shape as their input.
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
  """Common shape function for binary cwise ops with numpy broadcasting."""
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  if shape_x.ndims is None or shape_y.ndims is None:
    return [tensor_shape.unknown_shape()]
  # To compute the broadcasted dimensions, we zip together shape_x and shape_y,
  # and pad with 1 to make them the same length.
  broadcasted_dims = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims),
      reversed(shape_y.dims),
      fillvalue=tensor_shape.Dimension(1))))
  # Next we combine the dimensions according to the numpy broadcasting rules.
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  return_dims = []
  for (dim_x, dim_y) in broadcasted_dims:
    if dim_x.value is None or dim_y.value is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if dim_x.value is not None and dim_x.value > 1:
        return_dims.append(dim_x)
      elif dim_y.value is not None and dim_y.value > 1:
        return_dims.append(dim_y)
      else:
        return_dims.append(None)
    elif dim_x.value == 1:
      # We will broadcast dim_x to dim_y.
      return_dims.append(dim_y)
    elif dim_y.value == 1:
      # We will broadcast dim_y to dim_x.
      return_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # The dimensions are compatible, so output is the same size in that
      # dimension.
      return_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return [tensor_shape.TensorShape(return_dims)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
  """Shape function for AddN: all inputs (and the output) share one shape."""
  shape = tensor_shape.unknown_shape()
  for tensor in op.inputs:
    shape = shape.merge_with(tensor.get_shape())
  return [shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
  """Shape function for Select: cond, t and e must all agree in shape."""
  merged = op.inputs[0].get_shape()
  merged = merged.merge_with(op.inputs[1].get_shape())
  merged = merged.merge_with(op.inputs[2].get_shape())
  return [merged]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Common shape function for ArgMax/ArgMin.

  The output drops the reduced dimension; inputs of rank <= 1 yield a
  scalar.  Raises ValueError for an out-of-range constant dimension.
  """
  dimension_shape = op.inputs[1].get_shape()
  dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  elif input_shape.ndims <= 1:
    return [tensor_shape.scalar()]
  dimension = tensor_util.ConstantValue(op.inputs[1])
  if dimension is None:
    # Dimension unknown statically: only the rank (one less) is known.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  elif 0 <= dimension < input_shape.ndims:
    # Idiomatic chained comparison and comprehension: keep every
    # dimension except the reduced one.
    returned_shape = [dim for i, dim in enumerate(input_shape.dims)
                      if i != dimension]
    return [tensor_shape.TensorShape(returned_shape)]
  else:
    raise ValueError(
        "dimension (%d) must be in the range [0, %d), where %d is the number "
        "of dimensions in the input"
        % (dimension, input_shape.ndims, input_shape.ndims))
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Common shape function for reduction ops.

  Keeps a size-1 entry for each reduced axis when the ``keep_dims`` attr
  is set; drops the reduced axes otherwise.
  """
  input_shape = op.inputs[0].get_shape()
  # The reduction axes must be statically known to compute an exact shape.
  reduction_indices = tensor_util.ConstantValue(op.inputs[1])
  keep_dims = op.get_attr("keep_dims")
  if reduction_indices is None or input_shape.ndims is None:
    if keep_dims:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    else:
      return [tensor_shape.unknown_shape()]
  # Turn reduction_indices from scalar to vector if necessary
  reduction_indices = np.ravel(reduction_indices)
  for reduction_index in reduction_indices:
    if reduction_index < 0 or reduction_index >= input_shape.ndims:
      raise ValueError("Invalid reduction dimension %d for input with %d "
                       "dimensions" % (reduction_index, input_shape.ndims))
  returned_dims = []
  if keep_dims:
    for i, dim in enumerate(input_shape.dims):
      if i in reduction_indices:
        # Reduced dimensions collapse to size 1 when kept.
        returned_dims.append(1)
      else:
        returned_dims.append(dim)
  else:
    for i, dim in enumerate(input_shape.dims):
      if i not in reduction_indices:
        returned_dims.append(dim)
  return [tensor_shape.TensorShape(returned_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
  """Common shape function for segment reduction ops.

  The output has an unknown leading (segment) dimension followed by the
  non-leading dimensions of the data input.
  """
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  # Segment ids must form a vector aligned with the first data dimension.
  segment_ids_shape.assert_has_rank(1)
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
  """Common shape function for sparse segment reduction ops.

  Inputs are (data, indices, segment_ids); both index inputs must be
  rank-1 vectors of the same length.
  """
  data_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape()
  indices_shape.assert_has_rank(1)
  segment_ids_shape = op.inputs[2].get_shape()
  segment_ids_shape.assert_has_rank(1)
  indices_shape.assert_is_compatible_with(segment_ids_shape)
  # The number of segments is not statically known.
  return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
def _SparseSegmentMeanGradShape(op):
  """Shape function for the SparseSegmentMeanGrad op.

  The output's leading dimension is the constant output_dim0 (input 3)
  when known; the remaining dimensions follow the gradient input.
  """
  input_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  # Validation only: segment_ids must match indices; output_dim0 must be
  # a scalar.
  unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
  unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
      tensor_shape.scalar())
  output_dim0 = tensor_util.ConstantValue(op.inputs[3])
  if output_dim0 is not None:
    dim0 = output_dim0[0]
  else:
    dim0 = None
  return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum.

  The first `rank(segment_ids)` data dimensions are replaced by a single
  segment dimension of size num_segments (input 2), when constant.
  """
  data_shape = op.inputs[0].get_shape()
  segment_ids_shape = op.inputs[1].get_shape()
  mid = segment_ids_shape.ndims
  if mid is None:
    # Unknown segment_ids rank: cannot tell how many dims are replaced.
    return [tensor_shape.unknown_shape()]
  else:
    num_segments = tensor_util.ConstantValue(op.inputs[2])
    return [tensor_shape.TensorShape([num_segments]).concatenate(
        data_shape[mid:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
  """Shape function for LinSpace: a vector of length ``num`` (input 2)."""
  num = tensor_util.ConstantValue(op.inputs[2])
  return [tensor_shape.vector(num)]
| true | true |
f728a2d500d44d3bb874d2b540afb4feaafd646c | 12,370 | py | Python | src/sagemaker/sklearn/estimator.py | anirudh2290/sagemaker-python-sdk | 5b15f3006efe90fbba43da7841ff5f0ad790a78e | [
"Apache-2.0"
] | null | null | null | src/sagemaker/sklearn/estimator.py | anirudh2290/sagemaker-python-sdk | 5b15f3006efe90fbba43da7841ff5f0ad790a78e | [
"Apache-2.0"
] | 1 | 2019-04-23T19:32:17.000Z | 2019-04-23T19:32:17.000Z | src/sagemaker/sklearn/estimator.py | anirudh2290/sagemaker-python-sdk | 5b15f3006efe90fbba43da7841ff5f0ad790a78e | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import logging
from sagemaker import image_uris
from sagemaker.deprecations import renamed_kwargs
from sagemaker.estimator import Framework
from sagemaker.fw_utils import (
framework_name_from_image,
framework_version_from_tag,
validate_version_or_image_args,
)
from sagemaker.sklearn import defaults
from sagemaker.sklearn.model import SKLearnModel
from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
logger = logging.getLogger("sagemaker")
class SKLearn(Framework):
    """Handle end-to-end training and deployment of custom Scikit-learn code."""

    # Framework key used to look up official SKLearn container images.
    _framework_name = defaults.SKLEARN_NAME

    def __init__(
        self,
        entry_point,
        framework_version=None,
        py_version="py3",
        source_dir=None,
        hyperparameters=None,
        image_uri=None,
        **kwargs
    ):
        """This ``Estimator`` executes an Scikit-learn script in a managed
        Scikit-learn execution environment, within a SageMaker Training Job. The
        managed Scikit-learn environment is an Amazon-built Docker container
        that executes functions defined in the supplied ``entry_point`` Python
        script.

        Training is started by calling
        :meth:`~sagemaker.amazon.estimator.Framework.fit` on this Estimator.
        After training is complete, calling
        :meth:`~sagemaker.amazon.estimator.Framework.deploy` creates a hosted
        SageMaker endpoint and returns an
        :class:`~sagemaker.amazon.sklearn.model.SKLearnPredictor` instance that
        can be used to perform inference against the hosted model.

        Technical documentation on preparing Scikit-learn scripts for
        SageMaker training and using the Scikit-learn Estimator is available on
        the project home-page: https://github.com/aws/sagemaker-python-sdk

        Args:
            entry_point (str): Path (absolute or relative) to the Python source
                file which should be executed as the entry point to training.
                If ``source_dir`` is specified, then ``entry_point``
                must point to a file located at the root of ``source_dir``.
            framework_version (str): Scikit-learn version you want to use for
                executing your model training code. Defaults to ``None``. Required
                unless ``image_uri`` is provided. List of supported versions:
                https://github.com/aws/sagemaker-python-sdk#sklearn-sagemaker-estimators
            py_version (str): Python version you want to use for executing your
                model training code (default: 'py3'). Currently, 'py3' is the only
                supported version. If ``None`` is passed in, ``image_uri`` must be
                provided.
            source_dir (str): Path (absolute, relative or an S3 URI) to a directory
                with any other training source code dependencies aside from the entry
                point file (default: None). If ``source_dir`` is an S3 URI, it must
                point to a tar.gz file. Structure within this directory are preserved
                when training on Amazon SageMaker.
            hyperparameters (dict): Hyperparameters that will be used for
                training (default: None). The hyperparameters are made
                accessible as a dict[str, str] to the training code on
                SageMaker. For convenience, this accepts other types for keys
                and values, but ``str()`` will be called to convert them before
                training.
            image_uri (str): If specified, the estimator will use this image
                for training and hosting, instead of selecting the appropriate
                SageMaker official image based on framework_version and
                py_version. It can be an ECR url or dockerhub image and tag.

                Examples:
                    123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0
                    custom-image:latest.

                If ``framework_version`` or ``py_version`` are ``None``, then
                ``image_uri`` is required. If also ``None``, then a ``ValueError``
                will be raised.
            **kwargs: Additional kwargs passed to the
                :class:`~sagemaker.estimator.Framework` constructor.

        .. tip::

            You can find additional parameters for initializing this class at
            :class:`~sagemaker.estimator.Framework` and
            :class:`~sagemaker.estimator.EstimatorBase`.
        """
        # Accept the deprecated pre-v2 kwarg names (train_instance_*) and
        # normalize them inside `kwargs`.
        instance_type = renamed_kwargs(
            "train_instance_type", "instance_type", kwargs.get("instance_type"), kwargs
        )
        instance_count = renamed_kwargs(
            "train_instance_count", "instance_count", kwargs.get("instance_count"), kwargs
        )
        validate_version_or_image_args(framework_version, py_version, image_uri)
        if py_version and py_version != "py3":
            raise AttributeError(
                "Scikit-learn image only supports Python 3. Please use 'py3' for py_version."
            )
        self.framework_version = framework_version
        self.py_version = py_version
        # SciKit-Learn does not support distributed training or training on GPU instance types.
        # Fail fast.
        _validate_not_gpu_instance_type(instance_type)
        if instance_count:
            if instance_count != 1:
                raise AttributeError(
                    "Scikit-Learn does not support distributed training. Please remove the "
                    "'instance_count' argument or set 'instance_count=1' when initializing SKLearn."
                )
        # Single-instance training only: instance_count is forced to 1 in
        # the kwargs forwarded to the base Framework constructor.
        super(SKLearn, self).__init__(
            entry_point,
            source_dir,
            hyperparameters,
            image_uri=image_uri,
            **dict(kwargs, instance_count=1)
        )
        if image_uri is None:
            # Resolve the official SKLearn container for this region/version.
            self.image_uri = image_uris.retrieve(
                SKLearn._framework_name,
                self.sagemaker_session.boto_region_name,
                version=self.framework_version,
                py_version=self.py_version,
                instance_type=instance_type,
            )

    def create_model(
        self,
        model_server_workers=None,
        role=None,
        vpc_config_override=VPC_CONFIG_DEFAULT,
        entry_point=None,
        source_dir=None,
        dependencies=None,
        **kwargs
    ):
        """Create a SageMaker ``SKLearnModel`` object that can be deployed to an
        ``Endpoint``.

        Args:
            model_server_workers (int): Optional. The number of worker processes
                used by the inference server. If None, server will use one
                worker per vCPU.
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
                which is also used during transform jobs. If not specified, the
                role from the Estimator will be used.
            vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on
                the model. Default: use subnets and security groups from this Estimator.

                * 'Subnets' (list[str]): List of subnet ids.
                * 'SecurityGroupIds' (list[str]): List of security group ids.

            entry_point (str): Path (absolute or relative) to the local Python source file which
                should be executed as the entry point to training. If ``source_dir`` is specified,
                then ``entry_point`` must point to a file located at the root of ``source_dir``.
                If not specified, the training entry point is used.
            source_dir (str): Path (absolute or relative) to a directory with any other serving
                source code dependencies aside from the entry point file.
                If not specified, the model source directory from training is used.
            dependencies (list[str]): A list of paths to directories (absolute or relative) with
                any additional libraries that will be exported to the container.
                If not specified, the dependencies from training are used.
                This is not supported with "local code" in Local Mode.
            **kwargs: Additional kwargs passed to the :class:`~sagemaker.sklearn.model.SKLearnModel`
                constructor.

        Returns:
            sagemaker.sklearn.model.SKLearnModel: A SageMaker ``SKLearnModel``
            object. See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.
        """
        role = role or self.role
        kwargs["name"] = self._get_or_create_name(kwargs.get("name"))
        # Inherit the training image and network-isolation setting unless
        # the caller explicitly overrides them.
        if "image_uri" not in kwargs:
            kwargs["image_uri"] = self.image_uri
        if "enable_network_isolation" not in kwargs:
            kwargs["enable_network_isolation"] = self.enable_network_isolation()
        return SKLearnModel(
            self.model_data,
            role,
            entry_point or self._model_entry_point(),
            source_dir=(source_dir or self._model_source_dir()),
            container_log_level=self.container_log_level,
            code_location=self.code_location,
            py_version=self.py_version,
            framework_version=self.framework_version,
            model_server_workers=model_server_workers,
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override),
            dependencies=(dependencies or self.dependencies),
            **kwargs
        )

    @classmethod
    def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
        """Convert the job description to init params that can be handled by the
        class constructor

        Args:
            job_details: the returned job details from a describe_training_job
                API call.
            model_channel_name (str): Name of the channel where pre-trained
                model data will be downloaded (default: None).

        Returns:
            dictionary: The transformed init_params
        """
        init_params = super(SKLearn, cls)._prepare_init_params_from_job_description(
            job_details, model_channel_name
        )
        # Recover framework/py_version from the training image tag.
        image_uri = init_params.pop("image_uri")
        framework, py_version, tag, _ = framework_name_from_image(image_uri)
        if tag is None:
            framework_version = None
        else:
            framework_version = framework_version_from_tag(tag)
        init_params["framework_version"] = framework_version
        init_params["py_version"] = py_version
        if not framework:
            # If we were unable to parse the framework name from the image it is not one of our
            # officially supported images, in this case just add the image to the init params.
            init_params["image_uri"] = image_uri
            return init_params
        if framework and framework != "scikit-learn":
            raise ValueError(
                "Training job: {} didn't use image for requested framework".format(
                    job_details["TrainingJobName"]
                )
            )
        return init_params
def _validate_not_gpu_instance_type(training_instance_type):
"""
Args:
training_instance_type:
"""
gpu_instance_types = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
if training_instance_type in gpu_instance_types:
raise ValueError(
"GPU training in not supported for Scikit-Learn. "
"Please pick a different instance type from here: "
"https://aws.amazon.com/ec2/instance-types/"
)
| 43.710247 | 100 | 0.637995 |
from __future__ import absolute_import
import logging
from sagemaker import image_uris
from sagemaker.deprecations import renamed_kwargs
from sagemaker.estimator import Framework
from sagemaker.fw_utils import (
framework_name_from_image,
framework_version_from_tag,
validate_version_or_image_args,
)
from sagemaker.sklearn import defaults
from sagemaker.sklearn.model import SKLearnModel
from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
logger = logging.getLogger("sagemaker")
class SKLearn(Framework):
    """SageMaker ``Framework`` estimator for running Scikit-learn scripts.

    Constraints enforced below: Python 3 only, exactly one (non-GPU)
    training instance, and either an explicit ``image_uri`` or an official
    scikit-learn container resolved via ``image_uris.retrieve``.
    """

    # Framework identifier used for image lookup and job attachment.
    _framework_name = defaults.SKLEARN_NAME

    def __init__(
        self,
        entry_point,
        framework_version=None,
        py_version="py3",
        source_dir=None,
        hyperparameters=None,
        image_uri=None,
        **kwargs
    ):
        """Create a ``SKLearn`` estimator.

        Args:
            entry_point (str): Path to the Python script executed as the
                training entry point.
            framework_version (str): Scikit-learn version of the container.
                May be ``None`` only when ``image_uri`` is provided
                (checked by ``validate_version_or_image_args``).
            py_version (str): Python version; only ``"py3"`` is accepted.
            source_dir (str): Directory of supporting code uploaded with
                the entry point.
            hyperparameters (dict): Hyperparameters for the training script.
            image_uri (str): Custom container image; skips the automatic
                image lookup when given.
            **kwargs: Forwarded to ``Framework.__init__``. Deprecated
                ``train_instance_type``/``train_instance_count`` names are
                translated in place by ``renamed_kwargs``.

        Raises:
            AttributeError: If ``py_version`` is not ``"py3"``, or if
                ``instance_count`` is set to anything other than 1
                (distributed training is unsupported).
        """
        # Translate deprecated kwarg names (mutates ``kwargs`` in place).
        instance_type = renamed_kwargs(
            "train_instance_type", "instance_type", kwargs.get("instance_type"), kwargs
        )
        instance_count = renamed_kwargs(
            "train_instance_count", "instance_count", kwargs.get("instance_count"), kwargs
        )
        validate_version_or_image_args(framework_version, py_version, image_uri)
        if py_version and py_version != "py3":
            raise AttributeError(
                "Scikit-learn image only supports Python 3. Please use 'py3' for py_version."
            )
        self.framework_version = framework_version
        self.py_version = py_version
        # Scikit-learn cannot use GPUs; reject p2/p3 instance types early.
        _validate_not_gpu_instance_type(instance_type)
        if instance_count:
            if instance_count != 1:
                raise AttributeError(
                    "Scikit-Learn does not support distributed training. Please remove the "
                    "'instance_count' argument or set 'instance_count=1' when initializing SKLearn."
                )
        # Force a single instance regardless of what was passed through.
        super(SKLearn, self).__init__(
            entry_point,
            source_dir,
            hyperparameters,
            image_uri=image_uri,
            **dict(kwargs, instance_count=1)
        )
        if image_uri is None:
            # Resolve the official scikit-learn image for this region/version.
            self.image_uri = image_uris.retrieve(
                SKLearn._framework_name,
                self.sagemaker_session.boto_region_name,
                version=self.framework_version,
                py_version=self.py_version,
                instance_type=instance_type,
            )

    def create_model(
        self,
        model_server_workers=None,
        role=None,
        vpc_config_override=VPC_CONFIG_DEFAULT,
        entry_point=None,
        source_dir=None,
        dependencies=None,
        **kwargs
    ):
        """Create a ``SKLearnModel`` for deployment from this estimator.

        Args:
            model_server_workers (int): Number of model-server worker
                processes (passed through to ``SKLearnModel``).
            role (str): IAM role for the model; defaults to the
                estimator's role.
            vpc_config_override: VPC configuration override; defaults to
                reusing the estimator's VPC config.
            entry_point (str): Inference entry point; defaults to the
                training entry point.
            source_dir (str): Source directory for inference code;
                defaults to the training source directory.
            dependencies (list): Extra dependency paths; defaults to the
                estimator's dependencies.
            **kwargs: Additional ``SKLearnModel`` constructor arguments.
                ``image_uri`` and ``enable_network_isolation`` are filled
                from the estimator when absent.

        Returns:
            SKLearnModel: A model object configured from this estimator.
        """
        role = role or self.role
        kwargs["name"] = self._get_or_create_name(kwargs.get("name"))
        # Inherit the estimator's image/isolation settings unless overridden.
        if "image_uri" not in kwargs:
            kwargs["image_uri"] = self.image_uri
        if "enable_network_isolation" not in kwargs:
            kwargs["enable_network_isolation"] = self.enable_network_isolation()
        return SKLearnModel(
            self.model_data,
            role,
            entry_point or self._model_entry_point(),
            source_dir=(source_dir or self._model_source_dir()),
            container_log_level=self.container_log_level,
            code_location=self.code_location,
            py_version=self.py_version,
            framework_version=self.framework_version,
            model_server_workers=model_server_workers,
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override),
            dependencies=(dependencies or self.dependencies),
            **kwargs
        )

    @classmethod
    def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
        """Convert the job description to init params that can be handled by the
        class constructor

        Args:
            job_details: the returned job details from a describe_training_job
                API call.
            model_channel_name (str): Name of the channel where pre-trained
                model data will be downloaded (default: None).

        Returns:
            dictionary: The transformed init_params
        """
        init_params = super(SKLearn, cls)._prepare_init_params_from_job_description(
            job_details, model_channel_name
        )
        image_uri = init_params.pop("image_uri")
        framework, py_version, tag, _ = framework_name_from_image(image_uri)
        # Without an image tag the framework version cannot be recovered.
        if tag is None:
            framework_version = None
        else:
            framework_version = framework_version_from_tag(tag)
        init_params["framework_version"] = framework_version
        init_params["py_version"] = py_version
        if not framework:
            # If we were unable to parse the framework name from the image it
            # is not one of our officially supported images; in this case
            # just add the image to the init params.
            init_params["image_uri"] = image_uri
            return init_params
        # ``framework`` is always truthy here, so the ``framework and`` part
        # of this condition is redundant (kept byte-identical in this pass).
        if framework and framework != "scikit-learn":
            raise ValueError(
                "Training job: {} didn't use image for requested framework".format(
                    job_details["TrainingJobName"]
                )
            )
        return init_params
def _validate_not_gpu_instance_type(training_instance_type):
gpu_instance_types = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
if training_instance_type in gpu_instance_types:
raise ValueError(
"GPU training in not supported for Scikit-Learn. "
"Please pick a different instance type from here: "
"https://aws.amazon.com/ec2/instance-types/"
)
| true | true |
f728a40684ccb11af39383987c4d9ec79805e783 | 11,323 | py | Python | src/loss/perceptual_similarity/dist_model.py | markveillette/high-fidelity-generative-compression | d88b4d7f1212efa8611e91737ff6bf00bbf36670 | [
"Apache-2.0"
] | 266 | 2020-08-25T00:04:58.000Z | 2022-03-31T06:41:03.000Z | src/loss/perceptual_similarity/dist_model.py | markveillette/high-fidelity-generative-compression | d88b4d7f1212efa8611e91737ff6bf00bbf36670 | [
"Apache-2.0"
] | 27 | 2020-09-01T21:04:27.000Z | 2022-03-22T02:24:48.000Z | src/loss/perceptual_similarity/dist_model.py | markveillette/high-fidelity-generative-compression | d88b4d7f1212efa8611e91737ff6bf00bbf36670 | [
"Apache-2.0"
] | 50 | 2020-08-28T02:11:46.000Z | 2022-02-25T02:44:42.000Z |
from __future__ import absolute_import
import sys
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from tqdm import tqdm
from . import networks_basic as networks
from . import perceptual_loss
class DistModel(BaseModel):
    """LPIPS-style perceptual distance model.

    Wraps one of several distance "networks" (linearly calibrated deep
    features, raw deep features, L2, or SSIM) behind a single interface,
    plus the training machinery (ranking loss, optimizer, accuracy) used
    to fit the calibration to human 2AFC judgments.
    """

    def name(self):
        """Return the model identifier, e.g. ``'net-lin [alex]'``."""
        return self.model_name

    def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
            use_gpu=True, printNet=False, spatial=False,
            is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=None):
        '''
        INPUTS
            model - ['net-lin'] for linearly calibrated network
                    ['net'] for off-the-shelf network
                    ['L2'] for L2 distance in Lab colorspace
                    ['SSIM'] for ssim in RGB colorspace
            net - ['squeeze','alex','vgg']
            model_path - if None, will look in weights/[NET_NAME].pth
            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
            use_gpu - bool - whether or not to use a GPU
            printNet - bool - whether or not to print network architecture out
            spatial - bool - whether to output an array containing varying distances across spatial dimensions
            is_train - bool - [True] for training mode
            lr - float - initial learning rate
            beta1 - float - initial momentum term for adam
            version - 0.1 for latest, 0.0 was original (with a bug)
            gpu_ids - int array - [0] by default, gpus to use
        '''
        # Fixed mutable default argument (was ``gpu_ids=[0]``); behavior for
        # callers relying on the default is unchanged.
        if gpu_ids is None:
            gpu_ids = [0]
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)

        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = '%s [%s]' % (model, net)

        if self.model == 'net-lin':  # pretrained net + linear calibration layer
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
                                        use_dropout=True, spatial=spatial, version=version, lpips=True)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if model_path is None:
                import inspect
                # Default weights ship next to this module under weights/v<version>/.
                model_path = os.path.abspath(
                    os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth' % (version, net)))

            if not is_train:
                print('Loading model from: %s' % model_path)
                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
        elif self.model == 'net':  # pretrained network without calibration
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif self.model in ['L2', 'l2']:
            # Not really a network, only for testing/baselines.
            self.net = networks.L2(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'L2'
        elif self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']:
            self.net = networks.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError("Model [%s] not recognized." % self.model)

        self.parameters = list(self.net.parameters())

        if self.is_train:  # training mode
            # Extra network on top to go from distances (d0, d1) to a
            # predicted human judgment (h*).
            self.rankLoss = networks.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else:  # test mode
            self.net.eval()

        if use_gpu:
            self.net.to(gpu_ids[0])
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if self.is_train:
                self.rankLoss = self.rankLoss.to(device=gpu_ids[0])  # just put this on GPU0

        if printNet:
            print('---------- Networks initialized -------------')
            networks.print_network(self.net)
            print('-----------------------------------------------')

    def forward(self, in0, in1, retPerLayer=False):
        ''' Function computes the distance between image patches in0 and in1
        INPUTS
            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
        OUTPUT
            computed distances between in0 and in1
        '''
        return self.net.forward(in0, in1, retPerLayer=retPerLayer)

    # ***** TRAINING FUNCTIONS *****
    def optimize_parameters(self):
        """One training step: forward, backprop the ranking loss, clamp."""
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        """Keep the 1x1 calibration weights non-negative after each step."""
        for module in self.net.modules():
            if hasattr(module, 'weight') and module.kernel_size == (1, 1):
                module.weight.data = torch.clamp(module.weight.data, min=0)

    def set_input(self, data):
        """Load a 2AFC triplet batch (``ref``, ``p0``, ``p1``, ``judge``)."""
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']

        if self.use_gpu:
            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])

        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)
        self.var_p1 = Variable(self.input_p1, requires_grad=True)

    def forward_train(self):
        """Run the forward pass: d(ref,p0), d(ref,p1), accuracy, ranking loss."""
        self.d0 = self.forward(self.var_ref, self.var_p0)
        self.d1 = self.forward(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)

        self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())

        # judge in [0,1] is mapped to [-1,1] for the ranking loss.
        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2. - 1.)
        return self.loss_total

    def backward_train(self):
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self, d0, d1, judge):
        ''' d0, d1 are Variables, judge is a Tensor '''
        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        # Fraction of the time the model's ordering agrees with humans.
        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)

    def get_current_errors(self):
        """Return mean loss and mean ranking accuracy as scalars."""
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
                               ('acc_r', self.acc_r)])
        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])
        return retDict

    def get_current_visuals(self):
        """Return ref/p0/p1 images upscaled to 256px for visualization.

        NOTE(review): ``util`` is never imported in this module, so calling
        this raises NameError; it presumably refers to the original repo's
        ``util`` helpers (``tensor2im``) -- confirm before relying on it.
        """
        zoom_factor = 256 / self.var_ref.data.size()[2]

        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)

        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)

        return OrderedDict([('ref', ref_img_vis),
                            ('p0', p0_img_vis),
                            ('p1', p1_img_vis)])

    def save(self, path, label):
        """Persist the distance net and the ranking net.

        NOTE(review): ``self.rankLoss`` only exists when initialized with
        ``is_train=True``; calling ``save`` on a test-mode model raises
        AttributeError.
        """
        if self.use_gpu:
            self.save_network(self.net.module, path, '', label)
        else:
            self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self, nepoch_decay):
        """Linearly decay the learning rate by ``self.lr / nepoch_decay``."""
        lrd = self.lr / nepoch_decay
        lr = self.old_lr - lrd

        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr

        # Bug fix: the original interpolated the *builtin* ``type`` into the
        # message (printing "<class 'type'>"); report the model name instead.
        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
        self.old_lr = lr
def score_2afc_dataset(data_loader, func, name=''):
    """Score a distance function on a Two Alternative Forced Choice dataset.

    Iterates ``data_loader`` (a loader over a TwoAFCDataset), computes the
    distance from each reference patch to both perturbed patches with
    ``func(in0, in1)`` (Nx3xXxY tensors in, length-N array out), and measures
    how often the smaller distance matches the patch preferred by humans.
    Ties between d0 and d1 count as half agreement.

    Returns:
        tuple: ``(mean_score, details)`` where ``mean_score`` is the 2AFC
        agreement in [0,1] and ``details`` holds the per-triplet arrays
        ``d0s``, ``d1s``, ``gts`` (human preference in [0,1], 1 = right
        patch p1 preferred) and ``scores``.
    """
    ref_p0_dists = []
    ref_p1_dists = []
    human_prefs = []

    for batch in tqdm(data_loader.load_data(), desc=name):
        ref_p0_dists.extend(func(batch['ref'], batch['p0']).data.cpu().numpy().flatten().tolist())
        ref_p1_dists.extend(func(batch['ref'], batch['p1']).data.cpu().numpy().flatten().tolist())
        human_prefs.extend(batch['judge'].cpu().numpy().flatten().tolist())

    d0s = np.array(ref_p0_dists)
    d1s = np.array(ref_p1_dists)
    gts = np.array(human_prefs)

    # Agreement per triplet: full credit when the closer patch matches the
    # human preference, half credit on exact ties.
    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5

    return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
def score_jnd_dataset(data_loader, func, name=''):
    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
    INPUTS
        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
        func - callable distance function - calling d=func(in0,in1) should take 2
            pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
    OUTPUTS
        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
        [1] - dictionary with following elements
            ds - N array containing distances between two patches shown to human evaluator
            sames - N array containing fraction of people who thought the two patches were identical
    CONSTS
        N - number of test triplets in data_loader
    '''
    # Collect per-pair distances and the human "same/different" labels.
    ds = []
    gts = []
    for data in tqdm(data_loader.load_data(), desc=name):
        ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()
        gts+=data['same'].cpu().numpy().flatten().tolist()
    sames = np.array(gts)
    ds = np.array(ds)
    # Sort by ascending distance so cumulative sums sweep the decision
    # threshold for the precision-recall curve.
    sorted_inds = np.argsort(ds)
    ds_sorted = ds[sorted_inds]
    sames_sorted = sames[sorted_inds]
    TPs = np.cumsum(sames_sorted)
    FPs = np.cumsum(1-sames_sorted)
    FNs = np.sum(sames_sorted)-TPs
    precs = TPs/(TPs+FPs)
    recs = TPs/(TPs+FNs)
    # NOTE(review): ``util`` is never imported in this module -- this line
    # raises NameError when executed; presumably it refers to the original
    # repo's ``util.voc_ap`` (VOC-style average precision). TODO confirm
    # and add the import.
    score = util.voc_ap(recs,precs)
    return(score, dict(ds=ds,sames=sames))
| 40.295374 | 134 | 0.618387 |
from __future__ import absolute_import
import sys
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from tqdm import tqdm
from . import networks_basic as networks
from . import perceptual_loss
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
use_gpu=True, printNet=False, spatial=False,
is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
self.model = model
self.net = net
self.is_train = is_train
self.spatial = spatial
self.gpu_ids = gpu_ids
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'):
self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
use_dropout=True, spatial=spatial, version=version, lpips=True)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
elif(self.model=='net'):
self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train:
self.rankLoss = networks.BCERankingLoss()
self.parameters += list(self.rankLoss.net.parameters())
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else:
self.net.eval()
if(use_gpu):
self.net.to(gpu_ids[0])
self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
if(self.is_train):
self.rankLoss = self.rankLoss.to(device=gpu_ids[0])
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward(self, in0, in1, retPerLayer=False):
return self.net.forward(in0, in1, retPerLayer=retPerLayer)
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self):
self.d0 = self.forward(self.var_ref, self.var_p0)
self.d1 = self.forward(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
if(self.use_gpu):
self.save_network(self.net.module, path, '', label)
else:
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))
self.old_lr = lr
def score_2afc_dataset(data_loader, func, name=''):
d0s = []
d1s = []
gts = []
for data in tqdm(data_loader.load_data(), desc=name):
d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist()
d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader, func, name=''):
ds = []
gts = []
for data in tqdm(data_loader.load_data(), desc=name):
ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
return(score, dict(ds=ds,sames=sames))
| true | true |
f728a40e26f5a734b8142be40f7a803ce264d0f2 | 3,305 | py | Python | src/adafruit_blinka/microcontroller/tegra/t186/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | 1 | 2020-11-28T18:22:32.000Z | 2020-11-28T18:22:32.000Z | src/adafruit_blinka/microcontroller/tegra/t186/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | null | null | null | src/adafruit_blinka/microcontroller/tegra/t186/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | null | null | null | """Tegra T186 pin names"""
import atexit
import Jetson.GPIO as GPIO
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setwarnings(False) # shh!
class Pin:
    """Pins dont exist in CPython so...lets make our own!

    Thin digitalio-style wrapper that maps pin operations onto the
    Jetson.GPIO library using Tegra SoC pin names.
    """

    # Direction / level / pull constants (digitalio-compatible values).
    IN = 0
    OUT = 1
    LOW = 0
    HIGH = 1
    PULL_NONE = 0
    PULL_UP = 1
    PULL_DOWN = 2

    id = None      # Tegra SoC pin name (string) passed to Jetson.GPIO
    _value = LOW   # last value written via value()
    _mode = IN     # last configured direction

    def __init__(self, bcm_number):
        self.id = bcm_number

    def __repr__(self):
        return str(self.id)

    def __eq__(self, other):
        # Compares against either another Pin (via reflected __eq__) or a
        # raw pin-name string.
        return self.id == other

    def init(self, mode=IN, pull=None):
        """Initialize the Pin"""
        if mode is not None:
            if mode == self.IN:
                self._mode = self.IN
                GPIO.setup(self.id, GPIO.IN)
            elif mode == self.OUT:
                self._mode = self.OUT
                GPIO.setup(self.id, GPIO.OUT)
            else:
                raise RuntimeError("Invalid mode for pin: %s" % self.id)
        if pull is not None:
            if self._mode != self.IN:
                raise RuntimeError("Cannot set pull resistor on output")
            if pull == self.PULL_UP:
                GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            elif pull == self.PULL_DOWN:
                GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
            elif pull == self.PULL_NONE:
                # Bug fix: PULL_NONE previously fell through to the error
                # branch even though it is an advertised constant; treat it
                # as "input with no pull resistor configured".
                GPIO.setup(self.id, GPIO.IN)
            else:
                raise RuntimeError("Invalid pull for pin: %s" % self.id)

    def value(self, val=None):
        """Set or return the Pin Value"""
        if val is not None:
            if val == self.LOW:
                self._value = val
                GPIO.output(self.id, val)
                return None
            if val == self.HIGH:
                self._value = val
                GPIO.output(self.id, val)
                return None
            raise RuntimeError("Invalid value for pin")
        # No value given: read the current input level.
        return GPIO.input(self.id)

    # pylint: disable=no-method-argument
    @atexit.register
    def cleanup():
        """Clean up pins"""
        print("Exiting... \nCleaning up pins")
        GPIO.cleanup()

    # pylint: enable=no-method-argument
# pylint: enable=no-method-argument
# Cannot be used as GPIO
SDA = Pin("GPIO_SEN9")
SCL = Pin("GPIO_SEN8")
SDA_1 = Pin("GEN1_I2C_SDA")
SCL_1 = Pin("GEN1_I2C_SCL")
# Jetson TX2 specific
J06 = Pin("GPIO_AUD1")
AA02 = Pin("CAN_GPIO2")
N06 = Pin("GPIO_CAM7")
N04 = Pin("GPIO_CAM5")
N05 = Pin("GPIO_CAM6")
N03 = Pin("GPIO_CAM4")
AA01 = Pin("CAN_GPIO1")
I05 = Pin("GPIO_PQ5")
T03 = Pin("UART1_CTS")
T02 = Pin("UART1_RTS")
P17 = Pin("GPIO_EXP_P17")
AA00 = Pin("CAN0_GPIO0")
Y01 = Pin("GPIO_MDM2")
P16 = Pin("GPIO_EXP_P16")
I04 = Pin("GPIO_PQ4")
J05 = Pin("GPIO_AUD0")
# Jetson TX2 NX specific
W04 = Pin("UART3_RTS")
V01 = Pin("GPIO_SEN1")
C02 = Pin("DAP2_DOUT")
C03 = Pin("DAP2_DIN")
V04 = Pin("GPIO_SEN4")
H02 = Pin("GPIO_WAN7")
H01 = Pin("GPIO_WAN6")
V02 = Pin("GPIO_SEN2")
H00 = Pin("GPIO_WAN5")
H03 = Pin("GPIO_WAN8")
Y03 = Pin("GPIO_MDM4")
N01 = Pin("GPIO_CAM2")
EE02 = Pin("TOUCH_CLK")
U00 = Pin("GPIO_DIS0")
U05 = Pin("GPIO_DIS5")
W05 = Pin("UART3_CTS")
V03 = Pin("GPIO_SEN3")
# Shared pin
J03 = Pin("DAP1_FS")
J02 = Pin("DAP1_DIN")
J01 = Pin("DAP1_DOUT")
J00 = Pin("DAP1_SCLK")
J04 = Pin("AUD_MCLK")
i2cPorts = (
(1, SCL, SDA),
(0, SCL_1, SDA_1),
)
# ordered as spiId, sckId, mosiId, misoId
spiPorts = ((3, N03, N05, N04),)
| 24.301471 | 72 | 0.574584 |
import atexit
import Jetson.GPIO as GPIO
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setwarnings(False)
class Pin:
IN = 0
OUT = 1
LOW = 0
HIGH = 1
PULL_NONE = 0
PULL_UP = 1
PULL_DOWN = 2
id = None
_value = LOW
_mode = IN
def __init__(self, bcm_number):
self.id = bcm_number
def __repr__(self):
return str(self.id)
def __eq__(self, other):
return self.id == other
def init(self, mode=IN, pull=None):
if mode is not None:
if mode == self.IN:
self._mode = self.IN
GPIO.setup(self.id, GPIO.IN)
elif mode == self.OUT:
self._mode = self.OUT
GPIO.setup(self.id, GPIO.OUT)
else:
raise RuntimeError("Invalid mode for pin: %s" % self.id)
if pull is not None:
if self._mode != self.IN:
raise RuntimeError("Cannot set pull resistor on output")
if pull == self.PULL_UP:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_UP)
elif pull == self.PULL_DOWN:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
else:
raise RuntimeError("Invalid pull for pin: %s" % self.id)
def value(self, val=None):
if val is not None:
if val == self.LOW:
self._value = val
GPIO.output(self.id, val)
return None
if val == self.HIGH:
self._value = val
GPIO.output(self.id, val)
return None
raise RuntimeError("Invalid value for pin")
return GPIO.input(self.id)
@atexit.register
def cleanup():
print("Exiting... \nCleaning up pins")
GPIO.cleanup()
SDA = Pin("GPIO_SEN9")
SCL = Pin("GPIO_SEN8")
SDA_1 = Pin("GEN1_I2C_SDA")
SCL_1 = Pin("GEN1_I2C_SCL")
J06 = Pin("GPIO_AUD1")
AA02 = Pin("CAN_GPIO2")
N06 = Pin("GPIO_CAM7")
N04 = Pin("GPIO_CAM5")
N05 = Pin("GPIO_CAM6")
N03 = Pin("GPIO_CAM4")
AA01 = Pin("CAN_GPIO1")
I05 = Pin("GPIO_PQ5")
T03 = Pin("UART1_CTS")
T02 = Pin("UART1_RTS")
P17 = Pin("GPIO_EXP_P17")
AA00 = Pin("CAN0_GPIO0")
Y01 = Pin("GPIO_MDM2")
P16 = Pin("GPIO_EXP_P16")
I04 = Pin("GPIO_PQ4")
J05 = Pin("GPIO_AUD0")
W04 = Pin("UART3_RTS")
V01 = Pin("GPIO_SEN1")
C02 = Pin("DAP2_DOUT")
C03 = Pin("DAP2_DIN")
V04 = Pin("GPIO_SEN4")
H02 = Pin("GPIO_WAN7")
H01 = Pin("GPIO_WAN6")
V02 = Pin("GPIO_SEN2")
H00 = Pin("GPIO_WAN5")
H03 = Pin("GPIO_WAN8")
Y03 = Pin("GPIO_MDM4")
N01 = Pin("GPIO_CAM2")
EE02 = Pin("TOUCH_CLK")
U00 = Pin("GPIO_DIS0")
U05 = Pin("GPIO_DIS5")
W05 = Pin("UART3_CTS")
V03 = Pin("GPIO_SEN3")
J03 = Pin("DAP1_FS")
J02 = Pin("DAP1_DIN")
J01 = Pin("DAP1_DOUT")
J00 = Pin("DAP1_SCLK")
J04 = Pin("AUD_MCLK")
i2cPorts = (
(1, SCL, SDA),
(0, SCL_1, SDA_1),
)
spiPorts = ((3, N03, N05, N04),)
| true | true |
f728a4976c0ebc6cacf37854c9e320a7f3a74fd2 | 1,336 | py | Python | src/SimpleCopy.py | sonbyj01/backup_module | 614b149b8436411b62fde274c6de84a680a689be | [
"MIT"
] | null | null | null | src/SimpleCopy.py | sonbyj01/backup_module | 614b149b8436411b62fde274c6de84a680a689be | [
"MIT"
] | null | null | null | src/SimpleCopy.py | sonbyj01/backup_module | 614b149b8436411b62fde274c6de84a680a689be | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from shutil import copy2
from pathlib import Path
import sys
from .SourceFiles import SourceFiles
class SimpleCopy:
    """Replicate the folders/files recorded in a SourceFiles scan under a target directory."""

    def __init__(self, source):
        """Wrap a scanned source tree.

        Args:
            source (SourceFiles): Scan result whose ``records_folder`` and
                ``records_file`` lists will be replicated.
        """
        assert isinstance(source, SourceFiles), 'Not a SourceFiles object.'
        self.source_object = source
        # Parent of the scanned root; copied paths are made relative to it
        # so the root folder itself is recreated under the target.
        self.source_parent = self.source_object.source_path.parents[0]

    def simple_copy2(self, target):
        """Copy every recorded folder and file under ``target``.

        Directories are created first (so every file has a parent), then
        files are copied with ``shutil.copy2`` to preserve metadata.

        Args:
            target (str or Path): Destination root; created if missing.
        """
        try:
            target_folder = Path(target)
        except TypeError:
            # Bug fix: this used to exit silently with no diagnostic.
            sys.exit('SimpleCopy: target is not a valid path: %r' % (target,))

        # exist_ok avoids the check-then-create race of the original code.
        target_folder.mkdir(parents=True, exist_ok=True)

        for folder in self.source_object.records_folder:
            relative = Path(folder).relative_to(self.source_parent)
            (target_folder / relative).mkdir(parents=True, exist_ok=True)

        for file_record in self.source_object.records_file:
            file_path = Path(file_record)
            relative = file_path.relative_to(self.source_parent)
            copy2(file_path, target_folder / relative)
| 33.4 | 92 | 0.69985 |
from shutil import copy2
from pathlib import Path
import sys
from .SourceFiles import SourceFiles
class SimpleCopy:
def __init__(self, source):
assert isinstance(source, SourceFiles), 'Not a SourceFiles object.'
self.source_object = source
self.source_parent = self.source_object.source_path.parents[0]
def simple_copy2(self, target):
try:
target_folder = Path(target)
except TypeError as fnf:
sys.exit()
if not target_folder.exists():
target_folder.mkdir(parents=True)
for source_folder_path in self.source_object.records_folder:
source_folder_path = Path(source_folder_path)
source_folder_relative_path = source_folder_path.relative_to(self.source_parent)
target_absolute = target_folder.joinpath(source_folder_relative_path)
if not target_absolute.exists():
target_absolute.mkdir(parents=True)
for source_file_path in self.source_object.records_file:
source_file_path = Path(source_file_path)
source_file_relative_path = source_file_path.relative_to(self.source_parent)
target_absolute = target_folder.joinpath(source_file_relative_path)
copy2(source_file_path, target_absolute)
| true | true |
f728a4aa74f8ff0d240dce9a8ab152c1db253469 | 4,018 | py | Python | d3rlpy-master/tests/models/torch/test_dynamics.py | SOPR-T/SOPR-T | 3242461fa8b3e917cde70be497beb1158a7b27e6 | [
"MIT"
] | 1 | 2021-07-09T22:39:28.000Z | 2021-07-09T22:39:28.000Z | d3rlpy-master/tests/models/torch/test_dynamics.py | SOPR-T/SOPR-T | 3242461fa8b3e917cde70be497beb1158a7b27e6 | [
"MIT"
] | null | null | null | d3rlpy-master/tests/models/torch/test_dynamics.py | SOPR-T/SOPR-T | 3242461fa8b3e917cde70be497beb1158a7b27e6 | [
"MIT"
] | null | null | null | import pytest
import torch
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.torch.dynamics import (
ProbabilisticDynamicsModel,
ProbabilisticEnsembleDynamicsModel,
_compute_ensemble_variance,
)
from .model_test import DummyEncoder, check_parameter_updates
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max", "data"])
def test_compute_ensemble_variance(
    batch_size, observation_shape, n_ensembles, variance_type
):
    """_compute_ensemble_variance matches a hand-computed reference for both
    the 'max' and 'data' variance-reduction modes."""
    observations = torch.rand((batch_size, n_ensembles) + observation_shape)
    rewards = torch.rand(batch_size, n_ensembles, 1)
    variances = torch.rand(batch_size, n_ensembles, 1)
    # Reference: 'max' takes the largest per-ensemble variance; 'data' uses
    # the variance of the concatenated (observation, reward) predictions
    # summed over the feature axis.
    if variance_type == "max":
        ref = variances.max(dim=1).values
    elif variance_type == "data":
        data = torch.cat([observations, rewards], dim=2)
        ref = (data.std(dim=1) ** 2).sum(dim=1, keepdims=True)
    variances = _compute_ensemble_variance(
        observations, rewards, variances, variance_type
    )
    # The ensemble dimension must be reduced to a single column per sample.
    assert variances.shape == (batch_size, 1)
    assert torch.allclose(variances, ref)
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
def test_probabilistic_dynamics_model(feature_size, action_size, batch_size):
    """Shape checks and gradient-flow check for a single dynamics model."""
    encoder = DummyEncoder(feature_size, action_size, True)
    dynamics = ProbabilisticDynamicsModel(encoder)
    # check output shape: forward returns (next_observation, reward)
    x = torch.rand(batch_size, feature_size)
    action = torch.rand(batch_size, action_size)
    pred_x, pred_reward = dynamics(x, action)
    assert pred_x.shape == (batch_size, feature_size)
    assert pred_reward.shape == (batch_size, 1)
    # check variance: one scalar variance per sample
    _, _, variance = dynamics.predict_with_variance(x, action)
    assert variance.shape == (batch_size, 1)
    # TODO: check error values, not only the shape
    reward = torch.rand(batch_size, 1)
    loss = dynamics.compute_error(x, action, reward, x)
    assert loss.shape == (batch_size, 1)
    # check layer connection: every parameter receives a gradient update
    check_parameter_updates(dynamics, (x, action, reward, x))
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max", "data"])
def test_probabilistic_ensemble_dynamics_dynamics_model(
    feature_size, action_size, batch_size, n_ensembles, variance_type
):
    """Shape checks for the ensemble wrapper, with and without per-sample
    ensemble-member indices.

    NOTE(review): the function name has a duplicated 'dynamics' token;
    renaming is left alone since pytest discovers tests by name.
    """
    encoder = DummyEncoder(feature_size, action_size, True)
    models = []
    for _ in range(n_ensembles):
        models.append(ProbabilisticDynamicsModel(encoder))
    dynamics = ProbabilisticEnsembleDynamicsModel(models)
    # check output shape: one prediction per ensemble member
    x = torch.rand(batch_size, feature_size)
    action = torch.rand(batch_size, action_size)
    pred_x, pred_reward = dynamics(x, action)
    assert pred_x.shape == (batch_size, n_ensembles, feature_size)
    assert pred_reward.shape == (batch_size, n_ensembles, 1)
    # check variance without indices: full ensemble kept, variance reduced
    pred_x, pred_reward, variances = dynamics.predict_with_variance(
        x, action, variance_type=variance_type
    )
    assert pred_x.shape == (batch_size, n_ensembles, feature_size)
    assert pred_reward.shape == (batch_size, n_ensembles, 1)
    assert variances.shape == (batch_size, 1)
    # check variance with indices: a single member selected per sample
    indices = torch.randint(n_ensembles, size=(batch_size,))
    pred_x, pred_reward, variances = dynamics.predict_with_variance(
        x, action, variance_type=variance_type, indices=indices
    )
    assert pred_x.shape == (batch_size, feature_size)
    assert pred_reward.shape == (batch_size, 1)
    assert variances.shape == (batch_size, 1)
    # TODO: check error
    reward = torch.rand(batch_size, 1)
    # NOTE(review): unlike the single-model test above, ``loss`` is computed
    # but never asserted -- the TODO above is unresolved.
    loss = dynamics.compute_error(x, action, reward, x)
    # check layer connection: every parameter receives a gradient update
    check_parameter_updates(dynamics, (x, action, reward, x))
| 36.198198 | 77 | 0.725734 | import pytest
import torch
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.torch.dynamics import (
ProbabilisticDynamicsModel,
ProbabilisticEnsembleDynamicsModel,
_compute_ensemble_variance,
)
from .model_test import DummyEncoder, check_parameter_updates
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max", "data"])
def test_compute_ensemble_variance(
batch_size, observation_shape, n_ensembles, variance_type
):
observations = torch.rand((batch_size, n_ensembles) + observation_shape)
rewards = torch.rand(batch_size, n_ensembles, 1)
variances = torch.rand(batch_size, n_ensembles, 1)
if variance_type == "max":
ref = variances.max(dim=1).values
elif variance_type == "data":
data = torch.cat([observations, rewards], dim=2)
ref = (data.std(dim=1) ** 2).sum(dim=1, keepdims=True)
variances = _compute_ensemble_variance(
observations, rewards, variances, variance_type
)
assert variances.shape == (batch_size, 1)
assert torch.allclose(variances, ref)
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
def test_probabilistic_dynamics_model(feature_size, action_size, batch_size):
encoder = DummyEncoder(feature_size, action_size, True)
dynamics = ProbabilisticDynamicsModel(encoder)
x = torch.rand(batch_size, feature_size)
action = torch.rand(batch_size, action_size)
pred_x, pred_reward = dynamics(x, action)
assert pred_x.shape == (batch_size, feature_size)
assert pred_reward.shape == (batch_size, 1)
_, _, variance = dynamics.predict_with_variance(x, action)
assert variance.shape == (batch_size, 1)
reward = torch.rand(batch_size, 1)
loss = dynamics.compute_error(x, action, reward, x)
assert loss.shape == (batch_size, 1)
check_parameter_updates(dynamics, (x, action, reward, x))
@pytest.mark.parametrize("feature_size", [100])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("batch_size", [32])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max", "data"])
def test_probabilistic_ensemble_dynamics_dynamics_model(
feature_size, action_size, batch_size, n_ensembles, variance_type
):
encoder = DummyEncoder(feature_size, action_size, True)
models = []
for _ in range(n_ensembles):
models.append(ProbabilisticDynamicsModel(encoder))
dynamics = ProbabilisticEnsembleDynamicsModel(models)
x = torch.rand(batch_size, feature_size)
action = torch.rand(batch_size, action_size)
pred_x, pred_reward = dynamics(x, action)
assert pred_x.shape == (batch_size, n_ensembles, feature_size)
assert pred_reward.shape == (batch_size, n_ensembles, 1)
pred_x, pred_reward, variances = dynamics.predict_with_variance(
x, action, variance_type=variance_type
)
assert pred_x.shape == (batch_size, n_ensembles, feature_size)
assert pred_reward.shape == (batch_size, n_ensembles, 1)
assert variances.shape == (batch_size, 1)
indices = torch.randint(n_ensembles, size=(batch_size,))
pred_x, pred_reward, variances = dynamics.predict_with_variance(
x, action, variance_type=variance_type, indices=indices
)
assert pred_x.shape == (batch_size, feature_size)
assert pred_reward.shape == (batch_size, 1)
assert variances.shape == (batch_size, 1)
reward = torch.rand(batch_size, 1)
loss = dynamics.compute_error(x, action, reward, x)
check_parameter_updates(dynamics, (x, action, reward, x))
| true | true |
f728a55ac321560a29e322ee6dcb6c70f2e872ac | 386 | py | Python | fdk_client/platform/models/SaveAttributeRequest.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/SaveAttributeRequest.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/SaveAttributeRequest.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class SaveAttributeRequest(BaseSchema):
# Feedback swagger.json
description = fields.Str(required=False)
name = fields.Str(required=False)
slug = fields.Str(required=False)
| 14.846154 | 44 | 0.702073 |
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class SaveAttributeRequest(BaseSchema):
description = fields.Str(required=False)
name = fields.Str(required=False)
slug = fields.Str(required=False)
| true | true |
f728a5afcddcf8d1d3f2a2c807ff6efa23a3007b | 12,651 | py | Python | engine.py | LockdownInnovators/CodeNames | b82fc9c85d4887ae81f331de6f2058e5e2cdccd9 | [
"MIT"
] | null | null | null | engine.py | LockdownInnovators/CodeNames | b82fc9c85d4887ae81f331de6f2058e5e2cdccd9 | [
"MIT"
] | null | null | null | engine.py | LockdownInnovators/CodeNames | b82fc9c85d4887ae81f331de6f2058e5e2cdccd9 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import itertools
import re
import sys
import os
import platform
import numpy as np
import model
from config import config
CLUE_PATTERN = r'^([a-zA-Z]+) ({0})$'
UNLIMITED = "unlimited"
# noinspection PyAttributeOutsideInit
class GameEngine(object):
def __init__(self, seed=None, expert=False, word2vec_models=None):
# Load our word list if necessary.
# TODO: Max length of 11 is hardcoded here and in print_board()
if word2vec_models is None:
word2vec_models = {}
with open(config.word_list) as f:
_words = [line.rstrip().lower().replace(' ', '_') for line in f.readlines()]
self.words = np.array(_words)
# Initialize our word embedding models.
self.models = {k: model.WordEmbedding(w2v) for k, w2v in word2vec_models.items()}
# Initialize random numbers.
self.generator = np.random.RandomState(seed=seed)
# Register expert mode
self.expert = expert
self.unfound_words = (set(), set())
# Useful regular expressions.
if self.expert:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]|" + UNLIMITED))
else:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]"))
def initialize_random_game(self, size=5):
self.size = size
# Shuffle the wordlist.
shuffle = self.generator.choice(
len(self.words), size * size, replace=False)
self.board = self.words[shuffle]
# Specify the layout for this game.
assignments = self.generator.permutation(size * size)
self.owner = np.empty(size * size, int)
self.owner[assignments[0]] = 0 # assassin
self.owner[assignments[1:10]] = 1 # first player: 9 words
self.owner[assignments[10:18]] = 2 # second player: 8 words
self.owner[assignments[18:]] = 3 # bystander: 7 words
self.assassin_word = self.board[self.owner == 0]
# All cards are initially visible.
self.visible = np.ones_like(self.owner, dtype=bool)
self.num_turns = -1
def initialize_from_words(self, initial_words, size=5):
"""
The initial_words parameter should be in the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
The total number of words must be <= size * size. Any missing words
are considered to be already covered and neutral.
"""
self.size = size
word_groups = initial_words.split(';')
if len(word_groups) != 4:
raise ValueError('Expected 4 groups separated by semicolon.')
board, owner, visible = [], [], []
for group_index, word_group in enumerate(word_groups):
words = word_group.split(',')
for word in words:
word = word.lower().replace(' ', '_')
if word not in self.words:
raise ValueError('Invalid word "{0}".'.format(word))
if word in board:
raise ValueError('Duplicate word "{0}".'.format(word))
board.append(word)
owner.append(group_index)
visible.append(True)
if len(board) > size * size:
raise ValueError('Too many words. Expected <= {0}.'.format(size * size))
# Add dummy hidden words if necessary.
while len(board) < size * size:
board.append('---')
owner.append(3)
visible.append(False)
self.board = np.array(board)
self.owner = np.array(owner)
self.visible = np.array(visible)
# Perform a random shuffle of the board.
shuffle = self.generator.permutation(size * size)
self.board = self.board[shuffle]
self.owner = self.owner[shuffle]
self.visible = self.visible[shuffle]
self.assassin_word = self.board[self.owner == 0]
self.num_turns = -1
def print_board(self, spymaster=False, clear_screen=True):
if clear_screen:
if platform.system() == 'Windows':
os.system('cls')
else:
print(chr(27) + '[2J')
board = self.board.reshape(self.size, self.size)
owner = self.owner.reshape(self.size, self.size)
visible = self.visible.reshape(self.size, self.size)
for row in range(self.size):
for col in range(self.size):
word = board[row, col]
tag = '#<>-'[owner[row, col]]
if not visible[row, col]:
word = tag * 11
elif not spymaster:
tag = ' '
if not spymaster or owner[row, col] in (0, 1, 2):
word = word.upper()
print('{0}{1:11s} '.format(tag, word), end='')
print('')
def play_computer_spymaster(self, gamma=1.0, verbose=True):
say('Thinking...')
sys.stdout.flush()
# Loop over all permutations of words.
num_words = len(self.player_words)
best_score, saved_clues = [], []
for count in range(max(num_words, 2), 0, -1):
# Multiply similarity scores by this factor for any clue
# corresponding to this many words.
bonus_factor = count ** gamma
for group in itertools.combinations(range(num_words), count):
words = self.player_words[list(group)]
clue, score = self.models[f'{self.player + 1} Master'].get_clue(clue_words=words,
pos_words=self.player_words,
neg_words=np.concatenate((
self.opponent_words,
self.neutral_words)),
veto_words=self.assassin_word)
if clue:
best_score.append(score * bonus_factor)
saved_clues.append((clue, words))
num_clues = len(saved_clues)
order = sorted(range(num_clues), key=lambda k: best_score[k], reverse=True)
if verbose:
self.print_board(spymaster=True)
for i in order[:10]:
clue, words = saved_clues[i]
say(u'{0:.3f} {1} = {2}'.format(best_score[i], ' + '.join([w.upper() for w in words]), clue))
clue, words = saved_clues[order[0]]
self.unfound_words[self.player].update(words)
if self.expert and self._should_say_unlimited(nb_clue_words=len(words)):
return clue, UNLIMITED
else:
return clue, len(words)
def _should_say_unlimited(self, nb_clue_words, threshold_opponent=2):
"""
Announce "unlimited" if :
(1) the opposing team risks winning with their next clue,
(2) and our +1 guess isn't enough to catch up during this clue,
(3) but all the words hinted by the current and previous clues
are enough to catch up and win
"""
return (len(self.opponent_words) <= threshold_opponent # (1)
and nb_clue_words + 1 < len(self.player_words) # (2)
and self.unfound_words[self.player]
== set(self.player_words)) # (3)
def play_human_spymaster(self):
self.print_board(spymaster=True)
while True:
clue = ask('{0} Enter your clue: '.format(self.player_label))
matched = self.valid_clue.match(clue)
if matched:
word, count = matched.groups()
if count != UNLIMITED:
count = int(count)
return word, count
say('Invalid clue, should be WORD COUNT.')
def play_human_team(self, word, count):
num_guesses = 0
while (self.expert and count == UNLIMITED) or num_guesses < count + 1:
self.print_board(clear_screen=(num_guesses == 0))
say(u'{0} your clue is: {1} {2}'.format(self.player_label, word, count))
num_guesses += 1
while True:
guess = ask('{0} enter your guess #{1}: '.format(self.player_label, num_guesses))
guess = guess.strip().lower().replace(' ', '_')
if guess == '':
# Team does not want to make any more guesses.
return True
if guess in self.board[self.visible]:
break
say('Invalid guess, should be a visible word.')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} You guessed the assasin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def play_computer_team(self, word, count):
num_guesses = 0
say(u'{0} (computer) your clue is: {1} {2}'.format(self.player_label, word, count))
guesses = self.models[f'{self.player + 1} Guesser'].get_closest_board_words_to(word, count, self.player_words)
for guess in guesses:
num_guesses += 1
say(f'Computer guess #{num_guesses}: {guess}')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} (computer) guessed the assasin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} (computer) You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations computer, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry computer, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry computer, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def next_turn(self):
self.num_turns += 1
self.player = self.num_turns % 2
self.opponent = (self.player + 1) % 2
self.player_label = '<>'[self.player] * 3
self.player_words = self.board[(self.owner == self.player + 1) & self.visible]
self.opponent_words = self.board[(self.owner == self.opponent + 1) & self.visible]
self.neutral_words = self.board[(self.owner == 3) & self.visible]
def play_turn(self, spymaster='human', team='human'):
self.next_turn()
if spymaster == 'human':
word, count = self.play_human_spymaster()
else:
word, count = self.play_computer_spymaster()
if team == 'human':
ongoing = self.play_human_team(word, count)
else:
ongoing = self.play_computer_team(word, count)
return ongoing
def play_game(self, spymaster1='human', team1='human',
spymaster2='human', team2='human', init=None):
if init is None:
self.initialize_random_game()
else:
self.initialize_from_words(init)
while True:
if not self.play_turn(spymaster1, team1): break
if not self.play_turn(spymaster2, team2): break
def say(message):
print((message + '\n').encode('utf8'))
def ask(message):
try:
return input(message)
except KeyboardInterrupt:
say('\nBye.')
sys.exit(0)
| 37.990991 | 118 | 0.550707 | from __future__ import print_function, division
import itertools
import re
import sys
import os
import platform
import numpy as np
import model
from config import config
CLUE_PATTERN = r'^([a-zA-Z]+) ({0})$'
UNLIMITED = "unlimited"
class GameEngine(object):
def __init__(self, seed=None, expert=False, word2vec_models=None):
if word2vec_models is None:
word2vec_models = {}
with open(config.word_list) as f:
_words = [line.rstrip().lower().replace(' ', '_') for line in f.readlines()]
self.words = np.array(_words)
self.models = {k: model.WordEmbedding(w2v) for k, w2v in word2vec_models.items()}
self.generator = np.random.RandomState(seed=seed)
self.expert = expert
self.unfound_words = (set(), set())
if self.expert:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]|" + UNLIMITED))
else:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]"))
def initialize_random_game(self, size=5):
self.size = size
shuffle = self.generator.choice(
len(self.words), size * size, replace=False)
self.board = self.words[shuffle]
assignments = self.generator.permutation(size * size)
self.owner = np.empty(size * size, int)
self.owner[assignments[0]] = 0
self.owner[assignments[1:10]] = 1
self.owner[assignments[10:18]] = 2
self.owner[assignments[18:]] = 3
self.assassin_word = self.board[self.owner == 0]
self.visible = np.ones_like(self.owner, dtype=bool)
self.num_turns = -1
def initialize_from_words(self, initial_words, size=5):
self.size = size
word_groups = initial_words.split(';')
if len(word_groups) != 4:
raise ValueError('Expected 4 groups separated by semicolon.')
board, owner, visible = [], [], []
for group_index, word_group in enumerate(word_groups):
words = word_group.split(',')
for word in words:
word = word.lower().replace(' ', '_')
if word not in self.words:
raise ValueError('Invalid word "{0}".'.format(word))
if word in board:
raise ValueError('Duplicate word "{0}".'.format(word))
board.append(word)
owner.append(group_index)
visible.append(True)
if len(board) > size * size:
raise ValueError('Too many words. Expected <= {0}.'.format(size * size))
while len(board) < size * size:
board.append('---')
owner.append(3)
visible.append(False)
self.board = np.array(board)
self.owner = np.array(owner)
self.visible = np.array(visible)
shuffle = self.generator.permutation(size * size)
self.board = self.board[shuffle]
self.owner = self.owner[shuffle]
self.visible = self.visible[shuffle]
self.assassin_word = self.board[self.owner == 0]
self.num_turns = -1
def print_board(self, spymaster=False, clear_screen=True):
if clear_screen:
if platform.system() == 'Windows':
os.system('cls')
else:
print(chr(27) + '[2J')
board = self.board.reshape(self.size, self.size)
owner = self.owner.reshape(self.size, self.size)
visible = self.visible.reshape(self.size, self.size)
for row in range(self.size):
for col in range(self.size):
word = board[row, col]
tag = '#<>-'[owner[row, col]]
if not visible[row, col]:
word = tag * 11
elif not spymaster:
tag = ' '
if not spymaster or owner[row, col] in (0, 1, 2):
word = word.upper()
print('{0}{1:11s} '.format(tag, word), end='')
print('')
def play_computer_spymaster(self, gamma=1.0, verbose=True):
say('Thinking...')
sys.stdout.flush()
num_words = len(self.player_words)
best_score, saved_clues = [], []
for count in range(max(num_words, 2), 0, -1):
bonus_factor = count ** gamma
for group in itertools.combinations(range(num_words), count):
words = self.player_words[list(group)]
clue, score = self.models[f'{self.player + 1} Master'].get_clue(clue_words=words,
pos_words=self.player_words,
neg_words=np.concatenate((
self.opponent_words,
self.neutral_words)),
veto_words=self.assassin_word)
if clue:
best_score.append(score * bonus_factor)
saved_clues.append((clue, words))
num_clues = len(saved_clues)
order = sorted(range(num_clues), key=lambda k: best_score[k], reverse=True)
if verbose:
self.print_board(spymaster=True)
for i in order[:10]:
clue, words = saved_clues[i]
say(u'{0:.3f} {1} = {2}'.format(best_score[i], ' + '.join([w.upper() for w in words]), clue))
clue, words = saved_clues[order[0]]
self.unfound_words[self.player].update(words)
if self.expert and self._should_say_unlimited(nb_clue_words=len(words)):
return clue, UNLIMITED
else:
return clue, len(words)
def _should_say_unlimited(self, nb_clue_words, threshold_opponent=2):
return (len(self.opponent_words) <= threshold_opponent
and nb_clue_words + 1 < len(self.player_words)
and self.unfound_words[self.player]
== set(self.player_words))
def play_human_spymaster(self):
self.print_board(spymaster=True)
while True:
clue = ask('{0} Enter your clue: '.format(self.player_label))
matched = self.valid_clue.match(clue)
if matched:
word, count = matched.groups()
if count != UNLIMITED:
count = int(count)
return word, count
say('Invalid clue, should be WORD COUNT.')
def play_human_team(self, word, count):
num_guesses = 0
while (self.expert and count == UNLIMITED) or num_guesses < count + 1:
self.print_board(clear_screen=(num_guesses == 0))
say(u'{0} your clue is: {1} {2}'.format(self.player_label, word, count))
num_guesses += 1
while True:
guess = ask('{0} enter your guess #{1}: '.format(self.player_label, num_guesses))
guess = guess.strip().lower().replace(' ', '_')
if guess == '':
return True
if guess in self.board[self.visible]:
break
say('Invalid guess, should be a visible word.')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} You guessed the assasin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def play_computer_team(self, word, count):
num_guesses = 0
say(u'{0} (computer) your clue is: {1} {2}'.format(self.player_label, word, count))
guesses = self.models[f'{self.player + 1} Guesser'].get_closest_board_words_to(word, count, self.player_words)
for guess in guesses:
num_guesses += 1
say(f'Computer guess #{num_guesses}: {guess}')
loc = np.where(self.board == guess)[0]
self.visible[loc] = False
if guess == self.assassin_word:
say('{0} (computer) guessed the assasin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} (computer) You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations computer, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry computer, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry computer, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def next_turn(self):
self.num_turns += 1
self.player = self.num_turns % 2
self.opponent = (self.player + 1) % 2
self.player_label = '<>'[self.player] * 3
self.player_words = self.board[(self.owner == self.player + 1) & self.visible]
self.opponent_words = self.board[(self.owner == self.opponent + 1) & self.visible]
self.neutral_words = self.board[(self.owner == 3) & self.visible]
def play_turn(self, spymaster='human', team='human'):
self.next_turn()
if spymaster == 'human':
word, count = self.play_human_spymaster()
else:
word, count = self.play_computer_spymaster()
if team == 'human':
ongoing = self.play_human_team(word, count)
else:
ongoing = self.play_computer_team(word, count)
return ongoing
def play_game(self, spymaster1='human', team1='human',
spymaster2='human', team2='human', init=None):
if init is None:
self.initialize_random_game()
else:
self.initialize_from_words(init)
while True:
if not self.play_turn(spymaster1, team1): break
if not self.play_turn(spymaster2, team2): break
def say(message):
print((message + '\n').encode('utf8'))
def ask(message):
try:
return input(message)
except KeyboardInterrupt:
say('\nBye.')
sys.exit(0)
| true | true |
f728a5b68285f8b74272b956d5bb3ba8af7d5994 | 28,367 | py | Python | other_train/train_loadCorrMat.py | aaxwaz/youtube-8m | 3c3ceae83173d6b9eaef6072308a2804ba56bcf5 | [
"Apache-2.0"
] | null | null | null | other_train/train_loadCorrMat.py | aaxwaz/youtube-8m | 3c3ceae83173d6b9eaef6072308a2804ba56bcf5 | [
"Apache-2.0"
] | null | null | null | other_train/train_loadCorrMat.py | aaxwaz/youtube-8m | 3c3ceae83173d6b9eaef6072308a2804ba56bcf5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for training Tensorflow models on the YouTube-8M dataset."""
import json
import os
import time
import eval_util
import export_model
import losses
import frame_level_models
import video_level_models
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils
import numpy as np
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if __name__ == "__main__":
# Dataset flags.
flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
"The directory to save the model files in.")
flags.DEFINE_string(
"train_data_pattern", "",
"File glob for the training dataset. If the files refer to Frame Level "
"features (i.e. tensorflow.SequenceExample), then set --reader_type "
"format. The (Sequence)Examples are expected to have 'rgb' byte array "
"sequence feature as well as a 'labels' int64 context feature.")
flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
"to use for training.")
flags.DEFINE_string("feature_sizes", "1024", "Length of the feature vectors.")
# Model flags.
flags.DEFINE_bool(
"frame_features", False,
"If set, then --train_data_pattern must be frame-level features. "
"Otherwise, --train_data_pattern must be aggregated video-level "
"features. The model must also be set appropriately (i.e. to read 3D "
"batches VS 4D batches.")
flags.DEFINE_string(
"model", "LogisticModel",
"Which architecture to use for the model. Models are defined "
"in models.py.")
flags.DEFINE_bool(
"start_new_model", False,
"If set, this will not resume from a checkpoint and will instead create a"
" new model instance.")
# Training flags.
flags.DEFINE_integer("num_gpu", 1,
"The maximum number of GPU devices to use for training. "
"Flag only applies if GPUs are installed")
flags.DEFINE_integer("batch_size", 1024,
"How many examples to process per batch for training.")
flags.DEFINE_string("label_loss", "CrossEntropyLoss",
"Which loss function to use for training the model.")
flags.DEFINE_float(
"regularization_penalty", 1.0,
"How much weight to give to the regularization loss (the label loss has "
"a weight of 1).")
flags.DEFINE_float("base_learning_rate", 0.01,
"Which learning rate to start with.")
flags.DEFINE_float("learning_rate_decay", 0.95,
"Learning rate decay factor to be applied every "
"learning_rate_decay_examples.")
flags.DEFINE_float("learning_rate_decay_examples", 4000000,
"Multiply current learning rate by learning_rate_decay "
"every learning_rate_decay_examples.")
flags.DEFINE_integer("num_epochs", 5,
"How many passes to make over the dataset before "
"halting training.")
flags.DEFINE_integer("max_steps", None,
"The maximum number of iterations of the training loop.")
flags.DEFINE_integer("export_model_steps", 10000000000,
"The period, in number of steps, with which the model "
"is exported for batch prediction.")
flags.DEFINE_float("save_checkpoint_every_n_hour", 0.4,
"Save the checkpoint every n hours.")
flags.DEFINE_integer("validate_every_n_training_steps", 100,
"eval on training for every n steps")
# Other flags.
flags.DEFINE_integer("num_readers", 12,
"How many threads to use for reading input files.")
flags.DEFINE_string("optimizer", "AdamOptimizer",
"What optimizer class to use.")
flags.DEFINE_float("clip_gradient_norm", 1.0, "Norm to clip gradients to.")
flags.DEFINE_bool(
"log_device_placement", False,
"Whether to write the device on which every op will run into the "
"logs on startup.")
def validate_class_name(flag_value, category, modules, expected_superclass):
"""Checks that the given string matches a class of the expected type.
Args:
flag_value: A string naming the class to instantiate.
category: A string used further describe the class in error messages
(e.g. 'model', 'reader', 'loss').
modules: A list of modules to search for the given class.
expected_superclass: A class that the given class should inherit from.
Raises:
FlagsError: If the given class could not be found or if the first class
found with that name doesn't inherit from the expected superclass.
Returns:
True if a class was found that matches the given constraints.
"""
candidates = [getattr(module, flag_value, None) for module in modules]
for candidate in candidates:
if not candidate:
continue
if not issubclass(candidate, expected_superclass):
raise flags.FlagsError("%s '%s' doesn't inherit from %s." %
(category, flag_value,
expected_superclass.__name__))
return True
raise flags.FlagsError("Unable to find %s '%s'." % (category, flag_value))
def get_input_data_tensors(reader,
data_pattern,
batch_size=1000,
num_epochs=None,
num_readers=1):
"""Creates the section of the graph which reads the training data.
Args:
reader: A class which parses the training data.
data_pattern: A 'glob' style path to the data files.
batch_size: How many examples to process at a time.
num_epochs: How many passes to make over the training data. Set to 'None'
to run indefinitely.
num_readers: How many I/O threads to use.
Returns:
A tuple containing the features tensor, labels tensor, and optionally a
tensor containing the number of frames per video. The exact dimensions
depend on the reader being used.
Raises:
IOError: If no files matching the given pattern were found.
"""
logging.info("Using batch size of " + str(batch_size) + " for training.")
with tf.name_scope("train_input"):
files = gfile.Glob(data_pattern)
if not files:
raise IOError("Unable to find training files. data_pattern='" +
data_pattern + "'.")
logging.info("Number of training files: %s.", str(len(files)))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=True)
training_data = [
reader.prepare_reader(filename_queue) for _ in range(num_readers)
]
return tf.train.shuffle_batch_join(
training_data,
batch_size=batch_size,
capacity=batch_size * 5,
min_after_dequeue=batch_size,
allow_smaller_final_batch=True,
enqueue_many=True)
def find_class_by_name(name, modules):
"""Searches the provided modules for the named class and returns it."""
modules = [getattr(module, name, None) for module in modules]
return next(a for a in modules if a)
def build_graph(reader,
                model,
                train_data_pattern,
                label_loss_fn=losses.CrossEntropyLoss(),
                batch_size=1000,
                base_learning_rate=0.01,
                learning_rate_decay_examples=1000000,
                learning_rate_decay=0.95,
                optimizer_class=tf.train.AdamOptimizer,
                clip_gradient_norm=1.0,
                regularization_penalty=1,
                num_readers=1,
                num_epochs=None,
                corr_mat=None):
  """Creates the Tensorflow graph.

  This will only be called once in the life of
  a training model, because after the graph is created the model will be
  restored from a meta graph file rather than being recreated.

  The built tensors are published via tf.add_to_collection ("global_step",
  "loss", "predictions", "train_op", ...) rather than returned.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
      from BaseModel.
    train_data_pattern: glob path to the training data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
      from BaseLoss.
    batch_size: How many examples to process at a time.
    base_learning_rate: What learning rate to initialize the optimizer with.
    learning_rate_decay_examples: Decay the rate every this many examples.
    learning_rate_decay: Multiplicative decay factor.
    optimizer_class: Which optimization algorithm to use.
    clip_gradient_norm: Magnitude of the gradient to clip to.
    regularization_penalty: How much weight to give the regularization loss
      compared to the label loss.
    num_readers: How many threads to use for I/O operations.
    num_epochs: How many passes to make over the data. 'None' means an
      unlimited number of passes.
    corr_mat: Optional matrix forwarded to model.create_model() as
      corr_mat_init — presumably a label-correlation prior; TODO confirm
      against the model implementation.
  """
  global_step = tf.Variable(0, trainable=False, name="global_step")

  # One "tower" (model replica) per visible GPU, capped by --num_gpu;
  # falls back to a single CPU tower when no GPU is present.
  local_device_protos = device_lib.list_local_devices()
  gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
  gpus = gpus[:FLAGS.num_gpu]
  num_gpus = len(gpus)

  if num_gpus > 0:
    logging.info("Using the following GPUs to train: " + str(gpus))
    num_towers = num_gpus
    device_string = '/gpu:%d'
  else:
    logging.info("No GPUs found. Training on CPU.")
    num_towers = 1
    device_string = '/cpu:%d'

  # Decay is driven by examples seen (step * effective batch), not raw steps.
  learning_rate = tf.train.exponential_decay(
      base_learning_rate,
      global_step * batch_size * num_towers,
      learning_rate_decay_examples,
      learning_rate_decay,
      staircase=True)
  tf.summary.scalar('learning_rate', learning_rate)

  optimizer = optimizer_class(learning_rate)

  # One large batch is read and then split evenly across the towers.
  unused_video_id, model_input_raw, labels_batch, num_frames = (
      get_input_data_tensors(
          reader,
          train_data_pattern,
          batch_size=batch_size * num_towers,
          num_readers=num_readers,
          num_epochs=num_epochs))
  tf.summary.histogram("model/input_raw", model_input_raw)

  feature_dim = len(model_input_raw.get_shape()) - 1

  # L2-normalize each feature vector along its last axis.
  model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

  tower_inputs = tf.split(model_input, num_towers)
  tower_labels = tf.split(labels_batch, num_towers)
  tower_num_frames = tf.split(num_frames, num_towers)
  tower_gradients = []
  tower_predictions = []
  tower_label_losses = []
  tower_reg_losses = []
  for i in range(num_towers):
    # For some reason these 'with' statements can't be combined onto the same
    # line. They have to be nested.
    with tf.device(device_string % i):
      # Variables are shared across towers: only tower 0 creates them.
      with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
        # With multiple GPUs, model variables live on the CPU so every tower
        # can read/update them; with one device they stay on that device.
        with (slim.arg_scope([slim.model_variable, slim.variable], device="/cpu:0" if num_gpus!=1 else "/gpu:0")):
          result = model.create_model(
              tower_inputs[i],
              num_frames=tower_num_frames[i],
              vocab_size=reader.num_classes,
              corr_mat_init=corr_mat,
              labels=tower_labels[i])
          for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

          # The loss is computed on "predictions0" while "predictions" is what
          # gets exported/evaluated — the model returns both.
          predictions0 = result["predictions0"]
          predictions = result["predictions"]
          tower_predictions.append(predictions)

          label_loss = label_loss_fn.calculate_loss(predictions0, tower_labels[i])

          if "regularization_loss" in result.keys():
            reg_loss = result["regularization_loss"]
          else:
            reg_loss = tf.constant(0.0)

          reg_losses = tf.losses.get_regularization_losses()
          if reg_losses:
            reg_loss += tf.add_n(reg_losses)

          tower_reg_losses.append(reg_loss)

          # Adds update_ops (e.g., moving average updates in batch normalization) as
          # a dependency to the train_op.
          update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
          if "update_ops" in result.keys():
            update_ops += result["update_ops"]

          if update_ops:
            with tf.control_dependencies(update_ops):
              barrier = tf.no_op(name="gradient_barrier")
              with tf.control_dependencies([barrier]):
                label_loss = tf.identity(label_loss)

          tower_label_losses.append(label_loss)

          # Incorporate the L2 weight penalties etc.
          final_loss = regularization_penalty * reg_loss + label_loss
          gradients = optimizer.compute_gradients(final_loss,
              colocate_gradients_with_ops=False)
          tower_gradients.append(gradients)

  label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
  tf.summary.scalar("label_loss", label_loss)
  if regularization_penalty != 0:
    reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
    tf.summary.scalar("reg_loss", reg_loss)

  # Average the per-tower gradients, optionally clip, then apply once.
  merged_gradients = utils.combine_gradients(tower_gradients)
  if clip_gradient_norm > 0:
    with tf.name_scope('clip_grads'):
      merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)
  train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)

  # Expose the interesting tensors to the Trainer via named collections.
  tf.add_to_collection("global_step", global_step)
  tf.add_to_collection("loss", label_loss)
  tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
  tf.add_to_collection("input_batch_raw", model_input_raw)
  tf.add_to_collection("input_batch", model_input)
  tf.add_to_collection("num_frames", num_frames)
  tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("train_op", train_op)
class Trainer(object):
  """A Trainer to train a Tensorflow graph."""

  def __init__(self, cluster, task, train_dir, model, reader, model_exporter,
               log_device_placement=True, max_steps=None,
               export_model_steps=1000, corr_mat = None):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
      train_dir: Directory where checkpoints, summaries and exports go.
      model: Model object whose create_model() builds the network.
      reader: Data reader used to parse input examples.
      model_exporter: Object whose export_model() writes inference models.
      log_device_placement: Forwarded into the session ConfigProto.
      max_steps: Stop training after this many global steps (None = no cap).
      export_model_steps: Export the model every this many steps.
      corr_mat: Optional matrix forwarded to build_graph().
    """
    self.cluster = cluster
    self.task = task
    # Only the index-0 "master" task logs metrics and exports models.
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    self.config = tf.ConfigProto(
        allow_soft_placement=True,log_device_placement=log_device_placement)
    self.model = model
    self.reader = reader
    self.model_exporter = model_exporter
    self.max_steps = max_steps
    self.max_steps_reached = False
    self.export_model_steps = export_model_steps
    self.last_model_export_step = 0
    self.corr_mat = corr_mat

  def run(self, start_new_model=False):
    """Performs training on the currently defined Tensorflow graph.

    Returns:
      A tuple of the training Hit@1 and the training PERR.
    """
    if self.is_master and start_new_model:
      self.remove_training_directory(self.train_dir)

    if not os.path.exists(self.train_dir):
      os.makedirs(self.train_dir)

    # Persist the flags that shaped the graph so a later run can detect a
    # mismatch instead of restoring an incompatible checkpoint.
    model_flags_dict = {
        "model": FLAGS.model,
        "feature_sizes": FLAGS.feature_sizes,
        "feature_names": FLAGS.feature_names,
        "frame_features": FLAGS.frame_features,
        "label_loss": FLAGS.label_loss,
    }
    flags_json_path = os.path.join(FLAGS.train_dir, "model_flags.json")
    if os.path.exists(flags_json_path):
      existing_flags = json.load(open(flags_json_path))
      if existing_flags != model_flags_dict:
        logging.error("Model flags do not match existing file %s. Please "
                      "delete the file, change --train_dir, or pass flag "
                      "--start_new_model",
                      flags_json_path)
        logging.error("Ran model with flags: %s", str(model_flags_dict))
        logging.error("Previously ran with flags: %s", str(existing_flags))
        exit(1)
    else:
      # Write the file.
      with open(flags_json_path, "w") as fout:
        fout.write(json.dumps(model_flags_dict))

    target, device_fn = self.start_server_if_distributed()

    meta_filename = self.get_meta_filename(start_new_model, self.train_dir)

    with tf.Graph().as_default() as graph:
      if meta_filename:
        saver = self.recover_model(meta_filename)

      with tf.device(device_fn):
        if not meta_filename:
          saver = self.build_model(self.model, self.reader, self.corr_mat)

        # Tensors were published to collections by build_graph().
        global_step = tf.get_collection("global_step")[0]
        loss = tf.get_collection("loss")[0]
        predictions = tf.get_collection("predictions")[0]
        labels = tf.get_collection("labels")[0]
        train_op = tf.get_collection("train_op")[0]
        init_op = tf.global_variables_initializer()

    # Checkpoints and summaries share one wall-clock cadence, controlled by
    # --save_checkpoint_every_n_hour (converted to seconds).
    sv = tf.train.Supervisor(
        graph,
        logdir=self.train_dir,
        init_op=init_op,
        is_chief=self.is_master,
        global_step=global_step,
        save_model_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
        save_summaries_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
        saver=saver)

    logging.info("%s: Starting managed session.", task_as_string(self.task))
    with sv.managed_session(target, config=self.config) as sess:
      try:
        logging.info("%s: Entering training loop.", task_as_string(self.task))
        while (not sv.should_stop()) and (not self.max_steps_reached):
          batch_start_time = time.time()
          _, global_step_val, loss_val, predictions_val, labels_val = sess.run(
              [train_op, global_step, loss, predictions, labels])
          seconds_per_batch = time.time() - batch_start_time
          examples_per_second = labels_val.shape[0] / seconds_per_batch

          if self.max_steps and self.max_steps <= global_step_val:
            self.max_steps_reached = True

          # Metrics (Hit@1 / PERR / GAP over this mini-batch) are computed and
          # logged only by the master, every --validate_every_n_training_steps.
          if self.is_master and global_step_val % FLAGS.validate_every_n_training_steps == 0 and self.train_dir:
            eval_start_time = time.time()
            hit_at_one = eval_util.calculate_hit_at_one(predictions_val, labels_val)
            perr = eval_util.calculate_precision_at_equal_recall_rate(predictions_val,
                                                                      labels_val)
            gap = eval_util.calculate_gap(predictions_val, labels_val)
            eval_end_time = time.time()
            eval_time = eval_end_time - eval_start_time

            logging.info("training step " + str(global_step_val) + " | Loss: " + ("%.2f" % loss_val) +
                         " Examples/sec: " + ("%.2f" % examples_per_second) + " | Hit@1: " +
                         ("%.2f" % hit_at_one) + " PERR: " + ("%.2f" % perr) +
                         " GAP: " + ("%.2f" % gap))

            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_Hit@1", hit_at_one),
                global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_Perr", perr), global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("model/Training_GAP", gap), global_step_val)
            sv.summary_writer.add_summary(
                utils.MakeSummary("global_step/Examples/Second",
                                  examples_per_second), global_step_val)
            sv.summary_writer.flush()

            # Breadcrumb file: the metric is recorded in the file NAME; the
            # content is just a newline.
            with open(FLAGS.train_dir + '/global_step_{%d}_training_GAP_{%.6f}.txt' % (global_step_val, gap), 'w') as f:
              f.write('\n')

            # Exporting the model every x steps
            time_to_export = ((self.last_model_export_step == 0) or
                              (global_step_val - self.last_model_export_step
                               >= self.export_model_steps))

            if self.is_master and time_to_export:
              self.export_model(global_step_val, sv.saver, sv.save_path, sess)
              self.last_model_export_step = global_step_val
          else:
            continue

      except tf.errors.OutOfRangeError:
        # The input queue signals end-of-epochs by raising OutOfRangeError.
        logging.info("%s: Done training -- epoch limit reached.",
                     task_as_string(self.task))

    logging.info("%s: Exited training loop.", task_as_string(self.task))
    sv.Stop()

  def export_model(self, global_step_val, saver, save_path, session):
    """Saves a checkpoint and exports an inference model for this step."""
    # If the model has already been exported at this step, return.
    if global_step_val == self.last_model_export_step:
      return

    last_checkpoint = saver.save(session, save_path, global_step_val)

    model_dir = "{0}/export/step_{1}".format(self.train_dir, global_step_val)
    logging.info("%s: Exporting the model at step %s to %s.",
                 task_as_string(self.task), global_step_val, model_dir)

    self.model_exporter.export_model(
        model_dir=model_dir,
        global_step_val=global_step_val,
        last_checkpoint=last_checkpoint)

  def start_server_if_distributed(self):
    """Starts a server if the execution is distributed.

    Returns:
      (target, device_fn): session target and device placement function;
      both are empty strings in the single-machine case.
    """
    if self.cluster:
      logging.info("%s: Starting trainer within cluster %s.",
                   task_as_string(self.task), self.cluster.as_dict())
      server = start_server(self.cluster, self.task)
      target = server.target
      device_fn = tf.train.replica_device_setter(
          ps_device="/job:ps",
          worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
          cluster=self.cluster)
    else:
      target = ""
      device_fn = ""
    return (target, device_fn)

  def remove_training_directory(self, train_dir):
    """Removes the training directory."""
    try:
      logging.info(
          "%s: Removing existing train directory.",
          task_as_string(self.task))
      gfile.DeleteRecursively(train_dir)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    except:
      logging.error(
          "%s: Failed to delete directory " + train_dir +
          " when starting a new model. Please delete it manually and" +
          " try again.", task_as_string(self.task))

  def get_meta_filename(self, start_new_model, train_dir):
    """Returns the .meta graph path to restore from, or None to build fresh."""
    if start_new_model:
      logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
                   task_as_string(self.task))
      return None

    latest_checkpoint = tf.train.latest_checkpoint(train_dir)
    if not latest_checkpoint:
      logging.info("%s: No checkpoint file found. Building a new model.",
                   task_as_string(self.task))
      return None

    meta_filename = latest_checkpoint + ".meta"
    if not gfile.Exists(meta_filename):
      logging.info("%s: No meta graph file found. Building a new model.",
                   task_as_string(self.task))
      return None
    else:
      return meta_filename

  def recover_model(self, meta_filename):
    """Imports the saved meta graph and returns its Saver."""
    logging.info("%s: Restoring from meta graph file %s",
                 task_as_string(self.task), meta_filename)
    return tf.train.import_meta_graph(meta_filename)

  def build_model(self, model, reader, corr_mat = None):
    """Find the model and build the graph."""
    label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
    optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

    build_graph(reader=reader,
                model=model,
                optimizer_class=optimizer_class,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                train_data_pattern=FLAGS.train_data_pattern,
                label_loss_fn=label_loss_fn,
                base_learning_rate=FLAGS.base_learning_rate,
                learning_rate_decay=FLAGS.learning_rate_decay,
                learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
                regularization_penalty=FLAGS.regularization_penalty,
                num_readers=FLAGS.num_readers,
                batch_size=FLAGS.batch_size,
                num_epochs=FLAGS.num_epochs,
                corr_mat = corr_mat)

    # max_to_keep=0 keeps every checkpoint ever written.
    return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=FLAGS.save_checkpoint_every_n_hour)
def get_reader():
  """Builds the data reader selected by the --frame_features flag."""
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  # Frame-level features need the sequence reader; otherwise the
  # video-level (aggregated) reader is used.
  reader_cls = (readers.YT8MFrameFeatureReader if FLAGS.frame_features
                else readers.YT8MAggregatedFeatureReader)
  return reader_cls(feature_names=feature_names, feature_sizes=feature_sizes)
class ParameterServer(object):
  """Serves variables for the other jobs in a distributed execution."""

  def __init__(self, cluster, task):
    """Creates a ParameterServer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
    """
    self.cluster = cluster
    self.task = task

  def run(self):
    """Starts the parameter server and blocks until it terminates."""
    logging.info("%s: Starting parameter server within cluster %s.",
                 task_as_string(self.task), self.cluster.as_dict())
    start_server(self.cluster, self.task).join()
def start_server(cluster, task):
  """Creates a Server.

  Args:
    cluster: A tf.train.ClusterSpec if the execution is distributed.
      None otherwise.
    task: A TaskSpec describing the job type and the task index.

  Raises:
    ValueError: if the task is missing its type or its index.
  """
  if not task.type:
    raise ValueError("%s: The task type must be specified." %
                     task_as_string(task))
  if task.index is None:
    raise ValueError("%s: The task index must be specified." %
                     task_as_string(task))

  # Create and start a server.
  cluster_spec = tf.train.ClusterSpec(cluster)
  server = tf.train.Server(
      cluster_spec,
      protocol="grpc",
      job_name=task.type,
      task_index=task.index)
  return server
def task_as_string(task):
  """Formats a TaskSpec as the canonical '/job:<type>/task:<index>' string."""
  fmt = "/job:%s/task:%s"
  return fmt % (task.type, task.index)
def main(unused_argv):
  """Entry point: dispatches this process to trainer or parameter server."""
  # Load the environment. TF_CONFIG carries the cluster/task description
  # when launched by a distributed runner; defaults to a lone master.
  env = json.loads(os.environ.get("TF_CONFIG", "{}"))

  # Load the cluster data from the environment.
  cluster_data = env.get("cluster", None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

  # Load the task data from the environment.
  task_data = env.get("task", None) or {"type": "master", "index": 0}
  task = type("TaskSpec", (object,), task_data)

  # Logging the version.
  logging.set_verbosity(tf.logging.INFO)
  logging.info("%s: Tensorflow version: %s.",
               task_as_string(task), tf.__version__)

  # Dispatch to a master, a worker, or a parameter server.
  if not cluster or task.type == "master" or task.type == "worker":
    model = find_class_by_name(FLAGS.model,
                               [frame_level_models, video_level_models])()

    reader = get_reader()

    model_exporter = export_model.ModelExporter(
        frame_features=FLAGS.frame_features,
        model=model,
        reader=reader)

    # NOTE(review): hard-coded absolute path — consider turning this into a
    # flag so the script runs outside this one machine.
    mat_dir = '/home/weimin/yt8m/code/youtube-8m/'
    # NOTE(review): np.load on an open .npz handle returns a lazy NpzFile that
    # reads from the file after this 'with' closes it — confirm the model
    # consumes corr_mat eagerly, or load by path instead.
    with open(mat_dir + 'corr_mat.npz', 'rb') as f:
      corr_mat = np.load(f)

    Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
            FLAGS.log_device_placement, FLAGS.max_steps,
            FLAGS.export_model_steps, corr_mat).run(start_new_model=FLAGS.start_new_model)

  elif task.type == "ps":
    ParameterServer(cluster, task).run()

  else:
    raise ValueError("%s: Invalid task_type: %s." %
                     (task_as_string(task), task.type))
# Script entry point: app.run() parses the command-line flags defined above
# and then invokes main().
if __name__ == "__main__":
  app.run()
| 39.453408 | 120 | 0.663059 |
import json
import os
import time
import eval_util
import export_model
import losses
import frame_level_models
import video_level_models
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils
import numpy as np
# Global flag container used by every function in this module.
FLAGS = flags.FLAGS
# Quiet TensorFlow's native (C++) log output for this process.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Command-line flags are only registered when the module is run as a script,
# so importing it (e.g. from tests) does not pollute the global flag registry.
if __name__ == "__main__":
  # Dataset flags.
  flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                      "The directory to save the model files in.")
  flags.DEFINE_string(
      "train_data_pattern", "",
      "File glob for the training dataset. If the files refer to Frame Level "
      "features (i.e. tensorflow.SequenceExample), then set --reader_type "
      "format. The (Sequence)Examples are expected to have 'rgb' byte array "
      "sequence feature as well as a 'labels' int64 context feature.")
  flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
                      "to use for training.")
  flags.DEFINE_string("feature_sizes", "1024", "Length of the feature vectors.")

  # Model flags.
  flags.DEFINE_bool(
      "frame_features", False,
      "If set, then --train_data_pattern must be frame-level features. "
      "Otherwise, --train_data_pattern must be aggregated video-level "
      "features. The model must also be set appropriately (i.e. to read 3D "
      "batches VS 4D batches.")
  flags.DEFINE_string(
      "model", "LogisticModel",
      "Which architecture to use for the model. Models are defined "
      "in models.py.")
  flags.DEFINE_bool(
      "start_new_model", False,
      "If set, this will not resume from a checkpoint and will instead create a"
      " new model instance.")

  # Training flags.
  flags.DEFINE_integer("num_gpu", 1,
                       "The maximum number of GPU devices to use for training. "
                       "Flag only applies if GPUs are installed")
  flags.DEFINE_integer("batch_size", 1024,
                       "How many examples to process per batch for training.")
  flags.DEFINE_string("label_loss", "CrossEntropyLoss",
                      "Which loss function to use for training the model.")
  flags.DEFINE_float(
      "regularization_penalty", 1.0,
      "How much weight to give to the regularization loss (the label loss has "
      "a weight of 1).")
  flags.DEFINE_float("base_learning_rate", 0.01,
                     "Which learning rate to start with.")
  flags.DEFINE_float("learning_rate_decay", 0.95,
                     "Learning rate decay factor to be applied every "
                     "learning_rate_decay_examples.")
  flags.DEFINE_float("learning_rate_decay_examples", 4000000,
                     "Multiply current learning rate by learning_rate_decay "
                     "every learning_rate_decay_examples.")
  flags.DEFINE_integer("num_epochs", 5,
                       "How many passes to make over the dataset before "
                       "halting training.")
  flags.DEFINE_integer("max_steps", None,
                       "The maximum number of iterations of the training loop.")
  flags.DEFINE_integer("export_model_steps", 10000000000,
                       "The period, in number of steps, with which the model "
                       "is exported for batch prediction.")
  flags.DEFINE_float("save_checkpoint_every_n_hour", 0.4,
                     "Save the checkpoint every n hours.")
  flags.DEFINE_integer("validate_every_n_training_steps", 100,
                       "eval on training for every n steps")

  # Other flags.
  flags.DEFINE_integer("num_readers", 12,
                       "How many threads to use for reading input files.")
  flags.DEFINE_string("optimizer", "AdamOptimizer",
                      "What optimizer class to use.")
  flags.DEFINE_float("clip_gradient_norm", 1.0, "Norm to clip gradients to.")
  flags.DEFINE_bool(
      "log_device_placement", False,
      "Whether to write the device on which every op will run into the "
      "logs on startup.")
def validate_class_name(flag_value, category, modules, expected_superclass):
  """Checks that flag_value names a subclass of expected_superclass.

  Modules are searched in order; the first one providing a truthy attribute
  of that name decides the outcome.

  Returns:
    True if the named class inherits from expected_superclass.

  Raises:
    flags.FlagsError: if the name is found but does not inherit from
      expected_superclass, or is not found in any module.
  """
  for module in modules:
    candidate = getattr(module, flag_value, None)
    if not candidate:
      continue
    if issubclass(candidate, expected_superclass):
      return True
    raise flags.FlagsError("%s '%s' doesn't inherit from %s." %
                           (category, flag_value,
                            expected_superclass.__name__))
  raise flags.FlagsError("Unable to find %s '%s'." % (category, flag_value))
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=1000,
                           num_epochs=None,
                           num_readers=1):
  """Creates the section of the graph which reads the training data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_epochs: How many passes to make over the training data. Set to 'None'
      to run indefinitely.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    # Filename queue shuffles file order; each reader thread pulls from it.
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=True)
    training_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    # Join the reader threads into one shuffled batch stream.
    return tf.train.shuffle_batch_join(
        training_data,
        batch_size=batch_size,
        capacity=batch_size * 5,
        min_after_dequeue=batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
def find_class_by_name(name, modules):
  """Searches `modules` for an attribute called `name` and returns the
  first truthy match; raises StopIteration if none is found."""
  matches = (getattr(module, name, None) for module in modules)
  return next(match for match in matches if match)
def build_graph(reader,
                model,
                train_data_pattern,
                label_loss_fn=losses.CrossEntropyLoss(),
                batch_size=1000,
                base_learning_rate=0.01,
                learning_rate_decay_examples=1000000,
                learning_rate_decay=0.95,
                optimizer_class=tf.train.AdamOptimizer,
                clip_gradient_norm=1.0,
                regularization_penalty=1,
                num_readers=1,
                num_epochs=None,
                corr_mat=None):
  """Creates the Tensorflow graph.

  Called once per process; afterwards the model is restored from a meta
  graph file rather than rebuilt. The interesting tensors are published via
  tf.add_to_collection ("global_step", "loss", "predictions", "train_op", ...)
  rather than returned.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
      from BaseModel.
    train_data_pattern: glob path to the training data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
      from BaseLoss.
    batch_size: How many examples to process at a time.
    base_learning_rate: What learning rate to initialize the optimizer with.
    learning_rate_decay_examples: Decay the rate every this many examples.
    learning_rate_decay: Multiplicative decay factor.
    optimizer_class: Which optimization algorithm to use.
    clip_gradient_norm: Magnitude of the gradient to clip to.
    regularization_penalty: How much weight to give the regularization loss
      compared to the label loss.
    num_readers: How many threads to use for I/O operations.
    num_epochs: How many passes to make over the data. 'None' means an
      unlimited number of passes.
    corr_mat: Optional matrix forwarded to model.create_model() as
      corr_mat_init — presumably a label-correlation prior; TODO confirm
      against the model implementation.
  """
  global_step = tf.Variable(0, trainable=False, name="global_step")

  # One "tower" (model replica) per visible GPU, capped by --num_gpu;
  # falls back to a single CPU tower when no GPU is present.
  local_device_protos = device_lib.list_local_devices()
  gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
  gpus = gpus[:FLAGS.num_gpu]
  num_gpus = len(gpus)

  if num_gpus > 0:
    logging.info("Using the following GPUs to train: " + str(gpus))
    num_towers = num_gpus
    device_string = '/gpu:%d'
  else:
    logging.info("No GPUs found. Training on CPU.")
    num_towers = 1
    device_string = '/cpu:%d'

  # Decay keyed on examples seen (step * effective batch), not raw steps.
  learning_rate = tf.train.exponential_decay(
      base_learning_rate,
      global_step * batch_size * num_towers,
      learning_rate_decay_examples,
      learning_rate_decay,
      staircase=True)
  tf.summary.scalar('learning_rate', learning_rate)

  optimizer = optimizer_class(learning_rate)

  # One large batch is read and then split evenly across the towers.
  unused_video_id, model_input_raw, labels_batch, num_frames = (
      get_input_data_tensors(
          reader,
          train_data_pattern,
          batch_size=batch_size * num_towers,
          num_readers=num_readers,
          num_epochs=num_epochs))
  tf.summary.histogram("model/input_raw", model_input_raw)

  feature_dim = len(model_input_raw.get_shape()) - 1

  # L2-normalize each feature vector along its last axis.
  model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

  tower_inputs = tf.split(model_input, num_towers)
  tower_labels = tf.split(labels_batch, num_towers)
  tower_num_frames = tf.split(num_frames, num_towers)
  tower_gradients = []
  tower_predictions = []
  tower_label_losses = []
  tower_reg_losses = []
  for i in range(num_towers):
    # These 'with' statements cannot be combined onto one line; nest them.
    with tf.device(device_string % i):
      # Variables are shared across towers: only tower 0 creates them.
      with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
        # With multiple GPUs, model variables live on the CPU so every tower
        # can read/update them; with one device they stay on that device.
        with (slim.arg_scope([slim.model_variable, slim.variable], device="/cpu:0" if num_gpus!=1 else "/gpu:0")):
          result = model.create_model(
              tower_inputs[i],
              num_frames=tower_num_frames[i],
              vocab_size=reader.num_classes,
              corr_mat_init=corr_mat,
              labels=tower_labels[i])
          for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

          # The loss is computed on "predictions0" while "predictions" is what
          # gets exported/evaluated — the model returns both.
          predictions0 = result["predictions0"]
          predictions = result["predictions"]
          tower_predictions.append(predictions)

          label_loss = label_loss_fn.calculate_loss(predictions0, tower_labels[i])

          if "regularization_loss" in result.keys():
            reg_loss = result["regularization_loss"]
          else:
            reg_loss = tf.constant(0.0)

          reg_losses = tf.losses.get_regularization_losses()
          if reg_losses:
            reg_loss += tf.add_n(reg_losses)

          tower_reg_losses.append(reg_loss)

          # Add update_ops (e.g. batch-norm moving averages) as a dependency
          # of the loss so they run with every training step.
          update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
          if "update_ops" in result.keys():
            update_ops += result["update_ops"]

          if update_ops:
            with tf.control_dependencies(update_ops):
              barrier = tf.no_op(name="gradient_barrier")
              with tf.control_dependencies([barrier]):
                label_loss = tf.identity(label_loss)

          tower_label_losses.append(label_loss)

          # Incorporate the weighted regularization penalty.
          final_loss = regularization_penalty * reg_loss + label_loss
          gradients = optimizer.compute_gradients(final_loss,
              colocate_gradients_with_ops=False)
          tower_gradients.append(gradients)

  label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
  tf.summary.scalar("label_loss", label_loss)
  if regularization_penalty != 0:
    reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
    tf.summary.scalar("reg_loss", reg_loss)

  # Average the per-tower gradients, optionally clip, then apply once.
  merged_gradients = utils.combine_gradients(tower_gradients)
  if clip_gradient_norm > 0:
    with tf.name_scope('clip_grads'):
      merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)
  train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)

  # Expose the interesting tensors to the Trainer via named collections.
  tf.add_to_collection("global_step", global_step)
  tf.add_to_collection("loss", label_loss)
  tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
  tf.add_to_collection("input_batch_raw", model_input_raw)
  tf.add_to_collection("input_batch", model_input)
  tf.add_to_collection("num_frames", num_frames)
  tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("train_op", train_op)
class Trainer(object):
def __init__(self, cluster, task, train_dir, model, reader, model_exporter,
log_device_placement=True, max_steps=None,
export_model_steps=1000, corr_mat = None):
self.cluster = cluster
self.task = task
self.is_master = (task.type == "master" and task.index == 0)
self.train_dir = train_dir
self.config = tf.ConfigProto(
allow_soft_placement=True,log_device_placement=log_device_placement)
self.model = model
self.reader = reader
self.model_exporter = model_exporter
self.max_steps = max_steps
self.max_steps_reached = False
self.export_model_steps = export_model_steps
self.last_model_export_step = 0
self.corr_mat = corr_mat
def run(self, start_new_model=False):
if self.is_master and start_new_model:
self.remove_training_directory(self.train_dir)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
model_flags_dict = {
"model": FLAGS.model,
"feature_sizes": FLAGS.feature_sizes,
"feature_names": FLAGS.feature_names,
"frame_features": FLAGS.frame_features,
"label_loss": FLAGS.label_loss,
}
flags_json_path = os.path.join(FLAGS.train_dir, "model_flags.json")
if os.path.exists(flags_json_path):
existing_flags = json.load(open(flags_json_path))
if existing_flags != model_flags_dict:
logging.error("Model flags do not match existing file %s. Please "
"delete the file, change --train_dir, or pass flag "
"--start_new_model",
flags_json_path)
logging.error("Ran model with flags: %s", str(model_flags_dict))
logging.error("Previously ran with flags: %s", str(existing_flags))
exit(1)
else:
with open(flags_json_path, "w") as fout:
fout.write(json.dumps(model_flags_dict))
target, device_fn = self.start_server_if_distributed()
meta_filename = self.get_meta_filename(start_new_model, self.train_dir)
with tf.Graph().as_default() as graph:
if meta_filename:
saver = self.recover_model(meta_filename)
with tf.device(device_fn):
if not meta_filename:
saver = self.build_model(self.model, self.reader, self.corr_mat)
global_step = tf.get_collection("global_step")[0]
loss = tf.get_collection("loss")[0]
predictions = tf.get_collection("predictions")[0]
labels = tf.get_collection("labels")[0]
train_op = tf.get_collection("train_op")[0]
init_op = tf.global_variables_initializer()
sv = tf.train.Supervisor(
graph,
logdir=self.train_dir,
init_op=init_op,
is_chief=self.is_master,
global_step=global_step,
save_model_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
save_summaries_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
saver=saver)
logging.info("%s: Starting managed session.", task_as_string(self.task))
with sv.managed_session(target, config=self.config) as sess:
try:
logging.info("%s: Entering training loop.", task_as_string(self.task))
while (not sv.should_stop()) and (not self.max_steps_reached):
batch_start_time = time.time()
_, global_step_val, loss_val, predictions_val, labels_val = sess.run(
[train_op, global_step, loss, predictions, labels])
seconds_per_batch = time.time() - batch_start_time
examples_per_second = labels_val.shape[0] / seconds_per_batch
if self.max_steps and self.max_steps <= global_step_val:
self.max_steps_reached = True
if self.is_master and global_step_val % FLAGS.validate_every_n_training_steps == 0 and self.train_dir:
eval_start_time = time.time()
hit_at_one = eval_util.calculate_hit_at_one(predictions_val, labels_val)
perr = eval_util.calculate_precision_at_equal_recall_rate(predictions_val,
labels_val)
gap = eval_util.calculate_gap(predictions_val, labels_val)
eval_end_time = time.time()
eval_time = eval_end_time - eval_start_time
logging.info("training step " + str(global_step_val) + " | Loss: " + ("%.2f" % loss_val) +
" Examples/sec: " + ("%.2f" % examples_per_second) + " | Hit@1: " +
("%.2f" % hit_at_one) + " PERR: " + ("%.2f" % perr) +
" GAP: " + ("%.2f" % gap))
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_Hit@1", hit_at_one),
global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_Perr", perr), global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_GAP", gap), global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("global_step/Examples/Second",
examples_per_second), global_step_val)
sv.summary_writer.flush()
with open(FLAGS.train_dir + '/global_step_{%d}_training_GAP_{%.6f}.txt' % (global_step_val, gap), 'w') as f:
f.write('\n')
time_to_export = ((self.last_model_export_step == 0) or
(global_step_val - self.last_model_export_step
>= self.export_model_steps))
if self.is_master and time_to_export:
self.export_model(global_step_val, sv.saver, sv.save_path, sess)
self.last_model_export_step = global_step_val
else:
continue
except tf.errors.OutOfRangeError:
logging.info("%s: Done training -- epoch limit reached.",
task_as_string(self.task))
logging.info("%s: Exited training loop.", task_as_string(self.task))
sv.Stop()
def export_model(self, global_step_val, saver, save_path, session):
if global_step_val == self.last_model_export_step:
return
last_checkpoint = saver.save(session, save_path, global_step_val)
model_dir = "{0}/export/step_{1}".format(self.train_dir, global_step_val)
logging.info("%s: Exporting the model at step %s to %s.",
task_as_string(self.task), global_step_val, model_dir)
self.model_exporter.export_model(
model_dir=model_dir,
global_step_val=global_step_val,
last_checkpoint=last_checkpoint)
def start_server_if_distributed(self):
if self.cluster:
logging.info("%s: Starting trainer within cluster %s.",
task_as_string(self.task), self.cluster.as_dict())
server = start_server(self.cluster, self.task)
target = server.target
device_fn = tf.train.replica_device_setter(
ps_device="/job:ps",
worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
cluster=self.cluster)
else:
target = ""
device_fn = ""
return (target, device_fn)
def remove_training_directory(self, train_dir):
try:
logging.info(
"%s: Removing existing train directory.",
task_as_string(self.task))
gfile.DeleteRecursively(train_dir)
except:
logging.error(
"%s: Failed to delete directory " + train_dir +
" when starting a new model. Please delete it manually and" +
" try again.", task_as_string(self.task))
def get_meta_filename(self, start_new_model, train_dir):
if start_new_model:
logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
task_as_string(self.task))
return None
latest_checkpoint = tf.train.latest_checkpoint(train_dir)
if not latest_checkpoint:
logging.info("%s: No checkpoint file found. Building a new model.",
task_as_string(self.task))
return None
meta_filename = latest_checkpoint + ".meta"
if not gfile.Exists(meta_filename):
logging.info("%s: No meta graph file found. Building a new model.",
task_as_string(self.task))
return None
else:
return meta_filename
def recover_model(self, meta_filename):
    """Import the meta graph from meta_filename and return its Saver."""
    logging.info("%s: Restoring from meta graph file %s",
                 task_as_string(self.task), meta_filename)
    return tf.train.import_meta_graph(meta_filename)
def build_model(self, model, reader, corr_mat = None):
    """Build the TensorFlow training graph and return a Saver.

    Args:
      model: model object whose graph is to be built.
      reader: input reader providing training examples.
      corr_mat: optional label-correlation matrix forwarded to
        build_graph (assumed to be a numpy array -- TODO confirm).
    """
    # Resolve loss and optimizer classes by name from the flags.
    label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
    optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

    build_graph(reader=reader,
                model=model,
                optimizer_class=optimizer_class,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                train_data_pattern=FLAGS.train_data_pattern,
                label_loss_fn=label_loss_fn,
                base_learning_rate=FLAGS.base_learning_rate,
                learning_rate_decay=FLAGS.learning_rate_decay,
                learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
                regularization_penalty=FLAGS.regularization_penalty,
                num_readers=FLAGS.num_readers,
                batch_size=FLAGS.batch_size,
                num_epochs=FLAGS.num_epochs,
                corr_mat = corr_mat)

    # max_to_keep=0 keeps every checkpoint written.
    return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=FLAGS.save_checkpoint_every_n_hour)
def get_reader():
    """Construct the YT8M dataset reader selected by the flags.

    Returns a frame-level or aggregated (video-level) feature reader
    depending on FLAGS.frame_features.
    """
    # Convert the comma-separated flag strings into lists.
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    if FLAGS.frame_features:
        reader = readers.YT8MFrameFeatureReader(
            feature_names=feature_names, feature_sizes=feature_sizes)
    else:
        reader = readers.YT8MAggregatedFeatureReader(
            feature_names=feature_names, feature_sizes=feature_sizes)

    return reader
class ParameterServer(object):
    """A parameter server task that joins a distributed training cluster."""

    def __init__(self, cluster, task):
        """Store the cluster spec and this task's spec.

        Args:
          cluster: cluster specification object (exposes as_dict()).
          task: task spec object with 'type' and 'index' attributes.
        """
        self.cluster = cluster
        self.task = task

    def run(self):
        """Start the server and block until it shuts down."""
        logging.info("%s: Starting parameter server within cluster %s.",
                     task_as_string(self.task), self.cluster.as_dict())
        server = start_server(self.cluster, self.task)
        server.join()
def start_server(cluster, task):
    """Create a tf.train.Server for the given task within the cluster.

    Raises:
      ValueError: if the task type or index is missing.
    """
    if not task.type:
        raise ValueError("%s: The task type must be specified." %
                         task_as_string(task))
    if task.index is None:
        raise ValueError("%s: The task index must be specified." %
                         task_as_string(task))

    # Create and return the gRPC server for this job/task.
    return tf.train.Server(
        tf.train.ClusterSpec(cluster),
        protocol="grpc",
        job_name=task.type,
        task_index=task.index)
def task_as_string(task):
    """Render a task spec as a TensorFlow-style '/job:<type>/task:<index>' path."""
    return "/job:{}/task:{}".format(task.type, task.index)
def main(unused_argv):
    """Entry point: dispatch to the trainer or parameter server by task type."""
    # Load cluster/task configuration from the TF_CONFIG environment variable.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))

    cluster_data = env.get("cluster", None)
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
    # Default to a single-machine master task when no task info is given.
    task_data = env.get("task", None) or {"type": "master", "index": 0}
    # Dynamically build an object exposing .type / .index attributes.
    task = type("TaskSpec", (object,), task_data)

    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    if not cluster or task.type == "master" or task.type == "worker":
        model = find_class_by_name(FLAGS.model,
                                   [frame_level_models, video_level_models])()
        reader = get_reader()
        model_exporter = export_model.ModelExporter(
            frame_features=FLAGS.frame_features,
            model=model,
            reader=reader)
        # NOTE(review): hard-coded absolute path to the correlation matrix;
        # consider promoting this to a flag.
        mat_dir = '/home/weimin/yt8m/code/youtube-8m/'
        with open(mat_dir + 'corr_mat.npz', 'rb') as f:
            corr_mat = np.load(f)
        Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
                FLAGS.log_device_placement, FLAGS.max_steps,
                FLAGS.export_model_steps, corr_mat).run(start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type))
if __name__ == "__main__":
    # app.run parses flags and then invokes main().
    app.run()
| true | true |
f728a5dfcbf2e181dfeaa3c0efc4d062bb4f446c | 2,624 | py | Python | rllab/envs/base.py | Bobeye/rllab | 53c0afb73f93c4a78ff21507914d7f7735c21ea9 | [
"MIT"
] | 1,838 | 2017-08-10T04:19:28.000Z | 2022-03-29T07:41:19.000Z | rllab/envs/base.py | Bobeye/rllab | 53c0afb73f93c4a78ff21507914d7f7735c21ea9 | [
"MIT"
] | 120 | 2016-10-05T09:16:16.000Z | 2017-07-27T22:57:31.000Z | rllab/envs/base.py | Bobeye/rllab | 53c0afb73f93c4a78ff21507914d7f7735c21ea9 | [
"MIT"
] | 498 | 2017-08-16T03:34:28.000Z | 2022-03-31T04:41:32.000Z | from .env_spec import EnvSpec
import collections
from cached_property import cached_property
class Env(object):
    """Abstract base class defining the rllab environment interface.

    Subclasses implement step()/reset() and expose action/observation
    spaces; the remaining hooks have safe no-op defaults.
    """

    def step(self, action):
        """
        Run one timestep of the environment's dynamics. When end of episode
        is reached, reset() should be called to reset the environment's internal state.
        Input
        -----
        action : an action provided by the environment
        Outputs
        -------
        (observation, reward, done, info)
        observation : agent's observation of the current environment
        reward [Float] : amount of reward due to the previous action
        done : a boolean, indicating whether the episode has ended
        info : a dictionary containing other diagnostic information from the previous action
        """
        raise NotImplementedError

    def reset(self):
        """
        Resets the state of the environment, returning an initial observation.
        Outputs
        -------
        observation : the initial observation of the space. (Initial reward is assumed to be 0.)
        """
        raise NotImplementedError

    @property
    def action_space(self):
        """
        Returns a Space object
        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    @property
    def observation_space(self):
        """
        Returns a Space object
        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    # Helpers that derive from Spaces
    @property
    def action_dim(self):
        # Flattened dimensionality of a single action.
        return self.action_space.flat_dim

    def render(self):
        # Optional visualization hook; no-op by default.
        pass

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass

    @cached_property
    def spec(self):
        # Cached: spaces are assumed not to change over the env's lifetime.
        return EnvSpec(
            observation_space=self.observation_space,
            action_space=self.action_space,
        )

    @property
    def horizon(self):
        """
        Horizon of the environment, if it has one
        """
        raise NotImplementedError

    def terminate(self):
        """
        Clean up operation,
        """
        pass

    def get_param_values(self):
        # Environments with tunable parameters override this; None means none.
        return None

    def set_param_values(self, params):
        # No-op by default; see get_param_values.
        pass
# Lightweight container for the result of Env.step.
_Step = collections.namedtuple("Step", ["observation", "reward", "done", "info"])


def Step(observation, reward, done, **kwargs):
    """Build a Step namedtuple from the results of Env.step.

    Any extra keyword arguments are collected into the ``info`` field
    as a dictionary of diagnostic values.
    """
    fields = (observation, reward, done, kwargs)
    return _Step(*fields)
| 25.980198 | 96 | 0.617378 | from .env_spec import EnvSpec
import collections
from cached_property import cached_property
class Env(object):
def step(self, action):
raise NotImplementedError
def reset(self):
raise NotImplementedError
@property
def action_space(self):
raise NotImplementedError
@property
def observation_space(self):
raise NotImplementedError
@property
def action_dim(self):
return self.action_space.flat_dim
def render(self):
pass
def log_diagnostics(self, paths):
pass
@cached_property
def spec(self):
return EnvSpec(
observation_space=self.observation_space,
action_space=self.action_space,
)
@property
def horizon(self):
raise NotImplementedError
def terminate(self):
pass
def get_param_values(self):
return None
def set_param_values(self, params):
pass
_Step = collections.namedtuple("Step", ["observation", "reward", "done", "info"])
def Step(observation, reward, done, **kwargs):
return _Step(observation, reward, done, kwargs)
| true | true |
f728a9896a7e4604ed5bd4ec09647ec748098257 | 73,983 | py | Python | pycqed/measurement/waveform_control/pulsar.py | sergimasot/PycQED_py3 | 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | [
"MIT"
] | null | null | null | pycqed/measurement/waveform_control/pulsar.py | sergimasot/PycQED_py3 | 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | [
"MIT"
] | null | null | null | pycqed/measurement/waveform_control/pulsar.py | sergimasot/PycQED_py3 | 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | [
"MIT"
] | null | null | null | # Originally by Wolfgang Pfaff
# Modified by Adriaan Rol 9/2015
# Modified by Ants Remm 5/2017
# Modified by Michael Kerschbaum 5/2019
import os
import shutil
import ctypes
import numpy as np
import logging
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import (
ManualParameter, InstrumentRefParameter)
import qcodes.utils.validators as vals
import time
from pycqed.instrument_drivers.virtual_instruments.virtual_awg5014 import \
VirtualAWG5014
from pycqed.instrument_drivers.virtual_instruments.virtual_AWG8 import \
VirtualAWG8
# exception catching removed because it does not work in python versions before
# 3.6
try:
from qcodes.instrument_drivers.tektronix.AWG5014 import Tektronix_AWG5014
except Exception:
Tektronix_AWG5014 = type(None)
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.\
UHFQuantumController import UHFQC
except Exception:
UHFQC = type(None)
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
ZI_HDAWG8 import ZI_HDAWG8
except Exception:
ZI_HDAWG8 = type(None)
log = logging.getLogger(__name__)
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
dummy_UHFQC import dummy_UHFQC
class UHFQCPulsar:
"""
Defines the Zurich Instruments UHFQC specific functionality for the Pulsar
class
"""
_supportedAWGtypes = (UHFQC, dummy_UHFQC)
_uhf_sequence_string_template = (
"const WINT_EN = 0x03ff0000;\n"
"const WINT_TRIG = 0x00000010;\n"
"const IAVG_TRIG = 0x00000020;\n"
"var RO_TRIG;\n"
"if (getUserReg(1)) {{\n"
" RO_TRIG = WINT_EN + IAVG_TRIG;\n"
"}} else {{\n"
" RO_TRIG = WINT_EN + WINT_TRIG;\n"
"}}\n"
"setTrigger(WINT_EN);\n"
"\n"
"{wave_definitions}\n"
"\n"
"var loop_cnt = getUserReg(0);\n"
"\n"
"repeat (loop_cnt) {{\n"
" {playback_string}\n"
"}}\n"
)
def _create_awg_parameters(self, awg, channel_name_map):
if not isinstance(awg, UHFQCPulsar._supportedAWGtypes):
return super()._create_awg_parameters(awg, channel_name_map)
name = awg.name
self.add_parameter('{}_reuse_waveforms'.format(awg.name),
initial_value=True, vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),
initial_value=True, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Minimizes the sequencer "
"memory by repeating specific sequence "
"patterns (eg. readout) passed in "
"'repeat dictionary'")
self.add_parameter('{}_enforce_single_element'.format(awg.name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Group all the pulses on this AWG into "
"a single element. Useful for making sure "
"that the master AWG has only one waveform"
" per segment.")
self.add_parameter('{}_granularity'.format(awg.name),
get_cmd=lambda: 16)
self.add_parameter('{}_element_start_granularity'.format(awg.name),
initial_value=8/(1.8e9),
parameter_class=ManualParameter)
self.add_parameter('{}_min_length'.format(awg.name),
get_cmd=lambda: 16 /(1.8e9))
self.add_parameter('{}_inter_element_deadtime'.format(awg.name),
# get_cmd=lambda: 80 / 2.4e9)
get_cmd=lambda: 8 / (1.8e9))
# get_cmd=lambda: 0 / 2.4e9)
self.add_parameter('{}_precompile'.format(awg.name),
initial_value=False, vals=vals.Bool(),
label='{} precompile segments'.format(awg.name),
parameter_class=ManualParameter)
self.add_parameter('{}_delay'.format(awg.name),
initial_value=0, label='{} delay'.format(name),
unit='s', parameter_class=ManualParameter,
docstring='Global delay applied to this '
'channel. Positive values move pulses'
' on this channel forward in time')
self.add_parameter('{}_trigger_channels'.format(awg.name),
initial_value=[],
label='{} trigger channel'.format(awg.name),
parameter_class=ManualParameter)
self.add_parameter('{}_active'.format(awg.name), initial_value=True,
label='{} active'.format(awg.name),
vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_min_length'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_trigger_source'.format(awg.name),
initial_value='Dig1',
vals=vals.Enum('Dig1', 'Dig2', 'DIO'),
parameter_class=ManualParameter,
docstring='Defines for which trigger source \
the AWG should wait, before playing \
the next waveform. Allowed values \
are: "Dig1", "Dig2", "DIO"')
for ch_nr in range(2):
id = 'ch{}'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._uhfqc_create_channel_parameters(id, name, awg)
self.channels.add(name)
def _uhfqc_create_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._uhfqc_setter(awg, id, 'amp'),
get_cmd=self._uhfqc_getter(awg, id, 'amp'),
vals=vals.Numbers(0.075, 1.5),
initial_value=0.75)
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._uhfqc_setter(awg, id, 'offset'),
get_cmd=self._uhfqc_getter(awg, id, 'offset'),
vals=vals.Numbers(-1.5, 1.5),
initial_value=0)
self.add_parameter('{}_distortion'.format(name),
label='{} distortion mode'.format(name),
initial_value='off',
vals=vals.Enum('off', 'precalculate'),
parameter_class=ManualParameter)
self.add_parameter('{}_distortion_dict'.format(name),
label='{} distortion dictionary'.format(name),
vals=vals.Dict(),
parameter_class=ManualParameter)
self.add_parameter('{}_charge_buildup_compensation'.format(name),
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('{}_compensation_pulse_scale'.format(name),
parameter_class=ManualParameter,
vals=vals.Numbers(0., 1.), initial_value=0.5)
self.add_parameter('{}_compensation_pulse_delay'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
@staticmethod
def _uhfqc_setter(obj, id, par):
if par == 'offset':
def s(val):
obj.set('sigouts_{}_offset'.format(int(id[2])-1), val)
elif par == 'amp':
def s(val):
obj.set('sigouts_{}_range'.format(int(id[2])-1), val)
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return s
def _uhfqc_getter(self, obj, id, par):
if par == 'offset':
def g():
return obj.get('sigouts_{}_offset'.format(int(id[2])-1))
elif par == 'amp':
def g():
if self._awgs_prequeried_state:
return obj.parameters['sigouts_{}_range' \
.format(int(id[2])-1)].get_latest()/2
else:
return obj.get('sigouts_{}_range' \
.format(int(id[2])-1))/2
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return g
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
    """Upload waveforms and program the UHFQC sequencer.

    Delegates to the parent class for non-UHFQC instruments. When
    repeat_pattern is given, nested sequencer 'repeat' blocks are
    generated to reduce sequencer memory usage.
    """
    if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)

    if not self._zi_waves_cleared:
        _zi_clear_waves()
        self._zi_waves_cleared = True
    # Collect every waveform hash referenced by the sequence and upload.
    waves_to_upload = {h: waveforms[h]
                       for codewords in awg_sequence.values()
                       if codewords is not None
                       for cw, chids in codewords.items()
                       if cw != 'metadata'
                       for h in chids.values()}
    self._zi_write_waves(waves_to_upload)

    defined_waves = set()
    wave_definitions = []
    playback_strings = []

    ch_has_waveforms = {'ch1': False, 'ch2': False}

    current_segment = 'no_segment'

    def play_element(element, playback_strings, wave_definitions):
        # FIX: 'nonlocal' so segment tracking actually updates the outer
        # variable (previously this assignment created a shadowing local).
        nonlocal current_segment
        if awg_sequence[element] is None:
            current_segment = element
            playback_strings.append(f'// Segment {current_segment}')
            return playback_strings, wave_definitions
        playback_strings.append(f'// Element {element}')

        metadata = awg_sequence[element].pop('metadata', {})
        if list(awg_sequence[element].keys()) != ['no_codeword']:
            raise NotImplementedError('UHFQC sequencer does currently\
                not support codewords!')
        chid_to_hash = awg_sequence[element]['no_codeword']

        # UHFQC has two analog channels and no marker channels here.
        wave = (chid_to_hash.get('ch1', None), None,
                chid_to_hash.get('ch2', None), None)
        wave_definitions += self._zi_wave_definition(wave,
                                                    defined_waves)

        acq = metadata.get('acq', False)
        playback_strings += self._zi_playback_string(name=obj.name,
                                                     device='uhf',
                                                     wave=wave,
                                                     acq=acq)

        ch_has_waveforms['ch1'] |= wave[0] is not None
        ch_has_waveforms['ch2'] |= wave[2] is not None
        return playback_strings, wave_definitions

    if repeat_pattern is None:
        for element in awg_sequence:
            playback_strings, wave_definitions = play_element(
                element, playback_strings, wave_definitions)
    else:
        # Indices of elements that are actually played (segment markers
        # are None entries and are skipped).
        real_indicies = []
        for index, element in enumerate(awg_sequence):
            if awg_sequence[element] is not None:
                real_indicies.append(index)
        el_total = len(real_indicies)

        def repeat_func(n, el_played, index, playback_strings, wave_definitions):
            """Recursively unroll a (count, sub-pattern, ...) tuple into
            sequencer repeat-blocks; returns the element count played."""
            if isinstance(n, tuple):
                el_played_list = []
                if n[0] > 1:
                    playback_strings.append('repeat (' + str(n[0]) + ') {')
                for t in n[1:]:
                    el_cnt, playback_strings, wave_definitions = repeat_func(
                        t, el_played, index + np.sum(el_played_list),
                        playback_strings, wave_definitions)
                    el_played_list.append(el_cnt)
                if n[0] > 1:
                    playback_strings.append('}')
                return int(n[0] * np.sum(el_played_list)), playback_strings, wave_definitions
            else:
                for k in range(n):
                    el_index = real_indicies[int(index) + k]
                    element = list(awg_sequence.keys())[el_index]
                    playback_strings, wave_definitions = play_element(
                        element, playback_strings, wave_definitions)
                    el_played = el_played + 1
            return el_played, playback_strings, wave_definitions

        el_played, playback_strings, wave_definitions = repeat_func(
            repeat_pattern, 0, 0, playback_strings, wave_definitions)

        if int(el_played) != int(el_total):
            # FIX: logging takes a format string first; the original passed
            # log.error(el_played, ' is not ', el_total), which uses an int
            # as the message and drops/garbles the remaining arguments.
            log.error('%s is not %s', el_played, el_total)
            raise ValueError('Check number of sequences in repeat pattern')

    if not (ch_has_waveforms['ch1'] or ch_has_waveforms['ch2']):
        return
    self.awgs_with_waveforms(obj.name)

    awg_str = self._uhf_sequence_string_template.format(
        wave_definitions='\n'.join(wave_definitions),
        playback_string='\n '.join(playback_strings),
    )

    # Necessary hack to pass the UHFQC drivers sanity check
    # in acquisition_initialize()
    obj._awg_program_features['loop_cnt'] = True
    obj._awg_program_features['avg_cnt'] = False
    # Hack needed to bypass the driver's reconfiguration bookkeeping.
    obj._awg_needs_configuration[0] = False
    obj._awg_program[0] = True

    obj.configure_awg_from_string(awg_nr=0, program_string=awg_str, timeout=600)
def _is_awg_running(self, obj):
    """Return True iff the UHF AWG sequencer is currently enabled/running."""
    if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        # Not a UHFQC: let the next class in the MRO handle it.
        return super()._is_awg_running(obj)
    return obj.awgs_0_enable() != 0
def _clock(self, obj, cid=None):
    """Return the instrument's sample clock frequency in Hz.

    cid is accepted for interface compatibility but unused for UHFQC.
    """
    if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        # Not a UHFQC: let the next class in the MRO handle it.
        return super()._clock(obj)
    return obj.clock_freq()
class HDAWG8Pulsar:
"""
Defines the Zurich Instruments HDAWG8 specific functionality for the Pulsar
class
"""
_supportedAWGtypes = (ZI_HDAWG8, VirtualAWG8, )
_hdawg_sequence_string_template = (
"{wave_definitions}\n"
"\n"
"{codeword_table_defs}\n"
"\n"
"while (1) {{\n"
" {playback_string}\n"
"}}\n"
)
def _create_awg_parameters(self, awg, channel_name_map):
if not isinstance(awg, HDAWG8Pulsar._supportedAWGtypes):
return super()._create_awg_parameters(awg, channel_name_map)
name = awg.name
self.add_parameter('{}_reuse_waveforms'.format(awg.name),
initial_value=True, vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Minimizes the sequencer "
"memory by repeating specific sequence "
"patterns (eg. readout) passed in "
"'repeat dictionary'")
self.add_parameter('{}_enforce_single_element'.format(awg.name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Group all the pulses on this AWG into "
"a single element. Useful for making sure "
"that the master AWG has only one waveform"
" per segment.")
self.add_parameter('{}_granularity'.format(awg.name),
get_cmd=lambda: 16)
self.add_parameter('{}_element_start_granularity'.format(awg.name),
initial_value=8/(2.4e9),
parameter_class=ManualParameter)
self.add_parameter('{}_min_length'.format(awg.name),
initial_value=16 /(2.4e9),
parameter_class=ManualParameter)
self.add_parameter('{}_inter_element_deadtime'.format(awg.name),
# get_cmd=lambda: 80 / 2.4e9)
get_cmd=lambda: 8 / (2.4e9))
# get_cmd=lambda: 0 / 2.4e9)
self.add_parameter('{}_precompile'.format(awg.name),
initial_value=False, vals=vals.Bool(),
label='{} precompile segments'.format(awg.name),
parameter_class=ManualParameter)
self.add_parameter('{}_delay'.format(awg.name),
initial_value=0, label='{} delay'.format(name),
unit='s', parameter_class=ManualParameter,
docstring='Global delay applied to this '
'channel. Positive values move pulses'
' on this channel forward in time')
self.add_parameter('{}_trigger_channels'.format(awg.name),
initial_value=[],
label='{} trigger channel'.format(awg.name),
parameter_class=ManualParameter)
self.add_parameter('{}_active'.format(awg.name), initial_value=True,
label='{} active'.format(awg.name),
vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_min_length'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_trigger_source'.format(awg.name),
initial_value='Dig1',
vals=vals.Enum('Dig1', 'DIO', 'ZSync'),
parameter_class=ManualParameter,
docstring='Defines for which trigger source \
the AWG should wait, before playing \
the next waveform. Allowed values \
are: "Dig1", "DIO", "ZSync"')
for ch_nr in range(8):
id = 'ch{}'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._hdawg_create_analog_channel_parameters(id, name, awg)
self.channels.add(name)
id = 'ch{}m'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._hdawg_create_marker_channel_parameters(id, name, awg)
self.channels.add(name)
def _hdawg_create_analog_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._hdawg_setter(awg, id, 'offset'),
get_cmd=self._hdawg_getter(awg, id, 'offset'),
vals=vals.Numbers())
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._hdawg_setter(awg, id, 'amp'),
get_cmd=self._hdawg_getter(awg, id, 'amp'),
vals=vals.Numbers(0.01, 5.0))
self.add_parameter('{}_distortion'.format(name),
label='{} distortion mode'.format(name),
initial_value='off',
vals=vals.Enum('off', 'precalculate'),
parameter_class=ManualParameter)
self.add_parameter('{}_distortion_dict'.format(name),
label='{} distortion dictionary'.format(name),
vals=vals.Dict(),
parameter_class=ManualParameter)
self.add_parameter('{}_charge_buildup_compensation'.format(name),
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('{}_compensation_pulse_scale'.format(name),
parameter_class=ManualParameter,
vals=vals.Numbers(0., 1.), initial_value=0.5)
self.add_parameter('{}_compensation_pulse_delay'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_internal_modulation'.format(name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter)
def _hdawg_create_marker_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'marker')
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._hdawg_setter(awg, id, 'offset'),
get_cmd=self._hdawg_getter(awg, id, 'offset'),
vals=vals.Numbers())
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._hdawg_setter(awg, id, 'amp'),
get_cmd=self._hdawg_getter(awg, id, 'amp'),
vals=vals.Numbers(0.01, 5.0))
@staticmethod
def _hdawg_setter(obj, id, par):
if par == 'offset':
if id[-1] != 'm':
def s(val):
obj.set('sigouts_{}_offset'.format(int(id[2])-1), val)
else:
s = None
elif par == 'amp':
if id[-1] != 'm':
def s(val):
obj.set('sigouts_{}_range'.format(int(id[2])-1), 2*val)
else:
s = None
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return s
def _hdawg_getter(self, obj, id, par):
if par == 'offset':
if id[-1] != 'm':
def g():
return obj.get('sigouts_{}_offset'.format(int(id[2])-1))
else:
return lambda: 0
elif par == 'amp':
if id[-1] != 'm':
def g():
if self._awgs_prequeried_state:
return obj.parameters['sigouts_{}_range' \
.format(int(id[2])-1)].get_latest()/2
else:
return obj.get('sigouts_{}_range' \
.format(int(id[2])-1))/2
else:
return lambda: 1
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return g
def get_divisor(self, chid, awg):
    """Return the sample divisor for a channel.

    Marker channels and unmodulated analog channels use divisor 1;
    internally modulated analog channels use divisor 2.
    """
    if chid.endswith('m'):
        # Marker channels are never modulated.
        return 1
    channel_name = self._id_channel(chid, awg)
    modulated = self.get(f"{channel_name}_internal_modulation")
    return 2 if modulated else 1
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)
if not self._zi_waves_cleared:
_zi_clear_waves()
self._zi_waves_cleared = True
chids = [f'ch{i+1}{m}' for i in range(8) for m in ['','m']]
divisor = {chid: self.get_divisor(chid, obj.name) for chid in chids}
waves_to_upload = {h: divisor[chid]*waveforms[h][::divisor[chid]]
for codewords in awg_sequence.values()
if codewords is not None
for cw, chids in codewords.items()
if cw != 'metadata'
for chid, h in chids.items()}
self._zi_write_waves(waves_to_upload)
ch_has_waveforms = {'ch{}{}'.format(i + 1, m): False
for i in range(8) for m in ['','m']}
for awg_nr in self._hdawg_active_awgs(obj):
defined_waves = set()
codeword_table = {}
wave_definitions = []
codeword_table_defs = []
playback_strings = []
interleaves = []
prev_dio_valid_polarity = obj.get(
'awgs_{}_dio_valid_polarity'.format(awg_nr))
added_cw = set()
ch1id = 'ch{}'.format(awg_nr * 2 + 1)
ch1mid = 'ch{}m'.format(awg_nr * 2 + 1)
ch2id = 'ch{}'.format(awg_nr * 2 + 2)
ch2mid = 'ch{}m'.format(awg_nr * 2 + 2)
chids = [ch1id, ch2id]
channels = [self._id_channel(chid, obj.name) for chid in chids]
codeword_el = set()
if all([self.get(
f'{chan}_internal_modulation') for chan in channels]):
internal_mod = True
elif not any([self.get(
f'{chan}_internal_modulation') for chan in channels]):
internal_mod = False
else:
raise NotImplementedError('Internal modulation can only be'
'specified per sub AWG!')
counter = 1
current_segment = 'no_segment'
for element in awg_sequence:
if awg_sequence[element] is None:
current_segment = element
playback_strings.append(f'// Segment {current_segment}')
continue
playback_strings.append(f'// Element {element}')
metadata = awg_sequence[element].pop('metadata', {})
nr_cw = len(set(awg_sequence[element].keys()) - \
{'no_codeword'})
if nr_cw == 1:
log.warning(
f'Only one codeword has been set for {element}')
else:
for cw in awg_sequence[element]:
if cw == 'no_codeword':
if nr_cw != 0:
continue
chid_to_hash = awg_sequence[element][cw]
wave = tuple(chid_to_hash.get(ch, None)
for ch in [ch1id, ch1mid, ch2id, ch2mid])
wave_definitions += self._zi_wave_definition(wave,
defined_waves)
if nr_cw != 0:
w1, w2 = self._zi_waves_to_wavenames(wave)
if cw not in codeword_table:
codeword_table_defs += \
self._zi_codeword_table_entry(cw, wave)
codeword_table[cw] = (w1, w2)
elif codeword_table[cw] != (w1, w2) \
and self.reuse_waveforms():
log.warning('Same codeword used for different '
'waveforms. Using first waveform. '
f'Ignoring element {element}.')
ch_has_waveforms[ch1id] |= wave[0] is not None
ch_has_waveforms[ch1mid] |= wave[1] is not None
ch_has_waveforms[ch2id] |= wave[2] is not None
ch_has_waveforms[ch2mid] |= wave[3] is not None
if not internal_mod:
playback_strings += self._zi_playback_string(name=obj.name,
device='hdawg', wave=wave, codeword=(nr_cw != 0),
append_zeros=self.append_zeros())
else:
pb_string, interleave_string = \
self._zi_interleaved_playback_string(name=obj.name,
device='hdawg', counter=counter, wave=wave,
codeword=(nr_cw != 0))
counter += 1
playback_strings += pb_string
interleaves += interleave_string
if not any([ch_has_waveforms[ch]
for ch in [ch1id, ch1mid, ch2id, ch2mid]]):
continue
awg_str = self._hdawg_sequence_string_template.format(
wave_definitions='\n'.join(wave_definitions+interleaves),
codeword_table_defs='\n'.join(codeword_table_defs),
playback_string='\n '.join(playback_strings))
# Hack needed to pass the sanity check of the ZI_base_instrument
# class in
obj._awg_needs_configuration[awg_nr] = False
obj._awg_program[awg_nr] = True
obj.configure_awg_from_string(awg_nr, awg_str, timeout=600)
obj.set('awgs_{}_dio_valid_polarity'.format(awg_nr),
prev_dio_valid_polarity)
for ch in range(8):
obj.set('sigouts_{}_on'.format(ch), ch_has_waveforms[f'ch{ch+1}'])
if any(ch_has_waveforms.values()):
self.awgs_with_waveforms(obj.name)
def _is_awg_running(self, obj):
if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
return super()._is_awg_running(obj)
return any([obj.get('awgs_{}_enable'.format(awg_nr)) for awg_nr in
self._hdawg_active_awgs(obj)])
def _clock(self, obj, cid):
if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
return super()._clock(obj, cid)
return obj.clock_freq()
def _hdawg_active_awgs(self, obj):
    """Return the sub-AWG core indices of the HDAWG (always all four)."""
    return [0,1,2,3]
class AWG5014Pulsar:
"""
Defines the Tektronix AWG5014 specific functionality for the Pulsar class
"""
_supportedAWGtypes = (Tektronix_AWG5014, VirtualAWG5014, )
def _create_awg_parameters(self, awg, channel_name_map):
if not isinstance(awg, AWG5014Pulsar._supportedAWGtypes):
return super()._create_awg_parameters(awg, channel_name_map)
self.add_parameter('{}_reuse_waveforms'.format(awg.name),
initial_value=True, vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Minimizes the sequencer "
"memory by repeating specific sequence "
"patterns (eg. readout) passed in "
"'repeat dictionary'")
self.add_parameter('{}_enforce_single_element'.format(awg.name),
initial_value=False, vals=vals.Bool(),
parameter_class=ManualParameter,
docstring="Group all the pulses on this AWG into "
"a single element. Useful for making sure "
"that the master AWG has only one waveform"
" per segment.")
self.add_parameter('{}_granularity'.format(awg.name),
get_cmd=lambda: 4)
self.add_parameter('{}_element_start_granularity'.format(awg.name),
initial_value=4/(1.2e9),
parameter_class=ManualParameter)
self.add_parameter('{}_min_length'.format(awg.name),
get_cmd=lambda: 256/(1.2e9)) # Can not be triggered
# faster than 210 ns.
self.add_parameter('{}_inter_element_deadtime'.format(awg.name),
get_cmd=lambda: 0)
self.add_parameter('{}_precompile'.format(awg.name),
initial_value=False,
label='{} precompile segments'.format(awg.name),
parameter_class=ManualParameter, vals=vals.Bool())
self.add_parameter('{}_delay'.format(awg.name), initial_value=0,
label='{} delay'.format(awg.name), unit='s',
parameter_class=ManualParameter,
docstring="Global delay applied to this channel. "
"Positive values move pulses on this "
"channel forward in time")
self.add_parameter('{}_trigger_channels'.format(awg.name),
initial_value=[],
label='{} trigger channels'.format(awg.name),
parameter_class=ManualParameter)
self.add_parameter('{}_active'.format(awg.name), initial_value=True,
label='{} active'.format(awg.name),
vals=vals.Bool(),
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_min_length'.format(awg.name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
for ch_nr in range(4):
id = 'ch{}'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._awg5014_create_analog_channel_parameters(id, name, awg)
self.channels.add(name)
id = 'ch{}m1'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._awg5014_create_marker_channel_parameters(id, name, awg)
self.channels.add(name)
id = 'ch{}m2'.format(ch_nr + 1)
name = channel_name_map.get(id, awg.name + '_' + id)
self._awg5014_create_marker_channel_parameters(id, name, awg)
self.channels.add(name)
def _awg5014_create_analog_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')
self.add_parameter('{}_offset_mode'.format(name),
parameter_class=ManualParameter,
vals=vals.Enum('software', 'hardware'))
offset_mode_func = self.parameters['{}_offset_mode'.format(name)]
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._awg5014_setter(awg, id, 'offset',
offset_mode_func),
get_cmd=self._awg5014_getter(awg, id, 'offset',
offset_mode_func),
vals=vals.Numbers())
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._awg5014_setter(awg, id, 'amp'),
get_cmd=self._awg5014_getter(awg, id, 'amp'),
vals=vals.Numbers(0.01, 2.25))
self.add_parameter('{}_distortion'.format(name),
label='{} distortion mode'.format(name),
initial_value='off',
vals=vals.Enum('off', 'precalculate'),
parameter_class=ManualParameter)
self.add_parameter('{}_distortion_dict'.format(name),
label='{} distortion dictionary'.format(name),
vals=vals.Dict(),
parameter_class=ManualParameter)
self.add_parameter('{}_charge_buildup_compensation'.format(name),
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('{}_compensation_pulse_scale'.format(name),
parameter_class=ManualParameter,
vals=vals.Numbers(0., 1.), initial_value=0.5)
self.add_parameter('{}_compensation_pulse_delay'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
def _awg5014_create_marker_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'marker')
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._awg5014_setter(awg, id, 'offset'),
get_cmd=self._awg5014_getter(awg, id, 'offset'),
vals=vals.Numbers(-2.7, 2.7))
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._awg5014_setter(awg, id, 'amp'),
get_cmd=self._awg5014_getter(awg, id, 'amp'),
vals=vals.Numbers(-5.4, 5.4))
@staticmethod
def _awg5014_setter(obj, id, par, offset_mode_func=None):
if id in ['ch1', 'ch2', 'ch3', 'ch4']:
if par == 'offset':
def s(val):
if offset_mode_func() == 'software':
obj.set('{}_offset'.format(id), val)
elif offset_mode_func() == 'hardware':
obj.set('{}_DC_out'.format(id), val)
else:
raise ValueError('Invalid offset mode for AWG5014: '
'{}'.format(offset_mode_func()))
elif par == 'amp':
def s(val):
obj.set('{}_amp'.format(id), 2*val)
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
else:
id_raw = id[:3] + '_' + id[3:] # convert ch1m1 to ch1_m1
if par == 'offset':
def s(val):
h = obj.get('{}_high'.format(id_raw))
l = obj.get('{}_low'.format(id_raw))
obj.set('{}_high'.format(id_raw), val + h - l)
obj.set('{}_low'.format(id_raw), val)
elif par == 'amp':
def s(val):
l = obj.get('{}_low'.format(id_raw))
obj.set('{}_high'.format(id_raw), l + val)
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return s
def _awg5014_getter(self, obj, id, par, offset_mode_func=None):
if id in ['ch1', 'ch2', 'ch3', 'ch4']:
if par == 'offset':
def g():
if offset_mode_func() == 'software':
return obj.get('{}_offset'.format(id))
elif offset_mode_func() == 'hardware':
return obj.get('{}_DC_out'.format(id))
else:
raise ValueError('Invalid offset mode for AWG5014: '
'{}'.format(offset_mode_func()))
elif par == 'amp':
def g():
if self._awgs_prequeried_state:
return obj.parameters['{}_amp'.format(id)] \
.get_latest()/2
else:
return obj.get('{}_amp'.format(id))/2
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
else:
id_raw = id[:3] + '_' + id[3:] # convert ch1m1 to ch1_m1
if par == 'offset':
def g():
return obj.get('{}_low'.format(id_raw))
elif par == 'amp':
def g():
if self._awgs_prequeried_state:
h = obj.get('{}_high'.format(id_raw))
l = obj.get('{}_low'.format(id_raw))
else:
h = obj.parameters['{}_high'.format(id_raw)]\
.get_latest()
l = obj.parameters['{}_low'.format(id_raw)]\
.get_latest()
return h - l
else:
raise NotImplementedError('Unknown parameter {}'.format(par))
return g
    def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
        """Upload a sequence to a Tektronix AWG5014.

        For any other AWG type, delegates to the next class in the MRO.
        Packs the waveforms per channel group, generates an .awg file,
        uploads and loads it, then restores the offset/marker-level
        parameters that were saved before the upload.

        Args:
            obj: The AWG5014 instrument instance.
            awg_sequence: Dict mapping element name to a codeword dict
                ('no_codeword' -> channel id -> waveform hash); entries that
                map to None are skipped.
            waveforms: Dict mapping waveform hash to sample array.
            repeat_pattern: Only forwarded to super() for other AWG types;
                not used by the AWG5014 branch.

        Returns:
            The generated AWG file, or None if no waveforms were programmed.
        """
        if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
            return super()._program_awg(obj, awg_sequence, waveforms, repeat_pattern)
        # Save offset and marker-level parameters; they are restored after
        # the AWG file has been loaded (see below).
        pars = {
            'ch{}_m{}_low'.format(ch + 1, m + 1)
            for ch in range(4) for m in range(2)
        }
        pars |= {
            'ch{}_m{}_high'.format(ch + 1, m + 1)
            for ch in range(4) for m in range(2)
        }
        pars |= {
            'ch{}_offset'.format(ch + 1) for ch in range(4)
        }
        old_vals = {}
        for par in pars:
            old_vals[par] = obj.get(par)
        packed_waveforms = {}
        wfname_l = []
        # Tracks per channel group whether any waveform was programmed.
        grp_has_waveforms = {f'ch{i+1}': False for i in range(4)}
        for element in awg_sequence:
            if awg_sequence[element] is None:
                continue
            metadata = awg_sequence[element].pop('metadata', {})
            if list(awg_sequence[element].keys()) != ['no_codeword']:
                raise NotImplementedError('AWG5014 sequencer does '
                                          'not support codewords!')
            chid_to_hash = awg_sequence[element]['no_codeword']
            if not any(chid_to_hash):
                continue  # no waveforms
            # All waveforms of the element are zero-padded to a common
            # length of at least 256 samples (cf. the 256/1.2e9 min_length
            # parameter defined for this AWG type).
            maxlen = max([len(waveforms[h]) for h in chid_to_hash.values()])
            maxlen = max(maxlen, 256)
            wfname_l.append([])
            for grp in [f'ch{i + 1}' for i in range(4)]:
                # (analog, marker1, marker2) hashes of this channel group.
                wave = (chid_to_hash.get(grp, None),
                        chid_to_hash.get(grp + 'm1', None),
                        chid_to_hash.get(grp + 'm2', None))
                grp_has_waveforms[grp] |= (wave != (None, None, None))
                wfname = self._hash_to_wavename((maxlen, wave))
                grp_wfs = [np.pad(waveforms.get(h, [0]),
                                  (0, maxlen - len(waveforms.get(h, [0]))),
                                  'constant', constant_values=0) for h in wave]
                packed_waveforms[wfname] = obj.pack_waveform(*grp_wfs)
                wfname_l[-1].append(wfname)
                if any([wf[0] != 0 for wf in grp_wfs]):
                    log.warning(f'Element {element} starts with non-zero '
                                f'entry on {obj.name}.')
        if not any(grp_has_waveforms.values()):
            # Nothing to program: switch all channel groups off and return.
            for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
                obj.set('{}_state'.format(grp), grp_has_waveforms[grp])
            return None
        self.awgs_with_waveforms(obj.name)
        # Sequence tables: play each element once, wait for trigger before
        # each element, jump back to the first element after the last one.
        nrep_l = [1] * len(wfname_l)
        goto_l = [0] * len(wfname_l)
        goto_l[-1] = 1
        wait_l = [1] * len(wfname_l)
        logic_jump_l = [0] * len(wfname_l)
        filename = 'pycqed_pulsar.awg'
        awg_file = obj.generate_awg_file(packed_waveforms, np.array(wfname_l).transpose().copy(),
                                         nrep_l, wait_l, goto_l, logic_jump_l,
                                         self._awg5014_chan_cfg(obj.name))
        obj.send_awg_file(filename, awg_file)
        obj.load_awg_file(filename)
        # Restore the parameters saved before the upload.
        for par in pars:
            obj.set(par, old_vals[par])
        time.sleep(.1)
        # Waits for AWG to be ready
        obj.is_awg_ready()
        # Enable only the channel groups that actually have waveforms.
        for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
            obj.set('{}_state'.format(grp), 1*grp_has_waveforms[grp])
        # Enable the DC outputs if any channel uses hardware offset mode.
        hardware_offsets = 0
        for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
            cname = self._id_channel(grp, obj.name)
            offset_mode = self.get('{}_offset_mode'.format(cname))
            if offset_mode == 'hardware':
                hardware_offsets = 1
        obj.DC_output(hardware_offsets)
        return awg_file
def _is_awg_running(self, obj):
if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
return super()._is_awg_running(obj)
return obj.get_state() != 'Idle'
def _clock(self, obj, cid=None):
if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
return super()._clock(obj, cid)
return obj.clock_freq()
@staticmethod
def _awg5014_group_ids(cid):
"""
Returns all id-s corresponding to a single channel group.
For example `Pulsar._awg5014_group_ids('ch2')` returns `['ch2',
'ch2m1', 'ch2m2']`.
Args:
cid: An id of one of the AWG5014 channels.
Returns: A list of id-s corresponding to the same group as `cid`.
"""
return [cid[:3], cid[:3] + 'm1', cid[:3] + 'm2']
    def _awg5014_chan_cfg(self, awg):
        """Build the channel-configuration dict for the AWG file of `awg`.

        Creates the amplitude, offset, marker-level and channel-state
        entries, keyed in the format expected by generate_awg_file(), for
        all pulsar channels defined on the given AWG.
        """
        channel_cfg = {}
        for channel in self.channels:
            if self.get('{}_awg'.format(channel)) != awg:
                continue
            cid = self.get('{}_id'.format(channel))
            amp = self.get('{}_amp'.format(channel))
            off = self.get('{}_offset'.format(channel))
            if self.get('{}_type'.format(channel)) == 'analog':
                offset_mode = self.get('{}_offset_mode'.format(channel))
                channel_cfg['ANALOG_METHOD_' + cid[2]] = 1
                # The AWG-file amplitude is peak-to-peak, i.e. twice the
                # pulsar amplitude (cf. _awg5014_setter/_awg5014_getter).
                channel_cfg['ANALOG_AMPLITUDE_' + cid[2]] = amp * 2
                if offset_mode == 'software':
                    channel_cfg['ANALOG_OFFSET_' + cid[2]] = off
                    channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = 0
                    channel_cfg['EXTERNAL_ADD_' + cid[2]] = 0
                else:
                    # Hardware mode: offset via the DC output, added
                    # externally to the channel output.
                    channel_cfg['ANALOG_OFFSET_' + cid[2]] = 0
                    channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = off
                    channel_cfg['EXTERNAL_ADD_' + cid[2]] = 1
            else:
                channel_cfg['MARKER1_METHOD_' + cid[2]] = 2
                channel_cfg['MARKER2_METHOD_' + cid[2]] = 2
                channel_cfg['MARKER{}_LOW_{}'.format(cid[-1], cid[2])] = \
                    off
                channel_cfg['MARKER{}_HIGH_{}'.format(cid[-1], cid[2])] = \
                    off + amp
            channel_cfg['CHANNEL_STATE_' + cid[2]] = 0
        # Switch channels on only when the AWG is marked active.
        # NOTE(review): this tests the AWG-level '<awg>_active' parameter,
        # not a per-channel one — confirm this is intended.
        for channel in self.channels:
            if self.get('{}_awg'.format(channel)) != awg:
                continue
            if self.get('{}_active'.format(awg)):
                cid = self.get('{}_id'.format(channel))
                channel_cfg['CHANNEL_STATE_' + cid[2]] = 1
        return channel_cfg
class Pulsar(AWG5014Pulsar, HDAWG8Pulsar, UHFQCPulsar, Instrument):
    """
    A meta-instrument responsible for all communication with the AWGs.

    Contains information about all the available awg-channels in the setup.
    Starting, stopping and programming and changing the parameters of the AWGs
    should be done through Pulsar. Supports Tektronix AWG5014 and partially
    ZI UHFLI.

    Args:
        master_awg: Name of the AWG that triggers all the other AWG-s and
                    should be started last (after other AWG-s are already
                    waiting for a trigger).
    """
    def __init__(self, name='Pulsar', master_awg=None):
        super().__init__(name)
        self.add_parameter('master_awg',
                           parameter_class=InstrumentRefParameter,
                           initial_value=master_awg)
        self.add_parameter('inter_element_spacing',
                           vals=vals.MultiType(vals.Numbers(0),
                                               vals.Enum('auto')),
                           set_cmd=self._set_inter_element_spacing,
                           get_cmd=self._get_inter_element_spacing)
        self.add_parameter('reuse_waveforms', initial_value=False,
                           parameter_class=ManualParameter, vals=vals.Bool())
        self.add_parameter('append_zeros', initial_value=0, vals=vals.Ints(),
                           parameter_class=ManualParameter)
        self.add_parameter('flux_crosstalk_cancellation', initial_value=False,
                           parameter_class=ManualParameter, vals=vals.Bool())
        self.add_parameter('flux_channels', initial_value=[],
                           parameter_class=ManualParameter, vals=vals.Lists())
        self.add_parameter('flux_crosstalk_cancellation_mtx',
                           initial_value=None, parameter_class=ManualParameter)
        self.add_parameter('flux_crosstalk_cancellation_shift_mtx',
                           initial_value=None, parameter_class=ManualParameter)
        self._inter_element_spacing = 'auto'
        self.channels = set()  # channel names
        self.awgs = set()  # AWG names
        self.last_sequence = None
        self.last_elements = None
        self._awgs_with_waveforms = set()
        # True while clocks/amplitudes are served from the local cache.
        self._awgs_prequeried_state = False
        self._zi_waves_cleared = False
        self._hash_to_wavename_table = {}
        self.num_seg = 0
        # Keep a reference to the last created Pulsar for get_instance().
        Pulsar._instance = self

    @staticmethod
    def get_instance():
        """Return the most recently instantiated Pulsar."""
        return Pulsar._instance

    # channel handling
    def define_awg_channels(self, awg, channel_name_map=None):
        """
        The AWG object must be created before creating channels for that AWG

        Args:
            awg: AWG object to add to the pulsar.
            channel_name_map: A dictionary that maps channel ids to channel
                              names. (default {})

        Raises:
            KeyError: If a channel name or the AWG is already registered.
        """
        if channel_name_map is None:
            channel_name_map = {}
        for channel_name in channel_name_map.values():
            if channel_name in self.channels:
                raise KeyError("Channel named '{}' already defined".format(
                    channel_name))
        if awg.name in self.awgs:
            raise KeyError("AWG '{}' already added to pulsar".format(awg.name))
        # Dispatches to the mixin class matching the AWG type via the MRO.
        super()._create_awg_parameters(awg, channel_name_map)
        self.awgs.add(awg.name)

    def find_awg_channels(self, awg):
        """Return the list of channel names defined on AWG `awg`."""
        return [channel for channel in self.channels
                if self.get('{}_awg'.format(channel)) == awg]

    def AWG_obj(self, **kw):
        """
        Return the AWG object corresponding to a channel or an AWG name.

        Args:
            awg: Name of the AWG Instrument.
            channel: Name of the channel

        Returns: An instance of Instrument class corresponding to the AWG
                 requested.

        Raises:
            ValueError: If both or neither of `awg` and `channel` are given.
        """
        awg = kw.get('awg', None)
        chan = kw.get('channel', None)
        if awg is not None and chan is not None:
            raise ValueError('Both `awg` and `channel` arguments passed to '
                             'Pulsar.AWG_obj()')
        elif awg is None and chan is not None:
            name = self.get('{}_awg'.format(chan))
        elif awg is not None and chan is None:
            name = awg
        else:
            raise ValueError('Either `awg` or `channel` argument needs to be '
                             'passed to Pulsar.AWG_obj()')
        return Instrument.find_instrument(name)

    def clock(self, channel=None, awg=None):
        """
        Returns the clock rate of a channel or an AWG.

        Exactly one of the two arguments must be given.

        Args:
            channel: name of the channel
            awg: name of the AWG

        Returns: clock rate in samples per second
        """
        if channel is not None and awg is not None:
            raise ValueError('Both channel and awg arguments passed to '
                             'Pulsar.clock()')
        if channel is None and awg is None:
            raise ValueError('Neither channel nor awg arguments passed to '
                             'Pulsar.clock()')
        if channel is not None:
            awg = self.get('{}_awg'.format(channel))
        if self._awgs_prequeried_state:
            # Serve the value cached in AWGs_prequeried().
            return self._clocks[awg]
        else:
            fail = None
            obj = self.AWG_obj(awg=awg)
            try:
                return super()._clock(obj)
            except AttributeError as e:
                fail = e
            if fail is not None:
                raise TypeError('Unsupported AWG instrument: {} of type {}. '
                                .format(obj.name, type(obj)) + str(fail))

    def active_awgs(self):
        """
        Returns:
            A set of the names of the active AWGs registered

            Inactive AWGs don't get started or stopped. Also the waveforms on
            inactive AWGs don't get updated.
        """
        return {awg for awg in self.awgs if self.get('{}_active'.format(awg))}

    def awgs_with_waveforms(self, awg=None):
        """
        Adds an awg to the set of AWGs with waveforms programmed, or returns
        set of said AWGs.
        """
        if awg is None:
            return self._awgs_with_waveforms
        else:
            self._awgs_with_waveforms.add(awg)

    def start(self, exclude=None):
        """
        Start the active AWGs. If multiple AWGs are used in a setup where the
        slave AWGs are triggered by the master AWG, then the slave AWGs must be
        running and waiting for trigger when the master AWG is started to
        ensure synchronous playback.

        Args:
            exclude: Optional list of AWG names that should not be started.
        """
        if exclude is None:
            exclude = []
        # Start only the AWGs which have at least one channel programmed, i.e.
        # where at least one channel has state = 1.
        awgs_with_waveforms = self.awgs_with_waveforms()
        used_awgs = set(self.active_awgs()) & awgs_with_waveforms
        for awg in used_awgs:
            self._stop_awg(awg)
        if self.master_awg() is None:
            for awg in used_awgs:
                if awg not in exclude:
                    self._start_awg(awg)
        else:
            # Start all slaves first, wait until they are running, then
            # start the master so playback stays synchronous.
            if self.master_awg() not in exclude:
                self.master_awg.get_instr().stop()
            for awg in used_awgs:
                if awg != self.master_awg() and awg not in exclude:
                    self._start_awg(awg)
            tstart = time.time()
            for awg in used_awgs:
                if awg == self.master_awg() or awg in exclude:
                    continue
                good = False
                while not (good or time.time() > tstart + 10):
                    if self._is_awg_running(awg):
                        good = True
                    else:
                        time.sleep(0.1)
                if not good:
                    raise Exception('AWG {} did not start in 10s'
                                    .format(awg))
            if self.master_awg() not in exclude:
                self.master_awg.get_instr().start()

    def stop(self):
        """
        Stop all active AWGs.
        """
        awgs_with_waveforms = set(self.awgs_with_waveforms())
        used_awgs = set(self.active_awgs()) & awgs_with_waveforms
        for awg in used_awgs:
            self._stop_awg(awg)

    def program_awgs(self, sequence, awgs='all'):
        """Compile `sequence` and upload it to the given AWGs.

        Args:
            sequence: The Sequence object to compile and upload.
            awgs: Iterable of AWG names, or 'all' for all active AWGs.
        """
        # Stores the last uploaded sequence for easy access and plotting
        self.last_sequence = sequence
        if awgs == 'all':
            awgs = self.active_awgs()
        # initializes the set of AWGs with waveforms
        # (difference_update accepts any iterable, unlike the -= operator)
        self._awgs_with_waveforms.difference_update(awgs)
        # prequery all AWG clock values and AWG amplitudes
        self.AWGs_prequeried(True)
        log.info(f'Starting compilation of sequence {sequence.name}')
        t0 = time.time()
        waveforms, awg_sequences = sequence.generate_waveforms_sequences()
        log.info(f'Finished compilation of sequence {sequence.name} in '
                 f'{time.time() - t0}')
        channels_used = self._channels_in_awg_sequences(awg_sequences)
        repeat_dict = self._generate_awg_repeat_dict(sequence.repeat_patterns,
                                                     channels_used)
        self._zi_waves_cleared = False
        self._hash_to_wavename_table = {}
        for awg in awgs:
            log.info(f'Started programming {awg}')
            t0 = time.time()
            if awg in repeat_dict.keys():
                self._program_awg(self.AWG_obj(awg=awg),
                                  awg_sequences.get(awg, {}), waveforms,
                                  repeat_pattern=repeat_dict[awg])
            else:
                self._program_awg(self.AWG_obj(awg=awg),
                                  awg_sequences.get(awg, {}), waveforms)
            log.info(f'Finished programming {awg} in {time.time() - t0}')
        self.num_seg = len(sequence.segments)
        self.AWGs_prequeried(False)

    def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
        """
        Program the AWG with a sequence of segments.

        Dispatches to the AWG-type-specific implementation via the MRO.

        Args:
            obj: the instance of the AWG to program
            awg_sequence: dict from element name to a codeword dict mapping
                channel id to waveform hash
            waveforms: dict from waveform hash to waveform samples
            repeat_pattern: optional repeat pattern forwarded to the
                AWG-specific implementation
        """
        if repeat_pattern is not None:
            super()._program_awg(obj, awg_sequence, waveforms,
                                 repeat_pattern=repeat_pattern)
        else:
            super()._program_awg(obj, awg_sequence, waveforms)

    def _hash_to_wavename(self, h):
        """Return a stable, letters-only wave name for hashable key `h`.

        Collisions of the underlying integer hash are resolved by linear
        probing; the mapping is memoized in self._hash_to_wavename_table.
        """
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        if h not in self._hash_to_wavename_table:
            hash_int = abs(hash(h))
            wname = ''.join(to_base(hash_int, len(alphabet), alphabet))[::-1]
            while wname in self._hash_to_wavename_table.values():
                hash_int += 1
                wname = ''.join(to_base(hash_int, len(alphabet), alphabet)) \
                    [::-1]
            self._hash_to_wavename_table[h] = wname
        return self._hash_to_wavename_table[h]

    def _zi_wave_definition(self, wave, defined_waves=None):
        """Return seqc wave-definition lines for the 4-tuple `wave`.

        `wave` is (analog1, marker1, analog2, marker2) waveform hashes;
        already-defined wave names in `defined_waves` are skipped.
        """
        if defined_waves is None:
            defined_waves = set()
        wave_definition = []
        w1, w2 = self._zi_waves_to_wavenames(wave)
        for analog, marker, wc in [(wave[0], wave[1], w1),
                                   (wave[2], wave[3], w2)]:
            if analog is not None:
                wa = self._hash_to_wavename(analog)
                if wa not in defined_waves:
                    wave_definition.append(f'wave {wa} = "{wa}";')
                    defined_waves.add(wa)
            if marker is not None:
                wm = self._hash_to_wavename(marker)
                if wm not in defined_waves:
                    wave_definition.append(f'wave {wm} = "{wm}";')
                    defined_waves.add(wm)
            if analog is not None and marker is not None:
                # Combined wave carrying both the analog and marker data.
                if wc not in defined_waves:
                    wave_definition.append(f'wave {wc} = {wa} + {wm};')
                    defined_waves.add(wc)
        return wave_definition

    def _zi_playback_string(self, name, device, wave, acq=False, codeword=False,
                            append_zeros=0):
        """Return seqc playback lines (trigger wait + playWave) for `wave`."""
        playback_string = []
        w1, w2 = self._zi_waves_to_wavenames(wave)
        trig_source = self.get('{}_trigger_source'.format(name))
        if trig_source == 'Dig1':
            playback_string.append(
                'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))
        elif trig_source == 'Dig2':
            playback_string.append('waitDigTrigger(2,1);')
        else:
            playback_string.append(f'wait{trig_source}Trigger();')
        if codeword and not (w1 is None and w2 is None):
            playback_string.append('playWaveDIO();')
        else:
            if w1 is None and w2 is not None:
                # This hack is needed due to a bug on the HDAWG.
                # Remove this if case once the bug is fixed.
                playback_string.append(f'playWave(marker(1,0)*0*{w2}, {w2});')
            elif w1 is not None and w2 is None:
                # This hack is needed due to a bug on the HDAWG.
                # Remove this if case once the bug is fixed.
                playback_string.append(f'playWave({w1}, marker(1,0)*0*{w1});')
            elif w1 is not None or w2 is not None:
                playback_string.append('playWave({});'.format(
                    _zi_wavename_pair_to_argument(w1, w2)))
        if acq:
            playback_string.append('setTrigger(RO_TRIG);')
            playback_string.append('setTrigger(WINT_EN);')
        if append_zeros:
            playback_string.append(f'playZero({append_zeros});')
        return playback_string

    def _zi_interleaved_playback_string(self, name, device, counter,
                                        wave, acq=False, codeword=False):
        """Return (playback lines, interleave definitions) for modulated
        playback, where the I and Q waveforms are interleaved into one wave.
        """
        playback_string = []
        w1, w2 = self._zi_waves_to_wavenames(wave)
        if w1 is None or w2 is None:
            raise ValueError('When using HDAWG modulation both I and Q need '
                             'to be defined')
        wname = f'wave{counter}'
        interleaves = [f'wave {wname} = interleave({w1}, {w2});']
        if not codeword:
            if not acq:
                playback_string.append(f'prefetch({wname},{wname});')
        trig_source = self.get('{}_trigger_source'.format(name))
        if trig_source == 'Dig1':
            playback_string.append(
                'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))
        elif trig_source == 'Dig2':
            playback_string.append('waitDigTrigger(2,1);')
        else:
            playback_string.append(f'wait{trig_source}Trigger();')
        if codeword:
            raise NotImplementedError('Modulation in combination with codeword'
                                      'pulses has not yet been implemented!')
        else:
            playback_string.append(f'playWave({wname},{wname});')
        if acq:
            playback_string.append('setTrigger(RO_TRIG);')
            playback_string.append('setTrigger(WINT_EN);')
        return playback_string, interleaves

    def _zi_codeword_table_entry(self, codeword, wave):
        """Return the seqc setWaveDIO line for `codeword`, or [] if empty."""
        w1, w2 = self._zi_waves_to_wavenames(wave)
        if w1 is None and w2 is not None:
            # This hack is needed due to a bug on the HDAWG.
            # Remove this if case once the bug is fixed.
            return [f'setWaveDIO({codeword}, zeros(1) + marker(1, 0), {w2});']
        elif not (w1 is None and w2 is None):
            return ['setWaveDIO({}, {});'.format(codeword,
                        _zi_wavename_pair_to_argument(w1, w2))]
        else:
            return []

    def _zi_waves_to_wavenames(self, wave):
        """Map the 4-tuple (analog1, marker1, analog2, marker2) of hashes to
        a pair of wave names (one per output), or None where both are None.
        """
        wavenames = []
        for analog, marker in [(wave[0], wave[1]), (wave[2], wave[3])]:
            if analog is None and marker is None:
                wavenames.append(None)
            elif analog is None and marker is not None:
                wavenames.append(self._hash_to_wavename(marker))
            elif analog is not None and marker is None:
                wavenames.append(self._hash_to_wavename(analog))
            else:
                wavenames.append(self._hash_to_wavename((analog, marker)))
        return wavenames

    def _zi_write_waves(self, waveforms):
        """Write each waveform to a csv file in the LabOne waves directory."""
        wave_dir = _zi_wave_dir()
        for h, wf in waveforms.items():
            filename = os.path.join(wave_dir, self._hash_to_wavename(h)+'.csv')
            # np.float was a deprecated alias of the builtin float and was
            # removed from NumPy; compare against float directly.
            fmt = '%.18e' if wf.dtype == float else '%d'
            np.savetxt(filename, wf, delimiter=",", fmt=fmt)

    def _start_awg(self, awg):
        obj = self.AWG_obj(awg=awg)
        obj.start()

    def _stop_awg(self, awg):
        obj = self.AWG_obj(awg=awg)
        obj.stop()

    def _is_awg_running(self, awg):
        fail = None
        obj = self.AWG_obj(awg=awg)
        try:
            return super()._is_awg_running(obj)
        except AttributeError as e:
            fail = e
        if fail is not None:
            raise TypeError('Unsupported AWG instrument: {} of type {}. '
                            .format(obj.name, type(obj)) + str(fail))

    def _set_inter_element_spacing(self, val):
        self._inter_element_spacing = val

    def _get_inter_element_spacing(self):
        if self._inter_element_spacing != 'auto':
            return self._inter_element_spacing
        else:
            # 'auto': use the largest dead time among the registered AWGs.
            max_spacing = 0
            for awg in self.awgs:
                max_spacing = max(max_spacing, self.get(
                    '{}_inter_element_deadtime'.format(awg)))
            return max_spacing

    def AWGs_prequeried(self, status=None):
        """Query or set the prequeried state.

        With status=True, caches all AWG clocks and channel amplitudes so
        subsequent accesses avoid hardware queries; with status=False the
        cache is disabled; with status=None the current state is returned.
        """
        if status is None:
            return self._awgs_prequeried_state
        elif status:
            self._awgs_prequeried_state = False
            self._clocks = {}
            for awg in self.awgs:
                self._clocks[awg] = self.clock(awg=awg)
            for c in self.channels:
                # prequery also the output amplitude values
                self.get(c + '_amp')
            self._awgs_prequeried_state = True
        else:
            self._awgs_prequeried_state = False

    def _id_channel(self, cid, awg):
        """
        Returns the channel name corresponding to the channel with id `cid` on
        the AWG `awg`.

        Args:
            cid: An id of one of the channels.
            awg: The name of the AWG.

        Returns: The corresponding channel name. If the channel is not found,
                 returns `None`.
        """
        for cname in self.channels:
            if self.get('{}_awg'.format(cname)) == awg and \
                    self.get('{}_id'.format(cname)) == cid:
                return cname
        return None

    @staticmethod
    def _channels_in_awg_sequences(awg_sequences):
        """
        identifies all channels used in the given awg keyed sequence
        :param awg_sequences (dict): awg sequences keyed by awg name, i.e. as
            returned by sequence.generate_sequence_waveforms()
        :return: dictionary keyed by awg of with all channel used during the
            sequence
        """
        channels_used = dict()
        for awg in awg_sequences:
            channels_used[awg] = set()
            for segname in awg_sequences[awg]:
                if awg_sequences[awg][segname] is None:
                    continue
                elements = awg_sequences[awg][segname]
                for cw in elements:
                    if cw != "metadata":
                        channels_used[awg] |= elements[cw].keys()
        return channels_used

    def _generate_awg_repeat_dict(self, repeat_dict_per_ch, channels_used):
        """
        Translates a repeat dictionary keyed by channels to a repeat dictionary
        keyed by awg. Checks whether all channels in channels_used have an entry.

        :param repeat_dict_per_ch: keys: channels_id, values: repeat pattern
        :param channels_used (dict): list of channel used on each awg
        :return: repeat dictionary keyed by awg name
        """
        awg_ch_repeat_dict = dict()
        repeat_dict_per_awg = dict()
        for cname in repeat_dict_per_ch:
            awg = self.get(f"{cname}_awg")
            chid = self.get(f"{cname}_id")
            if awg not in awg_ch_repeat_dict:
                awg_ch_repeat_dict[awg] = []
            awg_ch_repeat_dict[awg].append(chid)
            # All channels on one AWG must share the same repeat pattern.
            if repeat_dict_per_awg.get(awg, repeat_dict_per_ch[cname]) \
                    != repeat_dict_per_ch[cname]:
                raise NotImplementedError(f"Repeat pattern on {cname} is "
                    f"different from at least one other channel on {awg}:"
                    f"{repeat_dict_per_ch[cname]} vs {repeat_dict_per_awg[awg]}")
            repeat_dict_per_awg[awg] = repeat_dict_per_ch[cname]
        for awg_repeat, chs_repeat in awg_ch_repeat_dict.items():
            for ch in channels_used[awg_repeat]:
                assert ch in chs_repeat, f"Repeat pattern " \
                    f"provided for {awg_repeat} but no pattern was given on " \
                    f"{ch}. All used channels on the same awg must have a " \
                    f"repeat pattern."
        return repeat_dict_per_awg
def to_base(n, b, alphabet=None, prev=None):
    """Convert non-negative integer `n` to base `b`.

    Digits are returned least-significant first, appended after `prev`
    (default empty). If `alphabet` is given, every digit (including those
    in `prev`) is mapped through it.
    """
    digits = [] if prev is None else prev
    while n != 0:
        n, remainder = divmod(n, b)
        digits = digits + [remainder]
    if alphabet is None:
        return digits
    return [alphabet[i] for i in digits]
def _zi_wave_dir():
if os.name == 'nt':
dll = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)
if dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False):
_basedir = buf.value
else:
log.warning('Could not extract my documents folder')
else:
_basedir = os.path.expanduser('~')
wave_dir = os.path.join(_basedir, 'Zurich Instruments', 'LabOne',
'WebServer', 'awg', 'waves')
if not os.path.exists(wave_dir):
os.makedirs(wave_dir)
return wave_dir
def _zi_clear_waves():
    """Remove all cached wave data from the LabOne waves directory:
    .csv wave files are deleted, .cache directories removed recursively."""
    wave_dir = _zi_wave_dir()
    for entry in os.listdir(wave_dir):
        full_path = os.path.join(wave_dir, entry)
        if entry.endswith(".csv"):
            os.remove(full_path)
        elif entry.endswith('.cache'):
            shutil.rmtree(full_path)
def _zi_wavename_pair_to_argument(w1, w2):
if w1 is not None and w2 is not None:
return f'{w1}, {w2}'
elif w1 is not None and w2 is None:
return f'1, {w1}'
elif w1 is None and w2 is not None:
return f'2, {w2}'
else:
return '' | 45.001825 | 97 | 0.524877 |
import os
import shutil
import ctypes
import numpy as np
import logging
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import (
ManualParameter, InstrumentRefParameter)
import qcodes.utils.validators as vals
import time
from pycqed.instrument_drivers.virtual_instruments.virtual_awg5014 import \
VirtualAWG5014
from pycqed.instrument_drivers.virtual_instruments.virtual_AWG8 import \
VirtualAWG8
try:
from qcodes.instrument_drivers.tektronix.AWG5014 import Tektronix_AWG5014
except Exception:
Tektronix_AWG5014 = type(None)
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.\
UHFQuantumController import UHFQC
except Exception:
UHFQC = type(None)
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
ZI_HDAWG8 import ZI_HDAWG8
except Exception:
ZI_HDAWG8 = type(None)
log = logging.getLogger(__name__)
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
dummy_UHFQC import dummy_UHFQC
class UHFQCPulsar:
_supportedAWGtypes = (UHFQC, dummy_UHFQC)
_uhf_sequence_string_template = (
"const WINT_EN = 0x03ff0000;\n"
"const WINT_TRIG = 0x00000010;\n"
"const IAVG_TRIG = 0x00000020;\n"
"var RO_TRIG;\n"
"if (getUserReg(1)) {{\n"
" RO_TRIG = WINT_EN + IAVG_TRIG;\n"
"}} else {{\n"
" RO_TRIG = WINT_EN + WINT_TRIG;\n"
"}}\n"
"setTrigger(WINT_EN);\n"
"\n"
"{wave_definitions}\n"
"\n"
"var loop_cnt = getUserReg(0);\n"
"\n"
"repeat (loop_cnt) {{\n"
" {playback_string}\n"
"}}\n"
)
    def _create_awg_parameters(self, awg, channel_name_map):
        """Create all pulsar-level parameters for a UHFQC AWG.

        For any other AWG type, defers to the next class in the MRO.

        Args:
            awg: The AWG instrument instance.
            channel_name_map: Dict mapping channel ids ('ch1', 'ch2') to
                pulsar channel names.
        """
        if not isinstance(awg, UHFQCPulsar._supportedAWGtypes):
            return super()._create_awg_parameters(awg, channel_name_map)
        name = awg.name
        self.add_parameter('{}_reuse_waveforms'.format(awg.name),
                           initial_value=True, vals=vals.Bool(),
                           parameter_class=ManualParameter)
        self.add_parameter('{}_minimize_sequencer_memory'.format(awg.name),
                           initial_value=True, vals=vals.Bool(),
                           parameter_class=ManualParameter,
                           docstring="Minimizes the sequencer "
                                     "memory by repeating specific sequence "
                                     "patterns (eg. readout) passed in "
                                     "'repeat dictionary'")
        self.add_parameter('{}_enforce_single_element'.format(awg.name),
                           initial_value=False, vals=vals.Bool(),
                           parameter_class=ManualParameter,
                           docstring="Group all the pulses on this AWG into "
                                     "a single element. Useful for making sure "
                                     "that the master AWG has only one waveform"
                                     " per segment.")
        # Timing constants of the UHFQC AWG: a 16-sample waveform
        # granularity; the time values are expressed relative to the
        # 1.8 GSa/s sample clock.
        self.add_parameter('{}_granularity'.format(awg.name),
                           get_cmd=lambda: 16)
        self.add_parameter('{}_element_start_granularity'.format(awg.name),
                           initial_value=8/(1.8e9),
                           parameter_class=ManualParameter)
        self.add_parameter('{}_min_length'.format(awg.name),
                           get_cmd=lambda: 16 /(1.8e9))
        self.add_parameter('{}_inter_element_deadtime'.format(awg.name),
                           get_cmd=lambda: 8 / (1.8e9))
        self.add_parameter('{}_precompile'.format(awg.name),
                           initial_value=False, vals=vals.Bool(),
                           label='{} precompile segments'.format(awg.name),
                           parameter_class=ManualParameter)
        self.add_parameter('{}_delay'.format(awg.name),
                           initial_value=0, label='{} delay'.format(name),
                           unit='s', parameter_class=ManualParameter,
                           docstring='Global delay applied to this '
                                     'channel. Positive values move pulses'
                                     ' on this channel forward in time')
        self.add_parameter('{}_trigger_channels'.format(awg.name),
                           initial_value=[],
                           label='{} trigger channel'.format(awg.name),
                           parameter_class=ManualParameter)
        self.add_parameter('{}_active'.format(awg.name), initial_value=True,
                           label='{} active'.format(awg.name),
                           vals=vals.Bool(),
                           parameter_class=ManualParameter)
        self.add_parameter('{}_compensation_pulse_min_length'.format(name),
                           initial_value=0, unit='s',
                           parameter_class=ManualParameter)
        self.add_parameter('{}_trigger_source'.format(awg.name),
                           initial_value='Dig1',
                           vals=vals.Enum('Dig1', 'Dig2', 'DIO'),
                           parameter_class=ManualParameter,
                           docstring='Defines for which trigger source \
                                the AWG should wait, before playing \
                                the next waveform. Allowed values \
                                are: "Dig1", "Dig2", "DIO"')
        # The UHFQC has two analog output channels, ch1 and ch2. Note that
        # `name` is rebound to the per-channel name inside this loop.
        for ch_nr in range(2):
            id = 'ch{}'.format(ch_nr + 1)
            name = channel_name_map.get(id, awg.name + '_' + id)
            self._uhfqc_create_channel_parameters(id, name, awg)
            self.channels.add(name)
def _uhfqc_create_channel_parameters(self, id, name, awg):
self.add_parameter('{}_id'.format(name), get_cmd=lambda _=id: _)
self.add_parameter('{}_awg'.format(name), get_cmd=lambda _=awg.name: _)
self.add_parameter('{}_type'.format(name), get_cmd=lambda: 'analog')
self.add_parameter('{}_amp'.format(name),
label='{} amplitude'.format(name), unit='V',
set_cmd=self._uhfqc_setter(awg, id, 'amp'),
get_cmd=self._uhfqc_getter(awg, id, 'amp'),
vals=vals.Numbers(0.075, 1.5),
initial_value=0.75)
self.add_parameter('{}_offset'.format(name),
label='{} offset'.format(name), unit='V',
set_cmd=self._uhfqc_setter(awg, id, 'offset'),
get_cmd=self._uhfqc_getter(awg, id, 'offset'),
vals=vals.Numbers(-1.5, 1.5),
initial_value=0)
self.add_parameter('{}_distortion'.format(name),
label='{} distortion mode'.format(name),
initial_value='off',
vals=vals.Enum('off', 'precalculate'),
parameter_class=ManualParameter)
self.add_parameter('{}_distortion_dict'.format(name),
label='{} distortion dictionary'.format(name),
vals=vals.Dict(),
parameter_class=ManualParameter)
self.add_parameter('{}_charge_buildup_compensation'.format(name),
parameter_class=ManualParameter,
vals=vals.Bool(), initial_value=False)
self.add_parameter('{}_compensation_pulse_scale'.format(name),
parameter_class=ManualParameter,
vals=vals.Numbers(0., 1.), initial_value=0.5)
self.add_parameter('{}_compensation_pulse_delay'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
self.add_parameter('{}_compensation_pulse_gaussian_filter_sigma'.format(name),
initial_value=0, unit='s',
parameter_class=ManualParameter)
@staticmethod
def _uhfqc_setter(obj, id, par):
    """Return a setter closure writing a UHFQC channel parameter.

    A 'chN' id maps onto the zero-based 'sigouts_{N-1}_*' instrument node.
    """
    if par == 'offset':
        node_fmt = 'sigouts_{}_offset'
    elif par == 'amp':
        node_fmt = 'sigouts_{}_range'
    else:
        raise NotImplementedError('Unknown parameter {}'.format(par))

    def setter(val):
        # Translate 'chN' to the zero-based output index at call time.
        obj.set(node_fmt.format(int(id[2]) - 1), val)
    return setter
def _uhfqc_getter(self, obj, id, par):
    """Return a getter closure reading a UHFQC channel parameter.

    The 'amp' getter returns half of the instrument range; while AWGs are
    prequeried, cached (get_latest) values are used instead of live reads.
    NOTE(review): the matching setter writes the amplitude value directly
    to the range node (no factor 2) — confirm this asymmetry is intended.
    """
    if par == 'offset':
        def getter():
            return obj.get('sigouts_{}_offset'.format(int(id[2]) - 1))
    elif par == 'amp':
        def getter():
            node = 'sigouts_{}_range'.format(int(id[2]) - 1)
            if self._awgs_prequeried_state:
                return obj.parameters[node].get_latest() / 2
            return obj.get(node) / 2
    else:
        raise NotImplementedError('Unknown parameter {}'.format(par))
    return getter
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
    """Upload waveforms and the sequencer program for a UHFQC.

    Args:
        obj: UHFQC instrument to program (other types are delegated to
            the next mixin via super()).
        awg_sequence: ordered dict element name -> codeword dict; a None
            value marks the start of a new segment.
        waveforms: dict waveform hash -> sample array.
        repeat_pattern: optional (nested) tuple describing repetitions of
            elements, used to compress the sequencer program.

    Fixes relative to the previous revision:
      * the log call on a repeat-pattern mismatch passed several positional
        arguments as the message, which breaks the %-formatting of the log
        record; it now uses a proper format string with lazy arguments.
      * the NotImplementedError message no longer embeds source indentation
        (it was built with a backslash line continuation inside the string).
    """
    if not isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        return super()._program_awg(obj, awg_sequence, waveforms,
                                    repeat_pattern)

    if not self._zi_waves_cleared:
        _zi_clear_waves()
        self._zi_waves_cleared = True

    # Collect every referenced waveform and write it to disk.
    waves_to_upload = {h: waveforms[h]
                       for codewords in awg_sequence.values()
                       if codewords is not None
                       for cw, chids in codewords.items()
                       if cw != 'metadata'
                       for h in chids.values()}
    self._zi_write_waves(waves_to_upload)

    defined_waves = set()
    wave_definitions = []
    playback_strings = []

    ch_has_waveforms = {'ch1': False, 'ch2': False}

    current_segment = 'no_segment'

    def play_element(element, playback_strings, wave_definitions):
        # Emit definition and playback code for a single element.
        if awg_sequence[element] is None:
            # A None entry marks a segment boundary; emit a comment only.
            current_segment = element
            playback_strings.append(f'// Segment {current_segment}')
            return playback_strings, wave_definitions
        playback_strings.append(f'// Element {element}')

        metadata = awg_sequence[element].pop('metadata', {})
        if list(awg_sequence[element].keys()) != ['no_codeword']:
            raise NotImplementedError('UHFQC sequencer does currently '
                                      'not support codewords!')
        chid_to_hash = awg_sequence[element]['no_codeword']

        # UHFQC has no marker channels: (analog1, None, analog2, None).
        wave = (chid_to_hash.get('ch1', None), None,
                chid_to_hash.get('ch2', None), None)
        wave_definitions += self._zi_wave_definition(wave, defined_waves)

        acq = metadata.get('acq', False)
        playback_strings += self._zi_playback_string(name=obj.name,
                                                     device='uhf',
                                                     wave=wave,
                                                     acq=acq)

        ch_has_waveforms['ch1'] |= wave[0] is not None
        ch_has_waveforms['ch2'] |= wave[2] is not None
        return playback_strings, wave_definitions

    if repeat_pattern is None:
        for element in awg_sequence:
            playback_strings, wave_definitions = play_element(
                element, playback_strings, wave_definitions)
    else:
        # Indices of real elements (segment markers are None entries).
        real_indicies = []
        for index, element in enumerate(awg_sequence):
            if awg_sequence[element] is not None:
                real_indicies.append(index)
        el_total = len(real_indicies)

        def repeat_func(n, el_played, index, playback_strings,
                        wave_definitions):
            # Recursively unroll a (possibly nested) repeat pattern into
            # sequencer 'repeat' blocks and return the element count so
            # the caller can check consistency against the sequence.
            if isinstance(n, tuple):
                el_played_list = []
                if n[0] > 1:
                    playback_strings.append('repeat (' + str(n[0]) + ') {')
                for t in n[1:]:
                    el_cnt, playback_strings, wave_definitions = repeat_func(
                        t, el_played, index + np.sum(el_played_list),
                        playback_strings, wave_definitions)
                    el_played_list.append(el_cnt)
                if n[0] > 1:
                    playback_strings.append('}')
                return (int(n[0] * np.sum(el_played_list)),
                        playback_strings, wave_definitions)
            else:
                for k in range(n):
                    el_index = real_indicies[int(index) + k]
                    element = list(awg_sequence.keys())[el_index]
                    playback_strings, wave_definitions = play_element(
                        element, playback_strings, wave_definitions)
                    el_played = el_played + 1
                return el_played, playback_strings, wave_definitions

        el_played, playback_strings, wave_definitions = repeat_func(
            repeat_pattern, 0, 0, playback_strings, wave_definitions)

        if int(el_played) != int(el_total):
            # Fixed: use a format string with lazy args; the original
            # passed three positional "message" arguments, which raised
            # a formatting error inside the logging machinery.
            log.error('Number of played elements (%s) does not match the '
                      'number of elements in the sequence (%s).',
                      el_played, el_total)
            raise ValueError('Check number of sequences in repeat pattern')

    if not (ch_has_waveforms['ch1'] or ch_has_waveforms['ch2']):
        # Nothing to play on this AWG; leave it unprogrammed.
        return
    self.awgs_with_waveforms(obj.name)

    awg_str = self._uhf_sequence_string_template.format(
        wave_definitions='\n'.join(wave_definitions),
        playback_string='\n '.join(playback_strings),
    )

    # Update the program bookkeeping flags on the instrument object.
    obj._awg_program_features['loop_cnt'] = True
    obj._awg_program_features['avg_cnt'] = False
    obj._awg_needs_configuration[0] = False
    obj._awg_program[0] = True

    obj.configure_awg_from_string(awg_nr=0, program_string=awg_str,
                                  timeout=600)
def _is_awg_running(self, obj):
    """Return True while the UHFQC's AWG thread is enabled."""
    if isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        return obj.awgs_0_enable() != 0
    return super()._is_awg_running(obj)
def _clock(self, obj, cid=None):
    """Return the UHFQC sample clock frequency (cid is unused here)."""
    if isinstance(obj, UHFQCPulsar._supportedAWGtypes):
        return obj.clock_freq()
    return super()._clock(obj)
class HDAWG8Pulsar:
    """Pulsar mixin implementing support for ZI HDAWG8 (and VirtualAWG8) AWGs."""

    # Instrument classes handled by this mixin; any other type is delegated
    # to the next class in the MRO via super() calls.
    _supportedAWGtypes = (ZI_HDAWG8, VirtualAWG8, )

    # Skeleton of a sequencer program: waveform declarations, codeword table
    # definitions, then an infinite playback loop.
    _hdawg_sequence_string_template = (
        "{wave_definitions}\n"
        "\n"
        "{codeword_table_defs}\n"
        "\n"
        "while (1) {{\n"
        " {playback_string}\n"
        "}}\n"
    )
def _create_awg_parameters(self, awg, channel_name_map):
    """Register pulsar parameters for an HDAWG8 and all of its channels.

    Eight analog channels ('ch1'..'ch8') and eight marker channels
    ('ch1m'..'ch8m') are created; unsupported AWG types are delegated to
    the next mixin via super().
    """
    if not isinstance(awg, HDAWG8Pulsar._supportedAWGtypes):
        return super()._create_awg_parameters(awg, channel_name_map)

    name = awg.name

    self.add_parameter(f'{awg.name}_reuse_waveforms',
                       initial_value=True, vals=vals.Bool(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_minimize_sequencer_memory',
                       initial_value=False, vals=vals.Bool(),
                       parameter_class=ManualParameter,
                       docstring="Minimizes the sequencer "
                                 "memory by repeating specific sequence "
                                 "patterns (eg. readout) passed in "
                                 "'repeat dictionary'")
    self.add_parameter(f'{awg.name}_enforce_single_element',
                       initial_value=False, vals=vals.Bool(),
                       parameter_class=ManualParameter,
                       docstring="Group all the pulses on this AWG into "
                                 "a single element. Useful for making sure "
                                 "that the master AWG has only one waveform"
                                 " per segment.")
    # Timing constants of the HDAWG8 (2.4 GSa/s base sample rate).
    self.add_parameter(f'{awg.name}_granularity',
                       get_cmd=lambda: 16)
    self.add_parameter(f'{awg.name}_element_start_granularity',
                       initial_value=8 / (2.4e9),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_min_length',
                       initial_value=16 / (2.4e9),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_inter_element_deadtime',
                       get_cmd=lambda: 8 / (2.4e9))
    self.add_parameter(f'{awg.name}_precompile',
                       initial_value=False, vals=vals.Bool(),
                       label=f'{awg.name} precompile segments',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_delay',
                       initial_value=0, label=f'{name} delay',
                       unit='s', parameter_class=ManualParameter,
                       docstring='Global delay applied to this '
                                 'channel. Positive values move pulses'
                                 ' on this channel forward in time')
    self.add_parameter(f'{awg.name}_trigger_channels',
                       initial_value=[],
                       label=f'{awg.name} trigger channel',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_active', initial_value=True,
                       label=f'{awg.name} active',
                       vals=vals.Bool(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_min_length',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_trigger_source',
                       initial_value='Dig1',
                       vals=vals.Enum('Dig1', 'DIO', 'ZSync'),
                       parameter_class=ManualParameter,
                       docstring='Defines for which trigger source \
                       the AWG should wait, before playing \
                       the next waveform. Allowed values \
                       are: "Dig1", "DIO", "ZSync"')

    # One analog ('chN') and one marker ('chNm') channel per output.
    for ch_nr in range(8):
        id = f'ch{ch_nr + 1}'
        name = channel_name_map.get(id, awg.name + '_' + id)
        self._hdawg_create_analog_channel_parameters(id, name, awg)
        self.channels.add(name)
        id = f'ch{ch_nr + 1}m'
        name = channel_name_map.get(id, awg.name + '_' + id)
        self._hdawg_create_marker_channel_parameters(id, name, awg)
        self.channels.add(name)
def _hdawg_create_analog_channel_parameters(self, id, name, awg):
    """Register pulsar parameters for one HDAWG analog output channel."""
    self.add_parameter(f'{name}_id', get_cmd=lambda _=id: _)
    self.add_parameter(f'{name}_awg', get_cmd=lambda _=awg.name: _)
    self.add_parameter(f'{name}_type', get_cmd=lambda: 'analog')
    # Hardware-backed offset and amplitude.
    self.add_parameter(f'{name}_offset', unit='V',
                       label=f'{name} offset',
                       set_cmd=self._hdawg_setter(awg, id, 'offset'),
                       get_cmd=self._hdawg_getter(awg, id, 'offset'),
                       vals=vals.Numbers())
    self.add_parameter(f'{name}_amp', unit='V',
                       label=f'{name} amplitude',
                       set_cmd=self._hdawg_setter(awg, id, 'amp'),
                       get_cmd=self._hdawg_getter(awg, id, 'amp'),
                       vals=vals.Numbers(0.01, 5.0))
    # Software-only (manual) settings for distortion and flux-compensation.
    self.add_parameter(f'{name}_distortion',
                       label=f'{name} distortion mode',
                       initial_value='off',
                       vals=vals.Enum('off', 'precalculate'),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_distortion_dict',
                       label=f'{name} distortion dictionary',
                       vals=vals.Dict(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_charge_buildup_compensation',
                       vals=vals.Bool(), initial_value=False,
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_scale',
                       vals=vals.Numbers(0., 1.), initial_value=0.5,
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_delay',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_gaussian_filter_sigma',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_internal_modulation',
                       initial_value=False, vals=vals.Bool(),
                       parameter_class=ManualParameter)
def _hdawg_create_marker_channel_parameters(self, id, name, awg):
    """Register pulsar parameters for one HDAWG marker channel."""
    self.add_parameter(f'{name}_id', get_cmd=lambda _=id: _)
    self.add_parameter(f'{name}_awg', get_cmd=lambda _=awg.name: _)
    self.add_parameter(f'{name}_type', get_cmd=lambda: 'marker')
    # Marker offset/amp setters are None (no hardware support); the
    # getters return the fixed values 0 and 1 respectively.
    self.add_parameter(f'{name}_offset', unit='V',
                       label=f'{name} offset',
                       set_cmd=self._hdawg_setter(awg, id, 'offset'),
                       get_cmd=self._hdawg_getter(awg, id, 'offset'),
                       vals=vals.Numbers())
    self.add_parameter(f'{name}_amp', unit='V',
                       label=f'{name} amplitude',
                       set_cmd=self._hdawg_setter(awg, id, 'amp'),
                       get_cmd=self._hdawg_getter(awg, id, 'amp'),
                       vals=vals.Numbers(0.01, 5.0))
@staticmethod
def _hdawg_setter(obj, id, par):
    """Return a setter for an HDAWG channel parameter, or None for markers.

    Marker channels ('...m' ids) have no hardware offset/amplitude. For
    'amp' the range written to the device is twice the amplitude.
    """
    is_marker = id[-1] == 'm'
    if par == 'offset':
        if is_marker:
            return None
        def s(val):
            obj.set('sigouts_{}_offset'.format(int(id[2])-1), val)
        return s
    if par == 'amp':
        if is_marker:
            return None
        def s(val):
            obj.set('sigouts_{}_range'.format(int(id[2])-1), 2*val)
        return s
    raise NotImplementedError('Unknown parameter {}'.format(par))
def _hdawg_getter(self, obj, id, par):
    """Return a getter for an HDAWG channel parameter.

    Markers report a fixed offset of 0 and amplitude of 1. The analog
    amplitude is half the instrument range; while AWGs are prequeried,
    cached (get_latest) values are used instead of live reads.
    """
    is_marker = id[-1] == 'm'
    if par == 'offset':
        if is_marker:
            return lambda: 0
        def g():
            return obj.get('sigouts_{}_offset'.format(int(id[2])-1))
        return g
    if par == 'amp':
        if is_marker:
            return lambda: 1
        def g():
            if self._awgs_prequeried_state:
                return obj.parameters[
                    'sigouts_{}_range'.format(int(id[2])-1)].get_latest()/2
            return obj.get('sigouts_{}_range'.format(int(id[2])-1))/2
        return g
    raise NotImplementedError('Unknown parameter {}'.format(par))
def get_divisor(self, chid, awg):
    """Return the waveform sample-rate divisor for a channel.

    Channels with internal modulation enabled use divisor 2; marker
    channels and unmodulated channels use divisor 1.
    """
    if chid.endswith('m'):
        return 1
    name = self._id_channel(chid, awg)
    return 2 if self.get(f"{name}_internal_modulation") else 1
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
    """Upload waveforms and sequencer programs for an HDAWG8.

    One sequencer program is generated per sub-AWG (pair of outputs).
    Elements may define codewords, in which case a codeword (DIO) table
    is built for the sub-AWG.

    Args:
        obj: HDAWG8 instrument (other types are delegated via super()).
        awg_sequence: ordered dict element name -> codeword dict; a None
            value marks the start of a new segment.
        waveforms: dict waveform hash -> sample array.
        repeat_pattern: accepted for interface compatibility.
            NOTE(review): unlike the UHFQC implementation, repeat patterns
            are not applied here — confirm this is intended.
    """
    if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
        return super()._program_awg(obj, awg_sequence, waveforms,
                                    repeat_pattern)

    if not self._zi_waves_cleared:
        _zi_clear_waves()
        self._zi_waves_cleared = True

    chids = [f'ch{i+1}{m}' for i in range(8) for m in ['','m']]
    divisor = {chid: self.get_divisor(chid, obj.name) for chid in chids}

    # Downsample waveforms on internally-modulated channels (divisor 2)
    # and write every referenced waveform to disk. Note: `chids` in the
    # comprehension shadows the list defined above.
    waves_to_upload = {h: divisor[chid]*waveforms[h][::divisor[chid]]
                       for codewords in awg_sequence.values()
                       if codewords is not None
                       for cw, chids in codewords.items()
                       if cw != 'metadata'
                       for chid, h in chids.items()}
    self._zi_write_waves(waves_to_upload)

    ch_has_waveforms = {'ch{}{}'.format(i + 1, m): False
                        for i in range(8) for m in ['','m']}

    for awg_nr in self._hdawg_active_awgs(obj):
        defined_waves = set()
        codeword_table = {}
        wave_definitions = []
        codeword_table_defs = []
        playback_strings = []
        interleaves = []

        # Reprogramming clobbers the DIO valid polarity; restore it below.
        prev_dio_valid_polarity = obj.get(
            'awgs_{}_dio_valid_polarity'.format(awg_nr))

        added_cw = set()

        # Channel ids of the analog/marker pair driven by this sub-AWG.
        ch1id = 'ch{}'.format(awg_nr * 2 + 1)
        ch1mid = 'ch{}m'.format(awg_nr * 2 + 1)
        ch2id = 'ch{}'.format(awg_nr * 2 + 2)
        ch2mid = 'ch{}m'.format(awg_nr * 2 + 2)
        chids = [ch1id, ch2id]

        channels = [self._id_channel(chid, obj.name) for chid in chids]

        codeword_el = set()

        # Internal modulation must be set consistently for both channels
        # of a sub-AWG; mixed settings are not supported.
        if all([self.get(
                f'{chan}_internal_modulation') for chan in channels]):
            internal_mod = True
        elif not any([self.get(
                f'{chan}_internal_modulation') for chan in channels]):
            internal_mod = False
        else:
            raise NotImplementedError('Internal modulation can only be'
                                      'specified per sub AWG!')

        counter = 1
        current_segment = 'no_segment'
        for element in awg_sequence:
            if awg_sequence[element] is None:
                # A None entry marks a segment boundary; comment only.
                current_segment = element
                playback_strings.append(f'// Segment {current_segment}')
                continue
            playback_strings.append(f'// Element {element}')

            metadata = awg_sequence[element].pop('metadata', {})

            nr_cw = len(set(awg_sequence[element].keys()) - \
                        {'no_codeword'})

            if nr_cw == 1:
                log.warning(
                    f'Only one codeword has been set for {element}')
                # NOTE(review): such elements are warned about but not
                # programmed at all — confirm this is intended.
            else:
                for cw in awg_sequence[element]:
                    if cw == 'no_codeword':
                        # Skip the codeword-free entry when codewords
                        # are in use for this element.
                        if nr_cw != 0:
                            continue
                    chid_to_hash = awg_sequence[element][cw]
                    wave = tuple(chid_to_hash.get(ch, None)
                                 for ch in [ch1id, ch1mid, ch2id, ch2mid])
                    wave_definitions += self._zi_wave_definition(
                        wave, defined_waves)

                    if nr_cw != 0:
                        w1, w2 = self._zi_waves_to_wavenames(wave)
                        if cw not in codeword_table:
                            codeword_table_defs += \
                                self._zi_codeword_table_entry(cw, wave)
                            codeword_table[cw] = (w1, w2)
                        elif codeword_table[cw] != (w1, w2) \
                                and self.reuse_waveforms():
                            # First waveform registered for a codeword wins.
                            log.warning('Same codeword used for different '
                                        'waveforms. Using first waveform. '
                                        f'Ignoring element {element}.')

                    ch_has_waveforms[ch1id] |= wave[0] is not None
                    ch_has_waveforms[ch1mid] |= wave[1] is not None
                    ch_has_waveforms[ch2id] |= wave[2] is not None
                    ch_has_waveforms[ch2mid] |= wave[3] is not None

                if not internal_mod:
                    playback_strings += self._zi_playback_string(
                        name=obj.name,
                        device='hdawg', wave=wave, codeword=(nr_cw != 0),
                        append_zeros=self.append_zeros())
                else:
                    pb_string, interleave_string = \
                        self._zi_interleaved_playback_string(
                            name=obj.name,
                            device='hdawg', counter=counter, wave=wave,
                            codeword=(nr_cw != 0))
                    counter += 1
                    playback_strings += pb_string
                    interleaves += interleave_string

        if not any([ch_has_waveforms[ch]
                    for ch in [ch1id, ch1mid, ch2id, ch2mid]]):
            # Nothing to play on this sub-AWG; skip programming it.
            continue

        awg_str = self._hdawg_sequence_string_template.format(
            wave_definitions='\n'.join(wave_definitions+interleaves),
            codeword_table_defs='\n'.join(codeword_table_defs),
            playback_string='\n '.join(playback_strings))

        obj._awg_needs_configuration[awg_nr] = False
        obj._awg_program[awg_nr] = True

        obj.configure_awg_from_string(awg_nr, awg_str, timeout=600)

        obj.set('awgs_{}_dio_valid_polarity'.format(awg_nr),
                prev_dio_valid_polarity)

    # Switch each signal output on/off depending on waveform presence.
    for ch in range(8):
        obj.set('sigouts_{}_on'.format(ch), ch_has_waveforms[f'ch{ch+1}'])

    if any(ch_has_waveforms.values()):
        self.awgs_with_waveforms(obj.name)
def _is_awg_running(self, obj):
    """Return True if any active HDAWG sub-AWG is currently enabled."""
    if not isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
        return super()._is_awg_running(obj)
    return any(obj.get('awgs_{}_enable'.format(awg_nr))
               for awg_nr in self._hdawg_active_awgs(obj))
def _clock(self, obj, cid):
    """Return the HDAWG sample clock frequency (cid is unused here)."""
    if isinstance(obj, HDAWG8Pulsar._supportedAWGtypes):
        return obj.clock_freq()
    return super()._clock(obj, cid)
def _hdawg_active_awgs(self, obj):
    """Return the sub-AWG indices of an HDAWG8 (always all four pairs)."""
    return list(range(4))
class AWG5014Pulsar:
    """Pulsar mixin implementing support for Tektronix AWG5014 AWGs."""

    # Instrument classes handled by this mixin; any other type is delegated
    # to the next class in the MRO via super() calls.
    _supportedAWGtypes = (Tektronix_AWG5014, VirtualAWG5014, )
def _create_awg_parameters(self, awg, channel_name_map):
    """Register pulsar parameters for an AWG5014 and all of its channels.

    Creates one analog and two marker channels per output (4 outputs);
    unsupported AWG types are delegated to the next mixin via super().
    """
    if not isinstance(awg, AWG5014Pulsar._supportedAWGtypes):
        return super()._create_awg_parameters(awg, channel_name_map)

    self.add_parameter(f'{awg.name}_reuse_waveforms',
                       initial_value=True, vals=vals.Bool(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_minimize_sequencer_memory',
                       initial_value=False, vals=vals.Bool(),
                       parameter_class=ManualParameter,
                       docstring="Minimizes the sequencer "
                                 "memory by repeating specific sequence "
                                 "patterns (eg. readout) passed in "
                                 "'repeat dictionary'")
    self.add_parameter(f'{awg.name}_enforce_single_element',
                       initial_value=False, vals=vals.Bool(),
                       parameter_class=ManualParameter,
                       docstring="Group all the pulses on this AWG into "
                                 "a single element. Useful for making sure "
                                 "that the master AWG has only one waveform"
                                 " per segment.")
    # Timing constants of the AWG5014 (1.2 GSa/s base sample rate).
    self.add_parameter(f'{awg.name}_granularity',
                       get_cmd=lambda: 4)
    self.add_parameter(f'{awg.name}_element_start_granularity',
                       initial_value=4 / (1.2e9),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_min_length',
                       get_cmd=lambda: 256 / (1.2e9))
    self.add_parameter(f'{awg.name}_inter_element_deadtime',
                       get_cmd=lambda: 0)
    self.add_parameter(f'{awg.name}_precompile',
                       initial_value=False,
                       label=f'{awg.name} precompile segments',
                       parameter_class=ManualParameter, vals=vals.Bool())
    self.add_parameter(f'{awg.name}_delay', initial_value=0,
                       label=f'{awg.name} delay', unit='s',
                       parameter_class=ManualParameter,
                       docstring="Global delay applied to this channel. "
                                 "Positive values move pulses on this "
                                 "channel forward in time")
    self.add_parameter(f'{awg.name}_trigger_channels',
                       initial_value=[],
                       label=f'{awg.name} trigger channels',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_active', initial_value=True,
                       label=f'{awg.name} active',
                       vals=vals.Bool(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{awg.name}_compensation_pulse_min_length',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)

    # One analog and two marker channels per AWG output.
    for ch_nr in range(4):
        id = f'ch{ch_nr + 1}'
        name = channel_name_map.get(id, awg.name + '_' + id)
        self._awg5014_create_analog_channel_parameters(id, name, awg)
        self.channels.add(name)
        id = f'ch{ch_nr + 1}m1'
        name = channel_name_map.get(id, awg.name + '_' + id)
        self._awg5014_create_marker_channel_parameters(id, name, awg)
        self.channels.add(name)
        id = f'ch{ch_nr + 1}m2'
        name = channel_name_map.get(id, awg.name + '_' + id)
        self._awg5014_create_marker_channel_parameters(id, name, awg)
        self.channels.add(name)
def _awg5014_create_analog_channel_parameters(self, id, name, awg):
    """Register pulsar parameters for one AWG5014 analog channel."""
    self.add_parameter(f'{name}_id', get_cmd=lambda _=id: _)
    self.add_parameter(f'{name}_awg', get_cmd=lambda _=awg.name: _)
    self.add_parameter(f'{name}_type', get_cmd=lambda: 'analog')
    self.add_parameter(f'{name}_offset_mode',
                       parameter_class=ManualParameter,
                       vals=vals.Enum('software', 'hardware'))
    # The offset getter/setter must read the offset mode at call time,
    # so the parameter object itself is passed into the closures.
    offset_mode_func = self.parameters[f'{name}_offset_mode']
    self.add_parameter(f'{name}_offset', unit='V',
                       label=f'{name} offset',
                       set_cmd=self._awg5014_setter(awg, id, 'offset',
                                                    offset_mode_func),
                       get_cmd=self._awg5014_getter(awg, id, 'offset',
                                                    offset_mode_func),
                       vals=vals.Numbers())
    self.add_parameter(f'{name}_amp', unit='V',
                       label=f'{name} amplitude',
                       set_cmd=self._awg5014_setter(awg, id, 'amp'),
                       get_cmd=self._awg5014_getter(awg, id, 'amp'),
                       vals=vals.Numbers(0.01, 2.25))
    # Software-only (manual) settings for distortion and flux-compensation.
    self.add_parameter(f'{name}_distortion',
                       label=f'{name} distortion mode',
                       initial_value='off',
                       vals=vals.Enum('off', 'precalculate'),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_distortion_dict',
                       label=f'{name} distortion dictionary',
                       vals=vals.Dict(),
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_charge_buildup_compensation',
                       vals=vals.Bool(), initial_value=False,
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_scale',
                       vals=vals.Numbers(0., 1.), initial_value=0.5,
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_delay',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)
    self.add_parameter(f'{name}_compensation_pulse_gaussian_filter_sigma',
                       initial_value=0, unit='s',
                       parameter_class=ManualParameter)
def _awg5014_create_marker_channel_parameters(self, id, name, awg):
    """Register pulsar parameters for one AWG5014 marker channel."""
    self.add_parameter(f'{name}_id', get_cmd=lambda _=id: _)
    self.add_parameter(f'{name}_awg', get_cmd=lambda _=awg.name: _)
    self.add_parameter(f'{name}_type', get_cmd=lambda: 'marker')
    self.add_parameter(f'{name}_offset', unit='V',
                       label=f'{name} offset',
                       set_cmd=self._awg5014_setter(awg, id, 'offset'),
                       get_cmd=self._awg5014_getter(awg, id, 'offset'),
                       vals=vals.Numbers(-2.7, 2.7))
    self.add_parameter(f'{name}_amp', unit='V',
                       label=f'{name} amplitude',
                       set_cmd=self._awg5014_setter(awg, id, 'amp'),
                       get_cmd=self._awg5014_getter(awg, id, 'amp'),
                       vals=vals.Numbers(-5.4, 5.4))
@staticmethod
def _awg5014_setter(obj, id, par, offset_mode_func=None):
    """Return a setter closure for an AWG5014 channel parameter.

    Analog offsets honour the channel's offset mode ('software' writes
    the AWG offset, 'hardware' the DC output level). Marker levels are
    stored as low/high pairs: 'offset' shifts both levels, 'amp' moves
    only the high level.
    """
    if id in ['ch1', 'ch2', 'ch3', 'ch4']:
        if par == 'offset':
            def setter(val):
                if offset_mode_func() == 'software':
                    obj.set(f'{id}_offset', val)
                elif offset_mode_func() == 'hardware':
                    obj.set(f'{id}_DC_out', val)
                else:
                    raise ValueError('Invalid offset mode for AWG5014: '
                                     '{}'.format(offset_mode_func()))
        elif par == 'amp':
            def setter(val):
                obj.set(f'{id}_amp', 2 * val)
        else:
            raise NotImplementedError('Unknown parameter {}'.format(par))
    else:
        # Marker id 'chNmM' -> instrument prefix 'chN_mM'.
        id_raw = id[:3] + '_' + id[3:]
        if par == 'offset':
            def setter(val):
                high = obj.get(f'{id_raw}_high')
                low = obj.get(f'{id_raw}_low')
                obj.set(f'{id_raw}_high', val + high - low)
                obj.set(f'{id_raw}_low', val)
        elif par == 'amp':
            def setter(val):
                low = obj.get(f'{id_raw}_low')
                obj.set(f'{id_raw}_high', low + val)
        else:
            raise NotImplementedError('Unknown parameter {}'.format(par))
    return setter
def _awg5014_getter(self, obj, id, par, offset_mode_func=None):
    """Return a getter closure for an AWG5014 channel parameter.

    Analog offsets honour the channel's offset mode; the analog amplitude
    is half of the instrument's peak-to-peak amplitude. Marker amplitude
    is the high-low level difference and marker offset is the low level.
    While AWGs are prequeried, cached (get_latest) values are used.

    Fix: the marker-amplitude getter had the prequeried/live branches
    swapped (it queried the instrument while prequeried and used cached
    values otherwise); it is now consistent with the analog-amplitude
    getter above and with the UHFQC/HDAWG getters.
    """
    if id in ['ch1', 'ch2', 'ch3', 'ch4']:
        if par == 'offset':
            def g():
                if offset_mode_func() == 'software':
                    return obj.get('{}_offset'.format(id))
                elif offset_mode_func() == 'hardware':
                    return obj.get('{}_DC_out'.format(id))
                else:
                    raise ValueError('Invalid offset mode for AWG5014: '
                                     '{}'.format(offset_mode_func()))
        elif par == 'amp':
            def g():
                if self._awgs_prequeried_state:
                    return obj.parameters['{}_amp'.format(id)] \
                               .get_latest()/2
                else:
                    return obj.get('{}_amp'.format(id))/2
        else:
            raise NotImplementedError('Unknown parameter {}'.format(par))
    else:
        id_raw = id[:3] + '_' + id[3:]
        if par == 'offset':
            def g():
                return obj.get('{}_low'.format(id_raw))
        elif par == 'amp':
            def g():
                # Use cached values while prequeried, live reads otherwise
                # (branches were inverted in the previous revision).
                if self._awgs_prequeried_state:
                    h = obj.parameters['{}_high'.format(id_raw)]\
                        .get_latest()
                    l = obj.parameters['{}_low'.format(id_raw)]\
                        .get_latest()
                else:
                    h = obj.get('{}_high'.format(id_raw))
                    l = obj.get('{}_low'.format(id_raw))
                return h - l
        else:
            raise NotImplementedError('Unknown parameter {}'.format(par))
    return g
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
    """Generate and upload an AWG file for a Tektronix AWG5014.

    Args:
        obj: AWG5014 instrument (other types are delegated via super()).
        awg_sequence: ordered dict element name -> codeword dict; a None
            value marks a segment boundary. Codewords are not supported.
        waveforms: dict waveform hash -> sample array.
        repeat_pattern: accepted for interface compatibility; not used.

    Returns:
        The generated AWG file contents, or None if no channel of this
        AWG has any waveform.
    """
    if not isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
        return super()._program_awg(obj, awg_sequence, waveforms,
                                    repeat_pattern)

    # Loading an AWG file clobbers marker low/high levels and channel
    # offsets; remember their values so they can be restored afterwards.
    pars = {
        'ch{}_m{}_low'.format(ch + 1, m + 1)
        for ch in range(4) for m in range(2)
    }
    pars |= {
        'ch{}_m{}_high'.format(ch + 1, m + 1)
        for ch in range(4) for m in range(2)
    }
    pars |= {
        'ch{}_offset'.format(ch + 1) for ch in range(4)
    }
    old_vals = {}
    for par in pars:
        old_vals[par] = obj.get(par)

    packed_waveforms = {}
    wfname_l = []

    grp_has_waveforms = {f'ch{i+1}': False for i in range(4)}
    for element in awg_sequence:
        if awg_sequence[element] is None:
            # A None entry marks a segment boundary; nothing to upload.
            continue
        metadata = awg_sequence[element].pop('metadata', {})
        if list(awg_sequence[element].keys()) != ['no_codeword']:
            raise NotImplementedError('AWG5014 sequencer does '
                                      'not support codewords!')
        chid_to_hash = awg_sequence[element]['no_codeword']

        if not any(chid_to_hash):
            continue  # element has no waveform on any channel
        # Pad all waveforms of the element to a common length of at
        # least 256 samples (minimum AWG5014 element length).
        maxlen = max([len(waveforms[h]) for h in chid_to_hash.values()])
        maxlen = max(maxlen, 256)

        wfname_l.append([])
        for grp in [f'ch{i + 1}' for i in range(4)]:
            # One packed waveform per group: (analog, marker1, marker2).
            wave = (chid_to_hash.get(grp, None),
                    chid_to_hash.get(grp + 'm1', None),
                    chid_to_hash.get(grp + 'm2', None))
            grp_has_waveforms[grp] |= (wave != (None, None, None))
            wfname = self._hash_to_wavename((maxlen, wave))
            grp_wfs = [np.pad(waveforms.get(h, [0]),
                              (0, maxlen - len(waveforms.get(h, [0]))),
                              'constant', constant_values=0) for h in wave]
            packed_waveforms[wfname] = obj.pack_waveform(*grp_wfs)
            wfname_l[-1].append(wfname)
            if any([wf[0] != 0 for wf in grp_wfs]):
                log.warning(f'Element {element} starts with non-zero '
                            f'entry on {obj.name}.')

    if not any(grp_has_waveforms.values()):
        # Nothing to play: switch all channels off and skip the upload.
        for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
            obj.set('{}_state'.format(grp), grp_has_waveforms[grp])
        return None

    self.awgs_with_waveforms(obj.name)

    # Sequence table: play each element once, wait for trigger, and jump
    # back to the first element after the last one.
    nrep_l = [1] * len(wfname_l)
    goto_l = [0] * len(wfname_l)
    goto_l[-1] = 1
    wait_l = [1] * len(wfname_l)
    logic_jump_l = [0] * len(wfname_l)

    filename = 'pycqed_pulsar.awg'

    awg_file = obj.generate_awg_file(packed_waveforms,
                                     np.array(wfname_l).transpose().copy(),
                                     nrep_l, wait_l, goto_l, logic_jump_l,
                                     self._awg5014_chan_cfg(obj.name))
    obj.send_awg_file(filename, awg_file)
    obj.load_awg_file(filename)

    # Restore the parameters clobbered by the file load.
    for par in pars:
        obj.set(par, old_vals[par])

    time.sleep(.1)
    # Wait for the AWG to be ready before touching channel states.
    obj.is_awg_ready()

    for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
        obj.set('{}_state'.format(grp), 1*grp_has_waveforms[grp])

    # Enable the hardware (DC output) offsets if any channel uses them.
    hardware_offsets = 0
    for grp in ['ch1', 'ch2', 'ch3', 'ch4']:
        cname = self._id_channel(grp, obj.name)
        offset_mode = self.get('{}_offset_mode'.format(cname))
        if offset_mode == 'hardware':
            hardware_offsets = 1
    obj.DC_output(hardware_offsets)

    return awg_file
def _is_awg_running(self, obj):
    """Return True while the AWG5014 sequencer is not idle."""
    if isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
        return obj.get_state() != 'Idle'
    return super()._is_awg_running(obj)
def _clock(self, obj, cid=None):
    """Return the AWG5014 sample clock frequency (cid is unused here)."""
    if isinstance(obj, AWG5014Pulsar._supportedAWGtypes):
        return obj.clock_freq()
    return super()._clock(obj, cid)
@staticmethod
def _awg5014_group_ids(cid):
    """Return the [analog, marker1, marker2] ids of cid's channel group."""
    base = cid[:3]
    return [base, base + 'm1', base + 'm2']
def _awg5014_chan_cfg(self, awg):
    """Build the AWG5014 channel configuration dictionary for `awg`.

    First pass: analog amplitude/offset (honouring the software/hardware
    offset mode) and marker low/high levels, with all channels disabled.
    Second pass: enable channel states only if the AWG itself is active.
    """
    channel_cfg = {}
    for channel in self.channels:
        if self.get(f'{channel}_awg') != awg:
            continue
        cid = self.get(f'{channel}_id')
        amp = self.get(f'{channel}_amp')
        off = self.get(f'{channel}_offset')
        if self.get(f'{channel}_type') == 'analog':
            offset_mode = self.get(f'{channel}_offset_mode')
            channel_cfg['ANALOG_METHOD_' + cid[2]] = 1
            # Pulsar amplitude is half the peak-to-peak amplitude.
            channel_cfg['ANALOG_AMPLITUDE_' + cid[2]] = amp * 2
            if offset_mode == 'software':
                channel_cfg['ANALOG_OFFSET_' + cid[2]] = off
                channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = 0
                channel_cfg['EXTERNAL_ADD_' + cid[2]] = 0
            else:
                channel_cfg['ANALOG_OFFSET_' + cid[2]] = 0
                channel_cfg['DC_OUTPUT_LEVEL_' + cid[2]] = off
                channel_cfg['EXTERNAL_ADD_' + cid[2]] = 1
        else:
            channel_cfg['MARKER1_METHOD_' + cid[2]] = 2
            channel_cfg['MARKER2_METHOD_' + cid[2]] = 2
            channel_cfg[f'MARKER{cid[-1]}_LOW_{cid[2]}'] = off
            channel_cfg[f'MARKER{cid[-1]}_HIGH_{cid[2]}'] = off + amp
        channel_cfg['CHANNEL_STATE_' + cid[2]] = 0
    for channel in self.channels:
        if self.get(f'{channel}_awg') != awg:
            continue
        if self.get(f'{awg}_active'):
            cid = self.get(f'{channel}_id')
            channel_cfg['CHANNEL_STATE_' + cid[2]] = 1
    return channel_cfg
class Pulsar(AWG5014Pulsar, HDAWG8Pulsar, UHFQCPulsar, Instrument):
def __init__(self, name='Pulsar', master_awg=None):
    """Create the Pulsar instrument.

    Args:
        name: qcodes instrument name.
        master_awg: name of the AWG that triggers all the other AWGs
            (None if there is no master/slave configuration).
    """
    super().__init__(name)

    self.add_parameter('master_awg',
                       initial_value=master_awg,
                       parameter_class=InstrumentRefParameter)
    self.add_parameter('inter_element_spacing',
                       set_cmd=self._set_inter_element_spacing,
                       get_cmd=self._get_inter_element_spacing,
                       vals=vals.MultiType(vals.Numbers(0),
                                           vals.Enum('auto')))
    self.add_parameter('reuse_waveforms', initial_value=False,
                       vals=vals.Bool(), parameter_class=ManualParameter)
    self.add_parameter('append_zeros', initial_value=0, vals=vals.Ints(),
                       parameter_class=ManualParameter)
    self.add_parameter('flux_crosstalk_cancellation', initial_value=False,
                       vals=vals.Bool(), parameter_class=ManualParameter)
    self.add_parameter('flux_channels', initial_value=[],
                       vals=vals.Lists(), parameter_class=ManualParameter)
    self.add_parameter('flux_crosstalk_cancellation_mtx',
                       initial_value=None, parameter_class=ManualParameter)
    self.add_parameter('flux_crosstalk_cancellation_shift_mtx',
                       initial_value=None, parameter_class=ManualParameter)

    # Internal bookkeeping state.
    self._inter_element_spacing = 'auto'
    self.channels = set()   # names of all defined pulsar channels
    self.awgs = set()       # names of all registered AWGs
    self.last_sequence = None
    self.last_elements = None
    self._awgs_with_waveforms = set()

    self._awgs_prequeried_state = False

    self._zi_waves_cleared = False
    self._hash_to_wavename_table = {}

    self.num_seg = 0

    # Remember the most recent instance for Pulsar.get_instance().
    Pulsar._instance = self
@staticmethod
def get_instance():
    """Return the most recently created Pulsar instance."""
    return Pulsar._instance
def define_awg_channels(self, awg, channel_name_map=None):
    """Register an AWG and create pulsar parameters for all its channels.

    Args:
        awg: AWG instrument object to add to the pulsar.
        channel_name_map: optional dict mapping channel ids (e.g. 'ch1')
            to custom pulsar channel names.

    Raises:
        KeyError: if a requested channel name or the AWG itself has
            already been defined.
    """
    if channel_name_map is None:
        channel_name_map = {}

    for channel_name in channel_name_map.values():
        if channel_name in self.channels:
            raise KeyError("Channel named '{}' already defined".format(
                channel_name))
    if awg.name in self.awgs:
        raise KeyError("AWG '{}' already added to pulsar".format(awg.name))

    # The mixin chain (AWG5014/HDAWG8/UHFQC) dispatches on the AWG type.
    # (Removed the unused local `fail`, a leftover from old error handling.)
    super()._create_awg_parameters(awg, channel_name_map)
    self.awgs.add(awg.name)
def find_awg_channels(self, awg):
    """Return the names of all pulsar channels that live on AWG `awg`."""
    return [channel for channel in self.channels
            if self.get('{}_awg'.format(channel)) == awg]
def AWG_obj(self, **kw):
    """Return the AWG instrument object for an AWG or channel name.

    Exactly one keyword argument must be given:
        awg: name of the AWG instrument.
        channel: name of a pulsar channel on the wanted AWG.

    Raises:
        ValueError: if both or neither keyword argument is passed.
    """
    awg = kw.get('awg', None)
    chan = kw.get('channel', None)
    if awg is not None and chan is not None:
        raise ValueError('Both `awg` and `channel` arguments passed to '
                         'Pulsar.AWG_obj()')
    if awg is None and chan is None:
        raise ValueError('Either `awg` or `channel` argument needs to be '
                         'passed to Pulsar.AWG_obj()')
    name = awg if awg is not None else self.get('{}_awg'.format(chan))
    return Instrument.find_instrument(name)
def clock(self, channel=None, awg=None):
    """Return the sample clock frequency of a channel's AWG.

    Exactly one of `channel` or `awg` must be given. While AWGs are
    prequeried, the cached clock value is returned.

    Raises:
        ValueError: if both or neither argument is given.
        TypeError: if the AWG instrument type is not supported.
    """
    if channel is not None and awg is not None:
        raise ValueError('Both channel and awg arguments passed to '
                         'Pulsar.clock()')
    if channel is None and awg is None:
        raise ValueError('Neither channel nor awg arguments passed to '
                         'Pulsar.clock()')

    if channel is not None:
        awg = self.get('{}_awg'.format(channel))

    if self._awgs_prequeried_state:
        return self._clocks[awg]

    obj = self.AWG_obj(awg=awg)
    clock_error = None
    try:
        return super()._clock(obj)
    except AttributeError as e:
        # No mixin accepted this instrument type.
        clock_error = e
    raise TypeError('Unsupported AWG instrument: {} of type {}. '
                    .format(obj.name, type(obj)) + str(clock_error))
def active_awgs(self):
    """Return the set of registered AWG names whose `_active` flag is set."""
    active = set()
    for awg in self.awgs:
        if self.get('{}_active'.format(awg)):
            active.add(awg)
    return active
def awgs_with_waveforms(self, awg=None):
    """Query or update the set of AWGs that have waveforms programmed.

    Args:
        awg: if None, return the set of AWG names that have waveforms;
            otherwise add `awg` to that set.

    Returns:
        The set of AWG names when `awg` is None, else None.
    """
    # Fixed: compare against the None singleton with `is` (PEP 8),
    # not `==`.
    if awg is None:
        return self._awgs_with_waveforms
    self._awgs_with_waveforms.add(awg)
def start(self, exclude=None):
    """Start all active AWGs that have waveforms loaded.

    If a master AWG is configured, it is stopped first, all slave AWGs
    are started and polled until they report running, and only then is
    the master started — so no trigger is emitted before the slaves are
    armed.

    Args:
        exclude: optional list of AWG names that must not be started.
    """
    if exclude is None:
        exclude = []

    # Restart the needed AWGs from a known (stopped) state.
    awgs_with_waveforms = self.awgs_with_waveforms()
    used_awgs = set(self.active_awgs()) & awgs_with_waveforms

    for awg in used_awgs:
        self._stop_awg(awg)

    if self.master_awg() is None:
        for awg in used_awgs:
            if awg not in exclude:
                self._start_awg(awg)
    else:
        if self.master_awg() not in exclude:
            self.master_awg.get_instr().stop()
        for awg in used_awgs:
            if awg != self.master_awg() and awg not in exclude:
                self._start_awg(awg)
        tstart = time.time()
        for awg in used_awgs:
            if awg == self.master_awg() or awg in exclude:
                continue
            good = False
            # Poll for up to 10 s until the slave AWG reports running.
            # NOTE(review): `awg` is a name string here, while the
            # `_is_awg_running` implementations in the mixins take the
            # instrument object — confirm a name-accepting overload
            # exists elsewhere in this file.
            while not (good or time.time() > tstart + 10):
                if self._is_awg_running(awg):
                    good = True
                else:
                    time.sleep(0.1)
            if not good:
                raise Exception('AWG {} did not start in 10s'
                                .format(awg))
        if self.master_awg() not in exclude:
            self.master_awg.get_instr().start()
def stop(self):
    """Stop every active AWG that currently has waveforms loaded."""
    used_awgs = set(self.active_awgs()) & set(self.awgs_with_waveforms())
    for awg in used_awgs:
        self._stop_awg(awg)
def program_awgs(self, sequence, awgs='all'):
    """Compile a Sequence and program it onto the given AWGs.

    Args:
        sequence: Sequence object to compile and upload.
        awgs: iterable of AWG names, or 'all' for all active AWGs.
    """
    self.last_sequence = sequence

    if awgs == 'all':
        awgs = self.active_awgs()

    # These AWGs are about to be reprogrammed, so they no longer count
    # as having waveforms until programming succeeds.
    self._awgs_with_waveforms -= awgs

    # Cache instrument state to avoid repeated hardware queries.
    self.AWGs_prequeried(True)

    log.info(f'Starting compilation of sequence {sequence.name}')
    t0 = time.time()
    waveforms, awg_sequences = sequence.generate_waveforms_sequences()
    log.info(f'Finished compilation of sequence {sequence.name} in '
             f'{time.time() - t0}')

    channels_used = self._channels_in_awg_sequences(awg_sequences)
    repeat_dict = self._generate_awg_repeat_dict(sequence.repeat_patterns,
                                                 channels_used)
    self._zi_waves_cleared = False
    self._hash_to_wavename_table = {}

    for awg in awgs:
        log.info(f'Started programming {awg}')
        t0 = time.time()
        if awg in repeat_dict:
            self._program_awg(self.AWG_obj(awg=awg),
                              awg_sequences.get(awg, {}), waveforms,
                              repeat_pattern=repeat_dict[awg])
        else:
            self._program_awg(self.AWG_obj(awg=awg),
                              awg_sequences.get(awg, {}), waveforms)
        log.info(f'Finished programming {awg} in {time.time() - t0}')

    self.num_seg = len(sequence.segments)
    self.AWGs_prequeried(False)
def _program_awg(self, obj, awg_sequence, waveforms, repeat_pattern=None):
if repeat_pattern is not None:
super()._program_awg(obj, awg_sequence, waveforms,
repeat_pattern=repeat_pattern)
else:
super()._program_awg(obj, awg_sequence, waveforms)
def _hash_to_wavename(self, h):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
if h not in self._hash_to_wavename_table:
hash_int = abs(hash(h))
wname = ''.join(to_base(hash_int, len(alphabet), alphabet))[::-1]
while wname in self._hash_to_wavename_table.values():
hash_int += 1
wname = ''.join(to_base(hash_int, len(alphabet), alphabet)) \
[::-1]
self._hash_to_wavename_table[h] = wname
return self._hash_to_wavename_table[h]
def _zi_wave_definition(self, wave, defined_waves=None):
if defined_waves is None:
defined_waves = set()
wave_definition = []
w1, w2 = self._zi_waves_to_wavenames(wave)
for analog, marker, wc in [(wave[0], wave[1], w1),
(wave[2], wave[3], w2)]:
if analog is not None:
wa = self._hash_to_wavename(analog)
if wa not in defined_waves:
wave_definition.append(f'wave {wa} = "{wa}";')
defined_waves.add(wa)
if marker is not None:
wm = self._hash_to_wavename(marker)
if wm not in defined_waves:
wave_definition.append(f'wave {wm} = "{wm}";')
defined_waves.add(wm)
if analog is not None and marker is not None:
if wc not in defined_waves:
wave_definition.append(f'wave {wc} = {wa} + {wm};')
defined_waves.add(wc)
return wave_definition
    def _zi_playback_string(self, name, device, wave, acq=False, codeword=False,
                            append_zeros=0):
        """Build the seqc playback snippet for one element.

        Args:
            name: lookup name for the ``{name}_trigger_source`` parameter.
            device: device type string; ``'uhf'`` changes the
                waitDigTrigger call signature.
            wave: 4-tuple (analog1, marker1, analog2, marker2) of
                waveform hashes (entries may be None).
            acq: if True, append the readout/acquisition trigger lines.
            codeword: if True, play via the DIO codeword table.
            append_zeros: if nonzero, append a playZero of that length.

        Returns:
            list of seqc code lines (strings).
        """
        playback_string = []
        w1, w2 = self._zi_waves_to_wavenames(wave)
        trig_source = self.get('{}_trigger_source'.format(name))
        if trig_source == 'Dig1':
            # The UHF variant takes an extra ', 1' argument.
            playback_string.append(
                'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))
        elif trig_source == 'Dig2':
            playback_string.append('waitDigTrigger(2,1);')
        else:
            playback_string.append(f'wait{trig_source}Trigger();')
        if codeword and not (w1 is None and w2 is None):
            playback_string.append('playWaveDIO();')
        else:
            # Channels with no waveform of their own get a zero-amplitude
            # dummy derived from the other channel's wave.
            if w1 is None and w2 is not None:
                playback_string.append(f'playWave(marker(1,0)*0*{w2}, {w2});')
            elif w1 is not None and w2 is None:
                playback_string.append(f'playWave({w1}, marker(1,0)*0*{w1});')
            elif w1 is not None or w2 is not None:
                playback_string.append('playWave({});'.format(
                    _zi_wavename_pair_to_argument(w1, w2)))
        if acq:
            playback_string.append('setTrigger(RO_TRIG);')
            playback_string.append('setTrigger(WINT_EN);')
        if append_zeros:
            playback_string.append(f'playZero({append_zeros});')
        return playback_string
def _zi_interleaved_playback_string(self, name, device, counter,
wave, acq=False, codeword=False):
playback_string = []
w1, w2 = self._zi_waves_to_wavenames(wave)
if w1 is None or w2 is None:
raise ValueError('When using HDAWG modulation both I and Q need '
'to be defined')
wname = f'wave{counter}'
interleaves = [f'wave {wname} = interleave({w1}, {w2});']
if not codeword:
if not acq:
playback_string.append(f'prefetch({wname},{wname});')
trig_source = self.get('{}_trigger_source'.format(name))
if trig_source == 'Dig1':
playback_string.append(
'waitDigTrigger(1{});'.format(', 1' if device == 'uhf' else ''))
elif trig_source == 'Dig2':
playback_string.append('waitDigTrigger(2,1);')
else:
playback_string.append(f'wait{trig_source}Trigger();')
if codeword:
raise NotImplementedError('Modulation in combination with codeword'
'pulses has not yet been implemented!')
else:
playback_string.append(f'playWave({wname},{wname});')
if acq:
playback_string.append('setTrigger(RO_TRIG);')
playback_string.append('setTrigger(WINT_EN);')
return playback_string, interleaves
def _zi_codeword_table_entry(self, codeword, wave):
w1, w2 = self._zi_waves_to_wavenames(wave)
if w1 is None and w2 is not None:
return [f'setWaveDIO({codeword}, zeros(1) + marker(1, 0), {w2});']
elif not (w1 is None and w2 is None):
return ['setWaveDIO({}, {});'.format(codeword,
_zi_wavename_pair_to_argument(w1, w2))]
else:
return []
def _zi_waves_to_wavenames(self, wave):
wavenames = []
for analog, marker in [(wave[0], wave[1]), (wave[2], wave[3])]:
if analog is None and marker is None:
wavenames.append(None)
elif analog is None and marker is not None:
wavenames.append(self._hash_to_wavename(marker))
elif analog is not None and marker is None:
wavenames.append(self._hash_to_wavename(analog))
else:
wavenames.append(self._hash_to_wavename((analog, marker)))
return wavenames
def _zi_write_waves(self, waveforms):
wave_dir = _zi_wave_dir()
for h, wf in waveforms.items():
filename = os.path.join(wave_dir, self._hash_to_wavename(h)+'.csv')
fmt = '%.18e' if wf.dtype == np.float else '%d'
np.savetxt(filename, wf, delimiter=",", fmt=fmt)
def _start_awg(self, awg):
obj = self.AWG_obj(awg=awg)
obj.start()
def _stop_awg(self, awg):
obj = self.AWG_obj(awg=awg)
obj.stop()
def _is_awg_running(self, awg):
fail = None
obj = self.AWG_obj(awg=awg)
try:
return super()._is_awg_running(obj)
except AttributeError as e:
fail = e
if fail is not None:
raise TypeError('Unsupported AWG instrument: {} of type {}. '
.format(obj.name, type(obj)) + str(fail))
    def _set_inter_element_spacing(self, val):
        """Set the inter-element spacing ('auto' or a numeric value --
        time units assumed, confirm against callers)."""
        self._inter_element_spacing = val
def _get_inter_element_spacing(self):
if self._inter_element_spacing != 'auto':
return self._inter_element_spacing
else:
max_spacing = 0
for awg in self.awgs:
max_spacing = max(max_spacing, self.get(
'{}_inter_element_deadtime'.format(awg)))
return max_spacing
    def AWGs_prequeried(self, status=None):
        """Tri-state accessor for the prequery cache.

        status is None  -> return the current cache state.
        status truthy   -> (re)build the cache, then mark it valid.
        status falsy    -> invalidate the cache.
        """
        if status is None:
            return self._awgs_prequeried_state
        elif status:
            # Mark the cache invalid first -- presumably so the queries
            # below go to the instruments rather than a stale cache;
            # confirm against clock()/get() implementations.
            self._awgs_prequeried_state = False
            self._clocks = {}
            for awg in self.awgs:
                self._clocks[awg] = self.clock(awg=awg)
            for c in self.channels:
                # Return value discarded: queried only to populate caches.
                self.get(c + '_amp')
            self._awgs_prequeried_state = True
        else:
            self._awgs_prequeried_state = False
def _id_channel(self, cid, awg):
for cname in self.channels:
if self.get('{}_awg'.format(cname)) == awg and \
self.get('{}_id'.format(cname)) == cid:
return cname
return None
@staticmethod
def _channels_in_awg_sequences(awg_sequences):
channels_used = dict()
for awg in awg_sequences:
channels_used[awg] = set()
for segname in awg_sequences[awg]:
if awg_sequences[awg][segname] is None:
continue
elements = awg_sequences[awg][segname]
for cw in elements:
if cw != "metadata":
channels_used[awg] |= elements[cw].keys()
return channels_used
def _generate_awg_repeat_dict(self, repeat_dict_per_ch, channels_used):
awg_ch_repeat_dict = dict()
repeat_dict_per_awg = dict()
for cname in repeat_dict_per_ch:
awg = self.get(f"{cname}_awg")
chid = self.get(f"{cname}_id")
if not awg in awg_ch_repeat_dict.keys():
awg_ch_repeat_dict[awg] = []
awg_ch_repeat_dict[awg].append(chid)
if repeat_dict_per_awg.get(awg, repeat_dict_per_ch[cname]) \
!= repeat_dict_per_ch[cname]:
raise NotImplementedError(f"Repeat pattern on {cname} is "
f"different from at least one other channel on {awg}:"
f"{repeat_dict_per_ch[cname]} vs {repeat_dict_per_awg[awg]}")
repeat_dict_per_awg[awg] = repeat_dict_per_ch[cname]
for awg_repeat, chs_repeat in awg_ch_repeat_dict.items():
for ch in channels_used[awg_repeat]:
assert ch in chs_repeat, f"Repeat pattern " \
f"provided for {awg_repeat} but no pattern was given on " \
f"{ch}. All used channels on the same awg must have a " \
f"repeat pattern."
return repeat_dict_per_awg
def to_base(n, b, alphabet=None, prev=None):
    """Decompose non-negative ``n`` into base-``b`` digits, least
    significant first.

    Digits are appended after the entries of ``prev`` (a fresh list by
    default); when ``alphabet`` is given, every entry of the result
    (including those from ``prev``) is mapped through it.  ``n == 0``
    yields no digits of its own.
    """
    digits = [] if prev is None else list(prev)
    while n != 0:
        n, digit = divmod(n, b)
        digits.append(digit)
    if alphabet is None:
        return digits
    return [alphabet[d] for d in digits]
def _zi_wave_dir():
if os.name == 'nt':
dll = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)
if dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False):
_basedir = buf.value
else:
log.warning('Could not extract my documents folder')
else:
_basedir = os.path.expanduser('~')
wave_dir = os.path.join(_basedir, 'Zurich Instruments', 'LabOne',
'WebServer', 'awg', 'waves')
if not os.path.exists(wave_dir):
os.makedirs(wave_dir)
return wave_dir
def _zi_clear_waves():
    """Remove all cached wave artifacts (.csv files and .cache
    directories) from the LabOne waves directory."""
    wave_dir = _zi_wave_dir()
    for entry in os.listdir(wave_dir):
        path = os.path.join(wave_dir, entry)
        if entry.endswith(".csv"):
            os.remove(path)
        elif entry.endswith('.cache'):
            shutil.rmtree(path)
def _zi_wavename_pair_to_argument(w1, w2):
if w1 is not None and w2 is not None:
return f'{w1}, {w2}'
elif w1 is not None and w2 is None:
return f'1, {w1}'
elif w1 is None and w2 is not None:
return f'2, {w2}'
else:
return '' | true | true |
f728aad6629f77a33def0e76786f6f1689baf0e0 | 13,795 | py | Python | sparse_operation_kit/unit_test/test_scripts/tf2/test_sparse_emb_demo_model_multi_worker.py | PeterXingke/HugeCTR | d7552c4c5f93ff18ded961645cac82d5d8b5b785 | [
"Apache-2.0"
] | 1 | 2021-12-23T07:31:32.000Z | 2021-12-23T07:31:32.000Z | sparse_operation_kit/unit_test/test_scripts/tf2/test_sparse_emb_demo_model_multi_worker.py | PeterXingke/HugeCTR | d7552c4c5f93ff18ded961645cac82d5d8b5b785 | [
"Apache-2.0"
] | null | null | null | sparse_operation_kit/unit_test/test_scripts/tf2/test_sparse_emb_demo_model_multi_worker.py | PeterXingke/HugeCTR | d7552c4c5f93ff18ded961645cac82d5d8b5b785 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), r"../../../")))
import sparse_operation_kit as sok
import tensorflow as tf
import numpy as np
import os, json
import pickle
import utils
from test_sparse_emb_demo_model_single_worker import SOKDemo, test_tf_demo, check_saved_embedding_variables
def test_sok_demo(args, init_tensors, *random_samples):
    """Train the SOK demo model under MultiWorkerMirroredStrategy.

    Builds the TF_CONFIG cluster spec for this worker, trains for one
    pass over ``random_samples`` and returns
    ``(sok_results, embedding_variable_name)`` where ``sok_results``
    collects the forward embedding vectors of every step on this worker.
    """
    port = 12345
    # One worker per entry of args.ips; ports are consecutive from 12345.
    os.environ["TF_CONFIG"] = json.dumps({
        'cluster': {"worker": [args.ips[i] + ":" + str(port + i) for i in range(args.worker_num)] },
        'task': {"type": 'worker', "index": args.task_id}
    })
    strategy = tf.distribute.MultiWorkerMirroredStrategy()
    with strategy.scope():
        result = sok.Init(global_batch_size=args.global_batch_size)
        plugin_demo = SOKDemo(combiner=args.combiner,
                              max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
                              slot_num=args.slot_num, max_nnz=args.max_nnz,
                              embedding_vec_size=args.embedding_vec_size)
        emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
        dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
    plugin_saver = sok.Saver()
    # Either restore previously dumped embedding values or load the
    # provided initial tensors.
    if (1 == args.restore_params):
        filepath = r"./embedding_variables"
        plugin_saver.restore_from_file(plugin_demo.embedding_layer.embedding_variable, filepath)
    else:
        status = plugin_saver.load_embedding_values(plugin_demo.embedding_layer.embedding_variable, init_tensors)
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    def _replica_loss(labels, logits):
        # Per-replica loss scaled by the global batch size.
        loss = loss_fn(labels, logits)
        return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
    @tf.function
    def _train_step(inputs, labels):
        with tf.GradientTape() as tape:
            logit, embedding_vector = plugin_demo(inputs, training=True)
            loss = _replica_loss(labels, logit)
        embedding_variables, other_variable = sok.split_embedding_variable_from_others(plugin_demo.trainable_variables)
        grads, emb_grads = tape.gradient(loss, [other_variable, embedding_variables])
        # Non-plugin optimizers need the OptimizerScope wrapper to update
        # SOK embedding variables.
        if "plugin" not in args.optimizer:
            with sok.OptimizerScope(embedding_variables):
                emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
                                        experimental_aggregate_gradients=False)
        else:
            emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
                                    experimental_aggregate_gradients=False)
        dense_opt.apply_gradients(zip(grads, other_variable))
        return logit, embedding_vector
    sok_results = list()
    def _dataset_fn(input_context):
        replica_batch_size = input_context.get_per_replica_batch_size(args.global_batch_size)
        dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size, to_sparse_tensor=True, repeat=1)
        # because each worker has its own data source, so that no need to shard the dataset.
        return dataset
    dataset = strategy.distribute_datasets_from_function(_dataset_fn)
    for i, (sparse_tensors, replica_labels) in enumerate(dataset):
        print("-" * 30, "step ", str(i), "-" * 30)
        logit, embedding_vector = strategy.run(_train_step, args=(sparse_tensors, replica_labels))
        print("[INFO]: embedding_vector\n", embedding_vector)
        sok_results.append(embedding_vector)
        # FIXME: when the forward computation is too fast, there
        # may exist some conflicts with datareader, which cause the program hang.
        import time
        time.sleep(0.2) # seconds
    # save params to file.
    if 1 == args.save_params:
        filepath = r"./embedding_variables/"
        # Only the chief (task 0) creates the directory.
        utils.try_make_dirs(filepath, chief=(True if args.task_id == 0 else False))
        plugin_saver.dump_to_file(plugin_demo.embedding_layer.embedding_variable, filepath)
    return sok_results, plugin_demo.embedding_layer.embedding_variable.values[0].m_var_name
def compare_sok_with_tf(args):
    """Run the SOK demo on this worker, then verify its forward embedding
    vectors against a single-process TensorFlow reference.

    Workers exchange datasets and results through files on a shared
    filesystem; raises if shapes/lengths disagree, and asserts numeric
    closeness via tf.debugging.assert_near.
    """
    if (args.global_batch_size % args.local_gpu_num != 0):
        raise ValueError("global_batch_size: %d is not divisible by local_gpu_num: %d"
                         %(args.global_batch_size, args.local_gpu_num))
    if (args.global_batch_size % args.worker_num != 0):
        raise ValueError("global_batch_size: %d is not divisible by worker_num: %d"
                         %(args.global_batch_size, args.worker_num))
    # each worker generate different dataset
    if args.generate_new_datas:
        worker_batch_size = args.global_batch_size // args.worker_num
        random_samples_local = utils.generate_random_samples(num_of_samples=worker_batch_size * args.iter_num,
                                                             vocabulary_size=args.local_gpu_num * args.max_vocabulary_size_per_gpu * args.worker_num,
                                                             slot_num=args.slot_num,
                                                             max_nnz=args.max_nnz)
        utils.save_to_file(r"./random_samples_" + str(args.task_id) + r".file", *random_samples_local)
    else:
        random_samples_local = utils.restore_from_file(r"./random_samples_" + str(args.task_id) + r".file")
    if (0 == args.restore_params):
        # each worker generate same init tensors, because each worker will do the filtering by itself.
        init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,
                                             embedding_vec_size=args.embedding_vec_size,
                                             num=args.local_gpu_num * args.worker_num)
    else:
        filepath = r"./embedding_variables"
        tf_values_filename = os.path.join(filepath, r"tf_variable.file")
        init_tensors = utils.restore_from_file(tf_values_filename)
    sok_results_local, embedding_variable_name = test_sok_demo(args, init_tensors, *random_samples_local)
    # save the forward embedding vector from different worker to file
    utils.save_to_file(r"./sok_embedding_vectors_" + str(args.task_id) + r".file", *sok_results_local)
    # aggregate dataset from different worker
    dataset_filenames = [r"./random_samples_" + str(task_id) + r".file"
                         for task_id in range(args.worker_num)]
    random_samples_total = [list() for _ in range(args.iter_num)]
    random_labels_total = [list() for _ in range(args.iter_num)]
    local_batch_size = args.global_batch_size // args.worker_num
    # Interleave per-worker batches so iteration i holds the full global
    # batch in worker order.
    for work_id in range(args.worker_num):
        samples, labels = utils.restore_from_file(dataset_filenames[work_id])
        for i in range(args.iter_num):
            random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])
            random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])
    random_samples_total = np.concatenate(random_samples_total, axis=0)
    random_labels_total = np.concatenate(random_labels_total, axis=0)
    # Single-process TensorFlow reference run on the aggregated data.
    tf_results = test_tf_demo(args, init_tensors, random_samples_total, random_labels_total)
    # aggregate forward embedding vector from different worker
    sok_results_filenames = [r"./sok_embedding_vectors_" + str(task_id) + r".file"
                             for task_id in range(args.worker_num)]
    sok_results_total = list()
    for file_name in sok_results_filenames:
        sok_results_local = utils.restore_from_file(file_name)
        sok_results_total.append(sok_results_local)
    if (len(sok_results_total[0]) != len(tf_results)):
        raise ValueError("The length of results obtained from sok: %d is not equal to that of tensorflow: %d."
                         %(len(sok_results_total[0]), len(tf_results)))
    if (len(tf_results) != args.iter_num):
        raise ValueError("The length of embedding vectors: %d is not equal to iteration number: %d."
                         %(len(tf_results), args.iter_num))
    # for i, sok_vector in enumerate(sok_results_total):
    for i in range(args.iter_num):
        # Multi-GPU results are PerReplica values and need an extra concat.
        if args.local_gpu_num != 1:
            sok_vector = tf.concat([tf.concat(sok_results_total[task_id][i].values, axis=0)
                                    for task_id in range(args.worker_num)], axis=0)
        else:
            sok_vector = tf.concat([sok_results_total[task_id][i]
                                    for task_id in range(args.worker_num)], axis=0)
        tf.debugging.assert_near(tf.reshape(sok_vector,
                                            shape=[-1, tf.shape(sok_vector)[-1]]),
                                 tf_results[i],
                                 atol=1e-4,
                                 rtol=1e-4)
    print("\n[INFO]: With MultiWorkerMirroredStrategy, the embedding vector obtained from " +\
          "sparse operation kit and tensorflow are consistent for %d iterations."
          %args.iter_num)
    if (1 == args.save_params):
        check_saved_embedding_variables(args, embedding_variable_name)
def get_task_id(ips):
    """Return the index of this machine's IP address within ``ips``.

    Raises:
        ValueError: if the local IP is not present in ``ips``.
    """
    local_ip = utils.get_local_ip()
    if local_ip in ips:
        return ips.index(local_ip)
    raise ValueError("Cannot find local_ip: %s in ips list: [%s]"
                     %(local_ip, ", ".join(ips)))
if __name__ == "__main__":
    # Command-line driver: parses test parameters, then either spawns one
    # local process per worker (all IPs local) or runs this machine's
    # share of a distributed test.
    parser = argparse.ArgumentParser(description='test demo model with single worker.')
    parser.add_argument('--local_gpu_num', type=int,
                        help='the number of GPUs used to do paralell training.',
                        required=False, default=8)
    parser.add_argument('--iter_num', type=int,
                        help='the number of testing iterations.',
                        required=False, default=100)
    parser.add_argument('--max_vocabulary_size_per_gpu', type=int,
                        required=False, default=128)
    parser.add_argument('--slot_num', type=int,
                        help='the number of feature fields',
                        required=False, default=1)
    parser.add_argument('--max_nnz', type=int,
                        help='the maximum number of keys in one slot',
                        required=False, default=1)
    parser.add_argument('--embedding_vec_size', type=int,
                        help='the dimention of embedding vector',
                        required=False, default=1)
    parser.add_argument('--combiner', type=str,
                        help='the combiner used to do reduction for sparse embedding layer. ' +\
                             'It is only respected in sparse embedding layer.',
                        required=False, default='mean', choices=['mean', 'sum'])
    parser.add_argument('--global_batch_size', type=int, required=False, default=16)
    parser.add_argument('--optimizer', type=str,
                        help="use what optimizer",
                        required=False, default='plugin_adam',
                        choices=['plugin_adam', 'adam', 'sgd'])
    parser.add_argument('--ips', type=str, nargs="+",
                        help="the ip address of each worker.",
                        required=False, default="0.0.0.0")
    parser.add_argument('--generate_new_datas', type=int, choices=[0, 1],
                        help='whether to generate new random samples',
                        required=False, default=1)
    parser.add_argument('--save_params', type=int, choices=[0, 1],
                        help='whether to save the trained parameters.',
                        required=False, default=0)
    parser.add_argument('--restore_params', type=int, choices=[0, 1],
                        help='whether to restore from saved files. '+\
                             'By default, the testing program will generate random ' +\
                             'initial value to initialize trainable parameters '+\
                             'rather than restore trainable parameters from file.',
                        required=False, default=0)
    args = parser.parse_args()
    # A single --ips value arrives as a plain string; normalize to a list.
    if not isinstance(args.ips, list):
        args.ips = [args.ips]
    args.worker_num = len(args.ips)
    if utils.all_ips_in_local(args.ips):
        # All workers on this machine: fork one process per worker, each
        # pinned to its own slice of GPUs via CUDA_VISIBLE_DEVICES.
        processes = list()
        for task_id in range(args.worker_num):
            available_gpus = ",".join([str(args.local_gpu_num * task_id + i)
                                       for i in range(args.local_gpu_num)])
            print("[INFO]: on task: %d, its available GPUs are: %s" %(task_id, available_gpus))
            os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus
            process = utils.TestProcess(func=compare_sok_with_tf, task_id=task_id, arguments=args)
            process.start()
            processes.append(process)
        for process in processes:
            process.join()
    else:
        # Distributed run: this machine handles only its own task id.
        args.task_id = get_task_id(args.ips)
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(args.local_gpu_num)])
        compare_sok_with_tf(args)
| 50.904059 | 149 | 0.638202 |
import argparse
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), r"../../../")))
import sparse_operation_kit as sok
import tensorflow as tf
import numpy as np
import os, json
import pickle
import utils
from test_sparse_emb_demo_model_single_worker import SOKDemo, test_tf_demo, check_saved_embedding_variables
def test_sok_demo(args, init_tensors, *random_samples):
    """Train the SOK demo model under MultiWorkerMirroredStrategy and
    return ``(sok_results, embedding_variable_name)``, where
    ``sok_results`` collects this worker's forward embedding vectors."""
    port = 12345
    # One worker per entry of args.ips; ports are consecutive from 12345.
    os.environ["TF_CONFIG"] = json.dumps({
        'cluster': {"worker": [args.ips[i] + ":" + str(port + i) for i in range(args.worker_num)] },
        'task': {"type": 'worker', "index": args.task_id}
    })
    strategy = tf.distribute.MultiWorkerMirroredStrategy()
    with strategy.scope():
        result = sok.Init(global_batch_size=args.global_batch_size)
        plugin_demo = SOKDemo(combiner=args.combiner,
                              max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
                              slot_num=args.slot_num, max_nnz=args.max_nnz,
                              embedding_vec_size=args.embedding_vec_size)
        emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
        dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
    plugin_saver = sok.Saver()
    # Restore dumped embedding values, or load the provided init tensors.
    if (1 == args.restore_params):
        filepath = r"./embedding_variables"
        plugin_saver.restore_from_file(plugin_demo.embedding_layer.embedding_variable, filepath)
    else:
        status = plugin_saver.load_embedding_values(plugin_demo.embedding_layer.embedding_variable, init_tensors)
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    def _replica_loss(labels, logits):
        loss = loss_fn(labels, logits)
        return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
    @tf.function
    def _train_step(inputs, labels):
        with tf.GradientTape() as tape:
            logit, embedding_vector = plugin_demo(inputs, training=True)
            loss = _replica_loss(labels, logit)
        embedding_variables, other_variable = sok.split_embedding_variable_from_others(plugin_demo.trainable_variables)
        grads, emb_grads = tape.gradient(loss, [other_variable, embedding_variables])
        # Non-plugin optimizers need OptimizerScope for SOK variables.
        if "plugin" not in args.optimizer:
            with sok.OptimizerScope(embedding_variables):
                emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
                                        experimental_aggregate_gradients=False)
        else:
            emb_opt.apply_gradients(zip(emb_grads, embedding_variables),
                                    experimental_aggregate_gradients=False)
        dense_opt.apply_gradients(zip(grads, other_variable))
        return logit, embedding_vector
    sok_results = list()
    def _dataset_fn(input_context):
        # Each worker has its own data source, so no sharding needed.
        replica_batch_size = input_context.get_per_replica_batch_size(args.global_batch_size)
        dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size, to_sparse_tensor=True, repeat=1)
        return dataset
    dataset = strategy.distribute_datasets_from_function(_dataset_fn)
    for i, (sparse_tensors, replica_labels) in enumerate(dataset):
        print("-" * 30, "step ", str(i), "-" * 30)
        logit, embedding_vector = strategy.run(_train_step, args=(sparse_tensors, replica_labels))
        print("[INFO]: embedding_vector\n", embedding_vector)
        sok_results.append(embedding_vector)
        # Sleep works around a datareader race when steps complete too
        # quickly (see the commented original for the FIXME).
        import time
        time.sleep(0.2)
    if 1 == args.save_params:
        filepath = r"./embedding_variables/"
        utils.try_make_dirs(filepath, chief=(True if args.task_id == 0 else False))
        plugin_saver.dump_to_file(plugin_demo.embedding_layer.embedding_variable, filepath)
    return sok_results, plugin_demo.embedding_layer.embedding_variable.values[0].m_var_name
def compare_sok_with_tf(args):
    """Run the SOK demo on this worker, then verify its forward embedding
    vectors against a single-process TensorFlow reference (workers
    exchange data and results via files)."""
    if (args.global_batch_size % args.local_gpu_num != 0):
        raise ValueError("global_batch_size: %d is not divisible by local_gpu_num: %d"
                         %(args.global_batch_size, args.local_gpu_num))
    if (args.global_batch_size % args.worker_num != 0):
        raise ValueError("global_batch_size: %d is not divisible by worker_num: %d"
                         %(args.global_batch_size, args.worker_num))
    # Each worker generates (or restores) its own dataset slice.
    if args.generate_new_datas:
        worker_batch_size = args.global_batch_size // args.worker_num
        random_samples_local = utils.generate_random_samples(num_of_samples=worker_batch_size * args.iter_num,
                                                             vocabulary_size=args.local_gpu_num * args.max_vocabulary_size_per_gpu * args.worker_num,
                                                             slot_num=args.slot_num,
                                                             max_nnz=args.max_nnz)
        utils.save_to_file(r"./random_samples_" + str(args.task_id) + r".file", *random_samples_local)
    else:
        random_samples_local = utils.restore_from_file(r"./random_samples_" + str(args.task_id) + r".file")
    if (0 == args.restore_params):
        # Identical init tensors everywhere; each worker filters locally.
        init_tensors = utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,
                                             embedding_vec_size=args.embedding_vec_size,
                                             num=args.local_gpu_num * args.worker_num)
    else:
        filepath = r"./embedding_variables"
        tf_values_filename = os.path.join(filepath, r"tf_variable.file")
        init_tensors = utils.restore_from_file(tf_values_filename)
    sok_results_local, embedding_variable_name = test_sok_demo(args, init_tensors, *random_samples_local)
    utils.save_to_file(r"./sok_embedding_vectors_" + str(args.task_id) + r".file", *sok_results_local)
    # Aggregate every worker's dataset into the global batches.
    dataset_filenames = [r"./random_samples_" + str(task_id) + r".file"
                         for task_id in range(args.worker_num)]
    random_samples_total = [list() for _ in range(args.iter_num)]
    random_labels_total = [list() for _ in range(args.iter_num)]
    local_batch_size = args.global_batch_size // args.worker_num
    for work_id in range(args.worker_num):
        samples, labels = utils.restore_from_file(dataset_filenames[work_id])
        for i in range(args.iter_num):
            random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])
            random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])
    random_samples_total = np.concatenate(random_samples_total, axis=0)
    random_labels_total = np.concatenate(random_labels_total, axis=0)
    # Single-process TensorFlow reference run on the aggregated data.
    tf_results = test_tf_demo(args, init_tensors, random_samples_total, random_labels_total)
    # Aggregate every worker's forward embedding vectors.
    sok_results_filenames = [r"./sok_embedding_vectors_" + str(task_id) + r".file"
                             for task_id in range(args.worker_num)]
    sok_results_total = list()
    for file_name in sok_results_filenames:
        sok_results_local = utils.restore_from_file(file_name)
        sok_results_total.append(sok_results_local)
    if (len(sok_results_total[0]) != len(tf_results)):
        raise ValueError("The length of results obtained from sok: %d is not equal to that of tensorflow: %d."
                         %(len(sok_results_total[0]), len(tf_results)))
    if (len(tf_results) != args.iter_num):
        raise ValueError("The length of embedding vectors: %d is not equal to iteration number: %d."
                         %(len(tf_results), args.iter_num))
    for i in range(args.iter_num):
        # Multi-GPU results are PerReplica values and need an extra concat.
        if args.local_gpu_num != 1:
            sok_vector = tf.concat([tf.concat(sok_results_total[task_id][i].values, axis=0)
                                    for task_id in range(args.worker_num)], axis=0)
        else:
            sok_vector = tf.concat([sok_results_total[task_id][i]
                                    for task_id in range(args.worker_num)], axis=0)
        tf.debugging.assert_near(tf.reshape(sok_vector,
                                            shape=[-1, tf.shape(sok_vector)[-1]]),
                                 tf_results[i],
                                 atol=1e-4,
                                 rtol=1e-4)
    print("\n[INFO]: With MultiWorkerMirroredStrategy, the embedding vector obtained from " +\
          "sparse operation kit and tensorflow are consistent for %d iterations."
          %args.iter_num)
    if (1 == args.save_params):
        check_saved_embedding_variables(args, embedding_variable_name)
def get_task_id(ips):
    """Return the index of this machine's IP address within ``ips``.

    Raises:
        ValueError: if the local IP is not present in ``ips``.
    """
    local_ip = utils.get_local_ip()
    if local_ip in ips:
        return ips.index(local_ip)
    raise ValueError("Cannot find local_ip: %s in ips list: [%s]"
                     %(local_ip, ", ".join(ips)))
if __name__ == "__main__":
    # Command-line driver: parses test parameters, then either spawns one
    # local process per worker (all IPs local) or runs this machine's
    # share of a distributed test.
    parser = argparse.ArgumentParser(description='test demo model with single worker.')
    parser.add_argument('--local_gpu_num', type=int,
                        help='the number of GPUs used to do paralell training.',
                        required=False, default=8)
    parser.add_argument('--iter_num', type=int,
                        help='the number of testing iterations.',
                        required=False, default=100)
    parser.add_argument('--max_vocabulary_size_per_gpu', type=int,
                        required=False, default=128)
    parser.add_argument('--slot_num', type=int,
                        help='the number of feature fields',
                        required=False, default=1)
    parser.add_argument('--max_nnz', type=int,
                        help='the maximum number of keys in one slot',
                        required=False, default=1)
    parser.add_argument('--embedding_vec_size', type=int,
                        help='the dimention of embedding vector',
                        required=False, default=1)
    parser.add_argument('--combiner', type=str,
                        help='the combiner used to do reduction for sparse embedding layer. ' +\
                             'It is only respected in sparse embedding layer.',
                        required=False, default='mean', choices=['mean', 'sum'])
    parser.add_argument('--global_batch_size', type=int, required=False, default=16)
    parser.add_argument('--optimizer', type=str,
                        help="use what optimizer",
                        required=False, default='plugin_adam',
                        choices=['plugin_adam', 'adam', 'sgd'])
    parser.add_argument('--ips', type=str, nargs="+",
                        help="the ip address of each worker.",
                        required=False, default="0.0.0.0")
    parser.add_argument('--generate_new_datas', type=int, choices=[0, 1],
                        help='whether to generate new random samples',
                        required=False, default=1)
    parser.add_argument('--save_params', type=int, choices=[0, 1],
                        help='whether to save the trained parameters.',
                        required=False, default=0)
    parser.add_argument('--restore_params', type=int, choices=[0, 1],
                        help='whether to restore from saved files. '+\
                             'By default, the testing program will generate random ' +\
                             'initial value to initialize trainable parameters '+\
                             'rather than restore trainable parameters from file.',
                        required=False, default=0)
    args = parser.parse_args()
    # A single --ips value arrives as a plain string; normalize to a list.
    if not isinstance(args.ips, list):
        args.ips = [args.ips]
    args.worker_num = len(args.ips)
    if utils.all_ips_in_local(args.ips):
        # All workers on this machine: fork one process per worker, each
        # pinned to its own slice of GPUs via CUDA_VISIBLE_DEVICES.
        processes = list()
        for task_id in range(args.worker_num):
            available_gpus = ",".join([str(args.local_gpu_num * task_id + i)
                                       for i in range(args.local_gpu_num)])
            print("[INFO]: on task: %d, its available GPUs are: %s" %(task_id, available_gpus))
            os.environ["CUDA_VISIBLE_DEVICES"] = available_gpus
            process = utils.TestProcess(func=compare_sok_with_tf, task_id=task_id, arguments=args)
            process.start()
            processes.append(process)
        for process in processes:
            process.join()
    else:
        # Distributed run: this machine handles only its own task id.
        args.task_id = get_task_id(args.ips)
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(args.local_gpu_num)])
        compare_sok_with_tf(args)
| true | true |
f728aad8678accd91f689e9480b5f5a1d385f45c | 2,649 | py | Python | src/python/src/tests/rmq_new_tests/test_spider_errback_successfully_acked.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | 34 | 2019-12-13T10:31:39.000Z | 2022-03-09T15:59:07.000Z | src/python/src/tests/rmq_new_tests/test_spider_errback_successfully_acked.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | 49 | 2020-02-25T19:41:09.000Z | 2022-02-27T12:05:25.000Z | src/python/src/tests/rmq_new_tests/test_spider_errback_successfully_acked.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | 23 | 2019-12-23T15:19:42.000Z | 2022-03-09T16:00:15.000Z | import logging
from typing import Type
import pytest
from scrapy import Request
from scrapy.crawler import CrawlerProcess
from scrapy.http import HtmlResponse
from scrapy.signalmanager import dispatcher
from scrapy.utils.project import get_project_settings
from twisted.python.failure import Failure
from rmq.utils import get_import_full_name
from rmq_alternative.rmq_spider import RmqSpider
from rmq_alternative.schemas.messages.base_rmq_message import BaseRmqMessage
from rmq_alternative.utils import signals as CustomSignals
from rmq_alternative.utils.pika_blocking_connection import PikaBlockingConnection
from tests.rmq_new_tests.constant import QUEUE_NAME
class Response400DownloaderMiddleware:
    """Downloader middleware that short-circuits every request with a
    canned HTTP 400 response, forcing the spider down its errback path."""

    def process_request(self, request, spider):
        body = b'{"status": "400"}'
        return HtmlResponse(url='https://httpstat.us/400', status=400, body=body)
@pytest.fixture
def crawler():
    """Yield a CrawlerProcess whose downloads always return HTTP 400
    (via Response400DownloaderMiddleware) with serialized requests."""
    settings = get_project_settings()
    overrides = {
        "DOWNLOADER_MIDDLEWARES": {
            get_import_full_name(Response400DownloaderMiddleware): 1,
        },
        'CONCURRENT_REQUESTS': 1,
        'LOG_FILE': None,
        'LOG_LEVEL': 'DEBUG',
    }
    settings.setdict(overrides, priority='spider')
    yield CrawlerProcess(settings=settings)
class MySpider(RmqSpider):
name = 'myspider'
message_type: Type[BaseRmqMessage] = BaseRmqMessage
task_queue_name: str = QUEUE_NAME
def parse(self, response, **kwargs):
raise Exception('FAILED')
yield from ()
def errback(self, failure: Failure):
self.logger.info('SPIDER.errback')
yield from ()
def next_request(self, message: BaseRmqMessage) -> Request:
return Request('https://httpstat.us/400', errback=self.errback, dont_filter=True)
class TestSpiderParseException:
def test_crawler_successfully(self, rabbit_setup: PikaBlockingConnection, crawler: CrawlerProcess):
successfully_handled = False
def nack_callback(rmq_message: BaseRmqMessage):
logging.info('NACK_CALLBACK')
crawler.stop()
def ack_callback(rmq_message: BaseRmqMessage):
logging.info('ACK_CALLBACK')
nonlocal successfully_handled
successfully_handled = True
crawler.stop()
dispatcher.connect(ack_callback, CustomSignals.message_ack)
dispatcher.connect(nack_callback, CustomSignals.message_nack)
crawler.crawl(MySpider)
crawler.start()
assert successfully_handled
queue = rabbit_setup.rabbit_channel.queue_declare(queue=QUEUE_NAME, durable=True)
assert queue.method.message_count == 0
| 32.703704 | 103 | 0.729709 | import logging
from typing import Type
import pytest
from scrapy import Request
from scrapy.crawler import CrawlerProcess
from scrapy.http import HtmlResponse
from scrapy.signalmanager import dispatcher
from scrapy.utils.project import get_project_settings
from twisted.python.failure import Failure
from rmq.utils import get_import_full_name
from rmq_alternative.rmq_spider import RmqSpider
from rmq_alternative.schemas.messages.base_rmq_message import BaseRmqMessage
from rmq_alternative.utils import signals as CustomSignals
from rmq_alternative.utils.pika_blocking_connection import PikaBlockingConnection
from tests.rmq_new_tests.constant import QUEUE_NAME
class Response400DownloaderMiddleware:
def process_request(self, request, spider):
return HtmlResponse(url='https://httpstat.us/400', status=400, body=b'{"status": "400"}')
@pytest.fixture
def crawler():
settings = get_project_settings()
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
get_import_full_name(Response400DownloaderMiddleware): 1,
},
'CONCURRENT_REQUESTS': 1,
'LOG_FILE': None,
'LOG_LEVEL': 'DEBUG',
}
settings.setdict(custom_settings or {}, priority='spider')
yield CrawlerProcess(settings=settings)
class MySpider(RmqSpider):
name = 'myspider'
message_type: Type[BaseRmqMessage] = BaseRmqMessage
task_queue_name: str = QUEUE_NAME
def parse(self, response, **kwargs):
raise Exception('FAILED')
yield from ()
def errback(self, failure: Failure):
self.logger.info('SPIDER.errback')
yield from ()
def next_request(self, message: BaseRmqMessage) -> Request:
return Request('https://httpstat.us/400', errback=self.errback, dont_filter=True)
class TestSpiderParseException:
def test_crawler_successfully(self, rabbit_setup: PikaBlockingConnection, crawler: CrawlerProcess):
successfully_handled = False
def nack_callback(rmq_message: BaseRmqMessage):
logging.info('NACK_CALLBACK')
crawler.stop()
def ack_callback(rmq_message: BaseRmqMessage):
logging.info('ACK_CALLBACK')
nonlocal successfully_handled
successfully_handled = True
crawler.stop()
dispatcher.connect(ack_callback, CustomSignals.message_ack)
dispatcher.connect(nack_callback, CustomSignals.message_nack)
crawler.crawl(MySpider)
crawler.start()
assert successfully_handled
queue = rabbit_setup.rabbit_channel.queue_declare(queue=QUEUE_NAME, durable=True)
assert queue.method.message_count == 0
| true | true |
f728abaefb646a0a709c4f911dfcea1a19f73148 | 2,659 | py | Python | tests/test_frontier.py | avpak/okama | b3c4f6b7dfcc314d3171f20b3bc95cfa04268c1a | [
"MIT"
] | null | null | null | tests/test_frontier.py | avpak/okama | b3c4f6b7dfcc314d3171f20b3bc95cfa04268c1a | [
"MIT"
] | null | null | null | tests/test_frontier.py | avpak/okama | b3c4f6b7dfcc314d3171f20b3bc95cfa04268c1a | [
"MIT"
] | null | null | null | import pytest
from pytest import approx
from pytest import mark
import numpy as np
from numpy.testing import assert_allclose
from okama import EfficientFrontier
@mark.frontier
def test_init_efficient_frontier():
with pytest.raises(Exception, match=r'The number of symbols cannot be less than two'):
EfficientFrontier(symbols=['MCFTR.INDX'])
@mark.frontier
def test_bounds_setter_failing(init_efficient_frontier):
with pytest.raises(Exception, match=r'The number of symbols \(2\) and the length of bounds \(3\) should be equal.'):
init_efficient_frontier.bounds = ((0, 1.), (0.5, 1.), (0, 0.5))
@mark.frontier
def test_gmv(init_efficient_frontier):
assert_allclose(init_efficient_frontier.gmv_weights, np.array([0.67501259, 0.32498741]), rtol=1e-2, atol=1e-2)
@mark.frontier
def test_gmv_monthly(init_efficient_frontier):
assert init_efficient_frontier.gmv_monthly[0] == approx(0.026076618401825784, rel=1e-2)
@mark.frontier
def test_gmv_annualized(init_efficient_frontier):
assert init_efficient_frontier.gmv_annualized[0] == approx(0.10198459385117883, rel=1e-2)
@mark.frontier
def test_optimize_return(init_efficient_frontier):
assert init_efficient_frontier.optimize_return(option='max')['Mean_return_monthly'] == approx(0.015324, rel=1e-2)
assert init_efficient_frontier.optimize_return(option='min')['Mean_return_monthly'] == approx(0.008803, rel=1e-2)
@mark.frontier
def test_minimize_risk(init_efficient_frontier):
assert init_efficient_frontier.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)
assert init_efficient_frontier.minimize_risk(target_return=0.139241, monthly_return=False)['SBMX.MOEX'] == approx(0.32498, rel=1e-2)
@mark.frontier
def test_minimize_risk_bounds(init_efficient_frontier_bounds):
assert init_efficient_frontier_bounds.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)
assert init_efficient_frontier_bounds.minimize_risk(target_return=0.1548, monthly_return=False)['SBMX.MOEX'] == approx(0.50030, rel=1e-2)
@mark.frontier
def test_mean_return_range(init_efficient_frontier):
assert_allclose(init_efficient_frontier.mean_return_range, np.array([0.008803, 0.015325]), rtol=1e-2)
@mark.frontier
def test_mean_return_range_bounds(init_efficient_frontier_bounds):
assert_allclose(init_efficient_frontier_bounds.mean_return_range, np.array([0.012064, 0.015325]), rtol=1e-2)
@mark.frontier
def test_ef_points(init_efficient_frontier):
assert init_efficient_frontier.ef_points['Mean return'].iloc[-1] == approx(0.20007879286573038, rel=1e-2)
| 35.453333 | 141 | 0.787514 | import pytest
from pytest import approx
from pytest import mark
import numpy as np
from numpy.testing import assert_allclose
from okama import EfficientFrontier
@mark.frontier
def test_init_efficient_frontier():
with pytest.raises(Exception, match=r'The number of symbols cannot be less than two'):
EfficientFrontier(symbols=['MCFTR.INDX'])
@mark.frontier
def test_bounds_setter_failing(init_efficient_frontier):
with pytest.raises(Exception, match=r'The number of symbols \(2\) and the length of bounds \(3\) should be equal.'):
init_efficient_frontier.bounds = ((0, 1.), (0.5, 1.), (0, 0.5))
@mark.frontier
def test_gmv(init_efficient_frontier):
assert_allclose(init_efficient_frontier.gmv_weights, np.array([0.67501259, 0.32498741]), rtol=1e-2, atol=1e-2)
@mark.frontier
def test_gmv_monthly(init_efficient_frontier):
assert init_efficient_frontier.gmv_monthly[0] == approx(0.026076618401825784, rel=1e-2)
@mark.frontier
def test_gmv_annualized(init_efficient_frontier):
assert init_efficient_frontier.gmv_annualized[0] == approx(0.10198459385117883, rel=1e-2)
@mark.frontier
def test_optimize_return(init_efficient_frontier):
assert init_efficient_frontier.optimize_return(option='max')['Mean_return_monthly'] == approx(0.015324, rel=1e-2)
assert init_efficient_frontier.optimize_return(option='min')['Mean_return_monthly'] == approx(0.008803, rel=1e-2)
@mark.frontier
def test_minimize_risk(init_efficient_frontier):
assert init_efficient_frontier.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)
assert init_efficient_frontier.minimize_risk(target_return=0.139241, monthly_return=False)['SBMX.MOEX'] == approx(0.32498, rel=1e-2)
@mark.frontier
def test_minimize_risk_bounds(init_efficient_frontier_bounds):
assert init_efficient_frontier_bounds.minimize_risk(target_return=0.015324, monthly_return=True)['SBMX.MOEX'] == approx(1, rel=1e-2)
assert init_efficient_frontier_bounds.minimize_risk(target_return=0.1548, monthly_return=False)['SBMX.MOEX'] == approx(0.50030, rel=1e-2)
@mark.frontier
def test_mean_return_range(init_efficient_frontier):
assert_allclose(init_efficient_frontier.mean_return_range, np.array([0.008803, 0.015325]), rtol=1e-2)
@mark.frontier
def test_mean_return_range_bounds(init_efficient_frontier_bounds):
assert_allclose(init_efficient_frontier_bounds.mean_return_range, np.array([0.012064, 0.015325]), rtol=1e-2)
@mark.frontier
def test_ef_points(init_efficient_frontier):
assert init_efficient_frontier.ef_points['Mean return'].iloc[-1] == approx(0.20007879286573038, rel=1e-2)
| true | true |
f728ac8db271b24289507adc9010ad4b0047f98b | 1,243 | py | Python | classview/views.py | SeshinWei/django24 | 73a066f1ebe8caee09b91ab411a76a8fddabb6c3 | [
"MIT"
] | null | null | null | classview/views.py | SeshinWei/django24 | 73a066f1ebe8caee09b91ab411a76a8fddabb6c3 | [
"MIT"
] | null | null | null | classview/views.py | SeshinWei/django24 | 73a066f1ebe8caee09b91ab411a76a8fddabb6c3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
# Create your views here.
"""
类视图必须继承View
类视图中的方法名都必须是请求方法名小写
"""
def my_decorator(view_func):
"""定义装饰器"""
def wrapper(request, *args, **kwargs):
print('装饰器被调用了')
return view_func(request, *args, **kwargs)
return wrapper
# @my_decorator
# def index(request):
# return HttpResponse('ok')
# 将普通装饰器进行转换为方法/类的装饰器
# @method_decorator(要进行转换的装饰器, name='要装饰类中的那个方法)
# @method_decorator(my_decorator, name='get')
class DemoView(View):
"""定义类视图"""
# @my_decorator
@method_decorator(my_decorator)
def get(self, request):
return HttpResponse('get请求业务逻辑')
def post(self, request):
return HttpResponse('post请求业务逻辑')
# 映射机制 动态查找
# hasattr() 判断类中是否有某个成员(属性和方法) bool
# getattr() 获取类中的属性或方法
# __import__() # 动态导包
# GET /template_demo/
def template_demo(request):
"""演示模板使用"""
# render(请求对象, 加载模板文件名, 上下文数据)
# 传入到模板中进行渲染的上下文数据必须是以字典的格式传入
context = {
'name': 'zhangsan',
'alist': [10, 20, 30],
'adict': {'age': 20, 'name': 'ww'}
}
return render(request, 'index.html', context) | 22.6 | 52 | 0.666935 | from django.shortcuts import render
from django.views import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
def my_decorator(view_func):
def wrapper(request, *args, **kwargs):
print('装饰器被调用了')
return view_func(request, *args, **kwargs)
return wrapper
# @method_decorator(my_decorator, name='get')
class DemoView(View):
# @my_decorator
@method_decorator(my_decorator)
def get(self, request):
return HttpResponse('get请求业务逻辑')
def post(self, request):
return HttpResponse('post请求业务逻辑')
# 映射机制 动态查找
# hasattr() 判断类中是否有某个成员(属性和方法) bool
# getattr() 获取类中的属性或方法
# __import__() # 动态导包
# GET /template_demo/
def template_demo(request):
# render(请求对象, 加载模板文件名, 上下文数据)
# 传入到模板中进行渲染的上下文数据必须是以字典的格式传入
context = {
'name': 'zhangsan',
'alist': [10, 20, 30],
'adict': {'age': 20, 'name': 'ww'}
}
return render(request, 'index.html', context) | true | true |
f728acca0a7a5018263431ea24aa5a8ba6852f87 | 154 | py | Python | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | [
"MIT"
] | null | null | null | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | [
"MIT"
] | 4 | 2021-09-17T00:53:47.000Z | 2021-09-24T22:05:13.000Z | tests/reporting/review/api.py | ctk3b/borderline | 7c4ab891b36c97038940dea678718dea8ebf5060 | [
"MIT"
] | null | null | null | import reporting.report_builder.api
import reporting.report_builder.this_is_a_violation
import reporting.report_builder.this_is_a_grandfathered_violation
| 38.5 | 65 | 0.922078 | import reporting.report_builder.api
import reporting.report_builder.this_is_a_violation
import reporting.report_builder.this_is_a_grandfathered_violation
| true | true |
f728b025e2af6db7811c909ca4bae4729748b8f2 | 7,707 | py | Python | Multi_Classification/Multi_Image_Classification.py | KKanda900/Model-Maker | e73c6e1d47b9682657694e4f56ee96a34e3a29ea | [
"MIT"
] | 2 | 2021-09-23T03:09:34.000Z | 2021-11-16T12:05:28.000Z | Multi_Classification/Multi_Image_Classification.py | KKanda900/Model-Maker | e73c6e1d47b9682657694e4f56ee96a34e3a29ea | [
"MIT"
] | null | null | null | Multi_Classification/Multi_Image_Classification.py | KKanda900/Model-Maker | e73c6e1d47b9682657694e4f56ee96a34e3a29ea | [
"MIT"
] | null | null | null | # Primary Python Files for Image Classification
import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # dont show any tensorflow warning messages
import cv2
# Keras libraries used for making the model and tensorflow
import tensorflow, keras
from tensorflow.keras.utils import to_categorical
from keras.layers import Dense,Conv2D,Flatten,MaxPool2D,Dropout
from keras.models import Sequential
# Sklearn library for splitting the data precisely
from sklearn.model_selection import train_test_split
'''
Multi_Image_Classification Class
Description:
1. Identify different sets of images based on the labels you provide.
2. Works based off a sequential model.
3. Uses a Convolutional Neural Network.
'''
class Multi_Image_Classification:
# ------------------------------ Generic Fields Needed for Training ---------------------------------- #
shape = (200,200) # predefine a established shape for training and resizing the images (default)
labels = [] # define the labels to train on
# --------------------------- Training Tools ---------------------------------- #
train_path = './Multi_Classification/train' # define the path where the training images are located
train_labels = None # define the labels (same as testing)
train_images = None # define the images with the training
x_train = None # split the training images for training
y_train = None # split the training labels for training
# ------------------------- Testing Tools -------------------------------------- #
test_path = './Multi_Classification/test' # define the path where the testing images are located
x_val = None # split the training images for testing
y_val = None # split the training labels for testing
test_labels = None # define the testing labels (same as training)
test_images = None # define the testing images
# ----------------------------------- Main Model Tools ------------------------------- #
epoch = 50 # default epoch
batch_size = 10 # default batch size
model = None # define the model (Sequential for Image Classification)
# ------------------------- Define the Functions for Making the model ---------------------- #
# define the labels and images depending on the directory path
def set_data(self, directory_path):
data_labels = [] # define the set of labels according to the name of the file
data_images = [] # define the images
# iterate through all the images in the directory
for filename in os.listdir(directory_path):
# Get the values of the images at the directory path
img = cv2.imread(os.path.join(directory_path, filename))
# Spliting file names and storing the labels for image in list
data_labels.append(filename.split('_')[0])
# Resize all images to a specific shape
img = cv2.resize(img, self.shape)
data_images.append(img) # append the image
data_labels = pd.get_dummies(data_labels).values # Get the categorical data
data_images = np.array(data_images) # Define the image array as a np array for fitting
return data_labels, data_images # return the labels, images for the specific directory
# define the tools for utilzing on creation of the object
def __init__(self, create_model, labels, shape, epoch, batch_size):
np.random.seed(1) # sets the random seed of the NumPy pseudo-random number generator
self.shape = shape # let the user enter the shape of the images to be formed (default 200x200)
# let the user define the labels for their model they want to create
self.labels = labels # default values
# define the training images and labels
self.train_labels, self.train_images = self.set_data(self.train_path)
# Splitting Training data into train and validation dataset
self.x_train,self.x_val,self.y_train,self.y_val = train_test_split(self.train_images,self.train_labels,random_state=1)
# define the test labels and images
self.test_labels, self.test_images = self.set_data(self.test_path)
# define the model for predicition
if create_model == True:
self.model = self.create_model(epoch, batch_size, self.x_train, self.y_train, self.x_val, self.y_val)
# create the model to be used for predicition
def create_model(self, epoch, batch_size, x_train, y_train, x_val, y_val):
model = Sequential() # define the model as sequential
model.add(Conv2D(kernel_size=(3,3), filters=32, activation='tanh', input_shape=(200,200,3,))) # define the first layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the second layer
model.add(MaxPool2D(2,2)) # define the third layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the fourth layer
model.add(MaxPool2D(2,2)) # define the fifth layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the sixth layer
model.add(Flatten()) # define the seventh layer
model.add(Dense(20,activation='relu')) # define the eigth layer
model.add(Dense(15,activation='relu')) # define the ninth layer
model.add(Dense(len(self.labels),activation = 'softmax')) # define the tenth layer (according to the number of labels for the model)
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam') # compile the models with categorical because we are working with multiple labels
history = model.fit(x_train,y_train,epochs=epoch,batch_size=batch_size,validation_data=(x_val,y_val)) # train the model
# after the training is done, define a dictionary that holds the model and history from the training
complete_model = {} # define the dictionary
complete_model['model'] = model # define the model with its key
complete_model['history'] = history # define the history with its key
complete_model['labels'] = self.labels # save the labels into the dictionary
return complete_model # return the model at the end
# function to save the model that was created in the create_model function
def save_model(self, model_name, model):
model.save('./Models/{}.h5'.format(model_name)) # save the model in the models directory
# function to save the model's labels to be used later
def save_labels(self, labels, model_name):
f = open('./Models/{}_Labels.txt'.format(model_name), 'a') # create the .txt file that will contain the labels of the model
# iterate through the labels when the model was first created
for i in range(len(labels)):
f.write("{}\n".format(labels[i])) # write the labels to the file
f.close() # after iterating through all the labels, close the file so the space can be free
# ------------------------------------------------------ Define the functions used for classifiying --------------------------------------------- #
# classifies images based on the model and the selected image
def classify_image(self, image, model):
checkImage = image[0] # get the image
checklabel = image[0] # get the label of the image
predict = model.predict(np.array(checkImage)) # get the predicition
predicted_label = self.labels[np.argmax(predict)] # get the predicted label
return predicted_label # return the predicted label from the labels provided by the user
| 51.724832 | 171 | 0.659141 |
import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import cv2
import tensorflow, keras
from tensorflow.keras.utils import to_categorical
from keras.layers import Dense,Conv2D,Flatten,MaxPool2D,Dropout
from keras.models import Sequential
from sklearn.model_selection import train_test_split
class Multi_Image_Classification:
shape = (200,200)
labels = []
train_path = './Multi_Classification/train'
train_labels = None
train_images = None
x_train = None
y_train = None
test_path = './Multi_Classification/test'
x_val = None
y_val = None
test_labels = None
test_images = None
epoch = 50
batch_size = 10
model = None
def set_data(self, directory_path):
data_labels = []
data_images = []
for filename in os.listdir(directory_path):
img = cv2.imread(os.path.join(directory_path, filename))
data_labels.append(filename.split('_')[0])
img = cv2.resize(img, self.shape)
data_images.append(img)
data_labels = pd.get_dummies(data_labels).values
data_images = np.array(data_images)
return data_labels, data_images
def __init__(self, create_model, labels, shape, epoch, batch_size):
np.random.seed(1)
self.shape = shape
self.labels = labels
self.train_labels, self.train_images = self.set_data(self.train_path)
self.x_train,self.x_val,self.y_train,self.y_val = train_test_split(self.train_images,self.train_labels,random_state=1)
self.test_labels, self.test_images = self.set_data(self.test_path)
if create_model == True:
self.model = self.create_model(epoch, batch_size, self.x_train, self.y_train, self.x_val, self.y_val)
def create_model(self, epoch, batch_size, x_train, y_train, x_val, y_val):
model = Sequential()
model.add(Conv2D(kernel_size=(3,3), filters=32, activation='tanh', input_shape=(200,200,3,)))
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh'))
model.add(Flatten())
model.add(Dense(20,activation='relu'))
model.add(Dense(15,activation='relu'))
model.add(Dense(len(self.labels),activation = 'softmax'))
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')
history = model.fit(x_train,y_train,epochs=epoch,batch_size=batch_size,validation_data=(x_val,y_val))
complete_model = {}
complete_model['model'] = model
complete_model['history'] = history
complete_model['labels'] = self.labels
return complete_model
def save_model(self, model_name, model):
model.save('./Models/{}.h5'.format(model_name))
def save_labels(self, labels, model_name):
f = open('./Models/{}_Labels.txt'.format(model_name), 'a') # create the .txt file that will contain the labels of the model
# iterate through the labels when the model was first created
for i in range(len(labels)):
f.write("{}\n".format(labels[i])) # write the labels to the file
f.close() # after iterating through all the labels, close the file so the space can be free
# ------------------------------------------------------ Define the functions used for classifiying --------------------------------------------- #
# classifies images based on the model and the selected image
def classify_image(self, image, model):
checkImage = image[0] # get the image
checklabel = image[0] # get the label of the image
predict = model.predict(np.array(checkImage)) # get the predicition
predicted_label = self.labels[np.argmax(predict)] # get the predicted label
return predicted_label # return the predicted label from the labels provided by the user
| true | true |
f728b02e232a41f5db5c09c8b25110c9198edcc1 | 5,517 | py | Python | tests/test_dependencies.py | mmaioli/projects | 648f1306a3dde5deb456c9886fb59c73e424d186 | [
"Apache-2.0"
] | null | null | null | tests/test_dependencies.py | mmaioli/projects | 648f1306a3dde5deb456c9886fb59c73e424d186 | [
"Apache-2.0"
] | null | null | null | tests/test_dependencies.py | mmaioli/projects | 648f1306a3dde5deb456c9886fb59c73e424d186 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from json import dumps
from unittest import TestCase
import pytest
from werkzeug.exceptions import NotFound
from projects.controllers.dependencies import list_dependencies, list_next_operators, \
create_dependency, delete_dependency
from projects.controllers.utils import uuid_alpha
from projects.database import engine
from projects.object_storage import BUCKET_NAME
from projects.api.main import app
DEPENDENCY_ID = str(uuid_alpha())
OPERATOR_ID = str(uuid_alpha())
OPERATOR_ID_2 = str(uuid_alpha())
NAME = "foo"
DESCRIPTION = "long foo"
PROJECT_ID = str(uuid_alpha())
EXPERIMENT_ID = str(uuid_alpha())
TASK_ID = str(uuid_alpha())
PARAMETERS = {"coef": 0.1}
POSITION = 0
PARAMETERS = {}
COMMANDS = ["CMD"]
COMMANDS_JSON = dumps(COMMANDS)
ARGUMENTS = ["ARG"]
ARGUMENTS_JSON = dumps(ARGUMENTS)
IMAGE = "platiagro/platiagro-notebook-image-test:0.1.0"
TAGS = ["PREDICTOR"]
TAGS_JSON = dumps(TAGS)
PARAMETERS_JSON = dumps(PARAMETERS)
EXPERIMENT_NOTEBOOK_PATH = f"minio://{BUCKET_NAME}/tasks/{TASK_ID}/Experiment.ipynb"
DEPLOYMENT_NOTEBOOK_PATH = f"minio://{BUCKET_NAME}/tasks/{TASK_ID}/Deployment.ipynb"
CREATED_AT = "2000-01-01 00:00:00"
CREATED_AT_ISO = "2000-01-01T00:00:00"
UPDATED_AT = "2000-01-01 00:00:00"
UPDATED_AT_ISO = "2000-01-01T00:00:00"
class TestDependencies(TestCase):
def setUp(self):
self.maxDiff = None
conn = engine.connect()
text = (
f"INSERT INTO projects (uuid, name, created_at, updated_at) "
f"VALUES ('{PROJECT_ID}', '{NAME}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
f"VALUES ('{EXPERIMENT_ID}', '{NAME}', '{PROJECT_ID}', '{POSITION}', 1, '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO tasks (uuid, name, description, image, commands, arguments, tags, experiment_notebook_path, deployment_notebook_path, is_default, created_at, updated_at) "
f"VALUES ('{TASK_ID}', '{NAME}', '{DESCRIPTION}', '{IMAGE}', '{COMMANDS_JSON}', '{ARGUMENTS_JSON}', '{TAGS_JSON}', '{EXPERIMENT_NOTEBOOK_PATH}', '{DEPLOYMENT_NOTEBOOK_PATH}', 0, '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
f"VALUES ('{OPERATOR_ID}', '{EXPERIMENT_ID}', '{TASK_ID}', '{PARAMETERS_JSON}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
f"VALUES ('{OPERATOR_ID_2}', '{EXPERIMENT_ID}', '{TASK_ID}', '{PARAMETERS_JSON}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO dependencies (uuid, operator_id, dependency) "
f"VALUES ('{DEPENDENCY_ID}', '{OPERATOR_ID}', '{OPERATOR_ID_2}')"
)
conn.execute(text)
conn.close()
def tearDown(self):
conn = engine.connect()
text = f"DELETE FROM dependencies WHERE operator_id in" \
f" (SELECT uuid FROM operators where task_id = '{TASK_ID}')"
conn.execute(text)
text = f"DELETE FROM operators WHERE experiment_id in" \
f"(SELECT uuid FROM experiments where project_id = '{PROJECT_ID}')"
conn.execute(text)
text = f"DELETE FROM tasks WHERE uuid = '{TASK_ID}'"
conn.execute(text)
text = f"DELETE FROM experiments WHERE project_id = '{PROJECT_ID}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE uuid = '{PROJECT_ID}'"
conn.execute(text)
conn.close()
def test_list_dependencies(self):
result = list_dependencies(OPERATOR_ID)
expected = [
{
"uuid": DEPENDENCY_ID,
"operatorId": OPERATOR_ID,
"dependency": OPERATOR_ID_2
}
]
self.assertListEqual(expected, result)
def test_list_next_operators(self):
result = list_next_operators(OPERATOR_ID_2)
expected = [OPERATOR_ID]
self.assertListEqual(expected, result)
def test_create_dependency(self):
result = create_dependency(OPERATOR_ID, OPERATOR_ID_2)
expected = {
"operatorId": OPERATOR_ID,
"dependency": OPERATOR_ID_2
}
# uuid are machine-generated
# we assert it exist, but we don't assert your values
machine_generated = ["uuid"]
for attr in machine_generated:
self.assertIn(attr, result)
del result[attr]
self.assertDictEqual(expected, result)
def test_update_dependencies(self):
with app.test_client() as c:
rv = c.post(f"/projects/{PROJECT_ID}/experiments", json={
"name": "test2",
"copy_from": f"{EXPERIMENT_ID}"
})
self.assertEqual(rv.status_code, 200)
def test_delete_dependency(self):
with pytest.raises(NotFound) as e:
assert delete_dependency("unk")
assert str(e.value) == "404 Not Found: The specified dependency does not exist"
result = delete_dependency(DEPENDENCY_ID)
expected = {"message": "Dependency deleted"}
self.assertDictEqual(expected, result)
| 36.296053 | 222 | 0.63404 |
from json import dumps
from unittest import TestCase
import pytest
from werkzeug.exceptions import NotFound
from projects.controllers.dependencies import list_dependencies, list_next_operators, \
create_dependency, delete_dependency
from projects.controllers.utils import uuid_alpha
from projects.database import engine
from projects.object_storage import BUCKET_NAME
from projects.api.main import app
DEPENDENCY_ID = str(uuid_alpha())
OPERATOR_ID = str(uuid_alpha())
OPERATOR_ID_2 = str(uuid_alpha())
NAME = "foo"
DESCRIPTION = "long foo"
PROJECT_ID = str(uuid_alpha())
EXPERIMENT_ID = str(uuid_alpha())
TASK_ID = str(uuid_alpha())
PARAMETERS = {"coef": 0.1}
POSITION = 0
PARAMETERS = {}
COMMANDS = ["CMD"]
COMMANDS_JSON = dumps(COMMANDS)
ARGUMENTS = ["ARG"]
ARGUMENTS_JSON = dumps(ARGUMENTS)
IMAGE = "platiagro/platiagro-notebook-image-test:0.1.0"
TAGS = ["PREDICTOR"]
TAGS_JSON = dumps(TAGS)
PARAMETERS_JSON = dumps(PARAMETERS)
EXPERIMENT_NOTEBOOK_PATH = f"minio://{BUCKET_NAME}/tasks/{TASK_ID}/Experiment.ipynb"
DEPLOYMENT_NOTEBOOK_PATH = f"minio://{BUCKET_NAME}/tasks/{TASK_ID}/Deployment.ipynb"
CREATED_AT = "2000-01-01 00:00:00"
CREATED_AT_ISO = "2000-01-01T00:00:00"
UPDATED_AT = "2000-01-01 00:00:00"
UPDATED_AT_ISO = "2000-01-01T00:00:00"
class TestDependencies(TestCase):
def setUp(self):
self.maxDiff = None
conn = engine.connect()
text = (
f"INSERT INTO projects (uuid, name, created_at, updated_at) "
f"VALUES ('{PROJECT_ID}', '{NAME}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
f"VALUES ('{EXPERIMENT_ID}', '{NAME}', '{PROJECT_ID}', '{POSITION}', 1, '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO tasks (uuid, name, description, image, commands, arguments, tags, experiment_notebook_path, deployment_notebook_path, is_default, created_at, updated_at) "
f"VALUES ('{TASK_ID}', '{NAME}', '{DESCRIPTION}', '{IMAGE}', '{COMMANDS_JSON}', '{ARGUMENTS_JSON}', '{TAGS_JSON}', '{EXPERIMENT_NOTEBOOK_PATH}', '{DEPLOYMENT_NOTEBOOK_PATH}', 0, '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
f"VALUES ('{OPERATOR_ID}', '{EXPERIMENT_ID}', '{TASK_ID}', '{PARAMETERS_JSON}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO operators (uuid, experiment_id, task_id, parameters, created_at, updated_at) "
f"VALUES ('{OPERATOR_ID_2}', '{EXPERIMENT_ID}', '{TASK_ID}', '{PARAMETERS_JSON}', '{CREATED_AT}', '{UPDATED_AT}')"
)
conn.execute(text)
text = (
f"INSERT INTO dependencies (uuid, operator_id, dependency) "
f"VALUES ('{DEPENDENCY_ID}', '{OPERATOR_ID}', '{OPERATOR_ID_2}')"
)
conn.execute(text)
conn.close()
def tearDown(self):
conn = engine.connect()
text = f"DELETE FROM dependencies WHERE operator_id in" \
f" (SELECT uuid FROM operators where task_id = '{TASK_ID}')"
conn.execute(text)
text = f"DELETE FROM operators WHERE experiment_id in" \
f"(SELECT uuid FROM experiments where project_id = '{PROJECT_ID}')"
conn.execute(text)
text = f"DELETE FROM tasks WHERE uuid = '{TASK_ID}'"
conn.execute(text)
text = f"DELETE FROM experiments WHERE project_id = '{PROJECT_ID}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE uuid = '{PROJECT_ID}'"
conn.execute(text)
conn.close()
def test_list_dependencies(self):
    """list_dependencies returns exactly the single fixture dependency."""
    expected = [
        {
            "uuid": DEPENDENCY_ID,
            "operatorId": OPERATOR_ID,
            "dependency": OPERATOR_ID_2,
        }
    ]
    self.assertListEqual(expected, list_dependencies(OPERATOR_ID))
def test_list_next_operators(self):
    """OPERATOR_ID depends on OPERATOR_ID_2, so it is listed as its successor."""
    self.assertListEqual([OPERATOR_ID], list_next_operators(OPERATOR_ID_2))
def test_create_dependency(self):
    """create_dependency returns the new row; its uuid is machine generated."""
    result = create_dependency(OPERATOR_ID, OPERATOR_ID_2)
    # The uuid value is unpredictable: assert presence, then strip it before
    # comparing the remaining deterministic fields.
    for attr in ("uuid",):
        self.assertIn(attr, result)
        del result[attr]
    self.assertDictEqual(
        {
            "operatorId": OPERATOR_ID,
            "dependency": OPERATOR_ID_2,
        },
        result,
    )
def test_update_dependencies(self):
    """Copying an experiment (which clones operators/dependencies) succeeds."""
    with app.test_client() as client:
        response = client.post(f"/projects/{PROJECT_ID}/experiments", json={
            "name": "test2",
            "copy_from": f"{EXPERIMENT_ID}",
        })
        self.assertEqual(response.status_code, 200)
def test_delete_dependency(self):
    """Unknown ids raise NotFound; deleting the fixture row succeeds."""
    with pytest.raises(NotFound) as excinfo:
        assert delete_dependency("unk")
    assert str(excinfo.value) == "404 Not Found: The specified dependency does not exist"
    self.assertDictEqual(
        {"message": "Dependency deleted"},
        delete_dependency(DEPENDENCY_ID),
    )
| true | true |
f728b0e231c268bd33297e6a7f35127229b428a1 | 1,689 | py | Python | vulcan_medication_bundle/_nbdev.py | pete88b/vulcan_medication_bundle | d0239805ec04430abb5e92572e984e2cd343a49c | [
"Apache-2.0"
] | null | null | null | vulcan_medication_bundle/_nbdev.py | pete88b/vulcan_medication_bundle | d0239805ec04430abb5e92572e984e2cd343a49c | [
"Apache-2.0"
] | null | null | null | vulcan_medication_bundle/_nbdev.py | pete88b/vulcan_medication_bundle | d0239805ec04430abb5e92572e984e2cd343a49c | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"request_headers": "00_core.ipynb",
"get_as_raw_json": "00_core.ipynb",
"get_next_as_raw_json": "00_core.ipynb",
"timestamp_now": "00_core.ipynb",
"new_bundle": "00_core.ipynb",
"new_list": "00_core.ipynb",
"extract_references_from_resource": "00_core.ipynb",
"extract_references": "00_core.ipynb",
"get_by_reference": "00_core.ipynb",
"filter_bundle": "00_core.ipynb",
"create_single_patient_medication_bundle": "10_per_patient.ipynb",
"save_single_patient_medication_bundle": "10_per_patient.ipynb",
"handle_entry_search": "10_per_patient.ipynb",
"medication_status_filter": "10_per_patient.ipynb",
"do_not_perform_filter": "10_per_patient.ipynb",
"CM_EXCLUDE_STATUS_MAP": "20a_status_filter.ipynb",
"get_negated_list": "20a_status_filter.ipynb",
"single_patient_medication_bundle": "30_cli.ipynb",
"remove_non_utf8": "30_cli.ipynb",
"get_single_patient_medication_bundle": "50a_web_demo.ipynb",
"create_app": "50_web_app.ipynb",
"bp": "50a_web_demo.ipynb",
"index": "50a_web_demo.ipynb",
"convert_to_cdisc": "50a_web_demo.ipynb"}
modules = ["core.py",
"per_patient.py",
"status_filter.py",
"cli.py",
"web/app.py",
"web/demo.py"]
doc_url = "https://pete88b.github.io/vulcan_medication_bundle/"
git_url = "https://github.com/pete88b/vulcan_medication_bundle/tree/main/"
def custom_doc_links(name): return None
| 40.214286 | 75 | 0.651273 |
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"request_headers": "00_core.ipynb",
"get_as_raw_json": "00_core.ipynb",
"get_next_as_raw_json": "00_core.ipynb",
"timestamp_now": "00_core.ipynb",
"new_bundle": "00_core.ipynb",
"new_list": "00_core.ipynb",
"extract_references_from_resource": "00_core.ipynb",
"extract_references": "00_core.ipynb",
"get_by_reference": "00_core.ipynb",
"filter_bundle": "00_core.ipynb",
"create_single_patient_medication_bundle": "10_per_patient.ipynb",
"save_single_patient_medication_bundle": "10_per_patient.ipynb",
"handle_entry_search": "10_per_patient.ipynb",
"medication_status_filter": "10_per_patient.ipynb",
"do_not_perform_filter": "10_per_patient.ipynb",
"CM_EXCLUDE_STATUS_MAP": "20a_status_filter.ipynb",
"get_negated_list": "20a_status_filter.ipynb",
"single_patient_medication_bundle": "30_cli.ipynb",
"remove_non_utf8": "30_cli.ipynb",
"get_single_patient_medication_bundle": "50a_web_demo.ipynb",
"create_app": "50_web_app.ipynb",
"bp": "50a_web_demo.ipynb",
"index": "50a_web_demo.ipynb",
"convert_to_cdisc": "50a_web_demo.ipynb"}
modules = ["core.py",
"per_patient.py",
"status_filter.py",
"cli.py",
"web/app.py",
"web/demo.py"]
doc_url = "https://pete88b.github.io/vulcan_medication_bundle/"
git_url = "https://github.com/pete88b/vulcan_medication_bundle/tree/main/"
def custom_doc_links(name): return None
| true | true |
f728b1674cac977ba891f01d63962d8fc34a7577 | 312 | py | Python | app.py | rob-med/BotPitchfork | d3d2991024dcb36a1077247e11242e3c0ac6ca34 | [
"MIT"
] | 1 | 2021-01-05T16:45:36.000Z | 2021-01-05T16:45:36.000Z | app.py | rob-med/BotPitchfork | d3d2991024dcb36a1077247e11242e3c0ac6ca34 | [
"MIT"
] | null | null | null | app.py | rob-med/BotPitchfork | d3d2991024dcb36a1077247e11242e3c0ac6ca34 | [
"MIT"
] | 1 | 2021-12-05T20:29:19.000Z | 2021-12-05T20:29:19.000Z | import os
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Serve the landing page template."""
    return render_template('index.html')
if __name__ == '__main__':
    # PORT env var (e.g. injected by the hosting platform) overrides 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port, debug=True)
| 22.285714 | 68 | 0.682692 | import os
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
| true | true |
f728b38692c6b8242f4ba96a6d2a1375387a8827 | 14,292 | py | Python | experiments/experiments_img.py | BeeQC/ANODE-reproducibility | 9d6b5a297302cdaa0bbc3908de1a94f3c28c0606 | [
"MIT"
] | null | null | null | experiments/experiments_img.py | BeeQC/ANODE-reproducibility | 9d6b5a297302cdaa0bbc3908de1a94f3c28c0606 | [
"MIT"
] | null | null | null | experiments/experiments_img.py | BeeQC/ANODE-reproducibility | 9d6b5a297302cdaa0bbc3908de1a94f3c28c0606 | [
"MIT"
] | null | null | null | import json
import matplotlib
matplotlib.use('Agg') # This is hacky (useful for running on VMs)
import numpy as np
import os
import time
import torch
from anode.models import ODENet
from anode.conv_models import ConvODENet
from anode.discrete_models import ResNet
from anode.training import Trainer
from experiments.dataloaders import mnist, cifar10, tiny_imagenet
from viz.plots import histories_plt
def run_and_save_experiments_img(device, path_to_config):
    """Run the image-classification experiments described by a config file.

    Results are flushed to disk after every epoch, so partial results survive
    runs where the ODE solver's NFEs become excessively large or the step
    size underflows.

    Parameters
    ----------
    device : torch.device
        Device models are trained on.
    path_to_config : string
        Path to config json file.
    """
    # Open config file
    with open(path_to_config) as config_file:
        config = json.load(config_file)
    # Create a folder to store experiment results, named by timestamp and id
    timestamp = time.strftime("%Y-%m-%d_%H-%M")
    directory = "img_results_{}_{}".format(timestamp, config["id"])
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Save config file in experiment directory for reproducibility
    with open(directory + '/config.json', 'w') as config_file:
        json.dump(config, config_file)
    num_reps = config["num_reps"]
    dataset = config["dataset"]
    model_configs = config["model_configs"]
    training_config = config["training_config"]
    results = {"dataset": dataset, "model_info": []}
    # Dataset selection. NOTE(review): these are independent ifs (not elif);
    # an unrecognised dataset name leaves data_loader/img_size undefined.
    if dataset == 'mnist':
        data_loader, test_loader = mnist(training_config["batch_size"])
        img_size = (1, 28, 28)
        output_dim = 10
    if dataset == 'cifar10':
        data_loader, test_loader = cifar10(training_config["batch_size"])
        img_size = (3, 32, 32)
        output_dim = 10
    if dataset == 'imagenet':
        # NOTE(review): no test_loader is created here, so any imagenet
        # model_config with "validation" set would hit a NameError -- confirm.
        data_loader = tiny_imagenet(training_config["batch_size"])
        img_size = (3, 64, 64)
        output_dim = 200
    only_success = True  # Boolean to keep track of any experiments failing
    for i, model_config in enumerate(model_configs):
        results["model_info"].append({})
        # Keep track of losses and nfes (one sub-list per repetition)
        accuracy_histories = []
        epoch_accuracy_histories = []
        loss_histories = []
        nfe_histories = []
        bnfe_histories = []
        total_nfe_histories = []
        epoch_loss_histories = []
        epoch_nfe_histories = []
        epoch_bnfe_histories = []
        epoch_total_nfe_histories = []
        # Keep track of models potentially failing.
        # NOTE(review): the except branch below can set file_name_root to
        # 'unknown', which is not a key of this dict (KeyError) -- confirm.
        model_stats = {
            "exceeded": {"count": 0, "final_losses": [], "final_nfes": [],
                         "final_bnfes": []},
            "underflow": {"count": 0, "final_losses": [], "final_nfes": [],
                          "final_bnfes": []},
            "success": {"count": 0, "final_losses": [], "final_nfes": [],
                        "final_bnfes": []}
        }
        if model_config["validation"]:
            epoch_loss_val_histories = []
        is_ode = model_config["type"] == "odenet" or model_config["type"] == "anode"
        for j in range(num_reps):
            print("{}/{} model, {}/{} rep".format(i + 1, len(model_configs), j + 1, num_reps))
            if is_ode:
                # Plain NODE uses no augmentation; ANODE reads it from config
                if model_config["type"] == "odenet":
                    augment_dim = 0
                else:
                    augment_dim = model_config["augment_dim"]
                model = ConvODENet(device, img_size, model_config["num_filters"],
                                   output_dim=output_dim,
                                   augment_dim=augment_dim,
                                   time_dependent=model_config["time_dependent"],
                                   non_linearity=model_config["non_linearity"],
                                   adjoint=True)
            else:
                # NOTE(review): data_dim is never defined in this function,
                # so the ResNet branch raises NameError if used -- confirm
                # whether it should be derived from img_size.
                model = ResNet(data_dim, model_config["hidden_dim"],
                               model_config["num_layers"],
                               output_dim=output_dim,
                               is_img=True)
            model.to(device)
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=model_config["lr"],
                                         weight_decay=model_config["weight_decay"])
            trainer = Trainer(model, optimizer, device,
                              classification=True,
                              print_freq=training_config["print_freq"],
                              record_freq=training_config["record_freq"],
                              verbose=True,
                              save_dir=(directory, '{}_{}'.format(i, j)))
            accuracy_histories.append([])
            epoch_accuracy_histories.append([])
            loss_histories.append([])
            epoch_loss_histories.append([])
            nfe_histories.append([])
            epoch_nfe_histories.append([])
            bnfe_histories.append([])
            epoch_bnfe_histories.append([])
            total_nfe_histories.append([])
            epoch_total_nfe_histories.append([])
            if model_config["validation"]:
                epoch_loss_val_histories.append([])
            # Train one epoch at a time, as NODEs can underflow or exceed the
            # maximum NFEs
            for epoch in range(training_config["epochs"]):
                print("\nEpoch {}".format(epoch + 1))
                try:
                    trainer.train(data_loader, 1)
                    end_training = False
                except AssertionError as e:
                    only_success = False
                    # Assertion error means we either underflowed or exceeded
                    # the maximum number of steps
                    error_message = e.args[0]
                    # Error message in torchdiffeq for max_num_steps starts
                    # with 'max_num_steps'
                    if error_message.startswith("max_num_steps"):
                        print("Maximum number of steps exceeded")
                        file_name_root = 'exceeded'
                    elif error_message.startswith("underflow"):
                        print("Underflow")
                        file_name_root = 'underflow'
                    else:
                        print("Unknown assertion error")
                        file_name_root = 'unknown'
                    model_stats[file_name_root]["count"] += 1
                    # Record the last buffered loss/NFE values (None if empty)
                    if len(trainer.buffer['loss']):
                        final_loss = np.mean(trainer.buffer['loss'])
                    else:
                        final_loss = None
                    model_stats[file_name_root]["final_losses"].append(final_loss)
                    if len(trainer.buffer['nfe']):
                        final_nfes = np.mean(trainer.buffer['nfe'])
                    else:
                        final_nfes = None
                    model_stats[file_name_root]["final_nfes"].append(final_nfes)
                    if len(trainer.buffer['bnfe']):
                        final_bnfes = np.mean(trainer.buffer['bnfe'])
                    else:
                        final_bnfes = None
                    model_stats[file_name_root]["final_bnfes"].append(final_bnfes)
                    # Save final NFEs before error happened
                    with open(directory + '/{}_{}_{}.json'.format(file_name_root, i, j), 'w') as f:
                        json.dump({"forward": trainer.nfe_buffer, "backward": trainer.bnfe_buffer}, f)
                    end_training = True
                # Save info at every epoch
                accuracy_histories[-1] = trainer.histories['accuracy_history']
                epoch_accuracy_histories[-1] = trainer.histories['epoch_accuracy_history']
                loss_histories[-1] = trainer.histories['loss_history']
                epoch_loss_histories[-1] = trainer.histories['epoch_loss_history']
                if is_ode:
                    nfe_histories[-1] = trainer.histories['nfe_history']
                    epoch_nfe_histories[-1] = trainer.histories['epoch_nfe_history']
                    bnfe_histories[-1] = trainer.histories['bnfe_history']
                    epoch_bnfe_histories[-1] = trainer.histories['epoch_bnfe_history']
                    total_nfe_histories[-1] = trainer.histories['total_nfe_history']
                    epoch_total_nfe_histories[-1] = trainer.histories['epoch_total_nfe_history']
                if model_config["validation"]:
                    epoch_loss_val = dataset_mean_loss(trainer, test_loader, device)
                    if epoch == 0:
                        epoch_loss_val_histories[-1] = [epoch_loss_val]
                    else:
                        epoch_loss_val_histories[-1].append(epoch_loss_val)
                results["model_info"][-1]["type"] = model_config["type"]
                results["model_info"][-1]["loss_history"] = loss_histories
                results["model_info"][-1]["accuracy_history"] = accuracy_histories
                results["model_info"][-1]["epoch_accuracy_history"] = epoch_accuracy_histories
                results["model_info"][-1]["epoch_loss_history"] = epoch_loss_histories
                if model_config["validation"]:
                    results["model_info"][-1]["epoch_loss_val_history"] = epoch_loss_val_histories
                if is_ode:
                    results["model_info"][-1]["epoch_nfe_history"] = epoch_nfe_histories
                    results["model_info"][-1]["nfe_history"] = nfe_histories
                    results["model_info"][-1]["epoch_bnfe_history"] = epoch_bnfe_histories
                    results["model_info"][-1]["bnfe_history"] = bnfe_histories
                    results["model_info"][-1]["epoch_total_nfe_history"] = epoch_total_nfe_histories
                    results["model_info"][-1]["total_nfe_history"] = total_nfe_histories
                # Save losses and nfes at every epoch
                with open(directory + '/losses_and_nfes.json', 'w') as f:
                    json.dump(results['model_info'], f)
                # If training failed, move on to next rep
                if end_training:
                    break
                # If we reached end of training, increment success counter
                if epoch == training_config["epochs"] - 1:
                    model_stats["success"]["count"] += 1
                    if len(trainer.buffer['loss']):
                        final_loss = np.mean(trainer.buffer['loss'])
                    else:
                        final_loss = None
                    model_stats["success"]["final_losses"].append(final_loss)
                    if len(trainer.buffer['nfe']):
                        final_nfes = np.mean(trainer.buffer['nfe'])
                    else:
                        final_nfes = None
                    model_stats["success"]["final_nfes"].append(final_nfes)
                    if len(trainer.buffer['bnfe']):
                        final_bnfes = np.mean(trainer.buffer['bnfe'])
                    else:
                        final_bnfes = None
                    model_stats["success"]["final_bnfes"].append(final_bnfes)
        # Save model stats (one file per model config)
        with open(directory + '/model_stats{}.json'.format(i), 'w') as f:
            json.dump(model_stats, f)
    # Create plots
    # Extract size of augmented dims
    augment_labels = ['p = 0' if model_config['type'] == 'odenet' else 'p = {}'.format(model_config['augment_dim'])
                      for model_config in config['model_configs']]
    # Create losses figure
    # Note that we can only calculate mean loss if all models trained to
    # completion. Therefore we only include mean if only_success is True
    histories_plt(results["model_info"], plot_type='loss', labels=augment_labels,
                  include_mean=only_success, save_fig=directory + '/losses.png')
    histories_plt(results["model_info"], plot_type='loss', labels=augment_labels,
                  include_mean=only_success, shaded_err=True, save_fig=directory + '/losses_shaded.png')
    # Create NFE plots if ODE model is included
    contains_ode = False
    for model_config in config["model_configs"]:
        if model_config["type"] == "odenet" or model_config["type"] == "anode":
            contains_ode = True
            break
    if contains_ode:
        # If adjoint method was used, plot forwards, backwards and total nfes.
        # (Uses the last trainer from the loop above.)
        if trainer.model.odeblock.adjoint:
            nfe_types = ['nfe', 'bnfe', 'total_nfe']
        else:
            nfe_types = ['nfe']
        for nfe_type in nfe_types:
            histories_plt(results["model_info"], plot_type='nfe', labels=augment_labels,
                          include_mean=only_success, nfe_type=nfe_type,
                          save_fig=directory + '/{}s.png'.format(nfe_type))
            histories_plt(results["model_info"], plot_type='nfe', labels=augment_labels,
                          include_mean=only_success, shaded_err=True, nfe_type=nfe_type,
                          save_fig=directory + '/{}s_shaded.png'.format(nfe_type))
            # NOTE(review): the two calls below are identical duplicates,
            # overwriting the same file -- presumably one was meant to use
            # shaded_err=True; confirm.
            histories_plt(results["model_info"], plot_type='nfe_vs_loss', labels=augment_labels,
                          include_mean=only_success, nfe_type=nfe_type,
                          save_fig=directory + '/{}_vs_loss.png'.format(nfe_type))
            histories_plt(results["model_info"], plot_type='nfe_vs_loss', labels=augment_labels,
                          include_mean=only_success, nfe_type=nfe_type,
                          save_fig=directory + '/{}_vs_loss.png'.format(nfe_type))
def dataset_mean_loss(trainer, data_loader, device):
    """Return the mean loss of the trainer's model over a dataset.

    Useful for computing validation loss.

    Parameters
    ----------
    trainer : training.Trainer instance
        Trainer holding the model and loss function to evaluate.
    data_loader : torch.utils.data.DataLoader
    device : torch.device
    """
    total_loss = 0.
    for inputs, targets in data_loader:
        inputs = inputs.to(device)
        targets = targets.to(device)
        predictions = trainer.model(inputs)
        total_loss += trainer._loss(predictions, targets).item()
    # Average over batches (matches how the per-epoch training loss is kept)
    return total_loss / len(data_loader)
| 44.111111 | 115 | 0.559124 | import json
import matplotlib
matplotlib.use('Agg')
import numpy as np
import os
import time
import torch
from anode.models import ODENet
from anode.conv_models import ConvODENet
from anode.discrete_models import ResNet
from anode.training import Trainer
from experiments.dataloaders import mnist, cifar10, tiny_imagenet
from viz.plots import histories_plt
def run_and_save_experiments_img(device, path_to_config):
with open(path_to_config) as config_file:
config = json.load(config_file)
timestamp = time.strftime("%Y-%m-%d_%H-%M")
directory = "img_results_{}_{}".format(timestamp, config["id"])
if not os.path.exists(directory):
os.makedirs(directory)
with open(directory + '/config.json', 'w') as config_file:
json.dump(config, config_file)
num_reps = config["num_reps"]
dataset = config["dataset"]
model_configs = config["model_configs"]
training_config = config["training_config"]
results = {"dataset": dataset, "model_info": []}
if dataset == 'mnist':
data_loader, test_loader = mnist(training_config["batch_size"])
img_size = (1, 28, 28)
output_dim = 10
if dataset == 'cifar10':
data_loader, test_loader = cifar10(training_config["batch_size"])
img_size = (3, 32, 32)
output_dim = 10
if dataset == 'imagenet':
data_loader = tiny_imagenet(training_config["batch_size"])
img_size = (3, 64, 64)
output_dim = 200
only_success = True
for i, model_config in enumerate(model_configs):
results["model_info"].append({})
accuracy_histories = []
epoch_accuracy_histories = []
loss_histories = []
nfe_histories = []
bnfe_histories = []
total_nfe_histories = []
epoch_loss_histories = []
epoch_nfe_histories = []
epoch_bnfe_histories = []
epoch_total_nfe_histories = []
model_stats = {
"exceeded": {"count": 0, "final_losses": [], "final_nfes": [],
"final_bnfes": []},
"underflow": {"count": 0, "final_losses": [], "final_nfes": [],
"final_bnfes": []},
"success": {"count": 0, "final_losses": [], "final_nfes": [],
"final_bnfes": []}
}
if model_config["validation"]:
epoch_loss_val_histories = []
is_ode = model_config["type"] == "odenet" or model_config["type"] == "anode"
for j in range(num_reps):
print("{}/{} model, {}/{} rep".format(i + 1, len(model_configs), j + 1, num_reps))
if is_ode:
if model_config["type"] == "odenet":
augment_dim = 0
else:
augment_dim = model_config["augment_dim"]
model = ConvODENet(device, img_size, model_config["num_filters"],
output_dim=output_dim,
augment_dim=augment_dim,
time_dependent=model_config["time_dependent"],
non_linearity=model_config["non_linearity"],
adjoint=True)
else:
model = ResNet(data_dim, model_config["hidden_dim"],
model_config["num_layers"],
output_dim=output_dim,
is_img=True)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(),
lr=model_config["lr"],
weight_decay=model_config["weight_decay"])
trainer = Trainer(model, optimizer, device,
classification=True,
print_freq=training_config["print_freq"],
record_freq=training_config["record_freq"],
verbose=True,
save_dir=(directory, '{}_{}'.format(i, j)))
accuracy_histories.append([])
epoch_accuracy_histories.append([])
loss_histories.append([])
epoch_loss_histories.append([])
nfe_histories.append([])
epoch_nfe_histories.append([])
bnfe_histories.append([])
epoch_bnfe_histories.append([])
total_nfe_histories.append([])
epoch_total_nfe_histories.append([])
if model_config["validation"]:
epoch_loss_val_histories.append([])
for epoch in range(training_config["epochs"]):
print("\nEpoch {}".format(epoch + 1))
try:
trainer.train(data_loader, 1)
end_training = False
except AssertionError as e:
only_success = False
error_message = e.args[0]
if error_message.startswith("max_num_steps"):
print("Maximum number of steps exceeded")
file_name_root = 'exceeded'
elif error_message.startswith("underflow"):
print("Underflow")
file_name_root = 'underflow'
else:
print("Unknown assertion error")
file_name_root = 'unknown'
model_stats[file_name_root]["count"] += 1
if len(trainer.buffer['loss']):
final_loss = np.mean(trainer.buffer['loss'])
else:
final_loss = None
model_stats[file_name_root]["final_losses"].append(final_loss)
if len(trainer.buffer['nfe']):
final_nfes = np.mean(trainer.buffer['nfe'])
else:
final_nfes = None
model_stats[file_name_root]["final_nfes"].append(final_nfes)
if len(trainer.buffer['bnfe']):
final_bnfes = np.mean(trainer.buffer['bnfe'])
else:
final_bnfes = None
model_stats[file_name_root]["final_bnfes"].append(final_bnfes)
with open(directory + '/{}_{}_{}.json'.format(file_name_root, i, j), 'w') as f:
json.dump({"forward": trainer.nfe_buffer, "backward": trainer.bnfe_buffer}, f)
end_training = True
accuracy_histories[-1] = trainer.histories['accuracy_history']
epoch_accuracy_histories[-1] = trainer.histories['epoch_accuracy_history']
loss_histories[-1] = trainer.histories['loss_history']
epoch_loss_histories[-1] = trainer.histories['epoch_loss_history']
if is_ode:
nfe_histories[-1] = trainer.histories['nfe_history']
epoch_nfe_histories[-1] = trainer.histories['epoch_nfe_history']
bnfe_histories[-1] = trainer.histories['bnfe_history']
epoch_bnfe_histories[-1] = trainer.histories['epoch_bnfe_history']
total_nfe_histories[-1] = trainer.histories['total_nfe_history']
epoch_total_nfe_histories[-1] = trainer.histories['epoch_total_nfe_history']
if model_config["validation"]:
epoch_loss_val = dataset_mean_loss(trainer, test_loader, device)
if epoch == 0:
epoch_loss_val_histories[-1] = [epoch_loss_val]
else:
epoch_loss_val_histories[-1].append(epoch_loss_val)
results["model_info"][-1]["type"] = model_config["type"]
results["model_info"][-1]["loss_history"] = loss_histories
results["model_info"][-1]["accuracy_history"] = accuracy_histories
results["model_info"][-1]["epoch_accuracy_history"] = epoch_accuracy_histories
results["model_info"][-1]["epoch_loss_history"] = epoch_loss_histories
if model_config["validation"]:
results["model_info"][-1]["epoch_loss_val_history"] = epoch_loss_val_histories
if is_ode:
results["model_info"][-1]["epoch_nfe_history"] = epoch_nfe_histories
results["model_info"][-1]["nfe_history"] = nfe_histories
results["model_info"][-1]["epoch_bnfe_history"] = epoch_bnfe_histories
results["model_info"][-1]["bnfe_history"] = bnfe_histories
results["model_info"][-1]["epoch_total_nfe_history"] = epoch_total_nfe_histories
results["model_info"][-1]["total_nfe_history"] = total_nfe_histories
with open(directory + '/losses_and_nfes.json', 'w') as f:
json.dump(results['model_info'], f)
if end_training:
break
if epoch == training_config["epochs"] - 1:
model_stats["success"]["count"] += 1
if len(trainer.buffer['loss']):
final_loss = np.mean(trainer.buffer['loss'])
else:
final_loss = None
model_stats["success"]["final_losses"].append(final_loss)
if len(trainer.buffer['nfe']):
final_nfes = np.mean(trainer.buffer['nfe'])
else:
final_nfes = None
model_stats["success"]["final_nfes"].append(final_nfes)
if len(trainer.buffer['bnfe']):
final_bnfes = np.mean(trainer.buffer['bnfe'])
else:
final_bnfes = None
model_stats["success"]["final_bnfes"].append(final_bnfes)
with open(directory + '/model_stats{}.json'.format(i), 'w') as f:
json.dump(model_stats, f)
augment_labels = ['p = 0' if model_config['type'] == 'odenet' else 'p = {}'.format(model_config['augment_dim'])
for model_config in config['model_configs']]
histories_plt(results["model_info"], plot_type='loss', labels=augment_labels,
include_mean=only_success, save_fig=directory + '/losses.png')
histories_plt(results["model_info"], plot_type='loss', labels=augment_labels,
include_mean=only_success, shaded_err=True, save_fig=directory + '/losses_shaded.png')
contains_ode = False
for model_config in config["model_configs"]:
if model_config["type"] == "odenet" or model_config["type"] == "anode":
contains_ode = True
break
if contains_ode:
if trainer.model.odeblock.adjoint:
nfe_types = ['nfe', 'bnfe', 'total_nfe']
else:
nfe_types = ['nfe']
for nfe_type in nfe_types:
histories_plt(results["model_info"], plot_type='nfe', labels=augment_labels,
include_mean=only_success, nfe_type=nfe_type,
save_fig=directory + '/{}s.png'.format(nfe_type))
histories_plt(results["model_info"], plot_type='nfe', labels=augment_labels,
include_mean=only_success, shaded_err=True, nfe_type=nfe_type,
save_fig=directory + '/{}s_shaded.png'.format(nfe_type))
histories_plt(results["model_info"], plot_type='nfe_vs_loss', labels=augment_labels,
include_mean=only_success, nfe_type=nfe_type,
save_fig=directory + '/{}_vs_loss.png'.format(nfe_type))
histories_plt(results["model_info"], plot_type='nfe_vs_loss', labels=augment_labels,
include_mean=only_success, nfe_type=nfe_type,
save_fig=directory + '/{}_vs_loss.png'.format(nfe_type))
def dataset_mean_loss(trainer, data_loader, device):
epoch_loss = 0.
for x_batch, y_batch in data_loader:
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
y_pred = trainer.model(x_batch)
loss = trainer._loss(y_pred, y_batch)
epoch_loss += loss.item()
return epoch_loss / len(data_loader)
| true | true |
f728b5f2006ff75905c460e9ff9c990b86682b02 | 12,738 | py | Python | twitter-winner/tweepy/models.py | lucasrangit/twitter-winner | 2f92d7b7dac0a6bfbcea7304261d256d6d12c212 | [
"MIT"
] | 10 | 2020-08-09T16:07:35.000Z | 2021-06-19T08:18:44.000Z | twitter-winner/tweepy/models.py | lucasrangit/twitter-winner | 2f92d7b7dac0a6bfbcea7304261d256d6d12c212 | [
"MIT"
] | 13 | 2020-10-28T16:02:09.000Z | 2020-11-16T13:30:05.000Z | twitter-winner/tweepy/models.py | lucasrangit/twitter-winner | 2f92d7b7dac0a6bfbcea7304261d256d6d12c212 | [
"MIT"
] | 2 | 2020-09-22T12:21:35.000Z | 2020-10-27T06:59:30.000Z | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """List subclass holding the results of a Twitter API query.

    Tracks pagination cursors: max_id/since_id are either the values given
    at construction time or derived from the ids of the contained items.
    """

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        # An explicit value wins; otherwise return one less than the
        # smallest id seen, so the next page excludes items already fetched.
        if self._max_id:
            return self._max_id
        ids = self.ids()
        if not ids:
            return None
        return min(ids) - 1

    @property
    def since_id(self):
        # An explicit value wins; otherwise the greatest id in the set.
        if self._since_id:
            return self._since_id
        ids = self.ids()
        if not ids:
            return None
        return max(ids)

    def ids(self):
        """Return the ids of every contained item that exposes an ``id``."""
        return [item.id for item in self if hasattr(item, 'id')]
class Model(object):
    """Common base class for Twitter API models; keeps the owning API handle."""

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        # Drop the API reference when pickling: it is a live handle,
        # not serializable state.
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a list of JSON objects into a ResultSet of model instances.

        Falsy entries (e.g. null placeholders) are skipped.
        """
        results = ResultSet()
        for obj in json_list:
            if obj:
                results.append(cls.parse(api, obj))
        return results

    def __repr__(self):
        attrs = ['%s=%s' % (name, repr(value))
                 for (name, value) in vars(self).items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(attrs))
class Status(Model):
    """A tweet, with nested fields converted to model objects."""

    @classmethod
    def parse(cls, api, json):
        status = cls(api)
        setattr(status, '_json', json)
        for key, value in json.items():
            if key == 'user':
                # Honour the parser's model factory when available so custom
                # User subclasses are used.
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, value)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED alias for author
            elif key == 'created_at':
                setattr(status, key, parse_datetime(value))
            elif key == 'source':
                if '<' in value:
                    # HTML anchor: keep the label and extract the href.
                    setattr(status, key, parse_html_value(value))
                    setattr(status, 'source_url', parse_a_href(value))
                else:
                    setattr(status, key, value)
                    setattr(status, 'source_url', None)
            elif key == 'retweeted_status':
                setattr(status, key, Status.parse(api, value))
            elif key == 'place':
                setattr(status, key, Place.parse(api, value) if value is not None else None)
            else:
                setattr(status, key, value)
        return status

    def destroy(self):
        return self._api.destroy_status(self.id)

    def retweet(self):
        return self._api.retweet(self.id)

    def retweets(self):
        return self._api.retweets(self.id)

    def favorite(self):
        return self._api.create_favorite(self.id)
class User(Model):
    """A Twitter user profile, plus convenience wrappers around the API."""

    @classmethod
    def parse(cls, api, json):
        user = cls(api)
        setattr(user, '_json', json)
        for key, value in json.items():
            if key == 'created_at':
                setattr(user, key, parse_datetime(value))
            elif key == 'status':
                setattr(user, key, Status.parse(api, value))
            elif key == 'following':
                # Twitter sends null when not following; normalise to a bool.
                setattr(user, key, value is True)
            else:
                setattr(user, key, value)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        """Accept either a bare list or a ``{'users': [...]}`` payload."""
        items = json_list if isinstance(json_list, list) else json_list['users']
        results = ResultSet()
        for obj in items:
            results.append(cls.parse(api, obj))
        return results

    def timeline(self, **kargs):
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)

    def lists(self, *args, **kargs):
        return self._api.lists_all(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A direct message between two users."""

    @classmethod
    def parse(cls, api, json):
        dm = cls(api)
        for key, value in json.items():
            if key in ('sender', 'recipient'):
                setattr(dm, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(dm, key, parse_datetime(value))
            else:
                setattr(dm, key, value)
        return dm

    def destroy(self):
        return self._api.destroy_direct_message(self.id)
class Friendship(Model):
    """Relationship between two users, parsed as a (source, target) pair."""

    @classmethod
    def parse(cls, api, json):
        relationship = json['relationship']

        def build(payload):
            # Copy every field of one side of the relationship verbatim.
            side = cls(api)
            for key, value in payload.items():
                setattr(side, key, value)
            return side

        return build(relationship['source']), build(relationship['target'])
class Category(Model):
    """A suggested-user category; all JSON fields are copied verbatim."""

    @classmethod
    def parse(cls, api, json):
        category = cls(api)
        for key, value in json.items():
            setattr(category, key, value)
        return category
class SavedSearch(Model):
    """A saved search query belonging to the authenticated user."""

    @classmethod
    def parse(cls, api, json):
        ss = cls(api)
        for key, value in json.items():
            # Only the creation timestamp needs conversion.
            setattr(ss, key, parse_datetime(value) if key == 'created_at' else value)
        return ss

    def destroy(self):
        return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
    """ResultSet of statuses plus the search metadata returned by the API."""

    @classmethod
    def parse(cls, api, json):
        metadata = json['search_metadata']
        results = SearchResults()
        # Copy the pagination/query metadata onto the result set.
        for attr in ('refresh_url', 'completed_in', 'query', 'count', 'next_results'):
            setattr(results, attr, metadata.get(attr))
        status_model = getattr(api.parser.model_factory, 'status') if api else Status
        for status in json['statuses']:
            results.append(status_model.parse(api, status))
        return results
class List(Model):
    """A Twitter list, with convenience wrappers around the list API."""

    @classmethod
    def parse(cls, api, json):
        lst = List(api)
        for key, value in json.items():
            if key == 'user':
                setattr(lst, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(lst, key, parse_datetime(value))
            else:
                setattr(lst, key, value)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Parse either a bare list or a ``{'lists': [...]}`` payload.

        ``result_set`` is accepted for backward compatibility and unused.
        """
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        results = ResultSet()
        for obj in json_list:
            results.append(cls.parse(api, obj))
        return results

    def update(self, **kargs):
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)

    def add_member(self, id):
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        return self._api.list_members(self.user.screen_name, self.slug, **kargs)

    def is_member(self, id):
        return self._api.is_list_member(self.user.screen_name, self.slug, id)

    def subscribe(self):
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)

    def is_subscribed(self, id):
        return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
    """Related-results item; 'value'/'results' entries become models."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k,v in json.items():
            if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
                setattr(result, k, Status.parse(api, v))
            elif k == 'results':
                # Nested results are themselves Relation payloads.
                setattr(result, k, Relation.parse_list(api, v))
            else:
                setattr(result, k, v)
        return result

class Relationship(Model):
    """Follow-relationship flags between two users."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k,v in json.items():
            if k == 'connections':
                # Expand the 'connections' collection into two booleans.
                setattr(result, 'is_following', 'following' in v)
                setattr(result, 'is_followed_by', 'followed_by' in v)
            else:
                setattr(result, k, v)
        return result

class JSONModel(Model):
    """Pass-through model: parse() returns the raw JSON unchanged."""

    @classmethod
    def parse(cls, api, json):
        return json

class IDModel(Model):
    """ID-list model: accepts a bare list or an {'ids': [...]} wrapper."""

    @classmethod
    def parse(cls, api, json):
        if isinstance(json, list):
            return json
        else:
            return json['ids']
class BoundingBox(Model):
    """Geographic bounding box of a Place."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        if json is not None:
            for k, v in json.items():
                setattr(result, k, v)
        return result

    def origin(self):
        """
        Return longitude, latitude of southwest (bottom, left) corner of
        bounding box, as a tuple.
        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """
        Return longitude, latitude of northeast (top, right) corner of
        bounding box, as a tuple.
        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][2])

class Place(Model):
    """A named location, optionally with a bounding box and parents."""

    @classmethod
    def parse(cls, api, json):
        place = cls(api)
        for k, v in json.items():
            if k == 'bounding_box':
                # bounding_box value may be null (None.)
                # Example: "United States" (id=96683cc9126741d1)
                if v is not None:
                    t = BoundingBox.parse(api, v)
                else:
                    t = v
                setattr(place, k, t)
            elif k == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, k, Place.parse_list(api, v))
            else:
                setattr(place, k, v)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a bare array or a {'result': {'places': [...]}} wrapper."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['result']['places']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results
class ModelFactory(object):
    """
    Used by parsers for creating instances
    of models. You may subclass this factory
    to add your own extended models.
    """
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_results = SearchResults
    category = Category
    # NOTE: 'list' intentionally shadows the builtin within this class body.
    list = List
    relation = Relation
    relationship = Relationship
    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
| 28.75395 | 87 | 0.569556 |
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """A list of model instances that also tracks pagination cursors."""

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        """The explicit cursor if set, else one below the smallest item id."""
        if self._max_id:
            return self._max_id
        known = self.ids()
        if not known:
            return None
        return min(known) - 1

    @property
    def since_id(self):
        """The explicit cursor if set, else the largest item id."""
        if self._since_id:
            return self._since_id
        known = self.ids()
        if not known:
            return None
        return max(known)

    def ids(self):
        """Collect the ``id`` attribute of every contained item that has one."""
        collected = []
        for item in self:
            if hasattr(item, 'id'):
                collected.append(item.id)
        return collected
class Model(object):
    """Common base for API models; keeps a reference to the API client."""

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        """Pickle every attribute except the API handle."""
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Subclasses must convert a JSON payload into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a sequence of JSON payloads, skipping falsy entries."""
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in json_list if obj)
        return results

    def __repr__(self):
        pairs = []
        for name, value in vars(self).items():
            pairs.append('%s=%s' % (name, repr(value)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))
class Status(Model):
    """A status update (tweet)."""

    @classmethod
    def parse(cls, api, json):
        """Build a Status from a JSON dict, converting nested objects."""
        status = cls(api)
        setattr(status, '_json', json)
        for k, v in json.items():
            if k == 'user':
                # Use the parser's model factory when available so a custom
                # User subclass is honored; fall back to the default User.
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                # 'source' may be raw text or an HTML anchor tag.
                if '<' in v:
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
                    setattr(status, 'source_url', None)
            elif k == 'retweeted_status':
                setattr(status, k, Status.parse(api, v))
            elif k == 'place':
                if v is not None:
                    setattr(status, k, Place.parse(api, v))
                else:
                    setattr(status, k, None)
            else:
                setattr(status, k, v)
        return status

    def destroy(self):
        return self._api.destroy_status(self.id)

    def retweet(self):
        return self._api.retweet(self.id)

    def retweets(self):
        return self._api.retweets(self.id)

    def favorite(self):
        return self._api.create_favorite(self.id)
class User(Model):
    """A user account."""

    @classmethod
    def parse(cls, api, json):
        """Build a User from a JSON dict; converts 'created_at' and nested
        'status', and normalizes 'following' to a strict boolean."""
        user = cls(api)
        setattr(user, '_json', json)
        for k, v in json.items():
            if k == 'created_at':
                setattr(user, k, parse_datetime(v))
            elif k == 'status':
                setattr(user, k, Status.parse(api, v))
            elif k == 'following':
                # Anything other than exactly True becomes False.
                if v is True:
                    setattr(user, k, True)
                else:
                    setattr(user, k, False)
            else:
                setattr(user, k, v)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse either a bare JSON array or a {'users': [...]} wrapper."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['users']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results

    def timeline(self, **kargs):
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        # Mirror the server-side state change locally.
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)

    def lists(self, *args, **kargs):
        return self._api.lists_all(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A direct message between two users."""

    @classmethod
    def parse(cls, api, json):
        dm = cls(api)
        for k, v in json.items():
            if k == 'sender' or k == 'recipient':
                # Both endpoints are embedded user payloads.
                setattr(dm, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(dm, k, parse_datetime(v))
            else:
                setattr(dm, k, v)
        return dm

    def destroy(self):
        return self._api.destroy_direct_message(self.id)

class Friendship(Model):
    """Parsed as a (source, target) pair of relationship objects."""

    @classmethod
    def parse(cls, api, json):
        relationship = json['relationship']
        # parse source
        source = cls(api)
        for k, v in relationship['source'].items():
            setattr(source, k, v)
        # parse target
        target = cls(api)
        for k, v in relationship['target'].items():
            setattr(target, k, v)
        return source, target

class Category(Model):
    """Attributes copied verbatim from the JSON dict."""

    @classmethod
    def parse(cls, api, json):
        category = cls(api)
        for k, v in json.items():
            setattr(category, k, v)
        return category

class SavedSearch(Model):
    """A saved search query."""

    @classmethod
    def parse(cls, api, json):
        ss = cls(api)
        for k, v in json.items():
            if k == 'created_at':
                setattr(ss, k, parse_datetime(v))
            else:
                setattr(ss, k, v)
        return ss

    def destroy(self):
        return self._api.destroy_saved_search(self.id)

class SearchResults(ResultSet):
    """Statuses from a search call plus its metadata fields."""

    @classmethod
    def parse(cls, api, json):
        metadata = json['search_metadata']
        results = SearchResults()
        results.refresh_url = metadata.get('refresh_url')
        results.completed_in = metadata.get('completed_in')
        results.query = metadata.get('query')
        results.count = metadata.get('count')
        results.next_results = metadata.get('next_results')
        # Honor a custom status model if the API's parser provides one.
        status_model = getattr(api.parser.model_factory, 'status') if api else Status
        for status in json['statuses']:
            results.append(status_model.parse(api, status))
        return results
class List(Model):
    """A curated list of accounts."""

    @classmethod
    def parse(cls, api, json):
        """Build a list model from a JSON dict.

        Fixed: instantiate via ``cls`` instead of the hard-coded ``List``
        so subclasses get instances of their own type — consistent with
        every other ``parse`` classmethod in this module.
        """
        lst = cls(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(lst, k, parse_datetime(v))
            else:
                setattr(lst, k, v)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Parse a JSON array or a {'lists': [...]} wrapper.

        ``result_set`` is unused; kept for interface compatibility.
        """
        results = ResultSet()
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        for obj in json_list:
            results.append(cls.parse(api, obj))
        return results

    def update(self, **kargs):
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)

    def add_member(self, id):
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        return self._api.list_members(self.user.screen_name, self.slug, **kargs)

    def is_member(self, id):
        return self._api.is_list_member(self.user.screen_name, self.slug, id)

    def subscribe(self):
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)

    def is_subscribed(self, id):
        return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
    """Related-results item; 'value'/'results' entries become models."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k,v in json.items():
            if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
                setattr(result, k, Status.parse(api, v))
            elif k == 'results':
                # Nested results are themselves Relation payloads.
                setattr(result, k, Relation.parse_list(api, v))
            else:
                setattr(result, k, v)
        return result

class Relationship(Model):
    """Follow-relationship flags between two users."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for k,v in json.items():
            if k == 'connections':
                # Expand the 'connections' collection into two booleans.
                setattr(result, 'is_following', 'following' in v)
                setattr(result, 'is_followed_by', 'followed_by' in v)
            else:
                setattr(result, k, v)
        return result

class JSONModel(Model):
    """Pass-through model: parse() returns the raw JSON unchanged."""

    @classmethod
    def parse(cls, api, json):
        return json

class IDModel(Model):
    """ID-list model: accepts a bare list or an {'ids': [...]} wrapper."""

    @classmethod
    def parse(cls, api, json):
        if isinstance(json, list):
            return json
        else:
            return json['ids']
class BoundingBox(Model):
    """Geographic bounding box of a Place."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        if json is not None:
            for k, v in json.items():
                setattr(result, k, v)
        return result

    def origin(self):
        """Return (longitude, latitude) of the southwest corner as a tuple.

        Assumes the bounding box is a rectangle.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """Return (longitude, latitude) of the northeast corner as a tuple.

        Assumes the bounding box is a rectangle.
        """
        return tuple(self.coordinates[0][2])

class Place(Model):
    """A named location, optionally with a bounding box and parents."""

    @classmethod
    def parse(cls, api, json):
        place = cls(api)
        for k, v in json.items():
            if k == 'bounding_box':
                # bounding_box may be null for some places.
                if v is not None:
                    t = BoundingBox.parse(api, v)
                else:
                    t = v
                setattr(place, k, t)
            elif k == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, k, Place.parse_list(api, v))
            else:
                setattr(place, k, v)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a bare array or a {'result': {'places': [...]}} wrapper."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['result']['places']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results
class ModelFactory(object):
    """
    Used by parsers for creating instances of models.
    Subclass this factory to plug in extended models.
    """
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_results = SearchResults
    category = Category
    # NOTE: 'list' intentionally shadows the builtin within this class body.
    list = List
    relation = Relation
    relationship = Relationship
    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
| true | true |
f728b6f92dca477e111a1125d6b121b5c1e6cb92 | 26,973 | py | Python | vistrails/packages/vtk/vtk_wrapper/specs.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 83 | 2015-01-05T14:50:50.000Z | 2021-09-17T19:45:26.000Z | vistrails/packages/vtk/vtk_wrapper/specs.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 254 | 2015-01-02T20:39:19.000Z | 2018-11-28T17:16:44.000Z | vistrails/packages/vtk/vtk_wrapper/specs.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 40 | 2015-04-17T16:46:36.000Z | 2021-09-28T22:43:24.000Z | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import ast
from xml.etree import cElementTree as ET
class SpecList(object):
    """ A collection of module specifications.

    This describes how the wrapped methods/classes map to
    modules in vistrails.
    """

    def __init__(self, module_specs=None):
        if module_specs is None:
            module_specs = []
        self.module_specs = module_specs

    def write_to_xml(self, fname):
        """Serialize all specs into a pretty-printed <specs> XML file."""
        root = ET.Element("specs")
        for spec in self.module_specs:
            root.append(spec.to_xml())
        tree = ET.ElementTree(root)

        def indent(elem, level=0):
            # Pretty-print helper: recursively rewrites text/tail so the
            # serialized XML is indented one level per nesting depth.
            i = "\n" + level*" "
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + " "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                for elem in elem:
                    indent(elem, level+1)
                # 'elem' now refers to the last child (deliberate reuse).
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i
        indent(tree.getroot())
        tree.write(fname)

    @staticmethod
    def read_from_xml(fname, klass=None):
        """Parse a spec XML file; klass selects the ModuleSpec subclass
        used for each matching child element (defaults to ModuleSpec)."""
        if klass is None:
            klass = ModuleSpec
        module_specs = []
        tree = ET.parse(fname)
        for elt in tree.getroot():
            if elt.tag == klass.xml_name:
                module_specs.append(klass.from_xml(elt))
        retval = SpecList(module_specs)
        # for spec in retval.module_specs:
        #     print "==", spec.name, "=="
        #     for ps in spec.port_specs:
        #         print "  ", ps.arg, ps.name
        return retval
######### BASE MODULE SPEC ###########
class PortSpec(object):
    """ Represents specification of a port.
    """
    xml_name = "portSpec"
    # Each attrs value is either a plain default, or a tuple
    # (default value, [is subelement, [run eval]]) where:
    #   is subelement: serialized as a child XML element, not an attribute
    #   run eval: value round-trips through ast.literal_eval on read
    # FIXME: subelement/eval not needed if using json
    attrs = {"name": "",                          # port name
             "port_type": None,                   # type signature in vistrails
             "docstring": ("", True),             # documentation
             "min_conns": (0, False, True),       # set min_conns (1=required)
             "max_conns": (-1, False, True),      # set max_conns (default -1)
             "show_port": (False, False, True),   # set not optional (use connection)
             "sort_key": (-1, False, True),       # sort_key
             "shape": (None, False, True),        # physical shape
             "depth": (0, False, True)}           # expected list depth

    def __init__(self, **kwargs):
        self.set_defaults(**kwargs)
        self.port_types = []

    def set_defaults(self, **kwargs):
        """Assign each declared attribute from kwargs or its default.

        Keys not declared in ``attrs`` are silently ignored.
        """
        for attr, props in self.attrs.iteritems():
            if isinstance(props, tuple):
                default_val = props[0]
            else:
                default_val = props
            if attr in kwargs:
                setattr(self, attr, kwargs[attr])
            else:
                setattr(self, attr, default_val)

    def to_xml(self, elt=None):
        """Serialize non-default values as XML attributes or subelements."""
        if elt is None:
            elt = ET.Element(self.xml_name)
        for attr, props in self.attrs.iteritems():
            attr_val = getattr(self, attr)
            is_subelt = False
            if isinstance(props, tuple):
                default_val = props[0]
                if len(props) > 1:
                    is_subelt = props[1]
            else:
                default_val = props
            if default_val != attr_val:
                if is_subelt:
                    subelt = ET.Element(attr)
                    subelt.text = unicode(getattr(self, attr))
                    elt.append(subelt)
                else:
                    elt.set(attr, unicode(attr_val))
        return elt

    @classmethod
    def internal_from_xml(cls, elt, obj=None):
        """Populate obj from an XML element.

        Returns (obj, child_elts) where child_elts maps each child tag to
        its list of elements so subclasses can consume extra children.
        """
        if obj is None:
            obj = cls()
        child_elts = {}
        for child in elt.getchildren():
            # if child.tag not in obj.attrs:
            #     raise RuntimeError('Cannot deal with tag "%s"' % child.tag)
            if child.tag not in child_elts:
                child_elts[child.tag] = []
            child_elts[child.tag].append(child)
        kwargs = {}
        for attr, props in obj.attrs.iteritems():
            is_subelt = False
            run_eval = False
            if isinstance(props, tuple):
                if len(props) > 1:
                    is_subelt = props[1]
                if len(props) > 2:
                    run_eval = props[2]
            attr_vals = []
            if is_subelt:
                if attr in child_elts:
                    attr_vals = [c.text for c in child_elts[attr]
                                 if c.text is not None]
            else:
                attr_val = elt.get(attr)
                if attr_val is not None:
                    attr_vals = [attr_val]
            if len(attr_vals) > 1:
                raise ValueError('Should have only one value for '
                                 'attribute "%s"' % attr)
            if len(attr_vals) > 0:
                attr_val = attr_vals[0]
                if run_eval:
                    # Fall back to the raw string for non-literal values.
                    try:
                        kwargs[attr] = ast.literal_eval(attr_val)
                    except (NameError, SyntaxError, ValueError):
                        kwargs[attr] = attr_val
                else:
                    kwargs[attr] = attr_val
        obj.set_defaults(**kwargs)
        return obj, child_elts

    @classmethod
    def from_xml(cls, elt, obj=None):
        obj, child_elts = cls.internal_from_xml(elt, obj)
        return obj

    @classmethod
    def create_from_xml(cls, elt):
        # Dispatch on the element tag to the matching input/output spec
        # type.  NOTE(review): InputSpecType/OutputSpecType are declared on
        # ModuleSpec subclasses, not on PortSpec itself.
        if elt.tag == cls.InputSpecType.xml_name:
            return cls.InputSpecType.from_xml(elt)
        elif elt.tag == cls.OutputSpecType.xml_name:
            return cls.OutputSpecType.from_xml(elt)
        raise TypeError('Cannot create spec from element of type "%s"' %
                        elt.tag)

    def get_port_type(self):
        """Return the flattened, comma-joined vistrails type signature."""
        if self.port_type is None:
            return "basic:Null"
        try:
            port_types = ast.literal_eval(self.port_type)

            def flatten(t):
                if not isinstance(t, list):
                    raise Exception("Expected a list")
                flat = []
                for elt in t:
                    if isinstance(elt, list):
                        flat.extend(flatten(elt))
                    else:
                        flat.append(elt)
                return flat
            return ','.join(flatten(port_types))
        except (SyntaxError, ValueError):
            # Not a literal list; treat port_type as the signature itself.
            pass
        return self.port_type

    def get_prepend_params(self):
        # 'prepend_params' is declared by subclasses' attrs dicts.
        if self.prepend_params is None:
            return []
        return self.prepend_params
class InputPortSpec(PortSpec):
    """Port spec for an input port; adds enum/default metadata."""
    xml_name = "inputPortSpec"
    attrs = {"entry_types": (None, True, True),  # custom entry type (like enum)
             "values": (None, True, True),       # values for enums
             "labels": (None, True, True),       # custom labels on enum values
             "defaults": (None, True, True),     # default value list
             }
    attrs.update(PortSpec.attrs)

    def get_port_attrs(self):
        """ Port attribute dict that will be used to create the port.

        Only non-default values are included.
        """
        attrs = {}
        if self.sort_key != -1:
            attrs["sort_key"] = self.sort_key
        if self.shape:
            attrs["shape"] = self.shape
        if self.depth:
            attrs["depth"] = self.depth
        if self.values:
            attrs["values"] = unicode(self.values)
        if self.labels:
            attrs["labels"] = unicode(self.labels)
        if self.entry_types:
            attrs["entry_types"] = unicode(self.entry_types)
        if self.defaults:
            attrs["defaults"] = unicode(self.defaults)
        if self.docstring:
            attrs["docstring"] = self.docstring
        if self.min_conns:
            attrs["min_conns"] = self.min_conns
        if self.max_conns != -1:
            attrs["max_conns"] = self.max_conns
        if not self.show_port:
            attrs["optional"] = True
        return attrs

class OutputPortSpec(PortSpec):
    """Port spec for an output port."""
    xml_name = "outputPortSpec"
    attrs = {}
    attrs.update(PortSpec.attrs)

    def get_port_attrs(self):
        """ Port attribute dict that will be used to create the port.

        Only non-default values are included.
        """
        attrs = {}
        if self.sort_key != -1:
            attrs["sort_key"] = self.sort_key
        if self.shape:
            attrs["shape"] = self.shape
        if self.depth:
            attrs["depth"] = self.depth
        if self.docstring:
            attrs["docstring"] = self.docstring
        if self.min_conns:
            attrs["min_conns"] = self.min_conns
        if self.max_conns != -1:
            attrs["max_conns"] = self.max_conns
        if not self.show_port:
            attrs["optional"] = True
        return attrs
class ModuleSpec(object):
    """ Represents specification of a module.

    This mirrors how the module will look in the vistrails registry.
    """
    xml_name = 'moduleSpec'
    InputSpecType = InputPortSpec
    OutputSpecType = OutputPortSpec

    # From Modulesettings. See core.modules.config._documentation
    ms_attrs = ['name',
                'configure_widget',
                'constant_widget',
                'constant_widgets',
                'signature',
                'constant_signature',
                'color',
                'fringe',
                'left_fringe',
                'right_fringe',
                'abstract',
                'namespace',
                'package_version',
                'hide_descriptor']
    attrs = [
        # basic attributes
        'module_name',  # Name of module (can be overridden by modulesettings)
        'superklass',   # class to inherit from
        'code_ref',     # reference to wrapped class/method
        'docstring',    # module __doc__
        'cacheable',    # should this module be cached
        # special attributes
        'callback',     # name of attribute for progress callback
        'tempfile']     # attribute name for temporary file creation method
    attrs.extend(ms_attrs)

    def __init__(self, module_name='', superklass='', code_ref='',
                 docstring='', callback=None, tempfile=None, cacheable=True,
                 input_port_specs=None, output_port_specs=None, **kwargs):
        if input_port_specs is None:
            input_port_specs = []
        if output_port_specs is None:
            output_port_specs = []
        self.module_name = module_name
        self.superklass = superklass
        self.code_ref = code_ref
        self.docstring = docstring
        self.callback = callback
        self.tempfile = tempfile
        self.cacheable = cacheable
        self.input_port_specs = input_port_specs
        self.output_port_specs = output_port_specs
        # Remaining keyword arguments are module-settings attributes.
        for attr in self.ms_attrs:
            setattr(self, attr, kwargs.get(attr, None))

    def to_xml(self, elt=None):
        """Serialize this spec and all of its port specs to XML."""
        if elt is None:
            elt = ET.Element(self.xml_name)
        elt.set("module_name", self.module_name)
        elt.set("superklass", self.superklass)
        elt.set("code_ref", self.code_ref)
        subelt = ET.Element("docstring")
        subelt.text = unicode(self.docstring)
        elt.append(subelt)
        if self.callback is not None:
            elt.set("callback", self.callback)
        if self.tempfile is not None:
            elt.set("tempfile", self.tempfile)
        if self.cacheable is False:
            elt.set("cacheable", 'False')
        for attr in self.ms_attrs:
            value = getattr(self, attr)
            if value is not None:
                # repr() so from_xml can round-trip via literal_eval.
                elt.set(attr, repr(value))
        for port_spec in self.input_port_specs:
            subelt = port_spec.to_xml()
            elt.append(subelt)
        for port_spec in self.output_port_specs:
            subelt = port_spec.to_xml()
            elt.append(subelt)
        return elt

    @staticmethod
    def from_xml(elt, klass=None):
        """Deserialize a spec; klass selects the concrete spec subclass."""
        if klass is None:
            klass = ModuleSpec
        module_name = elt.get("module_name", '')
        superklass = elt.get("superklass", '')
        code_ref = elt.get("code_ref", '')
        callback = elt.get("callback", None)
        tempfile = elt.get("tempfile", None)
        cacheable = ast.literal_eval(elt.get("cacheable", "True"))
        kwargs = {}
        for attr in klass.ms_attrs:
            value = elt.get(attr, None)
            if value is not None:
                kwargs[attr] = ast.literal_eval(value)
        docstring = ""
        input_port_specs = []
        output_port_specs = []
        for child in elt.getchildren():
            if child.tag == klass.InputSpecType.xml_name:
                input_port_specs.append(klass.InputSpecType.from_xml(child))
            elif child.tag == klass.OutputSpecType.xml_name:
                output_port_specs.append(klass.OutputSpecType.from_xml(child))
            elif child.tag == "docstring":
                if child.text:
                    docstring = child.text
        return klass(module_name=module_name, superklass=superklass,
                     code_ref=code_ref, docstring=docstring,
                     callback=callback, tempfile=tempfile, cacheable=cacheable,
                     input_port_specs=input_port_specs,
                     output_port_specs=output_port_specs, **kwargs)

    def get_output_port_spec(self, compute_name):
        """Find an output port spec by its compute_name, or None.

        NOTE(review): the port spec classes here do not declare
        'compute_name' — confirm the subclasses used with this method do.
        """
        for ps in self.output_port_specs:
            if ps.compute_name == compute_name:
                return ps
        return None

    def get_module_settings(self):
        """ Returns modulesettings dict (non-None values only).
        """
        attrs = {}
        for attr in self.ms_attrs:
            value = getattr(self, attr)
            if value is not None:
                attrs[attr] = value
        return attrs
######### PYTHON FUNCTION SPEC ###########
class FunctionInputPortSpec(InputPortSpec):
    """Input port of a wrapped python function."""
    xml_name = "functionInputPortSpec"
    attrs = {"arg": ""}  # argument name in the wrapped function
    attrs.update(InputPortSpec.attrs)

class FunctionOutputPortSpec(OutputPortSpec):
    """Output port of a wrapped python function."""
    xml_name = "functionOutputPortSpec"

class FunctionSpec(ModuleSpec):
    """ Specification for wrapping a python function.
    """
    xml_name = 'functionSpec'
    InputSpecType = FunctionInputPortSpec
    OutputSpecType = FunctionOutputPortSpec
    # output_type: None(=single), list(ordered), or dict(attr=value)
    attrs = ['output_type']
    attrs.extend(ModuleSpec.attrs)

    def __init__(self, module_name, superklass='', code_ref='', docstring="",
                 output_type=None, callback=None, tempfile=None,
                 cacheable=True, input_port_specs=None, output_port_specs=None,
                 **kwargs):
        ModuleSpec.__init__(self, module_name, superklass, code_ref,
                            docstring, callback, tempfile, cacheable,
                            input_port_specs, output_port_specs, **kwargs)
        self.output_type = output_type

    def to_xml(self, elt=None):
        if elt is None:
            elt = ET.Element(self.xml_name)
        elt = ModuleSpec.to_xml(self, elt)
        if self.output_type is not None:
            elt.set("output_type", self.output_type)
        return elt

    @staticmethod
    def from_xml(elt):
        inst = ModuleSpec.from_xml(elt, FunctionSpec)
        inst.output_type = elt.get("output_type", None)
        return inst
######### PYTHON CLASS SPEC ###########
class ClassInputPortSpec(InputPortSpec):
    """Input port bound to a method of a wrapped python class."""
    xml_name = "classInputPortSpec"
    attrs = {"method_name": "",  # method name
             "method_type": "",  # Type like nullary, OnOff or SetXToY
             "prepend_params": (None, True, True)}  # prepended params like index
    attrs.update(InputPortSpec.attrs)

    def __init__(self, **kwargs):
        InputPortSpec.__init__(self, **kwargs)
        # Default the wrapped method name to the port name.
        if not self.method_name:
            self.method_name = self.name

class ClassOutputPortSpec(OutputPortSpec):
    """Output port bound to a method/attribute of a wrapped python class."""
    xml_name = "classOutputPortSpec"
    attrs = {"method_name": "",  # method/attribute name
             "prepend_params": (None, True, True)}  # prepended params used with indexed methods
    attrs.update(OutputPortSpec.attrs)

    def __init__(self, **kwargs):
        OutputPortSpec.__init__(self, **kwargs)
        # Default the wrapped method name to the port name.
        if not self.method_name:
            self.method_name = self.name

class ClassSpec(ModuleSpec):
    """ Specification for wrapping a python class.
    """
    xml_name = 'classSpec'
    InputSpecType = ClassInputPortSpec
    OutputSpecType = ClassOutputPortSpec
    attrs = ['methods_last',  # If True will compute methods before connections
             'compute',       # Function to call after input methods
             'cleanup']       # Function to call after output methods
    attrs.extend(ModuleSpec.attrs)

    def __init__(self, module_name, superklass='', code_ref='', docstring="",
                 callback=None, tempfile=None,
                 cacheable=True, input_port_specs=None, output_port_specs=None,
                 compute=None, cleanup=None, methods_last=False, **kwargs):
        ModuleSpec.__init__(self, module_name, superklass, code_ref,
                            docstring, callback, tempfile, cacheable,
                            input_port_specs, output_port_specs, **kwargs)
        self.methods_last = methods_last
        self.compute = compute
        self.cleanup = cleanup

    def to_xml(self, elt=None):
        if elt is None:
            elt = ET.Element(self.xml_name)
        # Non-default extras are written before the base-class attributes.
        if self.methods_last is not False:
            elt.set("methods_last", unicode(self.methods_last))
        if self.compute is not None:
            elt.set("compute", self.compute)
        if self.cleanup is not None:
            elt.set("cleanup", self.cleanup)
        elt = ModuleSpec.to_xml(self, elt)
        return elt

    @staticmethod
    def from_xml(elt):
        inst = ModuleSpec.from_xml(elt, ClassSpec)
        inst.methods_last = ast.literal_eval(elt.get("methods_last", 'False'))
        inst.compute = elt.get("compute", None)
        inst.cleanup = elt.get("cleanup", None)
        return inst
###############################################################################
import unittest
class TestModuleSpec(unittest.TestCase):
    """Round-trip (to_xml -> from_xml) tests for the spec classes."""

    @classmethod
    def setUpClass(cls):
        try:
            import vtk
        except ImportError:
            raise unittest.SkipTest("vtk is not installed")
        from vistrails.tests.utils import enable_package
        from ..identifiers import identifier
        enable_package(identifier)

    def test_module_spec(self):
        """ModuleSpec with one input/output port survives XML round-trip."""
        # Fixed: keyword was misspelled 'entry_type', which
        # PortSpec.set_defaults silently ignored (it only assigns keys
        # declared in attrs); the intended attribute is 'entry_types'.
        input_spec = InputPortSpec(name='myportname',
                                   port_type='basic:String',
                                   docstring='my port doc',
                                   min_conns=1,
                                   max_conns=3,
                                   show_port=True,
                                   sort_key=5,
                                   depth=1,
                                   entry_types=['enum'])
        in_attrs = input_spec.get_port_attrs()
        output_spec = OutputPortSpec(name='myportname',
                                     port_type='basic:String',
                                     docstring='my port doc',
                                     min_conns=1,
                                     max_conns=3,
                                     show_port=False,
                                     sort_key=5,
                                     depth=1)
        out_attrs = output_spec.get_port_attrs()
        ms = ModuleSpec(module_name='myclassname',
                        superklass='mysuperclassname',
                        code_ref='theclassname',
                        docstring='my documentation',
                        callback=None,
                        tempfile=None,
                        cacheable=False,
                        input_port_specs=[input_spec],
                        output_port_specs=[output_spec])
        as_string = ET.tostring(ms.to_xml())
        from_string = ET.fromstring(as_string)
        ms2 = ModuleSpec.from_xml(from_string)
        in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
        out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
        self.assertEqual(in_attrs, in_attrs2)
        self.assertEqual(out_attrs, out_attrs2)

    def test_function_spec(self):
        """FunctionSpec (with 'arg' and 'output_type') round-trips."""
        input_spec = FunctionInputPortSpec(name='myportname',
                                           port_type='basic:String',
                                           docstring='my port doc',
                                           min_conns=1,
                                           max_conns=3,
                                           show_port=False,
                                           sort_key=5,
                                           depth=1,
                                           arg='myargname',
                                           )
        in_attrs = input_spec.get_port_attrs()
        output_spec = FunctionOutputPortSpec(name='myportname',
                                             port_type='basic:String',
                                             docstring='my port doc',
                                             min_conns=1,
                                             max_conns=3,
                                             show_port=False,
                                             sort_key=5,
                                             depth=1)
        out_attrs = output_spec.get_port_attrs()
        ms = FunctionSpec(module_name='myclassname',
                          superklass='mysuperclassname',
                          code_ref='theclassname',
                          docstring='my documentation',
                          callback=None,
                          tempfile=None,
                          cacheable=False,
                          input_port_specs=[input_spec],
                          output_port_specs=[output_spec],
                          output_type='list')
        as_string = ET.tostring(ms.to_xml())
        from_string = ET.fromstring(as_string)
        ms2 = FunctionSpec.from_xml(from_string)
        in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
        out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
        self.assertEqual(in_attrs, in_attrs2)
        self.assertEqual(out_attrs, out_attrs2)

    def test_class_spec(self):
        """ClassSpec (method bindings, compute/cleanup hooks) round-trips."""
        input_spec = ClassInputPortSpec(name='myportname',
                                        port_type='basic:String',
                                        docstring='my port doc',
                                        min_conns=1,
                                        max_conns=3,
                                        show_port=False,
                                        sort_key=5,
                                        depth=1,
                                        method_name='MyClassMethodName',
                                        method_type='SetXToY',
                                        prepend_params=[1])
        in_attrs = input_spec.get_port_attrs()
        output_spec = ClassOutputPortSpec(name='myportname',
                                          port_type='basic:String',
                                          docstring='my port doc',
                                          min_conns=1,
                                          max_conns=3,
                                          show_port=False,
                                          sort_key=5,
                                          depth=1,
                                          method_name='MyClassMethodName',
                                          prepend_params=[1])
        out_attrs = output_spec.get_port_attrs()
        ms = ClassSpec(module_name='myclassname',
                       superklass='mysuperclassname',
                       code_ref='theclassname',
                       docstring='my documentation',
                       callback=None,
                       tempfile=None,
                       cacheable=False,
                       input_port_specs=[input_spec],
                       output_port_specs=[output_spec],
                       methods_last=True,
                       compute='myCompute',
                       cleanup='myCleanup')
        as_string = ET.tostring(ms.to_xml())
        from_string = ET.fromstring(as_string)
        ms2 = ClassSpec.from_xml(from_string)
        in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
        out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
        self.assertEqual(in_attrs, in_attrs2)
        self.assertEqual(out_attrs, out_attrs2)
#def run():
# specs = SpecList.read_from_xml("mpl_plots_raw.xml")
# specs.write_to_xml("mpl_plots_raw_out.xml")
#if __name__ == '__main__':
# run()
| 38.259574 | 95 | 0.539169 | elt.tag)
def get_port_type(self):
if self.port_type is None:
return "basic:Null"
try:
port_types = ast.literal_eval(self.port_type)
def flatten(t):
if not isinstance(t, list):
raise Exception("Expected a list")
flat = []
for elt in t:
if isinstance(elt, list):
flat.extend(flatten(elt))
else:
flat.append(elt)
return flat
return ','.join(flatten(port_types))
except (SyntaxError, ValueError):
pass
return self.port_type
def get_prepend_params(self):
if self.prepend_params is None:
return []
return self.prepend_params
class InputPortSpec(PortSpec):
    """Specification of a module input port.

    Serialized as an ``<inputPortSpec>`` XML element.  Adds the
    entry-type / values / labels / defaults attributes on top of the
    base :class:`PortSpec` attributes.
    """
    xml_name = "inputPortSpec"
    # Extra attribute descriptors merged with PortSpec.attrs below.
    # Tuple format is inherited from PortSpec.attrs — presumably
    # (default, ..., ...); TODO confirm field meanings against PortSpec.
    attrs = {"entry_types": (None, True, True),
             "values": (None, True, True),
             "labels": (None, True, True),
             "defaults": (None, True, True),
             }
    attrs.update(PortSpec.attrs)
    def get_port_attrs(self):
        """Return a dict of the port attributes that differ from their
        defaults (only non-default values are emitted)."""
        attrs = {}
        if self.sort_key != -1:
            attrs["sort_key"] = self.sort_key
        if self.shape:
            attrs["shape"] = self.shape
        if self.depth:
            attrs["depth"] = self.depth
        # `unicode` is the Python 2 builtin: list-valued attributes are
        # stored as their text representation
        if self.values:
            attrs["values"] = unicode(self.values)
        if self.labels:
            attrs["labels"] = unicode(self.labels)
        if self.entry_types:
            attrs["entry_types"] = unicode(self.entry_types)
        if self.defaults:
            attrs["defaults"] = unicode(self.defaults)
        if self.docstring:
            attrs["docstring"] = self.docstring
        if self.min_conns:
            attrs["min_conns"] = self.min_conns
        if self.max_conns != -1:
            attrs["max_conns"] = self.max_conns
        # a hidden port is marked optional
        if not self.show_port:
            attrs["optional"] = True
        return attrs
class OutputPortSpec(PortSpec):
    """Specification of a module output port.

    Serialized as an ``<outputPortSpec>`` XML element.  Declares no
    extra attributes beyond the base :class:`PortSpec` ones.
    """
    xml_name = "outputPortSpec"
    attrs = {}
    attrs.update(PortSpec.attrs)
    def get_port_attrs(self):
        """Return a dict of the port attributes that differ from their
        defaults (only non-default values are emitted)."""
        attrs = {}
        if self.sort_key != -1:
            attrs["sort_key"] = self.sort_key
        if self.shape:
            attrs["shape"] = self.shape
        if self.depth:
            attrs["depth"] = self.depth
        if self.docstring:
            attrs["docstring"] = self.docstring
        if self.min_conns:
            attrs["min_conns"] = self.min_conns
        if self.max_conns != -1:
            attrs["max_conns"] = self.max_conns
        # a hidden port is marked optional
        if not self.show_port:
            attrs["optional"] = True
        return attrs
class ModuleSpec(object):
    """Specification of a module, serialized as a ``<moduleSpec>`` XML
    element.

    Holds the module identity (name, superclass, code reference),
    its documentation, and its input/output port specifications.
    Subclasses override :attr:`InputSpecType` / :attr:`OutputSpecType`
    to use specialized port-spec classes.
    """
    xml_name = 'moduleSpec'
    # Port-spec classes used when (de)serializing children; subclasses
    # substitute their own specialized types.
    InputSpecType = InputPortSpec
    OutputSpecType = OutputPortSpec
    # Optional module-settings attributes: stored via setattr in
    # __init__, emitted as repr() strings in to_xml and parsed back
    # with ast.literal_eval in from_xml.
    ms_attrs = ['name',
                'configure_widget',
                'constant_widget',
                'constant_widgets',
                'signature',
                'constant_signature',
                'color',
                'fringe',
                'left_fringe',
                'right_fringe',
                'abstract',
                'namespace',
                'package_version',
                'hide_descriptor']
    attrs = [
        'module_name',
        'superklass',
        'code_ref',
        'docstring',
        'cacheable',
        'callback',
        'tempfile']
    attrs.extend(ms_attrs)
    def __init__(self, module_name='', superklass='', code_ref='',
                 docstring='', callback=None, tempfile=None, cacheable=True,
                 input_port_specs=None, output_port_specs=None, **kwargs):
        """
        :param str module_name: name of the module
        :param str superklass: name of the module's superclass
        :param str code_ref: reference to the wrapped code object
        :param str docstring: module documentation
        :param callback: optional callback reference (or None)
        :param tempfile: optional tempfile reference (or None)
        :param bool cacheable: whether module results may be cached
        :param list input_port_specs: InputSpecType instances
        :param list output_port_specs: OutputSpecType instances
        :param kwargs: any of the optional ms_attrs settings
        """
        # avoid mutable default arguments: fresh lists per instance
        if input_port_specs is None:
            input_port_specs = []
        if output_port_specs is None:
            output_port_specs = []
        self.module_name = module_name
        self.superklass = superklass
        self.code_ref = code_ref
        self.docstring = docstring
        self.callback = callback
        self.tempfile = tempfile
        self.cacheable = cacheable
        self.input_port_specs = input_port_specs
        self.output_port_specs = output_port_specs
        # unspecified optional settings default to None
        for attr in self.ms_attrs:
            setattr(self, attr, kwargs.get(attr, None))
    def to_xml(self, elt=None):
        """Serialize this spec into an ElementTree element.

        :param elt: existing element to fill, or None to create one
        :return: the filled element
        """
        if elt is None:
            elt = ET.Element(self.xml_name)
        elt.set("module_name", self.module_name)
        elt.set("superklass", self.superklass)
        elt.set("code_ref", self.code_ref)
        # docstring goes in a child element, not an attribute
        subelt = ET.Element("docstring")
        subelt.text = unicode(self.docstring)
        elt.append(subelt)
        if self.callback is not None:
            elt.set("callback", self.callback)
        if self.tempfile is not None:
            elt.set("tempfile", self.tempfile)
        # only emit cacheable when it differs from the default (True)
        if self.cacheable is False:
            elt.set("cacheable", 'False')
        # optional settings are emitted as repr() strings so that
        # from_xml can recover them with ast.literal_eval
        for attr in self.ms_attrs:
            value = getattr(self, attr)
            if value is not None:
                elt.set(attr, repr(value))
        for port_spec in self.input_port_specs:
            subelt = port_spec.to_xml()
            elt.append(subelt)
        for port_spec in self.output_port_specs:
            subelt = port_spec.to_xml()
            elt.append(subelt)
        return elt
    @staticmethod
    def from_xml(elt, klass=None):
        """Build a spec instance from an ElementTree element.

        :param elt: element produced by :meth:`to_xml`
        :param klass: spec class to instantiate (subclasses pass
            themselves so the right port-spec types are used);
            defaults to ModuleSpec
        :return: a ``klass`` instance
        """
        if klass is None:
            klass = ModuleSpec
        module_name = elt.get("module_name", '')
        superklass = elt.get("superklass", '')
        code_ref = elt.get("code_ref", '')
        callback = elt.get("callback", None)
        tempfile = elt.get("tempfile", None)
        cacheable = ast.literal_eval(elt.get("cacheable", "True"))
        kwargs = {}
        # recover optional settings written as repr() by to_xml
        for attr in klass.ms_attrs:
            value = elt.get(attr, None)
            if value is not None:
                kwargs[attr] = ast.literal_eval(value)
        docstring = ""
        input_port_specs = []
        output_port_specs = []
        # NOTE: getchildren() is deprecated in modern ElementTree
        # (iterate the element directly instead) — kept for Python 2
        # compatibility here
        for child in elt.getchildren():
            if child.tag == klass.InputSpecType.xml_name:
                input_port_specs.append(klass.InputSpecType.from_xml(child))
            elif child.tag == klass.OutputSpecType.xml_name:
                output_port_specs.append(klass.OutputSpecType.from_xml(child))
            elif child.tag == "docstring":
                if child.text:
                    docstring = child.text
        return klass(module_name=module_name, superklass=superklass,
                     code_ref=code_ref, docstring=docstring,
                     callback=callback, tempfile=tempfile, cacheable=cacheable,
                     input_port_specs=input_port_specs,
                     output_port_specs=output_port_specs, **kwargs)
    def get_output_port_spec(self, compute_name):
        """Return the output port spec whose ``compute_name`` matches,
        or None if there is no such port."""
        for ps in self.output_port_specs:
            if ps.compute_name == compute_name:
                return ps
        return None
    def get_module_settings(self):
        """Return a dict of the optional module settings that are set
        (i.e. not None)."""
        attrs = {}
        for attr in self.ms_attrs:
            value = getattr(self, attr)
            if value is not None:
                attrs[attr] = value
        return attrs
putSpecType = FunctionOutputPortSpec
attrs = ['output_type']
attrs.extend(ModuleSpec.attrs)
def __init__(self, module_name, superklass='', code_ref='', docstring="",
output_type=None, callback=None, tempfile=None,
cacheable=True, input_port_specs=None, output_port_specs=None,
**kwargs):
ModuleSpec.__init__(self, module_name, superklass, code_ref,
docstring, callback, tempfile, cacheable,
input_port_specs, output_port_specs, **kwargs)
self.output_type = output_type
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
elt = ModuleSpec.to_xml(self, elt)
if self.output_type is not None:
elt.set("output_type", self.output_type)
return elt
@staticmethod
def from_xml(elt):
inst = ModuleSpec.from_xml(elt, FunctionSpec)
inst.output_type = elt.get("output_type", None)
return inst
**kwargs)
if not self.method_name:
self.method_name = self.name
class ClassOutputPortSpec(OutputPortSpec):
    """Output port spec bound to a class method.

    Serialized as a ``<classOutputPortSpec>`` element; adds the method
    name and prepended call parameters to the base output port spec.
    """
    xml_name = "classOutputPortSpec"
    attrs = {"method_name": "",
             "prepend_params": (None, True, True)}
    attrs.update(OutputPortSpec.attrs)
    def __init__(self, **kwargs):
        OutputPortSpec.__init__(self, **kwargs)
        # default the method name to the port name when not provided
        if not self.method_name:
            self.method_name = self.name
class ClassSpec(ModuleSpec):
    """Module spec wrapping a class, serialized as ``<classSpec>``.

    Extends :class:`ModuleSpec` with class-specific settings
    (``compute``/``cleanup`` method names and method ordering) and uses
    the class-flavored port spec types for its children.
    """
    xml_name = 'classSpec'
    InputSpecType = ClassInputPortSpec
    OutputSpecType = ClassOutputPortSpec
    attrs = ['methods_last',
             'compute',
             'cleanup']
    attrs.extend(ModuleSpec.attrs)
    def __init__(self, module_name, superklass='', code_ref='', docstring="",
                 callback=None, tempfile=None,
                 cacheable=True, input_port_specs=None, output_port_specs=None,
                 compute=None, cleanup=None, methods_last=False, **kwargs):
        """
        :param compute: name of the compute method (or None)
        :param cleanup: name of the cleanup method (or None)
        :param bool methods_last: whether methods are applied last
        Remaining parameters are forwarded to :class:`ModuleSpec`.
        """
        ModuleSpec.__init__(self, module_name, superklass, code_ref,
                            docstring, callback, tempfile, cacheable,
                            input_port_specs, output_port_specs, **kwargs)
        self.methods_last = methods_last
        self.compute = compute
        self.cleanup = cleanup
    def to_xml(self, elt=None):
        """Serialize to XML: set the class-specific attributes, then
        delegate the common ones to ModuleSpec.to_xml."""
        if elt is None:
            elt = ET.Element(self.xml_name)
        # only emit methods_last when it differs from the default
        if self.methods_last is not False:
            elt.set("methods_last", unicode(self.methods_last))
        if self.compute is not None:
            elt.set("compute", self.compute)
        if self.cleanup is not None:
            elt.set("cleanup", self.cleanup)
        elt = ModuleSpec.to_xml(self, elt)
        return elt
    @staticmethod
    def from_xml(elt):
        """Deserialize: build the common part via ModuleSpec.from_xml,
        then recover the class-specific attributes."""
        inst = ModuleSpec.from_xml(elt, ClassSpec)
        inst.methods_last = ast.literal_eval(elt.get("methods_last", 'False'))
        inst.compute = elt.get("compute", None)
        inst.cleanup = elt.get("cleanup", None)
        return inst
show_port=False,
sort_key=5,
depth=1)
out_attrs = output_spec.get_port_attrs()
ms = FunctionSpec(module_name='myclassname',
superklass='mysuperclassname',
code_ref='theclassname',
docstring='my documentation',
callback=None,
tempfile=None,
cacheable=False,
input_port_specs=[input_spec],
output_port_specs=[output_spec],
output_type='list')
as_string = ET.tostring(ms.to_xml())
from_string = ET.fromstring(as_string)
ms2 = FunctionSpec.from_xml(from_string)
in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
self.assertEqual(in_attrs, in_attrs2)
self.assertEqual(out_attrs, out_attrs2)
def test_class_spec(self):
input_spec = ClassInputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1,
method_name='MyClassMethodName',
method_type='SetXToY',
prepend_params=[1])
in_attrs = input_spec.get_port_attrs()
output_spec = ClassOutputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1,
method_name='MyClassMethodName',
prepend_params=[1])
out_attrs = output_spec.get_port_attrs()
ms = ClassSpec(module_name='myclassname',
superklass='mysuperclassname',
code_ref='theclassname',
docstring='my documentation',
callback=None,
tempfile=None,
cacheable=False,
input_port_specs=[input_spec],
output_port_specs=[output_spec],
methods_last=True,
compute='myCompute',
cleanup='myCleanup')
as_string = ET.tostring(ms.to_xml())
from_string = ET.fromstring(as_string)
ms2 = ClassSpec.from_xml(from_string)
in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
self.assertEqual(in_attrs, in_attrs2)
self.assertEqual(out_attrs, out_attrs2)
| true | true |
f728b7aa08823dde508a38f4ea974fa9249ec9d9 | 30,233 | py | Python | silx/gui/plot/actions/io.py | physwkim/silx | e3f39babad34c97db8ec5dfbb8e92287ce059f70 | [
"CC0-1.0",
"MIT"
] | 1 | 2019-12-11T14:11:03.000Z | 2019-12-11T14:11:03.000Z | silx/gui/plot/actions/io.py | physwkim/silx | e3f39babad34c97db8ec5dfbb8e92287ce059f70 | [
"CC0-1.0",
"MIT"
] | 3 | 2016-09-08T13:14:15.000Z | 2017-05-09T07:51:13.000Z | silx/gui/plot/actions/io.py | physwkim/silx | e3f39babad34c97db8ec5dfbb8e92287ce059f70 | [
"CC0-1.0",
"MIT"
] | 1 | 2017-06-13T13:02:54.000Z | 2017-06-13T13:02:54.000Z | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
:mod:`silx.gui.plot.actions.io` provides a set of QAction relative of inputs
and outputs for a :class:`.PlotWidget`.
The following QAction are available:
- :class:`CopyAction`
- :class:`PrintAction`
- :class:`SaveAction`
"""
from __future__ import division
__authors__ = ["V.A. Sole", "T. Vincent", "P. Knobel"]
__license__ = "MIT"
__date__ = "25/09/2020"
from . import PlotAction
from silx.io.utils import save1D, savespec, NEXUS_HDF5_EXT
from silx.io.nxdata import save_NXdata
import logging
import sys
import os.path
from collections import OrderedDict
import traceback
import numpy
from silx.utils.deprecation import deprecated
from silx.gui import qt, printer
from silx.gui.dialog.GroupDialog import GroupDialog
from silx.third_party.EdfFile import EdfFile
from silx.third_party.TiffIO import TiffIO
from ...utils.image import convertArrayToQImage
if sys.version_info[0] == 3:
from io import BytesIO
else:
import cStringIO as _StringIO
BytesIO = _StringIO.StringIO
_logger = logging.getLogger(__name__)
_NEXUS_HDF5_EXT_STR = ' '.join(['*' + ext for ext in NEXUS_HDF5_EXT])
def selectOutputGroup(h5filename):
    """Prompt the user to select a group of an existing HDF5 file in
    which to output data.

    :param str h5filename: name of an existing HDF5 file
    :rtype: str
    :return: Name of output group, or None if the dialog was cancelled
    """
    groupDialog = GroupDialog()
    groupDialog.addFile(h5filename)
    groupDialog.setWindowTitle("Select an output group")
    accepted = groupDialog.exec_()
    if accepted:
        return groupDialog.getSelectedDataUrl().data_path()
    return None
class SaveAction(PlotAction):
"""QAction for saving Plot content.
It opens a Save as... dialog.
:param plot: :class:`.PlotWidget` instance on which to operate.
:param parent: See :class:`QAction`.
"""
SNAPSHOT_FILTER_SVG = 'Plot Snapshot as SVG (*.svg)'
SNAPSHOT_FILTER_PNG = 'Plot Snapshot as PNG (*.png)'
DEFAULT_ALL_FILTERS = (SNAPSHOT_FILTER_PNG, SNAPSHOT_FILTER_SVG)
# Dict of curve filters with CSV-like format
# Using ordered dict to guarantee filters order
# Note: '%.18e' is numpy.savetxt default format
CURVE_FILTERS_TXT = OrderedDict((
('Curve as Raw ASCII (*.txt)',
{'fmt': '%.18e', 'delimiter': ' ', 'header': False}),
('Curve as ";"-separated CSV (*.csv)',
{'fmt': '%.18e', 'delimiter': ';', 'header': True}),
('Curve as ","-separated CSV (*.csv)',
{'fmt': '%.18e', 'delimiter': ',', 'header': True}),
('Curve as tab-separated CSV (*.csv)',
{'fmt': '%.18e', 'delimiter': '\t', 'header': True}),
('Curve as OMNIC CSV (*.csv)',
{'fmt': '%.7E', 'delimiter': ',', 'header': False}),
('Curve as SpecFile (*.dat)',
{'fmt': '%.10g', 'delimiter': '', 'header': False})
))
CURVE_FILTER_NPY = 'Curve as NumPy binary file (*.npy)'
CURVE_FILTER_NXDATA = 'Curve as NXdata (%s)' % _NEXUS_HDF5_EXT_STR
DEFAULT_CURVE_FILTERS = list(CURVE_FILTERS_TXT.keys()) + [
CURVE_FILTER_NPY, CURVE_FILTER_NXDATA]
DEFAULT_ALL_CURVES_FILTERS = ("All curves as SpecFile (*.dat)",)
IMAGE_FILTER_EDF = 'Image data as EDF (*.edf)'
IMAGE_FILTER_TIFF = 'Image data as TIFF (*.tif)'
IMAGE_FILTER_NUMPY = 'Image data as NumPy binary file (*.npy)'
IMAGE_FILTER_ASCII = 'Image data as ASCII (*.dat)'
IMAGE_FILTER_CSV_COMMA = 'Image data as ,-separated CSV (*.csv)'
IMAGE_FILTER_CSV_SEMICOLON = 'Image data as ;-separated CSV (*.csv)'
IMAGE_FILTER_CSV_TAB = 'Image data as tab-separated CSV (*.csv)'
IMAGE_FILTER_RGB_PNG = 'Image as PNG (*.png)'
IMAGE_FILTER_NXDATA = 'Image as NXdata (%s)' % _NEXUS_HDF5_EXT_STR
DEFAULT_IMAGE_FILTERS = (IMAGE_FILTER_EDF,
IMAGE_FILTER_TIFF,
IMAGE_FILTER_NUMPY,
IMAGE_FILTER_ASCII,
IMAGE_FILTER_CSV_COMMA,
IMAGE_FILTER_CSV_SEMICOLON,
IMAGE_FILTER_CSV_TAB,
IMAGE_FILTER_RGB_PNG,
IMAGE_FILTER_NXDATA)
SCATTER_FILTER_NXDATA = 'Scatter as NXdata (%s)' % _NEXUS_HDF5_EXT_STR
DEFAULT_SCATTER_FILTERS = (SCATTER_FILTER_NXDATA,)
# filters for which we don't want an "overwrite existing file" warning
DEFAULT_APPEND_FILTERS = (CURVE_FILTER_NXDATA, IMAGE_FILTER_NXDATA,
SCATTER_FILTER_NXDATA)
def __init__(self, plot, parent=None):
self._filters = {
'all': OrderedDict(),
'curve': OrderedDict(),
'curves': OrderedDict(),
'image': OrderedDict(),
'scatter': OrderedDict()}
self._appendFilters = list(self.DEFAULT_APPEND_FILTERS)
# Initialize filters
for nameFilter in self.DEFAULT_ALL_FILTERS:
self.setFileFilter(
dataKind='all', nameFilter=nameFilter, func=self._saveSnapshot)
for nameFilter in self.DEFAULT_CURVE_FILTERS:
self.setFileFilter(
dataKind='curve', nameFilter=nameFilter, func=self._saveCurve)
for nameFilter in self.DEFAULT_ALL_CURVES_FILTERS:
self.setFileFilter(
dataKind='curves', nameFilter=nameFilter, func=self._saveCurves)
for nameFilter in self.DEFAULT_IMAGE_FILTERS:
self.setFileFilter(
dataKind='image', nameFilter=nameFilter, func=self._saveImage)
for nameFilter in self.DEFAULT_SCATTER_FILTERS:
self.setFileFilter(
dataKind='scatter', nameFilter=nameFilter, func=self._saveScatter)
super(SaveAction, self).__init__(
plot, icon='document-save', text='Save as...',
tooltip='Save curve/image/plot snapshot dialog',
triggered=self._actionTriggered,
checkable=False, parent=parent)
self.setShortcut(qt.QKeySequence.Save)
self.setShortcutContext(qt.Qt.WidgetShortcut)
    @staticmethod
    def _errorMessage(informativeText='', parent=None):
        """Display an error message.

        Intended to be called from an ``except`` block: the message of
        the current exception is appended to *informativeText* and the
        formatted traceback is shown as detailed text.

        :param str informativeText: Text shown before the exception message
        :param parent: Parent widget of the message box
        """
        # TODO issue with QMessageBox size fixed and too small
        msg = qt.QMessageBox(parent)
        msg.setIcon(qt.QMessageBox.Critical)
        msg.setInformativeText(informativeText + ' ' + str(sys.exc_info()[1]))
        msg.setDetailedText(traceback.format_exc())
        msg.exec_()
def _saveSnapshot(self, plot, filename, nameFilter):
"""Save a snapshot of the :class:`PlotWindow` widget.
:param str filename: The name of the file to write
:param str nameFilter: The selected name filter
:return: False if format is not supported or save failed,
True otherwise.
"""
if nameFilter == self.SNAPSHOT_FILTER_PNG:
fileFormat = 'png'
elif nameFilter == self.SNAPSHOT_FILTER_SVG:
fileFormat = 'svg'
else: # Format not supported
_logger.error(
'Saving plot snapshot failed: format not supported')
return False
plot.saveGraph(filename, fileFormat=fileFormat)
return True
def _getAxesLabels(self, item):
# If curve has no associated label, get the default from the plot
xlabel = item.getXLabel() or self.plot.getXAxis().getLabel()
ylabel = item.getYLabel() or self.plot.getYAxis().getLabel()
return xlabel, ylabel
    def _get1dData(self, item):
        """Provide xdata, a list of signals and their labels for *item*,
        expanding error bars into extra signal columns.

        A scalar error is broadcast to the length of the data and
        appended as "<label>_errors"; a 1D error array is appended as
        "<label>_errors"; a 2D error array is split into
        "<label>_errors_below" and "<label>_errors_above".

        :param item: curve item providing X/Y data and error data
        :return: tuple (x_data, data_list, xlabel, labels_list) where
            data_list and labels_list run in parallel
        """
        xlabel, ylabel = self._getAxesLabels(item)
        x_data = item.getXData(copy=False)
        y_data = item.getYData(copy=False)
        x_err = item.getXErrorData(copy=False)
        y_err = item.getYErrorData(copy=False)
        labels = [ylabel]
        data = [y_data]
        if x_err is not None:
            if numpy.isscalar(x_err):
                # broadcast the scalar error to the data length
                data.append(numpy.zeros_like(y_data) + x_err)
                labels.append(xlabel + "_errors")
            elif x_err.ndim == 1:
                data.append(x_err)
                labels.append(xlabel + "_errors")
            elif x_err.ndim == 2:
                # asymmetric errors: row 0 below, row 1 above
                data.append(x_err[0])
                labels.append(xlabel + "_errors_below")
                data.append(x_err[1])
                labels.append(xlabel + "_errors_above")
        if y_err is not None:
            if numpy.isscalar(y_err):
                # broadcast the scalar error to the data length
                data.append(numpy.zeros_like(y_data) + y_err)
                labels.append(ylabel + "_errors")
            elif y_err.ndim == 1:
                data.append(y_err)
                labels.append(ylabel + "_errors")
            elif y_err.ndim == 2:
                # asymmetric errors: row 0 below, row 1 above
                data.append(y_err[0])
                labels.append(ylabel + "_errors_below")
                data.append(y_err[1])
                labels.append(ylabel + "_errors_above")
        return x_data, data, xlabel, labels
@staticmethod
def _selectWriteableOutputGroup(filename, parent):
if os.path.exists(filename) and os.path.isfile(filename) \
and os.access(filename, os.W_OK):
entryPath = selectOutputGroup(filename)
if entryPath is None:
_logger.info("Save operation cancelled")
return None
return entryPath
elif not os.path.exists(filename):
# create new entry in new file
return "/entry"
else:
SaveAction._errorMessage('Save failed (file access issue)\n', parent=parent)
return None
def _saveCurveAsNXdata(self, curve, filename):
entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
if entryPath is None:
return False
xlabel, ylabel = self._getAxesLabels(curve)
return save_NXdata(
filename,
nxentry_name=entryPath,
signal=curve.getYData(copy=False),
axes=[curve.getXData(copy=False)],
signal_name="y",
axes_names=["x"],
signal_long_name=ylabel,
axes_long_names=[xlabel],
signal_errors=curve.getYErrorData(copy=False),
axes_errors=[curve.getXErrorData(copy=True)],
title=self.plot.getGraphTitle())
def _saveCurve(self, plot, filename, nameFilter):
"""Save a curve from the plot.
:param str filename: The name of the file to write
:param str nameFilter: The selected name filter
:return: False if format is not supported or save failed,
True otherwise.
"""
if nameFilter not in self.DEFAULT_CURVE_FILTERS:
return False
# Check if a curve is to be saved
curve = plot.getActiveCurve()
# before calling _saveCurve, if there is no selected curve, we
# make sure there is only one curve on the graph
if curve is None:
curves = plot.getAllCurves()
if not curves:
self._errorMessage("No curve to be saved", parent=self.plot)
return False
curve = curves[0]
if nameFilter in self.CURVE_FILTERS_TXT:
filter_ = self.CURVE_FILTERS_TXT[nameFilter]
fmt = filter_['fmt']
csvdelim = filter_['delimiter']
autoheader = filter_['header']
else:
# .npy or nxdata
fmt, csvdelim, autoheader = ("", "", False)
if nameFilter == self.CURVE_FILTER_NXDATA:
return self._saveCurveAsNXdata(curve, filename)
xdata, data, xlabel, labels = self._get1dData(curve)
try:
save1D(filename,
xdata, data,
xlabel, labels,
fmt=fmt, csvdelim=csvdelim,
autoheader=autoheader)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
def _saveCurves(self, plot, filename, nameFilter):
"""Save all curves from the plot.
:param str filename: The name of the file to write
:param str nameFilter: The selected name filter
:return: False if format is not supported or save failed,
True otherwise.
"""
if nameFilter not in self.DEFAULT_ALL_CURVES_FILTERS:
return False
curves = plot.getAllCurves()
if not curves:
self._errorMessage("No curves to be saved", parent=self.plot)
return False
curve = curves[0]
scanno = 1
try:
xdata, data, xlabel, labels = self._get1dData(curve)
specfile = savespec(filename,
xdata, data,
xlabel, labels,
fmt="%.7g", scan_number=1, mode="w",
write_file_header=True,
close_file=False)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
for curve in curves[1:]:
try:
scanno += 1
xdata, data, xlabel, labels = self._get1dData(curve)
specfile = savespec(specfile,
xdata, data,
xlabel, labels,
fmt="%.7g", scan_number=scanno,
write_file_header=False,
close_file=False)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
specfile.close()
return True
def _saveImage(self, plot, filename, nameFilter):
"""Save an image from the plot.
:param str filename: The name of the file to write
:param str nameFilter: The selected name filter
:return: False if format is not supported or save failed,
True otherwise.
"""
if nameFilter not in self.DEFAULT_IMAGE_FILTERS:
return False
image = plot.getActiveImage()
if image is None:
qt.QMessageBox.warning(
plot, "No Data", "No image to be saved")
return False
data = image.getData(copy=False)
# TODO Use silx.io for writing files
if nameFilter == self.IMAGE_FILTER_EDF:
edfFile = EdfFile(filename, access="w+")
edfFile.WriteImage({}, data, Append=0)
return True
elif nameFilter == self.IMAGE_FILTER_TIFF:
tiffFile = TiffIO(filename, mode='w')
tiffFile.writeImage(data, software='silx')
return True
elif nameFilter == self.IMAGE_FILTER_NUMPY:
try:
numpy.save(filename, data)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
elif nameFilter == self.IMAGE_FILTER_NXDATA:
entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
if entryPath is None:
return False
xorigin, yorigin = image.getOrigin()
xscale, yscale = image.getScale()
xaxis = xorigin + xscale * numpy.arange(data.shape[1])
yaxis = yorigin + yscale * numpy.arange(data.shape[0])
xlabel, ylabel = self._getAxesLabels(image)
interpretation = "image" if len(data.shape) == 2 else "rgba-image"
return save_NXdata(filename,
nxentry_name=entryPath,
signal=data,
axes=[yaxis, xaxis],
signal_name="image",
axes_names=["y", "x"],
axes_long_names=[ylabel, xlabel],
title=plot.getGraphTitle(),
interpretation=interpretation)
elif nameFilter in (self.IMAGE_FILTER_ASCII,
self.IMAGE_FILTER_CSV_COMMA,
self.IMAGE_FILTER_CSV_SEMICOLON,
self.IMAGE_FILTER_CSV_TAB):
csvdelim, filetype = {
self.IMAGE_FILTER_ASCII: (' ', 'txt'),
self.IMAGE_FILTER_CSV_COMMA: (',', 'csv'),
self.IMAGE_FILTER_CSV_SEMICOLON: (';', 'csv'),
self.IMAGE_FILTER_CSV_TAB: ('\t', 'csv'),
}[nameFilter]
height, width = data.shape
rows, cols = numpy.mgrid[0:height, 0:width]
try:
save1D(filename, rows.ravel(), (cols.ravel(), data.ravel()),
filetype=filetype,
xlabel='row',
ylabels=['column', 'value'],
csvdelim=csvdelim,
autoheader=True)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
elif nameFilter == self.IMAGE_FILTER_RGB_PNG:
# Get displayed image
rgbaImage = image.getRgbaImageData(copy=False)
# Convert RGB QImage
qimage = convertArrayToQImage(rgbaImage[:, :, :3])
if qimage.save(filename, 'PNG'):
return True
else:
_logger.error('Failed to save image as %s', filename)
qt.QMessageBox.critical(
self.parent(),
'Save image as',
'Failed to save image')
return False
def _saveScatter(self, plot, filename, nameFilter):
"""Save an image from the plot.
:param str filename: The name of the file to write
:param str nameFilter: The selected name filter
:return: False if format is not supported or save failed,
True otherwise.
"""
if nameFilter not in self.DEFAULT_SCATTER_FILTERS:
return False
if nameFilter == self.SCATTER_FILTER_NXDATA:
entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
if entryPath is None:
return False
scatter = plot.getScatter()
x = scatter.getXData(copy=False)
y = scatter.getYData(copy=False)
z = scatter.getValueData(copy=False)
xerror = scatter.getXErrorData(copy=False)
if isinstance(xerror, float):
xerror = xerror * numpy.ones(x.shape, dtype=numpy.float32)
yerror = scatter.getYErrorData(copy=False)
if isinstance(yerror, float):
yerror = yerror * numpy.ones(x.shape, dtype=numpy.float32)
xlabel = plot.getGraphXLabel()
ylabel = plot.getGraphYLabel()
return save_NXdata(
filename,
nxentry_name=entryPath,
signal=z,
axes=[x, y],
signal_name="values",
axes_names=["x", "y"],
axes_long_names=[xlabel, ylabel],
axes_errors=[xerror, yerror],
title=plot.getGraphTitle())
def setFileFilter(self, dataKind, nameFilter, func, index=None, appendToFile=False):
"""Set a name filter to add/replace a file format support
:param str dataKind:
The kind of data for which the provided filter is valid.
One of: 'all', 'curve', 'curves', 'image', 'scatter'
:param str nameFilter: The name filter in the QFileDialog.
See :meth:`QFileDialog.setNameFilters`.
:param callable func: The function to call to perform saving.
Expected signature is:
bool func(PlotWidget plot, str filename, str nameFilter)
:param bool appendToFile: True to append the data into the selected
file.
:param integer index: Index of the filter in the final list (or None)
"""
assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')
if appendToFile:
self._appendFilters.append(nameFilter)
# first append or replace the new filter to prevent colissions
self._filters[dataKind][nameFilter] = func
if index is None:
# we are already done
return
# get the current ordered list of keys
keyList = list(self._filters[dataKind].keys())
# deal with negative indices
if index < 0:
index = len(keyList) + index
if index < 0:
index = 0
if index >= len(keyList):
# nothing to be done, already at the end
txt = 'Requested index %d impossible, already at the end' % index
_logger.info(txt)
return
# get the new ordered list
oldIndex = keyList.index(nameFilter)
del keyList[oldIndex]
keyList.insert(index, nameFilter)
# build the new filters
newFilters = OrderedDict()
for key in keyList:
newFilters[key] = self._filters[dataKind][key]
# and update the filters
self._filters[dataKind] = newFilters
return
def getFileFilters(self, dataKind):
"""Returns the nameFilter and associated function for a kind of data.
:param str dataKind:
The kind of data for which the provided filter is valid.
On of: 'all', 'curve', 'curves', 'image', 'scatter'
:return: {nameFilter: function} associations.
:rtype: collections.OrderedDict
"""
assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')
return self._filters[dataKind].copy()
def _actionTriggered(self, checked=False):
"""Handle save action."""
# Set-up filters
filters = OrderedDict()
# Add image filters if there is an active image
if self.plot.getActiveImage() is not None:
filters.update(self._filters['image'].items())
# Add curve filters if there is a curve to save
if (self.plot.getActiveCurve() is not None or
len(self.plot.getAllCurves()) == 1):
filters.update(self._filters['curve'].items())
if len(self.plot.getAllCurves()) >= 1:
filters.update(self._filters['curves'].items())
# Add scatter filters if there is a scatter
# todo: CSV
if self.plot.getScatter() is not None:
filters.update(self._filters['scatter'].items())
filters.update(self._filters['all'].items())
# Create and run File dialog
dialog = qt.QFileDialog(self.plot)
dialog.setOption(dialog.DontUseNativeDialog)
dialog.setWindowTitle("Output File Selection")
dialog.setModal(1)
dialog.setNameFilters(list(filters.keys()))
dialog.setFileMode(dialog.AnyFile)
dialog.setAcceptMode(dialog.AcceptSave)
def onFilterSelection(filt_):
# disable overwrite confirmation for NXdata types,
# because we append the data to existing files
if filt_ in self._appendFilters:
dialog.setOption(dialog.DontConfirmOverwrite)
else:
dialog.setOption(dialog.DontConfirmOverwrite, False)
dialog.filterSelected.connect(onFilterSelection)
if not dialog.exec_():
return False
nameFilter = dialog.selectedNameFilter()
filename = dialog.selectedFiles()[0]
dialog.close()
if '(' in nameFilter and ')' == nameFilter.strip()[-1]:
# Check for correct file extension
# Extract file extensions as .something
extensions = [ext[ext.find('.'):] for ext in
nameFilter[nameFilter.find('(') + 1:-1].split()]
for ext in extensions:
if (len(filename) > len(ext) and
filename[-len(ext):].lower() == ext.lower()):
break
else: # filename has no extension supported in nameFilter, add one
if len(extensions) >= 1:
filename += extensions[0]
# Handle save
func = filters.get(nameFilter, None)
if func is not None:
return func(self.plot, filename, nameFilter)
else:
_logger.error('Unsupported file filter: %s', nameFilter)
return False
def _plotAsPNG(plot):
"""Save a :class:`Plot` as PNG and return the payload.
:param plot: The :class:`Plot` to save
"""
pngFile = BytesIO()
plot.saveGraph(pngFile, fileFormat='png')
pngFile.flush()
pngFile.seek(0)
data = pngFile.read()
pngFile.close()
return data
class PrintAction(PlotAction):
    """QAction for printing the plot.
    It opens a Print dialog.
    The current implementation prints a bitmap of the plot area and not
    vector graphics, so printing quality is not great.
    :param plot: :class:`.PlotWidget` instance on which to operate.
    :param parent: See :class:`QAction`.
    """
    def __init__(self, plot, parent=None):
        super(PrintAction, self).__init__(
            plot, icon='document-print', text='Print...',
            tooltip='Open print dialog',
            triggered=self.printPlot,
            checkable=False, parent=parent)
        # standard platform Print shortcut, active while the plot has focus
        self.setShortcut(qt.QKeySequence.Print)
        self.setShortcutContext(qt.Qt.WidgetShortcut)
    def getPrinter(self):
        """The QPrinter instance used by the PrintAction.
        :rtype: QPrinter
        """
        # shared application-wide printer so settings persist between prints
        return printer.getDefaultPrinter()
    @property
    @deprecated(replacement="getPrinter()", since_version="0.8.0")
    def printer(self):
        # deprecated property kept for backward compatibility
        return self.getPrinter()
    def printPlotAsWidget(self):
        """Open the print dialog and print the plot.
        Use :meth:`QWidget.render` to print the plot
        :return: True if successful
        """
        dialog = qt.QPrintDialog(self.getPrinter(), self.plot)
        dialog.setWindowTitle('Print Plot')
        if not dialog.exec_():
            # user cancelled the print dialog
            return False
        # Print a snapshot of the plot widget at the top of the page
        widget = self.plot.centralWidget()
        painter = qt.QPainter()
        if not painter.begin(self.getPrinter()):
            return False
        pageRect = self.getPrinter().pageRect()
        # uniform scale: fit the widget inside the page, keep aspect ratio
        xScale = pageRect.width() / widget.width()
        yScale = pageRect.height() / widget.height()
        scale = min(xScale, yScale)
        # center horizontally: translate to page center, scale, then
        # translate back by half the (unscaled) widget width
        painter.translate(pageRect.width() / 2., 0.)
        painter.scale(scale, scale)
        painter.translate(-widget.width() / 2., 0.)
        widget.render(painter)
        painter.end()
        return True
    def printPlot(self):
        """Open the print dialog and print the plot.
        Use :meth:`Plot.saveGraph` to print the plot.
        :return: True if successful
        """
        # Init printer and start printer dialog
        dialog = qt.QPrintDialog(self.getPrinter(), self.plot)
        dialog.setWindowTitle('Print Plot')
        if not dialog.exec_():
            # user cancelled the print dialog
            return False
        # Save Plot as PNG and make a pixmap from it with default dpi
        pngData = _plotAsPNG(self.plot)
        pixmap = qt.QPixmap()
        pixmap.loadFromData(pngData, 'png')
        # uniform scale: fit the pixmap inside the page, keep aspect ratio
        xScale = self.getPrinter().pageRect().width() / pixmap.width()
        yScale = self.getPrinter().pageRect().height() / pixmap.height()
        scale = min(xScale, yScale)
        # Draw pixmap with painter
        painter = qt.QPainter()
        if not painter.begin(self.getPrinter()):
            return False
        painter.drawPixmap(0, 0,
                           pixmap.width() * scale,
                           pixmap.height() * scale,
                           pixmap)
        painter.end()
        return True
class CopyAction(PlotAction):
    """QAction copying the :class:`.PlotWidget` content to the clipboard.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """
    def __init__(self, plot, parent=None):
        super(CopyAction, self).__init__(
            plot,
            icon='edit-copy',
            text='Copy plot',
            tooltip='Copy a snapshot of the plot into the clipboard',
            triggered=self.copyPlot,
            checkable=False,
            parent=parent)
        self.setShortcut(qt.QKeySequence.Copy)
        self.setShortcutContext(qt.Qt.WidgetShortcut)
    def copyPlot(self):
        """Put a bitmap snapshot of the plot on the system clipboard."""
        # Render the plot to PNG at default dpi, then decode into a QImage
        snapshotBytes = _plotAsPNG(self.plot)
        snapshot = qt.QImage.fromData(snapshotBytes, 'png')
        qt.QApplication.clipboard().setImage(snapshot)
| 36.91453 | 88 | 0.586048 |
else: # Format not supported
_logger.error(
'Saving plot snapshot failed: format not supported')
return False
plot.saveGraph(filename, fileFormat=fileFormat)
return True
    def _getAxesLabels(self, item):
        """Return ``(xlabel, ylabel)`` for *item*.

        Falls back to the plot's axis labels when the item has none.
        """
        # If curve has no associated label, get the default from the plot
        xlabel = item.getXLabel() or self.plot.getXAxis().getLabel()
        ylabel = item.getYLabel() or self.plot.getYAxis().getLabel()
        return xlabel, ylabel
    def _get1dData(self, item):
        """Prepare the columns to save for a 1-D item (curve).

        :return: ``(x_data, data, xlabel, labels)`` where *data* is the list
            of columns starting with the y values followed by any error
            columns, and *labels* holds one header label per column in *data*.
        """
        xlabel, ylabel = self._getAxesLabels(item)
        x_data = item.getXData(copy=False)
        y_data = item.getYData(copy=False)
        x_err = item.getXErrorData(copy=False)
        y_err = item.getYErrorData(copy=False)
        labels = [ylabel]
        data = [y_data]
        if x_err is not None:
            if numpy.isscalar(x_err):
                # Constant error: expand to one value per data point
                data.append(numpy.zeros_like(y_data) + x_err)
                labels.append(xlabel + "_errors")
            elif x_err.ndim == 1:
                # One symmetric error per data point
                data.append(x_err)
                labels.append(xlabel + "_errors")
            elif x_err.ndim == 2:
                # Asymmetric errors: row 0 is below, row 1 is above
                data.append(x_err[0])
                labels.append(xlabel + "_errors_below")
                data.append(x_err[1])
                labels.append(xlabel + "_errors_above")
        if y_err is not None:
            if numpy.isscalar(y_err):
                data.append(numpy.zeros_like(y_data) + y_err)
                labels.append(ylabel + "_errors")
            elif y_err.ndim == 1:
                data.append(y_err)
                labels.append(ylabel + "_errors")
            elif y_err.ndim == 2:
                data.append(y_err[0])
                labels.append(ylabel + "_errors_below")
                data.append(y_err[1])
                labels.append(ylabel + "_errors_above")
        return x_data, data, xlabel, labels
    @staticmethod
    def _selectWriteableOutputGroup(filename, parent):
        """Select the HDF5 entry group to write into.

        :param str filename: Output file path
        :param parent: Parent widget for error dialogs
        :return: The entry path inside the file, ``"/entry"`` for a new
            file, or None if cancelled or the file is not writeable
        """
        if os.path.exists(filename) and os.path.isfile(filename) \
                and os.access(filename, os.W_OK):
            # Existing writeable file: let the user pick/create a group
            entryPath = selectOutputGroup(filename)
            if entryPath is None:
                _logger.info("Save operation cancelled")
                return None
            return entryPath
        elif not os.path.exists(filename):
            # create new entry in new file
            return "/entry"
        else:
            # Path exists but is a directory or not writeable
            SaveAction._errorMessage('Save failed (file access issue)\n', parent=parent)
            return None
    def _saveCurveAsNXdata(self, curve, filename):
        """Write *curve* to *filename* as an NXdata group.

        :param curve: Curve item providing the data to save
        :param str filename: Output HDF5 file path
        :return: False if the output group selection was cancelled or
            failed, else the result of :func:`save_NXdata`
        """
        entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
        if entryPath is None:
            return False
        xlabel, ylabel = self._getAxesLabels(curve)
        return save_NXdata(
            filename,
            nxentry_name=entryPath,
            signal=curve.getYData(copy=False),
            axes=[curve.getXData(copy=False)],
            signal_name="y",
            axes_names=["x"],
            signal_long_name=ylabel,
            axes_long_names=[xlabel],
            signal_errors=curve.getYErrorData(copy=False),
            # NOTE(review): copy=True only for x errors, copy=False
            # elsewhere — presumably intentional, worth confirming
            axes_errors=[curve.getXErrorData(copy=True)],
            title=self.plot.getGraphTitle())
def _saveCurve(self, plot, filename, nameFilter):
if nameFilter not in self.DEFAULT_CURVE_FILTERS:
return False
# Check if a curve is to be saved
curve = plot.getActiveCurve()
# before calling _saveCurve, if there is no selected curve, we
# make sure there is only one curve on the graph
if curve is None:
curves = plot.getAllCurves()
if not curves:
self._errorMessage("No curve to be saved", parent=self.plot)
return False
curve = curves[0]
if nameFilter in self.CURVE_FILTERS_TXT:
filter_ = self.CURVE_FILTERS_TXT[nameFilter]
fmt = filter_['fmt']
csvdelim = filter_['delimiter']
autoheader = filter_['header']
else:
# .npy or nxdata
fmt, csvdelim, autoheader = ("", "", False)
if nameFilter == self.CURVE_FILTER_NXDATA:
return self._saveCurveAsNXdata(curve, filename)
xdata, data, xlabel, labels = self._get1dData(curve)
try:
save1D(filename,
xdata, data,
xlabel, labels,
fmt=fmt, csvdelim=csvdelim,
autoheader=autoheader)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
def _saveCurves(self, plot, filename, nameFilter):
if nameFilter not in self.DEFAULT_ALL_CURVES_FILTERS:
return False
curves = plot.getAllCurves()
if not curves:
self._errorMessage("No curves to be saved", parent=self.plot)
return False
curve = curves[0]
scanno = 1
try:
xdata, data, xlabel, labels = self._get1dData(curve)
specfile = savespec(filename,
xdata, data,
xlabel, labels,
fmt="%.7g", scan_number=1, mode="w",
write_file_header=True,
close_file=False)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
for curve in curves[1:]:
try:
scanno += 1
xdata, data, xlabel, labels = self._get1dData(curve)
specfile = savespec(specfile,
xdata, data,
xlabel, labels,
fmt="%.7g", scan_number=scanno,
write_file_header=False,
close_file=False)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
specfile.close()
return True
def _saveImage(self, plot, filename, nameFilter):
if nameFilter not in self.DEFAULT_IMAGE_FILTERS:
return False
image = plot.getActiveImage()
if image is None:
qt.QMessageBox.warning(
plot, "No Data", "No image to be saved")
return False
data = image.getData(copy=False)
# TODO Use silx.io for writing files
if nameFilter == self.IMAGE_FILTER_EDF:
edfFile = EdfFile(filename, access="w+")
edfFile.WriteImage({}, data, Append=0)
return True
elif nameFilter == self.IMAGE_FILTER_TIFF:
tiffFile = TiffIO(filename, mode='w')
tiffFile.writeImage(data, software='silx')
return True
elif nameFilter == self.IMAGE_FILTER_NUMPY:
try:
numpy.save(filename, data)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
elif nameFilter == self.IMAGE_FILTER_NXDATA:
entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
if entryPath is None:
return False
xorigin, yorigin = image.getOrigin()
xscale, yscale = image.getScale()
xaxis = xorigin + xscale * numpy.arange(data.shape[1])
yaxis = yorigin + yscale * numpy.arange(data.shape[0])
xlabel, ylabel = self._getAxesLabels(image)
interpretation = "image" if len(data.shape) == 2 else "rgba-image"
return save_NXdata(filename,
nxentry_name=entryPath,
signal=data,
axes=[yaxis, xaxis],
signal_name="image",
axes_names=["y", "x"],
axes_long_names=[ylabel, xlabel],
title=plot.getGraphTitle(),
interpretation=interpretation)
elif nameFilter in (self.IMAGE_FILTER_ASCII,
self.IMAGE_FILTER_CSV_COMMA,
self.IMAGE_FILTER_CSV_SEMICOLON,
self.IMAGE_FILTER_CSV_TAB):
csvdelim, filetype = {
self.IMAGE_FILTER_ASCII: (' ', 'txt'),
self.IMAGE_FILTER_CSV_COMMA: (',', 'csv'),
self.IMAGE_FILTER_CSV_SEMICOLON: (';', 'csv'),
self.IMAGE_FILTER_CSV_TAB: ('\t', 'csv'),
}[nameFilter]
height, width = data.shape
rows, cols = numpy.mgrid[0:height, 0:width]
try:
save1D(filename, rows.ravel(), (cols.ravel(), data.ravel()),
filetype=filetype,
xlabel='row',
ylabels=['column', 'value'],
csvdelim=csvdelim,
autoheader=True)
except IOError:
self._errorMessage('Save failed\n', parent=self.plot)
return False
return True
elif nameFilter == self.IMAGE_FILTER_RGB_PNG:
# Get displayed image
rgbaImage = image.getRgbaImageData(copy=False)
# Convert RGB QImage
qimage = convertArrayToQImage(rgbaImage[:, :, :3])
if qimage.save(filename, 'PNG'):
return True
else:
_logger.error('Failed to save image as %s', filename)
qt.QMessageBox.critical(
self.parent(),
'Save image as',
'Failed to save image')
return False
def _saveScatter(self, plot, filename, nameFilter):
if nameFilter not in self.DEFAULT_SCATTER_FILTERS:
return False
if nameFilter == self.SCATTER_FILTER_NXDATA:
entryPath = self._selectWriteableOutputGroup(filename, parent=self.plot)
if entryPath is None:
return False
scatter = plot.getScatter()
x = scatter.getXData(copy=False)
y = scatter.getYData(copy=False)
z = scatter.getValueData(copy=False)
xerror = scatter.getXErrorData(copy=False)
if isinstance(xerror, float):
xerror = xerror * numpy.ones(x.shape, dtype=numpy.float32)
yerror = scatter.getYErrorData(copy=False)
if isinstance(yerror, float):
yerror = yerror * numpy.ones(x.shape, dtype=numpy.float32)
xlabel = plot.getGraphXLabel()
ylabel = plot.getGraphYLabel()
return save_NXdata(
filename,
nxentry_name=entryPath,
signal=z,
axes=[x, y],
signal_name="values",
axes_names=["x", "y"],
axes_long_names=[xlabel, ylabel],
axes_errors=[xerror, yerror],
title=plot.getGraphTitle())
    def setFileFilter(self, dataKind, nameFilter, func, index=None, appendToFile=False):
        """Register a save function for a file dialog name filter.

        :param str dataKind: One of 'all', 'curve', 'curves', 'image',
            'scatter' — which kind of data the filter applies to
        :param str nameFilter: The file dialog name filter string
        :param callable func: Called as ``func(plot, filename, nameFilter)``
            to perform the save
        :param int index: Optional position of the filter in the ordered
            filter list (negative indices counted from the end)
        :param bool appendToFile: True to register the filter as appending
            to existing files (overwrite confirmation disabled)
        """
        assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')
        if appendToFile:
            self._appendFilters.append(nameFilter)
        # first append or replace the new filter to prevent collisions
        self._filters[dataKind][nameFilter] = func
        if index is None:
            # we are already done
            return
        # get the current ordered list of keys
        keyList = list(self._filters[dataKind].keys())
        # deal with negative indices
        if index < 0:
            index = len(keyList) + index
            if index < 0:
                index = 0
        if index >= len(keyList):
            # nothing to be done, already at the end
            txt = 'Requested index %d impossible, already at the end' % index
            _logger.info(txt)
            return
        # get the new ordered list
        oldIndex = keyList.index(nameFilter)
        del keyList[oldIndex]
        keyList.insert(index, nameFilter)
        # build the new filters
        newFilters = OrderedDict()
        for key in keyList:
            newFilters[key] = self._filters[dataKind][key]
        # and update the filters
        self._filters[dataKind] = newFilters
        return
    def getFileFilters(self, dataKind):
        """Return a copy of the ordered ``nameFilter -> function`` mapping
        registered for *dataKind* ('all', 'curve', 'curves', 'image' or
        'scatter'); mutating the copy does not affect the action."""
        assert dataKind in ('all', 'curve', 'curves', 'image', 'scatter')
        return self._filters[dataKind].copy()
def _actionTriggered(self, checked=False):
# Set-up filters
filters = OrderedDict()
# Add image filters if there is an active image
if self.plot.getActiveImage() is not None:
filters.update(self._filters['image'].items())
# Add curve filters if there is a curve to save
if (self.plot.getActiveCurve() is not None or
len(self.plot.getAllCurves()) == 1):
filters.update(self._filters['curve'].items())
if len(self.plot.getAllCurves()) >= 1:
filters.update(self._filters['curves'].items())
# Add scatter filters if there is a scatter
# todo: CSV
if self.plot.getScatter() is not None:
filters.update(self._filters['scatter'].items())
filters.update(self._filters['all'].items())
# Create and run File dialog
dialog = qt.QFileDialog(self.plot)
dialog.setOption(dialog.DontUseNativeDialog)
dialog.setWindowTitle("Output File Selection")
dialog.setModal(1)
dialog.setNameFilters(list(filters.keys()))
dialog.setFileMode(dialog.AnyFile)
dialog.setAcceptMode(dialog.AcceptSave)
def onFilterSelection(filt_):
# disable overwrite confirmation for NXdata types,
# because we append the data to existing files
if filt_ in self._appendFilters:
dialog.setOption(dialog.DontConfirmOverwrite)
else:
dialog.setOption(dialog.DontConfirmOverwrite, False)
dialog.filterSelected.connect(onFilterSelection)
if not dialog.exec_():
return False
nameFilter = dialog.selectedNameFilter()
filename = dialog.selectedFiles()[0]
dialog.close()
if '(' in nameFilter and ')' == nameFilter.strip()[-1]:
# Check for correct file extension
# Extract file extensions as .something
extensions = [ext[ext.find('.'):] for ext in
nameFilter[nameFilter.find('(') + 1:-1].split()]
for ext in extensions:
if (len(filename) > len(ext) and
filename[-len(ext):].lower() == ext.lower()):
break
else: # filename has no extension supported in nameFilter, add one
if len(extensions) >= 1:
filename += extensions[0]
# Handle save
func = filters.get(nameFilter, None)
if func is not None:
return func(self.plot, filename, nameFilter)
else:
_logger.error('Unsupported file filter: %s', nameFilter)
return False
def _plotAsPNG(plot):
    """Save a :class:`Plot` as PNG and return the payload bytes.

    :param plot: The :class:`Plot` to save
    """
    pngFile = BytesIO()
    plot.saveGraph(pngFile, fileFormat='png')
    pngFile.flush()
    pngFile.seek(0)
    data = pngFile.read()
    pngFile.close()
    return data
class PrintAction(PlotAction):
def __init__(self, plot, parent=None):
super(PrintAction, self).__init__(
plot, icon='document-print', text='Print...',
tooltip='Open print dialog',
triggered=self.printPlot,
checkable=False, parent=parent)
self.setShortcut(qt.QKeySequence.Print)
self.setShortcutContext(qt.Qt.WidgetShortcut)
def getPrinter(self):
return printer.getDefaultPrinter()
@property
@deprecated(replacement="getPrinter()", since_version="0.8.0")
def printer(self):
return self.getPrinter()
def printPlotAsWidget(self):
dialog = qt.QPrintDialog(self.getPrinter(), self.plot)
dialog.setWindowTitle('Print Plot')
if not dialog.exec_():
return False
# Print a snapshot of the plot widget at the top of the page
widget = self.plot.centralWidget()
painter = qt.QPainter()
if not painter.begin(self.getPrinter()):
return False
pageRect = self.getPrinter().pageRect()
xScale = pageRect.width() / widget.width()
yScale = pageRect.height() / widget.height()
scale = min(xScale, yScale)
painter.translate(pageRect.width() / 2., 0.)
painter.scale(scale, scale)
painter.translate(-widget.width() / 2., 0.)
widget.render(painter)
painter.end()
return True
def printPlot(self):
# Init printer and start printer dialog
dialog = qt.QPrintDialog(self.getPrinter(), self.plot)
dialog.setWindowTitle('Print Plot')
if not dialog.exec_():
return False
# Save Plot as PNG and make a pixmap from it with default dpi
pngData = _plotAsPNG(self.plot)
pixmap = qt.QPixmap()
pixmap.loadFromData(pngData, 'png')
xScale = self.getPrinter().pageRect().width() / pixmap.width()
yScale = self.getPrinter().pageRect().height() / pixmap.height()
scale = min(xScale, yScale)
# Draw pixmap with painter
painter = qt.QPainter()
if not painter.begin(self.getPrinter()):
return False
painter.drawPixmap(0, 0,
pixmap.width() * scale,
pixmap.height() * scale,
pixmap)
painter.end()
return True
class CopyAction(PlotAction):
def __init__(self, plot, parent=None):
super(CopyAction, self).__init__(
plot, icon='edit-copy', text='Copy plot',
tooltip='Copy a snapshot of the plot into the clipboard',
triggered=self.copyPlot,
checkable=False, parent=parent)
self.setShortcut(qt.QKeySequence.Copy)
self.setShortcutContext(qt.Qt.WidgetShortcut)
def copyPlot(self):
# Save Plot as PNG and make a QImage from it with default dpi
pngData = _plotAsPNG(self.plot)
image = qt.QImage.fromData(pngData, 'png')
qt.QApplication.clipboard().setImage(image)
| true | true |
f728ba4cdca2ea205374f7b691e644f87e84d989 | 4,873 | py | Python | src/cowrie/telnet/session.py | ProjectZeroDays/cowrie | 080c7231c56f84a90c205d8201f3e494c19bd20f | [
"BSD-3-Clause"
] | 1 | 2021-03-14T00:41:14.000Z | 2021-03-14T00:41:14.000Z | src/cowrie/telnet/session.py | ProjectZeroDays/cowrie | 080c7231c56f84a90c205d8201f3e494c19bd20f | [
"BSD-3-Clause"
] | null | null | null | src/cowrie/telnet/session.py | ProjectZeroDays/cowrie | 080c7231c56f84a90c205d8201f3e494c19bd20f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2015, 2016 GoSecure Inc.
"""
Telnet User Session management for the Honeypot
@author: Olivier Bilodeau <obilodeau@gosecure.ca>
"""
import traceback
from twisted.conch.ssh import session
from twisted.conch.telnet import ECHO, SGA, TelnetBootstrapProtocol
from twisted.internet import interfaces, protocol
from twisted.python import log
from zope.interface import implementer
from cowrie.insults import insults
from cowrie.shell import protocol as cproto
from cowrie.shell import pwd
class HoneyPotTelnetSession(TelnetBootstrapProtocol):
    """Telnet user session: sets up the fake user account, its environment
    and the interactive honeypot shell protocol for one authenticated
    client."""
    id = 0  # telnet can only have 1 simultaneous session, unlike SSH
    windowSize = [40, 80]
    # to be populated by HoneyPotTelnetAuthProtocol after auth
    transportId = None
    def __init__(self, username, server):
        self.username = username.decode()
        self.server = server
        try:
            # Look up the user in the honeypot's fake passwd database
            pwentry = pwd.Passwd().getpwnam(self.username)
            self.uid = pwentry["pw_uid"]
            self.gid = pwentry["pw_gid"]
            self.home = pwentry["pw_dir"]
        except KeyError:
            # Unknown user: fall back to generic unprivileged ids
            self.uid = 1001
            self.gid = 1001
            self.home = '/home'
        # Environment exposed to the emulated shell
        self.environ = {
            'LOGNAME': self.username,
            'USER': self.username,
            'SHELL': '/bin/bash',
            'HOME': self.home,
            'TMOUT': '1800'}
        if self.uid == 0:
            # root additionally gets the sbin directories in PATH
            self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
        else:
            self.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'
        # required because HoneyPotBaseProtocol relies on avatar.avatar.home
        self.avatar = self
        # Do the delayed file system initialization
        self.server.initFileSystem(self.home)
    def connectionMade(self):
        processprotocol = TelnetSessionProcessProtocol(self)
        # If we are dealing with a proper Telnet client: enable server echo
        if self.transport.options:
            self.transport.willChain(SGA)
            self.transport.willChain(ECHO)
        self.protocol = insults.LoggingTelnetServerProtocol(
            cproto.HoneyPotInteractiveTelnetProtocol, self)
        # somewhere in Twisted this exception gets lost. Log explicitly here
        try:
            self.protocol.makeConnection(processprotocol)
            processprotocol.makeConnection(session.wrapProtocol(self.protocol))
        except Exception:
            log.msg(traceback.format_exc())
    def connectionLost(self, reason):
        # Drop references so the session and its helpers can be collected
        TelnetBootstrapProtocol.connectionLost(self, reason)
        self.server = None
        self.avatar = None
        self.protocol = None
    def logout(self):
        log.msg(f'avatar {self.username} logging out')
# Taken and adapted from
# https://github.com/twisted/twisted/blob/26ad16ab41db5f0f6d2526a891e81bbd3e260247/twisted/conch/ssh/session.py#L186
@implementer(interfaces.ITransport)
class TelnetSessionProcessProtocol(protocol.ProcessProtocol):
    """
    I am both an L{IProcessProtocol} and an L{ITransport}.

    I am a transport to the remote endpoint and a process protocol to the
    local subsystem.
    """
    def __init__(self, sess):
        # The telnet session this protocol bridges to
        self.session = sess
        # Tracks whether one of STDOUT/STDERR has already closed
        self.lostOutOrErrFlag = False
    def outReceived(self, data):
        # Forward subsystem stdout to the telnet session
        self.session.write(data)
    def errReceived(self, err):
        log.msg(f"Error received: {err}")
        # EXTENDED_DATA_STDERR is from ssh, no equivalent in telnet?
        # self.session.writeExtended(connection.EXTENDED_DATA_STDERR, err)
    def outConnectionLost(self):
        """
        EOF should only be sent when both STDOUT and STDERR have been closed.
        """
        if self.lostOutOrErrFlag:
            self.session.conn.sendEOF(self.session)
        else:
            self.lostOutOrErrFlag = True
    def errConnectionLost(self):
        """
        See outConnectionLost().
        """
        self.outConnectionLost()
    def connectionLost(self, reason=None):
        self.session.loseConnection()
        self.session = None
    def processEnded(self, reason=None):
        """
        here SSH is doing signal handling, I don't think telnet supports that so
        I'm simply going to bail out
        """
        log.msg(f"Process ended. Telnet Session disconnected: {reason}")
        self.session.loseConnection()
    def getHost(self):
        """
        Return the host from my session's transport.
        """
        return self.session.transport.getHost()
    def getPeer(self):
        """
        Return the peer from my session's transport.
        """
        return self.session.transport.getPeer()
    def write(self, data):
        # ITransport: forward writes to the telnet session
        self.session.write(data)
    def writeSequence(self, seq):
        # ITransport: concatenate the chunks and write them in one go
        self.session.write(b''.join(seq))
    def loseConnection(self):
        self.session.loseConnection()
| 30.841772 | 116 | 0.646214 |
import traceback
from twisted.conch.ssh import session
from twisted.conch.telnet import ECHO, SGA, TelnetBootstrapProtocol
from twisted.internet import interfaces, protocol
from twisted.python import log
from zope.interface import implementer
from cowrie.insults import insults
from cowrie.shell import protocol as cproto
from cowrie.shell import pwd
class HoneyPotTelnetSession(TelnetBootstrapProtocol):
id = 0
windowSize = [40, 80]
transportId = None
def __init__(self, username, server):
self.username = username.decode()
self.server = server
try:
pwentry = pwd.Passwd().getpwnam(self.username)
self.uid = pwentry["pw_uid"]
self.gid = pwentry["pw_gid"]
self.home = pwentry["pw_dir"]
except KeyError:
self.uid = 1001
self.gid = 1001
self.home = '/home'
self.environ = {
'LOGNAME': self.username,
'USER': self.username,
'SHELL': '/bin/bash',
'HOME': self.home,
'TMOUT': '1800'}
if self.uid == 0:
self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
else:
self.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'
self.avatar = self
self.server.initFileSystem(self.home)
def connectionMade(self):
processprotocol = TelnetSessionProcessProtocol(self)
if self.transport.options:
self.transport.willChain(SGA)
self.transport.willChain(ECHO)
self.protocol = insults.LoggingTelnetServerProtocol(
cproto.HoneyPotInteractiveTelnetProtocol, self)
try:
self.protocol.makeConnection(processprotocol)
processprotocol.makeConnection(session.wrapProtocol(self.protocol))
except Exception:
log.msg(traceback.format_exc())
def connectionLost(self, reason):
TelnetBootstrapProtocol.connectionLost(self, reason)
self.server = None
self.avatar = None
self.protocol = None
def logout(self):
log.msg(f'avatar {self.username} logging out')
# Fix: the @implementer decorator line was garbled to a bare
# `lementer(interfaces.ITransport)` call, which raises NameError at import
# time; restore the zope.interface decorator.
@implementer(interfaces.ITransport)
class TelnetSessionProcessProtocol(protocol.ProcessProtocol):
    """Bridge between the telnet session and the local shell subsystem.

    Acts as an ``ITransport`` towards the remote endpoint and as a
    process protocol towards the local subsystem.
    """
    def __init__(self, sess):
        self.session = sess
        # Tracks whether one of STDOUT/STDERR has already closed
        self.lostOutOrErrFlag = False
    def outReceived(self, data):
        # Forward subsystem stdout to the telnet session
        self.session.write(data)
    def errReceived(self, err):
        # Telnet has no stderr side channel, so only log it
        log.msg(f"Error received: {err}")
    def outConnectionLost(self):
        # Send EOF only once both STDOUT and STDERR have been closed
        if self.lostOutOrErrFlag:
            self.session.conn.sendEOF(self.session)
        else:
            self.lostOutOrErrFlag = True
    def errConnectionLost(self):
        # Same bookkeeping as outConnectionLost()
        self.outConnectionLost()
    def connectionLost(self, reason=None):
        self.session.loseConnection()
        self.session = None
    def processEnded(self, reason=None):
        log.msg(f"Process ended. Telnet Session disconnected: {reason}")
        self.session.loseConnection()
    def getHost(self):
        # ITransport: delegate to the session's transport
        return self.session.transport.getHost()
    def getPeer(self):
        # ITransport: delegate to the session's transport
        return self.session.transport.getPeer()
    def write(self, data):
        self.session.write(data)
    def writeSequence(self, seq):
        self.session.write(b''.join(seq))
    def loseConnection(self):
        self.session.loseConnection()
| true | true |
f728bab1baffa1d80c6916c846a11341858f0334 | 2,584 | py | Python | project/03-asvspoof-mega/03_fuse_score_evaluate.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
] | null | null | null | project/03-asvspoof-mega/03_fuse_score_evaluate.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
] | null | null | null | project/03-asvspoof-mega/03_fuse_score_evaluate.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
"""
Wrapper to fuse score and compute EER and min tDCF
Simple score averaging.
Usage:
python 03_fuse_score_evaluate.py log_output_testset_1 log_output_testset_2 ...
The log_output_testset is produced by the pytorch code, for
example, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset
It has information like:
...
Generating 71230,LA_E_9999427,0,43237,0, time: 0.005s
Output, LA_E_9999487, 0, 0.172325
...
(See README for the format of this log)
This script will extract the line starts with "Output, ..."
"""
import os
import sys
import numpy as np
from sandbox import eval_asvspoof
def parse_txt(file_path):
    """Collect bona fide / spoofed scores from one score log file.

    Lines of interest look like ``Output, <trial-name>, <flag>, <score>``
    where flag 1 marks bona fide and flag 0 marks spoofed trials.

    :return: (bonafide_scores, spoofed_scores, bonafide_names,
        spoofed_names) with scores as numpy arrays and names as lists
    """
    bona_scores, bona_names = [], []
    spoof_scores, spoof_names = [], []
    with open(file_path, 'r') as log_file:
        for line in log_file:
            if not line.startswith('Output,'):
                continue
            fields = line.split(',')
            # NOTE: fields keep their surrounding whitespace, matching the
            # log format "Output, LA_E_9999487, 0, 0.172325"
            trial_name = fields[1]
            score = float(fields[-1])
            if int(fields[2]):
                bona_names.append(trial_name)
                bona_scores.append(score)
            else:
                spoof_scores.append(score)
                spoof_names.append(trial_name)
    return (np.array(bona_scores), np.array(spoof_scores),
            bona_names, spoof_names)
def fuse_score(file_path_lists):
    """Fuse several score logs by averaging scores per trial name.

    :param file_path_lists: paths of log files produced by the evaluation
    :return: (fused_bonafide, fused_spoofed) numpy arrays of per-trial
        mean scores, in first-seen trial order
    """
    bona_by_name = {}
    spoof_by_name = {}
    for data_path in file_path_lists:
        bona, spoof, bona_names, spoof_names = parse_txt(data_path)
        for score, trial in zip(bona, bona_names):
            bona_by_name.setdefault(trial, []).append(score)
        for score, trial in zip(spoof, spoof_names):
            spoof_by_name.setdefault(trial, []).append(score)
    fused_bonafide = np.array([np.mean(scores) for scores in bona_by_name.values()])
    fused_spoofed = np.array([np.mean(scores) for scores in spoof_by_name.values()])
    return fused_bonafide, fused_spoofed
if __name__ == "__main__":
    # Each positional argument is one log_output_testset file to fuse
    data_paths = sys.argv[1:]
    bonafide, spoofed = fuse_score(data_paths)
    # Compute min t-DCF and EER on the averaged scores
    mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)
    print("Score file: {:s}".format(str(data_paths)))
    print("mintDCF: {:1.4f}".format(mintDCF))
    print("EER: {:2.3f}%".format(eer * 100))
    print("Threshold: {:f}".format(threshold))
| 32.3 | 78 | 0.630805 |
import os
import sys
import numpy as np
from sandbox import eval_asvspoof
def parse_txt(file_path):
bonafide = []
bonafide_file_name = []
spoofed = []
spoofed_file_name = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if line.startswith('Output,'):
temp = line.split(',')
flag = int(temp[2])
name = temp[1]
if flag:
bonafide_file_name.append(name)
bonafide.append(float(temp[-1]))
else:
spoofed.append(float(temp[-1]))
spoofed_file_name.append(name)
bonafide = np.array(bonafide)
spoofed = np.array(spoofed)
return bonafide, spoofed, bonafide_file_name, spoofed_file_name
def fuse_score(file_path_lists):
bonafide_score = {}
spoofed_score = {}
for data_path in file_path_lists:
bonafide, spoofed, bona_name, spoof_name = parse_txt(data_path)
for score, name in zip(bonafide, bona_name):
if name in bonafide_score:
bonafide_score[name].append(score)
else:
bonafide_score[name] = [score]
for score, name in zip(spoofed, spoof_name):
if name in spoofed_score:
spoofed_score[name].append(score)
else:
spoofed_score[name] = [score]
fused_bonafide = np.array([np.mean(y) for x, y in bonafide_score.items()])
fused_spoofed = np.array([np.mean(y) for x, y in spoofed_score.items()])
return fused_bonafide, fused_spoofed
if __name__ == "__main__":
data_paths = sys.argv[1:]
bonafide, spoofed = fuse_score(data_paths)
mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)
print("Score file: {:s}".format(str(data_paths)))
print("mintDCF: {:1.4f}".format(mintDCF))
print("EER: {:2.3f}%".format(eer * 100))
print("Threshold: {:f}".format(threshold))
| true | true |
f728bb4c3bd05022e7c882dda3adb58f34d6f4f1 | 1,873 | py | Python | db_test_decl.py | askanio8/sqlalchemyy | 73fa16317072455fe3bc2e9ae22601c95c86793f | [
"Apache-2.0"
] | null | null | null | db_test_decl.py | askanio8/sqlalchemyy | 73fa16317072455fe3bc2e9ae22601c95c86793f | [
"Apache-2.0"
] | null | null | null | db_test_decl.py | askanio8/sqlalchemyy | 73fa16317072455fe3bc2e9ae22601c95c86793f | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from alchemy_decl import Base, Book, Author
# echo=True logs every generated SQL statement through Python's standard
# logging module, so all queries issued below are visible.
engine = create_engine("mysql+mysqlconnector://root:root@localhost/pylounge2", echo=True)
session = sessionmaker(bind=engine)
s = session()
# Insert a single Author row
author_one = Author(name="Лутц")
s.add(author_one)
s.commit()
# Insert another Author row
author_one = Author(name="НеЛутц")
s.add(author_one)
s.commit()
# Insert a single Book row
book_one = Book(title="Чистый Python", author_id=1, genre="компьютерная литература", price=1500)
s.add(book_one)
s.commit()
# Bulk insert several Book rows at once
s.add_all([Book(title="Чистый Чистый Python", author_id=1, genre="компьютерная литература", price=500),
           Book(title="НеЧистый Python", author_id=2, genre="компьютерная литература", price=2500),
           Book(title="Python как Питон", author_id=1, genre="компьютерная литература", price=2976)
           ])
s.commit()
# Value of the title column of the first row in the Books table
print(s.query(Book).first().title)
# Query with ordering and a row limit
for title, price in s.query(Book.title, Book.price).order_by(Book.title).limit(2):
    print(title, price)
print('\n\n\n')
# Query with JOIN and GROUP BY
for row in s.query(Book, Author).filter(Book.author_id == Author.id_author).filter(Book.price > 1000).group_by(Author.name):
    print(row.Book.title, ' ', row.Author.name)
print('\n\n\n')
print([(row.Book.title, row.Author.name) for row in s.query(Book, Author).join(Author).all()])
# Update a row.
# BUG FIX: filter_by() only accepts keyword arguments (filter_by(name=...));
# passing a SQL expression positionally raises TypeError.  Use filter() with
# the expression instead.  Also, .one() never returns a list, so the old
# `!= []` guard was dead code; one_or_none() implements the evident intent
# of handling a missing row.
autor_query = s.query(Author).filter(Author.name == 'НеЛутц').one_or_none()
if autor_query is not None:
    autor_query.name = 'Бизли'
    s.add(autor_query)
    s.commit()
| 33.446429 | 124 | 0.728777 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from alchemy_decl import Base, Book, Author
engine = create_engine("mysql+mysqlconnector://root:root@localhost/pylounge2", echo=True)
session = sessionmaker(bind=engine)
s = session()
author_one = Author(name="Лутц")
s.add(author_one)
s.commit()
author_one = Author(name="НеЛутц")
s.add(author_one)
s.commit()
book_one = Book(title="Чистый Python", author_id=1, genre="компьютерная литература", price=1500)
s.add(book_one)
s.commit()
s.add_all([Book(title="Чистый Чистый Python", author_id=1, genre="компьютерная литература", price=500),
Book(title="НеЧистый Python", author_id=2, genre="компьютерная литература", price=2500),
Book(title="Python как Питон", author_id=1, genre="компьютерная литература", price=2976)
])
s.commit()
print(s.query(Book).first().title)
for title, price in s.query(Book.title, Book.price).order_by(Book.title).limit(2):
print(title, price)
print('\n\n\n')
for row in s.query(Book, Author).filter(Book.author_id == Author.id_author).filter(Book.price > 1000).group_by(Author.name):
print(row.Book.title, ' ', row.Author.name)
print('\n\n\n')
print([(row.Book.title, row.Author.name) for row in s.query(Book, Author).join(Author).all()])
autor_query = s.query(Author).filter_by(Author.name == 'НеЛутц').one()
if autor_query != []:
autor_query.name = 'Бизли'
s.add(autor_query)
s.commit()
| true | true |
f728bd688632fc6b6feb958f4e541b69dd4c20c1 | 2,108 | py | Python | mnist/storage.py | onsabbatical/PoET-BiN | 5226cf7e8e34316a3ced73ce30528ac49730ecf4 | [
"MIT"
] | null | null | null | mnist/storage.py | onsabbatical/PoET-BiN | 5226cf7e8e34316a3ced73ce30528ac49730ecf4 | [
"MIT"
] | null | null | null | mnist/storage.py | onsabbatical/PoET-BiN | 5226cf7e8e34316a3ced73ce30528ac49730ecf4 | [
"MIT"
] | null | null | null | import torch
import numpy as np
def store_value(main_array, cu_fl, i, name):
    """Append a 4-D batch to *main_array*; dump to ``.npy`` every 100 batches.

    :param main_array: uint8 accumulator tensor; its first row is a dummy
        placeholder and is dropped when saving
    :param cu_fl: float tensor batch, converted to uint8 before appending
    :param i: 0-based batch index; batches 99, 199, ... trigger a save
    :param name: file name prefix for the ``.npy`` dumps
    :return: the accumulator (reset to a fresh 1-row placeholder after a save)
    """
    cu_uint8 = cu_fl.type(torch.ByteTensor)
    main_array = torch.cat((main_array, cu_uint8), 0)
    if (i + 1) % 100 == 0:
        # Fix: the CPU numpy copy was computed but never used (dead code) —
        # the raw tensor slice was saved instead.  Save the numpy array
        # (without the dummy first row) as evidently intended.
        main_array_np = main_array.cpu().numpy()
        np.save(name + str(i // 100) + '.npy', main_array_np[1:, :, :, :])
        main_array = torch.ByteTensor(1, np.shape(main_array)[1],
                                      np.shape(main_array)[2],
                                      np.shape(main_array)[3])
    return main_array
def store_value_3d(main_array, cu_fl, i, name):
    """Append a batch to a 3-D accumulator, dropping the channel dimension.

    The reshape assumes ``cu_fl`` has shape (batch, 1, H, W) so its
    singleton channel axis can be squeezed — TODO confirm with callers.
    Otherwise behaves like :func:`store_value`: every 100th batch is dumped
    to ``.npy`` and the accumulator is reset to a 1-row placeholder.
    """
    cu_uint8 = cu_fl.type(torch.ByteTensor)
    cu_uint8 = torch.reshape(cu_uint8, (cu_fl.size()[0], cu_fl.size()[2], cu_fl.size()[3]))
    main_array = torch.cat((main_array, cu_uint8), 0)
    if (i + 1) % 100 == 0:
        # Fix: save the computed numpy copy (previously dead code) rather
        # than the raw tensor slice.
        main_array_np = main_array.cpu().numpy()
        np.save(name + str(i // 100) + '.npy', main_array_np[1:, :, :])
        main_array = torch.ByteTensor(1, np.shape(main_array)[1],
                                      np.shape(main_array)[2])
    return main_array
def store_value_2d(main_array, cu_fl, i, name):
    """Append a 2-D batch to *main_array*; dump to ``.npy`` every 100 batches.

    Same contract as :func:`store_value` but for 2-D (rows, features)
    accumulators: the dummy first row is dropped when saving and the
    accumulator is reset to a 1-row placeholder after each dump.
    """
    cu_uint8 = cu_fl.type(torch.ByteTensor)
    main_array = torch.cat((main_array, cu_uint8), 0)
    if (i + 1) % 100 == 0:
        # Fix: save the computed numpy copy (previously dead code) rather
        # than the raw tensor slice.
        main_array_np = main_array.cpu().numpy()
        np.save(name + str(i // 100) + '.npy', main_array_np[1:, :])
        main_array = torch.ByteTensor(1, np.shape(main_array)[1])
    return main_array
def store_value2(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
#print(i)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:])
main_array = torch.ByteTensor(1)
return main_array
def store_all_weights(dict_wb):
weight_matrix = torch.Tensor(1,8).type(torch.cuda.FloatTensor)
bias_matrix = torch.Tensor(1).type(torch.cuda.FloatTensor)
for items in dict_wb:
print(weight_matrix.size())
if 'weight' in items:
print(dict_wb[items].size())
weight_matrix = torch.cat((weight_matrix,dict_wb[items]),0)
if 'bias' in items:
bias_matrix = torch.cat((bias_matrix,dict_wb[items]),0)
np.save('weight_matrix.npy',weight_matrix[1:,:].cpu().numpy())
np.save('bias_matrix.npy',bias_matrix[1:].cpu().numpy()) | 31.462687 | 106 | 0.702562 | import torch
import numpy as np
def store_value(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2],np.shape(main_array)[3])
return main_array
def store_value_3d(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
cu_uint8 = torch.reshape(cu_uint8,(cu_fl.size()[0],cu_fl.size()[2],cu_fl.size()[3]))
main_array = torch.cat((main_array,cu_uint8),0)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2])
return main_array
def store_value_2d(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1])
return main_array
def store_value2(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:])
main_array = torch.ByteTensor(1)
return main_array
def store_all_weights(dict_wb):
weight_matrix = torch.Tensor(1,8).type(torch.cuda.FloatTensor)
bias_matrix = torch.Tensor(1).type(torch.cuda.FloatTensor)
for items in dict_wb:
print(weight_matrix.size())
if 'weight' in items:
print(dict_wb[items].size())
weight_matrix = torch.cat((weight_matrix,dict_wb[items]),0)
if 'bias' in items:
bias_matrix = torch.cat((bias_matrix,dict_wb[items]),0)
np.save('weight_matrix.npy',weight_matrix[1:,:].cpu().numpy())
np.save('bias_matrix.npy',bias_matrix[1:].cpu().numpy()) | true | true |
f728bd8d10117b9a9aeb142dcd5a0b80154096c8 | 11,964 | py | Python | tanager_feeder/dialogs/dialog.py | first-mode/tanager-feeder | ac9d961439caad7d6c9b861ed27d0192de77edb4 | [
"MIT"
] | null | null | null | tanager_feeder/dialogs/dialog.py | first-mode/tanager-feeder | ac9d961439caad7d6c9b861ed27d0192de77edb4 | [
"MIT"
] | null | null | null | tanager_feeder/dialogs/dialog.py | first-mode/tanager-feeder | ac9d961439caad7d6c9b861ed27d0192de77edb4 | [
"MIT"
] | 1 | 2021-04-23T00:03:46.000Z | 2021-04-23T00:03:46.000Z | import tkinter as tk
from tkinter import Frame, Button, Tk, TclError
from typing import Dict, Optional
from tanager_feeder import utils
class Dialog:
def __init__(
self,
controller,
title: str,
label: str,
buttons: Dict,
width: Optional[int] = None,
height: Optional[int] = None,
allow_exit: bool = True,
button_width: int = 20,
info_string: Optional[str] = None,
start_mainloop: bool = True,
):
self.controller = controller
if self.controller is not None:
self.tk_format = utils.TkFormat(self.controller.config_info)
if width is None or height is None:
self.top = tk.Toplevel(controller.master, bg=self.tk_format.bg)
else:
self.top = tk.Toplevel(controller.master, width=width, height=height, bg=self.tk_format.bg)
if info_string is not None:
self.controller.log(info_string)
else:
self.tk_format = utils.TkFormat()
self.top = Tk()
self.top.configure(background=self.tk_format.bg)
self.top.attributes("-topmost", 1)
self.top.attributes("-topmost", 0)
self.label_frame = Frame(self.top, bg=self.tk_format.bg)
self.label_frame.pack(side=tk.TOP)
self.__label = tk.Label(self.label_frame, fg=self.tk_format.textcolor, text=label, bg=self.tk_format.bg)
self.set_label_text(label, log_string=info_string)
if label != "":
self.__label.pack(pady=(10, 10), padx=(10, 10))
self.button_width = button_width
self.buttons = buttons
self.set_buttons(buttons)
self.top.wm_title(title)
self.allow_exit = allow_exit
self.top.protocol("WM_DELETE_WINDOW", self.on_closing)
if (
self.controller is None and start_mainloop
): # If there's no controller and this is the Tk object, might want to start the mainloop here, or might want
# to make additional modifications first in a subclass.
self.top.mainloop()
@property
def label(self):
return self.__label.cget("text")
@label.setter
def label(self, val: str):
self.__label.configure(text=val)
def set_title(self, newtitle: str):
self.top.wm_title(newtitle)
def set_label_text(self, newlabel: str, log_string: Optional[str] = None):
try:
self.__label.config(fg=self.tk_format.textcolor, text=newlabel)
except TclError:
print("Could not set label.")
if log_string is not None and self.controller is not None:
self.controller.log(log_string)
def set_buttons(self, buttons: Dict, button_width: Optional[int] = None):
self.buttons = buttons
if button_width is None:
button_width = self.button_width
else:
self.button_width = button_width
# Sloppy way to check if button_frame already exists and reset it if it does.
try:
# pylint: disable = access-member-before-definition
self.button_frame.destroy()
except AttributeError:
pass
self.button_frame = Frame(self.top, bg=self.tk_format.bg)
self.button_frame.pack(side=tk.BOTTOM)
self.tk_buttons = []
for button in buttons:
if "ok" in button.lower():
self.ok_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="OK", command=self.ok, width=self.button_width
)
self.ok_button.bind("<Return>", self.ok)
self.tk_buttons.append(self.ok_button)
self.ok_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "yes to all" in button.lower():
self.yes_to_all_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes to all",
command=self.yes_to_all,
width=self.button_width,
)
self.yes_to_all_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.yes_to_all_button)
elif "yes" in button.lower():
self.yes_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes",
bg="light gray",
command=self.yes,
width=self.button_width,
)
self.tk_buttons.append(self.yes_button)
self.yes_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "no" in button.lower():
self.no_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="No", command=self.no, width=self.button_width
)
self.no_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.no_button)
elif "cancel_queue" in button.lower():
self.cancel_queue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel_queue,
width=self.button_width,
)
self.cancel_queue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_queue_button)
elif "cancel" in button.lower():
self.cancel_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel,
width=self.button_width,
)
self.cancel_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_button)
elif "retry" in button.lower():
self.retry_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Retry",
command=self.retry,
width=self.button_width,
)
self.retry_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.retry_button)
elif "exit" in button.lower():
self.exit_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Exit",
command=self.exit,
width=self.button_width,
)
self.exit_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.exit_button)
elif "work offline" in button.lower():
self.offline_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Work offline",
command=self.work_offline,
width=self.button_width,
)
self.offline_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.offline_button)
elif "pause" in button.lower():
self.pause_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Pause",
command=self.pause,
width=self.button_width,
)
self.pause_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.pause_button)
elif "continue" in button.lower():
self.continue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Continue",
command=self.cont,
width=self.button_width,
)
self.continue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.continue_button)
elif "close" in button.lower():
self.close_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Close",
command=self.close,
width=self.button_width,
)
self.close_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.close_button)
elif "reset" in button.lower():
self.reset_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Reset",
command=self.reset,
width=self.button_width,
)
self.reset_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.reset_button)
elif "change ip" in button.lower():
self.ip_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Change IP",
command=self.change_ip,
width=self.button_width,
)
self.ip_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.ip_button)
for tk_button in self.tk_buttons:
tk_button.config(
fg=self.tk_format.buttontextcolor,
highlightbackground=self.tk_format.highlightbackgroundcolor,
bg=self.tk_format.buttonbackgroundcolor,
)
def on_closing(self):
if self.allow_exit:
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def reset(self):
functions = self.buttons["reset"]
self.execute(functions, close=False)
def change_ip(self):
functions = self.buttons["Change IP"]
self.execute(functions)
def close(self):
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def retry(self):
self.close()
functions = self.buttons["retry"]
self.execute(functions, False)
def exit(self):
self.top.destroy()
utils.exit_func()
def cont(self):
functions = self.buttons["continue"]
self.execute(functions, close=False)
def pause(self):
functions = self.buttons["pause"]
self.execute(functions, close=False)
def ok(self, event=None):
# pylint: disable = unused-argument
functions = self.buttons["ok"]
self.execute(functions)
def yes(self):
functions = self.buttons["yes"]
self.execute(functions)
def yes_to_all(self):
functions = self.buttons["yes to all"]
self.execute(functions)
def no(self):
functions = self.buttons["no"]
self.execute(functions)
def cancel(self):
functions = self.buttons["cancel"]
self.execute(functions)
def cancel_queue(self):
functions = self.buttons["cancel_queue"]
self.execute(functions, close=False)
def execute(self, function_info, close=True):
for function in function_info:
args = function_info[function]
function(*args)
if close:
self.close()
def work_offline(self):
self.close()
functions = self.buttons["work offline"]
self.execute(functions, close=False)
| 38.223642 | 119 | 0.540288 | import tkinter as tk
from tkinter import Frame, Button, Tk, TclError
from typing import Dict, Optional
from tanager_feeder import utils
class Dialog:
def __init__(
self,
controller,
title: str,
label: str,
buttons: Dict,
width: Optional[int] = None,
height: Optional[int] = None,
allow_exit: bool = True,
button_width: int = 20,
info_string: Optional[str] = None,
start_mainloop: bool = True,
):
self.controller = controller
if self.controller is not None:
self.tk_format = utils.TkFormat(self.controller.config_info)
if width is None or height is None:
self.top = tk.Toplevel(controller.master, bg=self.tk_format.bg)
else:
self.top = tk.Toplevel(controller.master, width=width, height=height, bg=self.tk_format.bg)
if info_string is not None:
self.controller.log(info_string)
else:
self.tk_format = utils.TkFormat()
self.top = Tk()
self.top.configure(background=self.tk_format.bg)
self.top.attributes("-topmost", 1)
self.top.attributes("-topmost", 0)
self.label_frame = Frame(self.top, bg=self.tk_format.bg)
self.label_frame.pack(side=tk.TOP)
self.__label = tk.Label(self.label_frame, fg=self.tk_format.textcolor, text=label, bg=self.tk_format.bg)
self.set_label_text(label, log_string=info_string)
if label != "":
self.__label.pack(pady=(10, 10), padx=(10, 10))
self.button_width = button_width
self.buttons = buttons
self.set_buttons(buttons)
self.top.wm_title(title)
self.allow_exit = allow_exit
self.top.protocol("WM_DELETE_WINDOW", self.on_closing)
if (
self.controller is None and start_mainloop
):
# to make additional modifications first in a subclass.
self.top.mainloop()
@property
def label(self):
return self.__label.cget("text")
@label.setter
def label(self, val: str):
self.__label.configure(text=val)
def set_title(self, newtitle: str):
self.top.wm_title(newtitle)
def set_label_text(self, newlabel: str, log_string: Optional[str] = None):
try:
self.__label.config(fg=self.tk_format.textcolor, text=newlabel)
except TclError:
print("Could not set label.")
if log_string is not None and self.controller is not None:
self.controller.log(log_string)
def set_buttons(self, buttons: Dict, button_width: Optional[int] = None):
self.buttons = buttons
if button_width is None:
button_width = self.button_width
else:
self.button_width = button_width
# Sloppy way to check if button_frame already exists and reset it if it does.
try:
# pylint: disable = access-member-before-definition
self.button_frame.destroy()
except AttributeError:
pass
self.button_frame = Frame(self.top, bg=self.tk_format.bg)
self.button_frame.pack(side=tk.BOTTOM)
self.tk_buttons = []
for button in buttons:
if "ok" in button.lower():
self.ok_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="OK", command=self.ok, width=self.button_width
)
self.ok_button.bind("<Return>", self.ok)
self.tk_buttons.append(self.ok_button)
self.ok_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "yes to all" in button.lower():
self.yes_to_all_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes to all",
command=self.yes_to_all,
width=self.button_width,
)
self.yes_to_all_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.yes_to_all_button)
elif "yes" in button.lower():
self.yes_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes",
bg="light gray",
command=self.yes,
width=self.button_width,
)
self.tk_buttons.append(self.yes_button)
self.yes_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "no" in button.lower():
self.no_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="No", command=self.no, width=self.button_width
)
self.no_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.no_button)
elif "cancel_queue" in button.lower():
self.cancel_queue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel_queue,
width=self.button_width,
)
self.cancel_queue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_queue_button)
elif "cancel" in button.lower():
self.cancel_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel,
width=self.button_width,
)
self.cancel_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_button)
elif "retry" in button.lower():
self.retry_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Retry",
command=self.retry,
width=self.button_width,
)
self.retry_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.retry_button)
elif "exit" in button.lower():
self.exit_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Exit",
command=self.exit,
width=self.button_width,
)
self.exit_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.exit_button)
elif "work offline" in button.lower():
self.offline_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Work offline",
command=self.work_offline,
width=self.button_width,
)
self.offline_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.offline_button)
elif "pause" in button.lower():
self.pause_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Pause",
command=self.pause,
width=self.button_width,
)
self.pause_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.pause_button)
elif "continue" in button.lower():
self.continue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Continue",
command=self.cont,
width=self.button_width,
)
self.continue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.continue_button)
elif "close" in button.lower():
self.close_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Close",
command=self.close,
width=self.button_width,
)
self.close_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.close_button)
elif "reset" in button.lower():
self.reset_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Reset",
command=self.reset,
width=self.button_width,
)
self.reset_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.reset_button)
elif "change ip" in button.lower():
self.ip_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Change IP",
command=self.change_ip,
width=self.button_width,
)
self.ip_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.ip_button)
for tk_button in self.tk_buttons:
tk_button.config(
fg=self.tk_format.buttontextcolor,
highlightbackground=self.tk_format.highlightbackgroundcolor,
bg=self.tk_format.buttonbackgroundcolor,
)
def on_closing(self):
if self.allow_exit:
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def reset(self):
functions = self.buttons["reset"]
self.execute(functions, close=False)
def change_ip(self):
functions = self.buttons["Change IP"]
self.execute(functions)
def close(self):
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def retry(self):
self.close()
functions = self.buttons["retry"]
self.execute(functions, False)
def exit(self):
self.top.destroy()
utils.exit_func()
def cont(self):
functions = self.buttons["continue"]
self.execute(functions, close=False)
def pause(self):
functions = self.buttons["pause"]
self.execute(functions, close=False)
def ok(self, event=None):
# pylint: disable = unused-argument
functions = self.buttons["ok"]
self.execute(functions)
def yes(self):
functions = self.buttons["yes"]
self.execute(functions)
def yes_to_all(self):
functions = self.buttons["yes to all"]
self.execute(functions)
def no(self):
functions = self.buttons["no"]
self.execute(functions)
def cancel(self):
functions = self.buttons["cancel"]
self.execute(functions)
def cancel_queue(self):
functions = self.buttons["cancel_queue"]
self.execute(functions, close=False)
def execute(self, function_info, close=True):
for function in function_info:
args = function_info[function]
function(*args)
if close:
self.close()
def work_offline(self):
self.close()
functions = self.buttons["work offline"]
self.execute(functions, close=False)
| true | true |
f728bf065f4ac3837c3fe6a2a89d3c689748abef | 5,360 | py | Python | third_party/tflite-micro/tensorflow/lite/micro/tools/metrics/create_size_log.py | keadwen/CFU-Playground | 74c79158e85e1365170ececd1d91ea3fa48faba0 | [
"Apache-2.0"
] | 1 | 2022-01-19T13:47:13.000Z | 2022-01-19T13:47:13.000Z | third_party/tflite-micro/tensorflow/lite/micro/tools/metrics/create_size_log.py | keadwen/CFU-Playground | 74c79158e85e1365170ececd1d91ea3fa48faba0 | [
"Apache-2.0"
] | null | null | null | third_party/tflite-micro/tensorflow/lite/micro/tools/metrics/create_size_log.py | keadwen/CFU-Playground | 74c79158e85e1365170ececd1d91ea3fa48faba0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to build the required binaries, profile their size and generate log.
"""
import argparse
import datetime
import os
import pandas as pd
import subprocess
def _build_a_binary(root_dir, binary_name, makefile_options):
os.chdir(root_dir)
params_list = [
"make", "-f", "tensorflow/lite/micro/tools/make/Makefile", binary_name
] + ["%s=%s" % (key, value) for (key, value) in makefile_options.items()]
process = subprocess.Popen(params_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("Building %s failed with \n\n %s" %
(" ".join(params_list), stderr.decode()))
def _profile_a_binary(root_dir, binary_name, makefile_options, build_info):
target_dir = "%s_%s_%s" % (makefile_options["TARGET"],
makefile_options["TARGET_ARCH"],
makefile_options["BUILD_TYPE"])
binary_path = os.path.join(root_dir, 'tensorflow/lite/micro/tools/make/gen/',
target_dir, 'bin', binary_name)
csv_path = os.path.join(root_dir, 'data/continuous_builds/size_profiling',
target_dir, "%s.csv" % binary_name)
# Run size command and extract the output
process = subprocess.Popen(["size", binary_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("size %s failed with \n\n %s" %
(binary_name, stderr.decode()))
output_str = stdout.decode()
df = pd.DataFrame([line.split() for line in output_str.split('\n')[1:]],
columns=list(output_str.split('\n')[0].split()))
# Append the output from the size to the CSV file
report = _create_or_read_csv(csv_path)
report.loc[len(report.index)] = [
build_info["date"], build_info['sha'], df['text'][0], df['data'][0],
df['bss'][0], df['dec'][0]
]
report.to_csv(csv_path, index=False, header=False, mode='a')
def _create_or_read_csv(csv_file_name):
if os.path.exists(csv_file_name) is not True:
csv_df = pd.DataFrame(
columns=['date', 'sha', 'text', 'data', 'bss', 'total'])
csv_df.to_csv(csv_file_name, index=False, mode='w')
csv_head = pd.read_csv(csv_file_name, index_col=False, nrows=0)
return csv_head
def _get_build_info(root_dir):
os.chdir(root_dir)
current_time = str(datetime.datetime.now())
git_process = subprocess.Popen(["git", "rev-parse", "HEAD"],
stdout=subprocess.PIPE,
cwd=root_dir)
sha, err = git_process.communicate()
if git_process.returncode != 0:
raise RuntimeError("Git failed with %s" % err.decode())
return {'date': current_time, 'sha': sha.decode().strip('\n')}
def _build_and_profile(root_dir, makefile_options, binary_names):
build_info = _get_build_info(root_dir)
for binary_name in binary_names:
_build_a_binary(root_dir, binary_name, makefile_options)
_profile_a_binary(root_dir, binary_name, makefile_options, build_info)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
default_binary_list_string = 'keyword_benchmark,baseline_memory_footprint,interpreter_memory_footprint'
parser.add_argument(
'--binary_list',
nargs='?',
const=default_binary_list_string,
default=default_binary_list_string,
help=
'binary list separated by comma (e.g. keyword_benchmark,baseline_memory_footprint)'
)
parser.add_argument('--build_type',
nargs='?',
const='release',
default='release',
help='build type (e.g. release)')
parser.add_argument('--target',
nargs='?',
const='linux',
default='linux',
help='host target (e.g. linux)')
parser.add_argument('--target_arch',
nargs='?',
const='x86_64',
default='x86_64',
help='target architecture (e.g x86_64)')
args = parser.parse_args()
makefile_options = {
"BUILD_TYPE": args.build_type,
"TARGET": args.target,
"TARGET_ARCH": args.target_arch
}
binary_names = args.binary_list.split(',')
script_path = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(script_path, '../../../../..')
_build_and_profile(root_dir, makefile_options, binary_names)
| 36.712329 | 105 | 0.624813 |
import argparse
import datetime
import os
import pandas as pd
import subprocess
def _build_a_binary(root_dir, binary_name, makefile_options):
os.chdir(root_dir)
params_list = [
"make", "-f", "tensorflow/lite/micro/tools/make/Makefile", binary_name
] + ["%s=%s" % (key, value) for (key, value) in makefile_options.items()]
process = subprocess.Popen(params_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("Building %s failed with \n\n %s" %
(" ".join(params_list), stderr.decode()))
def _profile_a_binary(root_dir, binary_name, makefile_options, build_info):
target_dir = "%s_%s_%s" % (makefile_options["TARGET"],
makefile_options["TARGET_ARCH"],
makefile_options["BUILD_TYPE"])
binary_path = os.path.join(root_dir, 'tensorflow/lite/micro/tools/make/gen/',
target_dir, 'bin', binary_name)
csv_path = os.path.join(root_dir, 'data/continuous_builds/size_profiling',
target_dir, "%s.csv" % binary_name)
process = subprocess.Popen(["size", binary_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError("size %s failed with \n\n %s" %
(binary_name, stderr.decode()))
output_str = stdout.decode()
df = pd.DataFrame([line.split() for line in output_str.split('\n')[1:]],
columns=list(output_str.split('\n')[0].split()))
report = _create_or_read_csv(csv_path)
report.loc[len(report.index)] = [
build_info["date"], build_info['sha'], df['text'][0], df['data'][0],
df['bss'][0], df['dec'][0]
]
report.to_csv(csv_path, index=False, header=False, mode='a')
def _create_or_read_csv(csv_file_name):
if os.path.exists(csv_file_name) is not True:
csv_df = pd.DataFrame(
columns=['date', 'sha', 'text', 'data', 'bss', 'total'])
csv_df.to_csv(csv_file_name, index=False, mode='w')
csv_head = pd.read_csv(csv_file_name, index_col=False, nrows=0)
return csv_head
def _get_build_info(root_dir):
os.chdir(root_dir)
current_time = str(datetime.datetime.now())
git_process = subprocess.Popen(["git", "rev-parse", "HEAD"],
stdout=subprocess.PIPE,
cwd=root_dir)
sha, err = git_process.communicate()
if git_process.returncode != 0:
raise RuntimeError("Git failed with %s" % err.decode())
return {'date': current_time, 'sha': sha.decode().strip('\n')}
def _build_and_profile(root_dir, makefile_options, binary_names):
build_info = _get_build_info(root_dir)
for binary_name in binary_names:
_build_a_binary(root_dir, binary_name, makefile_options)
_profile_a_binary(root_dir, binary_name, makefile_options, build_info)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
default_binary_list_string = 'keyword_benchmark,baseline_memory_footprint,interpreter_memory_footprint'
parser.add_argument(
'--binary_list',
nargs='?',
const=default_binary_list_string,
default=default_binary_list_string,
help=
'binary list separated by comma (e.g. keyword_benchmark,baseline_memory_footprint)'
)
parser.add_argument('--build_type',
nargs='?',
const='release',
default='release',
help='build type (e.g. release)')
parser.add_argument('--target',
nargs='?',
const='linux',
default='linux',
help='host target (e.g. linux)')
parser.add_argument('--target_arch',
nargs='?',
const='x86_64',
default='x86_64',
help='target architecture (e.g x86_64)')
args = parser.parse_args()
makefile_options = {
"BUILD_TYPE": args.build_type,
"TARGET": args.target,
"TARGET_ARCH": args.target_arch
}
binary_names = args.binary_list.split(',')
script_path = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(script_path, '../../../../..')
_build_and_profile(root_dir, makefile_options, binary_names)
| true | true |
f728bf5b0a13d3b044d905f93c6139c655867979 | 100 | py | Python | spider_project/spider_market/apps.py | Sam1808/SG | 4352aebdc35b5d84be09863af5d85b843e039e20 | [
"MIT"
] | 1 | 2021-11-22T11:15:41.000Z | 2021-11-22T11:15:41.000Z | spider_project/spider_market/apps.py | Sam1808/SG | 4352aebdc35b5d84be09863af5d85b843e039e20 | [
"MIT"
] | null | null | null | spider_project/spider_market/apps.py | Sam1808/SG | 4352aebdc35b5d84be09863af5d85b843e039e20 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SpiderMarketConfig(AppConfig):
name = 'spider_market'
| 16.666667 | 36 | 0.78 | from django.apps import AppConfig
class SpiderMarketConfig(AppConfig):
name = 'spider_market'
| true | true |
f728bf9140687e6e0eb7b82f8867e24dc9c576ad | 163 | py | Python | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_NoAR.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_NoAR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_NoAR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['NoAR'] ); | 40.75 | 90 | 0.760736 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['NoAR'] ); | true | true |
f728c00a1d05db7029582e2f2ffa30430698a2a4 | 3,084 | py | Python | flumine/events/events.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/events/events.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/events/events.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | import datetime
from enum import Enum
class EventType(Enum):
    """Identifies the kind of payload an event carries.

    The value strings are human-readable labels (used e.g. when an event
    is rendered via ``BaseEvent.__str__``).
    """

    TERMINATOR = "Terminator"
    # betfair objects
    MARKET_CATALOGUE = "MarketCatalogue"
    MARKET_BOOK = "MarketBook"
    RAW_DATA = "Raw streaming data"
    CURRENT_ORDERS = "CurrentOrders"
    CLEARED_MARKETS = "ClearedMarkets"
    CLEARED_ORDERS = "ClearedOrders"
    CLEARED_ORDERS_META = "ClearedOrders metadata"
    BALANCE = "Balance"
    # flumine objects
    STRATEGY = "Strategy"
    MARKET = "Market"
    TRADE = "Trade"
    ORDER = "Order"
    ORDER_PACKAGE = "Order package"
    CLOSE_MARKET = "Closed market"
    CUSTOM_EVENT = "Custom event"
    NEW_DAY = "New day"
class QueueType(Enum):
    """Which queue an event is routed to: processing (handler) or logging."""

    HANDLER = "Handler queue"
    LOGGING = "Logging queue"
class BaseEvent:  # todo __slots__?
    """Base class for all events flowing through flumine's queues.

    Subclasses override the ``EVENT_TYPE`` and ``QUEUE_TYPE`` class
    attributes to identify the payload kind and the target queue.

    Args:
        event: arbitrary payload carried by this event.
    """

    EVENT_TYPE = None
    QUEUE_TYPE = None

    def __init__(self, event):
        # Timezone-aware creation timestamp; datetime.utcnow() is
        # deprecated since Python 3.12 and returns naive datetimes.
        self._time_created = datetime.datetime.now(datetime.timezone.utc)
        self.event = event

    @property
    def elapsed_seconds(self) -> float:
        """Seconds elapsed since this event was created."""
        now = datetime.datetime.now(datetime.timezone.utc)
        return (now - self._time_created).total_seconds()

    def __str__(self) -> str:
        return "<{0} [{1}]>".format(self.EVENT_TYPE.name, self.QUEUE_TYPE.name)
# HANDLER
class MarketCatalogueEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET_CATALOGUE
QUEUE_TYPE = QueueType.HANDLER
class MarketBookEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET_BOOK
QUEUE_TYPE = QueueType.HANDLER
class RawDataEvent(BaseEvent):
EVENT_TYPE = EventType.RAW_DATA
QUEUE_TYPE = QueueType.HANDLER
class CurrentOrdersEvent(BaseEvent):
EVENT_TYPE = EventType.CURRENT_ORDERS
QUEUE_TYPE = QueueType.HANDLER
class ClearedMarketsEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_MARKETS
QUEUE_TYPE = QueueType.HANDLER
class ClearedOrdersEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_ORDERS
QUEUE_TYPE = QueueType.HANDLER
class CloseMarketEvent(BaseEvent):
EVENT_TYPE = EventType.CLOSE_MARKET
QUEUE_TYPE = QueueType.HANDLER
class CustomEvent(BaseEvent):
    """Ad-hoc event carrying a user-supplied callback for the handler."""
    EVENT_TYPE = EventType.CUSTOM_EVENT
    QUEUE_TYPE = QueueType.HANDLER
    def __init__(self, event, callback, *args, **kwargs):
        super(CustomEvent, self).__init__(event)
        # Callable invoked when this event is processed; note that the
        # extra *args/**kwargs are accepted but not stored.
        self.callback = callback
class NewDayEvent(BaseEvent):
EVENT_TYPE = EventType.NEW_DAY
QUEUE_TYPE = QueueType.HANDLER
# LOGGING
class ClearedOrdersMetaEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_ORDERS_META
QUEUE_TYPE = QueueType.LOGGING
class BalanceEvent(BaseEvent):
EVENT_TYPE = EventType.BALANCE
QUEUE_TYPE = QueueType.LOGGING
class StrategyEvent(BaseEvent):
EVENT_TYPE = EventType.STRATEGY
QUEUE_TYPE = QueueType.LOGGING
class MarketEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET
QUEUE_TYPE = QueueType.LOGGING
class TradeEvent(BaseEvent):
EVENT_TYPE = EventType.TRADE
QUEUE_TYPE = QueueType.LOGGING
class OrderEvent(BaseEvent):
EVENT_TYPE = EventType.ORDER
QUEUE_TYPE = QueueType.LOGGING
# both
class TerminationEvent(BaseEvent):
EVENT_TYPE = EventType.TERMINATOR
QUEUE_TYPE = QueueType.HANDLER
| 22.18705 | 80 | 0.725032 | import datetime
from enum import Enum
class EventType(Enum):
TERMINATOR = "Terminator"
MARKET_CATALOGUE = "MarketCatalogue"
MARKET_BOOK = "MarketBook"
RAW_DATA = "Raw streaming data"
CURRENT_ORDERS = "CurrentOrders"
CLEARED_MARKETS = "ClearedMarkets"
CLEARED_ORDERS = "ClearedOrders"
CLEARED_ORDERS_META = "ClearedOrders metadata"
BALANCE = "Balance"
STRATEGY = "Strategy"
MARKET = "Market"
TRADE = "Trade"
ORDER = "Order"
ORDER_PACKAGE = "Order package"
CLOSE_MARKET = "Closed market"
CUSTOM_EVENT = "Custom event"
NEW_DAY = "New day"
class QueueType(Enum):
HANDLER = "Handler queue"
LOGGING = "Logging queue"
class BaseEvent:
EVENT_TYPE = None
QUEUE_TYPE = None
def __init__(self, event):
self._time_created = datetime.datetime.utcnow()
self.event = event
@property
def elapsed_seconds(self):
return (datetime.datetime.utcnow() - self._time_created).total_seconds()
def __str__(self):
return "<{0} [{1}]>".format(self.EVENT_TYPE.name, self.QUEUE_TYPE.name)
class MarketCatalogueEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET_CATALOGUE
QUEUE_TYPE = QueueType.HANDLER
class MarketBookEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET_BOOK
QUEUE_TYPE = QueueType.HANDLER
class RawDataEvent(BaseEvent):
EVENT_TYPE = EventType.RAW_DATA
QUEUE_TYPE = QueueType.HANDLER
class CurrentOrdersEvent(BaseEvent):
EVENT_TYPE = EventType.CURRENT_ORDERS
QUEUE_TYPE = QueueType.HANDLER
class ClearedMarketsEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_MARKETS
QUEUE_TYPE = QueueType.HANDLER
class ClearedOrdersEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_ORDERS
QUEUE_TYPE = QueueType.HANDLER
class CloseMarketEvent(BaseEvent):
EVENT_TYPE = EventType.CLOSE_MARKET
QUEUE_TYPE = QueueType.HANDLER
class CustomEvent(BaseEvent):
EVENT_TYPE = EventType.CUSTOM_EVENT
QUEUE_TYPE = QueueType.HANDLER
def __init__(self, event, callback, *args, **kwargs):
super(CustomEvent, self).__init__(event)
self.callback = callback
class NewDayEvent(BaseEvent):
EVENT_TYPE = EventType.NEW_DAY
QUEUE_TYPE = QueueType.HANDLER
class ClearedOrdersMetaEvent(BaseEvent):
EVENT_TYPE = EventType.CLEARED_ORDERS_META
QUEUE_TYPE = QueueType.LOGGING
class BalanceEvent(BaseEvent):
EVENT_TYPE = EventType.BALANCE
QUEUE_TYPE = QueueType.LOGGING
class StrategyEvent(BaseEvent):
EVENT_TYPE = EventType.STRATEGY
QUEUE_TYPE = QueueType.LOGGING
class MarketEvent(BaseEvent):
EVENT_TYPE = EventType.MARKET
QUEUE_TYPE = QueueType.LOGGING
class TradeEvent(BaseEvent):
EVENT_TYPE = EventType.TRADE
QUEUE_TYPE = QueueType.LOGGING
class OrderEvent(BaseEvent):
EVENT_TYPE = EventType.ORDER
QUEUE_TYPE = QueueType.LOGGING
class TerminationEvent(BaseEvent):
EVENT_TYPE = EventType.TERMINATOR
QUEUE_TYPE = QueueType.HANDLER
| true | true |
f728c043d84924a38e28c702e1a7d6055c1310a5 | 5,071 | py | Python | Stimuli/Test2.py | Tom-TBT/QDSpy | 8756a6251b870c61294f5e3ad83c57e8f49e8195 | [
"MIT"
] | 11 | 2016-04-04T12:54:44.000Z | 2022-02-10T10:24:15.000Z | Stimuli/Test2.py | Tom-TBT/QDSpy | 8756a6251b870c61294f5e3ad83c57e8f49e8195 | [
"MIT"
] | 17 | 2016-04-05T15:43:43.000Z | 2019-06-22T08:08:16.000Z | Stimuli/Test2.py | Tom-TBT/QDSpy | 8756a6251b870c61294f5e3ad83c57e8f49e8195 | [
"MIT"
] | 7 | 2016-01-21T11:23:17.000Z | 2021-06-28T14:34:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ---------------------------------------------------------------------
import random
import QDS
import math
QDS.Initialize("Test2", "Test for Lightcrafter")
#QDS.setColorMode((8,7,7), (0,1,1), 0)
#QDS.setColorMode((8,8,8), (0,0,0), 0)
#QDS.setColorMode((0,0,0), (0,0,0), 2)
nTrials = 120
dt_s = 1.0/60.0
dxScr = 580
dyScr = 580
useStripes= 1
random.seed(1)
# gradient
#
if useStripes:
# Use stripes to generate gradient
#
nRows = 48
nCols = 3
Grad_boxDx = dxScr/float(nCols)
Grad_boxDy = dyScr/float(nRows)
Grad_Colors = [(0,255,0),(0,0,255),(0,255,255)]
nB = nRows*nCols
for iB in range(1, nB+1):
QDS.DefObj_Box(iB, Grad_boxDx, Grad_boxDy)
Grad_indL = []
Grad_posL = []
Grad_colL = []
Grad_alpL = []
Grad_rotL = []
Grad_magL = []
for iX in range(nCols):
for iY in range(nRows):
iB = 1 +iX +iY*nCols
x = iX*Grad_boxDx +Grad_boxDx/2.0 -Grad_boxDx*nCols/2.0
y = iY*Grad_boxDy +Grad_boxDy/2.0 -Grad_boxDy*nRows/2.0
r = Grad_Colors[iX][0]*iY/nRows
g = Grad_Colors[iX][1]*iY/nRows
b = Grad_Colors[iX][2]*iY/nRows
Grad_indL.append(iB)
Grad_posL.append((x,y))
Grad_colL.append((r,g,b))
Grad_rotL.append(0)
Grad_alpL.append(255)
Grad_magL.append((1,1))
QDS.SetObjColorEx(Grad_indL, Grad_colL, Grad_alpL)
else:
# Use whole objects and set color by vertex
#
nRows = 1
nCols = 3
Grad_boxDx = dxScr/float(nCols)
Grad_boxDy = dyScr/float(nRows)
Grad_RGBA = [(0,255,0, 255),(0,0,255, 255),(0,255,255, 255)]
nB = nRows*nCols
for iB in range(1, nB+1):
QDS.DefObj_Box(iB, Grad_boxDx, Grad_boxDy)
Grad_indL = []
Grad_posL = []
Grad_colL = []
Grad_rotL = []
Grad_magL = []
for iX in range(nCols):
iB = iX +1
x = iX*Grad_boxDx +Grad_boxDx/2.0 -Grad_boxDx*nCols/2.0
y = 0
r = Grad_RGBA[iX][0]
g = Grad_RGBA[iX][1]
b = Grad_RGBA[iX][2]
a = Grad_RGBA[iX][3]
Grad_indL.append(iB)
Grad_posL.append((x,y))
Grad_colL.append([(r,g,b,a),(r,g,b,a),(0,0,0,a),(0,0,0,a)])
Grad_rotL.append(0)
Grad_magL.append((1,1))
# center spots (sinusoidal and flicker)
#
Spot_ID_sinB = nRows*nCols+2
Spot_ID_sinG = Spot_ID_sinB +1
Spot_ID_sinW = Spot_ID_sinB +2
Spot_ID_flck1 = Spot_ID_sinB +3
Spot_ID_flck2 = Spot_ID_sinB +4
Spot_ID_sect = Spot_ID_sinB +5
Spot_r = 150
Spot_SinPer_s = 2.0
isShad = 0
QDS.DefObj_EllipseEx(Spot_ID_sinB, Spot_r, Spot_r, isShad)
QDS.DefObj_EllipseEx(Spot_ID_sinG, Spot_r, Spot_r, isShad)
QDS.DefObj_EllipseEx(Spot_ID_sinW, Spot_r, Spot_r, isShad)
QDS.DefObj_BoxEx(Spot_ID_flck1, Spot_r, Spot_r/2, isShad)
QDS.DefObj_BoxEx(Spot_ID_flck2, Spot_r, Spot_r/2, isShad)
QDS.DefObj_SectorEx(Spot_ID_sect, Spot_r*2, Spot_r/2, 225, 270, isShad)
#QDS.DefObj_EllipseEx(Spot_ID_sect, Spot_r*1, Spot_r*1, isShad)
Spots_indL = [Spot_ID_sinB, Spot_ID_sinG, Spot_ID_sinW,
Spot_ID_flck1, Spot_ID_flck2, Spot_ID_sect]
Spots_posL = [(-Spot_r/2.0,-Spot_r/2.0), (Spot_r/2.0,-Spot_r/2.0),
(-Spot_r/2.0, Spot_r/2.0), (Spot_r/2.0, Spot_r*3/4),
( Spot_r/2.0, Spot_r*1/4), (0,0)]
Spots_magL = [(1,1), (1,1), (1,1), (1,1), (1,1), (3,3)]
Spots_rotL = [0,0,0,0,0,0]
Spots_alpL = [255,255,255,255,255,128]
# ---------------------------------------------------------------------
def myLoop():
    """Render ``nTrials`` frames: static gradient plus animated spots.

    Each frame updates the colors of the sinusoidally modulated spots,
    the cosine spot, two anti-phase flicker boxes and a constant-color
    sector, then renders everything.  A trigger marker is emitted on
    every 20th frame.
    """
    # The combined object/position/magnification/rotation lists never
    # change between frames, so build them once instead of per frame.
    indL = Grad_indL + Spots_indL
    magL = Grad_magL + Spots_magL
    posL = Grad_posL + Spots_posL
    rotL = Grad_rotL + Spots_rotL

    for iT in range(nTrials):
        isMark = int((iT % 20) == 0)

        # Phase of the sinusoidal modulation for this frame, mapped to 0..1
        per = math.pi * 2 * iT * dt_s / Spot_SinPer_s
        iSin = (math.sin(per) + 1) / 2
        iCos = (math.cos(per) + 1) / 2

        flick = int(255 * (iT % 2))       # on/off every other frame
        cos8 = int(255 * iCos)

        Spots_colL = [
            (0, 0, int(255 * iSin)),       # blue sine spot
            (0, int(255 * iSin), 0),       # green sine spot
            (0, cos8, cos8),               # cyan cosine spot
            (0, flick, flick),             # flicker box 1
            (0, 255 - flick, 255 - flick), # flicker box 2 (anti-phase)
            (255, 128, 128),               # constant-color sector
        ]

        # Set colors and render
        QDS.SetObjColorEx(Spots_indL, Spots_colL, Spots_alpL)
        if not useStripes:
            QDS.SetObjColorAlphaByVertex(Grad_indL, Grad_colL)
        QDS.Scene_RenderEx(dt_s, indL, posL, magL, rotL, isMark)
# ---------------------------------------------------------------------
QDS.StartScript()
QDS.SetBkgColor((0,0,0))
QDS.Scene_Clear(1.0, 0)
QDS.Loop(5, myLoop)
QDS.Scene_Clear(1.0, 0)
QDS.EndScript()
# ---------------------------------------------------------------------
| 27.559783 | 81 | 0.566555 |
import random
import QDS
import math
QDS.Initialize("Test2", "Test for Lightcrafter")
nTrials = 120
dt_s = 1.0/60.0
dxScr = 580
dyScr = 580
useStripes= 1
random.seed(1)
if useStripes:
nRows = 48
nCols = 3
Grad_boxDx = dxScr/float(nCols)
Grad_boxDy = dyScr/float(nRows)
Grad_Colors = [(0,255,0),(0,0,255),(0,255,255)]
nB = nRows*nCols
for iB in range(1, nB+1):
QDS.DefObj_Box(iB, Grad_boxDx, Grad_boxDy)
Grad_indL = []
Grad_posL = []
Grad_colL = []
Grad_alpL = []
Grad_rotL = []
Grad_magL = []
for iX in range(nCols):
for iY in range(nRows):
iB = 1 +iX +iY*nCols
x = iX*Grad_boxDx +Grad_boxDx/2.0 -Grad_boxDx*nCols/2.0
y = iY*Grad_boxDy +Grad_boxDy/2.0 -Grad_boxDy*nRows/2.0
r = Grad_Colors[iX][0]*iY/nRows
g = Grad_Colors[iX][1]*iY/nRows
b = Grad_Colors[iX][2]*iY/nRows
Grad_indL.append(iB)
Grad_posL.append((x,y))
Grad_colL.append((r,g,b))
Grad_rotL.append(0)
Grad_alpL.append(255)
Grad_magL.append((1,1))
QDS.SetObjColorEx(Grad_indL, Grad_colL, Grad_alpL)
else:
nRows = 1
nCols = 3
Grad_boxDx = dxScr/float(nCols)
Grad_boxDy = dyScr/float(nRows)
Grad_RGBA = [(0,255,0, 255),(0,0,255, 255),(0,255,255, 255)]
nB = nRows*nCols
for iB in range(1, nB+1):
QDS.DefObj_Box(iB, Grad_boxDx, Grad_boxDy)
Grad_indL = []
Grad_posL = []
Grad_colL = []
Grad_rotL = []
Grad_magL = []
for iX in range(nCols):
iB = iX +1
x = iX*Grad_boxDx +Grad_boxDx/2.0 -Grad_boxDx*nCols/2.0
y = 0
r = Grad_RGBA[iX][0]
g = Grad_RGBA[iX][1]
b = Grad_RGBA[iX][2]
a = Grad_RGBA[iX][3]
Grad_indL.append(iB)
Grad_posL.append((x,y))
Grad_colL.append([(r,g,b,a),(r,g,b,a),(0,0,0,a),(0,0,0,a)])
Grad_rotL.append(0)
Grad_magL.append((1,1))
Spot_ID_sinB = nRows*nCols+2
Spot_ID_sinG = Spot_ID_sinB +1
Spot_ID_sinW = Spot_ID_sinB +2
Spot_ID_flck1 = Spot_ID_sinB +3
Spot_ID_flck2 = Spot_ID_sinB +4
Spot_ID_sect = Spot_ID_sinB +5
Spot_r = 150
Spot_SinPer_s = 2.0
isShad = 0
QDS.DefObj_EllipseEx(Spot_ID_sinB, Spot_r, Spot_r, isShad)
QDS.DefObj_EllipseEx(Spot_ID_sinG, Spot_r, Spot_r, isShad)
QDS.DefObj_EllipseEx(Spot_ID_sinW, Spot_r, Spot_r, isShad)
QDS.DefObj_BoxEx(Spot_ID_flck1, Spot_r, Spot_r/2, isShad)
QDS.DefObj_BoxEx(Spot_ID_flck2, Spot_r, Spot_r/2, isShad)
QDS.DefObj_SectorEx(Spot_ID_sect, Spot_r*2, Spot_r/2, 225, 270, isShad)
Spots_indL = [Spot_ID_sinB, Spot_ID_sinG, Spot_ID_sinW,
Spot_ID_flck1, Spot_ID_flck2, Spot_ID_sect]
Spots_posL = [(-Spot_r/2.0,-Spot_r/2.0), (Spot_r/2.0,-Spot_r/2.0),
(-Spot_r/2.0, Spot_r/2.0), (Spot_r/2.0, Spot_r*3/4),
( Spot_r/2.0, Spot_r*1/4), (0,0)]
Spots_magL = [(1,1), (1,1), (1,1), (1,1), (1,1), (3,3)]
Spots_rotL = [0,0,0,0,0,0]
Spots_alpL = [255,255,255,255,255,128]
def myLoop():
for iT in range(nTrials):
isMark = int((iT % 20) == 0)
per = math.pi*2 *iT*dt_s/Spot_SinPer_s
iSin = (math.sin(per) +1)/2
iCos = (math.cos(per) +1)/2
Spots_colL = []
r = 0
g = 0
b = int(255 *iSin)
Spots_colL.append((r,g,b))
g = int(255 *iSin)
b = 0
Spots_colL.append((r,g,b))
g = int(255 *iCos)
b = g
Spots_colL.append((r,g,b))
g = int(255 *(iT % 2))
b = g
Spots_colL.append((r,g,b))
g = int(255 *(1- (iT % 2)))
b = g
Spots_colL.append((r,g,b))
Spots_colL.append((255,128,128))
indL = Grad_indL +Spots_indL
magL = Grad_magL +Spots_magL
posL = Grad_posL +Spots_posL
rotL = Grad_rotL +Spots_rotL
QDS.SetObjColorEx(Spots_indL, Spots_colL, Spots_alpL)
if not(useStripes):
QDS.SetObjColorAlphaByVertex(Grad_indL, Grad_colL)
QDS.Scene_RenderEx(dt_s, indL, posL, magL, rotL, isMark)
QDS.StartScript()
QDS.SetBkgColor((0,0,0))
QDS.Scene_Clear(1.0, 0)
QDS.Loop(5, myLoop)
QDS.Scene_Clear(1.0, 0)
QDS.EndScript()
| true | true |
f728c118f43ad8e3b4ef9f2c993314d232442789 | 1,473 | py | Python | Codechef/SeptemberLunchtime2021/Unqeq.py | Anubha13kumari/Data-Structures | 232c4f2de87f6c0bea7dadc8d46db1be52159f5c | [
"MIT"
] | null | null | null | Codechef/SeptemberLunchtime2021/Unqeq.py | Anubha13kumari/Data-Structures | 232c4f2de87f6c0bea7dadc8d46db1be52159f5c | [
"MIT"
] | 4 | 2021-10-01T16:41:34.000Z | 2021-10-02T13:30:55.000Z | Codechef/SeptemberLunchtime2021/Unqeq.py | Anubha13kumari/Data-Structures | 232c4f2de87f6c0bea7dadc8d46db1be52159f5c | [
"MIT"
] | 2 | 2021-10-01T17:44:31.000Z | 2021-10-02T09:07:04.000Z | import math
def solve(N):
    """Solve one test case: split 1..N into two non-empty parts with equal
    sums, where part sizes follow the reference construction.

    Returns the output lines for the case: ["NO"], or
    ["YES", "<part one>", "<part two>"] with space-separated values.

    Uses pure integer arithmetic throughout; the original float-based
    ``int(N/2)`` / ``(sum2-sum1)/2`` checks lose precision for large N.
    """
    if N <= 2:
        # 1 or 2 elements can never be split into two equal-sum parts.
        return ["NO"]
    half = N // 2
    quarter = N // 4
    nums1, nums2 = [], []
    sum1 = sum2 = 0
    l, r = 1, N
    if half % 2 == 0:
        # Even half: take quarter smallest and quarter largest numbers
        # for part one; the middle run forms part two.
        for _ in range(half):
            if l <= quarter:
                nums1.append(str(l))
                sum1 += l
                l += 1
            else:
                nums1.append(str(r))
                sum1 += r
                r -= 1
        while l <= r:
            nums2.append(str(l))
            sum2 += l
            l += 1
        if sum1 == sum2:
            return ["YES", " ".join(nums1), " ".join(nums2)]
        return ["NO"]
    # Odd half: build one element short, then move the balancing
    # element x = (sum2 - sum1) / 2 from part two into part one.
    for _ in range(half - 1):
        if l <= quarter:
            nums1.append(str(l))
            sum1 += l
            l += 1
        else:
            nums1.append(str(r))
            sum1 += r
            r -= 1
    while l <= r:
        nums2.append(str(l))
        sum2 += l
        l += 1
    diff = sum2 - sum1
    if diff % 2 == 0:
        x = diff // 2
        nums1.insert(quarter, str(x))
        nums2.remove(str(x))
        return ["YES", " ".join(nums1), " ".join(nums2)]
    return ["NO"]


def main():
    """Read T test cases from stdin and print each answer."""
    T = int(input())
    for _ in range(T):
        for line in solve(int(input())):
            print(line)


if __name__ == "__main__":
    main()
T=int(input())
while T>0:
N=int(input())
nums1,nums2=[],[]
if N==1:
print("NO")
elif N==2:
print("NO")
elif int(N/2)%2==0:
l=1
r=N
sum1=0
sum2=0
for i in range(int(N/2)):
if l<=int(N/4):
nums1.append(str(l))
sum1+=l
l+=1
else:
nums1.append(str(r))
sum1+=r
r-=1
while l<=r:
nums2.append(str(l))
sum2+=l
l+=1
if sum1==sum2:
print("YES")
print(" ".join(nums1))
print(" ".join(nums2))
else:
print("NO")
else:
l=1
r=N
sum1=0
sum2=0
for i in range(int(N/2)-1):
if l<=int(N/4):
nums1.append(str(l))
sum1+=l
l+=1
else:
nums1.append(str(r))
sum1+=r
r-=1
while l<=r:
nums2.append(str(l))
sum2+=l
l+=1
x=(sum2-sum1)/2
if math.floor(x)==math.ceil(x):
print("YES")
nums1.insert(int(N/4),str(int(x)))
nums2.remove(str(int(x)))
print(" ".join(nums1))
print(" ".join(nums2))
else:
print("NO")
T-=1 | true | true |
f728c1290f4433deb303b7da2d4ed30d91a801e5 | 21,041 | py | Python | python/src/cm_shell/cmps.py | cloudsoft/cm_api | 85c7179044188c785c793a649677a22e427d2924 | [
"Apache-2.0"
] | 6 | 2015-04-28T22:56:49.000Z | 2019-05-23T17:25:05.000Z | python/src/cm_shell/cmps.py | cloudsoft/cm_api | 85c7179044188c785c793a649677a22e427d2924 | [
"Apache-2.0"
] | null | null | null | python/src/cm_shell/cmps.py | cloudsoft/cm_api | 85c7179044188c785c793a649677a22e427d2924 | [
"Apache-2.0"
] | 22 | 2015-04-28T22:56:31.000Z | 2019-02-26T14:34:16.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getpass
import argparse
import readline
import os
import cmd
from prettytable import PrettyTable
from cm_api.api_client import ApiResource, ApiException
from urllib2 import URLError
# Config
CONFIG = {'cluster': None, 'output_type': 'table', 'seperator': None}
# Initial Prompt
INIT_PROMPT = "cloudera> "
# Banner shown at interactive shell login
BANNER = "Welcome to the Cloudera Manager Console\nSelect a cluster using 'show clusters' and 'use'"
# If true, than the user is running a non-interactive shell (ie: scripting)
EXECUTE = False
# Readline fix for hyphens
readline.set_completer_delims(readline.get_completer_delims().replace('-', ''))
# Global API object
api = None
class ClouderaShell(cmd.Cmd):
"""
Interactive shell for communicating with your
Cloudera Cluster making use of the cm_api
"""
# Set initial cloudera prompt
prompt = INIT_PROMPT
# Set login banner
intro = BANNER
# Help headers
doc_header = "Cloudera Manager Commands"
undoc_header = "Other Commands"
# Initial cache is blank
# when autocomplete for one of these components
# is triggered, it will automatically cache them
CACHED_ROLES = {}
CACHED_SERVICES = None
CACHED_CLUSTERS = None
def preloop(self):
"Checks if the cluster was pre-defined"
if CONFIG['cluster']:
self.set_cluster(CONFIG['cluster'])
else:
self.cluster_object = None
def generate_output(self, headers, rows, align=None):
if CONFIG['output_type'] == "table":
table = PrettyTable(headers)
if align:
for h in align:
table.align[h] = 'l'
for r in rows:
table.add_row(r)
print(table)
if CONFIG['output_type'] == "csv":
print(','.join(headers))
for r in rows:
print(','.join(r))
if CONFIG['output_type'] == "custom":
SEP = CONFIG['seperator']
print(SEP.join(headers))
for r in rows:
print(SEP.join(r))
def emptyline(self):
"""Called each time a user hits enter, by
default it will redo the last command, this
is an extension so it does nothing."""
pass
def set_cluster(self, cluster):
try:
cluster = api.get_cluster(cluster)
except ApiException:
print("Cluster Not Found!")
return None
self.cluster_object = cluster
if not EXECUTE:
print("Connected to %s" % (cluster.name))
self.prompt = cluster.name + "> "
return True
    @property
    def cluster(self):
        # Name of the currently selected cluster, or None when unset.
        if EXECUTE:
            # Non-interactive mode: always (re)select the cluster supplied
            # on the command line; abort the whole run if it is missing.
            if not self.set_cluster(CONFIG['cluster']):
                sys.exit(1)
            return self.cluster_object.name
        if self.cluster_object:
            return self.cluster_object.name
        else:
            return None
def has_cluster(self):
if not self.cluster:
print("Error: No cluster currently selected")
return None
else:
return True
    def get_log(self, role, log_type=None):
        """Fetch a log for ``role`` and display it.

        In non-interactive mode the log is written straight to stdout;
        interactively it is piped through ``less``.  ``role`` must be a
        full role name of the form "<service>-...", since the service is
        derived from the prefix.  ``log_type`` selects the log: "full",
        "stdout" or "stderr".
        """
        if not role:
            return None
        if not self.has_cluster():
            return None
        # Role names are prefixed with the owning service name and a dash.
        if '-' not in role:
            print("Please enter a valid role name")
            return None
        try:
            service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
            role = service.get_role(role)
            try:
                if EXECUTE:
                    output = sys.stdout
                else:
                    # Interactive: page the (potentially large) log.
                    output = os.popen("less", "w")
                if log_type == "full":
                    output.write(role.get_full_log())
                if log_type == "stdout":
                    output.write(role.get_stdout())
                if log_type == "stderr":
                    output.write(role.get_stderr())
                if not EXECUTE:
                    output.close()
            except IOError:
                # Presumably a broken pipe when the pager exits early;
                # ignored deliberately — TODO confirm.
                pass
        except ApiException:
            print("Error: Role or Service Not Found")
def do_status(self, service):
"""
List all services on the cluster
Usage:
> status
"""
if service:
self.do_show("services", single=service)
else:
self.do_show("services")
def do_log(self, role):
"""
Download log file for role
Usage:
> log <role> Download log
"""
self.get_log(role, log_type="full")
def do_stdout(self, role):
"""
Download stdout file for role
Usage:
> stdout <role> Download stdout
"""
self.get_log(role, log_type="stdout")
def do_stderr(self, role):
"""
Download stderr file for role
Usage:
> stderr <role> Download stderr
"""
self.get_log(role, log_type="stderr")
def do_show(self, option, single=None):
"""
General System Information
Usage:
> show clusters list of clusters this CM manages
> show hosts list of all hosts CM manages
> show services list of all services on this cluster
including their health.
"""
headers = []
rows = []
align = None
# show clusters
if option == "clusters":
"Display list of clusters on system"
headers = ["CLUSTER NAME"]
clusters = api.get_all_clusters()
for cluster in clusters:
rows.append([cluster.name])
# show hosts
if option == "hosts":
"Display a list of hosts avaiable on the system"
headers = ["HOSTNAME", "IP ADDRESS", "RACK"]
align = ["HOSTNAME", "IP ADDRESS", "RACK"]
for host in api.get_all_hosts():
rows.append([host.hostname, host.ipAddress, host.rackId])
# show services
if option == "services":
"Show list of services on the cluster"
headers = ["NAME", "SERVICE", "STATUS", "HEALTH", "CONFIG"]
align = ["NAME", "SERVICE"]
# Check if the user has selected a cluster
if not self.has_cluster():
print("Error: Please select a cluster first")
return None
if not single:
for s in api.get_cluster(self.cluster).get_all_services():
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
else:
s = api.get_cluster(self.cluster).get_service(single)
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
self.generate_output(headers, rows, align=align)
def complete_log(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stdout(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stderr(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_show(self, text, line, start_index, end_index):
show_commands = ["clusters", "hosts", "services"]
if text:
return [c for c in show_commands if c.startswith(text)]
else:
return show_commands
def service_action(self, service, action):
"Perform given action on service for the selected cluster"
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True
def services_autocomplete(self, text, line, start_index, end_index, append=[]):
if not self.cluster:
return None
else:
if not self.CACHED_SERVICES:
services = [s.name for s in api.get_cluster(self.cluster).get_all_services()]
self.CACHED_SERVICES = services
if text:
return [s for s in self.CACHED_SERVICES + append if s.startswith(text)]
else:
return self.CACHED_SERVICES + append
def do_start_service(self, service):
"""
Start a service
Usage:
> start_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="start"):
print("%s is being started" % (service))
else:
print("Error starting service")
return None
def complete_start_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_restart_service(self, service):
"""
Restart a service
Usage:
> restart_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="restart"):
print("%s is being restarted" % (service))
else:
print("Error restarting service")
return None
def complete_restart_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_stop_service(self, service):
"""
Stop a service
Usage:
> stop_service <service>
"""
if not self.has_cluster():
return None
if self.service_action(service=service, action="stop"):
print("%s is being stopped" % (service))
else:
print("Error stopping service")
return None
def complete_stop_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_use(self, cluster):
"""
Connect to Cluster
Usage:
> use <cluster>
"""
if not self.set_cluster(cluster):
print("Error setting cluster")
def cluster_autocomplete(self, text, line, start_index, end_index):
"autocomplete for the use command, obtain list of clusters first"
if not self.CACHED_CLUSTERS:
clusters = [cluster.name for cluster in api.get_all_clusters()]
self.CACHED_CLUSTERS = clusters
if text:
return [cluster for cluster in self.CACHED_CLUSTERS if cluster.startswith(text)]
else:
return self.CACHED_CLUSTERS
def complete_use(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_roles(self, service):
"""
Role information
Usage:
> roles <servicename> Display role information for service
> roles all Display all role information for cluster
"""
if not self.has_cluster():
return None
if not service:
return None
if service == "all":
if not self.CACHED_SERVICES:
self.services_autocomplete('', service, 0, 0)
for s in self.CACHED_SERVICES:
print("= " + s.upper() + " =")
self.do_roles(s)
return None
try:
service = api.get_cluster(self.cluster).get_service(service)
headers = ["ROLE TYPE", "HOST", "ROLE NAME", "STATE", "HEALTH", "CONFIG"]
align = ["ROLE TYPE", "ROLE NAME", "HOST"]
rows = []
for roletype in service.get_role_types():
for role in service.get_roles_by_type(roletype):
if role.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([role.type, role.hostRef.hostId, role.name, role.roleState, role.healthSummary, config])
self.generate_output(headers, rows, align=align)
except ApiException:
print("Service not found")
def complete_roles(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index, append=["all"])
def roles_autocomplete(self, text, line, start_index, end_index):
"Return full list of roles"
if '-' not in line:
# Append a dash to each service, makes for faster autocompletion of
# roles
return [s + '-' for s in self.services_autocomplete(text, line, start_index, end_index)]
else:
key, role = line.split()[1].split('-', 1)
if key not in self.CACHED_ROLES:
service = api.get_cluster(self.cluster).get_service(key)
roles = []
for t in service.get_role_types():
for r in service.get_roles_by_type(t):
roles.append(r.name)
self.CACHED_ROLES[key] = roles
if not role:
return self.CACHED_ROLES[key]
else:
return [r for r in self.CACHED_ROLES[key] if r.startswith(line.split()[1])]
def do_start_role(self, role):
"""
Start a role
Usage:
> start_role <role> Restarts this role
"""
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.start_roles(role)
print("Starting Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_start_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_restart_role(self, role):
"""
Restart a role
Usage:
> restart_role <role> Restarts this role
"""
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.restart_roles(role)
print("Restarting Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_restart_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_stop_role(self, role):
"""
Stop a role
Usage:
> stop_role <role> Stops this role
"""
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.stop_roles(role)
print("Stopping Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_stop_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_stop_cluster(self, cluster):
"""
Completely stop the cluster
Usage:
> stop_cluster <cluster>
"""
try:
cluster = api.get_cluster(cluster)
cluster.stop()
print("Stopping Cluster")
except ApiException:
print("Cluster not found")
return None
def complete_stop_cluster(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_start_cluster(self, cluster):
"""
Start the cluster
Usage:
> start_cluster <cluster>
"""
try:
cluster = api.get_cluster(cluster)
cluster.start()
print("Starting Cluster")
except ApiException:
print("Cluster not found")
return None
def complete_start_cluster(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_version(self, cluster=None):
"""
Obtain cluster CDH version
Usage:
> version
or
> version <cluster>
"""
if not cluster:
if not self.has_cluster():
return None
else:
cluster = api.get_cluster(self.cluster)
else:
try:
cluster = api.get_cluster(cluster)
except ApiException:
print("Error: Cluster not found")
return None
print("Version: %s" % (cluster.version))
def complete_version(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def complete_status(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def main():
    """Entry point: parse arguments, authenticate against the Cloudera
    Manager API and run the shell.

    With ``-e/--execute`` the semicolon-separated commands are run
    non-interactively; otherwise an interactive prompt is started.
    """
    # EXECUTE is a module-level flag read elsewhere (the `cluster`
    # property, log streaming).  Without the global declaration the
    # assignment below only created a function local — a real bug.
    global api, EXECUTE

    parser = argparse.ArgumentParser(description='Cloudera Manager Shell')
    parser.add_argument('-H', '--host', '--hostname', action='store', dest='hostname', required=True)
    parser.add_argument('-p', '--port', action='store', dest='port', type=int, default=7180)
    parser.add_argument('-u', '--user', '--username', action='store', dest='username')
    parser.add_argument('-c', '--cluster', action='store', dest='cluster')
    parser.add_argument('--password', action='store', dest='password')
    parser.add_argument('-e', '--execute', action='store', dest='execute')
    parser.add_argument('-s', '--seperator', action='store', dest='seperator')
    args = parser.parse_args()

    # Prompt for any credentials not supplied on the command line.
    if not args.username:
        args.username = raw_input("Enter Username: ")
    if not args.password:
        args.password = getpass.getpass("Enter Password: ")

    # Attempt to authenticate; fall back to API v1 for older CM servers.
    api = ApiResource(args.hostname, args.port, args.username, args.password)
    try:
        api.echo("ping")
    except ApiException:
        try:
            api = ApiResource(args.hostname, args.port, args.username, args.password, version=1)
            api.echo("ping")
        except ApiException:
            print("Unable to Authenticate")
            sys.exit(1)
    except URLError:
        print("Error: Could not connect to %s" % (args.hostname))
        sys.exit(1)

    CONFIG['cluster'] = args.cluster

    # Custom output seperator switches the output format to "custom".
    if args.seperator:
        CONFIG['output_type'] = 'custom'
        CONFIG['seperator'] = args.seperator

    # Non-interactive mode: run the given commands and exit.
    if args.execute:
        EXECUTE = True
        shell = ClouderaShell()
        for command in args.execute.split(';'):
            shell.onecmd(command)
        sys.exit(0)

    try:
        ClouderaShell().cmdloop()
    except KeyboardInterrupt:
        sys.stdout.write("\n")
        sys.exit(0)
if __name__ == "__main__":
main()
| 32.621705 | 120 | 0.57388 |
import sys
import getpass
import argparse
import readline
import os
import cmd
from prettytable import PrettyTable
from cm_api.api_client import ApiResource, ApiException
from urllib2 import URLError
CONFIG = {'cluster': None, 'output_type': 'table', 'seperator': None}
INIT_PROMPT = "cloudera> "
BANNER = "Welcome to the Cloudera Manager Console\nSelect a cluster using 'show clusters' and 'use'"
EXECUTE = False
readline.set_completer_delims(readline.get_completer_delims().replace('-', ''))
api = None
class ClouderaShell(cmd.Cmd):
prompt = INIT_PROMPT
intro = BANNER
doc_header = "Cloudera Manager Commands"
undoc_header = "Other Commands"
CACHED_ROLES = {}
CACHED_SERVICES = None
CACHED_CLUSTERS = None
def preloop(self):
if CONFIG['cluster']:
self.set_cluster(CONFIG['cluster'])
else:
self.cluster_object = None
def generate_output(self, headers, rows, align=None):
if CONFIG['output_type'] == "table":
table = PrettyTable(headers)
if align:
for h in align:
table.align[h] = 'l'
for r in rows:
table.add_row(r)
print(table)
if CONFIG['output_type'] == "csv":
print(','.join(headers))
for r in rows:
print(','.join(r))
if CONFIG['output_type'] == "custom":
SEP = CONFIG['seperator']
print(SEP.join(headers))
for r in rows:
print(SEP.join(r))
def emptyline(self):
pass
def set_cluster(self, cluster):
try:
cluster = api.get_cluster(cluster)
except ApiException:
print("Cluster Not Found!")
return None
self.cluster_object = cluster
if not EXECUTE:
print("Connected to %s" % (cluster.name))
self.prompt = cluster.name + "> "
return True
@property
def cluster(self):
if EXECUTE:
if not self.set_cluster(CONFIG['cluster']):
sys.exit(1)
return self.cluster_object.name
if self.cluster_object:
return self.cluster_object.name
else:
return None
def has_cluster(self):
if not self.cluster:
print("Error: No cluster currently selected")
return None
else:
return True
def get_log(self, role, log_type=None):
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
role = service.get_role(role)
try:
if EXECUTE:
output = sys.stdout
else:
output = os.popen("less", "w")
if log_type == "full":
output.write(role.get_full_log())
if log_type == "stdout":
output.write(role.get_stdout())
if log_type == "stderr":
output.write(role.get_stderr())
if not EXECUTE:
output.close()
except IOError:
pass
except ApiException:
print("Error: Role or Service Not Found")
def do_status(self, service):
if service:
self.do_show("services", single=service)
else:
self.do_show("services")
def do_log(self, role):
self.get_log(role, log_type="full")
def do_stdout(self, role):
self.get_log(role, log_type="stdout")
def do_stderr(self, role):
self.get_log(role, log_type="stderr")
def do_show(self, option, single=None):
headers = []
rows = []
align = None
if option == "clusters":
headers = ["CLUSTER NAME"]
clusters = api.get_all_clusters()
for cluster in clusters:
rows.append([cluster.name])
if option == "hosts":
headers = ["HOSTNAME", "IP ADDRESS", "RACK"]
align = ["HOSTNAME", "IP ADDRESS", "RACK"]
for host in api.get_all_hosts():
rows.append([host.hostname, host.ipAddress, host.rackId])
if option == "services":
headers = ["NAME", "SERVICE", "STATUS", "HEALTH", "CONFIG"]
align = ["NAME", "SERVICE"]
if not self.has_cluster():
print("Error: Please select a cluster first")
return None
if not single:
for s in api.get_cluster(self.cluster).get_all_services():
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
else:
s = api.get_cluster(self.cluster).get_service(single)
if s.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([s.name, s.type, s.serviceState, s.healthSummary, config])
self.generate_output(headers, rows, align=align)
def complete_log(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stdout(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_stderr(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def complete_show(self, text, line, start_index, end_index):
show_commands = ["clusters", "hosts", "services"]
if text:
return [c for c in show_commands if c.startswith(text)]
else:
return show_commands
def service_action(self, service, action):
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True
def services_autocomplete(self, text, line, start_index, end_index, append=[]):
if not self.cluster:
return None
else:
if not self.CACHED_SERVICES:
services = [s.name for s in api.get_cluster(self.cluster).get_all_services()]
self.CACHED_SERVICES = services
if text:
return [s for s in self.CACHED_SERVICES + append if s.startswith(text)]
else:
return self.CACHED_SERVICES + append
def do_start_service(self, service):
if not self.has_cluster():
return None
if self.service_action(service=service, action="start"):
print("%s is being started" % (service))
else:
print("Error starting service")
return None
def complete_start_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_restart_service(self, service):
if not self.has_cluster():
return None
if self.service_action(service=service, action="restart"):
print("%s is being restarted" % (service))
else:
print("Error restarting service")
return None
def complete_restart_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_stop_service(self, service):
if not self.has_cluster():
return None
if self.service_action(service=service, action="stop"):
print("%s is being stopped" % (service))
else:
print("Error stopping service")
return None
def complete_stop_service(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def do_use(self, cluster):
if not self.set_cluster(cluster):
print("Error setting cluster")
def cluster_autocomplete(self, text, line, start_index, end_index):
if not self.CACHED_CLUSTERS:
clusters = [cluster.name for cluster in api.get_all_clusters()]
self.CACHED_CLUSTERS = clusters
if text:
return [cluster for cluster in self.CACHED_CLUSTERS if cluster.startswith(text)]
else:
return self.CACHED_CLUSTERS
def complete_use(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_roles(self, service):
if not self.has_cluster():
return None
if not service:
return None
if service == "all":
if not self.CACHED_SERVICES:
self.services_autocomplete('', service, 0, 0)
for s in self.CACHED_SERVICES:
print("= " + s.upper() + " =")
self.do_roles(s)
return None
try:
service = api.get_cluster(self.cluster).get_service(service)
headers = ["ROLE TYPE", "HOST", "ROLE NAME", "STATE", "HEALTH", "CONFIG"]
align = ["ROLE TYPE", "ROLE NAME", "HOST"]
rows = []
for roletype in service.get_role_types():
for role in service.get_roles_by_type(roletype):
if role.configStale:
config = "STALE"
else:
config = "UP TO DATE"
rows.append([role.type, role.hostRef.hostId, role.name, role.roleState, role.healthSummary, config])
self.generate_output(headers, rows, align=align)
except ApiException:
print("Service not found")
def complete_roles(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index, append=["all"])
def roles_autocomplete(self, text, line, start_index, end_index):
if '-' not in line:
return [s + '-' for s in self.services_autocomplete(text, line, start_index, end_index)]
else:
key, role = line.split()[1].split('-', 1)
if key not in self.CACHED_ROLES:
service = api.get_cluster(self.cluster).get_service(key)
roles = []
for t in service.get_role_types():
for r in service.get_roles_by_type(t):
roles.append(r.name)
self.CACHED_ROLES[key] = roles
if not role:
return self.CACHED_ROLES[key]
else:
return [r for r in self.CACHED_ROLES[key] if r.startswith(line.split()[1])]
def do_start_role(self, role):
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.start_roles(role)
print("Starting Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_start_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_restart_role(self, role):
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.restart_roles(role)
print("Restarting Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_restart_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_stop_role(self, role):
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name")
return None
try:
service = api.get_cluster(self.cluster).get_service(role.split('-')[0])
service.stop_roles(role)
print("Stopping Role")
except ApiException:
print("Error: Role or Service Not Found")
def complete_stop_role(self, text, line, start_index, end_index):
return self.roles_autocomplete(text, line, start_index, end_index)
def do_stop_cluster(self, cluster):
try:
cluster = api.get_cluster(cluster)
cluster.stop()
print("Stopping Cluster")
except ApiException:
print("Cluster not found")
return None
def complete_stop_cluster(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_start_cluster(self, cluster):
try:
cluster = api.get_cluster(cluster)
cluster.start()
print("Starting Cluster")
except ApiException:
print("Cluster not found")
return None
def complete_start_cluster(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def do_version(self, cluster=None):
if not cluster:
if not self.has_cluster():
return None
else:
cluster = api.get_cluster(self.cluster)
else:
try:
cluster = api.get_cluster(cluster)
except ApiException:
print("Error: Cluster not found")
return None
print("Version: %s" % (cluster.version))
def complete_version(self, text, line, start_index, end_index):
return self.cluster_autocomplete(text, line, start_index, end_index)
def complete_status(self, text, line, start_index, end_index):
return self.services_autocomplete(text, line, start_index, end_index)
def main():
parser = argparse.ArgumentParser(description='Cloudera Manager Shell')
parser.add_argument('-H', '--host', '--hostname', action='store', dest='hostname', required=True)
parser.add_argument('-p', '--port', action='store', dest='port', type=int, default=7180)
parser.add_argument('-u', '--user', '--username', action='store', dest='username')
parser.add_argument('-c', '--cluster', action='store', dest='cluster')
parser.add_argument('--password', action='store', dest='password')
parser.add_argument('-e', '--execute', action='store', dest='execute')
parser.add_argument('-s', '--seperator', action='store', dest='seperator')
args = parser.parse_args()
if not args.username:
args.username = raw_input("Enter Username: ")
if not args.password:
args.password = getpass.getpass("Enter Password: ")
global api
api = ApiResource(args.hostname, args.port, args.username, args.password)
try:
api.echo("ping")
except ApiException:
try:
api = ApiResource(args.hostname, args.port, args.username, args.password, version=1)
api.echo("ping")
except ApiException:
print("Unable to Authenticate")
sys.exit(1)
except URLError:
print("Error: Could not connect to %s" % (args.hostname))
sys.exit(1)
CONFIG['cluster'] = args.cluster
if args.seperator:
CONFIG['output_type'] = 'custom'
CONFIG['seperator'] = args.seperator
if args.execute:
EXECUTE = True
shell = ClouderaShell()
for command in args.execute.split(';'):
shell.onecmd(command)
sys.exit(0)
try:
ClouderaShell().cmdloop()
except KeyboardInterrupt:
sys.stdout.write("\n")
sys.exit(0)
if __name__ == "__main__":
main()
| true | true |
f728c129fea752ad0a3d91130e6dfa702c2a0db1 | 2,160 | py | Python | tests/unit/server/test_get_model_status_rest.py | rasapala/OpenVINO-model-server | a7cd5c7fe6c2177aefbe2fc258eec1b9ff0dda2b | [
"Apache-2.0"
] | 1 | 2019-08-31T04:02:04.000Z | 2019-08-31T04:02:04.000Z | tests/unit/server/test_get_model_status_rest.py | rasapala/OpenVINO-model-server | a7cd5c7fe6c2177aefbe2fc258eec1b9ff0dda2b | [
"Apache-2.0"
] | null | null | null | tests/unit/server/test_get_model_status_rest.py | rasapala/OpenVINO-model-server | a7cd5c7fe6c2177aefbe2fc258eec1b9ff0dda2b | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def test_get_model_status_successful(client):
response = client.simulate_request(method='GET',
path='/v1/models/test',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 200
def test_get_model_status_successful_with_specific_version(client):
response = client.simulate_request(method='GET',
path='/v1/models/test/versions/2',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 200
def test_get_model_status_wrong_model(client):
response = client.simulate_request(method='GET',
path='/v1/models/fake_model',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 404
def test_get_model_status_wrong_version(client):
response = client.simulate_request(method='GET',
path='/v1/models/test/versions/5',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 404
| 41.538462 | 74 | 0.52037 |
def test_get_model_status_successful(client):
response = client.simulate_request(method='GET',
path='/v1/models/test',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 200
def test_get_model_status_successful_with_specific_version(client):
response = client.simulate_request(method='GET',
path='/v1/models/test/versions/2',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 200
def test_get_model_status_wrong_model(client):
response = client.simulate_request(method='GET',
path='/v1/models/fake_model',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 404
def test_get_model_status_wrong_version(client):
response = client.simulate_request(method='GET',
path='/v1/models/test/versions/5',
headers={
"Content-Type":
"application/json"})
assert response.status_code == 404
| true | true |
f728c16552bf8fb7df83575a84a2183112ee941d | 6,493 | py | Python | src/analytics/contrail-topology/contrail_topology/config.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | src/analytics/contrail-topology/contrail_topology/config.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | src/analytics/contrail-topology/contrail_topology/config.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
class CfgParser(object):
CONF_DEFAULT_PATH = '/etc/contrail/contrail-topology.conf'
def __init__(self, argv):
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
'''
command line example
contrail-topology [-h] [-c FILE]
[--analytics_api ANALYTICS_API [ANALYTICS_API ...]]
[--collectors COLLECTORS [COLLECTORS ...]]
[--log_file LOG_FILE] [--log_local]
[--log_category LOG_CATEGORY] [--log_level LOG_LEVEL]
[--use_syslog] [--syslog_facility SYSLOG_FACILITY]
[--scan_frequency SCAN_FREQUENCY]
[--http_server_port HTTP_SERVER_PORT]
optional arguments:
-h, --help show this help message and exit
-c FILE, --conf_file FILE
Specify config file
--analytics_api ANALYTICS_API [ANALYTICS_API ...]
List of analytics-api IP addresses in ip:port format
--collectors COLLECTORS [COLLECTORS ...]
List of Collector IP addresses in ip:port format
--log_file LOG_FILE Filename for the logs to be written to
--log_local Enable local logging of sandesh messages
--log_category LOG_CATEGORY
Category filter for local logging of sandesh messages
--log_level LOG_LEVEL
Severity level for local logging of sandesh messages
--use_syslog Use syslog for logging
--syslog_facility SYSLOG_FACILITY
Syslog facility to receive log lines
--scan_frequency SCAN_FREQUENCY
Time between snmp poll
--http_server_port HTTP_SERVER_PORT
introspect server port
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
kwargs = {'help': "Specify config file", 'metavar':"FILE",
'action':'append'
}
if os.path.exists(self.CONF_DEFAULT_PATH):
kwargs['default'] = [self.CONF_DEFAULT_PATH]
conf_parser.add_argument("-c", "--conf_file", **kwargs)
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'collectors' : ['127.0.0.1:8086'],
'analytics_api' : ['127.0.0.1:8081'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'scan_frequency' : 60,
'http_server_port': 5921,
'zookeeper' : '127.0.0.1:2181',
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
# Don't surpress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument("--analytics_api",
help="List of analytics-api IP addresses in ip:port format",
nargs="+")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--scan_frequency", type=int,
help="Time between snmp poll")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--zookeeper",
help="ip:port of zookeeper server")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.analytics_api) is str:
self._args.analytics_api = self._args.analytics_api.split()
self._args.config_sections = config
def _pat(self):
if self.__pat is None:
self.__pat = re.compile(', *| +')
return self.__pat
def _mklist(self, s):
return self._pat().split(s)
def collectors(self):
return self._args.collectors
def zookeeper_server(self):
return self._args.zookeeper
def analytics_api(self):
return self._args.analytics_api
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def frequency(self):
return self._args.scan_frequency
def http_port(self):
return self._args.http_server_port
| 37.97076 | 79 | 0.596335 |
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
class CfgParser(object):
CONF_DEFAULT_PATH = '/etc/contrail/contrail-topology.conf'
def __init__(self, argv):
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
conf_parser = argparse.ArgumentParser(add_help=False)
kwargs = {'help': "Specify config file", 'metavar':"FILE",
'action':'append'
}
if os.path.exists(self.CONF_DEFAULT_PATH):
kwargs['default'] = [self.CONF_DEFAULT_PATH]
conf_parser.add_argument("-c", "--conf_file", **kwargs)
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'collectors' : ['127.0.0.1:8086'],
'analytics_api' : ['127.0.0.1:8081'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'scan_frequency' : 60,
'http_server_port': 5921,
'zookeeper' : '127.0.0.1:2181',
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items("DEFAULTS")))
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument("--analytics_api",
help="List of analytics-api IP addresses in ip:port format",
nargs="+")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--scan_frequency", type=int,
help="Time between snmp poll")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--zookeeper",
help="ip:port of zookeeper server")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.analytics_api) is str:
self._args.analytics_api = self._args.analytics_api.split()
self._args.config_sections = config
def _pat(self):
if self.__pat is None:
self.__pat = re.compile(', *| +')
return self.__pat
def _mklist(self, s):
return self._pat().split(s)
def collectors(self):
return self._args.collectors
def zookeeper_server(self):
return self._args.zookeeper
def analytics_api(self):
return self._args.analytics_api
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def frequency(self):
return self._args.scan_frequency
def http_port(self):
return self._args.http_server_port
| true | true |
f728c2249a621aec123829f2600362674d968847 | 2,941 | py | Python | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 23 | 2021-01-19T11:50:57.000Z | 2021-12-12T17:20:22.000Z | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 1 | 2021-04-06T14:35:03.000Z | 2021-06-20T08:56:15.000Z | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 8 | 2021-01-05T10:49:19.000Z | 2021-12-12T17:20:38.000Z | import numpy as np
import torch.nn as nn
import json
def log(epoch, task_id, log_dict, logbook):
log_dict["message"] = f"task_{task_id}_metrics"
log_dict["task_id"] = task_id
log_dict["task_epoch"] = epoch
log_dict["step"] = epoch
logbook.write_metric(log_dict)
def log_task(task_id, log_dict, logbook):
log_dict["message"] = f"incremental_metrics"
log_dict["task_id"] = task_id
log_dict["step"] = task_id
logbook.write_metric(log_dict)
def pad_random_crop(tensor_img, per_direction_padding=0):
pad_left = pad_right = pad_top = pad_bottom = per_direction_padding
tensor_width = tensor_img.shape[-1]
tensor_height = tensor_img.shape[-2]
tensor_img = nn.functional.pad(tensor_img,
[pad_left, pad_right, pad_top, pad_bottom])
start_index_width = np.random.randint(0, pad_left + pad_right)
start_index_height = np.random.randint(0, pad_top + pad_bottom)
end_index_width = start_index_width + tensor_width
end_index_height = start_index_height + tensor_height
return tensor_img[..., start_index_height:end_index_height, start_index_width:end_index_width]
def random_horizontal_flip(tensor_img, flip_prop=0.5):
do_flip = np.random.random() >= (1 - flip_prop)
if do_flip:
return tensor_img.flip((-1))
else:
return tensor_img
def remove_extra_logs(cur_task_id, cur_epoch, file):
logs_to_keep = []
remove_task_summary = False
with open(file, 'r') as logs_file:
for line in logs_file:
json_line = json.loads(line)
if not (json_line['logbook_type'] == "metric"):
logs_to_keep.append(json_line)
elif json_line["task_id"] < cur_task_id:
logs_to_keep.append(json_line)
elif json_line["task_id"] == cur_task_id:
if "task_epoch" in json_line.keys() and json_line["task_epoch"] < cur_epoch:
logs_to_keep.append(json_line)
elif "task_epoch" in json_line.keys() and json_line["task_epoch"] >= cur_epoch:
remove_task_summary = True
elif not remove_task_summary:
logs_to_keep.append(json_line)
with open(file, 'w') as logs_file:
for json_line in logs_to_keep:
logs_file.write(json.dumps(json_line))
logs_file.write("\n")
def extend_list(input_, output_length):
if isinstance(input_, int):
output = [input_ for _ in range(output_length)]
elif hasattr(input_, '__iter__'):
if len(input_) < output_length:
output = input_
output.extend([input_[-1] for _ in range(output_length - len(input_))])
elif len(input_) > output_length:
output = input_[:output_length]
else:
output = input_
else:
raise TypeError("Neither an integer nor an iterable was provided")
return output | 36.7625 | 98 | 0.652159 | import numpy as np
import torch.nn as nn
import json
def log(epoch, task_id, log_dict, logbook):
log_dict["message"] = f"task_{task_id}_metrics"
log_dict["task_id"] = task_id
log_dict["task_epoch"] = epoch
log_dict["step"] = epoch
logbook.write_metric(log_dict)
def log_task(task_id, log_dict, logbook):
log_dict["message"] = f"incremental_metrics"
log_dict["task_id"] = task_id
log_dict["step"] = task_id
logbook.write_metric(log_dict)
def pad_random_crop(tensor_img, per_direction_padding=0):
pad_left = pad_right = pad_top = pad_bottom = per_direction_padding
tensor_width = tensor_img.shape[-1]
tensor_height = tensor_img.shape[-2]
tensor_img = nn.functional.pad(tensor_img,
[pad_left, pad_right, pad_top, pad_bottom])
start_index_width = np.random.randint(0, pad_left + pad_right)
start_index_height = np.random.randint(0, pad_top + pad_bottom)
end_index_width = start_index_width + tensor_width
end_index_height = start_index_height + tensor_height
return tensor_img[..., start_index_height:end_index_height, start_index_width:end_index_width]
def random_horizontal_flip(tensor_img, flip_prop=0.5):
do_flip = np.random.random() >= (1 - flip_prop)
if do_flip:
return tensor_img.flip((-1))
else:
return tensor_img
def remove_extra_logs(cur_task_id, cur_epoch, file):
logs_to_keep = []
remove_task_summary = False
with open(file, 'r') as logs_file:
for line in logs_file:
json_line = json.loads(line)
if not (json_line['logbook_type'] == "metric"):
logs_to_keep.append(json_line)
elif json_line["task_id"] < cur_task_id:
logs_to_keep.append(json_line)
elif json_line["task_id"] == cur_task_id:
if "task_epoch" in json_line.keys() and json_line["task_epoch"] < cur_epoch:
logs_to_keep.append(json_line)
elif "task_epoch" in json_line.keys() and json_line["task_epoch"] >= cur_epoch:
remove_task_summary = True
elif not remove_task_summary:
logs_to_keep.append(json_line)
with open(file, 'w') as logs_file:
for json_line in logs_to_keep:
logs_file.write(json.dumps(json_line))
logs_file.write("\n")
def extend_list(input_, output_length):
if isinstance(input_, int):
output = [input_ for _ in range(output_length)]
elif hasattr(input_, '__iter__'):
if len(input_) < output_length:
output = input_
output.extend([input_[-1] for _ in range(output_length - len(input_))])
elif len(input_) > output_length:
output = input_[:output_length]
else:
output = input_
else:
raise TypeError("Neither an integer nor an iterable was provided")
return output | true | true |
f728c22c6ab2cbd222e594a6ae3da1fe806d67c2 | 15,817 | py | Python | V1_1_0_0/MGC3130/build/lib.linux-armv7l-2.7/MGC3130/MGC3130_DefVar.py | MatteoDestro/RaspberryPi_Gesture_MGC3130 | 071c7d26dab897786dcfd6fc1e5faac9e3531b4b | [
"BSD-2-Clause"
] | 1 | 2021-04-19T12:09:36.000Z | 2021-04-19T12:09:36.000Z | V1_1_0_0/MGC3130/build/lib.linux-armv7l-2.7/MGC3130/MGC3130_DefVar.py | MatteoDestro/RaspberryPi_Gesture_MGC3130 | 071c7d26dab897786dcfd6fc1e5faac9e3531b4b | [
"BSD-2-Clause"
] | null | null | null | V1_1_0_0/MGC3130/build/lib.linux-armv7l-2.7/MGC3130/MGC3130_DefVar.py | MatteoDestro/RaspberryPi_Gesture_MGC3130 | 071c7d26dab897786dcfd6fc1e5faac9e3531b4b | [
"BSD-2-Clause"
] | null | null | null | #==================================================================================
# ctypes type C type Python type
#==================================================================================
# c_bool _Bool bool (1)
#----------------------------------------------------------------------------------
# c_char char 1-character string
#----------------------------------------------------------------------------------
# c_wchar wchar_t 1-character unicode string
#----------------------------------------------------------------------------------
# c_byte char int/long
#----------------------------------------------------------------------------------
# c_ubyte unsigned char int/long
#----------------------------------------------------------------------------------
# c_short short int/long
#----------------------------------------------------------------------------------
# c_ushort unsigned short int/long
#----------------------------------------------------------------------------------
# c_int int int/long
#----------------------------------------------------------------------------------
# c_uint unsigned int int/long
#----------------------------------------------------------------------------------
# c_long long int/long
#----------------------------------------------------------------------------------
# c_ulong unsigned long int/long
#----------------------------------------------------------------------------------
# c_longlong __int64 or long long int/long
#----------------------------------------------------------------------------------
# c_ulonglong unsigned __int64 or
# unsigned long long int/long
#----------------------------------------------------------------------------------
# c_float float float
#----------------------------------------------------------------------------------
# c_double double float
#----------------------------------------------------------------------------------
# c_longdouble long double float
#----------------------------------------------------------------------------------
# c_char_p char * (NUL terminated) string or None
#----------------------------------------------------------------------------------
# c_wchar_p wchar_t * (NUL terminated) unicode or None
#----------------------------------------------------------------------------------
# c_void_p void * int/long or None int/long or None
#==================================================================================
from ctypes import *
#===================================================================
# MGC3130 CMD ID
ID_DATA_OUTPUT = 0x91
ID_FW_VERSION = 0x83
#===================================================================
#===================================================================
MASK_GESTURE_RAW = 0x0001F0FF # Filter mask to remove invalid data into gesture packet
MASK_TOUCH_RAW = 0x00007FFF # Filter mask to remove invalid data into touch packet
MASK_FILTER_GESTURE = 0x00000000000000 # To calculate exactly value of mask see below
# B0000000000000000000000000000000000000000000000000000000000000000 // Set bit to "1" to mask Gesture and convert binary data into hexadecimal data
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+------> if "1" MASK Touch South
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+-------> if "1" MASK Touch West
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+--------> if "1" MASK Touch North
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+---------> if "1" MASK Touch East
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+----------> if "1" MASK Touch Centre
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||+-----------> if "1" MASK Tap South
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||+------------> if "1" MASK Tap West
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||+-------------> if "1" MASK Tap North
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||+--------------> if "1" MASK Tap East
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||+---------------> if "1" MASK Tap Centre
# |||||||||||||||||||||||||||||||||||||||||||||||||||||+----------------> if "1" MASK Double Tap South
# ||||||||||||||||||||||||||||||||||||||||||||||||||||+-----------------> if "1" MASK Double Tap West
# |||||||||||||||||||||||||||||||||||||||||||||||||||+------------------> if "1" MASK Double Tap North
# ||||||||||||||||||||||||||||||||||||||||||||||||||+-------------------> if "1" MASK Double Tap East
# |||||||||||||||||||||||||||||||||||||||||||||||||+--------------------> if "1" MASK Double Tap Centre
# ||||||||||||||||||||||||||||||||||||||||||||||||+---------------------> if "1" MASK Gesture West To East
# |||||||||||||||||||||||||||||||||||||||||||||||+----------------------> if "1" MASK Gesture East To West
# ||||||||||||||||||||||||||||||||||||||||||||||+-----------------------> if "1" MASK Gesture South To North
# |||||||||||||||||||||||||||||||||||||||||||||+------------------------> if "1" MASK Gesture North To South
# ||||||||||||||||||||||||||||||||||||||||||||+-------------------------> if "1" MASK Gesture Edge West To East
# |||||||||||||||||||||||||||||||||||||||||||+--------------------------> if "1" MASK Gesture Edge East To West
# ||||||||||||||||||||||||||||||||||||||||||+---------------------------> if "1" MASK Gesture Edge South To North
# |||||||||||||||||||||||||||||||||||||||||+----------------------------> if "1" MASK Gesture Edge North To South
# ||||||||||||||||||||||||||||||||||||||||+-----------------------------> if "1" MASK Gesture Clock Wise
# |||||||||||||||||||||||||||||||||||||||+------------------------------> if "1" MASK Gesture Counter Clock Wise
# ||||||||||||||||||||||||||||||||||||||+-------------------------------> if "1" MASK Gesture Complete Rotation
# |||||||||||||||||||||||||||||||||||||+--------------------------------> if "1" MASK Gesture Wave X
# ||||||||||||||||||||||||||||||||||||+---------------------------------> if "1" MASK Gesture Wave Y
# |||||||||||||||||||||||||||||||||||+----------------------------------> if "1" MASK Gesture Hold
# ||||||||||||||||||||||||||||||||||+-----------------------------------> if "1" MASK Gesture Presence
# |||||||||||||||||||||||||||||||||+------------------------------------> if "1" MASK Gesture Double West To East
# ||||||||||||||||||||||||||||||||+-------------------------------------> if "1" MASK Gesture Double East To West
# |||||||||||||||||||||||||||||||+--------------------------------------> if "1" MASK Gesture Double South To North
# ||||||||||||||||||||||||||||||+---------------------------------------> if "1" MASK Gesture Double North To South
# ++++++++++++++++++++++++++++++----------------------------------------> Free
#===================================================================
# Use this MASK constant to create application code for gestic gesture decode
#===================================================================
# TOUCH/GESTURE OUTPUT MASK
GESTURE_MASK_TOUCH_SOUTH = 0x0000000000000001
GESTURE_MASK_TOUCH_WEST = 0x0000000000000002
GESTURE_MASK_TOUCH_NORTH = 0x0000000000000004
GESTURE_MASK_TOUCH_EAST = 0x0000000000000008
GESTURE_MASK_TOUCH_CENTRE = 0x0000000000000010
GESTURE_MASK_TAP_SOUTH = 0x0000000000000020
GESTURE_MASK_TAP_WEST = 0x0000000000000040
GESTURE_MASK_TAP_NORTH = 0x0000000000000080
GESTURE_MASK_TAP_EAST = 0x0000000000000100
GESTURE_MASK_TAP_CENTRE = 0x0000000000000200
GESTURE_MASK_DOUBLE_TAP_SOUTH = 0x0000000000000400
GESTURE_MASK_DOUBLE_TAP_WEST = 0x0000000000000800
GESTURE_MASK_DOUBLE_TAP_NORTH = 0x0000000000001000
GESTURE_MASK_DOUBLE_TAP_EAST = 0x0000000000002000
GESTURE_MASK_DOUBLE_TAP_CENTRE = 0x0000000000004000
GESTURE_MASK_WEST_EAST = 0x0000000000008000
GESTURE_MASK_EAST_WEST = 0x0000000000010000
GESTURE_MASK_SOUTH_NORTH = 0x0000000000020000
GESTURE_MASK_NORTH_SOUTH = 0x0000000000040000
GESTURE_MASK_EDGE_WEST_EAST = 0x0000000000080000
GESTURE_MASK_EDGE_EAST_WEST = 0x0000000000100000
GESTURE_MASK_EDGE_SOUTH_NORTH = 0x0000000000200000
GESTURE_MASK_EDGE_NORTH_SOUTH = 0x0000000000400000
GESTURE_MASK_CLOCK_WISE = 0x0000000000800000
GESTURE_MASK_COUNTER_CLOCK_WISE = 0x0000000001000000
GESTURE_MASK_WAVE_X = 0x0000000002000000
GESTURE_MASK_WAVE_Y = 0x0000000004000000
GESTURE_MASK_HOLD = 0x0000000008000000
GESTURE_MASK_PRESENCE = 0x0000000010000000
GESTURE_MASK_DOUBLE_WEST_EAST = 0x0000000020000000
GESTURE_MASK_DOUBLE_EAST_WEST = 0x0000000040000000
GESTURE_MASK_DOUBLE_SOUTH_NORTH = 0x0000000080000000
GESTURE_MASK_DOUBLE_NORTH_SOUTH = 0x0000000100000000
#===================================================================
#===================================================================
# GESTURE INPUT CODE MASK
NO_GESTURE = 0x00
GESTURE_GARBAGE = 0x01
GESTURE_WEST_EAST = 0x02
GESTURE_EAST_WEST = 0x03
GESTURE_SOUTH_NORTH = 0x04
GESTURE_NORTH_SOUTH = 0x05
GESTURE_CLOCK_WISE = 0x06
GESTURE_COUNTER_CLOCK_WISE = 0x07
GESTURE_WAVE_X = 0x08
GESTURE_WAVE_Y = 0x09
GESTURE_HOLD = 0x40
GESTURE_PRESENCE = 0x49
GESTURE_EDGE_WEST_EAST = 0x41
GESTURE_EDGE_EAST_WEST = 0x42
GESTURE_EDGE_SOUTH_NORTH = 0x43
GESTURE_EDGE_NORTH_SOUTH = 0x44
GESTURE_DOUBLE_WEST_EAST = 0x45
GESTURE_DOUBLE_EAST_WEST = 0x46
GESTURE_DOUBLE_SOUTH_NORTH = 0x47
GESTURE_DOUBLE_NORTH_SOUTH = 0x48
#===================================================================
#===================================================================
# Sequence for Tap gesture
# Touch -> Tap
#
# Sequence for Double Tap gesture
# Touch -> Tap -> Touch -> DoubleTap -> Touch -> Tap
#===================================================================
#===================================================================
# AirWheel Variable
AirWheelInfo = 0x00
#===================================================================
#===================================================================
# Gesture Private Structure
LastGesture = 0x00000000
class GestureInfoBit(Structure):
_fields_ = [("GestureCode", c_uint32, 8),
("Reserved", c_uint32, 4),
("GestureType", c_uint32, 4),
("Edgeflick", c_uint32, 1),
("Reserved2", c_uint32, 14),
("GestureInProgress", c_uint32, 1)]
class GestureInfoByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8)]
class GestureInfo(Union):
_fields_ = [("GestureInfo32Bit", GestureInfoBit),
("GestureInfoByte", GestureInfoByte),
("GestureInfoLong", c_uint32),
("GestInfoArray", c_ubyte * 4)]
#===================================================================
#===================================================================
# Touch Private Structure
LastTouch = 0x00000000
class TouchInfoBit(Structure):
_fields_ = [("TouchSouth", c_uint32, 1),
("TouchWest", c_uint32, 1),
("TouchNorth", c_uint32, 1),
("TouchEast", c_uint32, 1),
("TouchCentre", c_uint32, 1),
("TapSouth", c_uint32, 1),
("TapWest", c_uint32, 1),
("TapNorth", c_uint32, 1),
("TapEast", c_uint32, 1),
("TapCentre", c_uint32, 1),
("DoubleTapSouth", c_uint32, 1),
("DoubleTapWest", c_uint32, 1),
("DoubleTapNorth", c_uint32, 1),
("DoubleTapEast", c_uint32, 1),
("DoubleTapCentre", c_uint32, 1),
("Free", c_uint32, 17)]
class TouchInfoByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8)]
class TouchInfo(Union):
_fields_ = [("TouchInfo32Bit", TouchInfoBit),
("TouchInfoByte", TouchInfoByte),
("TouchInfoLong", c_uint32),
("TouchInfoArray", c_ubyte * 4)]
#===================================================================
#===================================================================
# Gesture Public Structure
class GestureBit(Structure):
_fields_ = [("TouchSouth", c_uint64, 1),
("TouchWest", c_uint64, 1),
("TouchNorth", c_uint64, 1),
("TouchEast", c_uint64, 1),
("TouchCentre", c_uint64, 1),
("TapSouth", c_uint64, 1),
("TapWest", c_uint64, 1),
("TapNorth", c_uint64, 1),
("TapEast", c_uint64, 1),
("TapCentre", c_uint64, 1),
("DoubleTapSouth", c_uint64, 1),
("DoubleTapWest", c_uint64, 1),
("DoubleTapNorth", c_uint64, 1),
("DoubleTapEast", c_uint64, 1),
("DoubleTapCentre", c_uint64, 1),
("GestWestEast", c_uint64, 1),
("GestEastWest", c_uint64, 1),
("GestSouthNorth", c_uint64, 1),
("GestNorthSouth", c_uint64, 1),
("EdgeGestWestEast", c_uint64, 1),
("EdgeGestEastWest", c_uint64, 1),
("EdgeGestSouthNorth", c_uint64, 1),
("EdgeGestNorthSouth", c_uint64, 1),
("GestClockWise", c_uint64, 1),
("GestCounterClockWise", c_uint64, 1),
("GestWaveX", c_uint64, 1),
("GestWaveY", c_uint64, 1),
("GestHold", c_uint64, 1),
("GestPresence", c_uint64, 1),
("DoubleGestWestEast", c_uint64, 1),
("DoubleGestEastWest", c_uint64, 1),
("DoubleSouthNorth", c_uint64, 1),
("DoubleGestNorthSouth", c_uint64, 1),
("FreeBit", c_uint64, 31)]
class GestureByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8),
("Byte4", c_uint8),
("Byte5", c_uint8),
("Byte6", c_uint8),
("Byte7", c_uint8)]
class Gesture(Union):
_fields_ = [("Gesture64Bit", GestureBit),
("GestureByte", GestureByte),
("GestureLong", c_uint64),
("GestArray", c_ubyte * 8)]
#===================================================================
#===================================================================
# X, Y, Z coordinates Public Class
Last_X = 0x0000
Last_Y = 0x0000
Last_Z = 0x0000
class x_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class y_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class z_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class xyz_Coordinates(Structure):
_fields_ = [("x", x_SplitByte),
("y", y_SplitByte),
("z", z_SplitByte)]
class Coordinates(Union):
_fields_ = [("xyz", xyz_Coordinates),
("xInt", c_uint16),
("yInt", c_uint16),
("zInt", c_uint16),
("xyzArray", c_ubyte * 6)]
#===================================================================
| 49.895899 | 148 | 0.395208 |
from ctypes import *
ID_DATA_OUTPUT = 0x91
ID_FW_VERSION = 0x83
MASK_GESTURE_RAW = 0x0001F0FF
MASK_TOUCH_RAW = 0x00007FFF
MASK_FILTER_GESTURE = 0x00000000000000
GESTURE_MASK_TOUCH_SOUTH = 0x0000000000000001
GESTURE_MASK_TOUCH_WEST = 0x0000000000000002
GESTURE_MASK_TOUCH_NORTH = 0x0000000000000004
GESTURE_MASK_TOUCH_EAST = 0x0000000000000008
GESTURE_MASK_TOUCH_CENTRE = 0x0000000000000010
GESTURE_MASK_TAP_SOUTH = 0x0000000000000020
GESTURE_MASK_TAP_WEST = 0x0000000000000040
GESTURE_MASK_TAP_NORTH = 0x0000000000000080
GESTURE_MASK_TAP_EAST = 0x0000000000000100
GESTURE_MASK_TAP_CENTRE = 0x0000000000000200
GESTURE_MASK_DOUBLE_TAP_SOUTH = 0x0000000000000400
GESTURE_MASK_DOUBLE_TAP_WEST = 0x0000000000000800
GESTURE_MASK_DOUBLE_TAP_NORTH = 0x0000000000001000
GESTURE_MASK_DOUBLE_TAP_EAST = 0x0000000000002000
GESTURE_MASK_DOUBLE_TAP_CENTRE = 0x0000000000004000
GESTURE_MASK_WEST_EAST = 0x0000000000008000
GESTURE_MASK_EAST_WEST = 0x0000000000010000
GESTURE_MASK_SOUTH_NORTH = 0x0000000000020000
GESTURE_MASK_NORTH_SOUTH = 0x0000000000040000
GESTURE_MASK_EDGE_WEST_EAST = 0x0000000000080000
GESTURE_MASK_EDGE_EAST_WEST = 0x0000000000100000
GESTURE_MASK_EDGE_SOUTH_NORTH = 0x0000000000200000
GESTURE_MASK_EDGE_NORTH_SOUTH = 0x0000000000400000
GESTURE_MASK_CLOCK_WISE = 0x0000000000800000
GESTURE_MASK_COUNTER_CLOCK_WISE = 0x0000000001000000
GESTURE_MASK_WAVE_X = 0x0000000002000000
GESTURE_MASK_WAVE_Y = 0x0000000004000000
GESTURE_MASK_HOLD = 0x0000000008000000
GESTURE_MASK_PRESENCE = 0x0000000010000000
GESTURE_MASK_DOUBLE_WEST_EAST = 0x0000000020000000
GESTURE_MASK_DOUBLE_EAST_WEST = 0x0000000040000000
GESTURE_MASK_DOUBLE_SOUTH_NORTH = 0x0000000080000000
GESTURE_MASK_DOUBLE_NORTH_SOUTH = 0x0000000100000000
NO_GESTURE = 0x00
GESTURE_GARBAGE = 0x01
GESTURE_WEST_EAST = 0x02
GESTURE_EAST_WEST = 0x03
GESTURE_SOUTH_NORTH = 0x04
GESTURE_NORTH_SOUTH = 0x05
GESTURE_CLOCK_WISE = 0x06
GESTURE_COUNTER_CLOCK_WISE = 0x07
GESTURE_WAVE_X = 0x08
GESTURE_WAVE_Y = 0x09
GESTURE_HOLD = 0x40
GESTURE_PRESENCE = 0x49
GESTURE_EDGE_WEST_EAST = 0x41
GESTURE_EDGE_EAST_WEST = 0x42
GESTURE_EDGE_SOUTH_NORTH = 0x43
GESTURE_EDGE_NORTH_SOUTH = 0x44
GESTURE_DOUBLE_WEST_EAST = 0x45
GESTURE_DOUBLE_EAST_WEST = 0x46
GESTURE_DOUBLE_SOUTH_NORTH = 0x47
GESTURE_DOUBLE_NORTH_SOUTH = 0x48
AirWheelInfo = 0x00
LastGesture = 0x00000000
class GestureInfoBit(Structure):
_fields_ = [("GestureCode", c_uint32, 8),
("Reserved", c_uint32, 4),
("GestureType", c_uint32, 4),
("Edgeflick", c_uint32, 1),
("Reserved2", c_uint32, 14),
("GestureInProgress", c_uint32, 1)]
class GestureInfoByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8)]
class GestureInfo(Union):
_fields_ = [("GestureInfo32Bit", GestureInfoBit),
("GestureInfoByte", GestureInfoByte),
("GestureInfoLong", c_uint32),
("GestInfoArray", c_ubyte * 4)]
LastTouch = 0x00000000
class TouchInfoBit(Structure):
_fields_ = [("TouchSouth", c_uint32, 1),
("TouchWest", c_uint32, 1),
("TouchNorth", c_uint32, 1),
("TouchEast", c_uint32, 1),
("TouchCentre", c_uint32, 1),
("TapSouth", c_uint32, 1),
("TapWest", c_uint32, 1),
("TapNorth", c_uint32, 1),
("TapEast", c_uint32, 1),
("TapCentre", c_uint32, 1),
("DoubleTapSouth", c_uint32, 1),
("DoubleTapWest", c_uint32, 1),
("DoubleTapNorth", c_uint32, 1),
("DoubleTapEast", c_uint32, 1),
("DoubleTapCentre", c_uint32, 1),
("Free", c_uint32, 17)]
class TouchInfoByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8)]
class TouchInfo(Union):
_fields_ = [("TouchInfo32Bit", TouchInfoBit),
("TouchInfoByte", TouchInfoByte),
("TouchInfoLong", c_uint32),
("TouchInfoArray", c_ubyte * 4)]
class GestureBit(Structure):
_fields_ = [("TouchSouth", c_uint64, 1),
("TouchWest", c_uint64, 1),
("TouchNorth", c_uint64, 1),
("TouchEast", c_uint64, 1),
("TouchCentre", c_uint64, 1),
("TapSouth", c_uint64, 1),
("TapWest", c_uint64, 1),
("TapNorth", c_uint64, 1),
("TapEast", c_uint64, 1),
("TapCentre", c_uint64, 1),
("DoubleTapSouth", c_uint64, 1),
("DoubleTapWest", c_uint64, 1),
("DoubleTapNorth", c_uint64, 1),
("DoubleTapEast", c_uint64, 1),
("DoubleTapCentre", c_uint64, 1),
("GestWestEast", c_uint64, 1),
("GestEastWest", c_uint64, 1),
("GestSouthNorth", c_uint64, 1),
("GestNorthSouth", c_uint64, 1),
("EdgeGestWestEast", c_uint64, 1),
("EdgeGestEastWest", c_uint64, 1),
("EdgeGestSouthNorth", c_uint64, 1),
("EdgeGestNorthSouth", c_uint64, 1),
("GestClockWise", c_uint64, 1),
("GestCounterClockWise", c_uint64, 1),
("GestWaveX", c_uint64, 1),
("GestWaveY", c_uint64, 1),
("GestHold", c_uint64, 1),
("GestPresence", c_uint64, 1),
("DoubleGestWestEast", c_uint64, 1),
("DoubleGestEastWest", c_uint64, 1),
("DoubleSouthNorth", c_uint64, 1),
("DoubleGestNorthSouth", c_uint64, 1),
("FreeBit", c_uint64, 31)]
class GestureByte(Structure):
_fields_ = [("Byte0", c_uint8),
("Byte1", c_uint8),
("Byte2", c_uint8),
("Byte3", c_uint8),
("Byte4", c_uint8),
("Byte5", c_uint8),
("Byte6", c_uint8),
("Byte7", c_uint8)]
class Gesture(Union):
_fields_ = [("Gesture64Bit", GestureBit),
("GestureByte", GestureByte),
("GestureLong", c_uint64),
("GestArray", c_ubyte * 8)]
Last_X = 0x0000
Last_Y = 0x0000
Last_Z = 0x0000
class x_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class y_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class z_SplitByte(Structure):
_fields_ = [("Byte0", c_ubyte),
("Byte1", c_ubyte)]
class xyz_Coordinates(Structure):
_fields_ = [("x", x_SplitByte),
("y", y_SplitByte),
("z", z_SplitByte)]
class Coordinates(Union):
_fields_ = [("xyz", xyz_Coordinates),
("xInt", c_uint16),
("yInt", c_uint16),
("zInt", c_uint16),
("xyzArray", c_ubyte * 6)]
| true | true |
f728c26febf42a991a26952f5fa1a3457348f35c | 196 | py | Python | cofr/exceptions.py | thibault/trezor-keyval | 5a345f2ab2bcf88aa7ddf2f47e1f7c693b295712 | [
"MIT"
] | 3 | 2018-03-01T12:53:34.000Z | 2019-06-01T16:30:57.000Z | cofr/exceptions.py | thibault/trezor-keyval | 5a345f2ab2bcf88aa7ddf2f47e1f7c693b295712 | [
"MIT"
] | 1 | 2021-06-01T21:37:50.000Z | 2021-06-01T21:37:50.000Z | cofr/exceptions.py | thibault/trezor-keyval | 5a345f2ab2bcf88aa7ddf2f47e1f7c693b295712 | [
"MIT"
] | null | null | null | class NoTrezorFoundError(Exception):
"""No plugged Trezor wallet was found."""
pass
class InvalidCofrFileError(Exception):
"""The file is invalid and cannot be parsed."""
pass
| 17.818182 | 51 | 0.693878 | class NoTrezorFoundError(Exception):
pass
class InvalidCofrFileError(Exception):
pass
| true | true |
f728c2d60bcb05e26ba4202235a7c822207b8198 | 3,861 | py | Python | ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py | generalmotors/ambari-metrics | a197d284c583be1a96134215d61fbfc2ec62b66c | [
"Apache-2.0"
] | 29 | 2018-10-03T21:50:39.000Z | 2022-03-30T04:01:25.000Z | ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py | generalmotors/ambari-metrics | a197d284c583be1a96134215d61fbfc2ec62b66c | [
"Apache-2.0"
] | 23 | 2018-09-25T20:54:54.000Z | 2020-12-01T05:51:48.000Z | ambari-metrics-host-monitoring/src/main/python/core/metric_collector.py | generalmotors/ambari-metrics | a197d284c583be1a96134215d61fbfc2ec62b66c | [
"Apache-2.0"
] | 48 | 2018-09-25T20:11:27.000Z | 2022-02-10T06:39:06.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from time import time
from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
from metering import MeteringMetricHandler
logger = logging.getLogger()
DEFAULT_HOST_APP_ID = '_HOST'
class MetricsCollector():
"""
The main Reader thread that dequeues events from the event queue and
submits a metric record to the emit buffer. Implementation of dequeue is
not required if Timer class is used for metric groups.
"""
def __init__(self, emit_queue, application_metric_map, host_info, config):
self.emit_queue = emit_queue
self.application_metric_map = application_metric_map
self.host_info = host_info
self.metering_enabled = config.is_metering_enabled()
self.metering_handler = MeteringMetricHandler(config)
pass
def process_event(self, event):
if event.get_classname() == HostMetricCollectEvent.__name__:
self.process_host_collection_event(event)
elif event.get_classname() == ProcessMetricCollectEvent.__name__:
self.process_process_collection_event(event)
else:
logger.warn('Unknown event in queue')
pass
def process_host_collection_event(self, event):
startTime = int(round(time() * 1000))
metrics = None
if 'cpu' in event.get_group_name():
metrics = self.host_info.get_cpu_times()
elif 'disk' in event.get_group_name():
metrics = self.host_info.get_combined_disk_usage()
metrics.update(self.host_info.get_combined_disk_io_counters())
metrics.update(self.host_info.get_disk_io_counters_per_disk())
elif 'network' in event.get_group_name():
metrics = self.host_info.get_network_info()
elif 'mem' in event.get_group_name():
metrics = self.host_info.get_mem_info()
elif 'process' in event.get_group_name():
metrics = self.host_info.get_process_info()
elif 'all' in event.get_group_name():
metrics = {}
metrics.update(self.host_info.get_cpu_times())
metrics.update(self.host_info.get_combined_disk_usage())
metrics.update(self.host_info.get_network_info())
metrics.update(self.host_info.get_mem_info())
metrics.update(self.host_info.get_process_info())
metrics.update(self.host_info.get_combined_disk_io_counters())
metrics.update(self.host_info.get_disk_io_counters_per_disk())
else:
logger.warn('Unknown metric group.')
pass
if metrics:
self.application_metric_map.put_metric(DEFAULT_HOST_APP_ID, metrics, startTime)
if self.metering_enabled:
metering_metrics = self.metering_handler.get_metering_metrics(metrics)
self.application_metric_map.put_metric(self.metering_handler.appId, metering_metrics, startTime)
instance_type_metrics = self.metering_handler.get_instance_type_metrics()
self.application_metric_map.put_metric(self.metering_handler.instance_type_metric_appId, instance_type_metrics, startTime)
pass
def process_process_collection_event(self, event):
"""
Collect Process level metrics and update the application metric map
"""
pass
| 37.125 | 130 | 0.760166 |
import logging
from time import time
from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
from metering import MeteringMetricHandler
logger = logging.getLogger()
DEFAULT_HOST_APP_ID = '_HOST'
class MetricsCollector():
def __init__(self, emit_queue, application_metric_map, host_info, config):
self.emit_queue = emit_queue
self.application_metric_map = application_metric_map
self.host_info = host_info
self.metering_enabled = config.is_metering_enabled()
self.metering_handler = MeteringMetricHandler(config)
pass
def process_event(self, event):
if event.get_classname() == HostMetricCollectEvent.__name__:
self.process_host_collection_event(event)
elif event.get_classname() == ProcessMetricCollectEvent.__name__:
self.process_process_collection_event(event)
else:
logger.warn('Unknown event in queue')
pass
def process_host_collection_event(self, event):
startTime = int(round(time() * 1000))
metrics = None
if 'cpu' in event.get_group_name():
metrics = self.host_info.get_cpu_times()
elif 'disk' in event.get_group_name():
metrics = self.host_info.get_combined_disk_usage()
metrics.update(self.host_info.get_combined_disk_io_counters())
metrics.update(self.host_info.get_disk_io_counters_per_disk())
elif 'network' in event.get_group_name():
metrics = self.host_info.get_network_info()
elif 'mem' in event.get_group_name():
metrics = self.host_info.get_mem_info()
elif 'process' in event.get_group_name():
metrics = self.host_info.get_process_info()
elif 'all' in event.get_group_name():
metrics = {}
metrics.update(self.host_info.get_cpu_times())
metrics.update(self.host_info.get_combined_disk_usage())
metrics.update(self.host_info.get_network_info())
metrics.update(self.host_info.get_mem_info())
metrics.update(self.host_info.get_process_info())
metrics.update(self.host_info.get_combined_disk_io_counters())
metrics.update(self.host_info.get_disk_io_counters_per_disk())
else:
logger.warn('Unknown metric group.')
pass
if metrics:
self.application_metric_map.put_metric(DEFAULT_HOST_APP_ID, metrics, startTime)
if self.metering_enabled:
metering_metrics = self.metering_handler.get_metering_metrics(metrics)
self.application_metric_map.put_metric(self.metering_handler.appId, metering_metrics, startTime)
instance_type_metrics = self.metering_handler.get_instance_type_metrics()
self.application_metric_map.put_metric(self.metering_handler.instance_type_metric_appId, instance_type_metrics, startTime)
pass
def process_process_collection_event(self, event):
pass
| true | true |
f728c3198824a7a9f7d5386087457b8eda71063b | 1,721 | py | Python | bdd/group_steps.py | russa1995/python_training | 0566725a15565c83ebc5bbf2b18470f1c3ab9595 | [
"Apache-2.0"
] | null | null | null | bdd/group_steps.py | russa1995/python_training | 0566725a15565c83ebc5bbf2b18470f1c3ab9595 | [
"Apache-2.0"
] | null | null | null | bdd/group_steps.py | russa1995/python_training | 0566725a15565c83ebc5bbf2b18470f1c3ab9595 | [
"Apache-2.0"
] | null | null | null | from pytest_bdd import given, when, then
from model.group import Group
import random
@given('a group list')
def group_list(db):
return db.get_group_list()
@given('a group with <name>, <header> and <footer>')
def new_group(name, header, footer):
return Group(name=name, header=header, footer=footer)
@when ('I add the group to the list')
def add_new_group(app, new_group):
app.group.group_create(new_group)
@then ('the new group list is equal to the old list with the added group')
def verify_group_added(db, group_list, new_group):
old_groups = group_list
new_groups = db.get_group_list()
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
@given('a non-empty group list')
def non_empty_group_list(app, db):
if len(db.get_group_list()) == 0:
app.group.group_create(Group(name='some test'))
return db.get_group_list()
@given('a random group from the list')
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list')
def delete_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new list is equal to the old list without the deleted group')
def verify_group_deleted(db, non_empty_group_list, random_group, app, check_ui):
old_groups = non_empty_group_list
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(random_group)
assert old_groups == new_groups
if check_ui:
new_groups = app.group.get_group_list()
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max) | 36.617021 | 113 | 0.740267 | from pytest_bdd import given, when, then
from model.group import Group
import random
@given('a group list')
def group_list(db):
return db.get_group_list()
@given('a group with <name>, <header> and <footer>')
def new_group(name, header, footer):
return Group(name=name, header=header, footer=footer)
@when ('I add the group to the list')
def add_new_group(app, new_group):
app.group.group_create(new_group)
@then ('the new group list is equal to the old list with the added group')
def verify_group_added(db, group_list, new_group):
old_groups = group_list
new_groups = db.get_group_list()
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
@given('a non-empty group list')
def non_empty_group_list(app, db):
if len(db.get_group_list()) == 0:
app.group.group_create(Group(name='some test'))
return db.get_group_list()
@given('a random group from the list')
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list')
def delete_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new list is equal to the old list without the deleted group')
def verify_group_deleted(db, non_empty_group_list, random_group, app, check_ui):
old_groups = non_empty_group_list
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(random_group)
assert old_groups == new_groups
if check_ui:
new_groups = app.group.get_group_list()
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max) | true | true |
f728c31e556a81c5d46abdcc25bf493c790a601a | 84,818 | py | Python | Packs/EWS/Integrations/EWSO365/EWSO365.py | ryantoddtq/content | 50027658da7189e37e9514fc03057d1c1bc3209f | [
"MIT"
] | 2 | 2020-07-27T10:35:41.000Z | 2020-12-14T15:44:18.000Z | Packs/EWS/Integrations/EWSO365/EWSO365.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | [
"MIT"
] | 48 | 2022-03-08T13:45:00.000Z | 2022-03-31T14:32:05.000Z | Packs/EWS/Integrations/EWSO365/EWSO365.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | [
"MIT"
] | 1 | 2022-01-06T07:09:11.000Z | 2022-01-06T07:09:11.000Z | import random
import string
from typing import Dict
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from datetime import timedelta
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
# Ignore warnings print to stdout
warnings.filterwarnings("ignore")
""" Constants """
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
# move results
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
# item types
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
# context keys
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
# context paths
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
# fetch params
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
# headers
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
""" Classes """
class ProxyAdapter(requests.adapters.HTTPAdapter):
    """HTTP adapter that injects the system proxy settings into every outgoing request."""

    def send(self, *args, **kwargs):
        # Resolve proxies at send time so the current configuration is always used.
        proxies = handle_proxy()
        kwargs['proxies'] = proxies
        return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
    """
    Proxy adapter that also disables certificate verification.
    NoVerifyHTTPAdapter is exchangelib's built-in insecure HTTPAdapter.
    """

    def send(self, *args, **kwargs):
        # Attach the system proxy configuration to every request.
        proxies = handle_proxy()
        kwargs['proxies'] = proxies
        return super().send(*args, **kwargs)
class EWSClient:
    """
    Client that authenticates to Office 365 EWS via OAuth2 (MicrosoftClient)
    and exposes helpers for accounts, items, attachments and folders.
    """

    def __init__(
        self,
        default_target_mailbox,
        client_id,
        client_secret,
        tenant_id,
        folder="Inbox",
        is_public_folder=False,
        request_timeout="120",
        max_fetch=MAX_INCIDENTS_PER_FETCH,
        self_deployed=True,
        insecure=True,
        proxy=False,
        **kwargs,
    ):
        """
        Client used to communicate with EWS
        :param default_target_mailbox: Email address from which to fetch incidents
        :param client_id: Application client ID
        :param client_secret: Application client secret
        :param tenant_id: Azure AD tenant ID
        :param folder: Name of the folder from which to fetch incidents
        :param is_public_folder: Public Folder flag
        :param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server
        :param max_fetch: Max incidents per fetch
        :param self_deployed: whether the Azure app is self-deployed
        :param insecure: Trust any certificate (not secure)
        :param proxy: route requests through the system proxy
        """
        # exchangelib reads the timeout from a class attribute, not per-instance.
        BaseProtocol.TIMEOUT = int(request_timeout)
        self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
        self.ms_client = MicrosoftClient(
            tenant_id=tenant_id,
            auth_id=client_id,
            enc_key=client_secret,
            app_name=APP_NAME,
            base_url=self.ews_server,
            verify=not insecure,
            proxy=proxy,
            self_deployed=self_deployed,
            scope="https://outlook.office.com/.default",
        )
        self.folder_name = folder
        self.is_public_folder = is_public_folder
        self.access_type = kwargs.get('access_type') or IMPERSONATION
        # Hard cap at MAX_INCIDENTS_PER_FETCH regardless of the configured value.
        self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
        self.last_run_ids_queue_size = 500
        self.client_id = client_id
        self.client_secret = client_secret
        self.account_email = default_target_mailbox
        self.config = self.__prepare(insecure)
        self.protocol = BaseProtocol(self.config)

    def __prepare(self, insecure):
        """
        Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION
        :param insecure: Trust any certificate (not secure)
        :return: OAuth 2 Configuration
        """
        # The adapter class is also a process-wide exchangelib setting.
        BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
        access_token = self.ms_client.get_access_token()
        oauth2_token = OAuth2Token({"access_token": access_token})
        self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
            client_id=self.client_id,
            client_secret=self.client_secret,
            access_token=oauth2_token,
        )
        # need to add identity for protocol OAuth header
        self.credentials.identity = Identity(upn=self.account_email)
        config_args = {
            "credentials": credentials,
            "auth_type": OAUTH2,
            "version": Version(EXCHANGE_O365),
            "service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
        }
        return Configuration(**config_args)

    def get_account(self, target_mailbox=None):
        """
        Request an account from EWS
        :param (Optional) target_mailbox: Mailbox associated with the requested account
        :return: exchangelib Account
        """
        if not target_mailbox:
            target_mailbox = self.account_email
        return Account(
            primary_smtp_address=target_mailbox,
            autodiscover=False,
            config=self.config,
            access_type=self.access_type,
        )

    def get_items_from_mailbox(self, account, item_ids):
        """
        Request specific items from a mailbox associated with an account
        :param account: EWS account or target_mailbox associated with that account
        :param item_ids: item_ids of the requested items
        :return: list of exchangelib Items
        :raises Exception: when any of the requested ids is not found
        """
        # allow user to pass target_mailbox as account
        if isinstance(account, str):
            account = self.get_account(account)
        else:
            # NOTE(review): a non-string `account` (including an actual Account
            # object, or None) is discarded and replaced with the default
            # account — confirm callers never rely on a passed Account here.
            account = self.get_account(self.account_email)
        if type(item_ids) is not list:
            item_ids = [item_ids]
        items = [Item(id=x) for x in item_ids]
        result = list(account.fetch(ids=items))
        # Drop "not found" markers; any shortfall means at least one bad id.
        result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
        if len(result) != len(item_ids):
            raise Exception(
                "One or more items were not found. Check the input item ids"
            )
        return result

    def get_item_from_mailbox(self, account, item_id):
        """
        Request a single item from a mailbox associated with an account
        :param account: EWS account or target_mailbox associated with that account
        :param item_id: item_id of the requested item
        :return: exchangelib Item
        :raises Exception: when the item is not found
        """
        result = self.get_items_from_mailbox(account, [item_id])
        if len(result) == 0:
            raise Exception(f"ItemId {str(item_id)} not found")
        return result[0]

    def get_attachments_for_item(self, item_id, account, attachment_ids=None):
        """
        Request attachments for an item
        :param item_id: item_id of the item to retrieve attachments from
        :param account: EWS account or target_mailbox associated with that account
        :param (Optional) attachment_ids: attachment_ids to retrieve; all when omitted
        :return: list of exchangelib Item.attachments
        :raises Exception: when the item or any requested attachment id is missing
        """
        item = self.get_item_from_mailbox(account, item_id)
        attachments = []
        attachment_ids = argToList(attachment_ids)
        if item:
            if item.attachments:
                for attachment in item.attachments:
                    # When specific ids were requested, keep only those.
                    if (
                        attachment_ids
                        and attachment.attachment_id.id not in attachment_ids
                    ):
                        continue
                    attachments.append(attachment)
        else:
            raise Exception("Message item not found: " + item_id)
        if attachment_ids and len(attachments) < len(attachment_ids):
            raise Exception(
                "Some attachment id did not found for message:" + str(attachment_ids)
            )
        return attachments

    def is_default_folder(self, folder_path, is_public=None):
        """
        Is the given folder_path public
        :param folder_path: folder path to check if is public
        :param is_public: (Optional) if provided, will return this value
        :return: Boolean
        """
        if is_public is not None:
            return is_public
        # The configured fetch folder carries the configured public flag.
        if folder_path == self.folder_name:
            return self.is_public_folder
        return False

    def get_folder_by_path(self, path, account=None, is_public=False):
        """
        Retrieve folder by path
        :param path: path of the folder (or a raw Exchange folder id)
        :param account: account associated with the requested path
        :param is_public: is the requested folder public
        :return: exchangelib Folder
        :raises Exception: when a path segment does not exist
        """
        if account is None:
            account = self.get_account()
        # handle exchange folder id (identified purely by string length)
        if len(path) == FOLDER_ID_LEN:
            folders_map = account.root._folders_map
            if path in folders_map:
                return account.root._folders_map[path]
        if is_public:
            folder_result = account.public_folders_root
        elif path == "AllItems":
            folder_result = account.root
        else:
            folder_result = account.inbox.parent  # Top of Information Store
        # Accept both "/" and "\" as path separators.
        path = path.replace("/", "\\")
        path = path.split("\\")
        for sub_folder_name in path:
            # Case-insensitive match of each path segment against the children.
            folder_filter_by_name = [
                x
                for x in folder_result.children
                if x.name.lower() == sub_folder_name.lower()
            ]
            if len(folder_filter_by_name) == 0:
                raise Exception(f"No such folder {path}")
            folder_result = folder_filter_by_name[0]
        return folder_result

    def send_email(self, message: Message):
        """Send the given message from the default account and save it to Sent Items."""
        account = self.get_account()
        message.account = account
        message.send_and_save()
class MarkAsJunk(EWSAccountService):
    """Custom EWS account service invoking the MarkAsJunk operation on a single item."""
    SERVICE_NAME = "MarkAsJunk"

    def call(self, item_id, move_item):
        # Materialize the response first, then report the first error (if any).
        payload = self.get_payload(item_id=item_id, move_item=move_item)
        elements = list(self._get_elements(payload=payload))
        errors = [str(e) for e in elements if isinstance(e, ResponseMessageError)]
        if errors:
            return errors[0]
        return "Success"

    def get_payload(self, item_id, move_item):
        # Build: <m:MarkAsJunk IsJunk="true" MoveItem="..."><m:ItemIds><t:ItemId .../></m:ItemIds></m:MarkAsJunk>
        move_flag = "true" if move_item else "false"
        junk = create_element(
            f"m:{self.SERVICE_NAME}",
            {"IsJunk": "true", "MoveItem": move_flag},
        )
        item_ids = create_element("m:ItemIds")
        item_ids.append(create_element("t:ItemId", {"Id": item_id}))
        junk.append(item_ids)
        return junk
class GetSearchableMailboxes(EWSService):
    """EWS service wrapper for the GetSearchableMailboxes operation."""
    SERVICE_NAME = "GetSearchableMailboxes"
    element_container_name = f"{{{MNS}}}SearchableMailboxes"

    @staticmethod
    def parse_element(element):
        # Extract the text of a child tag, or None when the tag is absent.
        def text_of(tag):
            node = element.find(f"{{{TNS}}}{tag}")
            return node.text if node is not None else None

        return {
            MAILBOX: text_of("PrimarySmtpAddress"),
            MAILBOX_ID: text_of("ReferenceId"),
            "displayName": text_of("DisplayName"),
            "isExternal": text_of("IsExternalMailbox"),
            "externalEmailAddress": text_of("ExternalEmailAddress"),
        }

    def call(self):
        # Only mailboxes that actually carry a ReferenceId are returned.
        return [
            self.parse_element(element)
            for element in self._get_elements(payload=self.get_payload())
            if element.find(f"{{{TNS}}}ReferenceId").text
        ]

    def get_payload(self):
        return create_element(f"m:{self.SERVICE_NAME}")
class ExpandGroup(EWSService):
    """
    EWS service wrapper for the ExpandDL operation (distribution list expansion).
    """
    SERVICE_NAME = "ExpandDL"
    element_container_name = f"{{{MNS}}}DLExpansion"

    @staticmethod
    def parse_element(element):
        # Each child tag may be absent, in which case the value is None.
        return {
            MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
            if element.find(f"{{{TNS}}}EmailAddress") is not None
            else None,
            "displayName": element.find(f"{{{TNS}}}Name").text
            if element.find(f"{{{TNS}}}Name") is not None
            else None,
            "mailboxType": element.find(f"{{{TNS}}}MailboxType").text
            if element.find(f"{{{TNS}}}MailboxType") is not None
            else None,
        }

    def call(self, email_address, recursive_expansion=False):
        """
        Expand the given group, optionally recursing into nested DLs.
        :param email_address: address of the group to expand
        :param recursive_expansion: string flag — compared against "True"
            because the platform passes command arguments as strings.
        :return: list of member dicts
        """
        try:
            if recursive_expansion == "True":
                # Keyed by member mailbox so duplicates across nested DLs collapse.
                group_members: Dict = {}
                self.expand_group_recursive(email_address, group_members)
                return list(group_members.values())
            else:
                return self.expand_group(email_address)
        except ErrorNameResolutionNoResults:
            # Not an error from the user's perspective — report and stop the command.
            demisto.results("No results were found.")
            sys.exit()

    def get_payload(self, email_address):
        element = create_element(f"m:{self.SERVICE_NAME}")
        mailbox_element = create_element("m:Mailbox")
        add_xml_child(mailbox_element, "t:EmailAddress", email_address)
        element.append(mailbox_element)
        return element

    def expand_group(self, email_address):
        """
        Expand given group
        :param email_address: email address of the group to expand
        :return: list dict with parsed expanded group data
        """
        elements = self._get_elements(payload=self.get_payload(email_address))
        return [self.parse_element(x) for x in elements]

    def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
        """
        Expand group recursively
        :param email_address: email address of the group to expand
        :param non_dl_emails: dict of non-distribution-list members, keyed by mailbox
            (results are accumulated here by reference)
        :param dl_emails: (Optional) set of already-visited distribution lists,
            used to break cycles
        :return: None (results returned via the non_dl_emails reference)
        """
        if dl_emails is None:
            dl_emails = set()
        # Already visited — avoid infinite recursion on cyclic group membership.
        if email_address in non_dl_emails or email_address in dl_emails:
            return None
        dl_emails.add(email_address)
        for member in self.expand_group(email_address):
            if (
                member["mailboxType"] == "PublicDL"
                or member["mailboxType"] == "PrivateDL"
            ):
                self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
            else:
                if member["mailbox"] not in non_dl_emails:
                    non_dl_emails[member["mailbox"]] = member
# If you are modifying this probably also need to modify in other files
def exchangelib_cleanup():
    """
    Close all cached exchangelib connections and terminate their thread pools.
    Best-effort: every failure is logged and swallowed so cleanup never fails
    the command that triggered it.
    """
    # Snapshot the cache before close_connections() mutates it.
    key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
    try:
        exchangelib.close_connections()
    except Exception as ex:
        demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
    for key, protocol in key_protocols:
        try:
            # Only terminate a pool that was actually created for this protocol.
            if "thread_pool" in protocol.__dict__:
                demisto.debug(
                    "terminating thread pool key{} id: {}".format(
                        key, id(protocol.thread_pool)
                    )
                )
                protocol.thread_pool.terminate()
                del protocol.__dict__["thread_pool"]
            else:
                demisto.info(
                    "Thread pool not found (ignoring terminate) in protcol dict: {}".format(
                        dir(protocol.__dict__)
                    )
                )
        except Exception as ex:
            demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
""" LOGGING """
log_stream = None
log_handler = None
def start_logging():
    """Attach an in-memory StringIO handler to the root logger (idempotent)."""
    global log_stream
    global log_handler
    # Suppress "--- Logging error ---" noise if a handler fails to emit.
    logging.raiseExceptions = False
    if log_stream is None:
        log_stream = StringIO()
        log_handler = logging.StreamHandler(stream=log_stream)
        log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        logger = logging.getLogger()
        logger.addHandler(log_handler)
        logger.setLevel(logging.DEBUG)
""" Helper Functions """
def get_attachment_name(attachment_name):
    """
    Retrieve the attachment name, or a placeholder when none was provided.
    :param attachment_name: attachment name to retrieve (may be None or empty)
    :return: the original name, or "demisto_untitled_attachment" when missing
    """
    # Idiomatic falsy check covers both None and "" (the two cases the original tested).
    if not attachment_name:
        return "demisto_untitled_attachment"
    return attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
    """
    Build a demisto result entry for an object.
    :param title: Title of the human readable
    :param context_key: Context key used for entry context
    :param obj: Object to create entry for
    :param headers: (Optional) headers used in the tableToMarkDown
    :return: Entry object to be used with demisto.results()
    """
    if len(obj) == 0:
        return "There is no output results"
    table_headers = headers
    if table_headers and isinstance(obj, dict):
        # Keep only headers that actually exist as keys of the object.
        table_headers = list(set(table_headers).intersection(set(obj.keys())))
    entry = {
        "Type": entryTypes["note"],
        "Contents": obj,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": tableToMarkdown(title, obj, table_headers),
        "EntryContext": {context_key: obj},
    }
    return entry
def prepare_args(args):
    """
    Prepare demisto arguments for the API: dashes in keys become underscores,
    and the string flag "is_public" is converted to a real boolean.
    :param args: demisto args
    :return: transformed args dict
    """
    # Dict comprehension replaces the dict(generator) construction (idiomatic, same result).
    args = {key.replace("-", "_"): value for key, value in args.items()}
    if "is_public" in args:
        args["is_public"] = args["is_public"] == "True"
    return args
def get_limited_number_of_messages_from_qs(qs, limit):
    """
    Collect up to `limit` Message items from a query-set, skipping non-messages.
    :param qs: query search to execute
    :param limit: maximum number of messages to collect
    :return: list of exchangelib.Message
    """
    messages = []
    for candidate in qs:
        # Stop before inspecting the next candidate once the quota is reached.
        if len(messages) == limit:
            break
        if isinstance(candidate, Message):
            messages.append(candidate)
    return messages
def keys_to_camel_case(value):
    """
    Recursively convert snake_case strings / dict keys to camelCase.
    Strings are converted directly; lists and sets are mapped element-wise;
    dict keys are converted and list/dict values recursed into. Other values
    (and non-snake strings) pass through unchanged.
    :param value: value to transform
    :return: transformed value
    """
    def to_camel(snake_str):
        head, *tail = snake_str.split("_")
        return head + "".join(part.title() for part in tail)

    if value is None:
        return None
    if isinstance(value, (list, set)):
        return [keys_to_camel_case(element) for element in value]
    if isinstance(value, dict):
        return {
            keys_to_camel_case(key): (
                keys_to_camel_case(val) if isinstance(val, (list, dict)) else val
            )
            for key, val in value.items()
        }
    return to_camel(value)
def get_last_run(client: EWSClient, last_run=None):
    """
    Normalize the persisted last-run object for the fetch loop.
    Resets it when absent or when the configured folder changed; parses the
    stored time string into an EWSDateTime; guarantees the ids list exists.
    :param client: EWS Client
    :param last_run: (Optional) last run object
    :return: normalized last run dict
    """
    folder_changed = not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name
    if folder_changed:
        last_run = {
            LAST_RUN_TIME: None,
            LAST_RUN_FOLDER: client.folder_name,
            LAST_RUN_IDS: [],
        }
    if last_run.get(LAST_RUN_TIME) is not None:
        last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
    # In case we have existing last_run data without an ids list
    if last_run.get(LAST_RUN_IDS) is None:
        last_run[LAST_RUN_IDS] = []
    return last_run
def email_ec(item):
    """
    Build the standard Email entry context from an exchangelib item.
    :param item: exchangelib.Item
    :return: entry context dict
    """
    def addresses(mailboxes):
        # Empty/None recipient lists are represented as None in context.
        if not mailboxes:
            return None
        return [mailbox.email_address for mailbox in mailboxes]

    return {
        "CC": addresses(item.cc_recipients),
        "BCC": addresses(item.bcc_recipients),
        "To": addresses(item.to_recipients),
        "From": item.author.email_address,
        "Subject": item.subject,
        "Text": item.text_body,
        "HTML": item.body,
        "HeadersMap": {header.name: header.value for header in item.headers},
    }
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
    """
    Parses an exchangelib item as a dict
    :param item: exchangelib.Item to parse
    :param (Optional) email_address: string mailbox added under the MAILBOX key
    :param (Optional) camel_case: convert snake_case keys to camelCase
    :param (Optional) compact_fields: keep only a curated subset of fields
    :return: Item as a dict
    """
    def parse_object_as_dict(obj):
        # Flatten an exchangelib field-holder into {field_name: value}.
        raw_dict = {}
        if obj is not None:
            for field in obj.FIELDS:
                raw_dict[field.name] = getattr(obj, field.name, None)
        return raw_dict

    def parse_folder_as_json(folder):
        raw_dict = parse_object_as_dict(folder)
        # Nested structures need their own flattening.
        if "parent_folder_id" in raw_dict:
            raw_dict["parent_folder_id"] = parse_folder_as_json(
                raw_dict["parent_folder_id"]
            )
        if "effective_rights" in raw_dict:
            raw_dict["effective_rights"] = parse_object_as_dict(
                raw_dict["effective_rights"]
            )
        return raw_dict

    raw_dict = {}
    # Pass 1: copy simple scalar fields as-is.
    # NOTE(review): `str` appears twice and `None` here is the value None, not
    # NoneType — `type(value) in [...]` can never match a None value, so None
    # fields are silently skipped. Confirm before "fixing"; downstream context
    # may rely on absent-rather-than-null keys.
    for field, value in item._field_vals():
        if type(value) in [str, str, int, float, bool, Body, HTMLBody, None]:
            raw_dict[field] = value
    raw_dict["id"] = item.id
    if getattr(item, "attachments", None):
        raw_dict["attachments"] = [
            parse_attachment_as_dict(item.id, x) for x in item.attachments
        ]
    # Pass 2: datetime fields, rendered in EWS format.
    for time_field in [
        "datetime_sent",
        "datetime_created",
        "datetime_received",
        "last_modified_time",
        "reminder_due_by",
    ]:
        value = getattr(item, time_field, None)
        if value:
            raw_dict[time_field] = value.ewsformat()
    # Pass 3: nested object fields, flattened to dicts (or lists of dicts).
    for dict_field in [
        "effective_rights",
        "parent_folder_id",
        "conversation_id",
        "author",
        "extern_id",
        "received_by",
        "received_representing",
        "reply_to",
        "sender",
        "folder",
    ]:
        value = getattr(item, dict_field, None)
        if value:
            if isinstance(value, list):
                raw_dict[dict_field] = []
                for single_val in value:
                    raw_dict[dict_field].append(parse_object_as_dict(single_val))
            else:
                raw_dict[dict_field] = parse_object_as_dict(value)
    # Pass 4: list-of-object fields.
    for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
        value = getattr(item, list_dict_field, None)
        if value:
            raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
    if getattr(item, "folder", None):
        raw_dict["folder"] = parse_folder_as_json(item.folder)
        # Strip the "Top of Information Store" prefix for a user-friendly path.
        folder_path = (
            item.folder.absolute[len(TOIS_PATH):]
            if item.folder.absolute.startswith(TOIS_PATH)
            else item.folder.absolute
        )
        raw_dict["folder_path"] = folder_path
    if compact_fields:
        new_dict = {}
        # noinspection PyListCreation
        fields_list = [
            "datetime_created",
            "datetime_received",
            "datetime_sent",
            "sender",
            "has_attachments",
            "importance",
            "message_id",
            "last_modified_time",
            "size",
            "subject",
            "text_body",
            "headers",
            "body",
            "folder_path",
            "is_read",
        ]
        if "id" in raw_dict:
            new_dict["itemId"] = raw_dict["id"]
            fields_list.append("itemId")
        for field in fields_list:
            if field in raw_dict:
                new_dict[field] = raw_dict.get(field)
        # Collapse mailbox objects down to their email address.
        for field in ["received_by", "author", "sender"]:
            if field in raw_dict:
                new_dict[field] = raw_dict.get(field, {}).get("email_address")
        for field in ["to_recipients"]:
            if field in raw_dict:
                new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
        # Split attachments by type into separate context keys.
        attachments = raw_dict.get("attachments")
        if attachments and len(attachments) > 0:
            file_attachments = [
                x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
            ]
            if len(file_attachments) > 0:
                new_dict["FileAttachments"] = file_attachments
            item_attachments = [
                x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
            ]
            if len(item_attachments) > 0:
                new_dict["ItemAttachments"] = item_attachments
        raw_dict = new_dict
    if camel_case:
        raw_dict = keys_to_camel_case(raw_dict)
    if email_address:
        raw_dict[MAILBOX] = email_address
    return raw_dict
def get_entry_for_file_attachment(item_id, attachment):
    """
    Create a file result entry for an attachment, with its parsed metadata in context.
    :param item_id: id of the item the attachment belongs to
    :param attachment: exchangelib FileAttachment
    :return: file entry dict for the attachment
    """
    file_entry = fileResult(get_attachment_name(attachment.name), attachment.content)
    context_path = CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_FILE_ATTACHMENT
    file_entry["EntryContext"] = {
        context_path: parse_attachment_as_dict(item_id, attachment)
    }
    return file_entry
def parse_attachment_as_dict(item_id, attachment):
    """
    Parse an exchangelib attachment into a flat metadata dict.
    :param item_id: id of the item the attachment belongs to
    :param attachment: exchangelib FileAttachment or ItemAttachment
    :return: dict describing the attachment (SHA256 is None when the content is empty)
    """
    is_file_attachment = isinstance(attachment, FileAttachment)
    try:
        # FileAttachment exposes raw bytes; ItemAttachment exposes the attached
        # item's MIME content. Either may be empty/None.
        attachment_content = (
            attachment.content if is_file_attachment else attachment.item.mime_content
        )
        attachment_sha256 = (
            hashlib.sha256(attachment_content).hexdigest() if attachment_content else None
        )
    except TypeError as e:
        # exchangelib raises this specific TypeError for empty content; fall back
        # to a None hash in that case, re-raise anything else.
        if str(e) != "must be string or buffer, not None":
            raise
        attachment_sha256 = None
    # Single result-building path — the original duplicated this whole dict in
    # both the try and except branches, differing only in the SHA256 value.
    return {
        ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
        ATTACHMENT_ID: attachment.attachment_id.id,
        "attachmentName": get_attachment_name(attachment.name),
        "attachmentSHA256": attachment_sha256,
        "attachmentContentType": attachment.content_type,
        "attachmentContentId": attachment.content_id,
        "attachmentContentLocation": attachment.content_location,
        "attachmentSize": attachment.size,
        "attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
        "attachmentIsInline": attachment.is_inline,
        ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
        if is_file_attachment
        else ITEM_ATTACHMENT_TYPE,
    }
def get_entry_for_item_attachment(item_id, attachment, target_email):
    """
    Create a note entry for an item (message) attachment.
    :param item_id: Item id
    :param attachment: exchangelib attachment
    :param target_email: target email
    :return: note entry dict for the item attachment
    """
    # Merge the attachment metadata with the parsed attached item itself.
    parsed = parse_attachment_as_dict(item_id, attachment)
    parsed.update(
        parse_item_as_dict(
            attachment.item, target_email, camel_case=True, compact_fields=True
        )
    )
    title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
    return get_entry_for_object(
        title,
        CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
        parsed,
    )
""" Command Functions """
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
    """
    Expand a distribution group and return its members.
    :param client: EWS Client
    :param email_address: Email address of the group to expand
    :param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False".
    :return: Expanded groups output tuple (readable, context, raw)
    """
    expand_service = ExpandGroup(protocol=client.protocol)
    group_members = expand_service.call(email_address, recursive_expansion)
    group_details = {"name": email_address, "members": group_members}
    readable_output = tableToMarkdown("Group Members", group_members)
    output = {"EWS.ExpandGroup": group_details}
    return readable_output, output, group_details
def get_searchable_mailboxes(client: EWSClient):
    """
    Retrieve the list of searchable mailboxes.
    :param client: EWS Client
    :return: Searchable mailboxes output tuple (readable, context, raw)
    """
    mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
    output = {"EWS.Mailboxes": mailboxes}
    readable_output = tableToMarkdown(
        "Searchable mailboxes", mailboxes, headers=["displayName", "mailbox"]
    )
    return readable_output, output, mailboxes
def delete_attachments_for_message(
    client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
    """
    Delete attachments from a given message.
    :param client: EWS Client
    :param item_id: item id
    :param (Optional) target_mailbox: target mailbox
    :param (Optional) attachment_ids: attachment ids to delete; all when omitted
    :return: entries describing the deleted attachments
    """
    attachments = client.get_attachments_for_item(
        item_id, target_mailbox, attachment_ids
    )
    deleted_file_attachments = []
    deleted_item_attachments = []  # type: ignore
    for attachment in attachments:
        deletion_record = {
            ATTACHMENT_ID: attachment.attachment_id.id,
            ACTION: "deleted",
        }
        # Track file and item attachments separately: they land in different context paths.
        bucket = (
            deleted_file_attachments
            if isinstance(attachment, FileAttachment)
            else deleted_item_attachments
        )
        bucket.append(deletion_record)
        attachment.detach()
    entries = []
    if deleted_file_attachments:
        entries.append(
            get_entry_for_object(
                "Deleted file attachments",
                "EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
                deleted_file_attachments,
            )
        )
    if deleted_item_attachments:
        entries.append(
            get_entry_for_object(
                "Deleted item attachments",
                "EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
                deleted_item_attachments,
            )
        )
    return entries
def fetch_attachments_for_message(
    client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
    """
    Fetches attachments for a message
    :param client: EWS Client
    :param item_id: item id
    :param (Optional) target_mailbox: target mailbox
    :param (Optional) attachment_ids: attachment ids; all attachments when omitted
    :return: list of parsed entries
    """
    account = client.get_account(target_mailbox)
    attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
    entries = []
    for attachment in attachments:
        if isinstance(attachment, FileAttachment):
            try:
                # Empty file attachments produce no entry.
                if attachment.content:
                    entries.append(get_entry_for_file_attachment(item_id, attachment))
            except TypeError as e:
                # exchangelib raises this exact TypeError for empty content;
                # anything else is a genuine error and is re-raised.
                if str(e) != "must be string or buffer, not None":
                    raise
        else:
            # Item (message) attachment: emit both a parsed note entry and,
            # when MIME content exists, the raw message as an .eml file.
            entries.append(
                get_entry_for_item_attachment(
                    item_id, attachment, account.primary_smtp_address
                )
            )
            if attachment.item.mime_content:
                entries.append(
                    fileResult(
                        get_attachment_name(attachment.name) + ".eml",
                        attachment.item.mime_content,
                    )
                )
    return entries
def move_item_between_mailboxes(
    client: EWSClient,
    item_id,
    destination_mailbox,
    destination_folder_path,
    source_mailbox=None,
    is_public=None,
):
    """
    Moves item between mailboxes
    :param client: EWS Client
    :param item_id: item id
    :param destination_mailbox: destination mailbox
    :param destination_folder_path: destination folder path
    :param (Optional) source_mailbox: source mailbox
    :param (Optional) is_public: is the destination folder public
    :return: Output tuple (readable, context, raw)
    """
    source_account = client.get_account(source_mailbox)
    destination_account = client.get_account(destination_mailbox)
    is_public = client.is_default_folder(destination_folder_path, is_public)
    destination_folder = client.get_folder_by_path(
        destination_folder_path, destination_account, is_public
    )
    item = client.get_item_from_mailbox(source_account, item_id)
    # Cross-mailbox move = export from source, upload to destination, then
    # delete the original. Not atomic: a failure mid-way can leave a copy behind.
    exported_items = source_account.export([item])
    destination_account.upload([(destination_folder, exported_items[0])])
    source_account.bulk_delete([item])
    move_result = {
        MOVED_TO_MAILBOX: destination_mailbox,
        MOVED_TO_FOLDER: destination_folder_path,
    }
    readable_output = "Item was moved successfully."
    output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
    return readable_output, output, move_result
def move_item(
    client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
    """
    Moves an item within the same mailbox
    :param client: EWS Client
    :param item_id: item id
    :param target_folder_path: target folder path
    :param (Optional) target_mailbox: mailbox containing the item
    :param (Optional) is_public: is the destination folder public
    :return: Output tuple (readable, context, raw)
    """
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(target_folder_path, is_public)
    # NOTE(review): the folder lookup does not pass `account`, so
    # get_folder_by_path resolves it against the client's default account —
    # confirm this is intended when target_mailbox differs from the default.
    target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
    item = client.get_item_from_mailbox(account, item_id)
    if isinstance(item, ErrorInvalidIdMalformed):
        raise Exception("Item not found")
    item.move(target_folder)
    move_result = {
        NEW_ITEM_ID: item.id,  # moving assigns the item a new id
        ITEM_ID: item_id,
        MESSAGE_ID: item.message_id,
        ACTION: "moved",
    }
    readable_output = tableToMarkdown("Moved items", move_result)
    output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
    return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
    """
    Delete items from a mailbox.
    :param client: EWS Client
    :param item_ids: ids of the items to delete
    :param delete_type: deletion type — "trash", "soft" or "hard"
    :param (Optional) target_mailbox: mailbox containing the items
    :return: Output tuple (readable, context, raw)
    """
    item_ids = argToList(item_ids)
    items = client.get_items_from_mailbox(target_mailbox, item_ids)
    delete_type = delete_type.lower()
    deleted_items = []
    for item in items:
        # Capture the id before deletion mutates/invalidates the item.
        original_id = item.id
        if delete_type == "trash":
            item.move_to_trash()
        elif delete_type == "soft":
            item.soft_delete()
        elif delete_type == "hard":
            item.delete()
        else:
            raise Exception(
                f'invalid delete type: {delete_type}. Use "trash" \\ "soft" \\ "hard"'
            )
        deleted_items.append(
            {
                ITEM_ID: original_id,
                MESSAGE_ID: item.message_id,
                ACTION: f"{delete_type}-deleted",
            }
        )
    readable_output = tableToMarkdown(
        f"Deleted items ({delete_type} delete type)", deleted_items
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
    return readable_output, output, deleted_items
def search_items_in_mailbox(
    client: EWSClient,
    query=None,
    message_id=None,
    folder_path="",
    limit=100,
    target_mailbox=None,
    is_public=None,
    selected_fields="all",
):
    """
    Search items in mailbox
    :param client: EWS Client
    :param (Optional) query: query to execute
    :param (Optional) message_id: message id to search (wrapped in <> if missing)
    :param (Optional) folder_path: folder path to search; all folders when empty
    :param (Optional) limit: max amount of items to fetch
    :param (Optional) target_mailbox: mailbox containing the items
    :param (Optional) is_public: is the targeted folder public
    :param (Optional) selected_fields: comma-separated field names, or "all"
    :return: Output tuple (readable, context, raw)
    """
    if not query and not message_id:
        return_error("Missing required argument. Provide query or message-id")
    # RFC message ids are stored with surrounding angle brackets.
    if message_id and message_id[0] != "<" and message_id[-1] != ">":
        message_id = "<{}>".format(message_id)
    account = client.get_account(target_mailbox)
    limit = int(limit)
    if folder_path.lower() == "inbox":
        folders = [account.inbox]
    elif folder_path:
        is_public = client.is_default_folder(folder_path, is_public)
        folders = [client.get_folder_by_path(folder_path, account, is_public)]
    else:
        # No folder given: walk the whole Top of Information Store tree.
        folders = account.inbox.parent.walk()  # pylint: disable=E1101
    items = []  # type: ignore
    selected_all_fields = selected_fields == "all"
    # "all" keeps a list of every Message field; otherwise a set of the
    # requested names, always including id and message_id.
    if selected_all_fields:
        restricted_fields = list([x.name for x in Message.FIELDS])  # type: ignore
    else:
        restricted_fields = set(argToList(selected_fields))  # type: ignore
        restricted_fields.update(["id", "message_id"])  # type: ignore
    for folder in folders:
        # Skip folders that cannot contain messages (e.g. calendars).
        if Message not in folder.supported_item_models:
            continue
        if query:
            items_qs = folder.filter(query).only(*restricted_fields)
        else:
            items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
        items += get_limited_number_of_messages_from_qs(items_qs, limit)
        if len(items) >= limit:
            break
    items = items[:limit]
    searched_items_result = [
        parse_item_as_dict(
            item,
            account.primary_smtp_address,
            camel_case=True,
            compact_fields=selected_all_fields,
        )
        for item in items
    ]
    if not selected_all_fields:
        # Keep only the requested fields (keys were camel-cased above).
        searched_items_result = [
            {k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
            for i in searched_items_result
        ]
        for item in searched_items_result:
            item["itemId"] = item.pop("id", "")
    readable_output = tableToMarkdown(
        "Searched items",
        searched_items_result,
        headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
    return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
    """
    Retrieve the out-of-office state of the targeted mailbox.
    :param client: EWS Client
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple (readable, context, raw)
    """
    account = client.get_account(target_mailbox)
    oof = account.oof_settings
    oof_dict = {
        "state": oof.state,  # pylint: disable=E1101
        "externalAudience": getattr(oof, "external_audience", None),
        "start": oof.start.ewsformat() if oof.start else None,  # pylint: disable=E1101
        "end": oof.end.ewsformat() if oof.end else None,  # pylint: disable=E1101
        # Bug fix: exchangelib's OofSettings fields are internal_reply/external_reply;
        # the previous "internal_replay"/"external_replay" lookups always returned None.
        "internalReply": getattr(oof, "internal_reply", None),
        "externalReply": getattr(oof, "external_reply", None),
        MAILBOX: account.primary_smtp_address,
    }
    readable_output = tableToMarkdown(
        f"Out of office state for {account.primary_smtp_address}", oof_dict
    )
    output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
    return readable_output, output, oof_dict
def recover_soft_delete_item(
    client: EWSClient,
    message_ids,
    target_folder_path="Inbox",
    target_mailbox=None,
    is_public=None,
):
    """
    Recovers soft deleted items
    :param client: EWS Client
    :param message_ids: Message ids to recover
    :param (Optional) target_folder_path: target folder path
    :param (Optional) target_mailbox: target mailbox
    :param (Optional) is_public: is the target folder public
    :return: Output tuple
    :raises Exception: when one or more requested message ids cannot be found
        in the recoverable-items directory
    """
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(target_folder_path, is_public)
    target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
    recovered_messages = []
    message_ids = argToList(message_ids)
    items_to_recover = account.recoverable_items_deletions.filter(  # pylint: disable=E1101
        message_id__in=message_ids
    ).all()  # pylint: disable=E1101
    recovered_items = set()
    for item in items_to_recover:
        recovered_items.add(item)
    if len(recovered_items) != len(message_ids):
        # Compare by message_id (strings) rather than by item objects; diffing
        # strings against exchangelib items would report every id as missing.
        recovered_ids = {item.message_id for item in recovered_items}
        missing_items = set(message_ids).difference(recovered_ids)
        raise Exception(
            f"Some message ids are missing in recoverable items directory: {missing_items}"
        )
    for item in recovered_items:
        item.move(target_folder)
        recovered_messages.append(
            {ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
        )
    readable_output = tableToMarkdown("Recovered messages", recovered_messages)
    output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
    return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
    """
    Retrieve contacts of the target mailbox or client mailbox
    :param client: EWS Client
    :param limit: max amount of contacts to retrieve
    :param (Optional) target_mailbox: Target mailbox
    :return: Output tuple (readable output, context output, list of contact dicts)
    """
    # Flatten an exchangelib PhysicalAddress element into a plain dict.
    def parse_physical_address(address):
        result = {}
        for attr in ["city", "country", "label", "state", "street", "zipcode"]:
            result[attr] = getattr(address, attr, None)
        return result
    # Flatten an exchangelib PhoneNumber element into a plain dict.
    def parse_phone_number(phone_number):
        result = {}
        for attr in ["label", "phone_number"]:
            result[attr] = getattr(phone_number, attr, None)
        return result
    # Convert a contact item into a camelCase dict, keeping only string and
    # EWSDateTime field values (dates are serialized with ewsformat()).
    def parse_contact(contact):
        # NOTE(review): relies on exchangelib's private _field_vals() helper —
        # confirm it still exists when upgrading exchangelib.
        contact_dict = dict(
            (k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
            for k, v in list(contact._field_vals())
            if isinstance(v, str) or isinstance(v, EWSDateTime)
        )
        if isinstance(contact, Contact) and contact.physical_addresses:
            contact_dict["physical_addresses"] = list(
                map(parse_physical_address, contact.physical_addresses)
            )
        if isinstance(contact, Contact) and contact.phone_numbers:
            contact_dict["phone_numbers"] = list(
                map(parse_phone_number, contact.phone_numbers)
            )
        if (
            isinstance(contact, Contact)
            and contact.email_addresses
            and len(contact.email_addresses) > 0
        ):
            contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
        contact_dict = keys_to_camel_case(contact_dict)
        # Drop falsy values and the bulky MIME payload from the context entry.
        contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
        contact_dict.pop("mimeContent", None)
        contact_dict["originMailbox"] = target_mailbox
        return contact_dict
    account = client.get_account(target_mailbox)
    contacts = []
    for contact in account.contacts.all()[: int(limit)]:  # pylint: disable=E1101
        contacts.append(parse_contact(contact))
    readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
    output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
    return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
    """
    Creates a folder in the target mailbox or the client mailbox
    :param client: EWS Client
    :param new_folder_name: new folder name
    :param folder_path: path of the new folder
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple (single human-readable entry)
    """
    account = client.get_account(target_mailbox)
    full_path = os.path.join(folder_path, new_folder_name)
    # If the full path already resolves, do not create a duplicate folder.
    already_exists = False
    try:
        already_exists = bool(client.get_folder_by_path(full_path, account))
    except Exception:
        already_exists = False
    if already_exists:
        return f"Folder {full_path} already exists",
    parent = client.get_folder_by_path(folder_path, account)
    Folder(parent=parent, name=new_folder_name).save()
    # Sanity check: resolving the freshly created path should now succeed.
    client.get_folder_by_path(full_path, account)
    return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
    """
    Finds folders in the mailbox
    :param client: EWS Client
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple (folder tree text, context output, folder entries)
    """
    account = client.get_account(target_mailbox)
    # Public-folder instances walk the public root instead of the mailbox root.
    root = account.public_folders_root if client.is_public_folder else account.root
    folders = [folder_to_context_entry(f) for f in root.walk()]  # pylint: disable=E1101
    readable_output = root.tree()  # pylint: disable=E1101
    output = {"EWS.Folders(val.id == obj.id)": folders}
    return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
    """
    Marks item as junk in the target mailbox or client mailbox
    :param client: EWS Client
    :param item_id: item ids to mark as junk
    :param move_items: "yes" or "no" - to move or not to move to trash
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple
    :raises Exception: when the EWS MarkAsJunk call does not return "Success"
    """
    account = client.get_account(target_mailbox)
    should_move = move_items.lower() == "yes"
    ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=should_move)
    if ews_result != "Success":
        raise Exception("Failed mark-item-as-junk with error: " + ews_result)
    mark_as_junk_result = {
        ITEM_ID: item_id,
        ACTION: "marked-as-junk",
    }
    readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
    output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
    return readable_output, output, mark_as_junk_result
def get_items_from_folder(
    client: EWSClient,
    folder_path,
    limit=100,
    target_mailbox=None,
    is_public=None,
    get_internal_item="no",
):
    """
    Retrieve items from folder path
    :param client: EWS Client
    :param folder_path: folder path
    :param (Optional) limit: max amount of items to retrieve
    :param (Optional) target_mailbox: target mailbox
    :param (Optional) is_public: is the folder public
    :param (Optional) get_internal_item: should also retrieve internal items ("no" by default)
    :return: Output tuple (readable output, context output, list of item dicts)
    """
    account = client.get_account(target_mailbox)
    limit = int(limit)
    get_internal_item = get_internal_item == "yes"
    is_public = client.is_default_folder(folder_path, is_public)
    folder = client.get_folder_by_path(folder_path, account, is_public)
    # Newest items first, fetched lazily up to the requested limit.
    qs = folder.filter().order_by("-datetime_created")[:limit]
    items = get_limited_number_of_messages_from_qs(qs, limit)
    items_result = []
    for item in items:
        item_attachment = parse_item_as_dict(
            item, account.primary_smtp_address, camel_case=True, compact_fields=True
        )
        for attachment in item.attachments:
            if (
                get_internal_item
                and isinstance(attachment, ItemAttachment)
                and isinstance(attachment.item, Message)
            ):
                # if found item attachment - switch the result to the first
                # attached message instead of the carrier item
                item_attachment = parse_item_as_dict(
                    attachment.item,
                    account.primary_smtp_address,
                    camel_case=True,
                    compact_fields=True,
                )
                break
        items_result.append(item_attachment)
    hm_headers = [
        "sender",
        "subject",
        "hasAttachments",
        "datetimeReceived",
        "receivedBy",
        "author",
        "toRecipients",
        "id",
    ]
    readable_output = tableToMarkdown(
        "Items in folder " + folder_path, items_result, headers=hm_headers
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
    return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
    """
    Get items from target mailbox or client mailbox
    :param client: EWS Client
    :param item_ids: item ids to retrieve
    :param (Optional) target_mailbox: target mailbox to retrieve items from
    :return: Output tuple (readable output, context output, parsed incidents)
    """
    account = client.get_account(target_mailbox)
    # Only Message items are supported; other item types are dropped.
    messages = [
        fetched
        for fetched in client.get_items_from_mailbox(account, argToList(item_ids))
        if isinstance(fetched, Message)
    ]
    items_as_incidents = [parse_incident_from_item(msg) for msg in messages]
    items_to_context = [
        parse_item_as_dict(msg, account.primary_smtp_address, True, True)
        for msg in messages
    ]
    readable_output = tableToMarkdown(
        "Get items", items_to_context, ITEMS_RESULTS_HEADERS
    )
    output = {
        CONTEXT_UPDATE_EWS_ITEM: items_to_context,
        "Email": [email_ec(msg) for msg in messages],
    }
    return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
    """
    Retrieve a folder from the target mailbox or client mailbox
    :param client: EWS Client
    :param folder_path: folder path to retrieve
    :param (Optional) target_mailbox: target mailbox
    :param (Optional) is_public: is the folder public
    :return: Output tuple
    """
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(folder_path, is_public)
    folder_obj = client.get_folder_by_path(folder_path, account=account, is_public=is_public)
    folder = folder_to_context_entry(folder_obj)
    readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
    output = {CONTEXT_UPDATE_FOLDER: folder}
    return readable_output, output, folder
def folder_to_context_entry(f):
    """
    Create a context entry from a folder response
    :param f: folder response (exchangelib Folder object or a plain dict)
    :return: dict context entry (implicitly None when ``f`` is neither)
    """
    try:
        entry = {
            "name": f.name,
            "totalCount": f.total_count,
            "id": f.id,
            "childrenFolderCount": f.child_folder_count,
            "changeKey": f.changekey,
        }
        # Only folder classes that declare an unread_count field expose it.
        if "unread_count" in [field.name for field in Folder.FIELDS]:
            entry["unreadCount"] = f.unread_count
        return entry
    except AttributeError:
        # Raw dict responses carry the same data under snake_case keys.
        if isinstance(f, dict):
            return {
                "name": f.get("name"),
                "totalCount": f.get("total_count"),
                "id": f.get("id"),
                "childrenFolderCount": f.get("child_folder_count"),
                "changeKey": f.get("changekey"),
                "unreadCount": f.get("unread_count"),
            }
def mark_item_as_read(
    client: EWSClient, item_ids, operation="read", target_mailbox=None
):
    """
    Marks item as read
    :param client: EWS Client
    :param item_ids: items ids to mark as read
    :param (Optional) operation: "read" marks items as read; any other value marks them unread
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple
    """
    marked_items = []
    item_ids = argToList(item_ids)
    # NOTE(review): sibling commands resolve an Account via client.get_account()
    # before calling get_items_from_mailbox; here the raw target_mailbox value is
    # passed instead — confirm get_items_from_mailbox accepts a mailbox name/None.
    items = client.get_items_from_mailbox(target_mailbox, item_ids)
    # Only Message items can be marked; other item types are skipped.
    items = [x for x in items if isinstance(x, Message)]
    for item in items:
        item.is_read = operation == "read"
        item.save()
        marked_items.append(
            {
                ITEM_ID: item.id,
                MESSAGE_ID: item.message_id,
                ACTION: "marked-as-{}".format(operation),
            }
        )
    readable_output = tableToMarkdown(
        f"Marked items ({operation} marked operation)", marked_items
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
    return readable_output, output, marked_items
def random_word_generator(length):
    """Return a random lowercase ASCII string of ``length`` characters."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def handle_html(html_body):
    """
    Extract all data-url content from within the html and return as separate attachments.
    Due to security implications, we support only images here
    We might not have Beautiful Soup so just do regex search

    :param html_body: HTML string that may embed base64 ``data:image/...;base64,`` URLs
    :return: tuple of (cleaned HTML referencing ``cid:`` attachments, list of
        attachment dicts with ``data``/``name``/``cid`` keys)
    """
    attachments = []
    clean_body = ''
    last_index = 0
    for i, m in enumerate(
            re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
        # group(1) = full data URL, group(2) = image mime type, group(3) = base64 payload
        attachment = {
            'data': base64.b64decode(m.group(3)),
            'name': f'image{i}'
        }
        attachment['cid'] = f'{attachment["name"]}@{random_word_generator(8)}.{random_word_generator(8)}'
        attachments.append(attachment)
        # Replace the data URL with the cid: reference; m.end() - 1 keeps the
        # closing quote of the src attribute in the output.
        clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid']
        last_index = m.end() - 1
    clean_body += html_body[last_index:]
    return clean_body, attachments
def collect_manual_attachments(manualAttachObj):
    """Collect all manual attachments' data

    Args:
        manualAttachObj (str): String representation of the manually attached files list.

    Returns:
        List[Dict]. List of the files data (name/data/cid per file; cid is always empty).
    """
    collected = []
    for entry in argToList(manualAttachObj):
        # Resolve the war-room file path from the attachment's real file name.
        file_info = demisto.getFilePath(os.path.basename(entry['RealFileName']))
        with open(file_info['path'], 'rb') as file_handle:
            file_data = file_handle.read()
        collected.append({
            'name': entry['FileName'],
            'data': file_data,
            'cid': ''
        })
    return collected
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
    """Collect all attachments' data

    Args:
        attachments_ids (str): String representation of the files ids list.
        attachments_cids (str): String representation of the files content ids list.
        attachments_names (str): String representation of the files names list.

    Returns:
        List[Dict]. List of the files data.
    """
    attachments = []
    files_ids = argToList(attachments_ids)
    files_cids = argToList(attachments_cids)
    files_names = argToList(attachments_names)
    for index, file_id in enumerate(files_ids):
        try:
            file_res = demisto.getFilePath(file_id)
            path = file_res['path']
            # Prefer the caller-supplied name/cid when present at this index;
            # otherwise fall back to the stored file name / an empty cid.
            has_name = len(files_names) > index and files_names[index]
            filename = files_names[index] if has_name else file_res['name']
            has_cid = len(files_cids) > index and files_cids[index]
            cid = files_cids[index] if has_cid else ''
            with open(path, 'rb') as fp:
                data = fp.read()
            attachments.append({
                'name': filename,
                'data': data,
                'cid': cid
            })
        except Exception as e:
            demisto.error(f'Invalid entry {file_id} with exception: {e}')
            return_error(f'Entry {file_id} is not valid or is not a file entry')
    return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
    """Creates the transient attachments data

    Args:
        transient_files (str): String representation of the transient files names list.
        transient_files_contents (str): String representation of the transient files content list.
        transient_files_cids (str): String representation of the transient files content ids list.

    Returns:
        List[Dict]. List of the transient files data.
    """
    files_names = argToList(transient_files)
    files_contents = argToList(transient_files_contents)
    files_cids = argToList(transient_files_cids)
    transient_attachments = []
    # Names and contents pair up positionally; a trailing name without
    # content is dropped (same as stopping at the shorter list).
    for index, (file_name, raw_content) in enumerate(zip(files_names, files_contents)):
        file_cid = files_cids[index] if index < len(files_cids) else ''
        transient_attachments.append({
            'name': file_name,
            'data': bytes(raw_content, UTF_8),
            'cid': file_cid
        })
    return transient_attachments
def handle_template_params(template_params):
    """Translates the template params if they exist from the context

    Args:
        template_params (str): JSON string that represent the variables names to be replaced and the desired value.
                               Value can be either real value or context key to fetch the value from.

    Returns:
        Dict. `variable_name: value_to_use` of the templated parameters.
    """
    actual_params = {}
    if not template_params:
        return actual_params
    try:
        params = json.loads(template_params)
        for p in params:
            spec = params[p]
            # A literal 'value' wins; otherwise 'key' is resolved from context.
            if spec.get('value'):
                actual_params[p] = spec['value']
            elif spec.get('key'):
                actual_params[p] = demisto.dt(demisto.context(), spec['key'])
    except ValueError as e:
        return_error('Unable to parse template_params: %s' % (str(e)))
    return actual_params
def create_message_object(to, cc, bcc, subject, body, additional_headers):
    """Creates the message object according to the existence of additional custom headers.

    :param to: main recipients list
    :param cc: CC recipients list
    :param bcc: BCC recipients list
    :param subject: message subject
    :param body: message body (plain or HTMLBody)
    :param additional_headers: dict of registered custom-header kwargs, or falsy
    :return: exchangelib Message
    """
    message_kwargs = {
        'to_recipients': to,
        'cc_recipients': cc,
        'bcc_recipients': bcc,
        'subject': subject,
        'body': body,
    }
    if additional_headers:
        # Custom headers were registered on Message and are passed as kwargs.
        message_kwargs.update(additional_headers)
    return Message(**message_kwargs)
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
                   additional_headers=None):
    """Creates the Message object that will be sent.

    Args:
        to (list): Main recipients.
        cc (list): CC recipients.
        bcc (list): BCC recipients.
        subject (str): Email's subject.
        body (str): Email's simple text body.
        html_body (str): Email's html body.
        attachments (list): Files to be attached to the mail, both inline and as files.
        additional_headers (Dict): Custom headers to be added to the message.

    Returns:
        Message. Message object ready to be sent.
    """
    if not html_body:
        # This is a simple text message - we cannot have CIDs here
        message = create_message_object(to, cc, bcc, subject, body, additional_headers)
        for attachment in attachments:
            if not attachment.get('cid'):
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
                message.attach(new_attachment)
    else:
        # Inline images embedded as data URLs are extracted into cid attachments.
        html_body, html_attachments = handle_html(html_body)
        attachments += html_attachments
        message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
        for attachment in attachments:
            if not attachment.get('cid'):
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
            else:
                # Attachments with a cid are embedded inline so the HTML can reference them.
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
                                                is_inline=True, content_id=attachment.get('cid'))
            message.attach(new_attachment)
    return message
def add_additional_headers(additional_headers):
    """Adds custom headers to the Message object

    Args:
        additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2

    Returns:
        Dict. Headers dictionary in the form of: `header_name: header value`
    """
    headers = dict()
    for header in argToList(additional_headers):
        # Split only on the first '=' so header values may themselves contain '='.
        header_name, header_value = header.split('=', 1)
        # Each header becomes an ExtendedProperty subclass registered on the
        # exchangelib Message class; registration is process-global, and
        # re-registering an existing name raises ValueError (caught below).
        class TempClass(ExtendedProperty):
            distinguished_property_set_id = 'InternetHeaders'
            property_name = header_name
            property_type = 'String'
        try:
            Message.register(header_name, TempClass)
            headers[header_name] = header_value
        except ValueError as e:
            # Header could not be registered (e.g. already registered); skip it.
            demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
    return headers
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
               attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
               transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
               additionalHeader=None, raw_message=None):
    """Send an email from the client's mailbox.

    :param client: EWS Client
    :param to: main recipients (CSV string or list)
    :param (Optional) subject: email subject
    :param (Optional) body: plain-text body
    :param (Optional) bcc: BCC recipients
    :param (Optional) cc: CC recipients
    :param (Optional) htmlBody: HTML body (takes precedence over ``body``)
    :param (Optional) attachIDs: war-room file entry ids to attach
    :param (Optional) attachCIDs: content ids for the attached files
    :param (Optional) attachNames: display names for the attached files
    :param (Optional) manualAttachObj: manually attached files descriptor
    :param (Optional) transientFile: transient file names
    :param (Optional) transientFileContent: transient file contents
    :param (Optional) transientFileCID: transient file content ids
    :param (Optional) templateParams: JSON of template variables for body/htmlBody
    :param (Optional) additionalHeader: custom headers (name=value CSV)
    :param (Optional) raw_message: raw body that bypasses all other composition
    :return: Output tuple
    """
    to = argToList(to)
    cc = argToList(cc)
    bcc = argToList(bcc)
    # Basic validation - we allow pretty much everything but you have to have at least a recipient
    # We allow messages without subject and also without body
    if not to and not cc and not bcc:
        return_error('You must have at least one recipient')
    if raw_message:
        message = Message(
            to_recipients=to,
            cc_recipients=cc,
            bcc_recipients=bcc,
            body=raw_message
        )
    else:
        if additionalHeader:
            additionalHeader = add_additional_headers(additionalHeader)
        # collect all types of attachments
        attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
        attachments.extend(collect_manual_attachments(manualAttachObj))
        attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))
        # update body and html_body with the templated params, if exists
        template_params = handle_template_params(templateParams)
        if template_params:
            body = body.format(**template_params)
            if htmlBody:
                htmlBody = htmlBody.format(**template_params)
        message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)
    client.send_email(message)
    return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
    """
    Retrieve item as an eml
    :param client: EWS Client
    :param item_id: Item id to retrieve
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple (file result entry; implicitly None when the item has
        no MIME content)
    """
    account = client.get_account(target_mailbox)
    item = client.get_item_from_mailbox(account, item_id)
    if item.mime_content:
        mime_content = item.mime_content
        if isinstance(mime_content, bytes):
            email_content = email.message_from_bytes(mime_content)
        else:
            email_content = email.message_from_string(mime_content)
        if item.headers:
            # Headers already present in the MIME body, normalized for comparison
            # (folded continuation lines joined with spaces).
            attached_email_headers = [
                (h, " ".join(map(str.strip, v.split("\r\n"))))
                for (h, v) in list(email_content.items())
            ]
            # Merge in EWS-reported headers missing from the MIME content;
            # Content-Type is skipped so the MIME structure stays intact.
            for header in item.headers:
                if (
                    header.name,
                    header.value,
                ) not in attached_email_headers and header.name != "Content-Type":
                    email_content.add_header(header.name, header.value)
        eml_name = item.subject if item.subject else "demisto_untitled_eml"
        file_result = fileResult(eml_name + ".eml", email_content.as_string())
        file_result = (
            file_result if file_result else "Failed uploading eml file to war room"
        )
        return file_result
def parse_incident_from_item(item):
    """
    Parses an incident from an item
    :param item: exchangelib item (Message) to parse
    :return: incident dict with details/name/occurred/attachment/labels/rawJSON
    """
    incident = {}
    labels = []
    try:
        incident["details"] = item.text_body or item.body
    except AttributeError:
        # Some item types have no text_body attribute; fall back to body.
        incident["details"] = item.body
    incident["name"] = item.subject
    labels.append({"type": "Email/subject", "value": item.subject})
    incident["occurred"] = item.datetime_created.ewsformat()
    # handle recipients
    if item.to_recipients:
        for recipient in item.to_recipients:
            labels.append({"type": "Email", "value": recipient.email_address})
    # handle cc
    if item.cc_recipients:
        for recipient in item.cc_recipients:
            labels.append({"type": "Email/cc", "value": recipient.email_address})
    # handle email from
    if item.sender:
        labels.append({"type": "Email/from", "value": item.sender.email_address})
    # email format: "HTML" wins when both text and HTML bodies are present
    email_format = ""
    try:
        if item.text_body:
            labels.append({"type": "Email/text", "value": item.text_body})
            email_format = "text"
    except AttributeError:
        pass
    if item.body:
        labels.append({"type": "Email/html", "value": item.body})
        email_format = "HTML"
    labels.append({"type": "Email/format", "value": email_format})
    # handle attachments
    if item.attachments:
        incident["attachment"] = []
        for attachment in item.attachments:
            file_result = None
            label_attachment_type = None
            label_attachment_id_type = None
            if isinstance(attachment, FileAttachment):
                try:
                    if attachment.content:
                        # file attachment
                        label_attachment_type = "attachments"
                        label_attachment_id_type = "attachmentId"
                        # save the attachment
                        file_name = get_attachment_name(attachment.name)
                        file_result = fileResult(file_name, attachment.content)
                        # check for error
                        if file_result["Type"] == entryTypes["error"]:
                            demisto.error(file_result["Contents"])
                            raise Exception(file_result["Contents"])
                        # save attachment to incident
                        incident["attachment"].append(
                            {
                                "path": file_result["FileID"],
                                "name": get_attachment_name(attachment.name),
                            }
                        )
                except TypeError as e:
                    # Empty attachment content raises this specific TypeError;
                    # skip the attachment, re-raise anything else.
                    if str(e) != "must be string or buffer, not None":
                        raise
                    continue
            else:
                # other item attachment (e.g. attached email) - exported as .eml
                label_attachment_type = "attachmentItems"
                label_attachment_id_type = "attachmentItemsId"
                # save the attachment
                if attachment.item.mime_content:
                    mime_content = attachment.item.mime_content
                    attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
                        else email.message_from_string(mime_content)
                    if attachment.item.headers:
                        # Merge EWS-reported headers missing from the MIME body;
                        # Content-Type is skipped to preserve MIME structure.
                        attached_email_headers = [
                            (h, " ".join(map(str.strip, v.split("\r\n"))))
                            for (h, v) in list(attached_email.items())
                        ]
                        for header in attachment.item.headers:
                            if (
                                (header.name, header.value)
                                not in attached_email_headers
                                and header.name != "Content-Type"
                            ):
                                attached_email.add_header(header.name, header.value)
                    file_result = fileResult(
                        get_attachment_name(attachment.name) + ".eml",
                        attached_email.as_bytes().decode('utf-8'),
                    )
                if file_result:
                    # check for error
                    if file_result["Type"] == entryTypes["error"]:
                        demisto.error(file_result["Contents"])
                        raise Exception(file_result["Contents"])
                    # save attachment to incident
                    incident["attachment"].append(
                        {
                            "path": file_result["FileID"],
                            "name": get_attachment_name(attachment.name) + ".eml",
                        }
                    )
            labels.append(
                {
                    "type": label_attachment_type,
                    "value": get_attachment_name(attachment.name),
                }
            )
            labels.append(
                {"type": label_attachment_id_type, "value": attachment.attachment_id.id}
            )
    # handle headers
    if item.headers:
        headers = []
        for header in item.headers:
            labels.append(
                {
                    "type": "Email/Header/{}".format(header.name),
                    "value": str(header.value),
                }
            )
            headers.append("{}: {}".format(header.name, header.value))
        labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
    # handle item id
    if item.message_id:
        labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
    if item.id:
        labels.append({"type": "Email/ID", "value": item.id})
        labels.append({"type": "Email/itemId", "value": item.id})
    # handle conversion id
    if item.conversation_id:
        labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
    incident["labels"] = labels
    incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
    return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
    """
    Fetch incidents
    :param client: EWS Client
    :param last_run: last run dict
    :return: list of incidents (empty list when rate-limited and under the retry budget)
    """
    last_run = get_last_run(client, last_run)
    try:
        last_emails = fetch_last_emails(
            client,
            client.folder_name,
            last_run.get(LAST_RUN_TIME),
            last_run.get(LAST_RUN_IDS),
        )
        # Bounded dedup queue of already-fetched message ids.
        ids = deque(
            last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
        )
        incidents = []
        incident: Dict[str, str] = {}
        for item in last_emails:
            if item.message_id:
                ids.append(item.message_id)
                incident = parse_incident_from_item(item)
                incidents.append(incident)
            if len(incidents) >= client.max_fetch:
                break
        # Advance the watermark to the last processed incident's occurred time.
        last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
        if isinstance(last_run_time, EWSDateTime):
            last_run_time = last_run_time.ewsformat()
        new_last_run = {
            LAST_RUN_TIME: last_run_time,
            LAST_RUN_FOLDER: client.folder_name,
            LAST_RUN_IDS: list(ids),
            ERROR_COUNTER: 0,
        }
        demisto.setLastRun(new_last_run)
        return incidents
    except RateLimitError:
        # Persist the error counter and back off; give up (re-raise) after 3 hits.
        if LAST_RUN_TIME in last_run:
            last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
        if ERROR_COUNTER not in last_run:
            last_run[ERROR_COUNTER] = 0
        last_run[ERROR_COUNTER] += 1
        demisto.setLastRun(last_run)
        if last_run[ERROR_COUNTER] > 2:
            raise
        return []
def fetch_last_emails(
    client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
    """
    Fetches last emails
    :param client: EWS client
    :param (Optional) folder_name: folder name to pull from
    :param (Optional) since_datetime: items will be searched after this datetime
    :param (Optional) exclude_ids: exclude ids from fetch
    :return: list of exchangelib.Items
    """
    qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
    if since_datetime:
        qs = qs.filter(datetime_received__gte=since_datetime)
    else:
        # First fetch: look back 10 minutes by last-modified time.
        # NOTE(review): EWSTimeZone.timezone() exists in exchangelib < 4;
        # confirm against the pinned exchangelib version before upgrading.
        last_10_min = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta(
            minutes=10
        )
        qs = qs.filter(last_modified_time__gte=last_10_min)
    qs = qs.filter().only(*[x.name for x in Message.FIELDS])
    qs = qs.filter().order_by("datetime_received")
    result = qs.all()
    # Keep Message items only, then drop anything fetched on a previous run.
    result = [x for x in result if isinstance(x, Message)]
    if exclude_ids and len(exclude_ids) > 0:
        exclude_ids = set(exclude_ids)
        result = [x for x in result if x.message_id not in exclude_ids]
    return result
def test_module(client: EWSClient, max_fetch):
    """
    test-module
    * Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH
    * Account can be retrieved
    * Account has read rights
    * Test access to fetch folder
    :param client: EWS Client
    :param max_fetch: Max fetches per incident
    :return: "ok"
    """
    try:
        if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
            return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
                         f'You provided: {max_fetch}')
        account = client.get_account()
        # Authentication succeeded but the delegated user cannot read the mailbox.
        if not account.root.effective_rights.read:  # pylint: disable=E1101
            raise Exception(
                "Success to authenticate, but user has no permissions to read from the mailbox. "
                "Need to delegate the user permissions to the mailbox - "
                "please read integration documentation and follow the instructions"
            )
        client.get_folder_by_path(
            client.folder_name, account, client.is_public_folder
        ).test_access()
    except ErrorFolderNotFound as e:
        # "Top of Information Store" indicates a permissions problem on the
        # configured fetch folder rather than a missing folder.
        if "Top of Information Store" in str(e):
            raise Exception(
                "Success to authenticate, but user probably has no permissions to read from the specific folder."
                "Check user permissions. You can try !ews-find-folders command to "
                "get all the folders structure that the user has permissions to"
            )
    return "ok"
def sub_main():
    """Dispatch the current Demisto command to its implementation and report
    results or a triaged error entry. All exceptions are caught, classified
    (Office365 maintenance, connection, auth/timeout) and surfaced with the
    debug log attached; logging handlers are cleaned up in the finally block."""
    is_test_module = False
    params = demisto.params()
    args = prepare_args(demisto.args())
    # client's default_target_mailbox is the authorization source for the instance
    params['default_target_mailbox'] = args.get('target_mailbox',
                                                args.get('source_mailbox', params['default_target_mailbox']))
    client = EWSClient(**params)
    start_logging()
    try:
        command = demisto.command()
        # commands that return a single note result
        normal_commands = {
            "ews-get-searchable-mailboxes": get_searchable_mailboxes,
            "ews-move-item-between-mailboxes": move_item_between_mailboxes,
            "ews-move-item": move_item,
            "ews-delete-items": delete_items,
            "ews-search-mailbox": search_items_in_mailbox,
            "ews-get-contacts": get_contacts,
            "ews-get-out-of-office": get_out_of_office_state,
            "ews-recover-messages": recover_soft_delete_item,
            "ews-create-folder": create_folder,
            "ews-mark-item-as-junk": mark_item_as_junk,
            "ews-find-folders": find_folders,
            "ews-get-items-from-folder": get_items_from_folder,
            "ews-get-items": get_items,
            "ews-get-folder": get_folder,
            "ews-expand-group": get_expanded_group,
            "ews-mark-items-as-read": mark_item_as_read,
            "send-mail": send_email,
        }
        # commands that may return multiple results or non-note result
        special_output_commands = {
            "ews-get-attachment": fetch_attachments_for_message,
            "ews-delete-attachment": delete_attachments_for_message,
            "ews-get-items-as-eml": get_item_as_eml,
        }
        # system commands:
        if command == "test-module":
            is_test_module = True
            demisto.results(test_module(client, params.get('max_fetch')))
        elif command == "fetch-incidents":
            last_run = demisto.getLastRun()
            incidents = fetch_emails_as_incidents(client, last_run)
            demisto.incidents(incidents)
        # special outputs commands
        elif command in special_output_commands:
            demisto.results(special_output_commands[command](client, **args))  # type: ignore[operator]
        # normal commands
        else:
            output = normal_commands[command](client, **args)  # type: ignore[operator]
            return_outputs(*output)
    except Exception as e:
        start_logging()
        debug_log = log_stream.getvalue()  # type: ignore[union-attr]
        error_message_simple = ""
        # Office365 regular maintenance case
        if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance(
            e, ErrorMailboxMoveInProgress
        ):
            log_message = (
                "Office365 is undergoing load balancing operations. "
                "As a result, the service is temporarily unavailable."
            )
            if demisto.command() == "fetch-incidents":
                # Transient outage: report an empty fetch instead of an error.
                demisto.info(log_message)
                demisto.incidents([])
                sys.exit(0)
            if is_test_module:
                demisto.results(
                    log_message + " Please retry the instance configuration test."
                )
                sys.exit(0)
            error_message_simple = log_message + " Please retry your request."
        if isinstance(e, ConnectionError):
            error_message_simple = (
                "Could not connect to the server.\n"
                f"Additional information: {str(e)}"
            )
        else:
            if is_test_module and isinstance(e, MalformedResponseError):
                error_message_simple = (
                    "Got invalid response from the server.\n"
                )
        # Legacy error handling
        if "Status code: 401" in debug_log:
            error_message_simple = (
                "Got unauthorized from the server. "
            )
        if "Status code: 503" in debug_log:
            error_message_simple = (
                "Got timeout from the server. "
                "Probably the server is not reachable with the current settings. "
            )
        if not error_message_simple:
            error_message = error_message_simple = str(e)
        else:
            error_message = error_message_simple + "\n" + str(e)
        stacktrace = traceback.format_exc()
        if stacktrace:
            error_message += "\nFull stacktrace:\n" + stacktrace
        if debug_log:
            error_message += "\nFull debug log:\n" + debug_log
        if demisto.command() == "fetch-incidents":
            raise
        if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
            return_error(
                message="Selected invalid field, please specify valid field name.",
                error=e,
            )
        if is_test_module:
            demisto.results(error_message_simple)
        else:
            demisto.results(
                {
                    "Type": entryTypes["error"],
                    "ContentsFormat": formats["text"],
                    "Contents": error_message_simple,
                }
            )
        demisto.error(f"{e.__class__.__name__}: {error_message}")
    finally:
        exchangelib_cleanup()
        if log_stream:
            try:
                logging.getLogger().removeHandler(log_handler)  # type: ignore
                log_stream.close()
            except Exception as ex:
                demisto.error(
                    "EWS: unexpected exception when trying to remove log handler: {}".format(
                        ex
                    )
                )
def process_main():
    """Entry point when running in a separate process: re-open file descriptor 0
    as stdin so this process can read from the server, then run sub_main()."""
    sys.stdin = os.fdopen(0, "r")
    sub_main()
def main():
    """Top-level entry point: runs the integration, optionally in a subprocess."""
    # When running big queries, like 'ews-search-mailbox' the memory might not freed by the garbage
    # collector. `separate_process` flag will run the integration on a separate process that will prevent
    # memory leakage.
    separate_process = demisto.params().get("separate_process", False)
    demisto.debug("Running as separate_process: {}".format(separate_process))
    if separate_process:
        try:
            p = Process(target=process_main)
            p.start()
            # Block until the child exits so all results are flushed before return.
            p.join()
        except Exception as ex:
            demisto.error("Failed starting Process: {}".format(ex))
    else:
        sub_main()
from MicrosoftApiModule import * # noqa: E402
# "__builtin__"/"builtins" cover execution through the server's exec wrapper,
# where __name__ is not "__main__".
if __name__ in ("__main__", "__builtin__", "builtins"):
    main()
| 35.034283 | 116 | 0.624691 | import random
import string
from typing import Dict
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from datetime import timedelta
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
warnings.filterwarnings("ignore")
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
class ProxyAdapter(requests.adapters.HTTPAdapter):
    """HTTP adapter that injects the configured proxy settings into every request."""

    def send(self, *args, **kwargs):
        # handle_proxy() (CommonServerPython) resolves proxies from the instance config.
        kwargs['proxies'] = handle_proxy()
        return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
    """Proxy-aware adapter that additionally skips TLS certificate verification
    (NoVerifyHTTPAdapter base). Used when the instance is configured as insecure."""

    def send(self, *args, **kwargs):
        kwargs['proxies'] = handle_proxy()
        return super().send(*args, **kwargs)
class EWSClient:
    """Thin wrapper around exchangelib configured for EWS on Office 365 with
    OAuth2 (via MicrosoftClient) authentication."""

    def __init__(
        self,
        default_target_mailbox,
        client_id,
        client_secret,
        tenant_id,
        folder="Inbox",
        is_public_folder=False,
        request_timeout="120",
        max_fetch=MAX_INCIDENTS_PER_FETCH,
        self_deployed=True,
        insecure=True,
        proxy=False,
        **kwargs,
    ):
        """
        :param default_target_mailbox: mailbox used when no explicit target is given
        :param client_id: Azure application client id
        :param client_secret: Azure application client secret
        :param tenant_id: Azure tenant id
        :param folder: default folder for fetch-incidents
        :param is_public_folder: whether the default folder is a public folder
        :param request_timeout: EWS request timeout in seconds (string from params)
        :param max_fetch: incidents per fetch, capped at MAX_INCIDENTS_PER_FETCH
        :param self_deployed: whether the Azure app is self-deployed
        :param insecure: skip TLS certificate validation
        :param proxy: use system proxy settings
        """
        BaseProtocol.TIMEOUT = int(request_timeout)
        self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
        self.ms_client = MicrosoftClient(
            tenant_id=tenant_id,
            auth_id=client_id,
            enc_key=client_secret,
            app_name=APP_NAME,
            base_url=self.ews_server,
            verify=not insecure,
            proxy=proxy,
            self_deployed=self_deployed,
            scope="https://outlook.office.com/.default",
        )
        self.folder_name = folder
        self.is_public_folder = is_public_folder
        self.access_type = kwargs.get('access_type') or IMPERSONATION
        self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
        self.last_run_ids_queue_size = 500
        self.client_id = client_id
        self.client_secret = client_secret
        self.account_email = default_target_mailbox
        self.config = self.__prepare(insecure)
        self.protocol = BaseProtocol(self.config)

    def __prepare(self, insecure):
        """Build the exchangelib Configuration: acquire an OAuth2 access token
        and wire it into OAuth2AuthorizationCodeCredentials."""
        BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
        access_token = self.ms_client.get_access_token()
        oauth2_token = OAuth2Token({"access_token": access_token})
        self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
            client_id=self.client_id,
            client_secret=self.client_secret,
            access_token=oauth2_token,
        )
        # Identity is required for impersonation of the target mailbox.
        self.credentials.identity = Identity(upn=self.account_email)
        config_args = {
            "credentials": credentials,
            "auth_type": OAUTH2,
            "version": Version(EXCHANGE_O365),
            "service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
        }
        return Configuration(**config_args)

    def get_account(self, target_mailbox=None):
        """Return an Account for *target_mailbox* (default: configured mailbox)."""
        if not target_mailbox:
            target_mailbox = self.account_email
        return Account(
            primary_smtp_address=target_mailbox,
            autodiscover=False,
            config=self.config,
            access_type=self.access_type,
        )

    def get_items_from_mailbox(self, account, item_ids):
        """Fetch items by id from a mailbox.

        :param account: an Account object, a mailbox address (str), or None
            (falls back to the default target mailbox)
        :param item_ids: a single item id or a list of item ids
        :raises Exception: if any requested id was not found
        """
        if isinstance(account, str):
            account = self.get_account(account)
        elif not isinstance(account, Account):
            # BUGFIX: previously an already-constructed Account was discarded
            # here as well, silently re-targeting the default mailbox for
            # lookups against a non-default target mailbox.
            account = self.get_account(self.account_email)
        if type(item_ids) is not list:
            item_ids = [item_ids]
        items = [Item(id=x) for x in item_ids]
        result = list(account.fetch(ids=items))
        result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
        if len(result) != len(item_ids):
            raise Exception(
                "One or more items were not found. Check the input item ids"
            )
        return result

    def get_item_from_mailbox(self, account, item_id):
        """Fetch a single item by id; raises if it is not found."""
        result = self.get_items_from_mailbox(account, [item_id])
        if len(result) == 0:
            raise Exception(f"ItemId {str(item_id)} not found")
        return result[0]

    def get_attachments_for_item(self, item_id, account, attachment_ids=None):
        """Return the attachments of an item, optionally filtered by id list.

        :raises Exception: if the item is missing or some requested
            attachment ids were not found on the item
        """
        item = self.get_item_from_mailbox(account, item_id)
        attachments = []
        attachment_ids = argToList(attachment_ids)
        if item:
            if item.attachments:
                for attachment in item.attachments:
                    if (
                        attachment_ids
                        and attachment.attachment_id.id not in attachment_ids
                    ):
                        continue
                    attachments.append(attachment)
        else:
            raise Exception("Message item not found: " + item_id)
        if attachment_ids and len(attachments) < len(attachment_ids):
            raise Exception(
                "Some attachment id did not found for message:" + str(attachment_ids)
            )
        return attachments

    def is_default_folder(self, folder_path, is_public=None):
        """Resolve whether *folder_path* should be treated as a public folder.

        An explicit *is_public* wins; otherwise the configured default folder
        inherits the instance's is_public_folder flag."""
        if is_public is not None:
            return is_public
        if folder_path == self.folder_name:
            return self.is_public_folder
        return False

    def get_folder_by_path(self, path, account=None, is_public=False):
        """Resolve a folder by distinguished id, or by walking a '/'-separated path.

        :raises Exception: when a path component does not exist
        """
        if account is None:
            account = self.get_account()
        # A string of folder-id length is treated as a folder id, not a path.
        if len(path) == FOLDER_ID_LEN:
            folders_map = account.root._folders_map
            if path in folders_map:
                return account.root._folders_map[path]
        if is_public:
            folder_result = account.public_folders_root
        elif path == "AllItems":
            folder_result = account.root
        else:
            folder_result = account.inbox.parent
        path = path.replace("/", "\\")
        path = path.split("\\")
        for sub_folder_name in path:
            folder_filter_by_name = [
                x
                for x in folder_result.children
                if x.name.lower() == sub_folder_name.lower()
            ]
            if len(folder_filter_by_name) == 0:
                raise Exception(f"No such folder {path}")
            folder_result = folder_filter_by_name[0]
        return folder_result

    def send_email(self, message: Message):
        """Send *message* from the default account and save it to Sent Items."""
        account = self.get_account()
        message.account = account
        message.send_and_save()
class MarkAsJunk(EWSAccountService):
    """EWS SOAP service wrapper for the MarkAsJunk operation."""

    SERVICE_NAME = "MarkAsJunk"

    def call(self, item_id, move_item):
        """Mark *item_id* as junk, optionally moving it to the Junk folder.

        Returns "Success", or the text of the first error response.
        """
        elements = list(
            self._get_elements(
                payload=self.get_payload(item_id=item_id, move_item=move_item)
            )
        )
        for element in elements:
            if isinstance(element, ResponseMessageError):
                return str(element)
        return "Success"

    def get_payload(self, item_id, move_item):
        """Build the m:MarkAsJunk request XML for a single item id."""
        junk = create_element(
            f"m:{self.SERVICE_NAME}",
            {"IsJunk": "true", "MoveItem": "true" if move_item else "false"},
        )
        items_list = create_element("m:ItemIds")
        item_element = create_element("t:ItemId", {"Id": item_id})
        items_list.append(item_element)
        junk.append(items_list)
        return junk
class GetSearchableMailboxes(EWSService):
    """EWS SOAP service wrapper for GetSearchableMailboxes (compliance search)."""

    SERVICE_NAME = "GetSearchableMailboxes"
    element_container_name = f"{{{MNS}}}SearchableMailboxes"

    @staticmethod
    def parse_element(element):
        # Each sub-element is optional in the response, hence the find()-is-None guards.
        return {
            MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text
            if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None
            else None,
            MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text
            if element.find(f"{{{TNS}}}ReferenceId") is not None
            else None,
            "displayName": element.find(f"{{{TNS}}}DisplayName").text
            if element.find(f"{{{TNS}}}DisplayName") is not None
            else None,
            "isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text
            if element.find(f"{{{TNS}}}IsExternalMailbox") is not None
            else None,
            "externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text
            if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None
            else None,
        }

    def call(self):
        """Return parsed mailbox dicts, skipping entries without a ReferenceId."""
        elements = self._get_elements(payload=self.get_payload())
        return [
            self.parse_element(x)
            for x in elements
            if x.find(f"{{{TNS}}}ReferenceId").text
        ]

    def get_payload(self):
        """Build the (empty) m:GetSearchableMailboxes request element."""
        element = create_element(f"m:{self.SERVICE_NAME}")
        return element
class ExpandGroup(EWSService):
    """EWS SOAP service wrapper for ExpandDL - expands distribution list members."""

    SERVICE_NAME = "ExpandDL"
    element_container_name = f"{{{MNS}}}DLExpansion"

    @staticmethod
    def parse_element(element):
        # All sub-elements are optional in the response.
        return {
            MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
            if element.find(f"{{{TNS}}}EmailAddress") is not None
            else None,
            "displayName": element.find(f"{{{TNS}}}Name").text
            if element.find(f"{{{TNS}}}Name") is not None
            else None,
            "mailboxType": element.find(f"{{{TNS}}}MailboxType").text
            if element.find(f"{{{TNS}}}MailboxType") is not None
            else None,
        }

    def call(self, email_address, recursive_expansion=False):
        """Expand *email_address* into its members.

        NOTE: recursive_expansion arrives as the command-arg STRING "True",
        hence the string comparison below.
        """
        try:
            if recursive_expansion == "True":
                group_members: Dict = {}
                self.expand_group_recursive(email_address, group_members)
                return list(group_members.values())
            else:
                return self.expand_group(email_address)
        except ErrorNameResolutionNoResults:
            demisto.results("No results were found.")
            sys.exit()

    def get_payload(self, email_address):
        """Build the m:ExpandDL request XML."""
        element = create_element(f"m:{self.SERVICE_NAME}")
        mailbox_element = create_element("m:Mailbox")
        add_xml_child(mailbox_element, "t:EmailAddress", email_address)
        element.append(mailbox_element)
        return element

    def expand_group(self, email_address):
        """Single-level expansion: list of member dicts."""
        elements = self._get_elements(payload=self.get_payload(email_address))
        return [self.parse_element(x) for x in elements]

    def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
        """Depth-first expansion accumulating non-DL members into *non_dl_emails*.

        *dl_emails* records already-visited lists to break membership cycles.
        """
        if dl_emails is None:
            dl_emails = set()
        if email_address in non_dl_emails or email_address in dl_emails:
            return None
        dl_emails.add(email_address)
        for member in self.expand_group(email_address):
            if (
                member["mailboxType"] == "PublicDL"
                or member["mailboxType"] == "PrivateDL"
            ):
                self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
            else:
                if member["mailbox"] not in non_dl_emails:
                    non_dl_emails[member["mailbox"]] = member
def exchangelib_cleanup():
    """Best-effort teardown of exchangelib's cached connections and thread pools.

    Every failure is logged and ignored - cleanup must never mask the command
    result (called from sub_main's finally block).
    """
    key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
    try:
        exchangelib.close_connections()
    except Exception as ex:
        demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
    for key, protocol in key_protocols:
        try:
            # thread_pool is created lazily - only terminate if it materialized.
            if "thread_pool" in protocol.__dict__:
                demisto.debug(
                    "terminating thread pool key{} id: {}".format(
                        key, id(protocol.thread_pool)
                    )
                )
                protocol.thread_pool.terminate()
                del protocol.__dict__["thread_pool"]
            else:
                demisto.info(
                    "Thread pool not found (ignoring terminate) in protcol dict: {}".format(
                        dir(protocol.__dict__)
                    )
                )
        except Exception as ex:
            demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
log_stream = None
log_handler = None
def start_logging():
    """Install an in-memory root-log handler so debug output can be attached
    to error reports (see the debug_log handling in sub_main)."""
    global log_stream
    global log_handler
    # Never let the logging machinery itself raise.
    logging.raiseExceptions = False
    if log_stream is None:  # install only once per process
        log_stream = StringIO()
        log_handler = logging.StreamHandler(stream=log_stream)
        log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        logger = logging.getLogger()
        logger.addHandler(log_handler)
        logger.setLevel(logging.DEBUG)
def get_attachment_name(attachment_name):
    """Return *attachment_name*, or a placeholder when it is None or empty."""
    placeholder = "demisto_untitled_attachment"
    missing = attachment_name is None or attachment_name == ""
    return placeholder if missing else attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
    """Build a markdown + context war-room entry for *obj*.

    Returns a plain string for an empty *obj*. When *obj* is a dict,
    *headers* is narrowed to the keys actually present in it.
    """
    if len(obj) == 0:
        return "There is no output results"
    if headers and isinstance(obj, dict):
        headers = list(set(headers).intersection(set(obj.keys())))
    human_readable = tableToMarkdown(title, obj, headers)
    entry = {
        "Type": entryTypes["note"],
        "Contents": obj,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": human_readable,
        "EntryContext": {context_key: obj},
    }
    return entry
def prepare_args(args):
    """Normalize command arguments: dash->underscore in keys, and coerce the
    string flag ``is_public`` to a real bool."""
    normalized = {key.replace("-", "_"): value for key, value in args.items()}
    if "is_public" in normalized:
        normalized["is_public"] = normalized["is_public"] == "True"
    return normalized
def get_limited_number_of_messages_from_qs(qs, limit):
    """Collect up to *limit* Message items from queryset *qs*.

    Non-Message entries (e.g. meeting requests) are skipped and do not
    count toward the limit.
    """
    messages = []
    for entry in qs:
        if len(messages) == limit:
            break
        if isinstance(entry, Message):
            messages.append(entry)
    return messages
def keys_to_camel_case(value):
    """Recursively convert snake_case strings to camelCase.

    Lists/sets are mapped element-wise (returned as lists); dict keys are
    converted, and list/dict values converted recursively; a plain string
    is converted directly; None passes through.
    """
    def _camel(snake_str):
        head, *tail = snake_str.split("_")
        return head + "".join(part.title() for part in tail)

    if value is None:
        return None
    if isinstance(value, (list, set)):
        return [keys_to_camel_case(element) for element in value]
    if isinstance(value, dict):
        return {
            keys_to_camel_case(key): keys_to_camel_case(val)
            if isinstance(val, (list, dict))
            else val
            for key, val in value.items()
        }
    return _camel(value)
def get_last_run(client: EWSClient, last_run=None):
    """Normalize the fetch-incidents last-run state.

    Resets the state when the configured fetch folder changed, converts the
    stored time string back into an EWSDateTime, and guarantees the ids list
    exists.
    """
    if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name:
        last_run = {
            LAST_RUN_TIME: None,
            LAST_RUN_FOLDER: client.folder_name,
            LAST_RUN_IDS: [],
        }
    if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None:
        last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
    # In case we have existing last_run data
    if last_run.get(LAST_RUN_IDS) is None:
        last_run[LAST_RUN_IDS] = []
    return last_run
def email_ec(item):
    """Build the standard ``Email`` context entry from an exchangelib message item.

    NOTE(review): assumes item.author and item.headers are populated -
    confirm callers only pass fully-fetched messages.
    """
    return {
        "CC": None
        if not item.cc_recipients
        else [mailbox.email_address for mailbox in item.cc_recipients],
        "BCC": None
        if not item.bcc_recipients
        else [mailbox.email_address for mailbox in item.bcc_recipients],
        "To": None
        if not item.to_recipients
        else [mailbox.email_address for mailbox in item.to_recipients],
        "From": item.author.email_address,
        "Subject": item.subject,
        "Text": item.text_body,
        "HTML": item.body,
        "HeadersMap": {header.name: header.value for header in item.headers},
    }
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
    """Convert an exchangelib item into a JSON-serializable dict.

    :param item: exchangelib item (typically a Message)
    :param email_address: when set, stored under the mailbox key of the result
    :param camel_case: convert snake_case keys to camelCase
    :param compact_fields: keep only a curated subset of fields
    """
    def parse_object_as_dict(obj):
        # Generic exchangelib object -> {field name: value} via its FIELDS spec.
        raw_dict = {}
        if obj is not None:
            for field in obj.FIELDS:
                raw_dict[field.name] = getattr(obj, field.name, None)
        return raw_dict

    def parse_folder_as_json(folder):
        # Folders nest: parent folder id and effective rights are objects too.
        raw_dict = parse_object_as_dict(folder)
        if "parent_folder_id" in raw_dict:
            raw_dict["parent_folder_id"] = parse_folder_as_json(
                raw_dict["parent_folder_id"]
            )
        if "effective_rights" in raw_dict:
            raw_dict["effective_rights"] = parse_object_as_dict(
                raw_dict["effective_rights"]
            )
        return raw_dict

    raw_dict = {}
    # Keep scalar fields only. NOTE(review): `None` in this list is a value,
    # not NoneType, so None-valued fields are skipped - confirm intended.
    for field, value in item._field_vals():
        if type(value) in [str, str, int, float, bool, Body, HTMLBody, None]:
            raw_dict[field] = value
    raw_dict["id"] = item.id
    if getattr(item, "attachments", None):
        raw_dict["attachments"] = [
            parse_attachment_as_dict(item.id, x) for x in item.attachments
        ]
    # Datetime fields are rendered in EWS format.
    for time_field in [
        "datetime_sent",
        "datetime_created",
        "datetime_received",
        "last_modified_time",
        "reminder_due_by",
    ]:
        value = getattr(item, time_field, None)
        if value:
            raw_dict[time_field] = value.ewsformat()
    # Object-valued fields become nested dicts (or lists of dicts).
    for dict_field in [
        "effective_rights",
        "parent_folder_id",
        "conversation_id",
        "author",
        "extern_id",
        "received_by",
        "received_representing",
        "reply_to",
        "sender",
        "folder",
    ]:
        value = getattr(item, dict_field, None)
        if value:
            if isinstance(value, list):
                raw_dict[dict_field] = []
                for single_val in value:
                    raw_dict[dict_field].append(parse_object_as_dict(single_val))
            else:
                raw_dict[dict_field] = parse_object_as_dict(value)
    for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
        value = getattr(item, list_dict_field, None)
        if value:
            raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
    if getattr(item, "folder", None):
        raw_dict["folder"] = parse_folder_as_json(item.folder)
        # Strip the "Top of Information Store" prefix for a readable path.
        folder_path = (
            item.folder.absolute[len(TOIS_PATH):]
            if item.folder.absolute.startswith(TOIS_PATH)
            else item.folder.absolute
        )
        raw_dict["folder_path"] = folder_path
    if compact_fields:
        new_dict = {}
        fields_list = [
            "datetime_created",
            "datetime_received",
            "datetime_sent",
            "sender",
            "has_attachments",
            "importance",
            "message_id",
            "last_modified_time",
            "size",
            "subject",
            "text_body",
            "headers",
            "body",
            "folder_path",
            "is_read",
        ]
        if "id" in raw_dict:
            new_dict["itemId"] = raw_dict["id"]
            fields_list.append("itemId")
        for field in fields_list:
            if field in raw_dict:
                new_dict[field] = raw_dict.get(field)
        # Flatten mailbox objects down to their email address.
        for field in ["received_by", "author", "sender"]:
            if field in raw_dict:
                new_dict[field] = raw_dict.get(field, {}).get("email_address")
        for field in ["to_recipients"]:
            if field in raw_dict:
                new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
        attachments = raw_dict.get("attachments")
        if attachments and len(attachments) > 0:
            file_attachments = [
                x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
            ]
            if len(file_attachments) > 0:
                new_dict["FileAttachments"] = file_attachments
            item_attachments = [
                x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
            ]
            if len(item_attachments) > 0:
                new_dict["ItemAttachments"] = item_attachments
        raw_dict = new_dict
    if camel_case:
        raw_dict = keys_to_camel_case(raw_dict)
    if email_address:
        raw_dict[MAILBOX] = email_address
    return raw_dict
def get_entry_for_file_attachment(item_id, attachment):
    """Create a Demisto file entry for *attachment*, with attachment context."""
    entry = fileResult(get_attachment_name(attachment.name), attachment.content)
    entry["EntryContext"] = {
        CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
        + CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
    }
    return entry
def parse_attachment_as_dict(item_id, attachment):
    """Convert a file/item attachment into a context dict.

    The SHA256 is computed over the attachment content (file bytes, or MIME
    content for item attachments). exchangelib raises TypeError with the
    message "must be string or buffer, not None" for empty attachment bodies;
    that case yields a None SHA256 instead of an error.

    BUGFIX/refactor: the except path previously duplicated the whole result
    dict (~20 lines); the two paths are now unified.
    """
    is_file_attachment = isinstance(attachment, FileAttachment)
    try:
        attachment_content = (
            attachment.content
            if is_file_attachment
            else attachment.item.mime_content
        )
    except TypeError as e:
        # Anything other than the known empty-body TypeError is a real error.
        if str(e) != "must be string or buffer, not None":
            raise
        attachment_content = None
    return {
        ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
        ATTACHMENT_ID: attachment.attachment_id.id,
        "attachmentName": get_attachment_name(attachment.name),
        "attachmentSHA256": hashlib.sha256(attachment_content).hexdigest()
        if attachment_content
        else None,
        "attachmentContentType": attachment.content_type,
        "attachmentContentId": attachment.content_id,
        "attachmentContentLocation": attachment.content_location,
        "attachmentSize": attachment.size,
        "attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
        "attachmentIsInline": attachment.is_inline,
        ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
        if is_file_attachment
        else ITEM_ATTACHMENT_TYPE,
    }
def get_entry_for_item_attachment(item_id, attachment, target_email):
    """Create a context entry for an attached mail item (ItemAttachment)."""
    item = attachment.item
    dict_result = parse_attachment_as_dict(item_id, attachment)
    # Merge the parsed inner item's fields over the attachment metadata.
    dict_result.update(
        parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True)
    )
    title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
    return get_entry_for_object(
        title,
        CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
        dict_result,
    )
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
    """Expand a distribution group; optionally recurse into nested groups.

    Returns (readable_output, context_output, raw_response).
    """
    group_members = ExpandGroup(protocol=client.protocol).call(
        email_address, recursive_expansion
    )
    group_details = {"name": email_address, "members": group_members}
    output = {"EWS.ExpandGroup": group_details}
    readable_output = tableToMarkdown("Group Members", group_members)
    return readable_output, output, group_details
def get_searchable_mailboxes(client: EWSClient):
    """List compliance-searchable mailboxes.

    Returns (readable_output, context_output, raw_response).
    """
    searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
    readable_output = tableToMarkdown(
        "Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"]
    )
    output = {"EWS.Mailboxes": searchable_mailboxes}
    return readable_output, output, searchable_mailboxes
def delete_attachments_for_message(
    client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
    """Detach (delete) attachments from a message, optionally only given ids.

    NOTE(review): *target_mailbox* (str or None) is forwarded as the account
    argument of get_attachments_for_item and resolved inside the client.
    Returns one entry per attachment kind that was deleted.
    """
    attachments = client.get_attachments_for_item(
        item_id, target_mailbox, attachment_ids
    )
    deleted_file_attachments = []
    deleted_item_attachments = []
    for attachment in attachments:
        attachment_deleted_action = {
            ATTACHMENT_ID: attachment.attachment_id.id,
            ACTION: "deleted",
        }
        if isinstance(attachment, FileAttachment):
            deleted_file_attachments.append(attachment_deleted_action)
        else:
            deleted_item_attachments.append(attachment_deleted_action)
        # detach() performs the actual server-side deletion.
        attachment.detach()
    entries = []
    if len(deleted_file_attachments) > 0:
        entry = get_entry_for_object(
            "Deleted file attachments",
            "EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
            deleted_file_attachments,
        )
        entries.append(entry)
    if len(deleted_item_attachments) > 0:
        entry = get_entry_for_object(
            "Deleted item attachments",
            "EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
            deleted_item_attachments,
        )
        entries.append(entry)
    return entries
def fetch_attachments_for_message(
    client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
    """Download a message's attachments as war-room entries.

    File attachments become file entries; item attachments become context
    entries plus an .eml file of their MIME content.
    """
    account = client.get_account(target_mailbox)
    attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
    entries = []
    for attachment in attachments:
        if isinstance(attachment, FileAttachment):
            try:
                if attachment.content:
                    entries.append(get_entry_for_file_attachment(item_id, attachment))
            except TypeError as e:
                # exchangelib raises this specific TypeError for empty
                # attachment bodies - skip those silently.
                if str(e) != "must be string or buffer, not None":
                    raise
        else:
            entries.append(
                get_entry_for_item_attachment(
                    item_id, attachment, account.primary_smtp_address
                )
            )
            if attachment.item.mime_content:
                entries.append(
                    fileResult(
                        get_attachment_name(attachment.name) + ".eml",
                        attachment.item.mime_content,
                    )
                )
    return entries
def move_item_between_mailboxes(
    client: EWSClient,
    item_id,
    destination_mailbox,
    destination_folder_path,
    source_mailbox=None,
    is_public=None,
):
    """Move an item across mailboxes.

    EWS has no cross-mailbox move, so this exports the item, uploads the
    copy to the destination folder, then deletes the source item.
    """
    source_account = client.get_account(source_mailbox)
    destination_account = client.get_account(destination_mailbox)
    is_public = client.is_default_folder(destination_folder_path, is_public)
    destination_folder = client.get_folder_by_path(
        destination_folder_path, destination_account, is_public
    )
    item = client.get_item_from_mailbox(source_account, item_id)
    exported_items = source_account.export([item])
    destination_account.upload([(destination_folder, exported_items[0])])
    source_account.bulk_delete([item])
    move_result = {
        MOVED_TO_MAILBOX: destination_mailbox,
        MOVED_TO_FOLDER: destination_folder_path,
    }
    readable_output = "Item was moved successfully."
    output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
    return readable_output, output, move_result
def move_item(
    client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
    """Move an item to another folder within the same mailbox."""
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(target_folder_path, is_public)
    target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
    item = client.get_item_from_mailbox(account, item_id)
    if isinstance(item, ErrorInvalidIdMalformed):
        raise Exception("Item not found")
    item.move(target_folder)
    move_result = {
        NEW_ITEM_ID: item.id,  # moving assigns the item a new id
        ITEM_ID: item_id,
        MESSAGE_ID: item.message_id,
        ACTION: "moved",
    }
    readable_output = tableToMarkdown("Moved items", move_result)
    output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
    return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
    """Delete items by id using trash / soft / hard deletion.

    :raises Exception: for an unrecognized *delete_type*
    """
    deleted_items = []
    item_ids = argToList(item_ids)
    items = client.get_items_from_mailbox(target_mailbox, item_ids)
    delete_type = delete_type.lower()
    for item in items:
        item_id = item.id  # captured before deletion invalidates the item
        if delete_type == "trash":
            item.move_to_trash()
        elif delete_type == "soft":
            item.soft_delete()
        elif delete_type == "hard":
            item.delete()
        else:
            raise Exception(
                f'invalid delete type: {delete_type}. Use "trash" \\ "soft" \\ "hard"'
            )
        deleted_items.append(
            {
                ITEM_ID: item_id,
                MESSAGE_ID: item.message_id,
                ACTION: f"{delete_type}-deleted",
            }
        )
    readable_output = tableToMarkdown(
        f"Deleted items ({delete_type} delete type)", deleted_items
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
    return readable_output, output, deleted_items
def search_items_in_mailbox(
    client: EWSClient,
    query=None,
    message_id=None,
    folder_path="",
    limit=100,
    target_mailbox=None,
    is_public=None,
    selected_fields="all",
):
    """Search items by EWS query string or by message id.

    Searches a single folder when *folder_path* is given, otherwise walks
    every folder under the inbox's parent. Results are capped at *limit*.
    """
    if not query and not message_id:
        return_error("Missing required argument. Provide query or message-id")
    # NOTE(review): the id is wrapped in <> only when BOTH brackets are
    # missing; a half-bracketed id passes through unchanged - confirm intended.
    if message_id and message_id[0] != "<" and message_id[-1] != ">":
        message_id = "<{}>".format(message_id)
    account = client.get_account(target_mailbox)
    limit = int(limit)
    if folder_path.lower() == "inbox":
        folders = [account.inbox]
    elif folder_path:
        is_public = client.is_default_folder(folder_path, is_public)
        folders = [client.get_folder_by_path(folder_path, account, is_public)]
    else:
        folders = account.inbox.parent.walk()
    items = []
    selected_all_fields = selected_fields == "all"
    if selected_all_fields:
        restricted_fields = list([x.name for x in Message.FIELDS])
    else:
        restricted_fields = set(argToList(selected_fields))
        # id and message_id are always needed for context keys.
        restricted_fields.update(["id", "message_id"])
    for folder in folders:
        # Skip folders that cannot contain mail messages (e.g. calendars).
        if Message not in folder.supported_item_models:
            continue
        if query:
            items_qs = folder.filter(query).only(*restricted_fields)
        else:
            items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
        items += get_limited_number_of_messages_from_qs(items_qs, limit)
        if len(items) >= limit:
            break
    items = items[:limit]
    searched_items_result = [
        parse_item_as_dict(
            item,
            account.primary_smtp_address,
            camel_case=True,
            compact_fields=selected_all_fields,
        )
        for item in items
    ]
    if not selected_all_fields:
        # Keep only the user-selected (camelCased) keys.
        searched_items_result = [
            {k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
            for i in searched_items_result
        ]
        for item in searched_items_result:
            item["itemId"] = item.pop("id", "")
    readable_output = tableToMarkdown(
        "Searched items",
        searched_items_result,
        headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
    return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
    """Retrieve the out-of-office (OOF) settings for a mailbox.

    Returns (readable_output, context_output, raw_response).
    """
    account = client.get_account(target_mailbox)
    oof = account.oof_settings
    oof_dict = {
        "state": oof.state,  # Enabled / Scheduled / Disabled
        "externalAudience": getattr(oof, "external_audience", None),
        "start": oof.start.ewsformat() if oof.start else None,
        "end": oof.end.ewsformat() if oof.end else None,
        # BUGFIX: these attribute names were misspelled ("internal_replay" /
        # "external_replay"), so getattr always fell back to None. exchangelib's
        # OofSettings exposes internal_reply / external_reply.
        "internalReply": getattr(oof, "internal_reply", None),
        "externalReply": getattr(oof, "external_reply", None),
        MAILBOX: account.primary_smtp_address,
    }
    readable_output = tableToMarkdown(
        f"Out of office state for {account.primary_smtp_address}", oof_dict
    )
    output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
    return readable_output, output, oof_dict
def recover_soft_delete_item(
    client: EWSClient,
    message_ids,
    target_folder_path="Inbox",
    target_mailbox=None,
    is_public=None,
):
    """Recover soft-deleted messages into *target_folder_path*.

    :raises Exception: listing the message ids that could not be found in
        the recoverable-items directory
    """
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(target_folder_path, is_public)
    target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
    recovered_messages = []
    message_ids = argToList(message_ids)
    items_to_recover = account.recoverable_items_deletions.filter(
        message_id__in=message_ids
    ).all()

    recovered_items = set()
    for item in items_to_recover:
        recovered_items.add(item)
    if len(recovered_items) != len(message_ids):
        # BUGFIX: the difference was previously taken against the Item objects
        # themselves (never equal to the id strings), so every requested id was
        # reported as missing. Compare against the found message ids instead.
        recovered_message_ids = {item.message_id for item in recovered_items}
        missing_items = set(message_ids).difference(recovered_message_ids)
        raise Exception(
            f"Some message ids are missing in recoverable items directory: {missing_items}"
        )
    for item in recovered_items:
        item.move(target_folder)
        recovered_messages.append(
            {ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
        )
    readable_output = tableToMarkdown("Recovered messages", recovered_messages)
    output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
    return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
    """List up to *limit* contacts from the mailbox's contacts folder."""
    def parse_physical_address(address):
        result = {}
        for attr in ["city", "country", "label", "state", "street", "zipcode"]:
            result[attr] = getattr(address, attr, None)
        return result

    def parse_phone_number(phone_number):
        result = {}
        for attr in ["label", "phone_number"]:
            result[attr] = getattr(phone_number, attr, None)
        return result

    def parse_contact(contact):
        # Keep scalar string fields; render EWS datetimes in EWS format.
        contact_dict = dict(
            (k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
            for k, v in list(contact._field_vals())
            if isinstance(v, str) or isinstance(v, EWSDateTime)
        )
        if isinstance(contact, Contact) and contact.physical_addresses:
            contact_dict["physical_addresses"] = list(
                map(parse_physical_address, contact.physical_addresses)
            )
        if isinstance(contact, Contact) and contact.phone_numbers:
            contact_dict["phone_numbers"] = list(
                map(parse_phone_number, contact.phone_numbers)
            )
        if (
            isinstance(contact, Contact)
            and contact.email_addresses
            and len(contact.email_addresses) > 0
        ):
            contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
        contact_dict = keys_to_camel_case(contact_dict)
        # Drop falsy values and the bulky MIME content.
        contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
        contact_dict.pop("mimeContent", None)
        contact_dict["originMailbox"] = target_mailbox
        return contact_dict

    account = client.get_account(target_mailbox)
    contacts = []
    for contact in account.contacts.all()[: int(limit)]:
        contacts.append(parse_contact(contact))
    readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
    output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
    return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
    """Create *new_folder_name* under *folder_path* (no-op if it already exists).

    NOTE(review): the trailing commas make both returns 1-tuples - presumably
    the command dispatcher unpacks results with ``*`` (other commands return
    3-tuples); confirm before "fixing".
    """
    account = client.get_account(target_mailbox)
    full_path = os.path.join(folder_path, new_folder_name)
    try:
        # A successful lookup means the folder already exists.
        if client.get_folder_by_path(full_path, account):
            return f"Folder {full_path} already exists",
    except Exception:
        pass
    parent_folder = client.get_folder_by_path(folder_path, account)
    f = Folder(parent=parent_folder, name=new_folder_name)
    f.save()
    # Sanity check: the new folder must now resolve (raises otherwise).
    client.get_folder_by_path(full_path, account)
    return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
    """Walk the mailbox's folder tree and return it as markdown + context."""
    account = client.get_account(target_mailbox)
    root = account.root
    if client.is_public_folder:
        root = account.public_folders_root
    folders = []
    for f in root.walk():
        folder = folder_to_context_entry(f)
        folders.append(folder)
    folders_tree = root.tree()  # human-readable ASCII tree of the hierarchy
    readable_output = folders_tree
    output = {"EWS.Folders(val.id == obj.id)": folders}
    return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
    """Mark a mailbox item as junk, optionally moving it to the junk folder.

    :param client: EWS client instance
    :param item_id: id of the item to mark
    :param move_items: "yes"/"no" - whether to also move the item
    :param target_mailbox: mailbox to operate on (client default when None)
    :return: tuple of (readable output, context output, raw result)
    :raises Exception: when the EWS MarkAsJunk call does not report success
    """
    account = client.get_account(target_mailbox)
    should_move = move_items.lower() == "yes"
    ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=should_move)
    if ews_result != "Success":
        raise Exception("Failed mark-item-as-junk with error: " + ews_result)
    mark_as_junk_result = {
        ITEM_ID: item_id,
        ACTION: "marked-as-junk",
    }
    readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
    output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
    return readable_output, output, mark_as_junk_result
def get_items_from_folder(
    client: EWSClient,
    folder_path,
    limit=100,
    target_mailbox=None,
    is_public=None,
    get_internal_item="no",
):
    """Retrieve the newest items from a mailbox folder.

    :param client: EWS client instance
    :param folder_path: path of the folder to read from
    :param limit: maximum number of items to return (default 100)
    :param target_mailbox: mailbox to operate on (client default when None)
    :param is_public: whether the folder is a public folder ("yes"/"no"/None)
    :param get_internal_item: "yes"/"no" - when "yes", an attached e-mail
        message replaces its carrier item in the results
    :return: tuple of (readable output, context output, item dict list)
    """
    account = client.get_account(target_mailbox)
    limit = int(limit)
    get_internal_item = get_internal_item == "yes"
    is_public = client.is_default_folder(folder_path, is_public)
    folder = client.get_folder_by_path(folder_path, account, is_public)
    # Newest-first; the slice caps the queryset server-side, the helper caps
    # it again client-side after filtering.
    qs = folder.filter().order_by("-datetime_created")[:limit]
    items = get_limited_number_of_messages_from_qs(qs, limit)
    items_result = []

    for item in items:
        item_attachment = parse_item_as_dict(
            item, account.primary_smtp_address, camel_case=True, compact_fields=True
        )
        for attachment in item.attachments:
            # When requested, surface the first attached e-mail message
            # instead of the item that carries it.
            if (
                get_internal_item
                and isinstance(attachment, ItemAttachment)
                and isinstance(attachment.item, Message)
            ):
                item_attachment = parse_item_as_dict(
                    attachment.item,
                    account.primary_smtp_address,
                    camel_case=True,
                    compact_fields=True,
                )
                break
        items_result.append(item_attachment)

    # Columns for the human readable markdown table.
    hm_headers = [
        "sender",
        "subject",
        "hasAttachments",
        "datetimeReceived",
        "receivedBy",
        "author",
        "toRecipients",
        "id",
    ]
    readable_output = tableToMarkdown(
        "Items in folder " + folder_path, items_result, headers=hm_headers
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
    return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
    """Fetch specific mailbox items by id.

    :param client: EWS client instance
    :param item_ids: comma separated list (or list) of item ids
    :param target_mailbox: mailbox to operate on (client default when None)
    :return: tuple of (readable output, context output, incident dict list)
    """
    item_id_list = argToList(item_ids)
    account = client.get_account(target_mailbox)
    fetched = client.get_items_from_mailbox(account, item_id_list)
    # Only keep real e-mail messages (drops meeting requests etc.).
    messages = [entry for entry in fetched if isinstance(entry, Message)]
    items_as_incidents = [parse_incident_from_item(msg) for msg in messages]
    items_to_context = [
        parse_item_as_dict(msg, account.primary_smtp_address, True, True)
        for msg in messages
    ]
    readable_output = tableToMarkdown(
        "Get items", items_to_context, ITEMS_RESULTS_HEADERS
    )
    output = {
        CONTEXT_UPDATE_EWS_ITEM: items_to_context,
        "Email": [email_ec(msg) for msg in messages],
    }
    return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
    """Return a single mailbox folder as a context entry.

    :param client: EWS client instance
    :param folder_path: path of the folder to fetch
    :param target_mailbox: mailbox to operate on (client default when None)
    :param is_public: whether the folder is a public folder ("yes"/"no"/None)
    :return: tuple of (readable output, context output, folder entry)
    """
    account = client.get_account(target_mailbox)
    is_public = client.is_default_folder(folder_path, is_public)
    raw_folder = client.get_folder_by_path(
        folder_path, account=account, is_public=is_public
    )
    folder = folder_to_context_entry(raw_folder)
    readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
    output = {CONTEXT_UPDATE_FOLDER: folder}
    return readable_output, output, folder
def folder_to_context_entry(f):
    """Convert an exchangelib folder (or a folder-like dict) to a context dict.

    :param f: an exchangelib ``Folder`` object, or a dict with snake_case keys
    :return: dict with camelCase folder fields; returns None implicitly when
        ``f`` has neither the folder attributes nor is a dict
    """
    try:
        f_entry = {
            "name": f.name,
            "totalCount": f.total_count,
            "id": f.id,
            "childrenFolderCount": f.child_folder_count,
            "changeKey": f.changekey,
        }
        # unread_count only exists on some Folder schema versions.
        if "unread_count" in [x.name for x in Folder.FIELDS]:
            f_entry["unreadCount"] = f.unread_count
        return f_entry
    except AttributeError:
        # Fall back for plain dicts (e.g. already-serialized folders).
        if isinstance(f, dict):
            return {
                "name": f.get("name"),
                "totalCount": f.get("total_count"),
                "id": f.get("id"),
                "childrenFolderCount": f.get("child_folder_count"),
                "changeKey": f.get("changekey"),
                "unreadCount": f.get("unread_count"),
            }
def mark_item_as_read(
    client: EWSClient, item_ids, operation="read", target_mailbox=None
):
    """Mark mailbox items as read or unread.

    :param client: EWS client instance
    :param item_ids: comma separated list (or list) of item ids
    :param operation: "read" or "unread"
    :param target_mailbox: mailbox to operate on
    :return: tuple of (readable output, context output, marked item list)
    """
    marked_items = []
    item_ids = argToList(item_ids)
    # NOTE(review): target_mailbox is passed directly here, while sibling
    # commands resolve it via client.get_account() first - confirm
    # get_items_from_mailbox accepts a mailbox identifier as well.
    items = client.get_items_from_mailbox(target_mailbox, item_ids)
    items = [x for x in items if isinstance(x, Message)]
    for item in items:
        item.is_read = operation == "read"
        item.save()
        marked_items.append(
            {
                ITEM_ID: item.id,
                MESSAGE_ID: item.message_id,
                ACTION: "marked-as-{}".format(operation),
            }
        )
    readable_output = tableToMarkdown(
        f"Marked items ({operation} marked operation)", marked_items
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
    return readable_output, output, marked_items
def random_word_generator(length):
    """Return a random string of ``length`` lowercase ASCII letters."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def handle_html(html_body):
    """Extract inline base64 images from an HTML body.

    Each ``<img src="data:image/...;base64,...">`` occurrence is replaced
    with a ``cid:`` reference and collected as an attachment entry so the
    images can be sent as inline file attachments.

    :param html_body: HTML string that may embed base64 data-URI images
    :return: tuple of (HTML with cid references, list of attachment dicts
        with ``data``/``name``/``cid`` keys)
    """
    img_pattern = re.compile(
        r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', re.I
    )
    attachments = []
    body_parts = []
    last_index = 0
    for i, match in enumerate(img_pattern.finditer(html_body)):
        name = f'image{i}'
        attachment = {
            'data': base64.b64decode(match.group(3)),
            'name': name,
            'cid': f'{name}@{random_word_generator(8)}.{random_word_generator(8)}',
        }
        attachments.append(attachment)
        # Copy everything up to the src value, then point it at the new cid.
        body_parts.append(html_body[last_index:match.start(1)] + 'cid:' + attachment['cid'])
        # match.end() - 1 keeps the closing quote of the src attribute.
        last_index = match.end() - 1
    body_parts.append(html_body[last_index:])
    return ''.join(body_parts), attachments
def collect_manual_attachments(manualAttachObj):
    """Load war-room files that were manually attached to the command.

    :param manualAttachObj: list (or comma separated string) of attachment
        objects with ``RealFileName`` and ``FileName`` keys
    :return: list of attachment dicts with ``name``/``data``/``cid`` keys
        (cid is always empty for manual attachments)
    """
    attachments = []
    for entry in argToList(manualAttachObj):
        file_info = demisto.getFilePath(os.path.basename(entry['RealFileName']))
        with open(file_info['path'], 'rb') as file_handle:
            file_data = file_handle.read()
        attachments.append({
            'name': entry['FileName'],
            'data': file_data,
            'cid': ''
        })
    return attachments
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
    """Load war-room file entries referenced by id as e-mail attachments.

    :param attachments_ids: comma separated list of war-room entry ids
    :param attachments_cids: optional parallel list of content ids
    :param attachments_names: optional parallel list of override file names
    :return: list of attachment dicts with ``name``/``data``/``cid`` keys
    """
    attachments = []
    files_ids = argToList(attachments_ids)
    files_cids = argToList(attachments_cids)
    files_names = argToList(attachments_names)
    for index, file_id in enumerate(files_ids):
        try:
            file_res = demisto.getFilePath(file_id)
            path = file_res['path']
            # Prefer a caller-supplied name; fall back to the entry's name.
            if len(files_names) > index and files_names[index]:
                filename = files_names[index]
            else:
                filename = file_res['name']
            # cid is optional per file; missing/empty means not inline.
            if len(files_cids) > index and files_cids[index]:
                cid = files_cids[index]
            else:
                cid = ''
            with open(path, 'rb') as fp:
                data = fp.read()
            attachments.append({
                'name': filename,
                'data': data,
                'cid': cid
            })
        except Exception as e:
            demisto.error(f'Invalid entry {file_id} with exception: {e}')
            # NOTE(review): return_error is expected to abort execution; if it
            # ever returns, the loop would continue with the next file id.
            return_error(f'Entry {file_id} is not valid or is not a file entry')
    return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
    """Build attachment entries from transient (content-supplied) files.

    :param transient_files: comma separated list of file names
    :param transient_files_contents: parallel list of file contents (text)
    :param transient_files_cids: optional parallel list of content ids
    :return: list of attachment dicts with ``name``/``data``/``cid`` keys
    """
    transient_attachments = []
    files_names = argToList(transient_files)
    files_contents = argToList(transient_files_contents)
    files_cids = argToList(transient_files_cids)
    # zip() stops at the shorter list: names without contents are dropped.
    for index, (file_name, file_content) in enumerate(zip(files_names, files_contents)):
        file_cid = files_cids[index] if index < len(files_cids) else ''
        transient_attachments.append({
            'name': file_name,
            'data': bytes(file_content, UTF_8),
            'cid': file_cid
        })
    return transient_attachments
def handle_template_params(template_params):
    """Resolve body-template parameters to concrete values.

    :param template_params: JSON string mapping placeholder names to objects
        with either a literal ``value`` or a context ``key`` to resolve
    :return: dict of placeholder name -> resolved value (empty when no params)
    """
    actual_params = {}
    if template_params:
        try:
            params = json.loads(template_params)
            for p in params:
                if params[p].get('value'):
                    # Literal value wins over a context key.
                    actual_params[p] = params[p]['value']
                elif params[p].get('key'):
                    # Resolve the key against the incident context (DT syntax).
                    actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
        except ValueError as e:
            return_error('Unable to parse template_params: %s' % (str(e)))
    return actual_params
def create_message_object(to, cc, bcc, subject, body, additional_headers):
    """Construct an exchangelib Message, forwarding extended headers when given.

    :param to: list of "to" recipients
    :param cc: list of "cc" recipients
    :param bcc: list of "bcc" recipients
    :param subject: e-mail subject
    :param body: e-mail body (plain or HTMLBody)
    :param additional_headers: optional dict of registered header kwargs
    :return: a new ``Message`` instance
    """
    extra_kwargs = dict(additional_headers) if additional_headers else {}
    return Message(
        to_recipients=to,
        cc_recipients=cc,
        bcc_recipients=bcc,
        subject=subject,
        body=body,
        **extra_kwargs
    )
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
                   additional_headers=None):
    """Build an exchangelib Message with attachments.

    :param to: list of "to" recipients
    :param subject: e-mail subject
    :param body: plain-text body (used when no html_body is given)
    :param bcc: list of "bcc" recipients
    :param cc: list of "cc" recipients
    :param html_body: optional HTML body; inline base64 images are converted
        to cid attachments
    :param attachments: list of attachment dicts (``name``/``data``/``cid``)
    :param additional_headers: optional dict of registered header kwargs
    :return: the constructed ``Message``
    """
    if not html_body:
        message = create_message_object(to, cc, bcc, subject, body, additional_headers)
        for attachment in attachments:
            # NOTE(review): cid-bearing attachments are skipped in the
            # plain-text branch - inline images only make sense with HTML.
            if not attachment.get('cid'):
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
                message.attach(new_attachment)
    else:
        # Extract embedded base64 images into cid attachments first.
        html_body, html_attachments = handle_html(html_body)
        attachments += html_attachments
        message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
        for attachment in attachments:
            if not attachment.get('cid'):
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
            else:
                # Inline attachment referenced from the HTML via cid:.
                new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
                                                is_inline=True, content_id=attachment.get('cid'))
            message.attach(new_attachment)
    return message
def add_additional_headers(additional_headers):
    """Register custom internet headers on the Message class.

    Each "name=value" entry gets a dynamically created ExtendedProperty
    subclass registered on ``Message`` so the header can be passed as a
    keyword argument when constructing the message.

    :param additional_headers: comma separated list of "name=value" strings
    :return: dict of successfully registered header name -> value
    """
    headers = dict()
    for header in argToList(additional_headers):
        header_name, header_value = header.split('=', 1)

        # One ExtendedProperty subclass per header; registration is a
        # class-level side effect on Message.
        class TempClass(ExtendedProperty):
            distinguished_property_set_id = 'InternetHeaders'
            property_name = header_name
            property_type = 'String'

        try:
            Message.register(header_name, TempClass)
            headers[header_name] = header_value
        except ValueError as e:
            # Raised e.g. when the header was already registered; the header
            # is then silently dropped from the returned dict.
            demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
    return headers
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
               attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
               transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
               additionalHeader=None, raw_message=None):
    """Send an e-mail through EWS (implements the ``send-mail`` command).

    :param client: EWS client instance
    :param to: "to" recipients (comma separated or list)
    :param subject: e-mail subject
    :param body: plain-text body
    :param bcc: "bcc" recipients
    :param cc: "cc" recipients
    :param htmlBody: optional HTML body (takes precedence over ``body``)
    :param attachIDs: war-room entry ids to attach
    :param attachCIDs: content ids for the attached entries
    :param attachNames: override names for the attached entries
    :param manualAttachObj: manually attached file objects
    :param transientFile: transient file names
    :param transientFileContent: transient file contents
    :param transientFileCID: transient file content ids
    :param templateParams: JSON template parameters for body formatting
    :param additionalHeader: extra "name=value" internet headers
    :param raw_message: when given, sent verbatim as the message body and all
        other content/attachment arguments are ignored
    :return: tuple of (readable message, empty context, empty raw result)
    """
    to = argToList(to)
    cc = argToList(cc)
    bcc = argToList(bcc)

    if not to and not cc and not bcc:
        return_error('You must have at least one recipient')

    if raw_message:
        message = Message(
            to_recipients=to,
            cc_recipients=cc,
            bcc_recipients=bcc,
            body=raw_message
        )
    else:
        if additionalHeader:
            additionalHeader = add_additional_headers(additionalHeader)

        # Gather attachments from all three sources.
        attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
        attachments.extend(collect_manual_attachments(manualAttachObj))
        attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))

        # Apply template substitution to both bodies when params were given.
        template_params = handle_template_params(templateParams)
        if template_params:
            body = body.format(**template_params)
            if htmlBody:
                htmlBody = htmlBody.format(**template_params)

        message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)

    client.send_email(message)
    return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
    """Export a mailbox item as an .eml file entry in the war room.

    :param client: EWS client instance
    :param item_id: id of the item to export
    :param target_mailbox: mailbox to operate on (client default when None)
    :return: a war-room file result dict (or an error string when the upload
        failed); returns None implicitly when the item has no MIME content
    """
    account = client.get_account(target_mailbox)
    item = client.get_item_from_mailbox(account, item_id)

    if item.mime_content:
        mime_content = item.mime_content
        if isinstance(mime_content, bytes):
            email_content = email.message_from_bytes(mime_content)
        else:
            email_content = email.message_from_string(mime_content)
        if item.headers:
            # Headers already present in the MIME payload, with folded
            # continuation lines flattened to single spaces.
            attached_email_headers = [
                (h, " ".join(map(str.strip, v.split("\r\n"))))
                for (h, v) in list(email_content.items())
            ]
            # Add EWS-reported headers that are missing from the MIME payload
            # (Content-Type is left alone to avoid corrupting the message).
            for header in item.headers:
                if (
                    header.name,
                    header.value,
                ) not in attached_email_headers and header.name != "Content-Type":
                    email_content.add_header(header.name, header.value)
        eml_name = item.subject if item.subject else "demisto_untitled_eml"
        file_result = fileResult(eml_name + ".eml", email_content.as_string())
        file_result = (
            file_result if file_result else "Failed uploading eml file to war room"
        )

        return file_result
def parse_incident_from_item(item):
    """Convert an exchangelib mail item into a Demisto incident dict.

    Builds incident name/details/occurred, a label per interesting field
    (recipients, bodies, headers, ids), and uploads every attachment as a
    war-room file.

    :param item: exchangelib Message-like item
    :return: incident dict with ``labels`` and ``rawJSON`` populated
    """
    incident = {}
    labels = []

    try:
        incident["details"] = item.text_body or item.body
    except AttributeError:
        # Some item types do not expose text_body at all.
        incident["details"] = item.body

    incident["name"] = item.subject
    labels.append({"type": "Email/subject", "value": item.subject})
    incident["occurred"] = item.datetime_created.ewsformat()

    # Recipient / sender labels.
    if item.to_recipients:
        for recipient in item.to_recipients:
            labels.append({"type": "Email", "value": recipient.email_address})
    if item.cc_recipients:
        for recipient in item.cc_recipients:
            labels.append({"type": "Email/cc", "value": recipient.email_address})
    if item.sender:
        labels.append({"type": "Email/from", "value": item.sender.email_address})

    # Body labels; HTML wins when both bodies are present.
    email_format = ""
    try:
        if item.text_body:
            labels.append({"type": "Email/text", "value": item.text_body})
            email_format = "text"
    except AttributeError:
        pass
    if item.body:
        labels.append({"type": "Email/html", "value": item.body})
        email_format = "HTML"
    labels.append({"type": "Email/format", "value": email_format})

    # Attachments: files are uploaded as-is, attached mail items are
    # re-serialized as .eml files.
    if item.attachments:
        incident["attachment"] = []
        for attachment in item.attachments:
            file_result = None
            label_attachment_type = None
            label_attachment_id_type = None
            if isinstance(attachment, FileAttachment):
                try:
                    if attachment.content:
                        label_attachment_type = "attachments"
                        label_attachment_id_type = "attachmentId"
                        file_name = get_attachment_name(attachment.name)
                        file_result = fileResult(file_name, attachment.content)
                        if file_result["Type"] == entryTypes["error"]:
                            demisto.error(file_result["Contents"])
                            raise Exception(file_result["Contents"])
                        incident["attachment"].append(
                            {
                                "path": file_result["FileID"],
                                "name": get_attachment_name(attachment.name),
                            }
                        )
                except TypeError as e:
                    # Empty attachment content raises this specific TypeError;
                    # such attachments are skipped, anything else re-raises.
                    if str(e) != "must be string or buffer, not None":
                        raise
                    continue
            else:
                label_attachment_type = "attachmentItems"
                label_attachment_id_type = "attachmentItemsId"
                if attachment.item.mime_content:
                    mime_content = attachment.item.mime_content
                    attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
                        else email.message_from_string(mime_content)
                    if attachment.item.headers:
                        # Merge EWS headers missing from the MIME payload
                        # (folded header values flattened for comparison).
                        attached_email_headers = [
                            (h, " ".join(map(str.strip, v.split("\r\n"))))
                            for (h, v) in list(attached_email.items())
                        ]
                        for header in attachment.item.headers:
                            if (
                                (header.name, header.value)
                                not in attached_email_headers
                                and header.name != "Content-Type"
                            ):
                                attached_email.add_header(header.name, header.value)

                    file_result = fileResult(
                        get_attachment_name(attachment.name) + ".eml",
                        attached_email.as_bytes().decode('utf-8'),
                    )

                if file_result:
                    if file_result["Type"] == entryTypes["error"]:
                        demisto.error(file_result["Contents"])
                        raise Exception(file_result["Contents"])

                    incident["attachment"].append(
                        {
                            "path": file_result["FileID"],
                            "name": get_attachment_name(attachment.name) + ".eml",
                        }
                    )

            labels.append(
                {
                    "type": label_attachment_type,
                    "value": get_attachment_name(attachment.name),
                }
            )
            labels.append(
                {"type": label_attachment_id_type, "value": attachment.attachment_id.id}
            )

    # One label per header plus an aggregated Email/headers label.
    if item.headers:
        headers = []
        for header in item.headers:
            labels.append(
                {
                    "type": "Email/Header/{}".format(header.name),
                    "value": str(header.value),
                }
            )
            headers.append("{}: {}".format(header.name, header.value))
        labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})

    # Identifier labels.
    if item.message_id:
        labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
    if item.id:
        labels.append({"type": "Email/ID", "value": item.id})
        labels.append({"type": "Email/itemId", "value": item.id})
    if item.conversation_id:
        labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})

    incident["labels"] = labels
    incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)

    return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
    """Fetch new e-mails and return them as incidents (fetch-incidents flow).

    Keeps a bounded deque of already-seen message ids in last_run for
    de-duplication, and an error counter that tolerates up to two
    consecutive RateLimitError occurrences before re-raising.

    :param client: EWS client instance
    :param last_run: last run dict from the previous fetch
    :return: list of incident dicts (empty on a tolerated rate-limit error)
    """
    last_run = get_last_run(client, last_run)
    try:
        last_emails = fetch_last_emails(
            client,
            client.folder_name,
            last_run.get(LAST_RUN_TIME),
            last_run.get(LAST_RUN_IDS),
        )
        # Bounded de-duplication window of message ids across runs.
        ids = deque(
            last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
        )
        incidents = []
        incident: Dict[str, str] = {}
        for item in last_emails:
            if item.message_id:
                ids.append(item.message_id)
                incident = parse_incident_from_item(item)
                incidents.append(incident)

                if len(incidents) >= client.max_fetch:
                    break

        # Advance the watermark to the last processed incident's time.
        last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
        if isinstance(last_run_time, EWSDateTime):
            last_run_time = last_run_time.ewsformat()

        new_last_run = {
            LAST_RUN_TIME: last_run_time,
            LAST_RUN_FOLDER: client.folder_name,
            LAST_RUN_IDS: list(ids),
            ERROR_COUNTER: 0,
        }

        demisto.setLastRun(new_last_run)
        return incidents

    except RateLimitError:
        # Persist a serializable last_run and count consecutive failures.
        if LAST_RUN_TIME in last_run:
            last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
        if ERROR_COUNTER not in last_run:
            last_run[ERROR_COUNTER] = 0
        last_run[ERROR_COUNTER] += 1
        demisto.setLastRun(last_run)
        # Give up (and surface the error) after the third hit in a row.
        if last_run[ERROR_COUNTER] > 2:
            raise
        return []
def fetch_last_emails(
    client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
    """Query the fetch folder for messages, oldest first.

    :param client: EWS client instance
    :param folder_name: folder to fetch from (default "Inbox")
    :param since_datetime: lower bound on datetime_received; when None, the
        last 10 minutes of modifications are used instead
    :param exclude_ids: message ids to filter out (already fetched)
    :return: list of ``Message`` items sorted by datetime_received ascending
    """
    qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
    if since_datetime:
        qs = qs.filter(datetime_received__gte=since_datetime)
    else:
        # First run: look back 10 minutes by last modification time.
        last_10_min = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta(
            minutes=10
        )
        qs = qs.filter(last_modified_time__gte=last_10_min)
    # Restrict to known Message fields and order oldest-first.
    qs = qs.filter().only(*[x.name for x in Message.FIELDS])
    qs = qs.filter().order_by("datetime_received")
    result = qs.all()
    result = [x for x in result if isinstance(x, Message)]
    # Drop ids the previous runs already turned into incidents.
    if exclude_ids and len(exclude_ids) > 0:
        exclude_ids = set(exclude_ids)
        result = [x for x in result if x.message_id not in exclude_ids]
    return result
def test_module(client: EWSClient, max_fetch):
    """Validate instance configuration (test-module flow).

    Checks the max_fetch bound, authentication, mailbox read permission and
    access to the configured fetch folder.

    :param client: EWS client instance
    :param max_fetch: configured max incidents per fetch
    :return: "ok" when every check passes
    :raises Exception: with an actionable message on permission problems
    """
    try:
        if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
            return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
                         f'You provided: {max_fetch}')
        account = client.get_account()
        if not account.root.effective_rights.read:
            raise Exception(
                "Success to authenticate, but user has no permissions to read from the mailbox. "
                "Need to delegate the user permissions to the mailbox - "
                "please read integration documentation and follow the instructions"
            )
        client.get_folder_by_path(
            client.folder_name, account, client.is_public_folder
        ).test_access()
    except ErrorFolderNotFound as e:
        # Authenticated, but the configured fetch folder is not readable.
        if "Top of Information Store" in str(e):
            raise Exception(
                "Success to authenticate, but user probably has no permissions to read from the specific folder."
                "Check user permissions. You can try !ews-find-folders command to "
                "get all the folders structure that the user has permissions to"
            )

    return "ok"
def sub_main():
    """Dispatch the current Demisto command and translate errors.

    Builds the EWS client from the instance params, routes the command to
    the matching handler, and converts known exchangelib/transport failures
    into user-friendly messages. Always cleans up exchangelib state and the
    debug log handler in the ``finally`` block.
    """
    is_test_module = False
    params = demisto.params()
    args = prepare_args(demisto.args())
    # The per-command mailbox argument overrides the configured default.
    params['default_target_mailbox'] = args.get('target_mailbox',
                                                args.get('source_mailbox', params['default_target_mailbox']))
    client = EWSClient(**params)
    start_logging()
    try:
        command = demisto.command()
        # commands that return a single note result
        normal_commands = {
            "ews-get-searchable-mailboxes": get_searchable_mailboxes,
            "ews-move-item-between-mailboxes": move_item_between_mailboxes,
            "ews-move-item": move_item,
            "ews-delete-items": delete_items,
            "ews-search-mailbox": search_items_in_mailbox,
            "ews-get-contacts": get_contacts,
            "ews-get-out-of-office": get_out_of_office_state,
            "ews-recover-messages": recover_soft_delete_item,
            "ews-create-folder": create_folder,
            "ews-mark-item-as-junk": mark_item_as_junk,
            "ews-find-folders": find_folders,
            "ews-get-items-from-folder": get_items_from_folder,
            "ews-get-items": get_items,
            "ews-get-folder": get_folder,
            "ews-expand-group": get_expanded_group,
            "ews-mark-items-as-read": mark_item_as_read,
            "send-mail": send_email,
        }

        # commands that may return multiple results or non-note result
        special_output_commands = {
            "ews-get-attachment": fetch_attachments_for_message,
            "ews-delete-attachment": delete_attachments_for_message,
            "ews-get-items-as-eml": get_item_as_eml,
        }
        # system commands:
        if command == "test-module":
            is_test_module = True
            demisto.results(test_module(client, params.get('max_fetch')))
        elif command == "fetch-incidents":
            last_run = demisto.getLastRun()
            incidents = fetch_emails_as_incidents(client, last_run)
            demisto.incidents(incidents)

        # special outputs commands
        elif command in special_output_commands:
            demisto.results(special_output_commands[command](client, **args))  # type: ignore[operator]

        # normal commands
        else:
            output = normal_commands[command](client, **args)  # type: ignore[operator]
            return_outputs(*output)

    except Exception as e:
        start_logging()
        debug_log = log_stream.getvalue()  # type: ignore[union-attr]
        error_message_simple = ""

        # Office365 regular maintenance case
        if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance(
            e, ErrorMailboxMoveInProgress
        ):
            log_message = (
                "Office365 is undergoing load balancing operations. "
                "As a result, the service is temporarily unavailable."
            )
            if demisto.command() == "fetch-incidents":
                # Treat maintenance as an empty fetch, not a failure.
                demisto.info(log_message)
                demisto.incidents([])
                sys.exit(0)
            if is_test_module:
                demisto.results(
                    log_message + " Please retry the instance configuration test."
                )
                sys.exit(0)
            error_message_simple = log_message + " Please retry your request."

        if isinstance(e, ConnectionError):
            error_message_simple = (
                "Could not connect to the server.\n"
                f"Additional information: {str(e)}"
            )
        else:
            if is_test_module and isinstance(e, MalformedResponseError):
                error_message_simple = (
                    "Got invalid response from the server.\n"
                )

            # Legacy error handling
            if "Status code: 401" in debug_log:
                error_message_simple = (
                    "Got unauthorized from the server. "
                )

            if "Status code: 503" in debug_log:
                error_message_simple = (
                    "Got timeout from the server. "
                    "Probably the server is not reachable with the current settings. "
                )

        if not error_message_simple:
            error_message = error_message_simple = str(e)
        else:
            error_message = error_message_simple + "\n" + str(e)

        stacktrace = traceback.format_exc()
        if stacktrace:
            error_message += "\nFull stacktrace:\n" + stacktrace

        if debug_log:
            error_message += "\nFull debug log:\n" + debug_log

        if demisto.command() == "fetch-incidents":
            # Let the server record the fetch failure verbatim.
            raise

        if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
            return_error(
                message="Selected invalid field, please specify valid field name.",
                error=e,
            )
        if is_test_module:
            demisto.results(error_message_simple)
        else:
            demisto.results(
                {
                    "Type": entryTypes["error"],
                    "ContentsFormat": formats["text"],
                    "Contents": error_message_simple,
                }
            )
        demisto.error(f"{e.__class__.__name__}: {error_message}")
    finally:
        # Always release exchangelib resources and detach the debug logger.
        exchangelib_cleanup()
        if log_stream:
            try:
                logging.getLogger().removeHandler(log_handler)  # type: ignore
                log_stream.close()
            except Exception as ex:
                demisto.error(
                    "EWS: unexpected exception when trying to remove log handler: {}".format(
                        ex
                    )
                )
def process_main():
    """Entry point used when the integration runs in a separate process."""
    # Reattach fd 0 as stdin - presumably the child process spawned by
    # multiprocessing loses the parent's stdin wrapper (TODO confirm).
    sys.stdin = os.fdopen(0, "r")
    sub_main()
def main():
    """Run the integration, optionally isolated in a child process."""
    # When running big queries, like 'ews-search-mailbox' the memory might not freed by the garbage
    # collector. `separate_process` flag will run the integration on a separate process that will prevent
    # memory leakage.
    separate_process = demisto.params().get("separate_process", False)
    demisto.debug("Running as separate_process: {}".format(separate_process))
    if separate_process:
        try:
            p = Process(target=process_main)
            p.start()
            p.join()
        except Exception as ex:
            demisto.error("Failed starting Process: {}".format(ex))
    else:
        sub_main()
from MicrosoftApiModule import * # noqa: E402
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
| true | true |
f728c391f0d3f70e7cfa1e9837dfcc22ca3a34d2 | 3,369 | py | Python | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | 15 | 2017-09-19T05:40:42.000Z | 2021-04-23T00:59:24.000Z | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | null | null | null | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | 3 | 2018-02-14T00:02:09.000Z | 2020-07-26T15:18:55.000Z | """
Tests for PyPoE.poe.patchserver
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | tests/PyPoE/poe/test_patchserver.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: f728c391f0d3f70e7cfa1e9837dfcc22ca3a34d2 $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Tests for patchserver.py
Agreement
===============================================================================
See PyPoE/LICENSE
TODO
===============================================================================
Testing on live data is difficult, since we can't verify it was downloaded
correctly as the contents of the files may change. Perhaps find a good
candidate for testing.
"""
# =============================================================================
# Imports
# =============================================================================
# Python
import os
import re
from urllib.error import HTTPError
from tempfile import TemporaryDirectory
# 3rd-party
import pytest
# self
from PyPoE.poe import patchserver
# =============================================================================
# Setup
# =============================================================================
_TEST_URL = 'Data/Wordlists.dat'
_re_version = re.compile(r'[\d]+\.[\d]+\.[\d]+\.[\d]+', re.UNICODE)
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture(scope='module')
def patch():
    """Provide one shared patchserver.Patch instance for the whole module."""
    return patchserver.Patch()
# =============================================================================
# Tests
# =============================================================================
class TestPatch(object):
    """Tests for patchserver.Patch download behaviour.

    NOTE(review): these tests appear to contact the live patch server -
    they require network access to pass.
    """

    def test_dst_file(self, patch):
        # Download to an explicit destination file path.
        with TemporaryDirectory() as temp:
            patch.download(
                file_path=_TEST_URL,
                dst_file=os.path.join(temp, 'test.txt'),
            )

    def test_dst_dir(self, patch):
        # Download into a destination directory instead of a named file.
        with TemporaryDirectory() as temp:
            patch.download(
                file_path=_TEST_URL,
                dst_dir=temp,
            )

    def test_missing_dst_error(self, patch):
        # Neither dst_file nor dst_dir given -> ValueError.
        with pytest.raises(ValueError):
            patch.download(
                file_path=_TEST_URL,
            )

    def test_file_not_found(self, patch):
        # Unknown remote path -> HTTPError from the patch server.
        with pytest.raises(HTTPError):
            patch.download_raw(
                file_path='THIS_SHOULD_NOT_EXIST.FILE',
            )

    def test_version(self, patch):
        # The reported version must follow the dotted x.x.x.x scheme.
        assert _re_version.match(patch.version) is not None, 'patch.version ' \
            'result is expected to match the x.x.x.x format'
| 33.356436 | 122 | 0.344613 |
import os
import re
from urllib.error import HTTPError
from tempfile import TemporaryDirectory
import pytest
from PyPoE.poe import patchserver
_TEST_URL = 'Data/Wordlists.dat'
_re_version = re.compile(r'[\d]+\.[\d]+\.[\d]+\.[\d]+', re.UNICODE)
@pytest.fixture(scope='module')
def patch():
return patchserver.Patch()
class TestPatch(object):
def test_dst_file(self, patch):
with TemporaryDirectory() as temp:
patch.download(
file_path=_TEST_URL,
dst_file=os.path.join(temp, 'test.txt'),
)
def test_dst_dir(self, patch):
with TemporaryDirectory() as temp:
patch.download(
file_path=_TEST_URL,
dst_dir=temp,
)
def test_missing_dst_error(self, patch):
with pytest.raises(ValueError):
patch.download(
file_path=_TEST_URL,
)
def test_file_not_found(self, patch):
with pytest.raises(HTTPError):
patch.download_raw(
file_path='THIS_SHOULD_NOT_EXIST.FILE',
)
def test_version(self, patch):
assert _re_version.match(patch.version) is not None, 'patch.version ' \
'result is expected to match the x.x.x.x format'
| true | true |
f728c39309dad5b00d332f9ff13663aed2eca343 | 1,076 | py | Python | setup.py | bockstaller/pretix-eventparts | b5cb8f89cb86677facc0509f9a36cf9359c94534 | [
"Apache-2.0"
] | null | null | null | setup.py | bockstaller/pretix-eventparts | b5cb8f89cb86677facc0509f9a36cf9359c94534 | [
"Apache-2.0"
] | null | null | null | setup.py | bockstaller/pretix-eventparts | b5cb8f89cb86677facc0509f9a36cf9359c94534 | [
"Apache-2.0"
] | null | null | null | import os
from distutils.command.build import build
from django.core import management
from setuptools import find_packages, setup
from pretix_eventparts import __version__
try:
with open(
os.path.join(os.path.dirname(__file__), "README.rst"), encoding="utf-8"
) as f:
long_description = f.read()
except Exception:
long_description = ""
class CustomBuild(build):
    """Build command that compiles gettext catalogs before the normal build."""

    def run(self):
        # Compile Django translation message files prior to packaging.
        management.call_command("compilemessages", verbosity=1)
        build.run(self)
cmdclass = {"build": CustomBuild}
setup(
name="pretix-eventparts",
version=__version__,
description="Short description",
long_description=long_description,
url="https://github.com/bockstaller/pretix-eventparts",
author="Lukas Bockstaller",
author_email="lukas.bockstaller@posteo.de",
license="Apache",
install_requires=[],
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
cmdclass=cmdclass,
entry_points="""
[pretix.plugin]
pretix_eventparts=pretix_eventparts:PretixPluginMeta
""",
)
| 23.391304 | 79 | 0.712825 | import os
from distutils.command.build import build
from django.core import management
from setuptools import find_packages, setup
from pretix_eventparts import __version__
try:
with open(
os.path.join(os.path.dirname(__file__), "README.rst"), encoding="utf-8"
) as f:
long_description = f.read()
except Exception:
long_description = ""
class CustomBuild(build):
def run(self):
management.call_command("compilemessages", verbosity=1)
build.run(self)
cmdclass = {"build": CustomBuild}
setup(
name="pretix-eventparts",
version=__version__,
description="Short description",
long_description=long_description,
url="https://github.com/bockstaller/pretix-eventparts",
author="Lukas Bockstaller",
author_email="lukas.bockstaller@posteo.de",
license="Apache",
install_requires=[],
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
cmdclass=cmdclass,
entry_points="""
[pretix.plugin]
pretix_eventparts=pretix_eventparts:PretixPluginMeta
""",
)
| true | true |
f728c4ae68c4daf5e29a11718ad58e5fdf400b10 | 498 | py | Python | algebreb/listas/ejemplos/lista_ecuaciones_univariables/lista_ecuaciones_grado1.py | Ivan0123456789/algebreb | c1548df99a7fc960b73239d296db4e2c914926cd | [
"MIT"
] | null | null | null | algebreb/listas/ejemplos/lista_ecuaciones_univariables/lista_ecuaciones_grado1.py | Ivan0123456789/algebreb | c1548df99a7fc960b73239d296db4e2c914926cd | [
"MIT"
] | null | null | null | algebreb/listas/ejemplos/lista_ecuaciones_univariables/lista_ecuaciones_grado1.py | Ivan0123456789/algebreb | c1548df99a7fc960b73239d296db4e2c914926cd | [
"MIT"
] | 1 | 2021-12-13T03:20:08.000Z | 2021-12-13T03:20:08.000Z | from algebreb.listas.listas_ecuaciones_univariables import ListaEcuacionesGrado1
from sympy.abc import a, b, c, x, y , z
import json
caracteristicas = {}
caracteristicas['cantidad'] = 10
caracteristicas['variables'] = [a]
caracteristicas['dominio'] = 'ZZ'
caracteristicas['fraccion'] = False
caracteristicas['cmin'] = 1
caracteristicas['cmax'] = 10
leg1 = ListaEcuacionesGrado1(caracteristicas)
print(leg1.as_str_latex())
json_object = json.dumps(leg1.as_str_latex(), indent=4)
print(json_object) | 31.125 | 80 | 0.777108 | from algebreb.listas.listas_ecuaciones_univariables import ListaEcuacionesGrado1
from sympy.abc import a, b, c, x, y , z
import json
caracteristicas = {}
caracteristicas['cantidad'] = 10
caracteristicas['variables'] = [a]
caracteristicas['dominio'] = 'ZZ'
caracteristicas['fraccion'] = False
caracteristicas['cmin'] = 1
caracteristicas['cmax'] = 10
leg1 = ListaEcuacionesGrado1(caracteristicas)
print(leg1.as_str_latex())
json_object = json.dumps(leg1.as_str_latex(), indent=4)
print(json_object) | true | true |
f728c5406b686bacc61a455b3a183b0b5467af90 | 5,386 | py | Python | conflowgen/tests/previews/test_vehicle_capacity_exceeded_preview_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2022-02-16T11:44:42.000Z | 2022-02-24T20:02:17.000Z | conflowgen/tests/previews/test_vehicle_capacity_exceeded_preview_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 90 | 2021-12-08T14:05:44.000Z | 2022-03-24T08:53:31.000Z | conflowgen/tests/previews/test_vehicle_capacity_exceeded_preview_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2021-12-07T16:05:15.000Z | 2022-02-16T08:24:07.000Z | import datetime
import unittest
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.previews.vehicle_capacity_exceeded_preview_report import \
VehicleCapacityExceededPreviewReport
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestVehicleCapacityExceededPreviewReport(unittest.TestCase):
    # Exercises VehicleCapacityExceededPreviewReport against a fresh in-memory
    # SQLite database, so no external state is read or written.

    def setUp(self) -> None:
        """Create the container database in memory, seed the mode-of-transport
        distribution, and configure a two-week container flow window."""
        self.sqlite_db = setup_sqlite_in_memory_db()
        self.sqlite_db.create_tables([
            Schedule,
            ModeOfTransportDistribution,
            ContainerFlowGenerationProperties
        ])
        # Outer key: vehicle type that delivers a container; inner dict: how
        # those containers leave the terminal (fractions sum to 1).  Trucks,
        # trains, and barges hand over only to feeders and deep sea vessels
        # (50/50); vessels distribute across all five modes.
        ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
            ModeOfTransport.truck: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.train: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.barge: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.feeder: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            },
            ModeOfTransport.deep_sea_vessel: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            }
        })
        now = datetime.datetime.now()
        # Two-week generation window starting now.
        ContainerFlowGenerationProperties.create(
            start_date=now,
            end_date=now + datetime.timedelta(weeks=2)
        ).save()  # mostly use default values
        self.preview_report = VehicleCapacityExceededPreviewReport()
        self.preview_report.reload()
    def test_report_with_no_schedules(self):
        """If no schedules are provided, no flows exist, and nothing can be exceeded"""
        actual_report = self.preview_report.get_report_as_text()
        # NOTE(review): trucks are listed with a maximum capacity of -1.0 --
        # presumably a sentinel for "not limited"; confirm against the report
        # implementation.
        expected_report = """
vehicle type maximum capacity (in TEU) required capacity (in TEU) exceeded difference (in TEU)
deep sea vessel 0.0 0.0 no 0.0
feeder 0.0 0.0 no 0.0
barge 0.0 0.0 no 0.0
train 0.0 0.0 no 0.0
truck -1.0 0.0 no 0.0
(rounding errors might exist)
"""
        self.assertEqual(expected_report, actual_report)
    def test_inbound_with_single_arrival_schedules(self):
        """A feeder delivers containers for every vehicle type. For the types truck and feeder it is fine, deep sea
        vessels, barges and trains do not exist and thus their capacity is exceeded."""
        one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
        schedule = Schedule.create(
            vehicle_type=ModeOfTransport.feeder,
            service_name="TestFeederService",
            vehicle_arrives_at=one_week_later.date(),
            vehicle_arrives_at_time=one_week_later.time(),
            average_vehicle_capacity=400,
            average_moved_capacity=300,
            vehicle_arrives_every_k_days=-1  # -1: one-off service, no recurrence (cf. test name)
        )
        schedule.save()
        actual_report = self.preview_report.get_report_as_text()
        expected_report = """
vehicle type maximum capacity (in TEU) required capacity (in TEU) exceeded difference (in TEU)
deep sea vessel 0.0 75.0 yes 75.0
feeder 360.0 75.0 no 0.0
barge 0.0 30.0 yes 30.0
train 0.0 120.0 yes 120.0
truck -1.0 60.0 no 0.0
(rounding errors might exist)
"""
        self.assertEqual(expected_report, actual_report)
| 49.87037 | 115 | 0.576866 | import datetime
import unittest
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.previews.vehicle_capacity_exceeded_preview_report import \
VehicleCapacityExceededPreviewReport
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestVehicleCapacityExceededPreviewReport(unittest.TestCase):
def setUp(self) -> None:
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
ModeOfTransportDistribution,
ContainerFlowGenerationProperties
])
ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
ModeOfTransport.truck: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.train: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.barge: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.feeder: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
},
ModeOfTransport.deep_sea_vessel: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
}
})
now = datetime.datetime.now()
ContainerFlowGenerationProperties.create(
start_date=now,
end_date=now + datetime.timedelta(weeks=2)
).save()
self.preview_report = VehicleCapacityExceededPreviewReport()
self.preview_report.reload()
def test_report_with_no_schedules(self):
actual_report = self.preview_report.get_report_as_text()
expected_report = """
vehicle type maximum capacity (in TEU) required capacity (in TEU) exceeded difference (in TEU)
deep sea vessel 0.0 0.0 no 0.0
feeder 0.0 0.0 no 0.0
barge 0.0 0.0 no 0.0
train 0.0 0.0 no 0.0
truck -1.0 0.0 no 0.0
(rounding errors might exist)
"""
self.assertEqual(expected_report, actual_report)
def test_inbound_with_single_arrival_schedules(self):
one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=one_week_later.date(),
vehicle_arrives_at_time=one_week_later.time(),
average_vehicle_capacity=400,
average_moved_capacity=300,
vehicle_arrives_every_k_days=-1
)
schedule.save()
actual_report = self.preview_report.get_report_as_text()
expected_report = """
vehicle type maximum capacity (in TEU) required capacity (in TEU) exceeded difference (in TEU)
deep sea vessel 0.0 75.0 yes 75.0
feeder 360.0 75.0 no 0.0
barge 0.0 30.0 yes 30.0
train 0.0 120.0 yes 120.0
truck -1.0 60.0 no 0.0
(rounding errors might exist)
"""
self.assertEqual(expected_report, actual_report)
| true | true |
f728c57ea266b83ce894b550bd353ddbc7ef393c | 22,031 | py | Python | preprocessor/legacy_functions/transform_column_values.py | clokman/KFIR | 01c9bad491aa5c104adce38294ee2b15bd49b7ec | [
"MIT"
] | 1 | 2021-12-20T03:23:42.000Z | 2021-12-20T03:23:42.000Z | preprocessor/legacy_functions/transform_column_values.py | clokman/KFIR | 01c9bad491aa5c104adce38294ee2b15bd49b7ec | [
"MIT"
] | null | null | null | preprocessor/legacy_functions/transform_column_values.py | clokman/KFIR | 01c9bad491aa5c104adce38294ee2b15bd49b7ec | [
"MIT"
] | 1 | 2022-03-23T08:37:03.000Z | 2022-03-23T08:37:03.000Z | def transform_column_values(target_replacement_dictionary, target_column_headers_list, dataset):
"""
Replaces values in columns by using a dictionary of conversions (e.g., in order to quantify likert scales).
:param target_replacement_dictionary: (dict) A dictionary in which *keys* are old (target) values and
dictionary *values* are new (replacement) values.
:param target_column_headers_list: (str) A list of headers as a list of strings, which specifies in which
columns the transformation will occur.
:param dataset: (var) A variable that holds the dataset. Headers must be included.
:returns:
Transforms the original dataset, and also returns it.
Assignment of output to a variable is not necessary; inputted dataset will be changed without assignment
as well.
:example (single target column as input):
>>> from preprocessor.test_data.demo_data import demo_data
>>> from preprocessor.legacy_functions.print_columns import print_columns
>>> print_columns("consent", demo_data)
<BLANKLINE>
consent is: ['Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel', 'Ja, ik neem deel']
>>> transform_column_values({"Ja, ik neem deel":1, "no":2}, "consent", demo_data)
[['date', 'consent', 'id', 'sex', 'age', 'edu', 'timezone_change', 'sleep_disorder', 'nightshift', 'psy_disorder', 'wake', 'young_kids', 'partn', 'btptr_1', 'btptr_2', 'btptr_3', 'btptr_4', 'btptr_5', 'btptr_6', 'btptr_7', 'btptr_8', 'btptr_9', 'ats_1', 'atbr_1', 'sq_1', 'sq_2', 'sq_3', 'sq_4', 'sq_5', 'sq_6', 'atbr_2', 'atbr_3', 'ats_2', 'ats_3', 'chron_1', 'chron_2', 'chron_3', 'chron_4', 'chron_5', 'chron_6', 'chron_7', 'chron_8', 'sc_1', 'sc_2', 'sc_3', 'sc_4', 'sc_5', 'sc_6', 'sc_7', 'sc_8', 'sc_9', 'sc_10', 'sc_11', 'sc_12', 'sc_13'], ['2017/04/01 8:35:57 p.m. EET', 1, 'EM11', 'Vrouw', '44', 'HBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Soms', 'soms', '(bijna) altijd', '(bijna) altijd', 'soms', '(bijna) nooit', 'soms', '(bijna) altijd', '(bijna) nooit', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', 'nooit', 'nooit', 'een beetje', 'erg goed', '(bijna) nooit', '(bijna) nooit', 'vaak', '(bijna) altijd', 'helemaal eens', 'helemaal oneens', 'helemaal oneens', 'helemaal eens', 'oneens', 'helemaal eens', 'helemaal eens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'helemaal oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'eens', 'oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 8:15:27 p.m. 
EET', 1, 'gh93', 'Man', '54', 'WO', 'Nee', 'Ja', 'Nee', 'Ja', 'Ja', 'Nee', 'Soms', 'vaak', 'vaak', 'regelmatig', 'soms', 'soms', 'vaak', '(bijna) nooit', 'soms', '(bijna) altijd', 'vaak', '(bijna) nooit', 'binnen een uur', '1 nacht per week', '2-3 keer per nacht', 'nooit', 'heel vaak', 'redelijk goed', '(bijna) nooit', '(bijna) altijd', 'vaak', 'vaak', 'even vaak eens als oneens', 'eens', 'helemaal eens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'helemaal eens', 'oneens', 'eens', 'helemaal oneens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens'], ['2017/04/01 9:01:28 a.m. EET', 1, 'AB64', 'Vrouw', '49', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', 'vaak', 'soms', 'soms', 'soms', 'vaak', 'regelmatig', '(bijna) nooit', 'vaak', 'regelmatig', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', '2-3 keer per nacht', 'nooit', 'helemaal niet', 'goed', '(bijna) nooit', 'soms', '(bijna) nooit', '(bijna) altijd', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'oneens', 'eens', 'eens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 5:17:20 p.m. 
EET', 1, 'FT12', 'Man', '51', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Niet van toepassing', 'regelmatig', 'vaak', 'vaak', 'soms', 'soms', 'soms', 'regelmatig', 'soms', 'vaak', 'soms', 'soms', 'binnen een kwartier', '1 nacht per week', '4-5 keer per nacht', '1 nacht per week', 'een beetje', 'redelijk goed', 'soms', 'soms', 'soms', 'soms', 'eens', 'oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'eens', 'eens', 'oneens', 'oneens', 'eens', 'eens', 'oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'eens', 'eens', 'eens'], ['2017/04/01 9:29:43 p.m. EET', 1, 'MJ87', 'Vrouw', '23', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Niet van toepassing', 'regelmatig', 'regelmatig', 'vaak', 'soms', 'soms', 'soms', 'soms', 'soms', 'regelmatig', '(bijna) nooit', 'soms', 'binnen een half uur', '1 nacht per week', 'nooit', '2-3 nachten per week', 'een beetje', 'goed', 'soms', 'soms', 'soms', '(bijna) altijd', 'even vaak eens als oneens', 'helemaal oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'oneens', 'eens', 'oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 11:08:39 p.m. 
EET', 1, 'PM61', 'Man', '25', 'HBO', 'Nee', 'Nee', 'Nee', 'Ja', 'Ja', 'Nee', 'Nooit', 'regelmatig', 'regelmatig', 'soms', 'vaak', 'regelmatig', 'regelmatig', 'regelmatig', 'regelmatig', 'soms', 'regelmatig', 'vaak', 'binnen een uur', '2-3 nachten per week', 'nooit', 'nooit', 'enigszins', 'redelijk goed', 'vaak', 'regelmatig', 'vaak', 'vaak', 'eens', 'helemaal eens', 'oneens', 'helemaal oneens', 'oneens', 'oneens', 'eens', 'eens', 'oneens', 'eens', 'eens', 'helemaal oneens', 'eens', 'oneens', 'helemaal eens', 'helemaal oneens', 'oneens', 'eens', 'eens', 'eens', 'eens'], ['2017/04/01 10:53:53 a.m. EET', 1, 'JL25', 'Vrouw', '44', 'HBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Soms', 'vaak', 'regelmatig', 'regelmatig', 'soms', 'regelmatig', 'regelmatig', 'soms', 'soms', 'regelmatig', 'soms', 'soms', 'binnen een half uur', '1 nacht per week', '2-3 keer per nacht', '2-3 nachten per week', 'een beetje', 'redelijk goed', 'soms', 'soms', 'regelmatig', 'regelmatig', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'helemaal eens', 'oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 12:22:06 a.m. 
EET', 1, 'GW98', 'Man', '28', 'WO', 'Nee', 'Nee', 'Ja', 'Nee', 'Nee', 'Nee', 'Nooit', '(bijna) altijd', '(bijna) nooit', 'vaak', '(bijna) altijd', 'soms', '(bijna) altijd', '(bijna) nooit', 'regelmatig', 'soms', 'regelmatig', 'vaak', 'binnen een kwartier', 'nooit', 'nooit', 'nooit', 'een beetje', 'goed', '(bijna) altijd', '(bijna) altijd', '(bijna) nooit', '(bijna) altijd', 'oneens', 'even vaak eens als oneens', 'eens', 'helemaal oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'helemaal oneens', 'helemaal eens', 'oneens', 'helemaal eens', 'helemaal oneens', 'eens', 'eens', 'oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 7:35:17 p.m. EET', 1, 'HA61', 'Man', '51', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', '(bijna) nooit', 'vaak', 'vaak', 'soms', 'soms', 'soms', 'regelmatig', 'soms', 'regelmatig', '(bijna) nooit', '(bijna) nooit', 'binnen een half uur', 'nooit', '2-3 keer per nacht', '4-5 nachten per week', 'vaak', 'slecht', '(bijna) nooit', 'soms', '(bijna) nooit', 'regelmatig', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'helemaal oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'helemaal oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 8:55:08 a.m. 
EET', 1, 'wh18', 'Vrouw', '70', 'MBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nooit', 'soms', 'soms', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', '2-3 keer per nacht', '1 nacht per week', 'helemaal niet', 'redelijk goed', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', 'vaak', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'eens', 'oneens', 'oneens', 'eens', 'oneens', 'helemaal oneens', 'helemaal oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'eens', 'oneens', 'oneens', 'eens', 'oneens', 'oneens'], ['2017/04/01 8:14:46 p.m. EET', 1, 'he46', 'Man', '44', 'WO', 'Nee', 'Ja', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', 'vaak', 'regelmatig', 'soms', 'vaak', 'vaak', 'vaak', '(bijna) nooit', 'vaak', 'soms', 'soms', 'soms', 'binnen een half uur', '2-3 nachten per week', '1 keer per nacht', '1 nacht per week', 'een beetje', 'slecht', 'vaak', 'vaak', 'soms', 'vaak', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens']]
>>> print_columns("consent", demo_data)
<BLANKLINE>
consent is: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
:example (list of target columns as input):
>>> replacements_dictionary = {"oneens":1, "eens":2, "even vaak eens als oneens":3, "helemaal oneens":4, "x":5}
>>> print_columns("sc_9", demo_data)
<BLANKLINE>
sc_9 is: ['oneens', 'even vaak eens als oneens', 'eens', 'eens', 'oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'helemaal oneens', 'oneens', 'eens']
>>> transform_column_values(replacements_dictionary, ["sc_9", "sc_10"], demo_data)
[['date', 'consent', 'id', 'sex', 'age', 'edu', 'timezone_change', 'sleep_disorder', 'nightshift', 'psy_disorder', 'wake', 'young_kids', 'partn', 'btptr_1', 'btptr_2', 'btptr_3', 'btptr_4', 'btptr_5', 'btptr_6', 'btptr_7', 'btptr_8', 'btptr_9', 'ats_1', 'atbr_1', 'sq_1', 'sq_2', 'sq_3', 'sq_4', 'sq_5', 'sq_6', 'atbr_2', 'atbr_3', 'ats_2', 'ats_3', 'chron_1', 'chron_2', 'chron_3', 'chron_4', 'chron_5', 'chron_6', 'chron_7', 'chron_8', 'sc_1', 'sc_2', 'sc_3', 'sc_4', 'sc_5', 'sc_6', 'sc_7', 'sc_8', 'sc_9', 'sc_10', 'sc_11', 'sc_12', 'sc_13'], ['2017/04/01 8:35:57 p.m. EET', 1, 'EM11', 'Vrouw', '44', 'HBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Soms', 'soms', '(bijna) altijd', '(bijna) altijd', 'soms', '(bijna) nooit', 'soms', '(bijna) altijd', '(bijna) nooit', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', 'nooit', 'nooit', 'een beetje', 'erg goed', '(bijna) nooit', '(bijna) nooit', 'vaak', '(bijna) altijd', 'helemaal eens', 'helemaal oneens', 'helemaal oneens', 'helemaal eens', 'oneens', 'helemaal eens', 'helemaal eens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'helemaal oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'eens', 1, 2, 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 8:15:27 p.m. 
EET', 1, 'gh93', 'Man', '54', 'WO', 'Nee', 'Ja', 'Nee', 'Ja', 'Ja', 'Nee', 'Soms', 'vaak', 'vaak', 'regelmatig', 'soms', 'soms', 'vaak', '(bijna) nooit', 'soms', '(bijna) altijd', 'vaak', '(bijna) nooit', 'binnen een uur', '1 nacht per week', '2-3 keer per nacht', 'nooit', 'heel vaak', 'redelijk goed', '(bijna) nooit', '(bijna) altijd', 'vaak', 'vaak', 'even vaak eens als oneens', 'eens', 'helemaal eens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'helemaal eens', 'oneens', 'eens', 'helemaal oneens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 3, 2, 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens'], ['2017/04/01 9:01:28 a.m. EET', 1, 'AB64', 'Vrouw', '49', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', 'vaak', 'soms', 'soms', 'soms', 'vaak', 'regelmatig', '(bijna) nooit', 'vaak', 'regelmatig', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', '2-3 keer per nacht', 'nooit', 'helemaal niet', 'goed', '(bijna) nooit', 'soms', '(bijna) nooit', '(bijna) altijd', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'oneens', 'eens', 'eens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'oneens', 2, 3, 'oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 5:17:20 p.m. 
EET', 1, 'FT12', 'Man', '51', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Niet van toepassing', 'regelmatig', 'vaak', 'vaak', 'soms', 'soms', 'soms', 'regelmatig', 'soms', 'vaak', 'soms', 'soms', 'binnen een kwartier', '1 nacht per week', '4-5 keer per nacht', '1 nacht per week', 'een beetje', 'redelijk goed', 'soms', 'soms', 'soms', 'soms', 'eens', 'oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'eens', 'eens', 'oneens', 'oneens', 'eens', 'eens', 'oneens', 'even vaak eens als oneens', 2, 1, 'eens', 'eens', 'eens'], ['2017/04/01 9:29:43 p.m. EET', 1, 'MJ87', 'Vrouw', '23', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Niet van toepassing', 'regelmatig', 'regelmatig', 'vaak', 'soms', 'soms', 'soms', 'soms', 'soms', 'regelmatig', '(bijna) nooit', 'soms', 'binnen een half uur', '1 nacht per week', 'nooit', '2-3 nachten per week', 'een beetje', 'goed', 'soms', 'soms', 'soms', '(bijna) altijd', 'even vaak eens als oneens', 'helemaal oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'helemaal oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'oneens', 'eens', 'oneens', 'even vaak eens als oneens', 1, 1, 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 11:08:39 p.m. EET', 1, 'PM61', 'Man', '25', 'HBO', 'Nee', 'Nee', 'Nee', 'Ja', 'Ja', 'Nee', 'Nooit', 'regelmatig', 'regelmatig', 'soms', 'vaak', 'regelmatig', 'regelmatig', 'regelmatig', 'regelmatig', 'soms', 'regelmatig', 'vaak', 'binnen een uur', '2-3 nachten per week', 'nooit', 'nooit', 'enigszins', 'redelijk goed', 'vaak', 'regelmatig', 'vaak', 'vaak', 'eens', 'helemaal eens', 'oneens', 'helemaal oneens', 'oneens', 'oneens', 'eens', 'eens', 'oneens', 'eens', 'eens', 'helemaal oneens', 'eens', 'oneens', 'helemaal eens', 'helemaal oneens', 1, 2, 'eens', 'eens', 'eens'], ['2017/04/01 10:53:53 a.m. 
EET', 1, 'JL25', 'Vrouw', '44', 'HBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Soms', 'vaak', 'regelmatig', 'regelmatig', 'soms', 'regelmatig', 'regelmatig', 'soms', 'soms', 'regelmatig', 'soms', 'soms', 'binnen een half uur', '1 nacht per week', '2-3 keer per nacht', '2-3 nachten per week', 'een beetje', 'redelijk goed', 'soms', 'soms', 'regelmatig', 'regelmatig', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'eens', 'even vaak eens als oneens', 'helemaal eens', 'oneens', 3, 1, 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 12:22:06 a.m. EET', 1, 'GW98', 'Man', '28', 'WO', 'Nee', 'Nee', 'Ja', 'Nee', 'Nee', 'Nee', 'Nooit', '(bijna) altijd', '(bijna) nooit', 'vaak', '(bijna) altijd', 'soms', '(bijna) altijd', '(bijna) nooit', 'regelmatig', 'soms', 'regelmatig', 'vaak', 'binnen een kwartier', 'nooit', 'nooit', 'nooit', 'een beetje', 'goed', '(bijna) altijd', '(bijna) altijd', '(bijna) nooit', '(bijna) altijd', 'oneens', 'even vaak eens als oneens', 'eens', 'helemaal oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'helemaal oneens', 'helemaal eens', 'oneens', 'helemaal eens', 'helemaal oneens', 2, 2, 'oneens', 'eens', 'even vaak eens als oneens'], ['2017/04/01 7:35:17 p.m. 
EET', 1, 'HA61', 'Man', '51', 'WO', 'Nee', 'Nee', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', '(bijna) nooit', 'vaak', 'vaak', 'soms', 'soms', 'soms', 'regelmatig', 'soms', 'regelmatig', '(bijna) nooit', '(bijna) nooit', 'binnen een half uur', 'nooit', '2-3 keer per nacht', '4-5 nachten per week', 'vaak', 'slecht', '(bijna) nooit', 'soms', '(bijna) nooit', 'regelmatig', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 'helemaal oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'helemaal oneens', 'eens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 4, 1, 'eens', 'even vaak eens als oneens', 'oneens'], ['2017/04/01 8:55:08 a.m. EET', 1, 'wh18', 'Vrouw', '70', 'MBO', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nee', 'Nooit', 'soms', 'soms', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', '(bijna) altijd', '(bijna) nooit', '(bijna) nooit', 'binnen een kwartier', 'nooit', '2-3 keer per nacht', '1 nacht per week', 'helemaal niet', 'redelijk goed', '(bijna) nooit', '(bijna) nooit', '(bijna) nooit', 'vaak', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'eens', 'oneens', 'oneens', 'eens', 'oneens', 'helemaal oneens', 'helemaal oneens', 'even vaak eens als oneens', 'oneens', 'oneens', 'eens', 1, 1, 'eens', 'oneens', 'oneens'], ['2017/04/01 8:14:46 p.m. 
EET', 1, 'he46', 'Man', '44', 'WO', 'Nee', 'Ja', 'Nee', 'Nee', 'Ja', 'Nee', 'Niet van toepassing', 'vaak', 'regelmatig', 'soms', 'vaak', 'vaak', 'vaak', '(bijna) nooit', 'vaak', 'soms', 'soms', 'soms', 'binnen een half uur', '2-3 nachten per week', '1 keer per nacht', '1 nacht per week', 'een beetje', 'slecht', 'vaak', 'vaak', 'soms', 'vaak', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens', 'eens', 'even vaak eens als oneens', 2, 3, 'even vaak eens als oneens', 'even vaak eens als oneens', 'even vaak eens als oneens']]
>>> print_columns("sc_9", demo_data)
<BLANKLINE>
sc_9 is: [1, 3, 2, 2, 1, 1, 3, 2, 4, 1, 2]
"""
#############################################################################################################
from preprocessor.legacy_functions.select_column import select_column
from preprocessor.legacy_functions.replace_column import replace_column
from preprocessor.legacy_functions.print_columns import print_columns
# If target_column_headers_list is not a list but a string (i.e., target is a single column)...
# Convert this string to a single list item so that the upcoming lines in the function can still take it as input.
if type(target_column_headers_list) is str: # If parameter is string
target_column_headers_list = [target_column_headers_list] # Convert it to a list
# Separate headers from data
# headers_list = get_headers(dataset)
# data = get_data(dataset)
# Separate the dictionary to targets and replacements
targets_list = []
replacements_list = []
for i, key in enumerate(target_replacement_dictionary): # iterate over each item in the input dictionary
targets_list.append(key) # add keys to targets list
replacements_list.append(target_replacement_dictionary[key]) # add values to replacements list
# Extract values of the specified column in the given dataset by using a separate headers variable
columns = {}
for i, target_column_header in enumerate(target_column_headers_list):
columns[target_column_header] = select_column(target_column_header, dataset)
# and not 'data'; the headers in 'dataset' is necessary for the select_column() to work.
# Search targets in each of the extracted columns, and when the target values are found, replace them
# with their counterparts specific in the dictionary.
for column in columns:
for i, target in enumerate(targets_list):
for j, value in enumerate(columns[column]):
if value == target:
columns[column][j] = replacements_list[i]
# Replace columns within a copy of the provided dataset and return this dataset
for col_name, col_values in columns.items():
replace_column(col_values, col_name, dataset) # and not 'data' but 'dataset', which includes headers
return dataset # and not 'data' but 'dataset', which includes headers
| 268.670732 | 8,889 | 0.631111 | def transform_column_values(target_replacement_dictionary, target_column_headers_list, dataset):
| true | true |
f728c5f4de2c11e28d4609ce1f5201d79318c7af | 1,743 | py | Python | neutron_lbaas/openstack/common/_i18n.py | citrix-openstack-build/neutron-lbaas | 972873d232090b9dae063fd3592447c00b2b74e9 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/openstack/common/_i18n.py | citrix-openstack-build/neutron-lbaas | 972873d232090b9dae063fd3592447c00b2b74e9 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/openstack/common/_i18n.py | citrix-openstack-build/neutron-lbaas | 972873d232090b9dae063fd3592447c00b2b74e9 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
try:
import oslo.i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
_translators = oslo.i18n.TranslatorFactory(domain='neutron_lbaas')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
# code from neutron_lbaas-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x
| 37.891304 | 81 | 0.711991 |
try:
import oslo.i18n
_translators = oslo.i18n.TranslatorFactory(domain='neutron_lbaas')
_ = _translators.primary
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
_ = _LI = _LW = _LE = _LC = lambda x: x
| true | true |
f728c6ed83c3c076db3a41e77244c07ee28a212f | 12,327 | py | Python | boozetools/support/interfaces.py | kjosib/booze-tools | ed3333643e0df99231202c024da8c86a9bb5b2bc | [
"MIT"
] | 10 | 2019-01-24T04:45:56.000Z | 2020-09-16T14:27:29.000Z | boozetools/support/interfaces.py | kjosib/booze-tools | ed3333643e0df99231202c024da8c86a9bb5b2bc | [
"MIT"
] | 40 | 2019-04-10T21:54:38.000Z | 2021-10-04T02:49:11.000Z | boozetools/support/interfaces.py | kjosib/booze-tools | ed3333643e0df99231202c024da8c86a9bb5b2bc | [
"MIT"
] | 1 | 2020-05-22T16:28:02.000Z | 2020-05-22T16:28:02.000Z | """
This file aggregates various abstract classes and exception types which BoozeTools deals in.
There's a principle of object-oriented design which says "ask not for data, but for help."
At first glance the ADTs for FiniteAutomaton and ParseTable appear to respect that dictum
only by its violation, as suggested by all these `get_foobar` methods. What gives?
Quite a bit, actually: The scanning and parsing algorithms are data-driven, but the essential
nature of those algorithms should not care about the internal structure and organization of that
data, so long as the proper relevant questions may be answered. This provides the flexibility
to plug in different types of compaction (or no compaction at all) without a complete re-write.
A good modular interface exposes abstract data types and the operations among those types.
The methods on FiniteAutomaton and ParseTable are exactly those needed for the interesting
data-driven algorithms they support, without regard to their internal structure.
On a separate note, you could make a good case for splitting this file in twain. Maybe later.
"""
from typing import Callable
from . import pretty
END_OF_TOKENS = '<END>' # An agreed artificial "end-of-text" terminal-symbol.
ERROR_SYMBOL = '$error$' # An agreed "error" symbol.
# Note that the scanner should NEVER emit either of the above two symbols.
# However, the error symbol may appear in the right-hand side of a production rule.
DEFAULT_INITIAL_CONDITION = 'INITIAL' # This really is another design constant.
class LanguageError(ValueError):
""" Base class of all exceptions arising from the language machinery. """
class ScannerBlocked(LanguageError):
"""
Raised (by default) if a scanner gets blocked.
Parameters are:
the string offset where it happened.
the current start-condition of the scanner.
"""
def __init__(self, position, condition):
super().__init__(position, condition)
self.position, self.condition = position, condition
class GeneralizedParseError(LanguageError): pass
class ParseErrorListener:
"""
Implement this interface to report/respond to parse errors.
For the moment I'm assuming you have a handle to the scanner so you
can get the input-file location of error events...
"""
def unexpected_token(self, kind, semantic, pds):
"""
The parser has just been given a bogus token.
It will enter recovery mode next.
`kind` and `semantic` are whatever the scanner provided.
`pds` is the state of the push-down automaton at the point of error.
"""
def unexpected_eof(self, pds):
"""
The parser ran out of tokens unexpectedly.
`pds` is the state of the push-down automaton at the point of error.
"""
def will_recover(self, tokens):
"""
The parser has seen a token sequence sufficient to resynchronize.
`tokens` is that sequence. The parser will next commit to this
recovery. (Perhaps there should be a way to prevent it?)
The return value from this method will appear as the semantic content
of the "error" position in the error rule that was ultimately chosen.
"""
def did_not_recover(self):
"""
The parser ran out of tokens while in error-recovery mode, and was
unable to recover.
"""
def cannot_recover(self):
"""
The parser attempted to enter recovery mode, but there are no
recoverable states on the parse stack, so recovery is impossible.
Default behavior is
"""
return self.did_not_recover()
def exception_parsing(self, ex:Exception, message, args):
"""
Q: If a combining function raises an exception, what should happen?
A: It depends.
Maybe the exception should not happen: some extra context might help
you reproduce and debug the problem. Log the context and re-raise.
Maybe certain exceptions represent non-fatal conditions, but you'd
rather separate policy from mechanism. Deal with it and return the
semantic value that should replace the aborted attribute-synthesis.
"""
raise ex from None # Hide the catch-and-rethrow from the traceback.
class Classifier:
"""
Normally a finite-state automaton (FA) based scanner does not treat all possible input
characters as individual and distinct. Rather, all possible characters are mapped
to a much smaller alphabet of symbols which are distinguishable from their neighbors
in terms of their effect on the operation of the FA.
It is this object's responsibility to perform that mapping via method `classify`.
"""
def classify(self, codepoint:int) -> int:
"""
Map a unicode codepoint to a specific numbered character class
such that 0 <= result < self.cardinality()
as known to a corresponding finite automaton.
"""
raise NotImplementedError(type(self))
def cardinality(self) -> int:
""" Return the number of distinct classes which may be emitted by self.classify(...). """
raise NotImplementedError(type(self))
def display(self):
""" Pretty-print a suitable representation of the innards of this classifier's data. """
raise NotImplementedError(type(self))
class FiniteAutomaton:
"""
A finite automaton determines which rule matches but knows nothing about the rules themselves.
This interface captures the operations required to execute the general scanning algorithm.
It is deliberately decoupled from any particular representation of the underlying data.
"""
def jam_state(self): raise NotImplementedError(type(self)) # DFA might provide -1, while NFA might provide an empty frozenset().
def get_condition(self, condition_name) -> tuple:
""" A "condition" is implemented as a pair of state_ids for the normal and begining-of-line cases. """
raise NotImplementedError(type(self))
def get_next_state(self, current_state: int, codepoint: int) -> int:
""" Does what it says on the tin. codepoint will be -1 at end-of-file, so be prepared. """
raise NotImplementedError(type(self))
def get_state_rule_id(self, state_id: int) -> int:
""" Return the associated rule ID if this state is terminal, otherwise None. """
raise NotImplementedError(type(self))
class ParseTable:
"""
This interface captures the operations needed to perform table-driven parsing, as well as a modicum
of reasonable error reporting. Again, no particular structure or organization is implied.
"""
def get_translation(self, symbol) -> int: raise NotImplementedError(type(self, 'Because scanners should not care the order of terminals in the parse table. Zero is reserved for end-of-text.'))
def get_action(self, state_id:int, terminal_id) -> int: raise NotImplementedError(type(self), 'Positive -> successor state id. Negative -> rule id for reduction. Zero -> error.')
def get_goto(self, state_id:int, nonterminal_id) -> int: raise NotImplementedError(type(self, 'return a successor state id.'))
def get_rule(self, rule_id:int) -> tuple: raise NotImplementedError(type(self), 'return a (nonterminal_id, length, constructor_id, view) quad.')
def get_constructor(self, constructor_id) -> object: raise NotImplementedError(type(self), 'return whatever will make sense to the corresponding combiner.')
def get_initial(self, language) -> int: raise NotImplementedError(type(self), 'return the initial state id for the selected language, which by the way is usually `None `.')
def get_breadcrumb(self, state_id:int) -> str: raise NotImplementedError(type(self), 'This is used in error reporting. Return the name of the symbol that shifts into this state.')
def interactive_step(self, state_id:int) -> int: raise NotImplementedError(type(self), 'Return the reduce instruction for interactive-reducing states; zero otherwise.')
# These next two methods are in support of GLR parsing:
def get_split_offset(self) -> int: raise NotImplementedError(type(self), "Action entries >= this number mean to split the parser.")
def get_split(self, split_id:int) -> list: raise NotImplementedError(type(self), "A list of parse actions of the usual (deterministic) form.")
class Scanner:
"""
This is the interface a scanner action can expect to be able to operate on.
As a convenience, scan-context stack operations are provided here. There is no "reject" action,
but a powerful and fast alternative is built into the DFA generator in the form of rule priority
ranks. The longest-match heuristic breaks ties among the highest ranked rules that match.
"""
def token(self, kind, semantic=None):
""" Inform the system that a token of whatever kind and semantic is recognized from the current focus. """
raise NotImplementedError(type(self))
def enter(self, condition):
""" Enter the scan condition named by parameter `condition`. """
raise NotImplementedError(type(self))
def push(self, condition):
""" Save the current scan condition to a stack, and enter the scan state named by parameter `condition`. """
raise NotImplementedError(type(self))
def pop(self):
""" Enter the scan condition popped from the top of the stack. """
raise NotImplementedError(type(self))
def matched_text(self) -> str:
""" Return the text currently matched. """
raise NotImplementedError(type(self))
def less(self, nr_chars:int):
""" Put back characters into the stream to be matched: This also provides the mechanism for fixed trailing context. """
raise NotImplementedError(type(self))
def current_position(self) -> int:
""" As advertised. This was motivated by a desire to produce helpful error messages. """
raise NotImplementedError(type(self))
def current_span(self):
""" Return the position and length of the current match-text for use in error-reporting calls and the like. """
raise NotImplementedError(type(self))
def current_condition(self) -> str:
""" Return the most recently entered (or pushed, or popped) start-condition name, which is super-helpful debugging scanners. """
raise NotImplementedError(type(self))
"""
The Scan Rule Actor Interface is just a function.
For example, if you want to emit tokens, call yy.token(kind, semantic)
Said function *IS RESPONSIBLE* for dealing with trailing context, if that's a feature in your scanner.
(The simple way is to call yy.less(trail), as documented.)
"""
ScanActor = Callable[[Scanner, int], object]
class ScanErrorListener:
"""
Implement this interface to report/respond to scan errors.
For the moment I'm assuming you have a handle to the scanner so you
can get the input-file location of error events...
"""
def unexpected_character(self, yy:Scanner):
"""
The scanner will call this to report blockage. It will have prepared
to skip the offending character. Your job is to report the error to
the user. Try to recover. Emit a designated "nonsense" token and let
the parser handle it. Delegate to a driver. Do whatever.
Default behavior is to raise an exception, which by the way will kill
off a parse(...) in progress -- at least until I get parse error
recovery mode finished.
"""
raise ScannerBlocked(yy.current_position(), yy.current_condition())
def exception_scanning(self, yy:Scanner, rule_id:int, ex:Exception):
"""
If the implementation of scan rule raises an exception, the scanner
engine will pass that exception to this method (along with its own
state and the ID number of the failing rule). You're welcome to add
any sort of context cues, logging, even trying to recover.
If this returns normally, then scanning will resume normally.
"""
raise ex from None # Hide the catch-and-rethrow from the traceback.
class AbstractGeneralizedParser:
"""
Before I get too deep into it, let's lay out the general structure of a generalized parse:
"""
def __init__(self, table: ParseTable, combine, language=None):
""" Please note this takes a driver not a combiner: it does its own selection of arguments from the stack. """
self._table = table
self._combine = combine
self._nr_states = table.get_split_offset()
self.reset(table.get_initial(language))
def reset(self, initial_state):
""" Configure the initial stack situation for the given initial automaton state. """
raise NotImplementedError(type(self))
def consume(self, terminal, semantic):
""" Call this from your scanning loop. """
raise NotImplementedError(type(self))
def finish(self) -> list:
"""
Call this after the last token to wrap up and
:return: a valid semantic value for the parse.
"""
raise NotImplementedError(type(self))
| 45.825279 | 193 | 0.754523 |
from typing import Callable
from . import pretty
END_OF_TOKENS = '<END>'
ERROR_SYMBOL = '$error$'
DEFAULT_INITIAL_CONDITION = 'INITIAL'
class LanguageError(ValueError):
class ScannerBlocked(LanguageError):
def __init__(self, position, condition):
super().__init__(position, condition)
self.position, self.condition = position, condition
class GeneralizedParseError(LanguageError): pass
class ParseErrorListener:
def unexpected_token(self, kind, semantic, pds):
def unexpected_eof(self, pds):
def will_recover(self, tokens):
def did_not_recover(self):
def cannot_recover(self):
return self.did_not_recover()
def exception_parsing(self, ex:Exception, message, args):
raise ex from None
class Classifier:
def classify(self, codepoint:int) -> int:
raise NotImplementedError(type(self))
def cardinality(self) -> int:
raise NotImplementedError(type(self))
def display(self):
raise NotImplementedError(type(self))
class FiniteAutomaton:
def jam_state(self): raise NotImplementedError(type(self))
def get_condition(self, condition_name) -> tuple:
raise NotImplementedError(type(self))
def get_next_state(self, current_state: int, codepoint: int) -> int:
raise NotImplementedError(type(self))
def get_state_rule_id(self, state_id: int) -> int:
raise NotImplementedError(type(self))
class ParseTable:
def get_translation(self, symbol) -> int: raise NotImplementedError(type(self, 'Because scanners should not care the order of terminals in the parse table. Zero is reserved for end-of-text.'))
def get_action(self, state_id:int, terminal_id) -> int: raise NotImplementedError(type(self), 'Positive -> successor state id. Negative -> rule id for reduction. Zero -> error.')
def get_goto(self, state_id:int, nonterminal_id) -> int: raise NotImplementedError(type(self, 'return a successor state id.'))
def get_rule(self, rule_id:int) -> tuple: raise NotImplementedError(type(self), 'return a (nonterminal_id, length, constructor_id, view) quad.')
def get_constructor(self, constructor_id) -> object: raise NotImplementedError(type(self), 'return whatever will make sense to the corresponding combiner.')
def get_initial(self, language) -> int: raise NotImplementedError(type(self), 'return the initial state id for the selected language, which by the way is usually `None `.')
def get_breadcrumb(self, state_id:int) -> str: raise NotImplementedError(type(self), 'This is used in error reporting. Return the name of the symbol that shifts into this state.')
def interactive_step(self, state_id:int) -> int: raise NotImplementedError(type(self), 'Return the reduce instruction for interactive-reducing states; zero otherwise.')
def get_split_offset(self) -> int: raise NotImplementedError(type(self), "Action entries >= this number mean to split the parser.")
def get_split(self, split_id:int) -> list: raise NotImplementedError(type(self), "A list of parse actions of the usual (deterministic) form.")
class Scanner:
def token(self, kind, semantic=None):
raise NotImplementedError(type(self))
def enter(self, condition):
raise NotImplementedError(type(self))
def push(self, condition):
raise NotImplementedError(type(self))
def pop(self):
raise NotImplementedError(type(self))
def matched_text(self) -> str:
raise NotImplementedError(type(self))
def less(self, nr_chars:int):
raise NotImplementedError(type(self))
def current_position(self) -> int:
raise NotImplementedError(type(self))
def current_span(self):
raise NotImplementedError(type(self))
def current_condition(self) -> str:
raise NotImplementedError(type(self))
ScanActor = Callable[[Scanner, int], object]
class ScanErrorListener:
def unexpected_character(self, yy:Scanner):
raise ScannerBlocked(yy.current_position(), yy.current_condition())
def exception_scanning(self, yy:Scanner, rule_id:int, ex:Exception):
raise ex from None
class AbstractGeneralizedParser:
def __init__(self, table: ParseTable, combine, language=None):
self._table = table
self._combine = combine
self._nr_states = table.get_split_offset()
self.reset(table.get_initial(language))
def reset(self, initial_state):
raise NotImplementedError(type(self))
def consume(self, terminal, semantic):
raise NotImplementedError(type(self))
def finish(self) -> list:
raise NotImplementedError(type(self))
| true | true |
f728c75bcb4c38d6a6148828f54657f473212313 | 4,460 | py | Python | experiments.py | carlo-/RNNet | 995fcce1da58ac2c840afd865bde88d11d81006f | [
"MIT"
] | null | null | null | experiments.py | carlo-/RNNet | 995fcce1da58ac2c840afd865bde88d11d81006f | [
"MIT"
] | null | null | null | experiments.py | carlo-/RNNet | 995fcce1da58ac2c840afd865bde88d11d81006f | [
"MIT"
] | null | null | null | #
# KTH Royal Institute of Technology
# DD2424: Deep Learning in Data Science
# Assignment 4
#
# Carlo Rapisarda (carlora@kth.se)
#
import numpy as np
import matplotlib.pyplot as plt
import dataset as dt
from os.path import exists
from model import RNNet
from utilities import compute_grads_numerical, compare_grads, unpickle, pickle, eprint, simple_smooth_1d
GOBLET_RESULTS_PATH = '../goblet_results.pkl'
def check_gradients():
book = dt.load_goblet_of_fire()
seq_len = 25
m = 5
X, Y, _ = book.get_labeled_data(0, seq_len)
h0 = np.zeros((m, 1))
np.random.seed(42)
net = RNNet(m=m, K=book.K)
print('===> Computing numerical gradients...')
num_grads = compute_grads_numerical(X, Y, h0, net)
print('===> Computing analytical gradients...')
grads = net._backward(X, Y, h0, *net._forward(X, h0))
errors = compare_grads(num_grads, grads, m, book.K)
errors_v = vars(errors)
for k in errors_v:
v = errors_v[k]
print(f'MSEs for {k} -> max: {v.max()},\t avg: {v.mean()},\t std: {v.std()}')
def train_with_goblet_of_fire(results_path=None):
book = dt.load_goblet_of_fire()
np.random.seed(42)
net = RNNet(m=100, K=book.K)
# optimizer = RNNet.AdaGrad(net, eta=0.1)
optimizer = RNNet.RMSProp(net, eta=0.001, gamma=0.9)
config = {
'epochs': 10,
'output_folder': '../out',
'optimizer': optimizer,
'sequence_length': 25,
'record_interval': 1_000,
'test_length': 200
}
res = net.train(book, config)
if results_path is not None:
pickle(res, results_path)
return res
def plot_results(res, fig_path=None):
interval = res['interval']
smooth_losses_by_interval = res['smooth_losses_by_interval']
smooth_losses_by_epoch = res['smooth_losses_by_epoch']
epochs = len(smooth_losses_by_epoch)
iters_per_epoch = 1.0 * len(smooth_losses_by_interval) * interval / epochs
smoother = np.array(smooth_losses_by_interval)
smoother = simple_smooth_1d(smoother, 0.95)
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_subplot(111)
ax1.plot(np.arange(len(smooth_losses_by_interval)) * interval, smooth_losses_by_interval)
ax1.plot(np.arange(smoother.size) * interval, smoother)
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax2 = ax1.twiny()
ax2.set_xlabel('epoch')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(np.arange(1,epochs+1) * iters_per_epoch)
ax2.set_xticklabels(np.arange(1,epochs+1))
ax2.grid()
ax1.grid(axis='y')
fig.tight_layout()
fig.legend(['training loss', 'smoothed'], bbox_to_anchor=(0.98, 0.86), bbox_transform=fig.transFigure)
if fig_path is not None:
fig.savefig(fig_path, bbox_inches='tight')
fig.show()
def print_evolution(res, interval, limit=None):
smooth_losses = res['smooth_losses_by_interval']
synth_samples = res['synthesized_text_by_interval']
res_interval = res['interval']
assert interval % res_interval == 0, 'Print interval must be a multiple of the recorded interval'
selected_indexes = [x for x in range(0, len(synth_samples), interval // res_interval)]
if limit is not None:
selected_indexes = selected_indexes[:limit]
# last_step = selected_indexes[-1] * res_interval
# print(f'\nModel evolution from step 1 to {last_step}:\n')
print('\n')
for i in selected_indexes:
step = max(i * res_interval, 1)
text = synth_samples[i]
smooth_loss = smooth_losses[i]
print(f'===> Step: {step}, smooth_loss: {round(smooth_loss, 4)}, synthesized:\n{text}\n\n')
def synthesize_with_best_model():
model_path = '../trained_models/2018-06-12-2205-e10.pkl'
if exists(model_path):
book = dt.load_goblet_of_fire()
net = RNNet.import_model(model_path)
np.random.seed(50)
print(net.synthesize(1000, book.char_to_one_hot, book.index_to_char))
else:
eprint('Best trained model found!')
def main():
check_gradients()
if not exists(GOBLET_RESULTS_PATH):
train_with_goblet_of_fire(GOBLET_RESULTS_PATH)
results = unpickle(GOBLET_RESULTS_PATH)
plot_results(results, '../Report/Figs/training_goblet.eps')
print_evolution(results, 10_000, 11)
print(f'===> Passage from the final model (smooth_loss: {results["smooth_losses_by_epoch"][-1]}):')
synthesize_with_best_model()
if __name__ == '__main__':
main()
| 28.407643 | 106 | 0.673318 |
import numpy as np
import matplotlib.pyplot as plt
import dataset as dt
from os.path import exists
from model import RNNet
from utilities import compute_grads_numerical, compare_grads, unpickle, pickle, eprint, simple_smooth_1d
GOBLET_RESULTS_PATH = '../goblet_results.pkl'
def check_gradients():
book = dt.load_goblet_of_fire()
seq_len = 25
m = 5
X, Y, _ = book.get_labeled_data(0, seq_len)
h0 = np.zeros((m, 1))
np.random.seed(42)
net = RNNet(m=m, K=book.K)
print('===> Computing numerical gradients...')
num_grads = compute_grads_numerical(X, Y, h0, net)
print('===> Computing analytical gradients...')
grads = net._backward(X, Y, h0, *net._forward(X, h0))
errors = compare_grads(num_grads, grads, m, book.K)
errors_v = vars(errors)
for k in errors_v:
v = errors_v[k]
print(f'MSEs for {k} -> max: {v.max()},\t avg: {v.mean()},\t std: {v.std()}')
def train_with_goblet_of_fire(results_path=None):
book = dt.load_goblet_of_fire()
np.random.seed(42)
net = RNNet(m=100, K=book.K)
optimizer = RNNet.RMSProp(net, eta=0.001, gamma=0.9)
config = {
'epochs': 10,
'output_folder': '../out',
'optimizer': optimizer,
'sequence_length': 25,
'record_interval': 1_000,
'test_length': 200
}
res = net.train(book, config)
if results_path is not None:
pickle(res, results_path)
return res
def plot_results(res, fig_path=None):
interval = res['interval']
smooth_losses_by_interval = res['smooth_losses_by_interval']
smooth_losses_by_epoch = res['smooth_losses_by_epoch']
epochs = len(smooth_losses_by_epoch)
iters_per_epoch = 1.0 * len(smooth_losses_by_interval) * interval / epochs
smoother = np.array(smooth_losses_by_interval)
smoother = simple_smooth_1d(smoother, 0.95)
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_subplot(111)
ax1.plot(np.arange(len(smooth_losses_by_interval)) * interval, smooth_losses_by_interval)
ax1.plot(np.arange(smoother.size) * interval, smoother)
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax2 = ax1.twiny()
ax2.set_xlabel('epoch')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(np.arange(1,epochs+1) * iters_per_epoch)
ax2.set_xticklabels(np.arange(1,epochs+1))
ax2.grid()
ax1.grid(axis='y')
fig.tight_layout()
fig.legend(['training loss', 'smoothed'], bbox_to_anchor=(0.98, 0.86), bbox_transform=fig.transFigure)
if fig_path is not None:
fig.savefig(fig_path, bbox_inches='tight')
fig.show()
def print_evolution(res, interval, limit=None):
smooth_losses = res['smooth_losses_by_interval']
synth_samples = res['synthesized_text_by_interval']
res_interval = res['interval']
assert interval % res_interval == 0, 'Print interval must be a multiple of the recorded interval'
selected_indexes = [x for x in range(0, len(synth_samples), interval // res_interval)]
if limit is not None:
selected_indexes = selected_indexes[:limit]
print('\n')
for i in selected_indexes:
step = max(i * res_interval, 1)
text = synth_samples[i]
smooth_loss = smooth_losses[i]
print(f'===> Step: {step}, smooth_loss: {round(smooth_loss, 4)}, synthesized:\n{text}\n\n')
def synthesize_with_best_model():
model_path = '../trained_models/2018-06-12-2205-e10.pkl'
if exists(model_path):
book = dt.load_goblet_of_fire()
net = RNNet.import_model(model_path)
np.random.seed(50)
print(net.synthesize(1000, book.char_to_one_hot, book.index_to_char))
else:
eprint('Best trained model found!')
def main():
check_gradients()
if not exists(GOBLET_RESULTS_PATH):
train_with_goblet_of_fire(GOBLET_RESULTS_PATH)
results = unpickle(GOBLET_RESULTS_PATH)
plot_results(results, '../Report/Figs/training_goblet.eps')
print_evolution(results, 10_000, 11)
print(f'===> Passage from the final model (smooth_loss: {results["smooth_losses_by_epoch"][-1]}):')
synthesize_with_best_model()
if __name__ == '__main__':
main()
| true | true |
f728c871ef2519e02ec712ca36350b7b9e405031 | 3,468 | py | Python | mysite/settings.py | forfrt/Gelato | fde9cde624658d7168ce56e3606ee9749ad84ac2 | [
"Apache-2.0"
] | null | null | null | mysite/settings.py | forfrt/Gelato | fde9cde624658d7168ce56e3606ee9749ad84ac2 | [
"Apache-2.0"
] | null | null | null | mysite/settings.py | forfrt/Gelato | fde9cde624658d7168ce56e3606ee9749ad84ac2 | [
"Apache-2.0"
] | null | null | null | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&b^mo7i6ep+h^j^)(5+h%t4yt!kj$u$(^=fho=)*dl88=cnr@f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cmdb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# },
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'gelato',
'USER': 'li8850222',
'PASSWORD': '8850222',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# python manage.py inspectdb > app/models.py
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS= (
os.path.join(BASE_DIR,'static'),
)
| 25.5 | 91 | 0.673587 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '&b^mo7i6ep+h^j^)(5+h%t4yt!kj$u$(^=fho=)*dl88=cnr@f'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cmdb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# },
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'gelato',
'USER': 'li8850222',
'PASSWORD': '8850222',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# python manage.py inspectdb > app/models.py
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS= (
os.path.join(BASE_DIR,'static'),
)
| true | true |
f728c8790e5b7d441572fd6b0eced63c85fbe802 | 3,383 | py | Python | setup.py | Limych/python-beward | 2144f9cd3d99120b5598e09db430df4c87724236 | [
"MIT"
] | null | null | null | setup.py | Limych/python-beward | 2144f9cd3d99120b5598e09db430df4c87724236 | [
"MIT"
] | null | null | null | setup.py | Limych/python-beward | 2144f9cd3d99120b5598e09db430df4c87724236 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Library module setup."""
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest.

    Pattern adapted from
    https://docs.pytest.org/en/latest/goodpractices.html#manual-integration
    """

    # pylint: disable=attribute-defined-outside-init
    def finalize_options(self):
        """Finalize test command options."""
        TestCommand.finalize_options(self)
        # we don't run integration tests which need an actual beward device
        self.test_args = ["-m", "not integration"]
        self.test_suite = True

    # pylint: disable=import-outside-toplevel,import-error
    def run_tests(self):
        """Run the test suite with pytest and exit with its status code."""
        # import here, cause outside the eggs aren't loaded
        import pytest

        # BUG FIX: the original referenced ``self.pytest_args``, which is
        # never assigned anywhere (finalize_options builds ``test_args``),
        # so ``python setup.py pytest`` crashed with AttributeError.
        errno = pytest.main(self.test_args)
        sys.exit(errno)
def load_requirements(fpath: str) -> list:
    """Load requirement specifiers from a pip requirements file.

    Lines of the form ``-r other.txt`` / ``--requirement other.txt`` are
    followed recursively.  Blank lines and ``#`` comment lines are skipped,
    and surrounding whitespace (including the trailing newline the original
    implementation leaked into every entry) is stripped, so the result is
    directly usable as ``install_requires``.

    :param fpath: path to the requirements file.
    :return: list of requirement strings.
    """
    include = re.compile(r"^(-r|--requirement)\s+(\S+)")
    reqs = []
    with open(fpath, encoding="utf-8") as fpt:
        for raw_line in fpt:
            line = raw_line.strip()
            # Comments and blank lines are not valid requirement specifiers.
            if not line or line.startswith("#"):
                continue
            match = include.match(line)
            if match:
                # Recurse into the referenced requirements file.
                reqs.extend(load_requirements(match.group(2)))
            else:
                reqs.append(line)
    return reqs
with open("beward/const.py", encoding="utf-8") as fp:
src = fp.read()
metadata = dict(re.findall(r'([a-z]+) = "([^"]+)"', src, re.IGNORECASE))
metadata.update(dict(re.findall(r"([a-z]+) = '([^']+)'", src, re.IGNORECASE)))
docstrings = re.findall(r'"""(.*?)"""', src, re.MULTILINE | re.DOTALL)
NAME = "beward"
PACKAGES = [x for x in find_packages() if x not in ["bin", "tests"]]
VERSION = metadata["VERSION"]
AUTHOR_EMAIL = metadata.get("AUTHOR", "Unknown <no@email.com>")
WEBSITE = metadata.get("WEBSITE", "")
LICENSE = metadata.get("LICENSE", "")
DESCRIPTION = docstrings[0]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Home Automation",
"Topic :: Security",
"Topic :: Multimedia :: Video :: Capture",
]
with open("README.md", encoding="utf-8") as file:
LONG_DESCRIPTION = file.read()
LONG_DESCRIPTION_TYPE = "text/markdown"
# Extract name and e-mail ("Firstname Lastname <mail@example.org>")
AUTHOR, EMAIL = re.match(r"(.*) <(.*)>", AUTHOR_EMAIL).groups()
REQUIREMENTS = load_requirements("requirements.txt")
TEST_REQUIREMENTS = load_requirements("requirements-test.txt")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
url=WEBSITE,
packages=PACKAGES,
install_requires=REQUIREMENTS,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_TYPE,
classifiers=CLASSIFIERS,
cmdclass={"pytest": PyTest},
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
)
| 29.938053 | 78 | 0.652971 |
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
lize_options(self):
TestCommand.finalize_options(self)
self.test_args = ["-m", "not integration"]
self.test_suite = True
# pylint: disable=import-outside-toplevel,import-error
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import shlex
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
def load_requirements(fpath: str) -> list:
with open(fpath, encoding="utf-8") as fpt:
data = list(fpt)
imp = re.compile(r"^(-r|--requirement)\s+(\S+)")
reqs = []
for i in data:
m = imp.match(i)
if m:
reqs.extend(load_requirements(m.group(2)))
else:
reqs.append(i)
return reqs
with open("beward/const.py", encoding="utf-8") as fp:
src = fp.read()
metadata = dict(re.findall(r'([a-z]+) = "([^"]+)"', src, re.IGNORECASE))
metadata.update(dict(re.findall(r"([a-z]+) = '([^']+)'", src, re.IGNORECASE)))
docstrings = re.findall(r'"""(.*?)"""', src, re.MULTILINE | re.DOTALL)
NAME = "beward"
PACKAGES = [x for x in find_packages() if x not in ["bin", "tests"]]
VERSION = metadata["VERSION"]
AUTHOR_EMAIL = metadata.get("AUTHOR", "Unknown <no@email.com>")
WEBSITE = metadata.get("WEBSITE", "")
LICENSE = metadata.get("LICENSE", "")
DESCRIPTION = docstrings[0]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Home Automation",
"Topic :: Security",
"Topic :: Multimedia :: Video :: Capture",
]
with open("README.md", encoding="utf-8") as file:
LONG_DESCRIPTION = file.read()
LONG_DESCRIPTION_TYPE = "text/markdown"
# Extract name and e-mail ("Firstname Lastname <mail@example.org>")
AUTHOR, EMAIL = re.match(r"(.*) <(.*)>", AUTHOR_EMAIL).groups()
REQUIREMENTS = load_requirements("requirements.txt")
TEST_REQUIREMENTS = load_requirements("requirements-test.txt")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
url=WEBSITE,
packages=PACKAGES,
install_requires=REQUIREMENTS,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_TYPE,
classifiers=CLASSIFIERS,
cmdclass={"pytest": PyTest},
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
)
| true | true |
f728c8b37d5cb62dc2dc83e99271ab7475335ffc | 797 | py | Python | biochallenge/apps/challenge/migrations/0002_auto_20190904_0809.py | coolmaksat/biochallenge | 792e5ad6d4e2d51017219df67c3f4eb7174e8eb6 | [
"BSD-2-Clause"
] | null | null | null | biochallenge/apps/challenge/migrations/0002_auto_20190904_0809.py | coolmaksat/biochallenge | 792e5ad6d4e2d51017219df67c3f4eb7174e8eb6 | [
"BSD-2-Clause"
] | 15 | 2019-09-04T07:49:40.000Z | 2022-02-10T11:31:17.000Z | biochallenge/apps/challenge/migrations/0002_auto_20190904_0809.py | coolmaksat/biochallenge | 792e5ad6d4e2d51017219df67c3f4eb7174e8eb6 | [
"BSD-2-Clause"
] | 1 | 2019-09-03T03:31:28.000Z | 2019-09-03T03:31:28.000Z | # Generated by Django 2.2.5 on 2019-09-04 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add SPARQL fields to Release; re-declare Challenge.sparql_endpoint.

    Auto-generated by Django 2.2.5.  The ``'aaa'`` defaults are one-off
    values used only to populate existing rows during this migration
    (``preserve_default=False`` means they are not kept on the model).
    """

    dependencies = [
        ('challenge', '0001_initial'),
    ]

    operations = [
        # New endpoint URL column on Release.
        migrations.AddField(
            model_name='release',
            name='sparql_endpoint',
            field=models.CharField(default='aaa', max_length=255),
            preserve_default=False,
        ),
        # New free-form query text column on Release.
        migrations.AddField(
            model_name='release',
            name='sparql_query',
            field=models.TextField(default='aaa'),
            preserve_default=False,
        ),
        # Challenge.sparql_endpoint becomes a plain 255-char CharField.
        migrations.AlterField(
            model_name='challenge',
            name='sparql_endpoint',
            field=models.CharField(max_length=255),
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenge', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='release',
name='sparql_endpoint',
field=models.CharField(default='aaa', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='release',
name='sparql_query',
field=models.TextField(default='aaa'),
preserve_default=False,
),
migrations.AlterField(
model_name='challenge',
name='sparql_endpoint',
field=models.CharField(max_length=255),
),
]
| true | true |
f728c9b398e39d694a16f78ea0a8e8989f2a0b89 | 527 | py | Python | scripts/create_cell_data.py | cleopatra-itn/GOAL | 73809a755157fc9e51278b7fd246d13d19e2ab59 | [
"MIT"
] | null | null | null | scripts/create_cell_data.py | cleopatra-itn/GOAL | 73809a755157fc9e51278b7fd246d13d19e2ab59 | [
"MIT"
] | 12 | 2020-07-07T18:02:28.000Z | 2022-03-12T00:40:03.000Z | scripts/create_cell_data.py | cleopatra-itn/GOAL | 73809a755157fc9e51278b7fd246d13d19e2ab59 | [
"MIT"
] | 1 | 2020-10-22T09:07:08.000Z | 2020-10-22T09:07:08.000Z | import os
import json
from glob import glob
from pathlib import Path
ROOT_PATH = Path(os.path.dirname(__file__)).parent

# Build a mapping from each coordinate class (stringified, since JSON object
# keys are strings) to the cell coordinates of the FIRST raw record seen for
# that class, then write it out as cell_data.json.
raw_data = json.load(open(f'{ROOT_PATH}/data/raw/raw_data.json'))
cell_coord = {}
for k, v in raw_data.items():
    # BUG FIX: keys are stored via str(...), so the membership test must use
    # the string form too.  The original compared the raw (possibly
    # non-string) value, which never matched, so every record overwrote the
    # key and "first seen" silently became "last seen".
    cell_class = str(v['coordinates_class'])
    if cell_class not in cell_coord:
        cell_coord[cell_class] = v['coordinates_cell']

with open(f'{str(ROOT_PATH)}/data/raw/cell_data.json', 'w') as json_file:
    json.dump(cell_coord, json_file, ensure_ascii=False, indent=4)
import json
from glob import glob
from pathlib import Path
ROOT_PATH = Path(os.path.dirname(__file__)).parent
raw_data = json.load(open(f'{ROOT_PATH}/data/raw/raw_data.json'))
cell_coord = {}
for k, v in raw_data.items():
if v['coordinates_class'] not in cell_coord:
cell_coord[str(v['coordinates_class'])] = v['coordinates_cell']
with open(f'{str(ROOT_PATH)}/data/raw/cell_data.json', 'w') as json_file:
json.dump(cell_coord, json_file, ensure_ascii=False, indent=4)
| true | true |
f728ca46394162c18ca5039e6ed4befa7596c6a8 | 15,328 | py | Python | video_level_code/xp_frame_level_models.py | mpekalski/Y8M | 24b61107a0f482fdb36ab8b15b768cea24e5808a | [
"Apache-2.0"
] | 32 | 2017-06-16T06:12:40.000Z | 2021-09-19T17:22:02.000Z | video_level_code/xp_frame_level_models.py | Kimilovesy/Y8M | 24b61107a0f482fdb36ab8b15b768cea24e5808a | [
"Apache-2.0"
] | 1 | 2018-05-21T07:52:04.000Z | 2018-05-21T07:52:04.000Z | video_level_code/xp_frame_level_models.py | Kimilovesy/Y8M | 24b61107a0f482fdb36ab8b15b768cea24e5808a | [
"Apache-2.0"
] | 13 | 2017-06-11T16:45:48.000Z | 2019-12-13T15:04:45.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
from tensorflow import logging
FLAGS = flags.FLAGS
class RangeLogisticModel(models.BaseModel):
  """Logistic classifier over the per-feature range of the frame features.

  Each feature dimension is collapsed across frames to a single value,
  max - min (its range over time), and a sigmoid fully-connected layer
  maps the pooled vector to per-class probabilities.
  """

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Builds the graph for a range-pooled logistic model.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: per-video frame counts before padding (unused here; the
        range is taken over the padded frame axis).

    Returns:
      dict with 'predictions': a 'batch_size' x 'num_classes' tensor of
      class probabilities.
    """
    per_feature_max = tf.reduce_max(model_input, axis=[1])
    per_feature_min = tf.reduce_min(model_input, axis=[1])
    pooled = per_feature_max - per_feature_min
    predictions = slim.fully_connected(
        pooled,
        vocab_size,
        activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-4))
    return {"predictions": predictions}
class FNN_mvt_Model(models.BaseModel):
  """Feed-forward net over per-feature mean/std/top-k frame statistics."""

  def create_model(self, model_input, vocab_size, num_frames,
                   l2_penalty=1e-4, is_training=True, **unused_params):
    """Builds a two-layer feed-forward classifier on frame statistics.

    For every feature dimension the frames are summarized by their mean,
    standard deviation and top-3 values; the concatenated statistics are
    batch-normalized, passed through one ReLU hidden layer (3600 units)
    and a sigmoid output layer.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: per-video frame counts before padding (unused here; the
        statistics are taken over the padded frame axis).
      l2_penalty: L2 regularization strength for the dense layers.
      is_training: controls batch-norm's use of batch vs moving statistics.

    Returns:
      dict with 'predictions': 'batch_size' x 'num_classes' tensor.
    """
    # Per-feature mean and variance over the frame axis (axis 1).
    inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])
    inter_f_std = tf.sqrt(inter_f_var)

    # Top-k values per feature over time; transpose so frames are the last
    # axis, which is the axis tf.nn.top_k operates on.
    kk = 3
    xt = tf.transpose(model_input, perm=[0,2,1])
    tk = tf.nn.top_k(xt, kk).values
    logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))
    logging.info( 'tk: {}'.format(tk.get_shape().as_list() ))
    # Flatten the (num_features, kk) top-k block into a single vector.
    topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])
    logging.info( 'topk: {}'.format(topk.get_shape().as_list() ))

    inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)
    logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))
    logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() ))
    tf.summary.histogram("inter_f_mean", inter_f_mean)
    tf.summary.histogram("inter_f_std", inter_f_std)

    with tf.name_scope('FNN_mvt_Model'):
      A0 = slim.batch_norm(
          inter_f_feats,
          center=True,
          scale=True,
          is_training=is_training,
          scope="BN")
      h1Units = 3600
      A1 = slim.fully_connected(
          A0, h1Units, activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope='FC_H1')
      output = slim.fully_connected(
          A1, vocab_size, activation_fn=tf.nn.sigmoid,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope='FC_P')
    return {"predictions": output}
class DbofModel2(models.BaseModel):
  """Creates a Deep Bag of Frames model.

  The model projects the features for each frame into a higher dimensional
  'clustering' space, pools across frames in that space, and then
  uses a configurable video-level model to classify the now aggregated
  features.

  The model will randomly sample either frames or sequences of frames during
  training to speed up convergence.
  """

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_frames,
                   iterations=None,
                   add_batch_norm=None,
                   sample_random_frames=None,
                   cluster_size=None,
                   hidden_size=None,
                   is_training=True,
                   **unused_params):
    """Builds the DBoF graph.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: vector of per-video frame counts (before padding).
      iterations: number of frames to sample per video (default: FLAGS).
      add_batch_norm: use batch norm instead of bias terms (default: FLAGS).
      sample_random_frames: sample frames independently rather than a
        contiguous sequence (default: FLAGS).
      cluster_size: width of the clustering projection (default: FLAGS).
      hidden_size: width of the hidden layer (default: FLAGS).
      is_training: controls batch-norm's use of batch vs moving statistics.

    Returns:
      dict with 'predictions': 'batch_size' x 'num_classes' tensor, produced
      by the classifier named by --video_level_classifier_model.
    """
    iterations = iterations or FLAGS.iterations
    add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
    random_frames = sample_random_frames or FLAGS.sample_random_frames
    cluster_size = cluster_size or FLAGS.dbof_cluster_size
    hidden1_size = hidden_size or FLAGS.dbof_hidden_size

    # Subsample frames so every video contributes the same number.
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    if random_frames:
      model_input = utils.SampleRandomFrames(model_input, num_frames,
                                             iterations)
    else:
      model_input = utils.SampleRandomSequence(model_input, num_frames,
                                               iterations)
    max_frames = model_input.get_shape().as_list()[1]
    feature_size = model_input.get_shape().as_list()[2]
    # Collapse (batch, frames) so the projection is one big matmul.
    reshaped_input = tf.reshape(model_input, [-1, feature_size])
    tf.summary.histogram("input_hist", reshaped_input)

    if add_batch_norm:
      reshaped_input = slim.batch_norm(
          reshaped_input,
          center=True,
          scale=True,
          is_training=is_training,
          scope="input_bn")

    cluster_weights = tf.get_variable("cluster_weights",
      [feature_size, cluster_size],
      initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: the original passed tf.random_normal(stddev=...) as the
      # initializer; tf.random_normal is an op that requires a shape and
      # returns a tensor, not an initializer, so this branch raised a
      # TypeError whenever add_batch_norm was False.
      cluster_biases = tf.get_variable("cluster_biases",
        [cluster_size],
        initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("cluster_output", activation)

    # Pool the per-frame cluster activations back to one vector per video.
    activation = tf.reshape(activation, [-1, max_frames, cluster_size])
    activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)

    hidden1_weights = tf.get_variable("hidden1_weights",
      [cluster_size, hidden1_size],
      initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))
    tf.summary.histogram("hidden1_weights", hidden1_weights)
    activation = tf.matmul(activation, hidden1_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="hidden1_bn")
    else:
      hidden1_biases = tf.get_variable("hidden1_biases",
        [hidden1_size],
        initializer = tf.random_normal_initializer(stddev=0.01))
      tf.summary.histogram("hidden1_biases", hidden1_biases)
      activation += hidden1_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("hidden1_output", activation)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=activation,
        vocab_size=vocab_size,
        **unused_params)
class LstmModel2(models.BaseModel):
  """Stacked LSTM over frames, classified by the configured video model."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of LSTMs to represent the video.

    The final LSTM state (c and h concatenated for every layer, because
    state_is_tuple=False) is fed to the video-level classifier named by
    --video_level_classifier_model with num_mixtures=2.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: per-video frame counts (before padding), used as the
        dynamic_rnn sequence lengths.

    Returns:
      dict with 'predictions': 'batch_size' x 'num_classes' tensor.
    """
    lstm_size = FLAGS.lstm_cells
    number_of_layers = FLAGS.lstm_layers

    # state_is_tuple=False makes `state` below a single flat tensor per
    # video instead of a nested (c, h) tuple per layer.
    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=False)
                for _ in range(number_of_layers)
                ], state_is_tuple=False)

    with tf.variable_scope("RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=state,
        vocab_size=vocab_size,
        num_mixtures=2,
        **unused_params)
class FMoeModel1(models.BaseModel):
  """Frame statistics (mean/std/top-5) fed to the configured MoE classifier."""

  def create_model(self, model_input, vocab_size, num_frames,
                   l2_penalty=1e-4, is_training=True, **unused_params):
    """Builds the statistics + mixture-of-experts graph.

    For every feature dimension the frames are summarized by their mean,
    standard deviation and top-5 values; the concatenated, batch-normalized
    statistics are classified by the video-level model named by
    --video_level_classifier_model with num_mixtures=2.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: per-video frame counts before padding (unused here; the
        statistics are taken over the padded frame axis).
      l2_penalty: unused here; accepted for signature compatibility.
      is_training: controls batch-norm's use of batch vs moving statistics.

    Returns:
      dict with 'predictions': 'batch_size' x 'num_classes' tensor.
    """
    # Per-feature mean and variance over the frame axis (axis 1).
    inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])
    inter_f_std = tf.sqrt(inter_f_var)

    # Top-k values per feature over time; frames moved to the last axis
    # because that is the axis tf.nn.top_k operates on.
    kk = 5
    xt = tf.transpose(model_input, perm=[0,2,1])
    tk = tf.nn.top_k(xt, kk).values
    logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))
    logging.info( 'tk: {}'.format(tk.get_shape().as_list() ))
    # Flatten the (num_features, kk) top-k block into a single vector.
    topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])
    logging.info( 'topk: {}'.format(topk.get_shape().as_list() ))

    inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)
    logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))
    logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() ))
    tf.summary.histogram("inter_f_mean", inter_f_mean)
    tf.summary.histogram("inter_f_std", inter_f_std)

    A0 = slim.batch_norm(
        inter_f_feats,
        center=True,
        scale=True,
        is_training=is_training,
        scope="BN")

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=A0,
        vocab_size=vocab_size,
        num_mixtures=2,
        **unused_params)
class FMoeModel2(models.BaseModel):
  """Average-pools frame features, then applies the configured classifier."""

  def create_model(self, model_input, vocab_size, num_frames,
                   l2_penalty=1e-4, **unused_params):
    """Builds an average-pooling + video-level-classifier graph.

    Frames are mean-pooled per feature and the pooled vector is classified
    by the video-level model named by --video_level_classifier_model with
    num_mixtures=2.

    Args:
      model_input: 'batch_size' x 'max_frames' x 'num_features' tensor of
        input features.
      vocab_size: number of classes in the dataset.
      num_frames: per-video frame counts before padding (unused here; the
        pooling is over the padded frame axis).
      l2_penalty: unused here; accepted for signature compatibility.

    Returns:
      dict with 'predictions': 'batch_size' x 'num_classes' tensor.
    """
    frame_avg = utils.FramePooling(model_input, 'average')
    logging.info('avg_pooled shape: {}'.format(
        frame_avg.get_shape().as_list()))

    classifier_cls = getattr(video_level_models,
                             FLAGS.video_level_classifier_model)
    classifier = classifier_cls()
    return classifier.create_model(
        model_input=frame_avg,
        vocab_size=vocab_size,
        num_mixtures=2,
        **unused_params)
| 39.002545 | 85 | 0.663035 |
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
from tensorflow import logging
FLAGS = flags.FLAGS
class RangeLogisticModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
range_pooled = tf.reduce_max(model_input, axis=[1]) - \
tf.reduce_min(model_input, axis=[1])
output = slim.fully_connected(
range_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(1e-4))
return {"predictions": output}
class FNN_mvt_Model(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames,
l2_penalty=1e-4, is_training=True, **unused_params):
inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])
inter_f_std = tf.sqrt(inter_f_var)
kk = 3
xt = tf.transpose(model_input, perm=[0,2,1])
tk = tf.nn.top_k(xt, kk).values
logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))
logging.info( 'tk: {}'.format(tk.get_shape().as_list() ))
topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])
logging.info( 'topk: {}'.format(topk.get_shape().as_list() ))
inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)
logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))
logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() ))
tf.summary.histogram("inter_f_mean", inter_f_mean)
tf.summary.histogram("inter_f_std", inter_f_std)
with tf.name_scope('FNN_mvt_Model'):
A0 = slim.batch_norm(
inter_f_feats,
center=True,
scale=True,
is_training=is_training,
scope="BN")
h1Units = 3600
A1 = slim.fully_connected(
A0, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
output = slim.fully_connected(
A1, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_P')
return {"predictions": output}
class DbofModel2(models.BaseModel):
def create_model(self,
model_input,
vocab_size,
num_frames,
iterations=None,
add_batch_norm=None,
sample_random_frames=None,
cluster_size=None,
hidden_size=None,
is_training=True,
**unused_params):
iterations = iterations or FLAGS.iterations
add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
random_frames = sample_random_frames or FLAGS.sample_random_frames
cluster_size = cluster_size or FLAGS.dbof_cluster_size
hidden1_size = hidden_size or FLAGS.dbof_hidden_size
num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
if random_frames:
model_input = utils.SampleRandomFrames(model_input, num_frames,
iterations)
else:
model_input = utils.SampleRandomSequence(model_input, num_frames,
iterations)
max_frames = model_input.get_shape().as_list()[1]
feature_size = model_input.get_shape().as_list()[2]
reshaped_input = tf.reshape(model_input, [-1, feature_size])
tf.summary.histogram("input_hist", reshaped_input)
if add_batch_norm:
reshaped_input = slim.batch_norm(
reshaped_input,
center=True,
scale=True,
is_training=is_training,
scope="input_bn")
cluster_weights = tf.get_variable("cluster_weights",
[feature_size, cluster_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
tf.summary.histogram("cluster_weights", cluster_weights)
activation = tf.matmul(reshaped_input, cluster_weights)
if add_batch_norm:
activation = slim.batch_norm(
activation,
center=True,
scale=True,
is_training=is_training,
scope="cluster_bn")
else:
cluster_biases = tf.get_variable("cluster_biases",
[cluster_size],
initializer = tf.random_normal(stddev=1 / math.sqrt(feature_size)))
tf.summary.histogram("cluster_biases", cluster_biases)
activation += cluster_biases
activation = tf.nn.relu6(activation)
tf.summary.histogram("cluster_output", activation)
activation = tf.reshape(activation, [-1, max_frames, cluster_size])
activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)
hidden1_weights = tf.get_variable("hidden1_weights",
[cluster_size, hidden1_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))
tf.summary.histogram("hidden1_weights", hidden1_weights)
activation = tf.matmul(activation, hidden1_weights)
if add_batch_norm:
activation = slim.batch_norm(
activation,
center=True,
scale=True,
is_training=is_training,
scope="hidden1_bn")
else:
hidden1_biases = tf.get_variable("hidden1_biases",
[hidden1_size],
initializer = tf.random_normal_initializer(stddev=0.01))
tf.summary.histogram("hidden1_biases", hidden1_biases)
activation += hidden1_biases
activation = tf.nn.relu6(activation)
tf.summary.histogram("hidden1_output", activation)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=activation,
vocab_size=vocab_size,
**unused_params)
class LstmModel2(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
rib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0, state_is_tuple=False)
for _ in range(number_of_layers)
], state_is_tuple=False)
with tf.variable_scope("RNN"):
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state,
vocab_size=vocab_size,
num_mixtures=2,
**unused_params)
class FMoeModel1(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames,
l2_penalty=1e-4, is_training=True, **unused_params):
inter_f_mean, inter_f_var = tf.nn.moments(model_input, [1])
inter_f_std = tf.sqrt(inter_f_var)
kk = 5
xt = tf.transpose(model_input, perm=[0,2,1])
tk = tf.nn.top_k(xt, kk).values
logging.info( 'xt: {}'.format(xt.get_shape().as_list() ))
logging.info( 'tk: {}'.format(tk.get_shape().as_list() ))
topk = tf.reshape(tk, [-1, kk * tk.get_shape().as_list()[1]])
logging.info( 'topk: {}'.format(topk.get_shape().as_list() ))
inter_f_feats = tf.concat([inter_f_mean, inter_f_std, topk], 1)
logging.info('inter_f_mean: {}'.format(inter_f_mean.get_shape().as_list()))
logging.info( 'feats: {}'.format(inter_f_feats.get_shape().as_list() ))
tf.summary.histogram("inter_f_mean", inter_f_mean)
tf.summary.histogram("inter_f_std", inter_f_std)
A0 = slim.batch_norm(
inter_f_feats,
center=True,
scale=True,
is_training=is_training,
scope="BN")
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=A0,
vocab_size=vocab_size,
num_mixtures=2,
**unused_params)
class FMoeModel2(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames,
l2_penalty=1e-4, **unused_params):
avg_pooled = utils.FramePooling(model_input, 'average')
logging.info( 'avg_pooled shape: {}'.format(
avg_pooled.get_shape().as_list() ))
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=avg_pooled,
vocab_size=vocab_size,
num_mixtures=2,
**unused_params)
| true | true |
f728ca47b3ece41fdc86cad4bcc6bb0bd696850c | 4,814 | py | Python | ForgeBlog/forgeblog/tests/test_commands.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | 1 | 2021-12-09T21:52:12.000Z | 2021-12-09T21:52:12.000Z | ForgeBlog/forgeblog/tests/test_commands.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | null | null | null | ForgeBlog/forgeblog/tests/test_commands.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import datetime, timedelta
from tg import app_globals as g
from datadiff.tools import assert_equal
from IPython.testing.decorators import module_not_available, skipif
import pkg_resources
import mock
import feedparser
from ming.orm.ormsession import ThreadLocalORMSession
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from forgeblog import model as BM
test_config = pkg_resources.resource_filename(
'allura', '../test.ini') + '#main'
def setUp():
    """Module-level fixture: reset the test app context and global objects."""
    setup_basic_test()
    setup_global_objects()
def _mock_feed(*entries):
class attrdict(dict):
def __getattr__(self, name):
return self[name]
feed = mock.Mock()
feed.bozo = False
feed.entries = []
for e in entries:
_mock_feed.i += 1
entry = attrdict(
content_type='text/plain',
title='Default Title %d' % _mock_feed.i,
subtitle='',
summary='',
link='http://example.com/',
updated=datetime.utcnow() + timedelta(days=_mock_feed.i - 100))
entry.update(e)
entry['updated_parsed'] = entry['updated'].timetuple()
if 'content' in entry:
entry['content'] = [
attrdict(type=entry['content_type'], value=entry['content'])]
if 'summary_detail' in entry:
entry['summary_detail'] = attrdict(entry['summary_detail'])
feed.entries.append(entry)
return feed
_mock_feed.i = 0
@skipif(module_not_available('html2text'))
@mock.patch.object(feedparser, 'parse')
def test_pull_rss_feeds(parsefeed):
    """End-to-end test of the ``pull-rss-feeds`` command.

    Patches ``feedparser.parse`` to return four canned entries, registers
    a temporary Blog tool with one external feed URL, runs the command,
    and verifies each entry became a BlogPost with the expected title and
    markdown-converted text.
    """
    # Raw HTML as it would appear inside a feed entry body.
    html_content = (
        "<p>1. foo</p>\n"
        "\n"
        "<p>\n"
        "#foo bar <a href='baz'>baz</a>\n"
        "foo bar\n"
        "</p>\n"
        "\n"
        "<p>#foo bar <a href='baz'>\n"
        "baz\n"
        "</a></p>\n"
    )
    # Markdown expected from converting html_content, with the source
    # link appended (matches the posts[2]/posts[3] assertions below).
    rendered_html_content = "\n".join([
        r"1\. foo",
        "",
        r"\#foo bar [baz](baz) foo bar ",
        "",
        r"\#foo bar [ baz ](baz)",
        " [link](http://example.com/)",
    ])
    # Four entries: summary-only, plain-text content, HTML content, and
    # HTML delivered via summary_detail.
    parsefeed.return_value = _mock_feed(
        dict(title='Test', subtitle='test', summary='This is a test'),
        dict(content_type='text/plain', content='Test feed'),
        dict(content_type='text/html', content=html_content),
        dict(summary_detail=dict(type='text/html', value=html_content)),
    )
    # Clone ids from an existing AppConfig so the temp Blog tool is
    # attached to a valid project/discussion.
    base_app = M.AppConfig.query.find().all()[0]
    tmp_app = M.AppConfig(
        tool_name='Blog', discussion_id=base_app.discussion_id,
        project_id=base_app.project_id,
        options={'ordinal': 0, 'show_right_bar': True,
                 'project_name': base_app.project.name,
                 'mount_point': 'blog',
                 'mount_label': 'Blog'})
    new_external_feeds = ['http://example.com/news/feed/']
    BM.Globals(app_config_id=tmp_app._id, external_feeds=new_external_feeds)
    # Persist the tool and feed registration before running the command.
    ThreadLocalORMSession.flush_all()
    from forgeblog.command import rssfeeds  # importing this sets html2text.BODY_WIDTH to a value this test expects
    cmd = rssfeeds.RssFeedsCommand('pull-rss-feeds')
    # Restrict the pull to the temp app so other fixtures are untouched.
    cmd.run([test_config, '-a', tmp_app._id])
    cmd.command()
    parsefeed.assert_called_with('http://example.com/news/feed/')
    posts = BM.BlogPost.query.find(
        {'app_config_id': tmp_app._id}).sort('timestamp', 1)
    # One post per mocked feed entry, in feed order.
    assert_equal(posts.count(), 4)
    posts = posts.all()
    assert_equal(posts[0].title, 'Test')
    assert_equal(posts[0].text, 'This is a test [link](http://example.com/)')
    assert_equal(posts[1].title, 'Default Title 2')
    assert_equal(posts[1].text, 'Test feed [link](http://example.com/)')
    assert_equal(posts[2].title, 'Default Title 3')
    assert_equal(posts[2].text, rendered_html_content)
    assert_equal(posts[3].title, 'Default Title 4')
    assert_equal(posts[3].text, rendered_html_content)
| 35.397059 | 115 | 0.645825 |
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import datetime, timedelta
from tg import app_globals as g
from datadiff.tools import assert_equal
from IPython.testing.decorators import module_not_available, skipif
import pkg_resources
import mock
import feedparser
from ming.orm.ormsession import ThreadLocalORMSession
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from forgeblog import model as BM
# Paste-style config reference: path to test.ini resolved relative to the
# installed allura package; the '#main' suffix presumably selects the main
# app section of the ini file — TODO confirm against the command runner.
test_config = pkg_resources.resource_filename(
    'allura', '../test.ini') + '#main'
def setUp():
    """Module-level fixture: bootstrap the Allura test app, then globals.

    Order matters — the global objects depend on the basic test setup.
    """
    for bootstrap in (setup_basic_test, setup_global_objects):
        bootstrap()
def _mock_feed(*entries):
class attrdict(dict):
def __getattr__(self, name):
return self[name]
feed = mock.Mock()
feed.bozo = False
feed.entries = []
for e in entries:
_mock_feed.i += 1
entry = attrdict(
content_type='text/plain',
title='Default Title %d' % _mock_feed.i,
subtitle='',
summary='',
link='http://example.com/',
updated=datetime.utcnow() + timedelta(days=_mock_feed.i - 100))
entry.update(e)
entry['updated_parsed'] = entry['updated'].timetuple()
if 'content' in entry:
entry['content'] = [
attrdict(type=entry['content_type'], value=entry['content'])]
if 'summary_detail' in entry:
entry['summary_detail'] = attrdict(entry['summary_detail'])
feed.entries.append(entry)
return feed
_mock_feed.i = 0
@skipif(module_not_available('html2text'))
@mock.patch.object(feedparser, 'parse')
def test_pull_rss_feeds(parsefeed):
    """End-to-end test of the ``pull-rss-feeds`` command.

    Patches ``feedparser.parse`` to return four canned entries, registers
    a temporary Blog tool with one external feed URL, runs the command,
    and verifies each entry became a BlogPost with the expected title and
    markdown-converted text.
    """
    # Raw HTML as it would appear inside a feed entry body.
    feed_html = (
        "<p>1. foo</p>\n"
        "\n"
        "<p>\n"
        "#foo bar <a href='baz'>baz</a>\n"
        "foo bar\n"
        "</p>\n"
        "\n"
        "<p>#foo bar <a href='baz'>\n"
        "baz\n"
        "</a></p>\n"
    )
    # Markdown expected from converting feed_html, with the source link
    # appended by the importer.
    expected_markdown = "\n".join([
        r"1\. foo",
        "",
        r"\#foo bar [baz](baz) foo bar ",
        "",
        r"\#foo bar [ baz ](baz)",
        " [link](http://example.com/)",
    ])
    # Four entries: summary-only, plain-text content, HTML content, and
    # HTML delivered via summary_detail.
    parsefeed.return_value = _mock_feed(
        dict(title='Test', subtitle='test', summary='This is a test'),
        dict(content_type='text/plain', content='Test feed'),
        dict(content_type='text/html', content=feed_html),
        dict(summary_detail=dict(type='text/html', value=feed_html)),
    )
    # Clone ids from an existing AppConfig so the temp Blog tool is
    # attached to a valid project/discussion.
    base_app = M.AppConfig.query.find().all()[0]
    tmp_app = M.AppConfig(
        tool_name='Blog', discussion_id=base_app.discussion_id,
        project_id=base_app.project_id,
        options={'ordinal': 0, 'show_right_bar': True,
                 'project_name': base_app.project.name,
                 'mount_point': 'blog',
                 'mount_label': 'Blog'})
    BM.Globals(app_config_id=tmp_app._id,
               external_feeds=['http://example.com/news/feed/'])
    # Persist the tool and feed registration before running the command.
    ThreadLocalORMSession.flush_all()
    from forgeblog.command import rssfeeds  # importing this sets html2text.BODY_WIDTH to a value this test expects
    cmd = rssfeeds.RssFeedsCommand('pull-rss-feeds')
    # Restrict the pull to the temp app so other fixtures are untouched.
    cmd.run([test_config, '-a', tmp_app._id])
    cmd.command()
    parsefeed.assert_called_with('http://example.com/news/feed/')
    posts = BM.BlogPost.query.find(
        {'app_config_id': tmp_app._id}).sort('timestamp', 1)
    # One post per mocked feed entry, in feed order.
    assert_equal(posts.count(), 4)
    expectations = [
        ('Test', 'This is a test [link](http://example.com/)'),
        ('Default Title 2', 'Test feed [link](http://example.com/)'),
        ('Default Title 3', expected_markdown),
        ('Default Title 4', expected_markdown),
    ]
    for post, (title, text) in zip(posts.all(), expectations):
        assert_equal(post.title, title)
        assert_equal(post.text, text)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.