hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6dfcc1aa89979f25274c267c92368e302bbc2a8 | 2,741 | py | Python | modules/rc3b.py | HondaLab/KeyboardRC | 6740117abf0f3afd4dd5bb1a2d3a1c6c69b29ad6 | [
"MIT"
] | null | null | null | modules/rc3b.py | HondaLab/KeyboardRC | 6740117abf0f3afd4dd5bb1a2d3a1c6c69b29ad6 | [
"MIT"
] | 1 | 2022-03-19T03:35:25.000Z | 2022-03-19T03:35:25.000Z | modules/rc3b.py | HondaLab/KeyboardRC | 6740117abf0f3afd4dd5bb1a2d3a1c6c69b29ad6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# rc3b.py
# Yasushi Honda 2021 9/3
# How to execute
# sudo pigpiod
# python3 rcXY.py
import modules.keyin as keyin # module that watches keyboard input
import modules.motor as mt # module that drives the motors via PWM
import time
STEP=8
HANDLE_STEP=8
HANDLE_TIME=0.3
TRIM_STEP=8
TRIM_TIME=0.2
ANGL_GAIN=1.2
class KeyAssign():
def __init__(self):
self.mL=mt.Lmotor(23)
self.mR=mt.Rmotor(14)
self.csv=mt.Servo(18)
self.left=0
self.right=0
self.angl=0
def update(self,ch):
if ch == "j" :
#TRIM_STEP=int(0.5*(left+right)*1.0)
self.left-= TRIM_STEP
self.right+= TRIM_STEP
self.angl=int(ANGL_GAIN*(self.right-self.left))
if ch == "k" :
#TRIM_STEP=int(0.5*(self.left+self.right)*1.0)
self.left+= TRIM_STEP
self.right-= TRIM_STEP
self.angl=int(ANGL_GAIN*(self.right-self.left))
if ch == "g" :
self.left=int(0.5*(self.left+self.right)*1.0)
self.right=self.left
self.angl=0
if ch == "l" :
HANDLE_STEP=int(0.5*(self.left+self.right)*2.0)
self.left+= HANDLE_STEP
self.right-= HANDLE_STEP
Run(self.mL,self.mR,self.csv,self.left,self.right,self.angl)
time.sleep(HANDLE_TIME)
self.left-= HANDLE_STEP
self.right+= HANDLE_STEP
if ch == "h" :
HANDLE_STEP=int(0.5*(self.left+self.right)*2.0)
self.left-= HANDLE_STEP
self.right+= HANDLE_STEP
Run(self.mL,self.mR,self.csv,self.left,self.right,self.angl)
time.sleep(HANDLE_TIME)
self.left+= HANDLE_STEP
self.right-= HANDLE_STEP
if ch == "f" :
self.left+= STEP
self.right+= STEP
if ch == "d" :
self.left-= STEP
self.right-= STEP
if ch == "s" :
self.left= 0
self.right= 0
self.angl=0
Run(self.mL,self.mR,self.csv,self.left,self.right,self.angl)
#print("\r %4d %4d %4d" % (self.left,self.right,self.angl),end='')
return self.left, self.right
def stop(self):
self.mL.run(0)
self.mR.run(0)
self.csv.move(0)
def Run(mL, mR, sv, left, right, angl):
    """Clamp the commands to the hardware's safe ranges and send them.

    Wheel power is limited to [-100, 100], the servo angle to [-120, 120].
    Each actuator receives its command immediately after clamping.
    """
    left = max(-100, min(100, left))
    mL.run(left)
    right = max(-100, min(100, right))
    mR.run(right)
    angl = max(-120, min(120, angl))
    sv.move(angl)
if __name__=="__main__":
    # Poll the keyboard every SLEEP seconds and forward keys to the controller.
    SLEEP=0.1
    # Bug fix: the controller class defined above is KeyAssign; the original
    # code instantiated the undefined name `SsrRc()`, which raised a
    # NameError at startup.
    rc = KeyAssign()
    key = keyin.Keyboard()
    ch = "c"
    print("Input q to stop.")
    while ch != "q":
        ch = key.read()
        try:
            rc.update(ch)
            time.sleep(SLEEP)
        except KeyboardInterrupt:
            # Ctrl-C: make sure the motors are powered down before exiting.
            rc.stop()
            break
    print("\nTidying up")
    rc.stop()
| 22.841667 | 72 | 0.573148 |
ad60934d6a1f55fd685585f3cf1018233ab25e49 | 1,817 | py | Python | deepthought3/.experiments/ismir2014/train_convnet.py | chanhakim/deepthought | 9f5dd5c7a21da51b65d6049e7a19e29fc3a072f9 | [
"BSD-3-Clause"
] | null | null | null | deepthought3/.experiments/ismir2014/train_convnet.py | chanhakim/deepthought | 9f5dd5c7a21da51b65d6049e7a19e29fc3a072f9 | [
"BSD-3-Clause"
] | 4 | 2021-06-04T20:36:46.000Z | 2021-06-08T22:32:39.000Z | deepthought3/.experiments/ismir2014/train_convnet.py | chanhakim/deepthought3 | 9f5dd5c7a21da51b65d6049e7a19e29fc3a072f9 | [
"BSD-3-Clause"
] | null | null | null | """
Created on Apr 10, 2014
@author: sstober
"""
import logging
import os
log = logging.getLogger(__name__)
import numpy as np
from pylearn2.utils.timing import log_timing
from deepthought3.experiments.ismir2014.util import load_config
from deepthought3.util.yaml_util import load_yaml_file, save_yaml_file
from deepthought3.experiments.ismir2014.plot import scan_for_best_performance
from deepthought3.experiments.ismir2014.extract_results import extract_results
def train_convnet(config):
    """Instantiate the training pipeline from the YAML template and run it.

    The rendered YAML is saved as settings.yaml inside the experiment root
    so the exact configuration of the run is preserved.
    """
    template_path = os.path.join(os.path.dirname(__file__), "train_convnet_template.yaml")
    train, yaml_str = load_yaml_file(template_path, params=config)

    settings_path = os.path.join(config.experiment_root, "settings.yaml")
    save_yaml_file(yaml_str, settings_path)

    with log_timing(log, "training network"):
        train.main_loop()
def get_default_config_path():
    """Return the path of train_convnet.cfg located next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "train_convnet.cfg")
if __name__ == "__main__":
config = load_config(default_config=get_default_config_path(), reset_logging=False)
if not config.get("only_extract_results", False):
train_convnet(config)
scan_for_best_performance(config.experiment_root, "valid_y_misclass")
scan_for_best_performance(config.experiment_root, "valid_ptrial_misclass_rate")
values = extract_results(config.experiment_root, mode="misclass")
print(
np.multiply(
100,
[
# 1 - values['test_y_misclass'],
# 1 - values['test_wseq_misclass_rate'],
# 1 - values['test_wtrial_misclass_rate']]);
1 - values["frame_misclass"],
1 - values["sequence_misclass"],
1 - values["trial_misclass"],
],
)
)
| 28.84127 | 87 | 0.664832 |
fbbb541299434d447195f7f3e03119b1771e56e2 | 219 | py | Python | test/end-to-end/Graphs/Python/main.py | HighSchoolHacking/GLS-Draft | 9e418b6290e7c8e3f2da87668784bdba1cde5a76 | [
"MIT"
] | 30 | 2019-10-29T12:47:50.000Z | 2022-02-12T06:41:39.000Z | test/end-to-end/Graphs/Python/main.py | HighSchoolHacking/GLS-Draft | 9e418b6290e7c8e3f2da87668784bdba1cde5a76 | [
"MIT"
] | 247 | 2017-09-21T17:11:18.000Z | 2019-10-08T12:59:07.000Z | test/end-to-end/Graphs/Python/main.py | HighSchoolHacking/GLS-Draft | 9e418b6290e7c8e3f2da87668784bdba1cde5a76 | [
"MIT"
] | 17 | 2017-10-01T16:53:20.000Z | 2018-11-28T07:20:35.000Z | from data.unweighted_node import UnweightedNode
from data.weighted_node import WeightedNode
from testing.tests import test_unweighted, test_weighted
if __name__ == "__main__":
test_unweighted()
test_weighted()
| 27.375 | 56 | 0.812785 |
3f45344c0f148e99b5fc9f47ba81e9a36868c335 | 178 | py | Python | software/glasgow/__init__.py | gregdavill/Glasgow | 85f9279e48383453eb116b641473940b8d018307 | [
"Apache-2.0",
"0BSD"
] | 6 | 2020-01-09T10:05:00.000Z | 2021-03-10T07:07:15.000Z | software/glasgow/__init__.py | q3k/Glasgow | 9fa367d486b458ead9225a7a1b1a50cb57f40564 | [
"Apache-2.0",
"0BSD"
] | null | null | null | software/glasgow/__init__.py | q3k/Glasgow | 9fa367d486b458ead9225a7a1b1a50cb57f40564 | [
"Apache-2.0",
"0BSD"
] | 1 | 2020-12-28T11:42:24.000Z | 2020-12-28T11:42:24.000Z | import logging
# Register a custom TRACE level (numeric value 5, below DEBUG) and expose it
# both as logging.TRACE and as a Logger.trace() convenience method.
logging.addLevelName(5, "TRACE")
logging.TRACE = 5


def _trace(self, msg, *args, **kwargs):
    """Log *msg* at the custom TRACE level."""
    self.log(logging.TRACE, msg, *args, **kwargs)


logging.Logger.trace = _trace
| 22.25 | 59 | 0.696629 |
920920a8ddef03bd1cde967e8525b94387f997fb | 2,391 | py | Python | configs/cosod_path.py | lartpang/CoSaliencyProj | fe2acf887c74977010aed8e2d519a503a7e75ae3 | [
"MIT"
] | 1 | 2020-12-26T04:03:13.000Z | 2020-12-26T04:03:13.000Z | configs/cosod_path.py | lartpang/CoSaliencyProj | fe2acf887c74977010aed8e2d519a503a7e75ae3 | [
"MIT"
] | null | null | null | configs/cosod_path.py | lartpang/CoSaliencyProj | fe2acf887c74977010aed8e2d519a503a7e75ae3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/12/3
# @Author : Lart Pang
# @GitHub : https://github.com/lartpang
import os
# Root directory holding all co-saliency (CoSOD) benchmark datasets.
_CoSOD_ROOT = "/home/lart/Datasets/Saliency/CoSOD"


def _part(dataset, subdir, suffix):
    """One annotation part: the absolute directory of *subdir* inside *dataset*
    under the CoSOD root, together with its file-name suffix."""
    return dict(path=os.path.join(_CoSOD_ROOT, dataset, subdir), suffix=suffix)


# Each dataset entry maps part names (image/mask/bbox/instance) to
# {path, suffix} descriptors, plus the dataset's root directory.
COCO9213 = dict(
    root=os.path.join(_CoSOD_ROOT, "COCO9213-os"),
    image=_part("COCO9213-os", "img", ".png"),
    mask=_part("COCO9213-os", "gt", ".png"),
)
CoCA = dict(
    root=os.path.join(_CoSOD_ROOT, "CoCA"),
    image=_part("CoCA", "image", ".jpg"),
    mask=_part("CoCA", "binary", ".png"),
    bbox=_part("CoCA", "bbox", ".txt"),
    instance=_part("CoCA", "instance", ".png"),
)
CoSal2015 = dict(
    root=os.path.join(_CoSOD_ROOT, "CoSal2015"),
    image=_part("CoSal2015", "Image", ".jpg"),
    mask=_part("CoSal2015", "GroundTruth", ".png"),
)
CoSOD3k = dict(
    root=os.path.join(_CoSOD_ROOT, "CoSOD3k"),
    image=_part("CoSOD3k", "Image", ".jpg"),
    mask=_part("CoSOD3k", "GroundTruth", ".png"),
    bbox=_part("CoSOD3k", "BoundingBox", ".txt"),
    instance=_part("CoSOD3k", "SegmentationObject", ".png"),
)
iCoSeg = dict(
    root=os.path.join(_CoSOD_ROOT, "iCoSeg"),
    image=_part("iCoSeg", "Image", ".jpg"),
    mask=_part("iCoSeg", "GroundTruth", ".png"),
)
ImagePair = dict(
    root=os.path.join(_CoSOD_ROOT, "ImagePair"),
    image=_part("ImagePair", "Image", ".jpg"),
    mask=_part("ImagePair", "GroundTruth", ".png"),
)
MSRC = dict(
    root=os.path.join(_CoSOD_ROOT, "MSRC"),
    image=_part("MSRC", "Image", ".jpg"),
    mask=_part("MSRC", "GroundTruth", ".png"),
)
WICOS = dict(
    root=os.path.join(_CoSOD_ROOT, "WICOS"),
    image=_part("WICOS", "Image", ".jpg"),
    mask=_part("WICOS", "GroundTruth", ".png"),
)
| 44.277778 | 98 | 0.664157 |
f3b5010e3f753ebc37456b47850fe3c176915633 | 1,563 | py | Python | setup.py | gyusang/jajucha | 91ebcf8816f8591da955676a4aee578b5f1c4ad9 | [
"MIT-0",
"BSD-3-Clause"
] | 3 | 2020-12-25T08:08:39.000Z | 2022-01-04T14:15:39.000Z | setup.py | gyusang/jajucha | 91ebcf8816f8591da955676a4aee578b5f1c4ad9 | [
"MIT-0",
"BSD-3-Clause"
] | null | null | null | setup.py | gyusang/jajucha | 91ebcf8816f8591da955676a4aee578b5f1c4ad9 | [
"MIT-0",
"BSD-3-Clause"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name='jajucha',
packages=setuptools.find_packages(),
version='2.2.3',
license='MIT',
description='Controller Library for jajucha, a model car for autonomous driving education.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Sanggyu Lee',
author_email='sanggyu523@naver.com',
url='https://github.com/gyusang/jajucha',
download_url='https://github.com/gyusang/jajucha/archive/v_2_2_3.tar.gz',
project_urls={
'Source': 'https://github.com/gyusang/jajucha',
'Report Bugs': 'https://github.com/gyusang/jajucha/issues'
},
keywords=['education', 'autonomous driving', 'jajucha', '자주차'],
install_requires=[
'numpy!=1.19.4',
'opencv-python',
'pyzmq',
'imagezmq',
'pillow',
'scipy',
],
python_requires='~=3.7',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Topic :: Education',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
package_data={
'jajucha': ['ABOUT.txt', 'CREDITS.txt'],
}
)
| 33.255319 | 96 | 0.618682 |
8ca7f61056ad111b039f7f71bedc374f47befc5b | 36,753 | py | Python | label_studio/project.py | abduhbm/label-studio | 9a5110d411073e951b84099fa29a5abfc7c0f41d | [
"Apache-2.0"
] | 5 | 2021-04-09T07:54:38.000Z | 2021-09-28T11:42:22.000Z | label_studio/project.py | abduhbm/label-studio | 9a5110d411073e951b84099fa29a5abfc7c0f41d | [
"Apache-2.0"
] | 10 | 2021-01-12T05:56:29.000Z | 2021-05-11T21:37:59.000Z | label_studio/project.py | abduhbm/label-studio | 9a5110d411073e951b84099fa29a5abfc7c0f41d | [
"Apache-2.0"
] | 3 | 2020-09-28T21:34:47.000Z | 2021-01-29T02:04:19.000Z | import os
import io
import logging
import json
import random
from shutil import copy2
from collections import defaultdict, OrderedDict
from operator import itemgetter
from xml.etree import ElementTree
from uuid import uuid4
from copy import deepcopy
from label_studio_converter import Converter
from label_studio.utils.misc import (
config_line_stripped, config_comments_free, parse_config, timestamp_now, timestamp_to_local_datetime)
from label_studio.utils.analytics import Analytics
from label_studio.utils.models import ProjectObj, MLBackend
from label_studio.utils.exceptions import ValidationError
from label_studio.utils.io import find_file, delete_dir_content, json_load
from label_studio.utils.validation import is_url
from label_studio.utils.functions import get_full_hostname
from label_studio.tasks import Tasks
from label_studio.storage import create_storage, get_available_storage_names
logger = logging.getLogger(__name__)
class ProjectNotFound(KeyError):
pass
class Project(object):
_storage = {}
def __init__(self, config, name, root_dir='.', context=None):
self.config = config
self.name = name
self.path = os.path.join(root_dir, self.name)
self.on_boarding = {}
self.context = context or {}
self.source_storage = None
self.target_storage = None
self.create_storages()
self.tasks = None
self.label_config_line, self.label_config_full, self.parsed_label_config, self.input_data_tags = None, None, None, None # noqa
self.derived_input_schema, self.derived_output_schema = None, None
self.load_label_config()
self.update_derived_input_schema()
self.update_derived_output_schema()
self.analytics = None
self.load_analytics()
self.project_obj = None
self.ml_backends = []
self.load_project_ml_backend()
self.converter = None
self.load_converter()
self.max_tasks_file_size = 250
def get_storage(self, storage_for):
if storage_for == 'source':
return self.source_storage
elif storage_for == 'target':
return self.target_storage
def get_available_storage_names(self, storage_for):
if storage_for == 'source':
return self.get_available_source_storage_names()
elif storage_for == 'target':
return self.get_available_target_storage_names()
@classmethod
def get_available_source_storages(cls):
return ['tasks-json', 's3', 'gcs']
@classmethod
def get_available_target_storages(cls):
return ['completions-dir', 's3-completions', 'gcs-completions']
def get_available_source_storage_names(self):
names = OrderedDict()
nameset = set(self.get_available_source_storages())
for name, desc in get_available_storage_names().items():
# we don't expose configurable filesystem storage in UI to avoid security problems
if name in nameset:
names[name] = desc
return names
def get_available_target_storage_names(self):
names = OrderedDict()
nameset = set(self.get_available_target_storages())
for name, desc in get_available_storage_names().items():
# blobs have no sense for target storages
if name in nameset:
names[name] = desc
return names
def create_storages(self):
source = self.config['source']
target = self.config['target']
self.source_storage = create_storage(source['type'], 'source', source['path'], self.path, self,
**source.get('params', {}))
self.target_storage = create_storage(target['type'], 'target', target['path'], self.path, self,
**target.get('params', {}))
def update_storage(self, storage_for, storage_kwargs):
def _update_storage(storage_for, storage_kwargs):
storage_name = storage_kwargs.pop('name', storage_for)
storage_type = storage_kwargs.pop('type')
storage_path = storage_kwargs.pop('path', None)
# storage_path = self.config[storage_for]['path']
storage = create_storage(storage_type, storage_name, storage_path, self.path, self, **storage_kwargs)
self.config[storage_for] = {
'name': storage_name,
'type': storage_type,
'path': storage_path,
'params': storage_kwargs
}
self._save_config()
logger.debug('Created storage type "' + storage_type + '"')
return storage
if storage_for == 'source':
self.source_storage = _update_storage('source', storage_kwargs)
elif storage_for == 'target':
self.target_storage = _update_storage('target', storage_kwargs)
self.update_derived_input_schema()
self.update_derived_output_schema()
@property
def can_manage_tasks(self):
return self.config['source']['type'] not in {'s3', 's3-completions', 'gcs', 'gcs-completions'}
@property
def can_manage_completions(self):
return self.config['target']['type'] not in {'s3', 's3-completions', 'gcs', 'gcs-completions'}
@property
def can_delete_tasks(self):
return self.can_manage_tasks and self.can_manage_completions
@property
def data_types_json(self):
return self.project_obj.data_types_json
def load_label_config(self):
self.label_config_full = config_comments_free(open(self.config['label_config'], encoding='utf8').read())
self.label_config_line = config_line_stripped(self.label_config_full)
self.parsed_label_config = parse_config(self.label_config_line)
self.input_data_tags = self.get_input_data_tags(self.label_config_line)
def update_derived_input_schema(self):
    """Recompute `derived_input_schema`: the set of data keys common to
    every task in source storage (empty when there are no tasks).

    Bug fix: the original seeded the intersection via truthiness
    (`if not self.derived_input_schema`), so whenever the running
    intersection became empty mid-scan it was silently re-seeded with the
    next task's keys.  A `None` sentinel distinguishes "not started yet"
    from a legitimately empty intersection.
    """
    schema = None
    for task_id, task in self.source_storage.items():
        data_keys = set(task['data'].keys())
        if schema is None:
            schema = data_keys
        else:
            schema &= data_keys
    self.derived_input_schema = schema if schema is not None else set()
    logger.debug('Derived input schema: ' + str(self.derived_input_schema))
def update_derived_output_schema(self):
self.derived_output_schema = {
'from_name_to_name_type': set(),
'labels': defaultdict(set)
}
# for all already completed tasks we update derived output schema for further label config validation
for task_id, c in self.target_storage.items():
for completion in c['completions']:
self._update_derived_output_schema(completion)
logger.debug('Derived output schema: ' + str(self.derived_output_schema))
def load_analytics(self):
collect_analytics = os.getenv('collect_analytics')
if collect_analytics is None:
collect_analytics = self.config.get('collect_analytics', True)
collect_analytics = bool(int(collect_analytics))
self.analytics = Analytics(self.label_config_line, collect_analytics, self.name, self.context)
def add_ml_backend(self, params, raise_on_error=True):
ml_backend = MLBackend.from_params(params)
if not ml_backend.connected and raise_on_error:
raise ValueError('ML backend with URL: "' + str(params['url']) + '" is not connected.')
self.ml_backends.append(ml_backend)
def remove_ml_backend(self, name):
# remove from memory
remove_idx = next((i for i, b in enumerate(self.ml_backends) if b.model_name == name), None)
if remove_idx is None:
raise KeyError('Can\'t remove ML backend with name "' + name + '": not found.')
self.ml_backends.pop(remove_idx)
# remove from config
config_params = self.config.get('ml_backends', [])
remove_idx = next((i for i, b in enumerate(config_params) if b['name'] == name), None)
if remove_idx is not None:
config_params.pop(remove_idx)
self.config['ml_backends'] = config_params
self._save_config()
def load_project_ml_backend(self):
# configure project
self.project_obj = ProjectObj(label_config=self.label_config_line, label_config_full=self.label_config_full)
# configure multiple machine learning backends
self.ml_backends = []
ml_backends_params = self.config.get('ml_backends', [])
for ml_backend_params in ml_backends_params:
self.add_ml_backend(ml_backend_params, raise_on_error=False)
def load_converter(self):
self.converter = Converter(self.parsed_label_config)
@property
def id(self):
return self.project_obj.id
@property
def data_types(self):
return self.project_obj.data_types
@property
def label_config(self):
return self.project_obj.label_config
@property
def ml_backends_connected(self):
return len(self.ml_backends) > 0
@property
def task_data_login(self):
return self.project_obj.task_data_login
@property
def task_data_password(self):
return self.project_obj.task_data_password
def extract_data_types(self, config):
return self.project_obj.extract_data_types(config)
def validate_label_config(self, config_string):
logger.debug('Validate label config')
self.project_obj.validate_label_config(config_string)
logger.debug('Get parsed config')
parsed_config = parse_config(config_string)
logger.debug('Validate label config on derived input schema')
self.validate_label_config_on_derived_input_schema(parsed_config)
logger.debug('Validate label config on derived output schema')
self.validate_label_config_on_derived_output_schema(parsed_config)
def _save_config(self):
with io.open(self.config['config_path'], mode='w') as f:
json.dump(self.config, f, indent=2)
def update_params(self, params):
if 'ml_backend' in params:
ml_backend_params = self._create_ml_backend_params(params['ml_backend'], self.name)
self.add_ml_backend(ml_backend_params)
self.config['ml_backends'].append(ml_backend_params)
self._save_config()
def update_label_config(self, new_label_config):
label_config_file = self.config['label_config']
# save xml label config to file
new_label_config = new_label_config.replace('\r\n', '\n')
with io.open(label_config_file, mode='w', encoding='utf8') as f:
f.write(new_label_config)
# reload everything that depends on label config
self.load_label_config()
self.update_derived_output_schema()
self.load_analytics()
self.load_project_ml_backend()
self.load_converter()
# save project config state
self.config['label_config_updated'] = True
with io.open(self.config['config_path'], mode='w', encoding='utf8') as f:
json.dump(self.config, f)
logger.info('Label config saved to: {path}'.format(path=label_config_file))
def _update_derived_output_schema(self, completion):
"""
Given completion, output schema is updated. Output schema consists of unique tuples (from_name, to_name, type)
and list of unique labels derived from existed completions
:param completion:
:return:
"""
for result in completion['result']:
result_type = result.get('type')
if result_type in ('relation', 'rating', 'pairwise'):
continue
if 'from_name' not in result or 'to_name' not in result:
logger.error('Unexpected completion.result format: "from_name" or "to_name" not found in %r' % result)
continue
self.derived_output_schema['from_name_to_name_type'].add((
result['from_name'], result['to_name'], result_type
))
for label in result['value'].get(result_type, []):
self.derived_output_schema['labels'][result['from_name']].add(label)
def validate_label_config_on_derived_input_schema(self, config_string_or_parsed_config):
"""
Validate label config on input schemas (tasks types and data keys) derived from imported tasks
:param config_string_or_parsed_config: label config string or parsed config object
:return: True if config match already imported tasks
"""
# check if schema exists, i.e. at least one task has been uploaded
if not self.derived_input_schema:
return
config = config_string_or_parsed_config
if isinstance(config, str):
config = parse_config(config)
input_types, input_values = set(), set()
for input_items in map(itemgetter('inputs'), config.values()):
for input_item in input_items:
input_types.add(input_item['type'])
input_values.add(input_item['value'])
# check input data values: they must be in schema
for item in input_values:
if item not in self.derived_input_schema:
raise ValidationError(
'You have already imported tasks and they are incompatible with a new config. '
'You\'ve specified value=${item}, but imported tasks contain only keys: {input_schema_values}'
.format(item=item, input_schema_values=list(self.derived_input_schema)))
def validate_label_config_on_derived_output_schema(self, config_string_or_parsed_config):
"""
Validate label config on output schema (from_names, to_names and labeling types) derived from completions
:param config_string_or_parsed_config: label config string or parsed config object
:return: True if config match already created completions
"""
output_schema = self.derived_output_schema
# check if schema exists, i.e. at least one completion has been created
if not output_schema['from_name_to_name_type']:
return
config = config_string_or_parsed_config
if isinstance(config, str):
config = parse_config(config)
completion_tuples = set()
for from_name, to in config.items():
completion_tuples.add((from_name, to['to_name'][0], to['type'].lower()))
for from_name, to_name, type in output_schema['from_name_to_name_type']:
if (from_name, to_name, type) not in completion_tuples:
raise ValidationError(
'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: '
'name={from_name}, toName={to_name}, type={type} are expected'
.format(from_name=from_name, to_name=to_name, type=type)
)
for from_name, expected_label_set in output_schema['labels'].items():
if from_name not in config:
raise ValidationError(
'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: '
'name=' + from_name + ' is expected'
)
found_labels = set(config[from_name]['labels'])
extra_labels = list(expected_label_set - found_labels)
if extra_labels:
raise ValidationError(
'You\'ve already completed some tasks, but some of them couldn\'t be loaded with this config: '
'there are labels already created for "{from_name}":\n{extra_labels}'
.format(from_name=from_name, extra_labels=extra_labels)
)
def no_tasks(self):
return self.source_storage.empty()
def delete_tasks(self):
"""
Deletes all tasks & completions from filesystem, then reloads clean project
:return:
"""
self.source_storage.remove_all()
self.target_storage.remove_all()
self.update_derived_input_schema()
self.update_derived_output_schema()
# delete everything on ML backend
if self.ml_backends_connected:
for m in self.ml_backends:
m.clear(self)
def next_task(self, completed_tasks_ids):
completed_tasks_ids = set(completed_tasks_ids)
sampling = self.config.get('sampling', 'sequential')
# Tasks are ordered ascending by their "id" fields. This is default mode.
task_iter = filter(lambda i: i not in self.target_storage, self.source_storage.ids())
if sampling == 'sequential':
task_id = next(task_iter, None)
if task_id is not None:
return self.source_storage.get(task_id)
# Tasks are sampled with equal probabilities
elif sampling == 'uniform':
actual_tasks_ids = list(task_iter)
if not actual_tasks_ids:
return None
random.shuffle(actual_tasks_ids)
return self.source_storage.get(actual_tasks_ids[0])
# Task with minimum / maximum average prediction score is taken
elif sampling.startswith('prediction-score'):
id_score_map = {}
for task_id, task in self.source_storage.items():
if task_id in completed_tasks_ids:
continue
if 'predictions' in task and len(task['predictions']) > 0:
score = sum((p['score'] for p in task['predictions']), 0) / len(task['predictions'])
id_score_map[task_id] = score
if not id_score_map:
return None
if sampling.endswith('-min'):
best_idx = min(id_score_map, key=id_score_map.get)
elif sampling.endswith('-max'):
best_idx = max(id_score_map, key=id_score_map.get)
else:
raise NotImplementedError('Unknown sampling method ' + sampling)
return self.source_storage.get(best_idx)
else:
raise NotImplementedError('Unknown sampling method ' + sampling)
def remove_task(self, task_id):
self.source_storage.remove(task_id)
self.delete_completion(task_id)
self.update_derived_input_schema()
self.update_derived_output_schema()
def get_completions_ids(self):
""" List completion ids from output_dir directory
:return: filenames without extensions and directories
"""
task_ids = set(self.source_storage.ids())
completion_ids = set(self.target_storage.ids())
completions = completion_ids.intersection(task_ids)
#completions = list(self.target_storage.ids())
logger.debug('{num} completions found in {output_dir}'.format(
num=len(completions), output_dir=self.config["output_dir"]))
return sorted(completions)
def get_completed_at(self):
""" Get completed time for tasks
:return: list of string with formatted datetime
"""
times = {}
for _, data in self.target_storage.items():
id = data['id']
try:
latest_time = max(data['completions'], key=itemgetter('created_at'))['created_at']
except Exception as exc:
times[id] = 'undefined'
else:
times[id] = timestamp_to_local_datetime(latest_time).strftime('%Y-%m-%d %H:%M:%S')
return times
def get_skipped_status(self):
""" Get skipped status for tasks: returns skipped completion number for task
:return: list of int
"""
items = {}
for _, data in self.target_storage.items():
id = data['id']
try:
flag = sum([completion.get('skipped', False) for completion in data['completions']])
except Exception as exc:
items[id] = -1
else:
items[id] = flag
return items
def get_task_with_completions(self, task_id):
""" Get task with completions
:param task_id: task ids
:return: json dict with completion
"""
data = self.target_storage.get(task_id)
logger.debug('Get task ' + str(task_id) + ' from target storage: ' + str(data))
if data:
logger.debug('Get predictions ' + str(task_id) + ' from source storage')
# tasks can hold the newest version of predictions, so task it from tasks
data['predictions'] = self.source_storage.get(task_id).get('predictions', [])
return data
def save_completion(self, task_id, completion):
""" Save completion
:param task_id: task id
:param completion: json data from label (editor)
"""
# try to get completions with task first
task = self.get_task_with_completions(task_id)
# init task if completions with task not exists
if not task:
task = deepcopy(self.source_storage.get(task_id))
task['completions'] = []
else:
task = deepcopy(task)
# update old completion
updated = False
if 'id' in completion:
for i, item in enumerate(task['completions']):
if item['id'] == completion['id']:
task['completions'][i].update(completion)
updated = True
# write new completion
if not updated:
completion['id'] = task['id'] * 1000 + len(task['completions']) + 1
task['completions'].append(completion)
try:
self._update_derived_output_schema(completion)
except Exception as exc:
logger.error(exc, exc_info=True)
logger.debug(json.dumps(completion, indent=2))
# save completion time
completion['created_at'] = timestamp_now()
# write task + completions to file
self.target_storage.set(task_id, task)
logger.debug('Completion ' + str(task_id) + ' saved:\n' + json.dumps(task, indent=2))
return completion['id']
def delete_completion(self, task_id):
""" Delete completion from disk
:param task_id: task id
"""
self.target_storage.remove(task_id)
self.update_derived_output_schema()
def make_predictions(self, task):
task = deepcopy(task)
task['predictions'] = []
try:
for ml_backend in self.ml_backends:
if not ml_backend.connected:
continue
predictions = ml_backend.make_predictions(task, self)
predictions['created_by'] = ml_backend.model_name
task['predictions'].append(predictions)
except Exception as exc:
logger.debug(exc)
return task
def train(self):
completions = []
for _, c in self.target_storage.items():
completions.append(c)
train_status = False
if self.ml_backends_connected:
for ml_backend in self.ml_backends:
if ml_backend.connected:
ml_backend.train(completions, self)
train_status = True
return train_status
@classmethod
def get_project_dir(cls, project_name, args):
return os.path.join(args.root_dir, project_name)
@classmethod
def get_input_data_tags(cls, label_config):
    """Return all tags in the label-config XML that bind input data.

    A tag counts as an input binding when it has a non-empty `name`
    attribute and a `value` attribute referencing a task variable
    (i.e. starting with '$').
    """
    result = []
    for tag in ElementTree.fromstring(label_config).iter():
        has_name = bool(tag.attrib.get('name'))
        is_variable = tag.attrib.get('value', '').startswith('$')
        if has_name and is_variable:
            result.append(tag)
    return result
    @classmethod
    def _load_tasks(cls, input_path, args, label_config_file):
        """ Load tasks from input_path according to args.input_format.

        JSON formats need no label config; all other formats require an input
        data tag in the label config to know which task data key to fill.

        :param input_path: file or directory with the input data
        :param args: parsed CLI args (``input_format`` is read here)
        :param label_config_file: path to the label config XML
        :return: tasks dict produced by the Tasks loader
        :raises ValueError: if a non-JSON format is used without input data tags
        """
        with io.open(label_config_file, encoding='utf8') as f:
            label_config = f.read()
        task_loader = Tasks()
        # JSON-based formats carry their own data keys — no config needed
        if args.input_format == 'json':
            return task_loader.from_json_file(input_path)
        if args.input_format == 'json-dir':
            return task_loader.from_dir_with_json_files(input_path)
        input_data_tags = cls.get_input_data_tags(label_config)
        if len(input_data_tags) > 1:
            val = ",".join(tag.attrib.get("name") for tag in input_data_tags)
            print('Warning! Multiple input data tags found: ' +
                  val + '. Only first one is used.')
        elif len(input_data_tags) == 0:
            raise ValueError(
                'You\'ve specified input format "{fmt}" which requires label config being explicitly defined. '
                'Please specify --label-config=path/to/config.xml or use --format=json or format=json_dir'.format(
                    fmt=args.input_format)
            )
        input_data_tag = input_data_tags[0]
        # strip the leading "$" to get the raw task data key
        data_key = input_data_tag.attrib.get('value').lstrip('$')
        if args.input_format == 'text':
            return task_loader.from_text_file(input_path, data_key)
        if args.input_format == 'text-dir':
            return task_loader.from_dir_with_text_files(input_path, data_key)
        if args.input_format == 'image-dir':
            return task_loader.from_dir_with_image_files(input_path, data_key)
        if args.input_format == 'audio-dir':
            return task_loader.from_dir_with_audio_files(input_path, data_key)
        raise RuntimeError('Can\'t load tasks for input format={}'.format(args.input_format))
@classmethod
def _create_ml_backend_params(cls, url, project_name=None):
if '=http' in url:
name, url = url.split('=', 1)
else:
project_name = os.path.basename(project_name or '')
name = project_name + str(uuid4())[:4]
if not is_url(url):
raise ValueError('Specified string "' + url + '" doesn\'t look like URL.')
return {'url': url, 'name': name}
    @classmethod
    def create_project_dir(cls, project_name, args):
        """
        Create project directory in args.root_dir/project_name, and initialize there all required files
        If some files are missed, restore them from defaults.
        If config files are specified by args, copy them in project directory

        :param project_name: name of the project (directory name under root_dir)
        :param args: parsed CLI args (force, config_path, input/source/target options)
        :return: absolute path of the created project directory
        :raises RuntimeError: if a file already exists and --force was not given
        """
        dir = cls.get_project_dir(project_name, args)
        if args.force:
            delete_dir_content(dir)
        os.makedirs(dir, exist_ok=True)
        # start from the user-supplied config, falling back to the packaged default
        config = json_load(args.config_path) if args.config_path else json_load(find_file('default_config.json'))

        def already_exists_error(what, path):
            # shared error for any resource that would be overwritten without --force
            raise RuntimeError('{path} {what} already exists. Use "--force" option to recreate it.'.format(
                path=path, what=what
            ))

        input_path = args.input_path or config.get('input_path')
        # save label config
        config_xml = 'config.xml'
        config_xml_path = os.path.join(dir, config_xml)
        label_config_file = args.label_config or config.get('label_config')
        if label_config_file:
            copy2(label_config_file, config_xml_path)
            print(label_config_file + ' label config copied to ' + config_xml_path)
        else:
            if os.path.exists(config_xml_path) and not args.force:
                already_exists_error('label config', config_xml_path)
            if not input_path:
                # create default config with polygons only if input data is not set
                default_label_config = find_file('examples/image_polygons/config.xml')
                copy2(default_label_config, config_xml_path)
                print(default_label_config + ' label config copied to ' + config_xml_path)
            else:
                # input data is known, so leave the label config for the user to fill in
                with io.open(config_xml_path, mode='w') as fout:
                    fout.write('<View></View>')
                print('Empty config has been created in ' + config_xml_path)
        config['label_config'] = config_xml
        if args.source:
            # explicit source storage requested on the command line
            config['source'] = {
                'type': args.source,
                'path': args.source_path,
                'params': args.source_params
            }
        else:
            # save tasks.json
            tasks_json = 'tasks.json'
            tasks_json_path = os.path.join(dir, tasks_json)
            if input_path:
                tasks = cls._load_tasks(input_path, args, config_xml_path)
            else:
                tasks = {}
            with io.open(tasks_json_path, mode='w') as fout:
                json.dump(tasks, fout, indent=2)
            config['input_path'] = tasks_json
            config['source'] = {
                'name': 'Tasks',
                'type': 'tasks-json',
                'path': os.path.abspath(tasks_json_path)
            }
            logger.debug('{tasks_json_path} input file with {n} tasks has been created from {input_path}'.format(
                tasks_json_path=tasks_json_path, n=len(tasks), input_path=input_path))
        if args.target:
            # explicit target storage requested on the command line
            config['target'] = {
                'type': args.target,
                'path': args.target_path,
                'params': args.target_params
            }
        else:
            # default target: a local "completions" directory inside the project
            completions_dir = os.path.join(dir, 'completions')
            if os.path.exists(completions_dir) and not args.force:
                already_exists_error('output dir', completions_dir)
            if os.path.exists(completions_dir):
                delete_dir_content(completions_dir)
                print(completions_dir + ' output dir already exists. Clear it.')
            else:
                os.makedirs(completions_dir, exist_ok=True)
                print(completions_dir + ' output dir has been created.')
            config['output_dir'] = 'completions'
            config['target'] = {
                'name': 'Completions',
                'type': 'completions-dir',
                'path': os.path.abspath(completions_dir)
            }
        # normalize ml_backends to a list before appending CLI-provided backends
        if 'ml_backends' not in config or not isinstance(config['ml_backends'], list):
            config['ml_backends'] = []
        if args.ml_backends:
            for url in args.ml_backends:
                config['ml_backends'].append(cls._create_ml_backend_params(url, project_name))
        # propagate optional server settings from the CLI into the stored config
        if args.sampling:
            config['sampling'] = args.sampling
        if args.port:
            config['port'] = args.port
        if args.host:
            config['host'] = args.host
        if args.allow_serving_local_files:
            config['allow_serving_local_files'] = True
        # create config.json
        config_json = 'config.json'
        config_json_path = os.path.join(dir, config_json)
        if os.path.exists(config_json_path) and not args.force:
            already_exists_error('config', config_json_path)
        with io.open(config_json_path, mode='w') as f:
            json.dump(config, f, indent=2)
        print('')
        print('Label Studio has been successfully initialized. Check project states in ' + dir)
        print('Start the server: label-studio start ' + dir)
        return dir
    @classmethod
    def get_config(cls, project_name, args):
        """Load the stored config.json of the named project under args.root_dir."""
        return cls._get_config(cls.get_project_dir(project_name, args))
    @classmethod
    def _get_config(cls, project_dir, args=None):
        """
        Get config from input args Namespace acquired by Argparser

        Reads ``config.json`` from the project directory, rewrites relative
        paths to absolute ones, and fills in default source/target storages.

        :param project_dir: path to an existing project directory
        :param args: optional parsed CLI args (used only for error messages)
        :return: config dict with absolute paths and storage defaults applied
        :raises FileNotFoundError: if the directory or its config.json is missing
        """
        # check if project directory exists
        if not os.path.exists(project_dir):
            project_name = args.project_name if args is not None else '<project_name>'
            raise FileNotFoundError(
                'Couldn\'t find directory ' + project_dir +
                ', maybe you\'ve missed appending "--init" option:\nlabel-studio start ' +
                project_name + ' --init'
            )
        # check config.json exists in directory
        config_path = os.path.join(project_dir, 'config.json')
        if not os.path.exists(config_path):
            project_name = args.project_name if args is not None else '<project_name>'
            raise FileNotFoundError(
                'Couldn\'t find config file ' + config_path + ' in project directory ' + project_dir +
                ', maybe you\'ve missed appending "--init" option:\nlabel-studio start ' + project_name + ' --init'
            )
        config_path = os.path.abspath(config_path)
        with io.open(config_path) as c:
            config = json.load(c)
        config['config_path'] = config_path
        # stored paths are relative to the config file; make them absolute
        if config.get('input_path'):
            config['input_path'] = os.path.join(os.path.dirname(config_path), config['input_path'])
        config['label_config'] = os.path.join(os.path.dirname(config_path), config['label_config'])
        if config.get('output_dir'):
            config['output_dir'] = os.path.join(os.path.dirname(config_path), config['output_dir'])
        # backfill default storages for configs created before storages existed
        if not config.get('source'):
            config['source'] = {
                'name': 'Tasks',
                'type': 'tasks-json',
                'path': os.path.abspath(config['input_path'])
            }
        if not config.get('target'):
            config['target'] = {
                'name': 'Completions',
                'type': 'completions-dir',
                'path': os.path.abspath(config['output_dir'])
            }
        return config
    @classmethod
    def _load_from_dir(cls, project_dir, project_name, args, context):
        """Instantiate a project object from an existing project directory."""
        config = cls._get_config(project_dir, args)
        return cls(config, project_name, context=context, root_dir=args.root_dir)
    @classmethod
    def get(cls, project_name, args, context):
        """Return an existing project, loading it from disk on first access.

        :raises ProjectNotFound: if no project directory exists for the name
        """
        # If project stored in memory, just return it
        if project_name in cls._storage:
            return cls._storage[project_name]
        # If project directory exists, load project from directory and update in-memory storage
        project_dir = cls.get_project_dir(project_name, args)
        if os.path.exists(project_dir):
            project = cls._load_from_dir(project_dir, project_name, args, context)
            cls._storage[project_name] = project
            return project
        raise ProjectNotFound('Project {p} doesn\'t exist'.format(p=project_name))
    @classmethod
    def create(cls, project_name, args, context):
        """Create the project directory with resources and cache the project."""
        # "create" method differs from "get" as it can create new directory with project resources
        project_dir = cls.create_project_dir(project_name, args)
        project = cls._load_from_dir(project_dir, project_name, args, context)
        cls._storage[project_name] = project
        return project
    @classmethod
    def get_or_create(cls, project_name, args, context):
        """Return an existing project, creating it first when it doesn't exist."""
        try:
            project = cls.get(project_name, args, context)
            logger.info('Get project "' + project_name + '".')
        except ProjectNotFound:
            project = cls.create(project_name, args, context)
            logger.info('Project "' + project_name + '" created.')
        return project
    def update_on_boarding_state(self):
        """Refresh and return the on-boarding checklist flags.

        setup: label config was updated; import: at least one task exists;
        labeled: at least one completion has been saved.
        """
        self.on_boarding['setup'] = self.config.get('label_config_updated', False)
        self.on_boarding['import'] = not self.no_tasks()
        self.on_boarding['labeled'] = not self.target_storage.empty()
        return self.on_boarding
    @property
    def generate_sample_task_escape(self):
        # delegate to the underlying label-config project object
        return self.project_obj.generate_sample_task_escape
    @property
    def supported_formats(self):
        # delegate to the underlying label-config project object
        return self.project_obj.supported_formats
    def serialize(self):
        """ Serialize project to json dict

        Collects counts, capability flags, storage descriptions and sync
        states into one dict suitable for the API/frontend.
        """
        project = self
        # storage backends that must not be offered for selection
        banlist = ('json', 'dir-jsons')
        available_storages = list(filter(lambda i: i[0] not in banlist, get_available_storage_names().items()))
        output = {
            'project_name': project.name,
            'task_count': len(project.source_storage.ids()),
            'completion_count': len(project.get_completions_ids()),
            'config': project.config,
            'can_manage_tasks': project.can_manage_tasks,
            'can_manage_completions': project.can_manage_completions,
            'can_delete_tasks': project.can_delete_tasks,
            'target_storage': {'readable_path': project.target_storage.readable_path},
            'source_storage': {'readable_path': project.source_storage.readable_path},
            'available_storages': available_storages,
            'source_syncing': self.source_storage.is_syncing,
            'target_syncing': self.target_storage.is_syncing,
            'data_types': self.data_types
        }
        return output
| 41.018973 | 135 | 0.625745 |
410733d317c1d9bee69738962a5e62ba4bb3589e | 1,857 | py | Python | project/account/views.py | fael07/Company-Management | e91bbf7bc251edced127c8e5f6acf7bbdf38a580 | [
"MIT"
] | null | null | null | project/account/views.py | fael07/Company-Management | e91bbf7bc251edced127c8e5f6acf7bbdf38a580 | [
"MIT"
] | null | null | null | project/account/views.py | fael07/Company-Management | e91bbf7bc251edced127c8e5f6acf7bbdf38a580 | [
"MIT"
] | null | null | null | # Usuário
from django.contrib.auth.models import User
from django.contrib import messages, auth
from .support import *
# Rendering helpers
from django.shortcuts import render, redirect
def login(request):
    """Authenticate a user from POSTed credentials and start a session.

    Already-authenticated users are redirected to their account page; GET
    requests render the login form. On bad credentials an error message is
    flashed and the user is sent back to the login page.
    """
    user = auth.get_user(request)
    if user.is_authenticated:
        # already logged in: go straight to the account page
        return redirect('minha_conta')
    elif request.method != 'POST':
        # first visit (GET): show the login form
        return render(request, 'login.html')
    usuario = request.POST.get('usuario')
    senha = request.POST.get('senha')
    user = auth.authenticate(request, username=usuario, password=senha)
    if user is None:
        messages.add_message(request, messages.ERROR, 'Usuário ou senha inválidos')
        return redirect('login')
    else:
        auth.login(request, user)
        messages.add_message(request, messages.SUCCESS, 'Login realizado com sucesso')
        return redirect('minha_conta')
def cadastro(request):
    """Register a new user from the sign-up form.

    GET requests render the form; authenticated users are redirected to their
    account page. Validation failures send the user back to the form.
    """
    user = auth.get_user(request)
    if user.is_authenticated:
        return redirect('minha_conta')
    elif request.method != 'POST':
        return render(request, 'cadastro.html')
    foto = request.POST.get('foto')
    if checks_null([foto]):
        # fall back to the default company avatar when no photo was given
        foto = 'images/empresa.jpg'
    nome = request.POST.get('nome').title()
    email = request.POST.get('email')
    usuario = request.POST.get('usuario')
    senha = request.POST.get('senha')
    senha2 = request.POST.get('senha2')
    if not validate_cadastro_usuario(request, usuario, senha, senha2, nome, email):
        return redirect('cadastro')
    # NOTE(review): passing foto= to create_user assumes a custom User model
    # with a "foto" field — confirm; the stock django.contrib.auth User has none
    new_user = User.objects.create_user(username=usuario, email=email, password=senha, first_name=nome, last_name='', foto=foto)
    new_user.save()
    messages.add_message(request, messages.SUCCESS, f'{usuario} foi registrado com sucesso')
    return redirect('login')
def logout(request):
    """Terminate the current session and send the user to the login page."""
    auth.logout(request)
    return redirect('login')
73e04b571db19bbc8434f66e0b0deae5db4b83ba | 5,878 | py | Python | Machine Learning/Preparing Data for Machine Learning Models/extracting_features_from_images_ml.py | data-intelligence-analysis/data_science | 66b5d130129a6b001684f86c58cd91e3f9382140 | [
"Apache-2.0"
] | 2 | 2020-09-23T04:08:25.000Z | 2021-03-17T03:07:59.000Z | Machine Learning/Preparing Data for Machine Learning Models/extracting_features_from_images_ml.py | data-intelligence-analysis/data_science | 66b5d130129a6b001684f86c58cd91e3f9382140 | [
"Apache-2.0"
] | null | null | null | Machine Learning/Preparing Data for Machine Learning Models/extracting_features_from_images_ml.py | data-intelligence-analysis/data_science | 66b5d130129a6b001684f86c58cd91e3f9382140 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
###Final Data Preprocessing
'''Preparing an unorganized data set to feed the machine learning models in an unbiased way so that the model can give good results, for example we don't want the machine learning model to know that
1st 50 records always go to the object, and the next 80 records will always go to the background and so on. So we have to mix it up for the model to learn'''
## Create a shuffling function
def shuffle(features, labels):
    """Randomly permute feature rows and their labels in lockstep.

    Keeps every (row, label) pair intact so the model cannot learn anything
    from the order of the records.
    """
    order = np.random.permutation(features.shape[0])
    shuffled_features = features[order, :]
    shuffled_labels = labels[order]
    print("Features Shuffled Array Size:", shuffled_features.shape)
    print("Labels Shuffled Array Size:", shuffled_labels.shape)
    print("Shuffled Array labels Content", shuffled_labels)
    return shuffled_features, shuffled_labels
###Extracting Dataset - Extracting Features of an Image
#Function to call the extracted features
#ROI - Region of Interest
def ROI(img):
    """Interactively select background and object regions and build a dataset.

    Opens the OpenCV ROI selector twice (first for the background, then for
    the object), flattens each selected patch to a (pixels, 3) BGR array,
    labels object pixels 0 and background pixels 1, and returns the features
    and labels shuffled in lockstep.

    Fixes over the original: two missing closing parentheses around the
    np.array(cv2.selectROI(...)) calls (syntax errors), and the background
    labels, which were 0 like the object labels — making both classes
    indistinguishable to any classifier.

    :param img: BGR image as a numpy array (as returned by cv2.imread)
    :return: (features, labels) — shuffled pixel rows and 0/1 class labels
    """
    # ---- background region ------------------------------------------------
    # selectROI returns (x, y, w, h) of the rectangle drawn by the user,
    # dragging from upper-left to lower-right
    x, y, w, h = np.array(cv2.selectROI('ROI', img, False))
    background = img[y: y + h, x: x + w]
    print('Background Array Size:', background.shape)
    # flatten (rows, cols, 3) -> (rows*cols, 3): one record per pixel
    background = np.reshape(background, (background.shape[0] * background.shape[1], 3))
    print('Background New Array Size:', background.shape)

    # ---- object region ----------------------------------------------------
    x, y, w, h = np.array(cv2.selectROI('ROI', img, False))
    object2 = img[y: y + h, x: x + w]
    print("Object Array Size:", object2.shape)
    object2 = np.reshape(object2, (object2.shape[0] * object2.shape[1], 3))
    print('Object New Array Size:', object2.shape)

    # ---- combine and label ------------------------------------------------
    # object pixels first, then background pixels
    features = np.concatenate((object2, background), axis=0)
    print("Features Array Size:", features.shape)
    # label 0 for object pixels, 1 for background pixels
    object_labels = np.full(object2.shape[0], 0)
    background_labels = np.full(background.shape[0], 1)
    labels = np.concatenate((object_labels, background_labels), axis=0)
    print("Labels Array Size:", labels.shape)
    # [0,0...1,1]
    print("Labels Array Content:", labels)
    features, labels = shuffle(features, labels)
    return features, labels
# Features as Numpy arrays
if __name__ == '__main__':
    # cv2.imread flag: 1 reads the image in color, 0 in grayscale
    # use a raw string (r'...') so backslashes/spaces in the path survive
    # cv2.imread(r'directory to image', 1)
    img = cv2.imread(r'<directory_to_image>', 1)
    # run the interactive ROI selection; returns shuffled features and labels
    features, labels=ROI(img)
    # drawing a rectangle on the image produces the 4 rectangle coordinates
    # kept for reference: preview the image in a window
    '''# cv2.imshow('window_name', image read from directory)
    cv2.imshow('Picture', img)
    #write zero to terminate the window manually
    cv2.waitKey(0)
    #destroy all windows
    cv2.destroyAllWindows()'''
2fdcc7b24635b91afa8835eb3af0edaaafc1d14d | 5,644 | py | Python | launch/launch/launch_introspector.py | oswinso/launch | abaa861f68f60b77585284c6ebf9fcd5bea419db | [
"Apache-2.0"
] | null | null | null | launch/launch/launch_introspector.py | oswinso/launch | abaa861f68f60b77585284c6ebf9fcd5bea419db | [
"Apache-2.0"
] | null | null | null | launch/launch/launch_introspector.py | oswinso/launch | abaa861f68f60b77585284c6ebf9fcd5bea419db | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the LaunchIntrospector class."""
from typing import cast
from typing import List
from typing import Text
from .action import Action
from .actions import EmitEvent
from .actions import ExecuteProcess
from .actions import LogInfo
from .actions import RegisterEventHandler
from .event_handler import EventHandler
from .launch_description import LaunchDescription
from .launch_description_entity import LaunchDescriptionEntity
from .some_substitutions_type import SomeSubstitutionsType
from .utilities import is_a
from .utilities import normalize_to_list_of_substitutions
def indent(lines: List[Text], indention: Text = '    ') -> List[Text]:
    """Prefix each given line with the indention string and return the result."""
    return [indention + line for line in lines]
def tree_like_indent(lines: List[Text]) -> List[Text]:
    """Replace whitespace with "tree"-like indentation symbols.

    Assumes 4-space indentation levels (as produced by indent()). Lines that
    are not indented at all are passed over. When a new line is at the same
    or shallower depth than the previous one, the previous "└" elbow is
    converted to a "├" tee so the branch continues.

    NOTE(review): result[-1] is read before any line is appended — an input
    whose first indented line is missing would raise IndexError; confirm
    callers always pass at least one indented line.
    """
    result = []
    previous_first_non_whitespace = None
    for old_line in lines:
        # skip lines with no leading indentation
        if not old_line.startswith(' '):
            continue
        line = str(old_line)
        line = '│' + line[1:]
        first_non_whitespace = len(old_line) - len(old_line.lstrip())
        if previous_first_non_whitespace is not None and '└' in result[-1]:
            if previous_first_non_whitespace <= first_non_whitespace:
                # previous entry was not the last sibling after all
                result[-1] = result[-1].replace('└', '├', 1)
        previous_first_non_whitespace = first_non_whitespace
        # replace the last 4 spaces of indentation with an elbow connector
        line = line[0:first_non_whitespace - 4] + '└── ' + line[first_non_whitespace:]
        result.append(line)
    if result[-1].startswith('│'):
        result[-1] = ' ' + result[-1][1:]
    # TODO(wjwwood): figure out how to handle remaining cases like how to fix this sample:
    # ├── OnProcessExit(...)
    # │   └── Action(...)
    # ├── OnProcessExit(...)
    # └── Action(...)
    #     ^ this dangling stub
    return result
def format_entities(entities: List[LaunchDescriptionEntity]) -> List[Text]:
    """Return text lines describing each launch description entity in the list."""
    lines: List[Text] = []
    for entity in entities:
        if not is_a(entity, Action):
            # anything that is not an Action gets a generic one-line stub
            lines.append("Unknown entity('{}')".format(entity))
            continue
        lines.extend(format_action(cast(Action, entity)))
    return lines
def format_substitutions(substitutions: SomeSubstitutionsType) -> Text:
    """Return a text representation of some set of substitutions."""
    return ' + '.join(
        sub.describe()
        for sub in normalize_to_list_of_substitutions(substitutions)
    )
def format_event_handler(event_handler: EventHandler) -> List[Text]:
    """Return a text representation of an event handler."""
    if not hasattr(event_handler, 'describe'):
        # no describe() available: identify the handler by its address only
        return ["EventHandler('{}')".format(hex(id(event_handler)))]
    # TODO(wjwwood): consider supporting more complex descriptions of branching
    description, entities = event_handler.describe()  # type: ignore
    return [description] + indent(format_entities(entities))
def format_action(action: Action) -> List[Text]:
    """Return a text representation of an action.

    Known action types get a dedicated rendering; anything else falls back
    to a generic one-line Action(...) stub.
    """
    if is_a(action, LogInfo):
        return ['LogInfo({})'.format(format_substitutions(cast(LogInfo, action).msg))]
    elif is_a(action, EmitEvent):
        return ["EmitEvent(event='{}')".format(cast(EmitEvent, action).event.name)]
    elif is_a(action, ExecuteProcess):
        typed_action = cast(ExecuteProcess, action)
        # cwd and env may be None; render them literally in that case
        msg = 'ExecuteProcess(cmd=[{}], cwd={}, env={}, shell={})'.format(
            ', '.join([format_substitutions(x) for x in typed_action.cmd]),
            typed_action.cwd if typed_action.cwd is None else "'{}'".format(
                format_substitutions(typed_action.cwd)
            ),
            typed_action.env if typed_action.env is None else '{' + ', '.join(
                ['{}: {}'.format(format_substitutions(k), format_substitutions(v))
                 for k, v in typed_action.env]) + '}',
            typed_action.shell,
        )
        return [msg]
    elif is_a(action, RegisterEventHandler):
        # Different variable name used to assist with type checking.
        typed_action2 = cast(RegisterEventHandler, action)
        result = ["RegisterEventHandler('{}'):".format(typed_action2.event_handler)]
        # nest the handler description one level under the register line
        result.extend(indent(format_event_handler(typed_action2.event_handler)))
        return result
    else:
        return ["Action('{}')".format(action)]
class LaunchIntrospector:
    """Provides an interface through which you can visit all entities of a LaunchDescription."""

    def format_launch_description(self, launch_description: LaunchDescription) -> Text:
        """Return a string representation of a LaunchDescription."""
        header = '{}\n'.format(launch_description)
        body = tree_like_indent(indent(format_entities(launch_description.entities)))
        return header + '\n'.join(body)
5f8fefb2ac25a44581571469eb00654e1fa5157d | 380 | py | Python | questions/migrations/0005_auto_20150211_1929.py | Kuzenkov/SimpleAnalogueStackOverflow | ae102279ae72170cc84c7c7b08e5c3bf1b7ea61a | [
"MIT"
] | null | null | null | questions/migrations/0005_auto_20150211_1929.py | Kuzenkov/SimpleAnalogueStackOverflow | ae102279ae72170cc84c7c7b08e5c3bf1b7ea61a | [
"MIT"
] | null | null | null | questions/migrations/0005_auto_20150211_1929.py | Kuzenkov/SimpleAnalogueStackOverflow | ae102279ae72170cc84c7c7b08e5c3bf1b7ea61a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0004_auto_20150211_0703'),
]
operations = [
migrations.AlterModelOptions(
name='question',
options={'ordering': ['-created_on']},
),
]
| 20 | 50 | 0.607895 |
cc20280ebfd2c31c1818b235c24c370df088a4d0 | 751 | py | Python | tests/timing_merge/nboids/nboids_times4.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | tests/timing_merge/nboids/nboids_times4.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | tests/timing_merge/nboids/nboids_times4.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | # Import statements
import subprocess
from os import system
# Variable declarations
np = "10"
cexe = "./Times"
nboids = "50"
nloops = "1000"
k = "7"
maxv = "10"
acc = "1.25"
width = "1000"
height = "1000"
sf1 = "1"
sf2 = "32"
min = "50"
sf3 = "8"
sf4 = "10"
dataPath = "./data/"
timeData = "time_data_nboids4.csv"
# Test calls
collection = [1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500]
for i in collection:
print "Running test %s" % (str(i))
nboids = str(i)
subprocess.call("mpirun -np " + np +" "+ cexe +" "+ nboids +" "+ nloops +" "+ k +" "+ maxv +" "+ acc +" "+ width +" "+ height +" "+ sf1 +" "+ sf2 +" "+ min +" "+ sf3 +" "+ sf4 + " >> " + dataPath + timeData, shell=True) | 28.884615 | 220 | 0.561917 |
d38e6a79db63434cbf9e868cf910c6c9f1f8acf6 | 95 | py | Python | Backjoon/11720.py | hanjungwoo1/CodingTest | 0112488d04dd53cea1c869439341fb602e699f2a | [
"MIT"
] | 3 | 2022-03-29T04:56:50.000Z | 2022-03-30T08:06:42.000Z | Backjoon/11720.py | hanjungwoo1/CodingTest | 0112488d04dd53cea1c869439341fb602e699f2a | [
"MIT"
] | null | null | null | Backjoon/11720.py | hanjungwoo1/CodingTest | 0112488d04dd53cea1c869439341fb602e699f2a | [
"MIT"
] | null | null | null | N = int(input())
data = input()
sum = 0
for i in range(N):
sum += int(data[i])
print(sum) | 11.875 | 23 | 0.557895 |
2b8ec4095a54c3f9ceedb4f2b460c0f080a6703d | 2,480 | py | Python | alerta/models/customer.py | proffust/alerta | 2b53be0cdf8b91ea24560a295ef2e07df556883b | [
"Apache-2.0"
] | null | null | null | alerta/models/customer.py | proffust/alerta | 2b53be0cdf8b91ea24560a295ef2e07df556883b | [
"Apache-2.0"
] | 60 | 2020-07-27T07:00:45.000Z | 2022-03-21T18:02:18.000Z | alerta/models/customer.py | proffust/alerta | 2b53be0cdf8b91ea24560a295ef2e07df556883b | [
"Apache-2.0"
] | 1 | 2020-11-24T03:16:49.000Z | 2020-11-24T03:16:49.000Z | from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from alerta.app import db
from alerta.database.base import Query
from alerta.utils.response import absolute_url
JSON = Dict[str, Any]
class Customer:
    """A customer record: maps a login/group pattern ('match') to a customer name."""

    def __init__(self, match: str, customer: str, **kwargs) -> None:
        # generate a fresh id when none is supplied
        self.id = kwargs.get('id') or str(uuid4())
        self.match = match
        self.customer = customer

    @classmethod
    def parse(cls, json: JSON) -> 'Customer':
        """Build a Customer from a request JSON payload."""
        return Customer(
            id=json.get('id', None),
            match=json.get('match', None),
            customer=json.get('customer', None)
        )

    @property
    def serialize(self) -> Dict[str, Any]:
        """JSON-serializable representation used in API responses."""
        return {
            'id': self.id,
            'href': absolute_url('/customer/' + self.id),
            'match': self.match,
            'customer': self.customer
        }

    def __repr__(self) -> str:
        return 'Customer(id={!r}, match={!r}, customer={!r})'.format(
            self.id, self.match, self.customer)

    @classmethod
    def from_document(cls, doc: Dict[str, Any]) -> 'Customer':
        """Build a Customer from a document-style dict ('_id' or 'id' key)."""
        return Customer(
            id=doc.get('id', None) or doc.get('_id'),
            match=doc.get('match', None),
            customer=doc.get('customer', None)
        )

    @classmethod
    def from_record(cls, rec) -> 'Customer':
        """Build a Customer from a database row with attribute access."""
        return Customer(
            id=rec.id,
            match=rec.match,
            customer=rec.customer
        )

    @classmethod
    def from_db(cls, r: Union[Dict, Tuple]) -> 'Customer':
        # NOTE(review): implicitly returns None for any other input type —
        # confirm callers tolerate that
        if isinstance(r, dict):
            return cls.from_document(r)
        elif isinstance(r, tuple):
            return cls.from_record(r)

    def create(self) -> 'Customer':
        """Persist this customer and return the stored copy."""
        return Customer.from_db(db.create_customer(self))

    @staticmethod
    def find_by_id(id: str) -> Optional['Customer']:
        """Fetch a customer by id, or None if not found."""
        return Customer.from_db(db.get_customer(id))

    @staticmethod
    def find_all(query: Query = None) -> List['Customer']:
        """Fetch all customers matching an optional query."""
        return [Customer.from_db(customer) for customer in db.get_customers(query)]

    def update(self, **kwargs) -> 'Customer':
        """Apply field updates in the database and return the updated customer."""
        return Customer.from_db(db.update_customer(self.id, **kwargs))

    def delete(self) -> bool:
        """Delete this customer from the database; True on success."""
        return db.delete_customer(self.id)

    @classmethod
    def lookup(cls, login: str, groups: List[str]) -> List[str]:
        """Return customer names matching the login or any of its groups.

        A wildcard result ('*') means "all customers" and is returned as an
        empty list.
        """
        customers = db.get_customers_by_match(login, matches=groups)
        return customers if customers != '*' else []
| 29.52381 | 83 | 0.592339 |
03f82b285d2c473f19a2d0ab45a6404aa6582e98 | 3,313 | py | Python | tests/200_cli/003_startproject.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
] | null | null | null | tests/200_cli/003_startproject.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
] | null | null | null | tests/200_cli/003_startproject.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
from click.testing import CliRunner
from boussole.cli.console_script import cli_frontend
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
# Backend default filename shortcuts
YAML_FILENAME = SettingsBackendYaml._default_filename
JSON_FILENAME = SettingsBackendJson._default_filename
def test_001(settings, caplog):
    """
    Basic: 'startproject' creates source/target dirs and a JSON config,
    and logs the expected success messages.
    """
    runner = CliRunner()
    # Temporary isolated current dir
    with runner.isolated_filesystem():
        test_cwd = os.getcwd()
        sourcedir = os.path.join(test_cwd, "scss")
        targetdir = os.path.join(test_cwd, "css")
        config_filepath = os.path.join(test_cwd, JSON_FILENAME)
        opts = [
            'startproject',
            '--basedir={}'.format(test_cwd),
            '--config={}'.format(JSON_FILENAME),
            '--sourcedir={}'.format("scss"),
            '--targetdir={}'.format("css"),
        ]
        # Execute command with opts
        result = runner.invoke(cli_frontend, opts)
        # Command stops on success exit code
        assert result.exit_code == 0
        # Validate return log output
        assert caplog.record_tuples == [
            (
                'boussole',
                20,
                ("Project directory structure and configuration file have "
                 "been created.")
            ),
            (
                'boussole',
                20,
                ("Now you should start to create some Sass sources into '{}', "
                 "then compile them using:").format(sourcedir)
            ),
            (
                'boussole',
                20,
                '    boussole compile --config={}'.format(config_filepath)
            ),
        ]
        # Ensure dir and file has been created
        assert os.path.exists(config_filepath)
        assert os.path.exists(sourcedir)
        assert os.path.exists(targetdir)
        # Validate created configuration file
        with open(config_filepath, "r") as fp:
            assert json.load(fp) == {
                'SOURCES_PATH': 'scss',
                'TARGET_PATH': 'css',
                "LIBRARY_PATHS": [],
                "OUTPUT_STYLES": "nested",
                "SOURCE_COMMENTS": False,
                "EXCLUDES": []
            }
def test_002(settings, caplog):
    """
    Error from given arguments (multiple identical paths)
    """
    cli = CliRunner()
    # Run inside a throwaway working directory so nothing leaks out
    with cli.isolated_filesystem():
        basedir = os.getcwd()
        duplicated_path = os.path.join(basedir, "css")
        result = cli.invoke(cli_frontend, [
            'startproject',
            '--basedir={}'.format(basedir),
            '--config={}'.format(JSON_FILENAME),
            '--sourcedir={}'.format("css"),
            '--targetdir={}'.format("css"),
        ])
        # Command failed because source and target resolve to the same path
        assert result.exit_code == 1
        # Single CRITICAL (50) record naming the duplicated path; message
        # kept verbatim, including its original spelling.
        assert caplog.record_tuples == [
            (
                'boussole',
                50,
                'Multiple occurences finded for path: {}'.format(duplicated_path)
            )
        ]
| 28.316239 | 79 | 0.545427 |
f9ec16edbd9899e0837124aeada22dff80b80e60 | 1,150 | py | Python | Language Skills/Python/Unit 07 Lists and Functions/02 Battleship/You Sunk my Battleship/17-A Real Win.py | rhyep/Python_tutorials | f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3 | [
"MIT"
] | 346 | 2016-02-22T20:21:10.000Z | 2022-01-27T20:55:53.000Z | Language Skills/Python/Unit 07 Lists and Functions/02 Battleship/You Sunk my Battleship/17-A Real Win.py | Dithn/Codecademy-Exercise-Answers | 696017af889c3d0bb60965fa32bdafd80740baa3 | [
"MIT"
] | 55 | 2016-04-07T13:58:44.000Z | 2020-06-25T12:20:24.000Z | Language Skills/Python/Unit 07 Lists and Functions/02 Battleship/You Sunk my Battleship/17-A Real Win.py | Dithn/Codecademy-Exercise-Answers | 696017af889c3d0bb60965fa32bdafd80740baa3 | [
"MIT"
] | 477 | 2016-02-21T06:17:02.000Z | 2021-12-22T10:08:01.000Z | from random import randint
board = []
for x in range(5):
board.append(["O"] * 5)
def print_board(board):
for row in board:
print " ".join(row)
print "Let's play Battleship!"
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
print ship_row
print ship_col
for turn in range(4):
print "Turn", turn + 1
guess_row = int(raw_input("Guess Row:"))
guess_col = int(raw_input("Guess Col:"))
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sunk my battleship!"
for turn in range(4):
break
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print "Oops, that's not even in the ocean."
elif(board[guess_row][guess_col] == "X"):
print "You guessed that one already."
else:
print "You missed my battleship!"
board[guess_row][guess_col] = "X"
print_board(board)
if turn == 3:
print "Game Over"
| 23.469388 | 80 | 0.613913 |
9e06cdecf988351cf4079ae52adfe81817acccc7 | 2,684 | py | Python | bundle/YouCompleteMe/third_party/ycmd/third_party/waitress/waitress/compat.py | xiaoyin199/myvim | 910dac2ae265eb4896468d4dd447df4b188ddaf1 | [
"Vim"
] | null | null | null | bundle/YouCompleteMe/third_party/ycmd/third_party/waitress/waitress/compat.py | xiaoyin199/myvim | 910dac2ae265eb4896468d4dd447df4b188ddaf1 | [
"Vim"
] | null | null | null | bundle/YouCompleteMe/third_party/ycmd/third_party/waitress/waitress/compat.py | xiaoyin199/myvim | 910dac2ae265eb4896468d4dd447df4b188ddaf1 | [
"Vim"
] | null | null | null | import sys
import types
try:
import urlparse
except ImportError: # pragma: no cover
from urllib import parse as urlparse
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Python 2/3 alias table: give both interpreters a common vocabulary for
# string/integer/class types. Note the trailing commas: ``str,`` builds a
# one-element tuple suitable for isinstance().
if PY3: # pragma: no cover
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    long = int
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    long = long
# ``unquote_bytes_to_wsgi`` percent-decodes a URL byte string. On Python 3
# the decoded bytes are re-exposed as a latin-1 native ``str``; on Python 2
# ``str`` is already bytes, so no decode step is needed.
if PY3: # pragma: no cover
    from urllib.parse import unquote_to_bytes
    def unquote_bytes_to_wsgi(bytestring):
        return unquote_to_bytes(bytestring).decode('latin-1')
else:
    from urlparse import unquote as unquote_to_bytes
    def unquote_bytes_to_wsgi(bytestring):
        return unquote_to_bytes(bytestring)
def text_(s, encoding='latin-1', errors='strict'):
    """Decode *s* with *encoding*/*errors* when it is a ``binary_type``
    instance; any other value is returned unchanged."""
    if not isinstance(s, binary_type):
        return s # pragma: no cover
    return s.decode(encoding, errors)
# ``tostr``/``tobytes`` convert to the interpreter's native ``str`` and to
# raw bytes respectively, always via latin-1 on Python 3. On Python 2 both
# are (near-)identity operations, since ``str`` is already a byte string.
if PY3: # pragma: no cover
    def tostr(s):
        if isinstance(s, text_type):
            s = s.encode('latin-1')
        return str(s, 'latin-1', 'strict')
    def tobytes(s):
        return bytes(s, 'latin-1')
else:
    tostr = str
    def tobytes(s):
        return s
try:
from Queue import (
Queue,
Empty,
)
except ImportError: # pragma: no cover
from queue import (
Queue,
Empty,
)
# ``reraise`` re-raises an exception with an explicit traceback. The
# Python 2 spelling ``raise tp, value, tb`` is a SyntaxError under Python 3,
# so the Py2 variant must be compiled at runtime through ``exec_``.
if PY3: # pragma: no cover
    import builtins
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        if value is None:
            value = tp
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    del builtins
else: # pragma: no cover
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
# NOTE(review): this call sits at module level here, but the string it
# compiles is Python 2 ``raise`` syntax, which would blow up at import time
# on Python 3. It looks like the indentation placing it inside the ``else``
# branch above was lost in transit -- confirm against the upstream file.
exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
try:
from StringIO import StringIO as NativeIO
except ImportError: # pragma: no cover
from io import StringIO as NativeIO
try:
import httplib
except ImportError: # pragma: no cover
from http import client as httplib
# Largest native int: ``sys.maxint`` exists only on Python 2; Python 3
# raises AttributeError, so fall back to ``sys.maxsize``.
try:
    MAXINT = sys.maxint
except AttributeError: # pragma: no cover
    MAXINT = sys.maxsize
| 23.964286 | 61 | 0.620343 |
bc15a58eaa41dcae09c1bbf48263c7850b0df7f2 | 2,815 | py | Python | magenta/models/polyphony_rnn/polyphony_encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/models/polyphony_rnn/polyphony_encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/models/polyphony_rnn/polyphony_encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | 1 | 2021-09-09T15:30:36.000Z | 2021-09-09T15:30:36.000Z | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for converting between polyphonic input and model input/output."""
from __future__ import division
from magenta.models.polyphony_rnn import polyphony_lib
from magenta.models.polyphony_rnn.polyphony_lib import PolyphonicEvent
from magenta.music import encoder_decoder
# Event types encoded as a single one-hot class each (no pitch attached).
EVENT_CLASSES_WITHOUT_PITCH = [
    PolyphonicEvent.START,
    PolyphonicEvent.END,
    PolyphonicEvent.STEP_END,
]
# Event types that carry a pitch; each gets PITCH_CLASSES one-hot classes.
EVENT_CLASSES_WITH_PITCH = [
    PolyphonicEvent.NEW_NOTE,
    PolyphonicEvent.CONTINUED_NOTE,
]
# Number of distinct MIDI pitches (0 .. MAX_MIDI_PITCH inclusive).
PITCH_CLASSES = polyphony_lib.MAX_MIDI_PITCH + 1
class PolyphonyOneHotEncoding(encoder_decoder.OneHotEncoding):
  """One-hot encoding for polyphonic events.

  Pitchless event types occupy the first few indices; each pitched event
  type then occupies a contiguous run of PITCH_CLASSES indices.
  """

  @property
  def num_classes(self):
    # One class per pitchless type, plus one per (pitched type, pitch) pair.
    pitched = len(EVENT_CLASSES_WITH_PITCH) * PITCH_CLASSES
    return len(EVENT_CLASSES_WITHOUT_PITCH) + pitched

  @property
  def default_event(self):
    # A STEP_END event with pitch 0.
    return PolyphonicEvent(
        event_type=PolyphonicEvent.STEP_END, pitch=0)

  def encode_event(self, event):
    """Return the one-hot index for ``event``; raise ValueError otherwise."""
    kind = event.event_type
    if kind in EVENT_CLASSES_WITHOUT_PITCH:
      return EVENT_CLASSES_WITHOUT_PITCH.index(kind)
    if kind in EVENT_CLASSES_WITH_PITCH:
      offset = len(EVENT_CLASSES_WITHOUT_PITCH)
      return offset + (
          EVENT_CLASSES_WITH_PITCH.index(kind) * PITCH_CLASSES + event.pitch)
    raise ValueError('Unknown event type: %s' % kind)

  def decode_event(self, index):
    """Return the PolyphonicEvent for one-hot ``index``; raise on overflow."""
    num_pitchless = len(EVENT_CLASSES_WITHOUT_PITCH)
    if index < num_pitchless:
      return PolyphonicEvent(
          event_type=EVENT_CLASSES_WITHOUT_PITCH[index], pitch=0)
    pitched_index = index - num_pitchless
    if pitched_index < len(EVENT_CLASSES_WITH_PITCH) * PITCH_CLASSES:
      # NOTE(review): this arithmetic assumes the pitched enum values equal
      # num_pitchless + position in EVENT_CLASSES_WITH_PITCH -- confirm
      # against the PolyphonicEvent declaration in polyphony_lib.
      event_type = num_pitchless + pitched_index // PITCH_CLASSES
      return PolyphonicEvent(
          event_type=event_type, pitch=pitched_index % PITCH_CLASSES)
    raise ValueError('Unknown event index: %s' % index)

  def event_to_num_steps(self, event):
    # Only STEP_END advances time; every other event is instantaneous.
    return 1 if event.event_type == PolyphonicEvent.STEP_END else 0
| 35.1875 | 82 | 0.710124 |
15c48fb7a657d0bdfaa4172d3ebe636f2ddf6920 | 8,397 | py | Python | plugins/exchange_rate/qt.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 26 | 2017-06-09T04:13:13.000Z | 2021-11-15T11:35:30.000Z | plugins/exchange_rate/qt.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 29 | 2017-05-07T05:08:06.000Z | 2021-02-19T13:15:03.000Z | plugins/exchange_rate/qt.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 21 | 2017-05-31T14:24:20.000Z | 2021-01-30T17:35:43.000Z | import time
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_stratis_gui.qt.util import *
from electrum_stratis_gui.qt.amountedit import AmountEdit
from electrum_stratis.stratis import COIN
from electrum_stratis.i18n import _
from decimal import Decimal
from functools import partial
from electrum_stratis.plugins import hook
from exchange_rate import FxPlugin
from electrum_stratis.util import timestamp_to_datetime
class Plugin(FxPlugin, QObject):
    """Qt frontend for the exchange-rate plugin: keeps fiat amount edits in
    sync with BTC edits, provides the settings dialog and adds fiat columns
    to the history tab."""

    def __init__(self, parent, config, name):
        # Initialize both bases explicitly; QObject is needed for emit/SIGNAL.
        FxPlugin.__init__(self, parent, config, name)
        QObject.__init__(self)

    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        # Keep a BTC amount edit and its fiat twin in sync. ``fee_e`` may be
        # None (receive tab), in which case no fee recomputation is triggered.
        def edit_changed(edit):
            # ``follows`` guards against recursion: it is set around the
            # programmatic update of the twin edit so this handler bails out.
            if edit.follows:
                return
            edit.setStyleSheet(BLACK_FG)
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.exchange_rate()
            if rate is None or amount is None:
                # No rate or empty input: blank the other side.
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    # Fiat was typed: recompute base units (COIN) and the fee.
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(BLUE_FG)
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    # BTC was typed: recompute the fiat string.
                    fiat_e.follows = True
                    fiat_e.setText(self.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(BLUE_FG)
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False

    @hook
    def init_qt(self, gui):
        # Plugin enabled at runtime: retrofit every already-open window.
        for window in gui.windows:
            self.on_new_window(window)

    @hook
    def do_clear(self, window):
        # Send tab cleared: blank the fiat amount as well.
        window.fiat_send_e.setText('')

    def on_close(self):
        # Plugin unloaded: tell every window to undo our UI changes.
        self.emit(SIGNAL('close_fx_plugin'))

    def restore_window(self, window):
        # Revert a window to its no-fx state (hide fiat edits, redraw).
        window.update_status()
        window.history_list.refresh_headers()
        window.fiat_send_e.hide()
        window.fiat_receive_e.hide()

    def on_quotes(self):
        # Relay the plugin callback as a Qt signal.
        self.emit(SIGNAL('new_fx_quotes'))

    def on_history(self):
        # Relay the plugin callback as a Qt signal.
        self.emit(SIGNAL('new_fx_history'))

    def on_fx_history(self, window):
        '''Called when historical fx quotes are updated'''
        window.history_list.update()

    def on_fx_quotes(self, window):
        '''Called when fresh spot fx quotes come in'''
        window.update_status()
        self.populate_ccy_combo()
        # Refresh edits with the new rate
        edit = window.fiat_send_e if window.fiat_send_e.is_last_edited else window.amount_e
        edit.textEdited.emit(edit.text())
        edit = window.fiat_receive_e if window.fiat_receive_e.is_last_edited else window.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.history_used_spot:
            self.on_fx_history(window)

    def on_ccy_combo_change(self):
        '''Called when the chosen currency changes'''
        ccy = str(self.ccy_combo.currentText())
        if ccy and ccy != self.ccy:
            self.set_currency(ccy)
            self.hist_checkbox_update()

    def hist_checkbox_update(self):
        # History rates only make sense for currencies the exchange provides
        # historical data for; reflect that in the checkbox state.
        if self.hist_checkbox:
            self.hist_checkbox.setEnabled(self.ccy in self.exchange.history_ccys())
            self.hist_checkbox.setChecked(self.config_history())

    def populate_ccy_combo(self):
        # There should be at most one instance of the settings dialog
        combo = self.ccy_combo
        # NOTE: bool(combo) is False if it is empty. Nuts.
        if combo is not None:
            # Rebuild the list without firing on_ccy_combo_change.
            combo.blockSignals(True)
            combo.clear()
            combo.addItems(sorted(self.exchange.quotes.keys()))
            combo.blockSignals(False)
            combo.setCurrentIndex(combo.findText(self.ccy))

    @hook
    def on_new_window(self, window):
        # Additional send and receive edit boxes
        if not hasattr(window, 'fiat_send_e'):
            # First activation for this window: create the fiat edits and
            # place them next to the BTC amount fields.
            send_e = AmountEdit(self.get_currency)
            window.send_grid.addWidget(send_e, 4, 2, Qt.AlignLeft)
            window.amount_e.frozen.connect(
                lambda: send_e.setFrozen(window.amount_e.isReadOnly()))
            receive_e = AmountEdit(self.get_currency)
            window.receive_grid.addWidget(receive_e, 2, 2, Qt.AlignLeft)
            window.fiat_send_e = send_e
            window.fiat_receive_e = receive_e
            self.connect_fields(window, window.amount_e, send_e, window.fee_e)
            self.connect_fields(window, window.receive_amount_e, receive_e, None)
        else:
            # Edits left over from a previous activation: just re-show them.
            window.fiat_send_e.show()
            window.fiat_receive_e.show()
        window.history_list.refresh_headers()
        window.update_status()
        # Route plugin-level signals to per-window handlers.
        window.connect(self, SIGNAL('new_fx_quotes'), lambda: self.on_fx_quotes(window))
        window.connect(self, SIGNAL('new_fx_history'), lambda: self.on_fx_history(window))
        window.connect(self, SIGNAL('close_fx_plugin'), lambda: self.restore_window(window))
        window.connect(self, SIGNAL('refresh_headers'), window.history_list.refresh_headers)

    def settings_widget(self, window):
        # Button shown in the plugins dialog; opens the settings popup.
        return EnterButton(_('Settings'), partial(self.settings_dialog, window))

    def settings_dialog(self, window):
        # Modal dialog to pick the exchange backend, the currency and
        # whether historical rates should be fetched.
        d = WindowModalDialog(window, _("Exchange Rate Settings"))
        layout = QGridLayout(d)
        layout.addWidget(QLabel(_('Exchange rate API: ')), 0, 0)
        layout.addWidget(QLabel(_('Currency: ')), 1, 0)
        layout.addWidget(QLabel(_('History Rates: ')), 2, 0)
        # Currency list
        self.ccy_combo = QComboBox()
        self.ccy_combo.currentIndexChanged.connect(self.on_ccy_combo_change)
        self.populate_ccy_combo()
        def on_change_ex(idx):
            # Switch exchange backend, then re-check history availability.
            exchange = str(combo_ex.currentText())
            if exchange != self.exchange.name():
                self.set_exchange(exchange)
                self.hist_checkbox_update()
        def on_change_hist(checked):
            # Persist the 'history_rates' preference and react immediately.
            if checked:
                self.config.set_key('history_rates', 'checked')
                self.get_historical_rates()
            else:
                self.config.set_key('history_rates', 'unchecked')
                self.emit(SIGNAL('refresh_headers'))
        def ok_clicked():
            # NOTE(review): timeout=0 presumably forces an immediate refresh
            # in FxPlugin's polling loop -- confirm in exchange_rate.py.
            # Dropping ccy_combo makes populate_ccy_combo a no-op once the
            # dialog is gone.
            self.timeout = 0
            self.ccy_combo = None
            d.accept()
        combo_ex = QComboBox()
        combo_ex.addItems(sorted(self.exchanges.keys()))
        combo_ex.setCurrentIndex(combo_ex.findText(self.config_exchange()))
        combo_ex.currentIndexChanged.connect(on_change_ex)
        self.hist_checkbox = QCheckBox()
        self.hist_checkbox.stateChanged.connect(on_change_hist)
        self.hist_checkbox_update()
        ok_button = QPushButton(_("OK"))
        ok_button.clicked.connect(lambda: ok_clicked())
        layout.addWidget(self.ccy_combo,1,1)
        layout.addWidget(combo_ex,0,1)
        layout.addWidget(self.hist_checkbox,2,1)
        layout.addWidget(ok_button,3,1)
        return d.exec_()

    def config_history(self):
        # Persistent user preference for fetching historical rates.
        return self.config.get('history_rates', 'unchecked') != 'unchecked'

    def show_history(self):
        # Fiat history columns are shown only when enabled AND supported.
        return self.config_history() and self.ccy in self.exchange.history_ccys()

    @hook
    def history_tab_headers(self, headers):
        # Append the two fiat column headers when history display is active.
        if self.show_history():
            headers.extend(['%s '%self.ccy + _('Amount'), '%s '%self.ccy + _('Balance')])

    @hook
    def history_tab_update_begin(self):
        # Reset before repainting the history tab; read back in on_fx_quotes.
        # NOTE(review): presumably set by historical_value_str when a spot
        # rate had to substitute for a missing historical one -- confirm.
        self.history_used_spot = False

    @hook
    def history_tab_update(self, tx, entry):
        # Append fiat value/balance columns for a single history row.
        if not self.show_history():
            return
        tx_hash, height, conf, timestamp, value, balance = tx
        if conf <= 0:
            # Unconfirmed transaction: value it at the current time.
            date = timestamp_to_datetime(time.time())
        else:
            date = timestamp_to_datetime(timestamp)
        for amount in [value, balance]:
            text = self.historical_value_str(amount, date)
            entry.append(text)
| 36.828947 | 105 | 0.623437 |
c1b97e6d59454536a5cc112d373a54746337d1be | 520 | py | Python | data/scripts/templates/object/tangible/component/weapon/shared_vibro_unit_enhancement_durability.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/component/weapon/shared_vibro_unit_enhancement_durability.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/component/weapon/shared_vibro_unit_enhancement_durability.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Assemble the tangible template for the vibro unit durability
    enhancement weapon component."""
    obj = Tangible()
    obj.template = "object/tangible/component/weapon/shared_vibro_unit_enhancement_durability.iff"
    obj.attribute_template_id = -1
    obj.stfName("craft_weapon_ingredients_n","blade_vibro_unit_enhancement_durability")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
95bcedecbb4954c0b6b17654bc9ef2d1bf65b50b | 4,523 | py | Python | tests/contrib/sensors/test_emr_job_flow_sensor.py | dyna-dot/airflow | 02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f | [
"Apache-2.0"
] | 2 | 2020-09-30T01:06:15.000Z | 2021-08-07T09:16:21.000Z | tests/contrib/sensors/test_emr_job_flow_sensor.py | dyna-dot/airflow | 02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f | [
"Apache-2.0"
] | 1 | 2020-11-23T08:40:10.000Z | 2020-11-23T08:40:10.000Z | tests/contrib/sensors/test_emr_job_flow_sensor.py | dyna-dot/airflow | 02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f | [
"Apache-2.0"
] | 2 | 2019-07-04T02:46:30.000Z | 2019-07-15T00:56:09.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import datetime
from dateutil.tz import tzlocal
from unittest.mock import MagicMock, patch
from airflow import configuration
from airflow.contrib.sensors.emr_job_flow_sensor import EmrJobFlowSensor
# Canned boto3 ``describe_cluster`` response used to stub the EMR client.
# Despite the name, the canned state is 'STARTING' -- a non-terminal state,
# which is all the sensor cares about.
DESCRIBE_CLUSTER_RUNNING_RETURN = {
    'Cluster': {
        'Applications': [
            {'Name': 'Spark', 'Version': '1.6.1'}
        ],
        'AutoTerminate': True,
        'Configurations': [],
        'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
        'Id': 'j-27ZY9GBEEU2GU',
        'LogUri': 's3n://some-location/',
        'Name': 'PiCalc',
        'NormalizedInstanceHours': 0,
        'ReleaseLabel': 'emr-4.6.0',
        'ServiceRole': 'EMR_DefaultRole',
        'Status': {
            'State': 'STARTING',
            'StateChangeReason': {},
            'Timeline': {
                'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
        },
        'Tags': [
            {'Key': 'app', 'Value': 'analytics'},
            {'Key': 'environment', 'Value': 'development'}
        ],
        'TerminationProtected': False,
        'VisibleToAllUsers': True
    },
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
        'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
    }
}
# Same response shape with a terminal 'TERMINATED' state; returned on the
# second poll so the sensor stops waiting.
DESCRIBE_CLUSTER_TERMINATED_RETURN = {
    'Cluster': {
        'Applications': [
            {'Name': 'Spark', 'Version': '1.6.1'}
        ],
        'AutoTerminate': True,
        'Configurations': [],
        'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
        'Id': 'j-27ZY9GBEEU2GU',
        'LogUri': 's3n://some-location/',
        'Name': 'PiCalc',
        'NormalizedInstanceHours': 0,
        'ReleaseLabel': 'emr-4.6.0',
        'ServiceRole': 'EMR_DefaultRole',
        'Status': {
            'State': 'TERMINATED',
            'StateChangeReason': {},
            'Timeline': {
                'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
        },
        'Tags': [
            {'Key': 'app', 'Value': 'analytics'},
            {'Key': 'environment', 'Value': 'development'}
        ],
        'TerminationProtected': False,
        'VisibleToAllUsers': True
    },
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
        'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
    }
}
class TestEmrJobFlowSensor(unittest.TestCase):
    """The sensor must poll describe_cluster until a terminal state shows up."""

    def setUp(self):
        configuration.load_test_config()
        # Fake EMR client (moto's canned response is incorrect): the first
        # describe_cluster call reports a non-terminal cluster, the second a
        # terminated one.
        self.mock_emr_client = MagicMock()
        self.mock_emr_client.describe_cluster.side_effect = [
            DESCRIBE_CLUSTER_RUNNING_RETURN,
            DESCRIBE_CLUSTER_TERMINATED_RETURN,
        ]
        emr_session = MagicMock()
        emr_session.client.return_value = self.mock_emr_client
        # Any boto3 session the sensor creates will hand back the fake client.
        self.boto3_session_mock = MagicMock(return_value=emr_session)

    def test_execute_calls_with_the_job_flow_id_until_it_reaches_a_terminal_state(self):
        with patch('boto3.session.Session', self.boto3_session_mock):
            sensor = EmrJobFlowSensor(
                task_id='test_task',
                poke_interval=2,
                job_flow_id='j-8989898989',
                aws_conn_id='aws_default'
            )
            sensor.execute(None)
            # Polled once while non-terminal and once more to observe
            # TERMINATED...
            self.assertEqual(self.mock_emr_client.describe_cluster.call_count, 2)
            # ...always querying the configured job flow id.
            self.mock_emr_client.describe_cluster.assert_called_with(ClusterId='j-8989898989')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 34.265152 | 103 | 0.621932 |
0c76e28aeebab0b61d2f322aa68cd32b5c338a05 | 1,805 | py | Python | src/main.py | gimoAI/Colorization-PR-V1 | 7c28bd71f25a445678c61c65d4dc480b50df5d13 | [
"Apache-2.0"
] | null | null | null | src/main.py | gimoAI/Colorization-PR-V1 | 7c28bd71f25a445678c61c65d4dc480b50df5d13 | [
"Apache-2.0"
] | null | null | null | src/main.py | gimoAI/Colorization-PR-V1 | 7c28bd71f25a445678c61c65d4dc480b50df5d13 | [
"Apache-2.0"
] | null | null | null | import os
import random
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from .options import ModelOptions
from .models import Cifar10Model, Places365Model
from .dataset import CIFAR10_DATASET, PLACES365_DATASET
def main(options):
    """Build, restore and run the selected colorization model.

    options: parsed ModelOptions; ``dataset`` picks the model class and
    ``mode`` selects training (0), testing (1) or the turing test (other).
    """
    # reset tensorflow graph
    tf.reset_default_graph()
    # seed every RNG source for reproducibility
    tf.set_random_seed(options.seed)
    np.random.seed(options.seed)
    random.seed(options.seed)
    # create a session environment
    with tf.Session() as sess:
        if options.dataset == CIFAR10_DATASET:
            model = Cifar10Model(sess, options)
        elif options.dataset == PLACES365_DATASET:
            model = Places365Model(sess, options)
        else:
            # Fixed: an unknown dataset previously fell through and crashed
            # later with NameError on ``model``; fail fast with a clear error.
            raise ValueError('unsupported dataset: %s' % options.dataset)
        if not os.path.exists(options.checkpoints_path):
            os.makedirs(options.checkpoints_path)
        if options.log:
            # truncate any log files left over from a previous run
            open(model.train_log_file, 'w').close()
            open(model.test_log_file, 'w').close()
        # build the model and initialize
        model.build()
        sess.run(tf.global_variables_initializer())
        # load model only after global variables initialization
        model.load()
        if options.mode == 0:
            # dump all options to the console and to checkpoints/options.dat,
            # then start training
            args = vars(options)
            print('\n------------ Options -------------')
            with open(os.path.join(options.checkpoints_path, 'options.dat'), 'w') as f:
                for k, v in sorted(args.items()):
                    print('%s: %s' % (str(k), str(v)))
                    f.write('%s: %s\n' % (str(k), str(v)))
            print('-------------- End ----------------\n')
            model.train()
        elif options.mode == 1:
            model.test()
        else:
            model.turing_test()
# CLI entry point: parse command-line options and run.
if __name__ == "__main__":
    main(ModelOptions().parse())
| 26.544118 | 87 | 0.578393 |
8504d14b93c8f582922f3681d65e568705298a91 | 78 | py | Python | cmdlogger/__init__.py | Kami-DiscordBot/JojoCogs | 15e737831995eda83d5599e3edf87b85ba9395bf | [
"MIT"
] | null | null | null | cmdlogger/__init__.py | Kami-DiscordBot/JojoCogs | 15e737831995eda83d5599e3edf87b85ba9395bf | [
"MIT"
] | 3 | 2021-07-28T12:20:18.000Z | 2021-08-14T03:03:54.000Z | cmdlogger/__init__.py | Kami-DiscordBot/JojoCogs | 15e737831995eda83d5599e3edf87b85ba9395bf | [
"MIT"
] | null | null | null | from .core import CmdLogger
def setup(bot):
    # Extension entry point invoked by the bot's cog loader: instantiate
    # CmdLogger and register it on the bot.
    bot.add_cog(CmdLogger(bot))
| 13 | 31 | 0.717949 |
cd7fe56f681e05841aaa557540e4ade95ef5c062 | 13,287 | py | Python | tests/job/test_jobStatus.py | pyiron/pyiron_base | 3f62c6c59b90b774a04a61dd6d8a461fc6ef5bd1 | [
"BSD-3-Clause"
] | 7 | 2020-09-12T11:01:09.000Z | 2022-03-01T20:59:46.000Z | tests/job/test_jobStatus.py | pyiron/pyiron_base | 3f62c6c59b90b774a04a61dd6d8a461fc6ef5bd1 | [
"BSD-3-Clause"
] | 417 | 2018-07-03T12:44:00.000Z | 2022-03-31T14:25:31.000Z | tests/job/test_jobStatus.py | pyiron/pyiron_base | 3f62c6c59b90b774a04a61dd6d8a461fc6ef5bd1 | [
"BSD-3-Clause"
] | 8 | 2018-04-03T05:21:07.000Z | 2021-12-27T09:55:19.000Z | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
from datetime import datetime
from pyiron_base.project.generic import Project
from pyiron_base.database.generic import DatabaseAccess
from pyiron_base.job.jobstatus import JobStatus
import unittest
from pyiron_base._tests import PyironTestCase
class TestJobStatus(PyironTestCase):
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a bare JobStatus, a file-backed SQLite database,
        # one seeded job row, and a JobStatus bound to that row.
        cls.jobstatus = JobStatus()
        cls.database = DatabaseAccess("sqlite:///test_job_status.db", "simulation")
        # Minimal job record satisfying the simulation table schema.
        par_dict = {
            "chemicalformula": "H",
            "computer": "localhost",
            "hamilton": "Test",
            "hamversion": "0.1",
            "job": "testing",
            "parentid": 0,
            "project": "database.testing",
            "projectpath": "/TESTING",
            "status": "initialized",
            "timestart": datetime(2016, 5, 2, 11, 31, 4, 253377),
            "timestop": datetime(2016, 5, 2, 11, 31, 4, 371165),
            "totalcputime": 0.117788,
            "username": "Test",
        }
        cls.job_id = cls.database.add_item_dict(par_dict)
        cls.jobstatus_database = JobStatus(db=cls.database, job_id=cls.job_id)
    def setUp(self):
        # Reset the shared status object to 'initialized' before each test.
        self.jobstatus.initialized = True
@classmethod
def tearDownClass(cls):
try:
os.remove("test_job_status.db")
except (WindowsError, OSError):
pass
def test_initialized(self):
self.assertTrue(self.jobstatus.initialized)
self.jobstatus.string = "finished"
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertTrue(self.jobstatus.finished)
self.jobstatus.initialized = True
self.assertTrue(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_appended(self):
self.jobstatus.appended = True
self.assertFalse(self.jobstatus.initialized)
self.assertTrue(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_created(self):
self.jobstatus.created = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertTrue(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_submitted(self):
self.jobstatus.submitted = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertTrue(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_running(self):
self.jobstatus.running = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertTrue(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_aborted(self):
self.jobstatus.aborted = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertTrue(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_collect(self):
self.jobstatus.collect = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertTrue(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_suspended(self):
self.jobstatus.suspended = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertTrue(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_refresh(self):
self.jobstatus.refresh = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertTrue(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_busy(self):
self.jobstatus.busy = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertTrue(self.jobstatus.busy)
self.assertFalse(self.jobstatus.finished)
def test_finished(self):
self.jobstatus.finished = True
self.assertFalse(self.jobstatus.initialized)
self.assertFalse(self.jobstatus.appended)
self.assertFalse(self.jobstatus.created)
self.assertFalse(self.jobstatus.submitted)
self.assertFalse(self.jobstatus.running)
self.assertFalse(self.jobstatus.aborted)
self.assertFalse(self.jobstatus.collect)
self.assertFalse(self.jobstatus.suspended)
self.assertFalse(self.jobstatus.refresh)
self.assertFalse(self.jobstatus.busy)
self.assertTrue(self.jobstatus.finished)
def test_string(self):
self.jobstatus.string = "initialized"
self.assertTrue(self.jobstatus.initialized)
self.assertEqual(str(self.jobstatus), "initialized")
self.assertEqual(self.jobstatus.string, "initialized")
self.jobstatus.string = "appended"
self.assertTrue(self.jobstatus.appended)
self.assertEqual(str(self.jobstatus), "appended")
self.assertEqual(self.jobstatus.string, "appended")
self.jobstatus.string = "created"
self.assertTrue(self.jobstatus.created)
self.assertEqual(str(self.jobstatus), "created")
self.assertEqual(self.jobstatus.string, "created")
self.jobstatus.string = "submitted"
self.assertTrue(self.jobstatus.submitted)
self.assertEqual(str(self.jobstatus), "submitted")
self.assertEqual(self.jobstatus.string, "submitted")
self.jobstatus.string = "running"
self.assertTrue(self.jobstatus.running)
self.assertEqual(str(self.jobstatus), "running")
self.assertEqual(self.jobstatus.string, "running")
self.jobstatus.string = "aborted"
self.assertTrue(self.jobstatus.aborted)
self.assertEqual(str(self.jobstatus), "aborted")
self.assertEqual(self.jobstatus.string, "aborted")
self.jobstatus.string = "collect"
self.assertTrue(self.jobstatus.collect)
self.assertEqual(str(self.jobstatus), "collect")
self.assertEqual(self.jobstatus.string, "collect")
self.jobstatus.string = "suspended"
self.assertTrue(self.jobstatus.suspended)
self.assertEqual(str(self.jobstatus), "suspended")
self.assertEqual(self.jobstatus.string, "suspended")
self.jobstatus.string = "refresh"
self.assertTrue(self.jobstatus.refresh)
self.assertEqual(str(self.jobstatus), "refresh")
self.assertEqual(self.jobstatus.string, "refresh")
self.jobstatus.string = "busy"
self.assertTrue(self.jobstatus.busy)
self.assertEqual(str(self.jobstatus), "busy")
self.assertEqual(self.jobstatus.string, "busy")
self.jobstatus.string = "finished"
self.assertTrue(self.jobstatus.finished)
self.assertEqual(str(self.jobstatus), "finished")
self.assertEqual(self.jobstatus.string, "finished")
    def test_database_connection(self):
        """A database-backed JobStatus must stay in sync with the job row.

        Checks three directions: the initial read agrees, setting a flag in
        Python writes through to the row, and updating the row directly is
        reflected when reading the flags back.
        """
        # Fresh status: DB row and in-memory object agree on "initialized".
        current_status = self.database.get_item_by_id(self.job_id)["status"]
        self.assertTrue(self.jobstatus_database.initialized)
        self.assertEqual(current_status, str(self.jobstatus_database))
        # Setting the flag in Python must be written through to the DB row.
        self.jobstatus_database.created = True
        new_status = self.database.get_item_by_id(self.job_id)["status"]
        self.assertTrue(self.jobstatus_database.created)
        self.assertNotEqual(current_status, str(self.jobstatus_database))
        self.assertEqual(new_status, str(self.jobstatus_database))
        # Updating the row directly must be visible through the status object.
        self.database.item_update({"status": "finished"}, self.job_id)
        finished_status = self.database.get_item_by_id(self.job_id)["status"]
        self.assertTrue(self.jobstatus_database.finished)
        self.assertNotEqual(current_status, str(self.jobstatus_database))
        self.assertNotEqual(new_status, str(self.jobstatus_database))
        self.assertEqual(finished_status, str(self.jobstatus_database))
class JobStatusIntegration(PyironTestCase):
    """End-to-end check that a job's status survives save and inspect."""
    @classmethod
    def setUpClass(cls):
        # Create a throwaway project next to this test file with one job.
        cls.file_location = os.path.dirname(os.path.abspath(__file__))
        cls.project = Project(os.path.join(cls.file_location, "random_testing"))
        cls.ham = cls.project.create_job('ScriptJob', "job_test_run")
    @classmethod
    def tearDownClass(cls):
        # Re-open the project from the path (class attributes may already be
        # torn down) and remove the job plus the project directory itself.
        project = Project(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "random_testing")
        )
        ham = project.load(project.get_job_ids()[0])
        ham.remove()
        project.remove(enable=True)
    def test_inspect_job(self):
        # A new job starts "initialized"; saving promotes it to "created",
        # and project.inspect() must report the persisted status.
        self.assertTrue(self.ham.status.initialized)
        self.assertEqual(self.ham.status, "initialized")
        self.ham.save()
        self.assertTrue(self.ham.status.created)
        self.assertEqual(self.ham.status, "created")
        job_inspect = self.project.inspect(self.ham.job_name)
        self.assertEqual(job_inspect.status, "created")
if __name__ == "__main__":
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
| 43.563934 | 108 | 0.693535 |
12077cd1c1d9fae98dc3988c79bd4fbf53cc43a8 | 2,925 | py | Python | dags/oss_know/oss_know_dags/dags_github/dag_github_init_issues_comments.py | HexaemeronFsk/airflow-jobs | 674f4c15f6889653bf5578117b085ef794c7b3f4 | [
"Apache-2.0"
] | null | null | null | dags/oss_know/oss_know_dags/dags_github/dag_github_init_issues_comments.py | HexaemeronFsk/airflow-jobs | 674f4c15f6889653bf5578117b085ef794c7b3f4 | [
"Apache-2.0"
] | null | null | null | dags/oss_know/oss_know_dags/dags_github/dag_github_init_issues_comments.py | HexaemeronFsk/airflow-jobs | 674f4c15f6889653bf5578117b085ef794c7b3f4 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from airflow import DAG
from airflow.operators.python import PythonOperator
from oss_know.libs.base_dict.variable_key import OPENSEARCH_CONN_DATA, GITHUB_TOKENS
from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator
from oss_know.libs.util.token import TokenManager
# v0.0.1
# Airflow Variable key holding the list of {owner, repo} dicts whose GitHub
# issue comments should be initialized.
NEED_INIT_SYNC_GITHUB_ISSUES_COMMENTS_REPOS = "need_init_github_issues_comments_repos"
with DAG(
        dag_id='github_init_issues_comments_v1',
        schedule_interval=None,
        start_date=datetime(2000, 1, 1),
        catchup=False,
        tags=['github'],
) as dag:
    # No-op task that only anchors the per-repository fan-out below.
    def scheduler_init_github_issues_comments(ds, **kwargs):
        return 'End:scheduler_init_github_issues_comments'
    op_scheduler_init_github_issues_comments = PythonOperator(
        task_id='op_scheduler_init_github_issues_comments',
        python_callable=scheduler_init_github_issues_comments
    )
    # Worker callable: pull tokens/connection/proxy settings from Airflow
    # Variables and initialize issue comments for one {owner, repo} pair.
    def do_init_github_issues_comments(params):
        from airflow.models import Variable
        from oss_know.libs.github import init_issues_comments
        github_tokens = Variable.get(GITHUB_TOKENS, deserialize_json=True)
        opensearch_conn_info = Variable.get(OPENSEARCH_CONN_DATA, deserialize_json=True)
        proxy_confs = Variable.get('proxy_confs', deserialize_json=True)
        proxies = []
        for line in proxy_confs['reserved_proxies']:
            proxies.append(f'http://{line}')
        proxy_service = KuaiProxyService(proxy_confs['api_url'], proxy_confs['orderid'])
        proxy_manager = ProxyManager(proxies, proxy_service)
        token_manager = TokenManager(github_tokens)
        proxy_accommodator = GithubTokenProxyAccommodator(token_manager, proxy_manager, shuffle=True,
                                                          policy=GithubTokenProxyAccommodator.POLICY_FIXED_MAP)
        owner = params["owner"]
        repo = params["repo"]
        init_issues_comments.init_github_issues_comments(opensearch_conn_info, owner, repo, proxy_accommodator)
        return params
    need_do_init_ops = []  # NOTE(review): assigned but never used below
    from airflow.models import Variable
    # Build one task per configured repository; each one runs downstream of
    # the scheduler anchor task.
    need_init_github_issues_comments_repos = Variable.get(NEED_INIT_SYNC_GITHUB_ISSUES_COMMENTS_REPOS,
                                                          deserialize_json=True)
    for need_init_github_issues_comments_repo in need_init_github_issues_comments_repos:
        op_do_init_github_issues_comments = PythonOperator(
            task_id='op_do_init_github_issues_comments_{owner}_{repo}'.format(
                owner=need_init_github_issues_comments_repo["owner"],
                repo=need_init_github_issues_comments_repo["repo"]),
            python_callable=do_init_github_issues_comments,
            op_kwargs={'params': need_init_github_issues_comments_repo},
        )
        op_scheduler_init_github_issues_comments >> op_do_init_github_issues_comments
| 39.527027 | 111 | 0.728547 |
69ced9578b76ac63101f8922a9419e30ffc3d2f7 | 8,561 | py | Python | telethon/client/account.py | kingjan123/Telethon | c21b2d3ed7123834e1f0e08f1dc0361136dece56 | [
"MIT"
] | null | null | null | telethon/client/account.py | kingjan123/Telethon | c21b2d3ed7123834e1f0e08f1dc0361136dece56 | [
"MIT"
] | null | null | null | telethon/client/account.py | kingjan123/Telethon | c21b2d3ed7123834e1f0e08f1dc0361136dece56 | [
"MIT"
] | null | null | null | import functools
import inspect
from .users import UserMethods, _NOT_A_REQUEST
from .. import helpers, utils
from ..tl import functions, TLRequest
# TODO Make use of :tl:`InvokeWithMessagesRange` somehow
# For that, we need to use :tl:`GetSplitRanges` first.
class _TakeoutClient:
    """
    Proxy object over the client.

    Attribute access is forwarded to the wrapped client; only the dunders
    listed in ``__PROXY_INTERFACE`` (plus this class's own name-mangled
    attributes) are handled by the proxy itself.
    """
    __PROXY_INTERFACE = ('__enter__', '__exit__', '__aenter__', '__aexit__')
    def __init__(self, finalize, client, request):
        # We use the name mangling for attributes to make them inaccessible
        # from within the shadowed client object and to distinguish them from
        # its own attributes where needed.
        self.__finalize = finalize
        self.__client = client
        self.__request = request
        self.__success = None
    @property
    def success(self):
        # Tri-state: None = undecided, True/False = explicit takeout result.
        return self.__success
    @success.setter
    def success(self, value):
        self.__success = value
    async def __aenter__(self):
        # Enter/Exit behaviour is "overrode", we don't want to call start.
        client = self.__client
        if client.session.takeout_id is None:
            client.session.takeout_id = (await client(self.__request)).id
        elif self.__request is not None:
            raise ValueError("Can't send a takeout request while another "
                "takeout for the current session still not been finished yet.")
        return self
    async def __aexit__(self, exc_type, exc_value, traceback):
        # If the caller never set `success`, derive it from whether the
        # `with` body raised — but only when finalization was requested.
        if self.__success is None and self.__finalize:
            self.__success = exc_type is None
        if self.__success is not None:
            result = await self(functions.account.FinishTakeoutSessionRequest(
                self.__success))
            if not result:
                raise ValueError("Failed to finish the takeout.")
            self.session.takeout_id = None
    __enter__ = helpers._sync_enter
    __exit__ = helpers._sync_exit
    async def __call__(self, request, ordered=False):
        # Wrap each outgoing request in InvokeWithTakeoutRequest so it is
        # executed under the session's takeout id.
        takeout_id = self.__client.session.takeout_id
        if takeout_id is None:
            raise ValueError('Takeout mode has not been initialized '
                '(are you calling outside of "with"?)')
        single = not utils.is_list_like(request)
        requests = ((request,) if single else request)
        wrapped = []
        for r in requests:
            if not isinstance(r, TLRequest):
                raise _NOT_A_REQUEST()
            await r.resolve(self, utils)
            wrapped.append(functions.InvokeWithTakeoutRequest(takeout_id, r))
        return await self.__client(
            wrapped[0] if single else wrapped, ordered=ordered)
    def __getattribute__(self, name):
        # We access class via type() because __class__ will recurse infinitely.
        # Also note that since we've name-mangled our own class attributes,
        # they'll be passed to __getattribute__() as already decorated. For
        # example, 'self.__client' will be passed as '_TakeoutClient__client'.
        # https://docs.python.org/3/tutorial/classes.html#private-variables
        if name.startswith('__') and name not in type(self).__PROXY_INTERFACE:
            raise AttributeError # force call of __getattr__
        # Try to access attribute in the proxy object and check for the same
        # attribute in the shadowed object (through our __getattr__) if failed.
        return super().__getattribute__(name)
    def __getattr__(self, name):
        value = getattr(self.__client, name)
        if inspect.ismethod(value):
            # Emulate bound methods behavior by partially applying our proxy
            # class as the self parameter instead of the client.
            return functools.partial(
                getattr(self.__client.__class__, name), self)
        return value
    def __setattr__(self, name, value):
        if name.startswith('_{}__'.format(type(self).__name__.lstrip('_'))):
            # This is our own name-mangled attribute, keep calm.
            return super().__setattr__(name, value)
        return setattr(self.__client, name, value)
class AccountMethods(UserMethods):
    def takeout(
            self, finalize=True, *, contacts=None, users=None, chats=None,
            megagroups=None, channels=None, files=None, max_file_size=None):
        """
        Creates a proxy object over the current :ref:`TelegramClient` through
        which making requests will use :tl:`InvokeWithTakeoutRequest` to wrap
        them. In other words, returns the current client modified so that
        requests are done as a takeout:
        >>> from telethon.sync import TelegramClient
        >>>
        >>> with TelegramClient(...) as client:
        >>>     with client.takeout() as takeout:
        >>>         client.get_messages('me')  # normal call
        >>>         takeout.get_messages('me')  # wrapped through takeout
        Some of the calls made through the takeout session will have lower
        flood limits. This is useful if you want to export the data from
        conversations or mass-download media, since the rate limits will
        be lower. Only some requests will be affected, and you will need
        to adjust the `wait_time` of methods like `client.iter_messages
        <telethon.client.messages.MessageMethods.iter_messages>`.
        By default, all parameters are ``None``, and you need to enable those
        you plan to use by setting them to either ``True`` or ``False``.
        You should ``except errors.TakeoutInitDelayError as e``, since this
        exception will raise depending on the condition of the session. You
        can then access ``e.seconds`` to know how long you should wait for
        before calling the method again.
        There's also a `success` property available in the takeout proxy
        object, so from the `with` body you can set the boolean result that
        will be sent back to Telegram. But if it's left ``None`` as by
        default, then the action is based on the `finalize` parameter. If
        it's ``True`` then the takeout will be finished, and if no exception
        occurred during it, then ``True`` will be considered as a result.
        Otherwise, the takeout will not be finished and its ID will be
        preserved for future usage as `client.session.takeout_id
        <telethon.sessions.abstract.Session.takeout_id>`.
        Args:
            contacts (`bool`):
                Set to ``True`` if you plan on downloading contacts.
            users (`bool`):
                Set to ``True`` if you plan on downloading information
                from users and their private conversations with you.
            chats (`bool`):
                Set to ``True`` if you plan on downloading information
                from small group chats, such as messages and media.
            megagroups (`bool`):
                Set to ``True`` if you plan on downloading information
                from megagroups (channels), such as messages and media.
            channels (`bool`):
                Set to ``True`` if you plan on downloading information
                from broadcast channels, such as messages and media.
            files (`bool`):
                Set to ``True`` if you plan on downloading media and
                you don't only wish to export messages.
            max_file_size (`int`):
                The maximum file size, in bytes, that you plan
                to download for each message with media.
        """
        request_kwargs = dict(
            contacts=contacts,
            message_users=users,
            message_chats=chats,
            message_megagroups=megagroups,
            message_channels=channels,
            files=files,
            file_max_size=max_file_size
        )
        # Only send a fresh InitTakeoutSessionRequest when no takeout session
        # is stored yet, or when the caller asked for specific scopes.
        arg_specified = (arg is not None for arg in request_kwargs.values())
        if self.session.takeout_id is None or any(arg_specified):
            request = functions.account.InitTakeoutSessionRequest(
                **request_kwargs)
        else:
            request = None
        return _TakeoutClient(finalize, self, request)
    async def end_takeout(self, success):
        """
        Finishes a takeout, with specified result sent back to Telegram.
        Returns:
            ``True`` if the operation was successful, ``False`` otherwise.
        """
        try:
            # The takeout proxy finalizes on __aexit__ with the given result.
            async with _TakeoutClient(True, self, None) as takeout:
                takeout.success = success
        except ValueError:
            return False
        return True
| 41.158654 | 79 | 0.633104 |
d6b340379bf394f4757c6054d049251a527e5c43 | 491 | py | Python | polymatrix/game.py | python-polymatrix-games/polymatrix | cd7966ff0f028bb9bcd80f5c5340918b1748735e | [
"MIT"
] | 2 | 2020-10-31T00:27:42.000Z | 2020-11-20T15:52:30.000Z | polymatrix/game.py | python-polymatrix-games/polymatrix-games | cd7966ff0f028bb9bcd80f5c5340918b1748735e | [
"MIT"
] | null | null | null | polymatrix/game.py | python-polymatrix-games/polymatrix-games | cd7966ff0f028bb9bcd80f5c5340918b1748735e | [
"MIT"
] | 1 | 2021-03-05T15:50:01.000Z | 2021-03-05T15:50:01.000Z | import plac
from polymatrixgame import QuickPolymatrixGame
# Each plac option is declared as (help text, kind, abbreviation, type).
@plac.annotations(number_of_players=("Amount of players", "option", "n", int),
              log_level=("Preferred level of logging", "option", "log", str),
              timesteps=("Amount of rounds to be played", "option", "t", int))
def main(number_of_players=3, timesteps=0, log_level="warning"):
    """Build and run a QuickPolymatrixGame with the given CLI settings."""
    QuickPolymatrixGame(number_of_players, timesteps, log_level)
if __name__ == "__main__":
    # plac parses sys.argv against the annotations above and invokes main().
    plac.call(main)
| 37.769231 | 84 | 0.676171 |
8bd69deabc2579811e212810d92eded249d99458 | 566 | py | Python | src/products/urls.py | ilyshev/try-django | 158cc6ba9f50890ffeb2fb488cfb8ba20719745f | [
"MIT"
] | null | null | null | src/products/urls.py | ilyshev/try-django | 158cc6ba9f50890ffeb2fb488cfb8ba20719745f | [
"MIT"
] | null | null | null | src/products/urls.py | ilyshev/try-django | 158cc6ba9f50890ffeb2fb488cfb8ba20719745f | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from products.views import (
product_detail_view,
product_create_view,
render_initial_data,
dynamic_lookup_view,
product_list_view,
product_delete_view
)
# URL namespace: reverse with e.g. 'products:product-detail'.
app_name = 'products'
urlpatterns = [
    path('create/', product_create_view, name='product-create'),
    path('product/', product_detail_view),  # unnamed: not reversible by name
    path('<int:id>/', dynamic_lookup_view, name='product-detail'),
    path('<int:id>/delete/', product_delete_view, name='product-delete'),
    path('', product_list_view, name='product-list'),
] | 29.789474 | 73 | 0.745583 |
db400174954b81085fc7e1ee13e9597ed2064a6d | 11,571 | py | Python | src/pytorch-template/old/baseline/Network_num_1.py | kaderghal/ADNI_Data_processing | 454462d3913d77e3bc4de2b9725b456301c7b351 | [
"MIT"
] | 5 | 2021-01-07T10:11:57.000Z | 2022-01-16T04:57:51.000Z | src/pytorch-template/old/baseline/Network_num_1.py | kaderghal/ADNI_Data_processing | 454462d3913d77e3bc4de2b9725b456301c7b351 | [
"MIT"
] | null | null | null | src/pytorch-template/old/baseline/Network_num_1.py | kaderghal/ADNI_Data_processing | 454462d3913d77e3bc4de2b9725b456301c7b351 | [
"MIT"
] | 1 | 2021-08-05T07:34:16.000Z | 2021-08-05T07:34:16.000Z |
import torch.nn as nn
from torch import optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import BatchSampler
from torchsummary import summary
from torchvision import transforms
from torchvision.datasets import DatasetFolder
import errno
import numpy as np
import os
import pickle
import random
import sys
import torch
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
import math
ratio = 3 # reduction ratio for SE (squeeze-and-excitation); not referenced elsewhere in this file
###############################################################################################################
# server paths (active configuration)
###############################################################################################################
sys.path.append('/data/ADERGHAL/code-source/ADNI_Data_processing/src/data_processing/')
root_path = '/data/ADERGHAL/ADNI_workspace/results/ADNI_des/F_28P_F10_MS2_MB05D/HIPP/3D/AD-NC/'
###############################################################################################################
# HP computer (alternative local paths, kept commented out)
###############################################################################################################
#sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing')
#root_path = '/home/karim/workspace/ADNI_workspace/results/ADNI_des/F_28P_F100_MS2_MB10D/HIPP/3D/AD-NC/'
# NOTE: ('.pkl') is a plain string, not a 1-tuple; str.endswith accepts both.
ADNI_MODEL_EXTENSIONS = ('.pkl')
# 1 pickle loader (load one sample)
def pickle_loader(path_file):
    """Load one pickled ADNI sample from disk.

    Parameters
    ----------
    path_file : str
        Path to a ``.pkl`` file written by the data-processing pipeline.

    Returns
    -------
    object
        The deserialized object (callers access .hippLeft/.hippRight/
        .hippMetaDataVector on it).

    Security note: ``pickle.load`` can execute arbitrary code, so only
    files produced by this project's own pipeline should be loaded.
    """
    # Fix: the original computed os.path.dirname() into an unused local
    # (dead statement); it has been removed.
    with open(path_file, 'rb') as f:
        model_adni = pickle.load(f)
    return model_adni
# to check if the file type is allowed
def has_file_allowed_extension(filename, extensions):
    """Case-insensitive check that *filename* ends with one of *extensions*.

    *extensions* may be a single suffix string or a tuple of suffixes,
    exactly as accepted by ``str.endswith``.
    """
    lowered = filename.lower()
    return lowered.endswith(extensions)
def is_image_file(filename):
    """Return True when *filename* carries the ADNI model extension (.pkl)."""
    return filename.lower().endswith(ADNI_MODEL_EXTENSIONS)
# function
def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None):
    """Collect (file_path, class_index) pairs from class sub-folders of *dir*.

    Exactly one of *extensions* / *is_valid_file* must be supplied; when
    *extensions* is given, files are filtered by suffix, otherwise the
    caller-provided predicate decides. Class folders and file names are
    visited in sorted order, so the result is deterministic.
    """
    samples = []
    dir = os.path.expanduser(dir)
    if not ((extensions is None) ^ (is_valid_file is None)):
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(x):
            return has_file_allowed_extension(x, extensions)
    for target in sorted(class_to_idx.keys()):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            continue
        for walk_root, _, file_names in sorted(os.walk(class_dir)):
            for file_name in sorted(file_names):
                full_path = os.path.join(walk_root, file_name)
                if is_valid_file(full_path):
                    samples.append((full_path, class_to_idx[target]))
    return samples
# 2 Class Datafolder
class Dataset_ADNI_Folder(DatasetFolder):
    """Dataset over a ``root/{class}/sample.pkl`` tree of pickled ADNI samples.

    NOTE(review): this subclass never calls ``DatasetFolder.__init__``, and
    the ``transform``/``target_transform`` constructor arguments are accepted
    but ignored (``self.transform`` is hard-coded below and never applied in
    ``__getitem__``) — confirm this is intentional.
    """
    # Methodes
    def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None):
        self.root = root
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        if len(samples) == 0:
            raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
                                "Supported extensions are: " + ",".join(extensions)))
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # Hard-coded pipeline; the `transform` parameter above is not used.
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.targets = [s[1] for s in samples]
    # __getitem__
    def __getitem__(self, index):
        """Return (left hippocampus, right hippocampus, metadata vector, label)."""
        path, target = self.samples[index]
        sample = self.loader(path)
        # if self.transform is not None:
        #     sample = self.transform(sample)
        # if self.target_transform is not None:
        #     target = self.target_transform(target)
        # sample is objet instance from HippModel (L, R, V, Label)
        return (sample.hippLeft, sample.hippRight, sample.hippMetaDataVector, target)
    # __len__
    def __len__(self):
        return len(self.samples)
    # _find_classes
    def _find_classes(self, dir):
        """Map each sub-directory name of *dir* to a class index, in sorted order."""
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx
#==============================================================================
# Network definition
#==============================================================================
class Network_Baseline(nn.Module):
    """Baseline CNN treating a 28x28x28 patch as 28 channels of 28x28 slices.

    Spatial sizes: 28 -> 26 (conv5 pad1) -> 22 (conv5) -> 19 (conv4)
    -> 8 (maxpool k4 s2), then FC 64*8*8 -> 120 -> 2 logits.
    """
    def __init__(self):
        super(Network_Baseline, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(28, 16, kernel_size=5, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU()
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=0),
            nn.ReLU()
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=0),
            nn.ReLU()
        )
        self.pool1 = nn.Sequential(
            nn.MaxPool2d(kernel_size=4, stride=2, padding=0)
        )
        self.fc1 = nn.Linear(64*8*8, 120)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(120, 2)
    def forward(self, x):
        """Run the convolutional stack and classifier; returns raw logits."""
        features = self.pool1(self.layer3(self.layer2(self.layer1(x))))
        flat = features.view(-1, self.num_flat_features(features))
        hidden = F.relu(self.fc1(self.dropout(flat)))
        return self.fc2(hidden)
    def num_flat_features(self, x):
        """Number of features per sample once the batch dim is stripped."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
#==========================================================================
# Function: Main definition
#==========================================================================
def main():
    """Train Network_Baseline on the left-hippocampus tensor of the ADNI
    pickle dataset, periodically evaluate, and plot curves at the end.

    NOTE(review): num_classes, save_frequency, total_step and loss_list are
    assigned but never used; the validation loader is deliberately replaced
    by the test loader below — confirm both are intentional.
    """
    # parames for data
    id_device = 1
    params_num_workers = 4
    batch_size = 64
    num_classes = 2
    save_frequency = 2
    learning_rate = 0.0001
    num_epochs = 500
    weight_decay = 0.0001
    momentum = 0.9
    train_losses, test_losses = [], []
    running_loss = 0
    steps = 0
    print_every = 35 # 175/5
    # select device
    device = torch.device("cuda:" + str(id_device) if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
    print("using device :", device)
    model = Network_Baseline().to(device)
    # weights initialization
    # model.apply(weights_init)
    # DataFolder
    train_data = Dataset_ADNI_Folder(root=root_path + 'train/', loader=pickle_loader, extensions='.pkl', transform=None)
    valid_data = Dataset_ADNI_Folder(root=root_path + 'valid/', loader=pickle_loader, extensions='.pkl', transform=None)
    test_data = Dataset_ADNI_Folder(root=root_path + 'test/' , loader=pickle_loader, extensions='.pkl', transform=None)
    # Dataloader
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=params_num_workers)
    # "validation" metrics below are actually computed on the *test* split
    valid_loader = test_loader
    # net = LeNet()
    # summary(model, (28, 28, 28))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
    #scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=10, gamma=0.1)
    # Train the model
    total_step = len(train_loader)
    loss_list = []
    acc_list = []
    valid_acc = []
    running_loss = 0.0
    for epoch in range(num_epochs):
        # each batch is (left, right, metadata, label); only d1 (left) is used
        for i, (d1, d2, v, labels) in enumerate(train_loader):
            #
            steps += 1
            # # forward + backward + optimize
            # print("d1 size:", d1.size())
            # d1 = torch.unsqueeze(d1, 1).to(device, dtype=torch.float)
            d1 = d1.to(device, dtype=torch.float)
            # print("d1 size:", d1.size())
            labels = labels.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            outputs = model(d1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Track the accuracy
            total = labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            correct = (predicted == labels).sum().item()
            # acc_list.append((correct / total) * 100)
            if steps % print_every == 0:
                # periodic evaluation pass (note: inner loop reuses name `i`)
                acc_list.append((correct / total) * 100)
                test_loss = 0
                accuracy = 0
                model.eval()
                with torch.no_grad():
                    for i, (v_d1, v_d2, v_v, v_labels) in enumerate(valid_loader):
                        # v_d1 = torch.unsqueeze(v_d1, 1).to(device, dtype=torch.float)
                        v_d1 = v_d1.to(device, dtype=torch.float)
                        v_labels = v_labels.to(device)
                        v_outputs = model(v_d1)
                        batch_loss = criterion(v_outputs, v_labels)
                        test_loss += batch_loss.item()
                        ps = torch.exp(v_outputs)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == v_labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                # train_losses.append(running_loss/len(train_loader))
                train_losses.append(running_loss/print_every)
                test_losses.append(test_loss/len(valid_loader))
                print(f"Epoch {epoch+1}/{num_epochs}.. "
                      f"Train loss: {running_loss/print_every:.3f}.. "
                      f"Train accuracy: {(correct / total) * 100:.3f}.. "
                      f"Test loss: {test_loss/len(valid_loader):.3f}.. "
                      f"Test accuracy: {(accuracy/len(valid_loader) * 100):.3f}")
                valid_acc.append((accuracy/len(valid_loader) * 100))
                running_loss = 0
                model.train()
        # scheduler.step()
    plt.plot(acc_list, label='Training accu')
    plt.plot(valid_acc, label='Validation accu')
    plt.legend(frameon=False)
    plt.show()
    plt.plot(train_losses, label='Training loss')
    plt.plot(test_losses, label='Validation loss')
    plt.legend(frameon=False)
    plt.show()
    print('Finished Training')
#==========================================================================
# Start : __Main__
#==========================================================================
# Entry point: launch the full training/evaluation run when executed directly.
if __name__ == '__main__':
main() | 33.932551 | 127 | 0.548872 |
e3dce2d09de97bc075bd488acd87eb7b5bc98723 | 1,414 | py | Python | snakemake/configs/mm10_Oct_10_2016_Oct_10_2016_HuR_Human_Mouse_Liver_RNA-Seq-Mouse.py | saketkc/re-ribo-smk | c9326cbafdfa060e22e9af692d9146c37f5035ba | [
"BSD-2-Clause"
] | 1 | 2019-09-11T17:09:48.000Z | 2019-09-11T17:09:48.000Z | snakemake/configs/mm10_Oct_10_2016_Oct_10_2016_HuR_Human_Mouse_Liver_RNA-Seq-Mouse.py | saketkc/re-ribo-smk | c9326cbafdfa060e22e9af692d9146c37f5035ba | [
"BSD-2-Clause"
] | null | null | null | snakemake/configs/mm10_Oct_10_2016_Oct_10_2016_HuR_Human_Mouse_Liver_RNA-Seq-Mouse.py | saketkc/re-ribo-smk | c9326cbafdfa060e22e9af692d9146c37f5035ba | [
"BSD-2-Clause"
] | null | null | null |
# Per-sample config consumed by the ribo-seq Snakemake workflow.
# Input FASTQ location and output directory:
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/rna-seq/Penalva_L_08182016/mouse'
OUT_DIR = '/staging/as/skchoudh/rna/Oct_10_2016_HuR_Human_Mouse_Liver_ribocop_run_Jan2019/RNA-Seq-Mouse'
# mm10 genome, chromosome sizes, STAR index and GENCODE vM11 annotation:
GENOME_FASTA = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/mm10.fa'
CHROM_SIZES = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/mm10.chrom.sizes'
STAR_INDEX = '/home/cmb-06/as/skchoudh/genomes/mm10/star_annotated'
GTF = '/home/cmb-06/as/skchoudh/genomes/mm10/annotation/gencode.vM11.annotation.gtf'
# Pre-extracted BED annotations (gene, codons, CDS, UTRs, introns):
GENE_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/gene.bed.gz'
STAR_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/start_codon.bed.gz'
STOP_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/stop_codon.bed.gz'
CDS_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/cds.bed.gz'
UTR5_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/utr5.bed.gz'
UTR3_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/utr3.bed.gz'
INTRON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/vM11/intron.bed.gz'
# Metagene orientations, strand labels and fragment-length range to analyse:
ORIENTATIONS = ['5prime', '3prime']
STRANDS = ['pos', 'neg', 'combined']
FRAGMENT_LENGTHS = range(18, 39)
| 78.555556 | 121 | 0.806223 |
96dc6a26350a0c78357936b9a9536c1ff6811c68 | 6,677 | py | Python | src/mcmc_alpha.py | QEC-project-2020/EWD-QEC | 1bc80ba406a70a802898b43c661378b689f10622 | [
"MIT"
] | null | null | null | src/mcmc_alpha.py | QEC-project-2020/EWD-QEC | 1bc80ba406a70a802898b43c661378b689f10622 | [
"MIT"
] | null | null | null | src/mcmc_alpha.py | QEC-project-2020/EWD-QEC | 1bc80ba406a70a802898b43c661378b689f10622 | [
"MIT"
] | 2 | 2021-12-13T15:12:10.000Z | 2021-12-20T19:20:38.000Z | import numpy as np
import random as rand
import copy
from numba import njit
from src.xzzx_model import xzzx_code, _apply_random_stabilizer as apply_stabilizer_fast_xzzx
from src.rotated_surface_model import RotSurCode, _apply_random_stabilizer as apply_stabilizer_fast_rotated
from src.planar_model import Planar_code, _apply_random_stabilizer as apply_stabilizer_fast_planar
from src.toric_model import Toric_code, _apply_random_stabilizer as apply_stabilizer_fast_toric
from src.xyz2_model import xyz_code, _apply_random_stabilizer as apply_stabilizer_fast_xyzxyz
class Chain_alpha:
    """One MCMC chain over a code's qubit matrix at a fixed sampling weight.

    A candidate configuration differing by operator counts (dx, dy, dz) is
    accepted when ``rand.random() < pz_tilde ** (dz + alpha * (dx + dy))``.
    """
    def __init__(self, code, pz_tilde, alpha):
        self.code = code            # code instance owning qubit_matrix
        self.pz_tilde = pz_tilde    # base acceptance weight of this chain
        self.alpha = alpha          # exponent weight of (dx + dy) vs dz
        self.p_logical = 0          # probability of proposing a logical op
        self.flag = 0               # marker used by Ladder_alpha bookkeeping
    # runs iters number of steps of the metroplois-hastings algorithm
    def update_chain(self, iters):
        """Perform *iters* Metropolis-Hastings proposal/acceptance steps."""
        if self.p_logical != 0:
            for _ in range(iters):
                # apply logical or stabilizer with p_logical
                if rand.random() < self.p_logical:
                    new_matrix, (dx, dy, dz) = self.code.apply_random_logical()
                else:
                    new_matrix, (dx, dy, dz) = self.code.apply_random_stabilizer()
                if rand.random() < self.pz_tilde**(dz + self.alpha*(dx + dy)):
                    self.code.qubit_matrix = new_matrix
        else:
            # stabilizer-only branch; no draw is spent on logical proposals
            for _ in range(iters):
                new_matrix, (dx, dy, dz) = self.code.apply_random_stabilizer()
                if rand.random() < self.pz_tilde**(dz + self.alpha*(dx + dy)):
                    self.code.qubit_matrix = new_matrix
    def update_chain_fast(self, iters):
        """Dispatch to the numba-compiled stabilizer-only update for self.code."""
        if isinstance(self.code, xzzx_code):
            self.code.qubit_matrix = _update_chain_fast_xzzx(self.code.qubit_matrix, self.pz_tilde, self.alpha, iters)
        elif isinstance(self.code, RotSurCode):
            self.code.qubit_matrix = _update_chain_fast_rotated(self.code.qubit_matrix, self.pz_tilde, self.alpha, iters)
        elif isinstance(self.code, Planar_code):
            self.code.qubit_matrix = _update_chain_fast_planar(self.code.qubit_matrix, self.pz_tilde, self.alpha, iters)
        elif isinstance(self.code, Toric_code):
            self.code.qubit_matrix = _update_chain_fast_toric(self.code.qubit_matrix, self.pz_tilde, self.alpha, iters)
        elif isinstance(self.code, xyz_code):
            self.code.qubit_matrix = _update_chain_fast_xyzxyz(self.code.qubit_matrix, self.pz_tilde, self.alpha, iters)
        else:
            raise ValueError("Fast chain updates not available for this code")
class Ladder_alpha:
    """Parallel-tempering ladder of Chain_alpha replicas.

    Chain 0 samples at ``pz_tilde_bottom``; weights increase linearly up to
    1 at the top chain, which may additionally propose logical operators.
    """
    def __init__(self, pz_tilde_bottom, init_code, alpha, Nc, p_logical=0):
        self.alpha = alpha
        self.pz_tilde_bottom = pz_tilde_bottom
        # seed code
        self.init_code = init_code
        # number of chains
        self.Nc = Nc
        # logical sampling rate in top chain
        self.p_logical = p_logical
        # the top of the ladder samples at weight 1
        pz_tilde_top = 1
        # temporary list of sampling probabilities
        pz_tilde_ladder = np.linspace(pz_tilde_bottom, pz_tilde_top, Nc)
        self.pz_tilde_ladder = pz_tilde_ladder
        # relative probabilities of neighbouring rungs
        # (not referenced again inside this class)
        self.pz_tilde_diff = (pz_tilde_ladder[:-1] * (1 - pz_tilde_ladder[1:])) / (pz_tilde_ladder[1:] * (1 - pz_tilde_ladder[:-1]))
        # list of Chains of increasing p
        self.chains = [Chain_alpha(copy.deepcopy(init_code) ,pz_tilde, alpha) for pz_tilde in pz_tilde_ladder]
        # special properties of top chain
        self.chains[-1].flag = 1
        self.chains[-1].p_logical = p_logical
        # count of chains that have "fallen all the way down"
        self.tops0 = 0
    def update_ladder(self, iters):
        """Advance every chain in the ladder by *iters* MCMC steps."""
        for chain in self.chains:
            chain.update_chain(iters)
    # returns true if flip should be performed
    def r_flip(self, ind_lo):
        """Metropolis criterion for swapping rungs ind_lo and ind_lo + 1."""
        # chain lengths
        pz_tilde_low = self.chains[ind_lo].pz_tilde
        pz_tilde_high = self.chains[ind_lo+1].pz_tilde
        nx, ny, nz = self.chains[ind_lo].code.chain_lengths()
        n_eff_low = nz + self.chains[ind_lo].alpha * (nx + ny)
        nx, ny, nz = self.chains[ind_lo+1].code.chain_lengths()
        n_eff_high = nz + self.chains[ind_lo+1].alpha * (nx + ny)
        return rand.random() < (pz_tilde_low/pz_tilde_high)**(n_eff_high-n_eff_low)
    def step(self, iters):
        """One tempering round: update all chains, attempt neighbour swaps
        from the top of the ladder down, and count descents of the top marker."""
        self.update_ladder(iters)
        for i in reversed(range(self.Nc - 1)):
            if self.r_flip(i):
                self.chains[i].code, self.chains[i + 1].code = self.chains[i + 1].code, self.chains[i].code
                self.chains[i].flag, self.chains[i + 1].flag = self.chains[i + 1].flag, self.chains[i].flag
        self.chains[-1].flag = 1
        if self.chains[0].flag == 1:
            self.tops0 += 1
            self.chains[0].flag = 0
@njit(cache=True)
def _update_chain_fast_xzzx(qubit_matrix, pz_tilde, alpha, iters):
    """Run `iters` Metropolis steps on an xzzx-code qubit matrix (njit kernel)."""
    step = 0
    while step < iters:
        step += 1
        candidate, (dx, dy, dz) = apply_stabilizer_fast_xzzx(qubit_matrix)
        # acceptance weight; x/y chain-length changes are scaled by alpha
        accept = pz_tilde ** (dz + alpha * (dx + dy))
        if accept > 1 or rand.random() < accept:
            qubit_matrix = candidate
    return qubit_matrix
@njit(cache=True)
def _update_chain_fast_rotated(qubit_matrix, pz_tilde, alpha, iters):
    """Run `iters` Metropolis steps on a rotated-surface-code matrix (njit kernel)."""
    step = 0
    while step < iters:
        step += 1
        candidate, (dx, dy, dz) = apply_stabilizer_fast_rotated(qubit_matrix)
        # acceptance weight; x/y chain-length changes are scaled by alpha
        accept = pz_tilde ** (dz + alpha * (dx + dy))
        if accept > 1 or rand.random() < accept:
            qubit_matrix = candidate
    return qubit_matrix
@njit(cache=True)
def _update_chain_fast_planar(qubit_matrix, pz_tilde, alpha, iters):
    """Run `iters` Metropolis steps on a planar-code qubit matrix (njit kernel)."""
    step = 0
    while step < iters:
        step += 1
        candidate, (dx, dy, dz) = apply_stabilizer_fast_planar(qubit_matrix)
        # acceptance weight; x/y chain-length changes are scaled by alpha
        accept = pz_tilde ** (dz + alpha * (dx + dy))
        if accept > 1 or rand.random() < accept:
            qubit_matrix = candidate
    return qubit_matrix
@njit(cache=True)
def _update_chain_fast_toric(qubit_matrix, pz_tilde, alpha, iters):
    """Run `iters` Metropolis steps on a toric-code qubit matrix (njit kernel)."""
    step = 0
    while step < iters:
        step += 1
        candidate, (dx, dy, dz) = apply_stabilizer_fast_toric(qubit_matrix)
        # acceptance weight; x/y chain-length changes are scaled by alpha
        accept = pz_tilde ** (dz + alpha * (dx + dy))
        if accept > 1 or rand.random() < accept:
            qubit_matrix = candidate
    return qubit_matrix
@njit(cache=True)
def _update_chain_fast_xyzxyz(qubit_matrix, pz_tilde, alpha, iters):
    """Run `iters` Metropolis steps on an xyz-code qubit matrix (njit kernel)."""
    step = 0
    while step < iters:
        step += 1
        candidate, (dx, dy, dz) = apply_stabilizer_fast_xyzxyz(qubit_matrix)
        # acceptance weight; x/y chain-length changes are scaled by alpha
        accept = pz_tilde ** (dz + alpha * (dx + dy))
        if accept > 1 or rand.random() < accept:
            qubit_matrix = candidate
    return qubit_matrix
| 38.819767 | 132 | 0.6449 |
ae6ce073b348563767a598dbadb4ac79dab3164c | 2,035 | py | Python | plugins/cuckoo/komand_cuckoo/actions/submit_files/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/cuckoo/komand_cuckoo/actions/submit_files/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/cuckoo/komand_cuckoo/actions/submit_files/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable action description consumed by the Komand/InsightConnect SDK.
    # NOTE: this file is SDK-generated ("DO NOT EDIT") -- regenerate rather than hand-edit.
    DESCRIPTION = "Adds one or more files and/or files embedded in archives to the list of pending tasks"
class Input:
    # Name constant for the action's single input parameter.
    FILES = "files"
class Output:
    # Name constants for the action's output fields.
    ERRORS = "errors"
    SUBMIT_ID = "submit_id"
    TASK_ID = "task_id"
class SubmitFilesInput(komand.Input):
    """Input schema for the submit_files action (SDK-generated; do not hand-edit)."""

    # JSON Schema validated by the SDK before the action runs: a required list of
    # file objects, each with a filename and base64-encoded content.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "files": {
      "type": "array",
      "title": "Files",
      "description": "List of files of the format: {'filename': 'blah.exe', 'contents': '\\u003cb64-encoded-bytes\\u003e'}",
      "items": {
        "$ref": "#/definitions/file"
      },
      "order": 1
    }
  },
  "required": [
    "files"
  ],
  "definitions": {
    "file": {
      "id": "file",
      "type": "object",
      "title": "File",
      "description": "File Object",
      "properties": {
        "content": {
          "type": "string",
          "title": "Content",
          "description": "File contents",
          "format": "bytes"
        },
        "filename": {
          "type": "string",
          "title": "Filename",
          "description": "Name of file"
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class SubmitFilesOutput(komand.Output):
    """Output schema for the submit_files action (SDK-generated; do not hand-edit)."""

    # JSON Schema for the action result: the created task/submission ids plus
    # any errors reported by Cuckoo; only task_id is required.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "errors": {
      "type": "array",
      "title": "Errors",
      "description": "Errors if any",
      "items": {
        "type": "string"
      },
      "order": 3
    },
    "submit_id": {
      "type": "integer",
      "title": "Submit ID",
      "description": "Submission ID",
      "order": 2
    },
    "task_id": {
      "type": "integer",
      "title": "Task ID",
      "description": "Task ID",
      "order": 1
    }
  },
  "required": [
    "task_id"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| 19.757282 | 124 | 0.503686 |
911f6d144f9ce1d4cdcc80b48a4e3bb483a05d38 | 67,589 | py | Python | tests/integration/test_storage_rabbitmq/test.py | edani/ClickHouse | 17a8a4e9664fabed5b370b37e148139ba698acf5 | [
"Apache-2.0"
] | 3 | 2021-02-16T13:50:34.000Z | 2021-03-19T12:22:52.000Z | tests/integration/test_storage_rabbitmq/test.py | edani/ClickHouse | 17a8a4e9664fabed5b370b37e148139ba698acf5 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_storage_rabbitmq/test.py | edani/ClickHouse | 17a8a4e9664fabed5b370b37e148139ba698acf5 | [
"Apache-2.0"
] | 1 | 2021-07-30T19:50:45.000Z | 2021-07-30T19:50:45.000Z | import json
import os.path as p
import random
import subprocess
import threading
import time
from random import randrange
import pika
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from . import rabbitmq_pb2
# Docker test cluster: one ClickHouse instance wired to a RabbitMQ broker.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=['configs/rabbitmq.xml', 'configs/log_conf.xml'],
                                with_rabbitmq=True)
# clickhouse_path_dir='clickhouse_path')

# Docker container id of the RabbitMQ broker; populated by the rabbitmq_cluster fixture.
rabbitmq_id = ''
# Helpers
def check_rabbitmq_is_available():
    """Return True when `rabbitmqctl await_startup` succeeds inside the broker container."""
    probe = subprocess.Popen(
        ('docker', 'exec', '-i', rabbitmq_id, 'rabbitmqctl', 'await_startup'),
        stdout=subprocess.PIPE)
    probe.communicate()
    return probe.returncode == 0
def enable_consistent_hash_plugin():
    """Enable the rabbitmq_consistent_hash_exchange plugin in the broker container.

    Returns True when the docker-exec'd rabbitmq-plugins command exits 0.
    """
    enabler = subprocess.Popen(
        ('docker', 'exec', '-i', rabbitmq_id,
         "rabbitmq-plugins", "enable", "rabbitmq_consistent_hash_exchange"),
        stdout=subprocess.PIPE)
    enabler.communicate()
    return enabler.returncode == 0
def wait_rabbitmq_is_available(max_retries=50):
    """Poll the broker until RabbitMQ reports started (one attempt per second).

    Raises RuntimeError after `max_retries` failed attempts.
    """
    retries = 0
    while True:
        if check_rabbitmq_is_available():
            break
        else:
            retries += 1
            if retries > max_retries:
                # BUGFIX: `raise "<str>"` is a TypeError in Python 3 -- the intended
                # message was swallowed. Raise a proper exception instead.
                raise RuntimeError("RabbitMQ is not available")
            print("Waiting for RabbitMQ to start up")
            time.sleep(1)
def wait_rabbitmq_plugin_enabled(max_retries=50):
    """Retry enabling the consistent-hash-exchange plugin until it succeeds.

    Raises RuntimeError after `max_retries` failed attempts (one per second).
    """
    retries = 0
    while True:
        if enable_consistent_hash_plugin():
            break
        else:
            retries += 1
            if retries > max_retries:
                # BUGFIX: `raise "<str>"` is a TypeError in Python 3 -- the intended
                # message was swallowed. Raise a proper exception instead.
                raise RuntimeError("RabbitMQ plugin is not available")
            print("Waiting for plugin")
            time.sleep(1)
def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'):
    """Compare query output against a reference TSV file.

    When check=True, assert equality; otherwise return the comparison result.
    """
    reference_path = p.join(p.dirname(__file__), ref_file)
    with open(reference_path) as reference:
        matches = TSV(result) == TSV(reference)
    if check:
        assert matches
    else:
        return matches
def kill_rabbitmq():
    """Stop the RabbitMQ docker container; return True when docker exits 0."""
    stopper = subprocess.Popen(('docker', 'stop', rabbitmq_id), stdout=subprocess.PIPE)
    stopper.communicate()
    return stopper.returncode == 0
def revive_rabbitmq():
    """Restart a previously stopped RabbitMQ container; return True when docker exits 0."""
    starter = subprocess.Popen(('docker', 'start', rabbitmq_id), stdout=subprocess.PIPE)
    starter.communicate()
    return starter.returncode == 0
# Fixtures
@pytest.fixture(scope="module")
def rabbitmq_cluster():
    """Module-scoped fixture: start the docker cluster once, always shut it down."""
    try:
        global rabbitmq_id
        cluster.start()
        # remember the broker container id for the docker-exec helpers above
        rabbitmq_id = instance.cluster.rabbitmq_docker_id
        print(("rabbitmq_id is {}".format(rabbitmq_id)))
        instance.query('CREATE DATABASE test')

        yield cluster

    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def rabbitmq_setup_teardown():
    """Per-test fixture: wait for the broker and its plugin, clean up the engine table after."""
    wait_rabbitmq_is_available()
    wait_rabbitmq_plugin_enabled()
    print("RabbitMQ is available - running test")
    yield  # run test
    instance.query('DROP TABLE IF EXISTS test.rabbitmq')
# Tests
@pytest.mark.timeout(240)
def test_rabbitmq_select(rabbitmq_cluster):
    """Publish 50 JSONEachRow messages and read them back via SELECT on the engine table."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'select',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='select', routing_key='', body=message)
    connection.close()

    # The order of messages in select * from test.rabbitmq is not guaranteed, so sleep to collect everything in one select
    time.sleep(1)

    # messages are consumed incrementally, so keep appending until all 50 rows arrived
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_select_empty(rabbitmq_cluster):
    """SELECT count() from an engine table with no published messages must be zero."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'empty',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')

    assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0
@pytest.mark.timeout(240)
def test_rabbitmq_json_without_delimiter(rabbitmq_cluster):
    """JSONEachRow parsing must work when many rows are packed into one message
    and no rabbitmq_row_delimiter is configured."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'json',
                     rabbitmq_format = 'JSONEachRow'
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    # first message carries rows 0..24 as one newline-joined payload
    messages = ''
    for i in range(25):
        messages += json.dumps({'key': i, 'value': i}) + '\n'

    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='json', routing_key='', body=message)

    # second message carries rows 25..49
    messages = ''
    for i in range(25, 50):
        messages += json.dumps({'key': i, 'value': i}) + '\n'

    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='json', routing_key='', body=message)

    connection.close()
    time.sleep(1)

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster):
    """Consume CSV-formatted messages split on the newline row delimiter."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'csv',
                     rabbitmq_format = 'CSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))

    for message in messages:
        channel.basic_publish(exchange='csv', routing_key='', body=message)

    connection.close()
    time.sleep(1)

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster):
    """Consume TSV-formatted messages split on the newline row delimiter."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'tsv',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append('{i}\t{i}'.format(i=i))

    for message in messages:
        channel.basic_publish(exchange='tsv', routing_key='', body=message)

    connection.close()
    time.sleep(1)

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_materialized_view(rabbitmq_cluster):
    """Messages must flow through a materialized view into a MergeTree target table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mv', routing_key='', body=message)

    # poll the target table until the MV has delivered all rows
    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if (rabbitmq_check_result(result)):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
    """Same as the MV test, but the view selects from a subquery over the engine table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mvsq',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM (SELECT * FROM test.rabbitmq);
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mvsq', routing_key='', body=message)

    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_many_materialized_views(rabbitmq_cluster):
    """Two materialized views on one engine table must each receive every message."""
    instance.query('''
        DROP TABLE IF EXISTS test.view1;
        DROP TABLE IF EXISTS test.view2;
        DROP TABLE IF EXISTS test.consumer1;
        DROP TABLE IF EXISTS test.consumer2;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mmv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view1 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE TABLE test.view2 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
            SELECT * FROM test.rabbitmq;
        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.rabbitmq;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mmv', routing_key='', body=message)

    # both target tables must eventually hold the full reference data set
    while True:
        result1 = instance.query('SELECT * FROM test.view1 ORDER BY key')
        result2 = instance.query('SELECT * FROM test.view2 ORDER BY key')
        if rabbitmq_check_result(result1) and rabbitmq_check_result(result2):
            break

    instance.query('''
        DROP TABLE test.consumer1;
        DROP TABLE test.consumer2;
        DROP TABLE test.view1;
        DROP TABLE test.view2;
    ''')

    connection.close()
    rabbitmq_check_result(result1, True)
    rabbitmq_check_result(result2, True)
@pytest.mark.skip(reason="clichouse_path with rabbitmq.proto fails to be exported")
@pytest.mark.timeout(240)
def test_rabbitmq_protobuf(rabbitmq_cluster):
    """Consume length-delimited Protobuf messages (currently skipped, see reason above).

    Rows are deliberately published in three uneven batches (20 + 1 + 29) to
    exercise message-boundary handling in the Protobuf parser.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value String)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'pb',
                     rabbitmq_format = 'Protobuf',
                     rabbitmq_schema = 'rabbitmq.proto:KeyValueProto';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    # each row is varint-length-prefixed, matching ClickHouse's Protobuf framing
    data = ''
    for i in range(0, 20):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)
    data = ''
    for i in range(20, 21):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)
    data = ''
    for i in range(21, 50):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)

    connection.close()

    result = ''
    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_big_message(rabbitmq_cluster):
    """Consume 1000 large (~100 KB) messages, each packing 1000 JSON rows."""
    # Create batchs of messages of size ~100Kb
    rabbitmq_messages = 1000
    batch_messages = 1000
    messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)]

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value String)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'big',
                     rabbitmq_format = 'JSONEachRow';
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
    ''')

    for message in messages:
        channel.basic_publish(exchange='big', routing_key='', body=message)

    while True:
        result = instance.query('SELECT count() FROM test.view')
        if int(result) == batch_messages * rabbitmq_messages:
            break

    connection.close()
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    assert int(result) == rabbitmq_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
    """Publish from 20 threads into a table sharded over 10 consumers x 10 queues;
    all rows must arrive and exactly 10 distinct channel ids must be observed."""
    # (removed unused NUM_CONSUMERS/NUM_QUEUES locals; the values live in the table
    # settings below: rabbitmq_num_consumers = 10, rabbitmq_num_queues = 10)
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'test_sharding',
                     rabbitmq_num_queues = 10,
                     rabbitmq_num_consumers = 10,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64, channel_id String)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT *, _channel_id AS channel_id FROM test.rabbitmq;
    ''')

    # NOTE(review): i[0] is shared across producer threads without a lock, so keys
    # may race/duplicate; the test only checks counts, so this is tolerated.
    i = [0]
    messages_num = 10000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # each thread publishes messages_num rows with sequential message ids
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()

        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        current = 0
        for message in messages:
            current += 1
            mes_id = str(current)
            channel.basic_publish(exchange='test_sharding', routing_key='',
                                  properties=pika.BasicProperties(message_id=mes_id), body=message)
        connection.close()

    threads = []
    threads_num = 20

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.view")

    for thread in threads:
        thread.join()

    # BUGFIX: the assertion message used `result`, an undefined name, which raised
    # NameError instead of the intended message when the assertion failed.
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    assert int(result2) == 10
@pytest.mark.timeout(420)
def test_rabbitmq_mv_combo(rabbitmq_cluster):
    """Five materialized views on one sharded engine table; each must get every message."""
    NUM_MV = 5
    # NOTE(review): NUM_CONSUMERS is unused below; the table settings declare
    # rabbitmq_num_consumers = 2 -- confirm which value was intended.
    NUM_CONSUMERS = 4

    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'combo',
                     rabbitmq_queue_base = 'combo',
                     rabbitmq_num_consumers = 2,
                     rabbitmq_num_queues = 5,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    for mv_id in range(NUM_MV):
        instance.query('''
            DROP TABLE IF EXISTS test.combo_{0};
            DROP TABLE IF EXISTS test.combo_{0}_mv;
            CREATE TABLE test.combo_{0} (key UInt64, value UInt64)
                ENGINE = MergeTree()
                ORDER BY key;
            CREATE MATERIALIZED VIEW test.combo_{0}_mv TO test.combo_{0} AS
                SELECT * FROM test.rabbitmq;
        '''.format(mv_id))

    time.sleep(2)

    i = [0]
    messages_num = 10000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()

        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='combo', routing_key='',
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 20

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    # wait until every MV target received the full message stream
    while True:
        result = 0
        for mv_id in range(NUM_MV):
            result += int(instance.query('SELECT count() FROM test.combo_{0}'.format(mv_id)))
        if int(result) == messages_num * threads_num * NUM_MV:
            break
        time.sleep(1)

    for thread in threads:
        thread.join()

    for mv_id in range(NUM_MV):
        instance.query('''
            DROP TABLE test.combo_{0}_mv;
            DROP TABLE test.combo_{0};
        '''.format(mv_id))

    assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(240)
def test_rabbitmq_insert(rabbitmq_cluster):
    """INSERT into the engine table must publish the rows to the bound direct
    exchange, where an external pika consumer can read them back."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'insert',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert1',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    consumer_connection = pika.BlockingConnection(parameters)

    consumer = consumer_connection.channel()
    result = consumer.queue_declare(queue='')
    queue_name = result.method.queue
    consumer.queue_bind(exchange='insert', queue=queue_name, routing_key='insert1')

    values = []
    for i in range(50):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # the INSERT can time out while the exchange is still being set up -- retry
    while True:
        try:
            instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    insert_messages = []

    def onReceived(channel, method, properties, body):
        # collect messages until all 50 rows were seen, then stop consuming
        # (removed an unused local `i = 0` from the original callback)
        insert_messages.append(body.decode())
        if (len(insert_messages) == 50):
            channel.stop_consuming()

    consumer.basic_consume(onReceived, queue_name)
    consumer.start_consuming()
    consumer_connection.close()

    result = '\n'.join(insert_messages)
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster):
    """INSERT must publish to a headers exchange; a consumer bound with matching
    x-match=all header arguments must receive every row."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'insert_headers',
                     rabbitmq_exchange_type = 'headers',
                     rabbitmq_routing_key_list = 'test=insert,topic=headers',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    consumer_connection = pika.BlockingConnection(parameters)

    consumer = consumer_connection.channel()
    result = consumer.queue_declare(queue='')
    queue_name = result.method.queue
    consumer.queue_bind(exchange='insert_headers', queue=queue_name, routing_key="",
                        arguments={'x-match': 'all', 'test': 'insert', 'topic': 'headers'})

    values = []
    for i in range(50):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # the INSERT can time out while the exchange is still being set up -- retry
    while True:
        try:
            instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    insert_messages = []

    def onReceived(channel, method, properties, body):
        # collect messages until all 50 rows were seen, then stop consuming
        # (removed an unused local `i = 0` from the original callback)
        insert_messages.append(body.decode())
        if (len(insert_messages) == 50):
            channel.stop_consuming()

    consumer.basic_consume(onReceived, queue_name)
    consumer.start_consuming()
    consumer_connection.close()

    result = '\n'.join(insert_messages)
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_many_inserts(rabbitmq_cluster):
    """20 concurrent INSERT threads publish through one engine table; a second
    engine table bound to the same exchange must consume all rows into a MV."""
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_many;
        DROP TABLE IF EXISTS test.rabbitmq_consume;
        DROP TABLE IF EXISTS test.view_many;
        DROP TABLE IF EXISTS test.consumer_many;
        CREATE TABLE test.rabbitmq_many (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'many_inserts',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert2',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'many_inserts',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert2',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_many (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
            SELECT * FROM test.rabbitmq_consume;
    ''')

    messages_num = 1000

    def insert():
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)

        # the INSERT can time out while the exchange is being set up -- retry
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise

    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    while True:
        result = instance.query('SELECT count() FROM test.view_many')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break

    instance.query('''
        DROP TABLE test.rabbitmq_consume;
        DROP TABLE test.rabbitmq_many;
        DROP TABLE test.consumer_many;
        DROP TABLE test.view_many;
    ''')

    for thread in threads:
        thread.join()

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
    """Stress test: 5 threads each INSERT 100k rows; a multi-consumer/multi-queue
    engine table on the same exchange must still deliver every row."""
    instance.query('''
        DROP TABLE IF EXISTS test.view_overload;
        DROP TABLE IF EXISTS test.consumer_overload;
        DROP TABLE IF EXISTS test.rabbitmq_consume;
        CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'over',
                     rabbitmq_queue_base = 'over',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_num_consumers = 5,
                     rabbitmq_num_queues = 10,
                     rabbitmq_max_block_size = 10000,
                     rabbitmq_routing_key_list = 'over',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.rabbitmq_overload (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'over',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'over',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_overload (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
            SELECT * FROM test.rabbitmq_consume;
    ''')

    messages_num = 100000

    def insert():
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)

        # the INSERT can time out while the exchange is being set up -- retry
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise

    threads = []
    threads_num = 5
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    while True:
        result = instance.query('SELECT count() FROM test.view_overload')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break

    instance.query('''
        DROP TABLE test.consumer_overload;
        DROP TABLE test.view_overload;
        DROP TABLE test.rabbitmq_consume;
        DROP TABLE test.rabbitmq_overload;
    ''')

    for thread in threads:
        thread.join()

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_direct_exchange(rabbitmq_cluster):
    """Five engine tables bound to one direct exchange with distinct routing keys;
    each table must receive exactly the messages published with its key."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
        ENGINE = MergeTree()
        ORDER BY key
        SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.direct_exchange_{0};
            DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
            CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'direct_exchange_testing',
                         rabbitmq_exchange_type = 'direct',
                         rabbitmq_routing_key_list = 'direct_{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.direct_exchange_{0};
        '''.format(consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # publish the same batch once per routing key, one key per table
    key_num = 0
    for num in range(num_tables):
        key = "direct_" + str(key_num)
        key_num += 1
        for message in messages:
            mes_id = str(randrange(10))
            channel.basic_publish(
                exchange='direct_exchange_testing', routing_key=key,
                properties=pika.BasicProperties(message_id=mes_id), body=message)

    connection.close()

    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.direct_exchange_{0}_mv;
            DROP TABLE test.direct_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_fanout_exchange(rabbitmq_cluster):
    # Fanout routing: every table binds to the same fanout exchange, so each
    # of the num_tables consumers must receive every published message once.
    # All consumer tables funnel their rows into test.destination through a
    # materialized view, so messages_num * num_tables rows are expected.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        # NOTE: the routing key list is irrelevant for a fanout exchange; it
        # is still set here ('key_{0}') but routing ignores it.
        instance.query('''
            DROP TABLE IF EXISTS test.fanout_exchange_{0};
            DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
            CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_routing_key_list = 'key_{0}',
                         rabbitmq_exchange_name = 'fanout_exchange_testing',
                         rabbitmq_exchange_type = 'fanout',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.fanout_exchange_{0};
        '''.format(consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Publish each message once; the fanout exchange copies it to every queue.
    for msg_id in range(messages_num):
        channel.basic_publish(exchange='fanout_exchange_testing', routing_key='',
                              properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])

    connection.close()

    # Poll until all expected rows arrive; the pytest timeout above guards
    # against hanging forever on lost messages.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.fanout_exchange_{0}_mv;
            DROP TABLE test.fanout_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_topic_exchange(rabbitmq_cluster):
    # Topic routing:
    #   - tables 0..num_tables-1 bind with pattern '*.{id}' and each receives
    #     only the messages published under routing key 'topic.{id}';
    #   - tables num_tables..2*num_tables-1 all bind with '*.logs' and each
    #     receives every message published under 'random.logs'.
    # All consumer tables feed test.destination via materialized views, so
    # the expected total is messages_num * num_tables for the per-id keys
    # plus messages_num * num_tables for the '*.logs' group.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.topic_exchange_{0};
        '''.format(consumer_id))

    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(num_tables + consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.logs',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.topic_exchange_{0};
        '''.format(num_tables + consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Publish the whole batch once per per-table key 'topic.0' .. 'topic.4'.
    key_num = 0
    for num in range(num_tables):
        key = "topic." + str(key_num)
        key_num += 1
        for message in messages:
            channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message)

    # Publish the batch once under 'random.logs'; it matches every '*.logs'
    # binding.  (Removed an unused local `current = 0` that was never read.)
    key = "random.logs"
    for msg_id in range(messages_num):
        channel.basic_publish(exchange='topic_exchange_testing', routing_key=key,
                              properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])

    connection.close()

    # Poll until every expected row arrives; the pytest timeout guards us.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables + messages_num * num_tables:
            break

    for consumer_id in range(num_tables * 2):
        instance.query('''
            DROP TABLE test.topic_exchange_{0}_mv;
            DROP TABLE test.topic_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(
        result) == messages_num * num_tables + messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(
        result)
@pytest.mark.timeout(420)
def test_rabbitmq_hash_exchange(rabbitmq_cluster):
    # Consistent-hash exchange: messages are distributed over the queues of
    # four consumer tables by hashing the routing key.  Every message must
    # land in test.destination exactly once, and since each table runs
    # rabbitmq_num_consumers = 4 channels, 4 * num_tables distinct channel
    # ids must be observed.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 4
    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        print(("Setting up {}".format(table_name)))
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 4,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_type = 'consistent_hash',
                         rabbitmq_exchange_name = 'hash_exchange_testing',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
                SELECT key, value, _channel_id AS channel_id FROM test.{0};
        '''.format(table_name))

    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        # Vary the routing key per message so the hash spreads them out.
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='hash_exchange_testing', routing_key=str(msg_id),
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 10

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        # Stagger thread start-up to randomise publish interleaving.
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")

    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        instance.query('''
            DROP TABLE test.{0}_mv;
            DROP TABLE test.{0};
        '''.format(table_name))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    for thread in threads:
        thread.join()

    # Fixed: the failure message referenced an undefined name `result`, which
    # raised a NameError instead of reporting the actual mismatch.
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    assert int(result2) == 4 * num_tables
@pytest.mark.timeout(420)
def test_rabbitmq_multiple_bindings(rabbitmq_cluster):
    # One RabbitMQ table bound to a direct exchange under five routing keys.
    # Ten producer threads each publish the same batch once per key, so the
    # destination must end up with messages_num * threads_num * 5 rows.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    instance.query('''
        DROP TABLE IF EXISTS test.bindings;
        DROP TABLE IF EXISTS test.bindings_mv;
        CREATE TABLE test.bindings (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'multiple_bindings_testing',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.bindings_mv TO test.destination AS
            SELECT * FROM test.bindings;
    ''')

    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()

        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1

        # Publish the entire batch once per bound routing key.
        keys = ['key1', 'key2', 'key3', 'key4', 'key5']
        for key in keys:
            for message in messages:
                channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key, body=message)

        connection.close()

    threads = []
    threads_num = 10

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        # Stagger thread start-up to randomise publish interleaving.
        time.sleep(random.uniform(0, 1))
        thread.start()

    # Poll until every expected row has been consumed (pytest timeout guards us).
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * threads_num * 5:
            break

    for thread in threads:
        thread.join()

    instance.query('''
        DROP TABLE test.bindings;
        DROP TABLE test.bindings_mv;
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * threads_num * 5, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_headers_exchange(rabbitmq_cluster):
    # Headers exchange: routing is decided by message header values, not
    # routing keys.  Two tables bind with x-match=all on year=2020 and must
    # each receive every message; two more bind on year=2019 and must stay
    # empty, since all published messages carry year=2020.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables_to_receive = 2
    for consumer_id in range(num_tables_to_receive):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.headers_exchange_{0};
        '''.format(consumer_id))

    num_tables_to_ignore = 2
    for consumer_id in range(num_tables_to_ignore):
        print(("Setting up table {}".format(consumer_id + num_tables_to_receive)))
        # Same exchange, but bound on year=2019 - these must receive nothing.
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.headers_exchange_{0};
        '''.format(consumer_id + num_tables_to_receive))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Headers matched against the bindings above; year=2020 selects only the
    # first group of tables.
    fields = {}
    fields['format'] = 'logs'
    fields['type'] = 'report'
    fields['year'] = '2020'

    for msg_id in range(messages_num):
        channel.basic_publish(exchange='headers_exchange_testing', routing_key='',
                              properties=pika.BasicProperties(headers=fields, message_id=str(msg_id)),
                              body=messages[msg_id])

    connection.close()

    # Poll until each receiving table has consumed the full batch.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables_to_receive:
            break

    for consumer_id in range(num_tables_to_receive + num_tables_to_ignore):
        instance.query('''
            DROP TABLE test.headers_exchange_{0}_mv;
            DROP TABLE test.headers_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_virtual_columns(rabbitmq_cluster):
    # Check the engine-provided virtual columns (_exchange_name, _channel_id,
    # _delivery_tag, _redelivered) selected directly from the RabbitMQ table.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        CREATE TABLE test.rabbitmq_virtuals (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'virtuals',
                     rabbitmq_format = 'JSONEachRow';
        CREATE MATERIALIZED VIEW test.view Engine=Log AS
        SELECT value, key, _exchange_name, _channel_id, _delivery_tag, _redelivered FROM test.rabbitmq_virtuals;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    message_num = 10
    i = 0
    messages = []
    for _ in range(message_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1

    for message in messages:
        channel.basic_publish(exchange='virtuals', routing_key='', body=message)

    # Wait until all messages were consumed into the view.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == message_num:
            break

    connection.close()

    # Only a prefix of _channel_id is compared ('1_0' expected); delivery
    # tags are sequential starting from 1, and nothing was redelivered.
    result = instance.query('''
        SELECT key, value, _exchange_name, SUBSTRING(_channel_id, 1, 3), _delivery_tag, _redelivered
        FROM test.view ORDER BY key
    ''')

    expected = '''\
0	0	virtuals	1_0	1	0
1	1	virtuals	1_0	2	0
2	2	virtuals	1_0	3	0
3	3	virtuals	1_0	4	0
4	4	virtuals	1_0	5	0
5	5	virtuals	1_0	6	0
6	6	virtuals	1_0	7	0
7	7	virtuals	1_0	8	0
8	8	virtuals	1_0	9	0
9	9	virtuals	1_0	10	0
'''

    instance.query('''
        DROP TABLE test.rabbitmq_virtuals;
        DROP TABLE test.view;
    ''')

    assert TSV(result) == TSV(expected)
@pytest.mark.timeout(420)
def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster):
    # Same virtual-column check as above, but the virtuals are aliased into
    # regular columns of a MergeTree target table through a materialized view.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq_virtuals_mv (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'virtuals_mv',
                     rabbitmq_format = 'JSONEachRow';
        CREATE TABLE test.view (key UInt64, value UInt64,
            exchange_name String, channel_id String, delivery_tag UInt64, redelivered UInt8) ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
        SELECT *, _exchange_name as exchange_name, _channel_id as channel_id, _delivery_tag as delivery_tag, _redelivered as redelivered
        FROM test.rabbitmq_virtuals_mv;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    message_num = 10
    i = 0
    messages = []
    for _ in range(message_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1

    for message in messages:
        channel.basic_publish(exchange='virtuals_mv', routing_key='', body=message)

    # Wait until all messages were consumed into the target table.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == message_num:
            break

    connection.close()

    # Delivery tags are sequential from 1; only a prefix of channel_id is
    # compared, and no message should be marked redelivered.
    result = instance.query(
        "SELECT key, value, exchange_name, SUBSTRING(channel_id, 1, 3), delivery_tag, redelivered FROM test.view ORDER BY delivery_tag")
    expected = '''\
0	0	virtuals_mv	1_0	1	0
1	1	virtuals_mv	1_0	2	0
2	2	virtuals_mv	1_0	3	0
3	3	virtuals_mv	1_0	4	0
4	4	virtuals_mv	1_0	5	0
5	5	virtuals_mv	1_0	6	0
6	6	virtuals_mv	1_0	7	0
7	7	virtuals_mv	1_0	8	0
8	8	virtuals_mv	1_0	9	0
9	9	virtuals_mv	1_0	10	0
'''

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        DROP TABLE test.rabbitmq_virtuals_mv
    ''')

    assert TSV(result) == TSV(expected)
@pytest.mark.timeout(420)
def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster):
    # Four tables share the same durable queues (rabbitmq_queue_base =
    # 'many_consumers'), each running two consumer channels.  Every message
    # must arrive exactly once overall, and 4 tables * 2 consumers = 8
    # distinct channel ids must be observed in the destination table.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 4
    for table_id in range(num_tables):
        print(("Setting up table {}".format(table_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.many_consumers_{0};
            DROP TABLE IF EXISTS test.many_consumers_{0}_mv;
            CREATE TABLE test.many_consumers_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_exchange_name = 'many_consumers',
                         rabbitmq_num_queues = 2,
                         rabbitmq_num_consumers = 2,
                         rabbitmq_queue_base = 'many_consumers',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.many_consumers_{0}_mv TO test.destination AS
                SELECT key, value, _channel_id as channel_id FROM test.many_consumers_{0};
        '''.format(table_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='many_consumers', routing_key='',
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 20

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        # Stagger thread start-up to randomise publish interleaving.
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")

    for thread in threads:
        thread.join()

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.many_consumers_{0};
            DROP TABLE test.many_consumers_{0}_mv;
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    # Fixed: the failure message referenced an undefined name `result`, which
    # raised a NameError instead of reporting the actual mismatch.
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    # 4 tables, 2 consumers for each table => 8 consumer tags
    assert int(result2) == 8
@pytest.mark.timeout(420)
def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster):
    # Producer-side reconnect test: rows are INSERTed through a RabbitMQ
    # engine table with rabbitmq_persistent='1', the broker is killed after
    # consumption has started, then revived; every distinct key must still
    # reach the consumer view (redeliveries may duplicate rows).
    instance.query('''
        DROP TABLE IF EXISTS test.consume;
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE TABLE test.consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'producer_reconnect',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_num_consumers = 2,
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.consume;
        DROP TABLE IF EXISTS test.producer_reconnect;
        CREATE TABLE test.producer_reconnect (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'producer_reconnect',
                     rabbitmq_persistent = '1',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    # NOTE: a pika connection/channel pair was created here but never used
    # (all publishing goes through the engine table) and never closed; the
    # dead objects were removed.
    messages_num = 100000
    values = []
    for i in range(messages_num):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # Retry the INSERT on client-side timeouts; any other error is fatal.
    while True:
        try:
            instance.query("INSERT INTO test.producer_reconnect VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    # Wait until consumption has started, then break the broker connection.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(0.1)
    kill_rabbitmq()
    time.sleep(4)
    revive_rabbitmq()

    # Count DISTINCT keys: duplicates are tolerated, losses are not.
    while True:
        result = instance.query('SELECT count(DISTINCT key) FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        DROP TABLE test.consume;
        DROP TABLE test.producer_reconnect;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster):
    # Consumer-side reconnect test: publish everything up front as persistent
    # messages, let consumption start, kill and revive the broker, and check
    # that every distinct key is eventually consumed.
    instance.query('''
        CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'consumer_reconnect',
                     rabbitmq_num_consumers = 10,
                     rabbitmq_num_queues = 10,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    i = 0
    messages_num = 150000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1
    # delivery_mode=2 marks the messages persistent so they survive the
    # broker restart below.
    for msg_id in range(messages_num):
        channel.basic_publish(exchange='consumer_reconnect', routing_key='', body=messages[msg_id],
                              properties=pika.BasicProperties(delivery_mode=2, message_id=str(msg_id)))
    connection.close()
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.consumer_reconnect;
    ''')

    # Wait for consumption to begin, then restart the broker.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(0.1)
    kill_rabbitmq()
    time.sleep(8)
    revive_rabbitmq()

    # Redelivered messages may duplicate rows; only losses are a failure,
    # hence count(DISTINCT key).
    while True:
        result = instance.query('SELECT count(DISTINCT key) FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.consumer_reconnect;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_rabbitmq_commit_on_block_write(rabbitmq_cluster):
    # Drop and re-create the RabbitMQ table mid-stream (same queue_base, so
    # the new table resumes from the same durable queues) and verify that
    # already-committed blocks are not consumed twice: in the end count()
    # must equal uniqExact(key), i.e. no duplicated rows.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'block',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_queue_base = 'block',
                     rabbitmq_max_block_size = 100,
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    cancel = threading.Event()

    i = [0]

    def produce():
        # Keep publishing batches of 101 messages until cancelled.
        # NOTE(review): the channel created on the main thread is reused from
        # this worker thread - confirm this is acceptable for pika here.
        while not cancel.is_set():
            messages = []
            for _ in range(101):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            for message in messages:
                channel.basic_publish(exchange='block', routing_key='', body=message)

    rabbitmq_thread = threading.Thread(target=produce)
    rabbitmq_thread.start()

    # Wait for the first consumed rows, stop producing, then drop the engine
    # table while messages are still pending in the queues.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(1)

    cancel.set()

    instance.query('''
        DROP TABLE test.rabbitmq;
    ''')

    # Wait until the drop is reflected in system.tables before re-creating.
    while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='rabbitmq'")) == 1:
        time.sleep(1)

    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'block',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_max_block_size = 100,
                     rabbitmq_queue_base = 'block',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    # The re-created table must consume the remaining messages exactly once.
    while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
        time.sleep(1)

    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    rabbitmq_thread.join()
    connection.close()

    assert result == 1, 'Messages from RabbitMQ get duplicated!'
@pytest.mark.timeout(420)
def test_rabbitmq_no_connection_at_startup(rabbitmq_cluster):
    # no connection when table is initialized
    rabbitmq_cluster.pause_container('rabbitmq1')
    # Table creation must still succeed with the broker unreachable; the
    # engine finishes its RabbitMQ setup once the broker is back.
    instance.query('''
        CREATE TABLE test.cs (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'cs',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_num_consumers = '5',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.cs;
    ''')
    time.sleep(5)
    rabbitmq_cluster.unpause_container('rabbitmq1')
    # need to make sure rabbit table made all rabbit setup
    time.sleep(10)

    messages_num = 1000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # delivery_mode=2 marks the messages persistent.
    for i in range(messages_num):
        message = json.dumps({'key': i, 'value': i})
        channel.basic_publish(exchange='cs', routing_key='', body=message,
                              properties=pika.BasicProperties(delivery_mode=2, message_id=str(i)))
    connection.close()

    # Poll until the full batch is consumed (pytest timeout guards us).
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.cs;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
if __name__ == '__main__':
    # Manual debugging entry point: bring the cluster up and keep it alive
    # until the user presses a key, then tear everything down.
    cluster.start()
    input("Cluster created, press any key to destroy...")
    cluster.shutdown()
| 35.220948 | 136 | 0.611875 |
e108d0ccbd5bda558348bee9344caccaeaa8336b | 1,020 | py | Python | firebolt/config.py | juddcraft12/Firebolt | 05625677a5f0b604a799f99e5f6e79348b1c6749 | [
"MIT"
] | null | null | null | firebolt/config.py | juddcraft12/Firebolt | 05625677a5f0b604a799f99e5f6e79348b1c6749 | [
"MIT"
] | 12 | 2021-09-10T04:34:52.000Z | 2022-03-25T04:28:56.000Z | firebolt/config.py | juddcraft12/Firebolt | 05625677a5f0b604a799f99e5f6e79348b1c6749 | [
"MIT"
] | null | null | null | import os
import firebolt
def bot_owner():
    """Return the configured bot owner (BOT_OWNER environment variable)."""
    owner = os.environ["BOT_OWNER"]
    return owner


def bot_token():
    """Return the bot's login token (BOT_TOKEN environment variable)."""
    token = os.environ["BOT_TOKEN"]
    return token


def bot_prefix():
    """Return the configured command prefix (BOT_PREFIX environment variable)."""
    prefix = os.environ["BOT_PREFIX"]
    return prefix
def bot_status():
    """Return the bot status text from the BOT_STATUS environment variable.

    Falls back to a default built from the configured prefixes and the
    firebolt version when BOT_STATUS is not set.  The original code returned
    ``os.environ['BOT_STATUS']`` in both the try and except branches, so the
    computed default was dead code and a missing variable still raised
    KeyError; the default is now computed lazily and actually used.
    """
    try:
        return os.environ['BOT_STATUS']
    except KeyError:
        # NOTE(review): bot_prefix() returns a string, so ", ".join() joins
        # its characters - fine for a single-char prefix; confirm intent for
        # multi-character prefixes.
        return f'{", ".join(firebolt.config.bot_prefix())} | Teapot.py {firebolt.version()}'
def storage_type():
    """Return the storage backend name.

    Anything other than "mysql" is normalised to "flatfile", and the
    STORAGE_TYPE environment variable is updated in place to match.
    """
    backend = os.environ['STORAGE_TYPE']
    if backend != "mysql":
        backend = "flatfile"
        os.environ['STORAGE_TYPE'] = backend
    return backend
def db_host():
    """Return the database host (DB_HOST environment variable)."""
    return os.environ["DB_HOST"]


def db_port():
    """Return the database port as a string (DB_PORT environment variable)."""
    return os.environ["DB_PORT"]


def db_schema():
    """Return the database schema name (DB_SCHEMA environment variable)."""
    return os.environ["DB_SCHEMA"]


def db_user():
    """Return the database user name (DB_USER environment variable)."""
    return os.environ["DB_USER"]


def db_password():
    """Return the database password (DB_PASSWORD environment variable)."""
    return os.environ["DB_PASSWORD"]
def lavalink_host():
    """Return the Lavalink host (LAVALINK_HOST environment variable)."""
    return os.environ["LAVALINK_HOST"]


def lavalink_port():
    """Return the Lavalink port as a string (LAVALINK_PORT environment variable)."""
    return os.environ["LAVALINK_PORT"]


def lavalink_password():
    """Return the Lavalink password (LAVALINK_PASSWORD environment variable)."""
    return os.environ["LAVALINK_PASSWORD"]
| 16.721311 | 98 | 0.667647 |
8e86cc29eb6f63a6f5012a0937991b13e20e4011 | 45,761 | py | Python | devkit/python/evaluate_tracking.py | yoyomimi/TNT_pytorch | 165c5da0bc66baf84bb1ddc5fe0cf6a101555c59 | [
"Apache-2.0"
] | 20 | 2020-03-08T14:47:04.000Z | 2021-11-14T12:55:28.000Z | devkit/python/evaluate_tracking.py | conq44/MOT-trackletNet-Visdrone | 0fa717de9d682abcdbf56acbd33b8807b50bfba3 | [
"Apache-2.0"
] | 13 | 2020-03-08T14:39:01.000Z | 2022-03-12T00:18:08.000Z | devkit/python/evaluate_tracking.py | yoyomimi/TNT_pytorch | 165c5da0bc66baf84bb1ddc5fe0cf6a101555c59 | [
"Apache-2.0"
] | 6 | 2020-03-08T14:47:11.000Z | 2021-05-21T10:33:05.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
function that does the evaluation
input:
- result_sha (sha key where the results are located)
- mail (messenger object for output messages sent via email and to cout)
output:
- True if at least one of the sub-benchmarks could be processed successfully
- False otherwise
data:
- at this point the submitted files are located in results/<result_sha>/data
- the results shall be saved as follows
-> summary statistics of the method: results/<result_sha>/stats_task.txt
here task refers to the sub-benchmark (e.g., um_lane, uu_road etc.)
file contents: numbers for main table, format: %.6f (single space separated)
note: only files with successful sub-benchmark evaluation must be created
-> detailed results/graphics/plots: results/<result_sha>/subdir
with appropriate subdir and file names (all subdir's need to be created)
"""
import sys,os,copy,math
from munkres import Munkres
from collections import defaultdict
try:
from ordereddict import OrderedDict # can be installed using pip
except:
from collections import OrderedDict # only included from python 2.7 on
import mailpy
class tData:
    """Container for one detection / ground-truth entry loaded from a
    KITTI-style tracking label file.

    All fields default to sentinel values (-1 / -10 / -1000 / "unset") that
    mark them as "not provided".
    """

    def __init__(self, frame=-1, obj_type="unset", truncation=-1, occlusion=-1,
                 obs_angle=-10, x1=-1, y1=-1, x2=-1, y2=-1, w=-1, h=-1, l=-1,
                 X=-1000, Y=-1000, Z=-1000, yaw=-10, score=-1000, track_id=-1):
        """Initialize every field from the given keyword parameters.

        Attribute creation order is kept stable because __str__ dumps
        attributes in insertion order.
        """
        self.frame = frame
        self.track_id = track_id
        self.obj_type = obj_type
        self.truncation = truncation
        self.occlusion = occlusion
        self.obs_angle = obs_angle
        # 2D bounding box (image coordinates) and its size.
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        self.w, self.h, self.l = w, h, l
        # 3D location and orientation.
        self.X, self.Y, self.Z = X, Y, Z
        self.yaw = yaw
        self.score = score
        # Bookkeeping flags filled in later by the evaluation.
        self.ignored = False
        self.valid = False
        self.tracker = -1

    def __str__(self):
        """Return a newline-separated "name: value" dump of all attributes."""
        return '\n'.join(f"{name}: {value}" for name, value in vars(self).items())
class trackingEvaluation(object):
    """ tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
             MOTA          - Multi-object tracking accuracy in [0,100]
             MOTP          - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
             MOTAL         - Multi-object tracking accuracy in [0,100] with log10(id-switches)

             id-switches   - number of id switches
             fragments     - number of fragmentations

             MT, PT, ML    - number of mostly tracked, partially tracked and mostly lost trajectories

             recall        - recall = percentage of detected targets
             precision     - precision = percentage of correctly detected targets
             FAR           - number of false alarms per frame
             falsepositives- number of false positives (FP)
             missed        - number of missed targets (FN)
    """

    def __init__(self, t_sha, gt_path="./data/tracking", min_overlap=0.5,
                 max_truncation=0, min_height=25, max_occlusion=2, mail=None,
                 cls="car"):
        """
        Set up paths, thresholds and all statistics accumulators.

        t_sha          - unique key of the submitted results
        gt_path        - root of the ground-truth data
        min_overlap    - minimum bounding-box overlap for 3rd-party metrics
        max_truncation - maximum truncation of an object for evaluation
        min_height     - minimum bbox height of an object for evaluation
        max_occlusion  - maximum occlusion of an object for evaluation
        mail           - messenger object used for all status output
        cls            - class to evaluate ("car" or "pedestrian")
        """
        # get number of sequences and number of frames per sequence from the
        # test mapping (created while extracting the benchmark)
        filename_test_mapping = "./data/tracking/evaluate_tracking.seqmap"
        self.n_frames = []
        self.sequence_name = []
        gt_list = os.listdir("./data/tracking/label_02")
        # fix: 'i' stays defined even when the mapping file is empty
        # (original raised NameError on 'i + 1' below in that case)
        i = -1
        with open(filename_test_mapping, "r") as fh:
            for i, l in enumerate(fh):
                fields = l.split(" ")
                # skip mapping entries without a ground-truth label file
                if str(fields[0]) + '.txt' not in gt_list:
                    continue
                self.sequence_name.append("%04d" % int(fields[0]))
                self.n_frames.append(int(fields[3]) - int(fields[2]) + 1)
        # NOTE: counts all mapping lines (including skipped ones); only used
        # to size the trajectory containers below, so over-allocation is fine.
        self.n_sequences = i + 1

        # mail object for status messages
        self.mail = mail

        # class to evaluate, i.e. pedestrian or car
        self.cls = cls

        # data and parameter
        self.gt_path = os.path.join(gt_path, "label_02")
        self.t_sha = t_sha
        self.t_path = os.path.join("./results", t_sha, "data")

        # statistics and numbers for evaluation
        self.n_gt = 0     # number of ground truth detections minus ignored false negatives and true positives
        self.n_igt = 0    # number of ignored ground truth detections
        self.n_gts = []   # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
        self.n_igts = []  # number of ignored ground truth detections PER SEQUENCE
        self.n_gt_trajectories = 0
        self.n_gt_seq = []
        self.n_tr = 0     # number of tracker detections minus ignored tracker detections
        self.n_trs = []   # number of tracker detections minus ignored tracker detections PER SEQUENCE
        self.n_itr = 0    # number of ignored tracker detections
        self.n_itrs = []  # number of ignored tracker detections PER SEQUENCE
        self.n_igttr = 0  # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
        self.n_tr_trajectories = 0
        self.n_tr_seq = []
        self.MOTA = 0
        self.MOTP = 0
        self.MOTAL = 0
        self.MODA = 0
        self.MODP = 0
        self.MODP_t = []
        self.recall = 0
        self.precision = 0
        self.F1 = 0
        self.FAR = 0
        self.total_cost = 0
        self.itp = 0      # number of ignored true positives
        self.itps = []    # number of ignored true positives PER SEQUENCE
        self.tp = 0       # number of true positives including ignored true positives!
        self.tps = []     # number of true positives including ignored true positives PER SEQUENCE
        self.fn = 0       # number of false negatives WITHOUT ignored false negatives
        self.fns = []     # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
        self.ifn = 0      # number of ignored false negatives
        self.ifns = []    # number of ignored false negatives PER SEQUENCE
        self.fp = 0       # number of false positives
        # a bit tricky: the number of ignored false negatives and ignored true
        # positives is subtracted, but if both tracker detection and ground
        # truth detection are ignored this number is added again to avoid
        # double counting
        self.fps = []     # above PER SEQUENCE
        self.mme = 0
        self.fragments = 0
        self.id_switches = 0
        self.MT = 0
        self.PT = 0
        self.ML = 0

        self.min_overlap = min_overlap        # minimum bounding box overlap for 3rd party metrics
        self.max_truncation = max_truncation  # maximum truncation of an object for evaluation
        self.max_occlusion = max_occlusion    # maximum occlusion of an object for evaluation
        self.min_height = min_height          # minimum height of an object for evaluation
        self.n_sample_points = 500

        # this should be enough to hold all groundtruth trajectories;
        # it is expanded if necessary and reduced in any case
        self.gt_trajectories = [[] for x in range(self.n_sequences)]
        self.ign_trajectories = [[] for x in range(self.n_sequences)]
def createEvalDir(self):
"""
Creates directory to store evaluation results and data for visualization.
"""
self.eval_dir = os.path.join("./results/", self.t_sha, "eval", self.cls)
if not os.path.exists(self.eval_dir):
print "create directory:", self.eval_dir,
os.makedirs(self.eval_dir)
print "done"
def loadGroundtruth(self):
"""
Helper function to load ground truth.
"""
try:
self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
except IOError:
return False
return True
def loadTracker(self):
"""
Helper function to load tracker data.
"""
try:
if not self._loadData(self.t_path, cls=self.cls, loading_groundtruth=False):
return False
except IOError:
return False
return True
def _loadData(self, root_dir, cls, min_score=-1000, loading_groundtruth=False):
"""
Generic loader for ground truth and tracking data.
Use loadGroundtruth() or loadTracker() to load this data.
Loads detections in KITTI format from textfiles.
"""
# construct objectDetections object to hold detection data
t_data = tData()
data = []
eval_2d = True
eval_3d = True
seq_data = []
n_trajectories = 0
n_trajectories_seq = []
for seq, s_name in enumerate(self.sequence_name):
i = 0
filename = os.path.join(root_dir, "%s.txt" % s_name)
f = open(filename, "r")
f_data = [[] for x in xrange(self.n_frames[seq])] # current set has only 1059 entries, sufficient length is checked anyway
ids = []
n_in_seq = 0
id_frame_cache = []
for line in f:
# KITTI tracking benchmark data format:
# (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
line = line.strip()
fields = line.split(" ")
# classes that should be loaded (ignored neighboring classes)
if "car" in cls.lower():
classes = ["car","van"]
elif "pedestrian" in cls.lower():
classes = ["pedestrian","person_sitting"]
else:
classes = [cls.lower()]
classes += ["dontcare"]
if not any([s for s in classes if s in fields[2].lower()]):
continue
# get fields from table
t_data.frame = int(float(fields[0])) # frame
t_data.track_id = int(float(fields[1])) # id
t_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...]
t_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2]
t_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2]
t_data.obs_angle = float(fields[5]) # observation angle [rad]
t_data.x1 = float(fields[6]) # left [px]
t_data.y1 = float(fields[7]) # top [px]
t_data.x2 = float(fields[8]) # right [px]
t_data.y2 = float(fields[9]) # bottom [px]
t_data.h = float(fields[10]) # height [m]
t_data.w = float(fields[11]) # width [m]
t_data.l = float(fields[12]) # length [m]
t_data.X = float(fields[13]) # X [m]
t_data.Y = float(fields[14]) # Y [m]
t_data.Z = float(fields[15]) # Z [m]
t_data.yaw = float(fields[16]) # yaw angle [rad]
if not loading_groundtruth:
if len(fields) == 17:
t_data.score = -1
elif len(fields) == 18:
t_data.score = float(fields[17]) # detection score
else:
self.mail.msg("file is not in KITTI format")
return
# do not consider objects marked as invalid
if t_data.track_id is -1 and t_data.obj_type != "dontcare":
continue
idx = t_data.frame
# check if length for frame data is sufficient
if idx >= len(f_data):
print "extend f_data", idx, len(f_data)
f_data += [[] for x in xrange(max(500, idx-len(f_data)))]
try:
id_frame = (t_data.frame,t_data.track_id)
if id_frame in id_frame_cache and not loading_groundtruth:
self.mail.msg("track ids are not unique for sequence %d: frame %d" % (seq,t_data.frame))
self.mail.msg("track id %d occured at least twice for this frame" % t_data.track_id)
self.mail.msg("Exiting...")
#continue # this allows to evaluate non-unique result files
return False
id_frame_cache.append(id_frame)
f_data[t_data.frame].append(copy.copy(t_data))
except:
print len(f_data), idx
raise
if t_data.track_id not in ids and t_data.obj_type!="dontcare":
ids.append(t_data.track_id)
n_trajectories +=1
n_in_seq +=1
# check if uploaded data provides information for 2D and 3D evaluation
if not loading_groundtruth and eval_2d is True and(t_data.x1==-1 or t_data.x2==-1 or t_data.y1==-1 or t_data.y2==-1):
eval_2d = False
if not loading_groundtruth and eval_3d is True and(t_data.X==-1000 or t_data.Y==-1000 or t_data.Z==-1000):
eval_3d = False
# only add existing frames
n_trajectories_seq.append(n_in_seq)
seq_data.append(f_data)
f.close()
if not loading_groundtruth:
self.tracker=seq_data
self.n_tr_trajectories=n_trajectories
self.eval_2d = eval_2d
self.eval_3d = eval_3d
self.n_tr_seq = n_trajectories_seq
if self.n_tr_trajectories==0:
return False
else:
# split ground truth and DontCare areas
self.dcareas = []
self.groundtruth = []
for seq_idx in range(len(seq_data)):
seq_gt = seq_data[seq_idx]
s_g, s_dc = [],[]
for f in range(len(seq_gt)):
all_gt = seq_gt[f]
g,dc = [],[]
for gg in all_gt:
if gg.obj_type=="dontcare":
dc.append(gg)
else:
g.append(gg)
s_g.append(g)
s_dc.append(dc)
self.dcareas.append(s_dc)
self.groundtruth.append(s_g)
self.n_gt_seq=n_trajectories_seq
self.n_gt_trajectories=n_trajectories
return True
def boxoverlap(self,a,b,criterion="union"):
"""
boxoverlap computes intersection over union for bbox a and b in KITTI format.
If the criterion is 'union', overlap = (a inter b) / a union b).
If the criterion is 'a', overlap = (a inter b) / a, where b should be a dontcare area.
"""
x1 = max(a.x1, b.x1)
y1 = max(a.y1, b.y1)
x2 = min(a.x2, b.x2)
y2 = min(a.y2, b.y2)
w = x2-x1
h = y2-y1
if w<=0. or h<=0.:
return 0.
inter = w*h
aarea = (a.x2-a.x1) * (a.y2-a.y1)
barea = (b.x2-b.x1) * (b.y2-b.y1)
# intersection over union overlap
if criterion.lower()=="union":
o = inter / float(aarea+barea-inter)
elif criterion.lower()=="a":
o = float(inter) / float(aarea)
else:
raise TypeError("Unkown type for criterion")
return o
def compute3rdPartyMetrics(self):
"""
Computes the metrics defined in
- Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics
MOTA, MOTAL, MOTP
- Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows
MT/PT/ML
"""
# construct Munkres object for Hungarian Method association
hm = Munkres()
max_cost = 1e9
# go through all frames and associate ground truth and tracker results
# groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections
fr, ids = 0,0
for seq_idx in range(len(self.groundtruth)):
seq_gt = self.groundtruth[seq_idx]
seq_dc = self.dcareas[seq_idx] # don't care areas
seq_tracker = self.tracker[seq_idx]
seq_trajectories = defaultdict(list)
seq_ignored = defaultdict(list)
# statistics over the current sequence, check the corresponding
# variable comments in __init__ to get their meaning
seqtp = 0
seqitp = 0
seqfn = 0
seqifn = 0
seqfp = 0
seqigt = 0
seqitr = 0
last_ids = [[],[]]
n_gts = 0
n_trs = 0
for f in range(len(seq_gt)):
g = seq_gt[f]
dc = seq_dc[f]
t = seq_tracker[f]
# counting total number of ground truth and tracker objects
self.n_gt += len(g)
self.n_tr += len(t)
n_gts += len(g)
n_trs += len(t)
# use hungarian method to associate, using boxoverlap 0..1 as cost
# build cost matrix
cost_matrix = []
this_ids = [[],[]]
for gg in g:
# save current ids
this_ids[0].append(gg.track_id)
this_ids[1].append(-1)
gg.tracker = -1
gg.id_switch = 0
gg.fragmentation = 0
cost_row = []
for tt in t:
# overlap == 1 is cost ==0
c = 1-self.boxoverlap(gg,tt)
# gating for boxoverlap
if c<=self.min_overlap:
cost_row.append(c)
else:
cost_row.append(max_cost) # = 1e9
cost_matrix.append(cost_row)
# all ground truth trajectories are initially not associated
# extend groundtruth trajectories lists (merge lists)
seq_trajectories[gg.track_id].append(-1)
seq_ignored[gg.track_id].append(False)
if len(g) is 0:
cost_matrix=[[]]
# associate
association_matrix = hm.compute(cost_matrix)
# tmp variables for sanity checks and MODP computation
tmptp = 0
tmpfp = 0
tmpfn = 0
tmpc = 0 # this will sum up the overlaps for all true positives
tmpcs = [0]*len(g) # this will save the overlaps for all true positives
# the reason is that some true positives might be ignored
# later such that the corrsponding overlaps can
# be subtracted from tmpc for MODP computation
# mapping for tracker ids and ground truth ids
for row,col in association_matrix:
# apply gating on boxoverlap
c = cost_matrix[row][col]
if c < max_cost:
g[row].tracker = t[col].track_id
this_ids[1][row] = t[col].track_id
t[col].valid = True
g[row].distance = c
self.total_cost += 1-c
tmpc += 1-c
tmpcs[row] = 1-c
seq_trajectories[g[row].track_id][-1] = t[col].track_id
# true positives are only valid associations
self.tp += 1
tmptp += 1
else:
g[row].tracker = -1
self.fn += 1
tmpfn += 1
# associate tracker and DontCare areas
# ignore tracker in neighboring classes
nignoredtracker = 0 # number of ignored tracker detections
ignoredtrackers = dict() # will associate the track_id with -1
# if it is not ignored and 1 if it is
# ignored;
# this is used to avoid double counting ignored
# cases, see the next loop
for tt in t:
ignoredtrackers[tt.track_id] = -1
# ignore detection if it belongs to a neighboring class or is
# smaller or equal to the minimum height
tt_height = abs(tt.y1 - tt.y2)
if ((self.cls=="car" and tt.obj_type=="van") or (self.cls=="pedestrian" and tt.obj_type=="person_sitting") or tt_height<=self.min_height) and not tt.valid:
nignoredtracker+= 1
tt.ignored = True
ignoredtrackers[tt.track_id] = 1
continue
for d in dc:
overlap = self.boxoverlap(tt,d,"a")
if overlap>0.5 and not tt.valid:
tt.ignored = True
nignoredtracker+= 1
ignoredtrackers[tt.track_id] = 1
break
# check for ignored FN/TP (truncation or neighboring object class)
ignoredfn = 0 # the number of ignored false negatives
nignoredtp = 0 # the number of ignored true positives
nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive
# which is ignored but where the associated tracker
# detection has already been ignored
gi = 0
for gg in g:
if gg.tracker < 0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
ignoredfn += 1
elif gg.tracker>=0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
nignoredtp += 1
# if the associated tracker detection is already ignored,
# we want to avoid double counting ignored detections
if ignoredtrackers[gg.tracker] > 0:
nignoredpairs += 1
# for computing MODP, the overlaps from ignored detections
# are subtracted
tmpc -= tmpcs[gi]
gi += 1
# the below might be confusion, check the comments in __init__
# to see what the individual statistics represent
# correct TP by number of ignored TP due to truncation
# ignored TP are shown as tracked in visualization
tmptp -= nignoredtp
# count the number of ignored true positives
self.itp += nignoredtp
# adjust the number of ground truth objects considered
self.n_gt -= (ignoredfn + nignoredtp)
# count the number of ignored ground truth objects
self.n_igt += ignoredfn + nignoredtp
# count the number of ignored tracker objects
self.n_itr += nignoredtracker
# count the number of ignored pairs, i.e. associated tracker and
# ground truth objects that are both ignored
self.n_igttr += nignoredpairs
# false negatives = associated gt bboxes exceding association threshold + non-associated gt bboxes
#
tmpfn += len(g)-len(association_matrix)-ignoredfn
self.fn += len(g)-len(association_matrix)-ignoredfn
self.ifn += ignoredfn
# false positives = tracker bboxes - associated tracker bboxes
# mismatches (mme_t)
tmpfp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
self.fp += len(t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
#tmpfp = len(t) - tmptp - nignoredtp # == len(t) - (tp - ignoredtp) - ignoredtp
#self.fp += len(t) - tmptp - nignoredtp
# update sequence data
seqtp += tmptp
seqitp += nignoredtp
seqfp += tmpfp
seqfn += tmpfn
seqifn += ignoredfn
seqigt += ignoredfn + nignoredtp
seqitr += nignoredtracker
# sanity checks
# - the number of true positives minues ignored true positives
# should be greater or equal to 0
# - the number of false negatives should be greater or equal to 0
# - the number of false positives needs to be greater or equal to 0
# otherwise ignored detections might be counted double
# - the number of counted true positives (plus ignored ones)
# and the number of counted false negatives (plus ignored ones)
# should match the total number of ground truth objects
# - the number of counted true positives (plus ignored ones)
# and the number of counted false positives
# plus the number of ignored tracker detections should
# match the total number of tracker detections; note that
# nignoredpairs is subtracted here to avoid double counting
# of ignored detection sin nignoredtp and nignoredtracker
if tmptp<0:
print tmptp, nignoredtp
raise NameError("Something went wrong! TP is negative")
if tmpfn<0:
print tmpfn, len(g), len(association_matrix), ignoredfn, nignoredpairs
raise NameError("Something went wrong! FN is negative")
if tmpfp<0:
print tmpfp, len(t), tmptp, nignoredtracker, nignoredtp, nignoredpairs
raise NameError("Something went wrong! FP is negative")
if tmptp + tmpfn is not len(g)-ignoredfn-nignoredtp:
print "seqidx", seq_idx
print "frame ", f
print "TP ", tmptp
print "FN ", tmpfn
print "FP ", tmpfp
print "nGT ", len(g)
print "nAss ", len(association_matrix)
print "ign GT", ignoredfn
print "ign TP", nignoredtp
raise NameError("Something went wrong! nGroundtruth is not TP+FN")
if tmptp+tmpfp+nignoredtp+nignoredtracker-nignoredpairs is not len(t):
print seq_idx, f, len(t), tmptp, tmpfp
print len(association_matrix), association_matrix
raise NameError("Something went wrong! nTracker is not TP+FP")
# check for id switches or fragmentations
for i,tt in enumerate(this_ids[0]):
if tt in last_ids[0]:
idx = last_ids[0].index(tt)
tid = this_ids[1][i]
lid = last_ids[1][idx]
if tid != lid and lid != -1 and tid != -1:
if g[i].truncation<self.max_truncation:
g[i].id_switch = 1
ids +=1
if tid != lid and lid != -1:
if g[i].truncation<self.max_truncation:
g[i].fragmentation = 1
fr +=1
# save current index
last_ids = this_ids
# compute MOTP_t
MODP_t = 1
if tmptp!=0:
MODP_t = tmpc/float(tmptp)
self.MODP_t.append(MODP_t)
# remove empty lists for current gt trajectories
self.gt_trajectories[seq_idx] = seq_trajectories
self.ign_trajectories[seq_idx] = seq_ignored
# gather statistics for "per sequence" statistics.
self.n_gts.append(n_gts)
self.n_trs.append(n_trs)
self.tps.append(seqtp)
self.itps.append(seqitp)
self.fps.append(seqfp)
self.fns.append(seqfn)
self.ifns.append(seqifn)
self.n_igts.append(seqigt)
self.n_itrs.append(seqitr)
# compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories
n_ignored_tr_total = 0
for seq_idx, (seq_trajectories,seq_ignored) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)):
if len(seq_trajectories)==0:
continue
tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0]*5
n_ignored_tr = 0
for g, ign_g in zip(seq_trajectories.values(), seq_ignored.values()):
# all frames of this gt trajectory are ignored
if all(ign_g):
n_ignored_tr+=1
n_ignored_tr_total+=1
continue
# all frames of this gt trajectory are not assigned to any detections
if all([this==-1 for this in g]):
tmpML+=1
self.ML+=1
continue
# compute tracked frames in trajectory
last_id = g[0]
# first detection (necessary to be in gt_trajectories) is always tracked
tracked = 1 if g[0]>=0 else 0
lgt = 0 if ign_g[0] else 1
for f in range(1,len(g)):
if ign_g[f]:
last_id = -1
continue
lgt+=1
if last_id != g[f] and last_id != -1 and g[f] != -1 and g[f-1] != -1:
tmpId_switches += 1
self.id_switches += 1
if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1:
tmpFragments += 1
self.fragments += 1
if g[f] != -1:
tracked += 1
last_id = g[f]
# handle last frame; tracked state is handled in for loop (g[f]!=-1)
if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]:
tmpFragments += 1
self.fragments += 1
# compute MT/PT/ML
tracking_ratio = tracked / float(len(g) - sum(ign_g))
if tracking_ratio > 0.8:
tmpMT += 1
self.MT += 1
elif tracking_ratio < 0.2:
tmpML += 1
self.ML += 1
else: # 0.2 <= tracking_ratio <= 0.8
tmpPT += 1
self.PT += 1
if (self.n_gt_trajectories-n_ignored_tr_total)==0:
self.MT = 0.
self.PT = 0.
self.ML = 0.
else:
self.MT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total)
self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total)
# precision/recall etc.
if (self.fp+self.tp)==0 or (self.tp+self.fn)==0:
self.recall = 0.
self.precision = 0.
else:
self.recall = self.tp/float(self.tp+self.fn)
self.precision = self.tp/float(self.fp+self.tp)
if (self.recall+self.precision)==0:
self.F1 = 0.
else:
self.F1 = 2.*(self.precision*self.recall)/(self.precision+self.recall)
if sum(self.n_frames)==0:
self.FAR = "n/a"
else:
self.FAR = self.fp/float(sum(self.n_frames))
# compute CLEARMOT
if self.n_gt==0:
self.MOTA = -float("inf")
self.MODA = -float("inf")
else:
self.MOTA = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt)
if self.tp==0:
self.MOTP = float("inf")
else:
self.MOTP = self.total_cost / float(self.tp)
if self.n_gt!=0:
if self.id_switches==0:
self.MOTAL = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt)
else:
self.MOTAL = 1 - (self.fn + self.fp + math.log10(self.id_switches))/float(self.n_gt)
else:
self.MOTAL = -float("inf")
if sum(self.n_frames)==0:
self.MODP = "n/a"
else:
self.MODP = sum(self.MODP_t)/float(sum(self.n_frames))
return True
def createSummary(self):
"""
Generate and mail a summary of the results.
If mailpy.py is present, the summary is instead printed.
"""
summary = ""
summary += "tracking evaluation summary".center(80,"=") + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n"
summary += self.printEntry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n"
summary += self.printEntry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n"
summary += self.printEntry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n"
summary += "\n"
summary += self.printEntry("Recall", self.recall) + "\n"
summary += self.printEntry("Precision", self.precision) + "\n"
summary += self.printEntry("F1", self.F1) + "\n"
summary += self.printEntry("False Alarm Rate", self.FAR) + "\n"
summary += "\n"
summary += self.printEntry("Mostly Tracked", self.MT) + "\n"
summary += self.printEntry("Partly Tracked", self.PT) + "\n"
summary += self.printEntry("Mostly Lost", self.ML) + "\n"
summary += "\n"
summary += self.printEntry("True Positives", self.tp) + "\n"
#summary += self.printEntry("True Positives per Sequence", self.tps) + "\n"
summary += self.printEntry("Ignored True Positives", self.itp) + "\n"
#summary += self.printEntry("Ignored True Positives per Sequence", self.itps) + "\n"
summary += self.printEntry("False Positives", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("Ignored False Negatives", self.ifn) + "\n"
#summary += self.printEntry("Ignored False Negatives per Sequence", self.ifns) + "\n"
summary += self.printEntry("Missed Targets", self.fn) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
summary += self.printEntry("Fragmentations", self.fragments) + "\n"
summary += "\n"
summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n"
#summary += self.printEntry("Ground Truth Objects (Total) per Sequence", self.n_gts) + "\n"
summary += self.printEntry("Ignored Ground Truth Objects", self.n_igt) + "\n"
#summary += self.printEntry("Ignored Ground Truth Objects per Sequence", self.n_igts) + "\n"
summary += self.printEntry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n"
summary += "\n"
summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n"
#summary += self.printEntry("Tracker Objects (Total) per Sequence", self.n_trs) + "\n"
summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n"
#summary += self.printEntry("Ignored Tracker Objects per Sequence", self.n_itrs) + "\n"
summary += self.printEntry("Tracker Trajectories", self.n_tr_trajectories) + "\n"
#summary += "\n"
#summary += self.printEntry("Ignored Tracker Objects with Associated Ignored Ground Truth Objects", self.n_igttr) + "\n"
summary += "="*80
return summary
def printEntry(self, key, val,width=(70,10)):
"""
Pretty print an entry in a table fashion.
"""
s_out = key.ljust(width[0])
if type(val)==int:
s = "%%%dd" % width[1]
s_out += s % val
elif type(val)==float:
s = "%%%df" % (width[1])
s_out += s % val
else:
s_out += ("%s"%val).rjust(width[1])
return s_out
def saveToStats(self):
"""
Save the statistics in a whitespace separate file.
"""
# create pretty summary
summary = self.createSummary()
# mail or print the summary.
mail.msg(summary)
# write summary to file summary_cls.txt
filename = os.path.join("./results", self.t_sha, "summary_%s.txt" % self.cls)
dump = open(filename, "w+")
print>>dump, summary
dump.close()
# dump all the statistics to the corresponding stats_cls.txt file
filename = os.path.join("./results", self.t_sha, "stats_%s.txt" % self.cls)
dump = open(filename, "w+")
print>>dump, "%.6f " * 21 \
% (self.MOTA, self.MOTP, self.MOTAL, self.MODA, self.MODP, \
self.recall, self.precision, self.F1, self.FAR, \
self.MT, self.PT, self.ML, self.tp, self.fp, self.fn, self.id_switches, self.fragments, \
self.n_gt, self.n_gt_trajectories, self.n_tr, self.n_tr_trajectories)
dump.close()
# write description of statistics to description.txt
filename = os.path.join("./results", self.t_sha, "description.txt")
dump = open(filename, "w+")
print>>dump, "MOTA", "MOTP", "MOTAL", "MODA", "MODP", "recall", "precision", "F1", "FAR",
print>>dump, "MT", "PT", "ML", "tp", "fp", "fn", "id_switches", "fragments",
print>>dump, "n_gt", "n_gt_trajectories", "n_tr", "n_tr_trajectories"
def evaluate(result_sha, mail):
    """
    Entry point for evaluation: loads the data and runs the evaluation for
    CAR and PEDESTRIAN if available.

    result_sha - unique key of the submitted results
    mail       - messenger object for all status output
    Returns True when at least one class could be evaluated.
    """
    # start evaluation with one instantiated eval object per class
    mail.msg("Processing Result for KITTI Tracking Benchmark")
    classes = []
    for c in ("Car", "Pedestrian"):
        e = trackingEvaluation(t_sha=result_sha, mail=mail, cls=c)
        # load tracker data and check provided classes
        try:
            if not e.loadTracker():
                continue
            mail.msg("Loading Results - Success")
            mail.msg("Evaluate Object Class: %s" % c.upper())
            classes.append(c)
        except Exception:  # narrowed from a bare except
            mail.msg("Feel free to contact us (lenz@kit.edu), if you receive this error message:")
            mail.msg("   Caught exception while loading result data.")
            break
        # load groundtruth data for this class
        if not e.loadGroundtruth():
            raise ValueError("Ground truth not found.")
        mail.msg("Loading Groundtruth - Success")
        # sanity check: the results must cover every sequence
        # fix: was 'is not' (identity comparison on ints)
        if len(e.groundtruth) != len(e.tracker):
            mail.msg("The uploaded data does not provide results for every sequence.")
            return False
        mail.msg("Loaded %d Sequences." % len(e.groundtruth))
        mail.msg("Start Evaluation...")
        # create needed directories, evaluate and save stats
        try:
            e.createEvalDir()
        except Exception:  # narrowed from a bare except
            mail.msg("Feel free to contact us (lenz@kit.edu), if you receive this error message:")
            mail.msg("   Caught exception while creating results.")
        if e.compute3rdPartyMetrics():
            e.saveToStats()
        else:
            mail.msg("There seem to be no true positives or false positives at all in the submitted data.")
    # finish
    if len(classes) == 0:
        mail.msg("The uploaded results could not be evaluated. Check for format errors.")
        return False
    mail.msg("Thank you for participating in our benchmark!")
    return True
#########################################################################
# entry point of evaluation script
# input:
#   - result_sha (unique key of results)
#   - user_sha   (key of user who submitted the results, optional)
#   - email      (email of user who submitted the results, optional)
if __name__ == "__main__":
    # check for correct number of arguments; if user_sha and email are not
    # supplied, no notification email is sent (used for auto-updates)
    if len(sys.argv) != 2 and len(sys.argv) != 4:
        # py3 fix: original used Python 2 print-statement syntax
        print("Usage: python eval_tracking.py result_sha [user_sha email]")
        sys.exit(1)

    # get unique sha key of submitted results
    result_sha = sys.argv[1]

    # create mail messenger and debug output object
    if len(sys.argv) == 4:
        mail = mailpy.Mail(sys.argv[3])
    else:
        mail = mailpy.Mail("")

    # evaluate results and send notification email to user
    success = evaluate(result_sha, mail)
    if len(sys.argv) == 4:
        mail.finalize(success, "tracking", result_sha, sys.argv[2])
    else:
        mail.finalize(success, "tracking", result_sha, "")
| 47.568607 | 176 | 0.500229 |
67795c835b77f8d02ccedf828375b1cfedae3d5f | 1,286 | py | Python | app/core/tests/test_admin.py | matallonor/recipe-app-api- | 78b997321d6ce55d4ef7bafa30c8d7c70693bf28 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | matallonor/recipe-app-api- | 78b997321d6ce55d4ef7bafa30c8d7c70693bf28 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | matallonor/recipe-app-api- | 78b997321d6ce55d4ef7bafa30c8d7c70693bf28 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Tests for the Django admin pages of the custom user model."""

    def setUp(self):
        """Create an admin (logged into the client) and a regular user."""
        self.client = Client()
        user_model = get_user_model()
        self.admin_user = user_model.objects.create_superuser(
            email='test_admin@email.com',
            password='Test1234',
        )
        self.client.force_login(self.admin_user)
        self.user = user_model.objects.create_user(
            email='test@email.com',
            password='Test1234',
            name='Test User Full Name',
        )

    def test_users_listed(self):
        """Users are listed on user page"""
        page = self.client.get(reverse('admin:core_user_changelist'))
        self.assertContains(page, self.user.name)
        self.assertContains(page, self.user.email)

    def test_user_change_page(self):
        """The User edit page works"""
        page = self.client.get(
            reverse('admin:core_user_change', args=[self.user.id]))
        self.assertEqual(page.status_code, 200)

    def test_create_user_page(self):
        """The Create User page works"""
        page = self.client.get(reverse('admin:core_user_add'))
        self.assertEqual(page.status_code, 200)
| 30.619048 | 68 | 0.635303 |
4df12b1597aeb60bf50c2e5602f34725ac6b31a9 | 1,049 | py | Python | model-optimizer/extensions/front/mxnet/null_ext.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/mxnet/null_ext.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 19 | 2021-03-26T08:11:00.000Z | 2022-02-21T13:06:26.000Z | model-optimizer/extensions/front/mxnet/null_ext.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 1 | 2021-07-28T17:30:46.000Z | 2021-07-28T17:30:46.000Z | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.parameter import Parameter
from mo.front.extractor import FrontExtractorOp
from mo.ops.const import Const
class NullFrontExtractor(FrontExtractorOp):
    """Front extractor for MXNet 'null' symbols.

    A 'null' symbol carrying a value becomes a Const node; one without a
    value becomes a network Parameter (input).
    """
    op = 'null'
    enabled = True

    @classmethod
    def extract(cls, node):
        symbol = node.symbol_dict
        if 'value' in symbol:
            Const.update_node_stat(node, {'value': symbol['value']})
        else:
            Parameter.update_node_stat(node, {})
        return cls.enabled
| 31.787879 | 78 | 0.729266 |
805f39ac417456d2d96b2b66a0a38046dc6f404e | 1,444 | py | Python | py/0050.pow-x-n.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | py/0050.pow-x-n.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | py/0050.pow-x-n.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=50 lang=python3
#
# [50] Pow(x, n)
#
# https://leetcode.com/problems/powx-n/description/
#
# algorithms
# Medium (30.92%)
# Likes: 2161
# Dislikes: 3621
# Total Accepted: 600.4K
# Total Submissions: 1.9M
# Testcase Example: '2.00000\n10'
#
# Implement pow(x, n), which calculates x raised to the power n (i.e. x^n).
#
#
# Example 1:
#
#
# Input: x = 2.00000, n = 10
# Output: 1024.00000
#
#
# Example 2:
#
#
# Input: x = 2.10000, n = 3
# Output: 9.26100
#
#
# Example 3:
#
#
# Input: x = 2.00000, n = -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
#
#
#
# Constraints:
#
#
# -100.0 < x < 100.0
# -2^31 <= n <= 2^31-1
# -10^4 <= x^n <= 10^4
#
#
#
# @lc code=start
# 1: easy way
# class Solution:
# def myPow(self, x: float, n: int) -> float:
# return x ** n
# 2: recursive
class Solution:
    def myPow(self, x: float, n: int) -> float:
        """Compute x**n using binary exponentiation in O(log |n|) multiplies.

        BUGFIX: the original odd-exponent branch made *two* recursive calls
        (``recurse(n//2) * recurse((n//2) + 1)``), turning the recursion
        into O(n) work; the odd case is now ``half * half * x`` with a
        single recursive call.

        Negative exponents are handled by inverting the base first (Python
        ints do not overflow, so ``-n`` is safe even for n == -2**31).
        """
        if n < 0:
            x = 1 / x
            n = -n

        def fast_pow(e):
            # Invariant: e >= 0.  Square-and-multiply.
            if e == 0:
                return 1.0
            half = fast_pow(e // 2)  # single recursive call -> O(log e) depth
            if e % 2 == 0:
                return half * half
            return half * half * x

        return fast_pow(n)
# @lc code=end
| 17.190476 | 75 | 0.452909 |
0b3fae55ea24f33724477b0f699d523a9eabe40e | 5,267 | py | Python | piki.py | mrpiqipiq/auto | a0648b751481070644c9c63ab3f6c9647146782d | [
"Apache-2.0"
] | null | null | null | piki.py | mrpiqipiq/auto | a0648b751481070644c9c63ab3f6c9647146782d | [
"Apache-2.0"
] | null | null | null | piki.py | mrpiqipiq/auto | a0648b751481070644c9c63ab3f6c9647146782d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# coded by kereh
try:
import mechanize,requests,os,sys,subprocess,cookielib,time,random
except ImportError:
subprocess.call("pip2 install requests mechanize", shell=True)
subprocess.call("clear",shell=True)
# ANSI terminal escape sequences used for all console output below.
#color
green = "\033[1;32m"
normal = "\033[0m"  # reset to default terminal attributes
red = "\033[1;31m"
cyan = "\033[1;36m"
# Status prefix symbols: green "[+]" for success, "[!]" with a red bang for errors.
#symbols
good = "\033[1;32m[\033[1;36m+\033[1;32m]\033[0m"
bad = "\033[1;32m[\033[1;31m!\033[1;32m]\033[0m"
# Pre-coloured status words.
#word
success = "\033[1;32mSuccessful\033[0m"
failed = "\033[1;31mFailed\033[0m"
###banner###
banner_menu = """
▄▄▄▄ ▒█████ ▄▄▄█████▓
▓█████▄ ▒██▒ ██▒▓ ██▒ ▓▒
▒██▒ ▄██▒██░ ██▒▒ ▓██░ ▒░
▒██░█▀ ▒██ ██░░ ▓██▓ ░
░▓█ ▀█▓░ ████▓▒░ ▒██▒ ░
░▒▓███▀▒░ ▒░▒░▒░ ▒ ░░
▒░▒ ░ ░ ▒ ▒░ ░
░ ░ ░ ░ ░ ▒ ░
░ ░ ░
░
Author : {}Mr.piqipiq{}
Github : {}https://github.com/mrpiqipiq{}
[+] Menu Bot [+]
[1] Generate Access Token
[2] Auto Like On Your Post 200
[3] Auto Commenter On Your Post
[4] Auto Friend Requests On Your Account
""".format(green,normal,green,normal)
banner = """
▄▄▄▄ ▒█████ ▄▄▄█████▓
▓█████▄ ▒██▒ ██▒▓ ██▒ ▓▒
▒██▒ ▄██▒██░ ██▒▒ ▓██░ ▒░
▒██░█▀ ▒██ ██░░ ▓██▓ ░
░▓█ ▀█▓░ ████▓▒░ ▒██▒ ░
░▒▓███▀▒░ ▒░▒░▒░ ▒ ░░
▒░▒ ░ ░ ▒ ▒░ ░
░ ░ ░ ░ ░ ▒ ░
░ ░ ░
░
Author : {}Mr.piqipiq{}
Facebook : {}??{}
Github : {}https://github.com/mrpiqipiq{}
""".format(green,normal,cyan,normal,green,normal)
###
# Shared mechanize browser used by every command below; configured to act
# like a real client: follows redirects/refreshes, keeps cookies, and
# ignores robots.txt.
br = mechanize.Browser()
br.set_handle_robots(False)  # do not honour robots.txt
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_cookiejar(cookielib.LWPCookieJar())  # in-memory session cookies
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# NOTE(review): captured once at import time, so every log line shows the
# same timestamp; format is seconds:minutes:hours -- confirm this is intended.
info = time.strftime("%S:%M:%H")
def generate_token():
print banner
print
username = raw_input("[+] username : ")
password = raw_input("[+] password : ")
print "[{}]{} Generate Access Token Please Wait....".format(info,good)
time.sleep(5)
if len(username) == 0:
print "[{}]{} You Must Input Your {}Username{} !!!".format(info,good)
elif len(password) == 0:
print "[{}]{} You Must Input Your {}Password{} !!!".format(info,good)
else:
token_parsing = br.open("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + username + "&locale=en_US&password=" + password + "&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6").read()
file_token_access = open("token.txt","w")
file_token_access.write(str(token_parsing))
file_token_access.close()
try:
print "[{}]{} STATUS : {}".format(info,good,success)
print "[{}]{} SAVED FILE WITH NAME : token.txt".format(info,good)
except:
print "[{}]{} Error Operation System".format(info,bad)
def autolike():
 # Send an auto-like/reaction burst to the user's newest post through the
 # yolikers.com service.  Requires token.txt created by generate_token().
 print banner
 print
 token = open("token.txt","r").read()
 # Authenticate against yolikers by submitting the stored access token.
 a = br.open("https://yolikers.com/")
 br.select_form(nr=0)
 br.form["access_token"] = token
 br.submit()
 try:
  react = raw_input("[+] type reaction ['LIKE','LOVE','HAHA','WOW','SAD','ANGRY'] : ")
  d = br.open("https://yolikers.com/like.php?type=status")
  br.select_form(nr=0)
  # The site expects the reaction as a single-item list form value.
  br.form["type"] = [""+react]
  br.submit()
  print "[{}][+] Success Sending Like..".format(info,good)
 except:
  # NOTE(review): bare except hides the real error; the service rate-limits,
  # hence the 15-minute message -- presumably any failure is a rate limit.
  print "[{}][+] Use After 15 Minute..".format(info,bad)
def comment():
 # Post automatic comments on the user's newest post through yolikers.com.
 # Requires token.txt created by generate_token().
 print banner
 print
 print "[{}]{} Sending Commenter On Your Newest Post Please Wait...".format(info,good)
 token = open("token.txt","r").read()
 # First visit submits the stored access token to authenticate.
 a = br.open("https://yolikers.com/commenter.php?type=status")
 br.select_form(nr=0)
 br.form["access_token"] = token
 br.submit()
 try:
  # Second visit triggers the commenter action itself.
  b = br.open("https://yolikers.com/commenter.php?type=status")
  br.select_form(nr=0)
  br.submit()
  print "[{}]{} Sending Commenter Success..".format(info,good)
 except:
  # NOTE(review): bare except; the service rate-limits (15-minute cooldown).
  print "[{}]{} Use After 15 Minute..".format(info,bad)
def friend():
print banner
print
print "[{}]{} Sending 30 Friend Request On Your Facebook Account...".format(info,good)
token = open("token.txt","r").read()
a = br.open("https://yolikers.com/")
br.select_form(nr=0)
br.form["access_token"] = token
try:
b = br.open("https://yolikers.com/autorequest.php?type=profile")
br.select_form(nr=0)
br.submit()
print "[{}]{} Sending 30 Friend Request Success...".format(info,good)
except:
print "[{}]{} Use After 15 Minute...".format(info,good)
if __name__=="__main__":
while True:
print banner_menu
print
pilih_menu = raw_input("[+] Enter Your Choice : ")
if len(pilih_menu) == 0:
print "{} You Must Input Your Choice !!!".format(bad)
elif pilih_menu == "1":
generate_token()
time.sleep(5)
elif pilih_menu == "2":
autolike()
time.sleep(5)
elif pilih_menu == "3":
comment()
time.sleep(5)
elif pilih_menu == "4":
friend()
time.sleep(5)
| 30.80117 | 314 | 0.549649 |
506cf77aa06a99b1edc7031c8eb7e4745efc9432 | 374 | py | Python | hiddenwifi.py | scottwedge/python_pentesting_scripts | b57fdbebc8ada05a771eaf3fd3b72d232e3f7dac | [
"MIT"
] | null | null | null | hiddenwifi.py | scottwedge/python_pentesting_scripts | b57fdbebc8ada05a771eaf3fd3b72d232e3f7dac | [
"MIT"
] | null | null | null | hiddenwifi.py | scottwedge/python_pentesting_scripts | b57fdbebc8ada05a771eaf3fd3b72d232e3f7dac | [
"MIT"
] | null | null | null | from scapy.all import *
import os
iface = "wlan0"
def h_packet(packet):
if packet.haslayer(Dot11ProbeREq) or packet.haslayer(Dot11ProbeResp) or packet.haslayer(Dot11AssoReq):
print "SSID identified " + packet.info
os.system("iwconfig " + iface + "mode monitor")
print "Sniffing traffic on interface " + iface
sniff(iface=iface, prn=h_packet)
| 24.933333 | 107 | 0.703209 |
7412a2f2d85002b4915d14bf2f95f4598971f074 | 5,310 | py | Python | kubernetes/client/models/v1alpha1_volume_attachment_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/client/models/v1alpha1_volume_attachment_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1alpha1_volume_attachment_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1VolumeAttachmentSpec(object):
    """
    Specification of a VolumeAttachment request: which volume driver must
    attach which volume source to which node.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'attacher': 'str',
        'node_name': 'str',
        'source': 'V1alpha1VolumeAttachmentSource'
    }

    attribute_map = {
        'attacher': 'attacher',
        'node_name': 'nodeName',
        'source': 'source'
    }

    def __init__(self, attacher=None, node_name=None, source=None):
        """
        V1alpha1VolumeAttachmentSpec - a model defined in Swagger

        All three fields are effectively required: assignment below goes
        through the property setters, which raise ValueError on None.
        """

        self._attacher = None
        self._node_name = None
        self._source = None
        self.discriminator = None

        self.attacher = attacher
        self.node_name = node_name
        self.source = source

    @property
    def attacher(self):
        """
        Gets the attacher of this V1alpha1VolumeAttachmentSpec.
        Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().

        :return: The attacher of this V1alpha1VolumeAttachmentSpec.
        :rtype: str
        """
        return self._attacher

    @attacher.setter
    def attacher(self, attacher):
        """
        Sets the attacher of this V1alpha1VolumeAttachmentSpec.
        Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().

        :param attacher: The attacher of this V1alpha1VolumeAttachmentSpec.
        :type: str
        :raises ValueError: if `attacher` is None (required field)
        """
        if attacher is None:
            raise ValueError("Invalid value for `attacher`, must not be `None`")

        self._attacher = attacher

    @property
    def node_name(self):
        """
        Gets the node_name of this V1alpha1VolumeAttachmentSpec.
        The node that the volume should be attached to.

        :return: The node_name of this V1alpha1VolumeAttachmentSpec.
        :rtype: str
        """
        return self._node_name

    @node_name.setter
    def node_name(self, node_name):
        """
        Sets the node_name of this V1alpha1VolumeAttachmentSpec.
        The node that the volume should be attached to.

        :param node_name: The node_name of this V1alpha1VolumeAttachmentSpec.
        :type: str
        :raises ValueError: if `node_name` is None (required field)
        """
        if node_name is None:
            raise ValueError("Invalid value for `node_name`, must not be `None`")

        self._node_name = node_name

    @property
    def source(self):
        """
        Gets the source of this V1alpha1VolumeAttachmentSpec.
        Source represents the volume that should be attached.

        :return: The source of this V1alpha1VolumeAttachmentSpec.
        :rtype: V1alpha1VolumeAttachmentSource
        """
        return self._source

    @source.setter
    def source(self, source):
        """
        Sets the source of this V1alpha1VolumeAttachmentSpec.
        Source represents the volume that should be attached.

        :param source: The source of this V1alpha1VolumeAttachmentSpec.
        :type: V1alpha1VolumeAttachmentSource
        :raises ValueError: if `source` is None (required field)
        """
        if source is None:
            raise ValueError("Invalid value for `source`, must not be `None`")

        self._source = source

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested model (anything exposing a to_dict method) found directly,
        inside lists, or inside dict values.
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1alpha1VolumeAttachmentSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
68d839f685c7f2a9c5bddd7934d657afbdb5c169 | 12,980 | py | Python | openprocurement/tender/openeu/tests/award.py | tarasvaskiv/openprocurement.tender.openeu | 47a02fb9fc7cbad7fc52b5373a43c82be2d983a7 | [
"Apache-2.0"
] | 8 | 2016-01-31T20:38:18.000Z | 2021-06-01T10:59:24.000Z | openprocurement/tender/openeu/tests/award.py | tarasvaskiv/openprocurement.tender.openeu | 47a02fb9fc7cbad7fc52b5373a43c82be2d983a7 | [
"Apache-2.0"
] | 80 | 2016-02-24T16:44:44.000Z | 2018-07-13T12:06:28.000Z | openprocurement/tender/openeu/tests/award.py | tarasvaskiv/openprocurement.tender.openeu | 47a02fb9fc7cbad7fc52b5373a43c82be2d983a7 | [
"Apache-2.0"
] | 26 | 2016-02-02T09:55:06.000Z | 2018-06-15T14:37:46.000Z | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.base import test_organization
from openprocurement.tender.belowthreshold.tests.award import (
TenderAwardComplaintResourceTestMixin,
TenderAwardDocumentResourceTestMixin,
TenderAwardComplaintDocumentResourceTestMixin,
Tender2LotAwardDocumentResourceTestMixin
)
from openprocurement.tender.belowthreshold.tests.award_blanks import (
# TenderLotAwardComplaintResourceTest
get_tender_lot_award_complaint,
get_tender_lot_award_complaints,
)
from openprocurement.tender.openua.tests.award import TenderUaAwardComplaintResourceTestMixin
from openprocurement.tender.openua.tests.award_blanks import (
# Tender2LotAwardComplaintResourceTest
create_tender_lots_award_complaint,
patch_tender_lots_award_complaint,
# TenderLotAwardComplaintResourceTest
create_tender_lot_award_complaint,
patch_tender_lot_award_complaint,
)
from openprocurement.tender.openeu.tests.award_blanks import (
# Tender2LotAwardComplaintDocumentResourceTest
patch_tender_award_complaint_document,
# TenderAwardComplaintDocumentResourceTest
create_tender_2lot_award_complaint_document,
put_tender_2lot_award_complaint_document,
patch_tender_2lot_award_complaint_document,
# Tender2LotAwardResourceTest
create_tender_2lot_award,
patch_tender_2lot_award,
# TenderLotAwardResourceTest
create_tender_lot_award,
patch_tender_lot_award,
patch_tender_lot_award_unsuccessful,
# TenderAwardResourceTest
create_tender_award_invalid,
create_tender_award,
get_tender_award,
patch_tender_award,
patch_tender_award_active,
patch_tender_award_unsuccessful,
patch_tender_award_Administrator_change,
)
from openprocurement.tender.openeu.tests.base import (
BaseTenderContentWebTest,
test_bids,
test_lots
)
class TenderAwardResourceTestMixin(object):
    """Shared award-endpoint scenarios exposed as unittest test methods.

    Each attribute wraps an imported scenario function with ``snitch`` so
    that the concrete test classes mixing this in pick them up.
    """
    test_create_tender_award_invalid = snitch(create_tender_award_invalid)
    test_create_tender_award = snitch(create_tender_award)
    test_patch_tender_award = snitch(patch_tender_award)
    test_patch_tender_award_active = snitch(patch_tender_award_active)
    test_patch_tender_award_unsuccessful = snitch(patch_tender_award_unsuccessful)
    test_get_tender_award = snitch(get_tender_award)
    test_patch_tender_award_Administrator_change = snitch(patch_tender_award_Administrator_change)
class TenderAwardResourceTest(BaseTenderContentWebTest,
                              TenderAwardResourceTestMixin):
    """Single-lot tender fixture for the shared award-endpoint scenarios."""
    initial_status = 'active.tendering'
    initial_bids = test_bids
    initial_lots = test_lots
    initial_auth = ('Basic', ('broker', ''))
    expected_award_amount = test_bids[0]['value']['amount']

    def setUp(self):
        # Prepare the tender up to the awarding stage, then cache the award
        # id and the first bidder's token for the scenario functions.
        super(TenderAwardResourceTest, self).setUp()
        self.prepare_award()

        # Get award
        response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
        self.award_id = response.json['data'][0]['id']
        self.bid_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
        self.app.authorization = ('Basic', ('broker', ''))
class TenderLotAwardResourceTestMixin(object):
    """Lot-specific award scenarios exposed as unittest test methods.

    CONSISTENCY FIX: normalised the missing space before ``=`` on the last
    two assignments to match the sibling mixins (PEP 8).
    """
    test_create_tender_award = snitch(create_tender_lot_award)
    test_patch_tender_award = snitch(patch_tender_lot_award)
    test_patch_tender_award_unsuccessful = snitch(patch_tender_lot_award_unsuccessful)
class TenderLotAwardResourceTest(BaseTenderContentWebTest,
                                 TenderLotAwardResourceTestMixin):
    """Single-lot tender fixture for the lot-specific award scenarios."""
    initial_status = 'active.tendering'
    initial_bids = test_bids
    initial_lots = test_lots
    initial_auth = ('Basic', ('broker', ''))
    expected_award_amount = test_bids[0]['value']['amount']

    def setUp(self):
        # Prepare the tender up to awarding and cache award id / bid token.
        super(TenderLotAwardResourceTest, self).setUp()
        self.prepare_award()

        # Get award
        response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
        self.award_id = response.json['data'][0]['id']
        self.bid_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
        self.app.authorization = ('Basic', ('broker', ''))
class Tender2LotAwardResourceTestMixin(object):
    """Two-lot award scenarios exposed as unittest test methods."""
    test_create_tender_award = snitch(create_tender_2lot_award)
    test_patch_tender_award = snitch(patch_tender_2lot_award)
class Tender2LotAwardResourceTest(BaseTenderContentWebTest,
                                  Tender2LotAwardResourceTestMixin):
    """Two-lot tender fixture for the two-lot award scenarios."""
    initial_status = 'active.tendering'
    initial_lots = 2 * test_lots
    initial_bids = test_bids
    initial_auth = ('Basic', ('broker', ''))

    def setUp(self):
        # Prepare the tender up to awarding and cache the first award id.
        super(Tender2LotAwardResourceTest, self).setUp()
        self.prepare_award()

        # Get award
        response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
        self.award_id = response.json['data'][0]['id']
        self.app.authorization = ('Basic', ('broker', ''))
class TenderAwardComplaintResourceTest(BaseTenderContentWebTest,
                                       TenderAwardComplaintResourceTestMixin,
                                       TenderUaAwardComplaintResourceTestMixin):
    """Fixture with an *activated* award, for award-complaint scenarios."""
    #initial_data = tender_data
    initial_status = 'active.tendering'
    initial_bids = test_bids
    initial_lots = 2 * test_lots
    initial_auth = ('Basic', ('broker', ''))

    def setUp(self):
        # Prepare up to awarding, activate the first award (qualified and
        # eligible) and cache ids/tokens for the complaint scenarios.
        super(TenderAwardComplaintResourceTest, self).setUp()
        self.prepare_award()

        # Get award
        response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
        self.award_id = response.json['data'][0]['id']

        self.app.authorization = ('Basic', ('broker', ''))
        self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, self.award_id, self.tender_token), {'data': {'status': 'active', "qualified": True, "eligible": True}})
        self.bid_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
class TenderLotAwardComplaintResourceTestMixin(object):
    """Lot-award complaint scenarios exposed as unittest test methods."""
    test_create_tender_award_complaint = snitch(create_tender_lot_award_complaint)
    test_patch_tender_award_complaint = snitch(patch_tender_lot_award_complaint)
    test_get_tender_award_complaint = snitch(get_tender_lot_award_complaint)
    test_get_tender_award_complaints = snitch(get_tender_lot_award_complaints)
class TenderLotAwardComplaintResourceTest(BaseTenderContentWebTest,
                                          TenderLotAwardComplaintResourceTestMixin):
    """Single-lot fixture that creates and activates a lot award itself."""
    #initial_data = tender_data
    initial_status = 'active.tendering'
    initial_lots = test_lots
    initial_bids = test_bids
    initial_auth = ('Basic', ('broker', ''))

    def setUp(self):
        # Prepare the tender, create an award bound to the bid's lot,
        # activate it, then cache ids/tokens for the complaint scenarios.
        super(TenderLotAwardComplaintResourceTest, self).setUp()
        self.prepare_award()

        # Create award
        self.app.authorization = ('Basic', ('token', ''))
        bid = self.initial_bids[0]
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
        award = response.json['data']
        self.award_id = award['id']

        self.app.authorization = ('Basic', ('broker', ''))
        self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, self.award_id, self.tender_token), {'data': {'status': 'active', "qualified": True, "eligible": True}})
        self.bid_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
class Tender2LotAwardComplaintResourceTestMixin(object):
    """Two-lot award complaint scenarios exposed as unittest test methods."""
    test_create_tender_award_complaint = snitch(create_tender_lots_award_complaint)
    test_patch_tender_award_complaint = snitch(patch_tender_lots_award_complaint)
class Tender2LotAwardComplaintResourceTest(TenderLotAwardComplaintResourceTest,
                                           Tender2LotAwardComplaintResourceTestMixin):
    """Two-lot variant: reuses the single-lot fixture (and inherits its
    tests) with two lots configured."""
    initial_lots = 2 * test_lots
class TenderAwardComplaintDocumentResourceTest(BaseTenderContentWebTest,
                                               TenderAwardComplaintDocumentResourceTestMixin):
    """Fixture with an active award and one open complaint, for the
    complaint-document scenarios."""
    initial_status = 'active.qualification'
    initial_bids = test_bids

    def setUp(self):
        super(TenderAwardComplaintDocumentResourceTest, self).setUp()
        # Create award
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}})
        award = response.json['data']
        self.award_id = award['id']
        self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, self.award_id, self.tender_token), {'data': {'status': 'active', "qualified": True, "eligible": True}})
        # Create complaint for award
        response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(
            self.tender_id, self.award_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': test_organization}})
        complaint = response.json['data']
        self.complaint_id = complaint['id']
        self.complaint_owner_token = response.json['access']['token']

    test_patch_tender_award_complaint_document = snitch(patch_tender_award_complaint_document)
class Tender2LotAwardComplaintDocumentResourceTest(BaseTenderContentWebTest):
    """Two-lot fixture with an active lot award and one open complaint,
    for the two-lot complaint-document scenarios."""
    initial_status = 'active.qualification'
    initial_bids = test_bids
    initial_lots = 2 * test_lots

    def setUp(self):
        super(Tender2LotAwardComplaintDocumentResourceTest, self).setUp()
        # Create award
        bid = self.initial_bids[0]
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
        award = response.json['data']
        self.award_id = award['id']
        self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, self.award_id, self.tender_token), {'data': {'status': 'active', "qualified": True, "eligible": True}})
        # Create complaint for award
        response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(
            self.tender_id, self.award_id), {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': test_organization}})
        complaint = response.json['data']
        self.complaint_id = complaint['id']
        self.complaint_owner_token = response.json['access']['token']

    test_create_tender_award_complaint_document = snitch(create_tender_2lot_award_complaint_document)
    test_put_tender_award_complaint_document = snitch(put_tender_2lot_award_complaint_document)
    test_patch_tender_award_complaint_document = snitch(patch_tender_2lot_award_complaint_document)
class TenderAwardDocumentResourceTest(BaseTenderContentWebTest,
                                      TenderAwardDocumentResourceTestMixin):
    """Fixture with a pending award, for the award-document scenarios."""
    initial_status = 'active.qualification'
    initial_bids = test_bids

    def setUp(self):
        super(TenderAwardDocumentResourceTest, self).setUp()
        # Create award
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}})
        award = response.json['data']
        self.award_id = award['id']
class Tender2LotAwardDocumentResourceTest(BaseTenderContentWebTest,
                                          Tender2LotAwardDocumentResourceTestMixin):
    """Two-lot fixture with a pending lot award, for the two-lot
    award-document scenarios."""
    initial_status = 'active.qualification'
    initial_bids = test_bids
    initial_lots = 2 * test_lots

    def setUp(self):
        super(Tender2LotAwardDocumentResourceTest, self).setUp()
        # Create award
        bid = self.initial_bids[0]
        response = self.app.post_json('/tenders/{}/awards'.format(
            self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
        award = response.json['data']
        self.award_id = award['id']
def suite():
    """Build the module's test suite, covering every test class.

    BUGFIX: ``TenderLotAwardComplaintResourceTest`` was defined in this
    module but never added to the suite.  The local variable was also
    renamed (it previously shadowed the function's own name).
    """
    tests = unittest.TestSuite()
    for case in (
            Tender2LotAwardComplaintDocumentResourceTest,
            Tender2LotAwardComplaintResourceTest,
            Tender2LotAwardDocumentResourceTest,
            Tender2LotAwardResourceTest,
            TenderAwardComplaintDocumentResourceTest,
            TenderAwardComplaintResourceTest,
            TenderAwardDocumentResourceTest,
            TenderAwardResourceTest,
            TenderLotAwardComplaintResourceTest,  # previously missing
            TenderLotAwardResourceTest,
    ):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Run the module's suite() when executed directly (not on import).
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 42.980132 | 191 | 0.714715 |
6df76cad5d671784f1cfac8364a650ba088c067b | 3,529 | py | Python | Extra/final_Scrapper.py | AkshayBhimani/Ecommerce-cahtbot-using-NLP | 16ff857187870bc39ee4d9a04ab97eba82d49afa | [
"MIT"
] | null | null | null | Extra/final_Scrapper.py | AkshayBhimani/Ecommerce-cahtbot-using-NLP | 16ff857187870bc39ee4d9a04ab97eba82d49afa | [
"MIT"
] | null | null | null | Extra/final_Scrapper.py | AkshayBhimani/Ecommerce-cahtbot-using-NLP | 16ff857187870bc39ee4d9a04ab97eba82d49afa | [
"MIT"
] | 1 | 2022-02-23T18:42:56.000Z | 2022-02-23T18:42:56.000Z | import urllib
from urllib.parse import urlparse
import requests
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import re
import pandas as pd
import re
reviewlist =[]
def getAll(productName):
    """Find an amazon.in product page via Google, scrape up to 9 pages of
    its reviews and save them to ./reviews/<productName>.xlsx.

    Progress (result links, review-page URLs, running review count) is
    printed to stdout.  Reviews accumulate in the module-level
    ``reviewlist``, so repeated calls append to earlier results.
    """
    print()
    query = productName + "'amazon.in'"
    query = urllib.parse.quote_plus(query)
    number_result = 20
    ua = UserAgent()
    google_url = "https://www.google.com/search?q=" + query + "&num=" + str(number_result)
    # BUGFIX: the UA dict was passed positionally, which requests.get
    # treats as query *params*; it must be sent as request *headers*.
    response = requests.get(google_url, headers={"User-Agent": ua.random})
    soup = BeautifulSoup(response.text, "html.parser")
    result_div = soup.find_all('div', attrs={'class': 'ZINbbc'})
    links = []
    titles = []
    descriptions = []
    for r in result_div:
        try:
            link = r.find('a', href=True)
            title = r.find('div', attrs={'class': 'vvjwJb'}).get_text()
            description = r.find('div', attrs={'class': 's3v9rd'}).get_text()
            if link != '' and title != '' and description != '':
                links.append(link['href'])
                titles.append(title)
                descriptions.append(description)
        except:
            # Result blocks missing any of the three parts are skipped.
            continue
    to_remove = []
    clean_links = []
    for i, l in enumerate(links):
        # Google wraps targets as /url?q=<target>&sa=...; keep the target.
        # (raw string added: the original pattern triggered invalid-escape
        # warnings without changing its meaning)
        clean = re.search(r'\/url\?q\=(.*)\&sa', l)
        if clean is None:
            to_remove.append(i)
            continue
        clean_links.append(clean.group(1))
    print()
    print('link :', clean_links[0])
    print()
    productLink = clean_links[0]
    print(clean_links)
    # The review listing lives at the same path with /product-reviews/.
    productReviewsLink = productLink.replace('/dp/', '/product-reviews/')
    print(productReviewsLink)

    def get_soup(URL):
        # Desktop-browser headers: Amazon blocks the default requests UA.
        HEADERS = ({'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5'})
        r = requests.get(URL, headers=HEADERS)
        soup = BeautifulSoup(r.text, 'html.parser')
        return soup

    def get_reviews(soup):
        # Append every review card on the page to the module-level list.
        reviews = soup.find_all('div', {'data-hook': 'review'})
        try:
            for item in reviews:
                review = {
                    'product': soup.title.text.replace('Amazon.co.uk:Customer reviews:', '').strip(),
                    'title': item.find('a', {'data-hook': 'review-title'}).text.strip(),
                    'rating': float(item.find('i', {'data-hook': 'review-star-rating'}).text.replace('out of 5 stars', '').strip()),
                    'body': item.find('span', {'data-hook': 'review-body'}).text.strip(),
                }
                reviewlist.append(review)
        except:
            # A malformed card aborts this page but keeps earlier reviews.
            pass

    for x in range(1, 10):
        if x == 1:
            reviewPageLink = str(f'' + productReviewsLink) + str(f'/ref=cm_cr_dp_d_show_all_btm' + f'?ie=UTF8&reviewerType=all_reviews')
        else:
            reviewPageLink = str(f'' + productReviewsLink) + str(f'/ref=cm_cr_dp_d_show_all_btm_next_{x}' + f'?ie=UTF8&reviewerType=all_reviews&pageNumber={x}')
        print(reviewPageLink)
        soup = get_soup(reviewPageLink)
        print(f'Getting page: {x}')
        get_reviews(soup)
        print(len(reviewlist))
        # Stop once the disabled "Next" control marks the last page.
        if not soup.find('li', {'class': 'a-disabled a-last'}):
            pass
        else:
            break
    # NOTE(review): assumes the ./reviews/ directory already exists.
    df = pd.DataFrame(reviewlist)
    df.to_excel('./reviews/' + str(productName) + '.xlsx', index=False)
getAll("one plus 9 ")
| 29.90678 | 179 | 0.573534 |
4ba1d302576df695c5b2e867452b91b3d1d2844a | 2,621 | py | Python | python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 3 | 2021-06-11T06:48:10.000Z | 2021-09-02T10:18:06.000Z | python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 1 | 2020-11-05T08:41:11.000Z | 2020-11-05T08:41:11.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_api
class CallTransformer(gast.NodeTransformer):
    """AST pass that wraps plain function calls in ``convert_call``.

    Paddle APIs and Python builtins (except ``len``) are left untouched;
    every other callee ``f(...)`` is rewritten into
    ``fluid.dygraph.dygraph_to_static.convert_call(f)(...)``.
    """

    def __init__(self, wrapper_root):
        assert isinstance(
            wrapper_root, AstNodeWrapper
        ), "Input non-AstNodeWrapper node for the initialization of CallTransformer."
        self.wrapper_root = wrapper_root
        self.root = wrapper_root.node

    def _no_need_convert_call(self, node):
        """Return True when ``node`` must be left as-is.

        A call is skipped when it targets a Paddle API, or a Python
        builtin other than ``len`` (``len`` still needs conversion).
        """
        assert isinstance(node, gast.Call)
        if is_paddle_api(node):
            return True

        callee_source = ast_to_source_code(node.func).strip()
        try:
            from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin_len, is_builtin
            # The callee exists here only as source text, so evaluate it
            # to recover the object before classifying it.
            callee_is_builtin = eval("is_builtin({})".format(callee_source))
            callee_is_builtin_len = eval("is_builtin_len({})".format(callee_source))
            return callee_is_builtin and not callee_is_builtin_len
        except Exception:
            # Names that cannot be resolved are treated as convertible.
            return False

    def transform(self):
        """Run the pass over the stored AST root."""
        self.visit(self.root)

    def visit_Call(self, node):
        self.generic_visit(node)
        if self._no_need_convert_call(node):
            return node

        callee_source = ast_to_source_code(node.func).strip()
        wrapped_source = "fluid.dygraph.dygraph_to_static.convert_call({})".format(callee_source)
        node.func = gast.parse(wrapped_source).body[0].value
        return node
| 36.915493 | 107 | 0.69897 |
8d85db8378cbc82cfe2a2becbf3507a20943b7c9 | 1,588 | py | Python | growth/too/catalogs.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | null | null | null | growth/too/catalogs.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | null | null | null | growth/too/catalogs.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | null | null | null | import os
from astroquery.vizier import VizierClass
from astropy.table import Column, Table
from celery.local import PromiseProxy
import numpy as np
import pkg_resources
from .flask import app
vizier = VizierClass(row_limit=-1)
def fixup(table):
    """Prepare a catalog table for display.

    Adds placeholder credible-level columns, masks missing values, and
    applies human-friendly numeric formats.  Returns a new masked Table.
    """
    # Add dummy 2D and 3D credible level columns.
    # These columns are filled with nans because they are
    # localization dependent.
    # (Inserting '2D CL' at the same index afterwards places it before
    # '3D CL' in the final column order.)
    table.add_column(Column(np.repeat(np.nan, len(table))), 4, '3D CL')
    table.add_column(Column(np.repeat(np.nan, len(table))), 4, '2D CL')
    table = Table(table, masked=True)
    table.convert_bytestring_to_unicode()
    for column in table.columns.values():
        if np.issubsctype(column, np.floating):
            # Two decimals for floats; NaNs become masked entries.
            column.format = '%.02f'
            column.mask = np.isnan(column)
        elif np.issubsctype(column, np.unicode):
            # NOTE(review): np.unicode was removed in NumPy 1.20+ and
            # np.issubsctype in NumPy 2.0 -- this code pins an older NumPy.
            column.mask = (column == '')
    table['ra'].format = '%.04f'
    table['dec'].format = '%+.4f'
    return table
def get_from_vizier(*args, **kwargs):
    """Fetch exactly one catalog table from Vizier and normalize it.

    Arguments are forwarded to ``vizier.query_constraints``; the query is
    cached. Raises if the query returns anything but a single table.
    """
    table, = vizier.query_constraints(*args, cache=True, **kwargs)
    table.convert_bytestring_to_unicode()
    return fixup(table)
def get_from_package(filename):
    """Load a bundled catalog file and normalize it via ``fixup``.

    Prefers a copy in the Flask instance folder; falls back to the file
    shipped inside this package.
    """
    relpath = os.path.join('catalog', filename)
    try:
        stream = app.open_instance_resource(relpath)
    except IOError:
        stream = pkg_resources.resource_stream(__name__, relpath)
    # Only the on-disk path is needed; astropy re-opens the file itself.
    path_on_disk = stream.name
    stream.close()
    return fixup(Table.read(path_on_disk))
# Lazily-built catalogs: PromiseProxy (celery.local) defers the wrapped call,
# presumably until first use — so import stays cheap. 'J/ApJS/199/26/table3'
# is a Vizier catalog identifier; CLU.hdf5 ships as package data (see
# get_from_package). TODO confirm: twomass = 2MASS redshift survey, clu =
# Census of the Local Universe galaxy list.
twomass = PromiseProxy(get_from_vizier, ('J/ApJS/199/26/table3',))
galaxies = clu = PromiseProxy(get_from_package, ('CLU.hdf5',))
| 26.915254 | 71 | 0.68136 |
1817b185230d5ef430c61eea4cfe2e910428901a | 7,540 | py | Python | adafruit_logo_vector.py | pmartel/CircuitPyDisk | 4bcd71850dab9da84829126dfbd0e09948c3324f | [
"MIT"
] | null | null | null | adafruit_logo_vector.py | pmartel/CircuitPyDisk | 4bcd71850dab9da84829126dfbd0e09948c3324f | [
"MIT"
] | null | null | null | adafruit_logo_vector.py | pmartel/CircuitPyDisk | 4bcd71850dab9da84829126dfbd0e09948c3324f | [
"MIT"
] | null | null | null | ### Adafruit logo
"""Adafruit logo created from bitmap,
vectorised and flattened to straight lines by Inkscape
then points extracted from SVG data.
(Other route is to ask Adafruit for vector version!)
"""
### pylint 2.3.1 has some strange opinions on data structure indentation here
### and this conflicts with version 1.9.2
### pylint: disable=invalid-name,bad-continuation
# x/y translation offsets for the vector point data below. They are only
# defined here — presumably applied by whatever renders this module's
# `data`; confirm in the consumer.
offset_x = -10
offset_y = 9
data = [
# Removing the box outline
# Group 1
# (2.9962184, 251.49811),
# (2.9962184, 1.4981075),
# (252.99622, 1.4981075),
# (502.99622, 1.4981075),
# (502.99622, 251.49811),
# (502.99622, 501.49811),
# (252.99622, 501.49811),
# (2.9962184, 501.49811),
# (2.9962184, 251.49811),
# Outline of the flower followed by the five
# Group 2
[(342.49622, 454.21659),
(346.959242969, 451.054080156),
(349.16935125, 444.29346125),
(349.74007, 396.99811),
(349.42067, 348.99811),
(346.15117, 331.03534),
(341.803838594, 322.622872031),
(336.10557625, 315.17921375),
(320.50984, 301.96305),
(315.24595, 297.9209),
(322.89745, 300.16203),
(338.78867125, 303.15162),
(354.51274, 302.19987),
(370.1502025, 297.314415),
(383.99038, 290.42349),
(425.3782575, 261.03182375),
(455.62553, 237.51598),
(457.673981406, 232.384370781),
(457.63498125, 226.75574125),
(455.656955469, 221.435746094),
(451.88833, 217.23004),
(411.03035375, 203.142875),
(364.28838, 189.00345),
(346.28838, 187.51289),
(334.345396094, 188.212601406),
(324.08682875, 190.43598375),
(314.755874531, 194.422959219),
(305.59573, 200.41345),
(298.69523, 205.64576),
(302.36566, 200.82194),
(309.1910175, 190.60812125),
(313.73687, 180.22634),
(316.80698125, 162.25769875),
(315.61122, 142.49811),
(302.2062, 97.30761225),
(287.72529, 54.616095),
(281.9467975, 48.27558525),
(273.17468, 46.774785),
(268.19440125, 47.74447575),
(264.37486, 50.652804),
(236.2211725, 88.324987),
(209.53225, 127.99811),
(202.6606625, 146.3071125),
(201.25938, 164.93584),
(203.77534, 181.49811),
(206.51089, 188.99811),
(205.9051675, 188.794985),
(203.43769, 185.49811),
(196.64267375, 177.104034375),
(188.75523, 169.97381),
(179.92702375, 164.224298125),
(170.30972, 159.97236),
(160.326935313, 157.051098125),
(148.6087275, 155.3990125),
(102.61522, 154.49966),
(60.16592325, 155.3425225),
(53.6551856875, 157.376749063),
(50.290496, 161.35986),
(47.372345, 168.60014),
(48.89669, 175.71523),
(74.532052, 211.99811),
(94.1360167813, 237.819985),
(108.59780025, 254.2254825),
(120.832361594, 264.08215625),
(133.75466, 270.25756),
(141.60613, 272.36428),
(152.49621, 273.38038),
(164.49621, 273.99811),
(158.0089, 275.81829),
(145.07552625, 280.9514125),
(133.8165275, 288.7022225),
(124.0311775, 299.24521125),
(115.51875, 312.75487),
(98.843153625, 358.473565),
(85.295136, 403.16722),
(85.26685925, 408.01758375),
(86.915068, 411.86681),
(92.4915405, 417.39844125),
(99.473822, 419.49811),
(143.00896275, 406.315715),
(190.36777, 389.38911),
(202.01838, 382.636295),
(212.26301, 373.79104),
(221.50430875, 362.505715),
(227.61924, 348.61117),
(230.74226, 339.12239),
(231.21585, 351.56025),
(232.0963625, 362.919155),
(234.30307, 370.76155),
(242.12680375, 385.41184875),
(253.38592, 399.00254),
(289.43734875, 427.02384125),
(327.49622, 454.1789),
(334.9740875, 456.36105375),
#(342.49622, 454.2166),
#(342.49622, 454.21659),
],
# Group 3
[(269.38148, 328.24811),
(260.235962344, 318.54971125),
(252.98225875, 304.9253925),
(249.221025781, 291.33056),
(249.150290957, 285.780265547),
(250.55292, 281.72062),
(254.134922402, 279.408903848),
(258.744026719, 280.396155156),
(263.916454863, 284.085617324),
(269.18842875, 289.88053375),
(278.175901406, 305.399702969),
(280.963844004, 313.930442559),
(281.99622, 322.17961),
(281.55819875, 326.77132),
(279.99622, 329.49811),
(275.74986, 331.3257375),
#(269.38148, 328.24811),
#(269.38148, 328.24811),
],
# Group 4
[(189.27613, 317.48919),
(186.689292012, 315.017226172),
(186.253231719, 310.95217),
(190.75285125, 299.8028675),
(200.613802656, 287.56145625),
(213.6749, 277.74811),
(220.431676309, 274.964493711),
(225.392900469, 274.551332812),
(228.531887832, 276.197175508),
(229.82195375, 279.59057),
(229.236413574, 284.420064492),
(226.748582656, 290.374207187),
(215.95931, 304.41063),
(208.563450625, 310.792326562),
(201.1146375, 315.3621975),
(194.417365625, 317.725924687),
#(189.27613, 317.48919),
#(189.27613, 317.48919),
],
# Group 5
[(270.49622, 263.99178),
(266.275620215, 261.866190078),
(264.014989219, 259.301005),
(263.608503301, 256.443141797),
(264.95033875, 253.4395175),
(272.455678906, 247.58265375),
(285.68442, 242.90575),
(302.642775, 241.59651875),
(316.00365, 244.06329),
(319.7235275, 248.4713),
(318.6949, 253.83523),
(310.640874688, 259.620080625),
(297.3441375, 263.7845675),
(282.673111563, 265.513523125),
#(270.49622, 263.99178),
#(270.49622, 263.99178),
],
# Group 6
[(202.04337, 252.52942),
(191.235123437, 247.592305),
(181.9467875, 241.23868),
(175.444952812, 234.5173375),
(172.99621, 228.47707),
(174.24765832, 224.997173086),
(177.684219687, 223.036961563),
(189.20598, 223.281585),
(203.748087812, 228.422920938),
(217.49714, 237.67295),
(224.516762344, 246.891496563),
(224.975280254, 250.442414727),
(223.57588375, 253.118295),
(220.450783652, 254.792036523),
(215.732190781, 255.336538437),
#(202.04337, 252.52942),
#(202.04337, 252.52942),
],
# Group 7
[(243.14004, 239.17141),
(240.444360469, 233.682861094),
(238.94700375, 225.78207875),
(239.96685, 206.99811),
(245.63492375, 191.82701),
(249.349385156, 186.715313438),
(252.99622, 184.38871),
(256.799735, 184.899581563),
(259.7174025, 187.9743),
(262.71503, 201.49811),
(261.901079688, 215.476273438),
(258.45465, 228.0064775),
(253.205227813, 237.032497812),
(250.16360918, 239.588868945),
(246.9823, 240.49811),
#(243.14004, 239.17141),
#(243.14004, 239.17141),
]
] | 33.811659 | 77 | 0.535013 |
b9f00bf8a1fda9c1f8df45f585762e98bfe14cde | 1,095 | py | Python | scraper/storage_spiders/nguyenhopphatvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/nguyenhopphatvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/nguyenhopphatvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to pull product fields off a detail page.
# Empty strings mark fields this spider does not extract.
XPATH = {
    'name' : "//div[@id='Column-Right-Product']/div[@class='ProductNameLink']/h1/a",
    'price' : "//div[@class='row']/div[@id='Product-Right']/div[@class='ProductPriceNew clearfix']/span",
    'category' : "//ul[@class='col-xs-12']/li[@class='crust list-unstyled pull-left']/a[@class='crumb']/span",
    'description' : "//div[@class='quickSpecs']/article[@id='Context']/div[@class='Context']/ul[@class='ListStyle1']",
    'images' : "//img[@id='thumb']/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl scope.
name = 'nguyenhopphat.vn'
allowed_domains = ['nguyenhopphat.vn']
start_urls = ['http://nguyenhopphat.vn']
tracking_url = ''
# Sitemap crawling is configured with placeholder (empty-string) entries.
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Link rules: "<slug>-<digits>.html" pages are product details -> parse_item;
# other "<slug>.html" pages are treated as listings -> parse.
rules = [
    Rule(LinkExtractor(allow=['[a-zA-Z0-9-]+-\d+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['[a-zA-Z-]+\.html']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 40.555556 | 118 | 0.642009 |
facd62080ec3804fef871d55c39924d8c268a214 | 2,740 | py | Python | Chapter3/ch3_svc.py | buiksat/Learn-Algorithmic-Trading | b85807483dd869f2b6912556a99842f05b646e31 | [
"MIT"
] | 449 | 2019-12-13T09:53:41.000Z | 2022-03-30T23:06:44.000Z | Chapter3/ch3_svc.py | zecaclasher/Learn-Algorithmic-Trading | e938502980d3bf8d560f260437b4bb162e1b1bfd | [
"MIT"
] | 10 | 2020-05-14T20:18:07.000Z | 2022-02-11T15:13:19.000Z | Chapter3/ch3_svc.py | zecaclasher/Learn-Algorithmic-Trading | e938502980d3bf8d560f260437b4bb162e1b1bfd | [
"MIT"
] | 178 | 2019-12-15T02:56:31.000Z | 2022-03-31T19:21:32.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from pandas_datareader import data
def load_financial_data(start_date, end_date, output_file):
    """Return GOOG daily data, using ``output_file`` as a local pickle cache.

    On a cache miss the series is downloaded from Yahoo via
    pandas-datareader and pickled for next time.
    """
    try:
        frame = pd.read_pickle(output_file)
    except FileNotFoundError:
        print('File not found...downloading the GOOG data')
        frame = data.DataReader('GOOG', 'yahoo', start_date, end_date)
        frame.to_pickle(output_file)
    else:
        print('File data found...reading GOOG data')
    return frame
# Load (or download and cache) the GOOG price history used by the rest of
# this script.
goog_data = load_financial_data(start_date='2001-01-01',
                                end_date='2018-01-01',
                                output_file='goog_data_large.pkl')
def create_trading_condition(df):
    """Build the (X, Y) learning problem from an OHLC frame.

    Adds 'Open-Close' and 'High-Low' spread columns to ``df`` (in place),
    drops rows with missing values, and labels each row +1 when the next
    close is higher than today's close, else -1.
    """
    df['Open-Close'] = df['Open'] - df['Close']
    df['High-Low'] = df['High'] - df['Low']
    df = df.dropna()
    features = df[['Open-Close', 'High-Low']]
    target = np.where(df['Close'].shift(-1) > df['Close'], 1, -1)
    return features, target
def create_train_split_group(X, Y, split_ratio=0.8):
    """Chronological (unshuffled) train/test split.

    Returns (X_train, X_test, Y_train, Y_test) with the first
    ``split_ratio`` fraction of rows in the training set — shuffling is
    disabled so the time ordering of the samples is preserved.
    """
    return train_test_split(X, Y, train_size=split_ratio, shuffle=False)
# Feature/target matrices and a chronological 80/20 split.
X, Y = create_trading_condition(goog_data)
X_train, X_test, Y_train, Y_test = \
    create_train_split_group(X, Y, split_ratio=0.8)
# Fit the model
svc = SVC()
svc.fit(X_train, Y_train)
# Forecast value
# NOTE(review): the signal is predicted over the FULL sample (X), so the
# in-sample rows are included downstream.
goog_data['Predicted_Signal'] = svc.predict(X)
# Daily log returns of the close price.
goog_data['GOOG_Returns'] = np.log(goog_data['Close'] /
                                   goog_data['Close'].shift(1))
def calculate_return(df, split_value, symbol):
    """Cumulative buy-and-hold return (%) from row ``split_value`` onward.

    Side effect: writes a 'Strategy_Returns' column into ``df`` — the
    previous row's predicted signal applied to the current row's return.
    """
    returns_col = '%s_Returns' % symbol
    cum_symbol_return = df[split_value:][returns_col].cumsum() * 100
    df['Strategy_Returns'] = df[returns_col] * df['Predicted_Signal'].shift(1)
    return cum_symbol_return
def calculate_strategy_return(df, split_value):
    """Cumulative strategy return (%) over rows from ``split_value`` onward,
    based on the 'Strategy_Returns' column added by calculate_return."""
    return df[split_value:]['Strategy_Returns'].cumsum() * 100
# Cumulative returns over the held-out rows (index len(X_train) onward).
# calculate_return also adds the 'Strategy_Returns' column to goog_data,
# which calculate_strategy_return then consumes.
cum_goog_return = calculate_return(goog_data, split_value=len(X_train), symbol='GOOG')
cum_strategy_return = calculate_strategy_return(goog_data, split_value=len(X_train))
def plot_shart(cum_symbol_return, cum_strategy_return, symbol):
    """Plot cumulative buy-and-hold vs. strategy returns (blocks on plt.show).

    NOTE(review): 'shart' looks like a typo for 'chart', but renaming would
    break callers, so the name is kept.
    """
    plt.figure(figsize=(10, 5))
    plt.plot(cum_symbol_return, label='%s Returns' % symbol)
    plt.plot(cum_strategy_return, label='Strategy Returns')
    plt.legend()
    plt.show()
plot_shart(cum_goog_return, cum_strategy_return,symbol='GOOG')
def sharpe_ratio(symbol_returns, strategy_returns):
    """Mean excess return of the strategy over the symbol, scaled by the
    strategy's own standard deviation.

    NOTE(review): a textbook Sharpe ratio divides by the std of the *excess*
    series; this divides by std(strategy_returns) — preserved as written.
    """
    excess = strategy_returns - symbol_returns
    return (excess / strategy_returns.std()).mean()
# In-sample vs. out-of-sample classifier accuracy.
accuracy_train = accuracy_score(Y_train, svc.predict(X_train))
accuracy_test = accuracy_score(Y_test, svc.predict(X_test))
print(accuracy_train, accuracy_test)
# NOTE(review): sharpe_ratio's parameters are (symbol_returns,
# strategy_returns) but the cumulative *strategy* series is passed first
# here — confirm the argument order is intended.
print(sharpe_ratio(cum_strategy_return, cum_goog_return))
| 32.235294 | 87 | 0.736131 |
5292daaf2348f3e254df3b13a228a3a21fc790a1 | 6,085 | py | Python | pinry/core/migrations/0002_auto__add_field_pin_learned.py | wids-eria/pinry | bde1aa109c162a24fca0d41d3b5cadf0c433c21b | [
"BSD-2-Clause"
] | 1 | 2015-02-02T02:48:12.000Z | 2015-02-02T02:48:12.000Z | pinry/core/migrations/0002_auto__add_field_pin_learned.py | wids-eria/pinry | bde1aa109c162a24fca0d41d3b5cadf0c433c21b | [
"BSD-2-Clause"
] | null | null | null | pinry/core/migrations/0002_auto__add_field_pin_learned.py | wids-eria/pinry | bde1aa109c162a24fca0d41d3b5cadf0c433c21b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable 'learned' text field to Pin."""

    def forwards(self, orm):
        """Apply: add TextField 'learned' (null/blank) to the core_pin table."""
        # Adding field 'Pin.learned'
        db.add_column(u'core_pin', 'learned',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the 'learned' column from core_pin."""
        # Deleting field 'Pin.learned'
        db.delete_column(u'core_pin', 'learned')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.pin': {
'Meta': {'object_name': 'Pin'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pin'", 'to': u"orm['django_images.Image']"}),
'learned': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origin': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'published': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
u'django_images.image': {
'Meta': {'object_name': 'Image'},
'height': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['core'] | 66.141304 | 187 | 0.562202 |
0c18e42fd838b3f64ccd7ab5920dc8785cec31ba | 567 | py | Python | Gathered CTF writeups/cysc19-ctf/crypto/dictionary/generate/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/cysc19-ctf/crypto/dictionary/generate/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/cysc19-ctf/crypto/dictionary/generate/generate.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | #!/usr/bin/env python3
import random
from string import ascii_letters, digits
# Alphabet for the decoy flag bodies.
charlist = ascii_letters + digits + "_"
flag = "CYS{p1np01n7_4ccur4cy}"

with open("./distrib/lookup.lst", "a") as fp:
    # Emit 100000 candidate lines; the real flag is inserted just before
    # decoy line 75212.
    for line_no in range(100000):
        if line_no == 75212:
            fp.write(flag + "\n")
        # 17 random characters — the length of the real flag's body.
        body = "".join(random.choice(charlist) for _ in range(17))
        fp.write("CYS{" + body + "}\n")
| 29.842105 | 63 | 0.619048 |
19f077674e45539c32a28b3bb0a54a4b3684cc4c | 224 | py | Python | main.py | brybalti/Exercise-01b-Guessing-Game | 4ea335cd52543d293a15ea74b61928ed239363db | [
"MIT"
] | null | null | null | main.py | brybalti/Exercise-01b-Guessing-Game | 4ea335cd52543d293a15ea74b61928ed239363db | [
"MIT"
] | null | null | null | main.py | brybalti/Exercise-01b-Guessing-Game | 4ea335cd52543d293a15ea74b61928ed239363db | [
"MIT"
] | null | null | null | number = 5
# Read the player's guess and compare it against the secret `number`.
guess = int(input("Guess a number from 1 to 10: "))
if guess == number:
    print("Great job! You got it!")
else:
    print("Sorry, better luck next time.")
print("The number was " + str(number)) | 28 | 46 | 0.638393 |
9d490c06167353affa4c515800afe0cf3adecd64 | 1,043 | py | Python | pyramid_igniter/examples/bash/bash/scripts/initializedb.py | tark-hidden/pyramid_igniter | d0723e5547811c652f100763d36538985a97c11e | [
"BSD-3-Clause"
] | 2 | 2015-03-10T17:45:56.000Z | 2015-03-12T06:31:31.000Z | pyramid_igniter/examples/bash/bash/scripts/initializedb.py | tark-hidden/pyramid_igniter | d0723e5547811c652f100763d36538985a97c11e | [
"BSD-3-Clause"
] | null | null | null | pyramid_igniter/examples/bash/bash/scripts/initializedb.py | tark-hidden/pyramid_igniter | d0723e5547811c652f100763d36538985a97c11e | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
from sqlalchemy import engine_from_config
from pyramid.paster import get_appsettings, setup_logging
from pyramid.scripts.common import parse_vars
from bash.models import dbs, Base, StaffModel
import transaction
def usage(argv):
    """Print a usage banner built from the invoked script name, then exit(1)."""
    cmd = os.path.basename(argv[0])
    banner = ('usage: %s <config_uri> [var=value]\n'
              '(example: "%s development.ini")' % (cmd, cmd))
    print(banner)
    sys.exit(1)
def main(argv=sys.argv):
    """Console entry point: create the schema and seed the admin account.

    Expects ``argv[1]`` to be a Pyramid config URI (e.g. development.ini);
    extra ``var=value`` arguments override settings.
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    # Bind the session factory to the configured engine and create tables.
    engine = engine_from_config(settings, 'sqlalchemy.')
    dbs.configure(bind=engine)
    Base.metadata.create_all(engine)
    # Seed a fixed admin user (pre-hashed password + salt).
    with transaction.manager:
        admin = StaffModel(
            id=1, role='admin', name='admin',
            password='993b33d7c0fc53d51125255feae9b9'
                     '4ce5213c2269d254b895c423c87035610e',
            salt='666')
        dbs.add(admin)
| 28.189189 | 59 | 0.668265 |
043b8a0f13df5cc501bac0ac4dec51b71aaf6189 | 12,947 | py | Python | Kaggle-PyTorch/PyTorch-Ensembler/main-statoil.py | pseemakurthi/Deep-Learning-Boot-Camp | c7830b6791b3ef4b8711a78f6290dfa415f678a7 | [
"MIT"
] | 1 | 2021-12-30T09:27:35.000Z | 2021-12-30T09:27:35.000Z | Kaggle-PyTorch/PyTorch-Ensembler/main-statoil.py | PermanentPon/Deep-Learning-Boot-Camp | f8889cd109135d065da141fc0fa8e74040016887 | [
"MIT"
] | null | null | null | Kaggle-PyTorch/PyTorch-Ensembler/main-statoil.py | PermanentPon/Deep-Learning-Boot-Camp | f8889cd109135d065da141fc0fa8e74040016887 | [
"MIT"
] | 1 | 2019-09-17T09:42:17.000Z | 2019-09-17T09:42:17.000Z | from __future__ import print_function
import argparse
import sys
import torch.backends.cudnn as cudnn
from tqdm import tqdm
from utils import *
# from losses import Eve
from pycrayon import *
# Selectable architecture names: every lowercase, public, callable attribute
# exported by nnmodels.
model_names = sorted(name for name in nnmodels.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(nnmodels.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 and 100 Training')
print("Available models:" + str(model_names))
parser.add_argument('--validationRatio', type=float, default=0.11, help='test Validation Split.')
parser.add_argument('--optim', type=str, default='adam', help='Adam or SGD')
parser.add_argument('--lr_period', default=10, type=float, help='learning rate schedule restart period')
parser.add_argument('--batch_size', default=64, type=int, metavar='N', help='train batchsize')
parser.add_argument('--num_classes', type=int, default=1, help='Number of Classes in data set.')
parser.add_argument('--data_path', default='d:/db/data/ice/', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, default='statoil', choices=['statoil', 'statoil'],
help='Choose between Statoil.')
# parser.add_argument('--num_classes', type=int, default=10, help='Number of Classes in data set.')
# parser.add_argument('--data_path', default='d:/db/data/cifar10/', type=str, help='Path to dataset')
# parser.add_argument('--dataset', type=str, default='cifar10',choices=['cifar10', 'Iceberg'],help='Choose between Cifar10/100 and ImageNet.')
# parser.add_argument('--arch', metavar='ARCH', default='senet', choices=model_names)
parser.add_argument('--imgDim', default=2, type=int, help='number of Image input dimensions')
parser.add_argument('--base_factor', default=32, type=int, help='SENet base factor')
parser.add_argument('--epochs', type=int, default=66, help='Number of epochs to train.')
parser.add_argument('--current_time', type=str, default=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
help='Current time.')
# parser.add_argument('--learning_rate', type=float, default=0.0005, help='The Learning Rate.')
parser.add_argument('--lr', '--learning-rate', type=float, default=0.0005, help='The Learning Rate.')
parser.add_argument('--momentum', type=float, default=0.95, help='Momentum.')
parser.add_argument('--decay', type=float, default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1],
help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')
# Checkpoints
parser.add_argument('--print_freq', default=50, type=int, metavar='N', help='print frequency (default: 200)')
parser.add_argument('--save_path', type=str, default='./log/', help='Folder to save checkpoints and log.')
parser.add_argument('--save_path_model', type=str, default='./log/', help='Folder to save checkpoints and log.')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers (default: 0)')
# random seed
parser.add_argument('--manualSeed', type=int, default=999, help='manual seed')
parser.add_argument('--use_tensorboard', type=bool, default=True, help='Log to tensorboard')
parser.add_argument('--tensorboard_ip', type=str, default='http://192.168.0.2', help='tensorboard IP')
args = parser.parse_args()
# Plain-dict snapshot of every CLI option, used for logging and for the
# optimizer settings further down.
state = {k: v for k, v in args._get_kwargs()}
if not os.path.isdir(args.save_path):
    os.makedirs(args.save_path)
# Use CUDA
# NOTE(review): parse_args() is invoked a second time here — redundant.
args = parser.parse_args()
args.use_cuda = args.ngpu > 0 and torch.cuda.is_available()
use_cuda = args.use_cuda
if args.manualSeed is None:
    args.manualSeed = 999
fixSeed(args)
def BinaryTrainAndValidate(model, criterion, optimizer, runId, debug=False):
    """Train ``model`` for args.epochs epochs, validating after every epoch.

    Relies on module-level state: args/state, trainloader/testloader,
    trainset/testset, logger, recorder, the tensorboard client ``exp``,
    the ``log`` file and ``model_name``.

    Returns (val_result, train_result): the final epoch's losses formatted
    as '%.6f' strings.

    NOTE(review): written for pre-0.4 PyTorch (`Variable`, `volatile=True`,
    `loss.data[0]`, `.cuda(async=True)`; `async` is a reserved word from
    Python 3.7 on).
    """
    if args.use_cuda:
        model.cuda()
        criterion.cuda()
    all_losses = []
    val_losses = []
    for epoch in tqdm(range(args.epochs)):
        # adjust_learning_rate(optimizer,epoch, args)
        model.train()
        # Per-epoch banner (the stray ']]' is in the original format string).
        tqdm.write('\n==>>Epoch=[{:03d}/{:03d}]], {:s}, LR=[{}], Batch=[{}]'.format(epoch, args.epochs, time_string(),
                                                                                    state['lr'],
                                                                                    args.batch_size) + ' [Model={}]'.format(
            (type(model).__name__), ), log)
        running_loss = 0.0
        # for i, row_data in tqdm (enumerate(trainloader, 1)):
        for i, row_data in (enumerate(trainloader, 1)):
            img, label = row_data
            if use_cuda:
                img, label = Variable(img.cuda(async=True)), Variable(label.cuda(async=True))  # On GPU
            else:
                img, label = Variable(img), Variable(label)  # RuntimeError: expected CPU tensor (got CUDA tensor)
            out = model(img)
            loss = criterion(out, label)
            # Batch-mean loss re-weighted by batch size -> epoch-sum of loss.
            running_loss += loss.data[0] * label.size(0)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        all_losses.append(running_loss / (args.batch_size * i))
        # Training accuracy is estimated on the LAST train batch only
        # (0.5 threshold on the network output).
        predicted_tr = (model(img).data > 0.5).float()
        accuracy_tr = (predicted_tr == label.data).float().mean() * 100
        # model.eval()
        # NOTE(review): eval() stays commented out, so validation below runs
        # with the network still in train mode (dropout/batch-norm active).
        eval_loss = 0
        for row_data in testloader:
            img, label = row_data
            if use_cuda:
                img, label = Variable(img.cuda(async=True), volatile=True), Variable(label.cuda(async=True),
                                                                                     volatile=True)  # On GPU
            else:
                img = Variable(img, volatile=True)
                label = Variable(label, volatile=True)
            out = model(img)
            loss = criterion(out, label)
            eval_loss += loss.data[0] * label.size(0)
        val_losses.append(eval_loss / (len(testset)))
        # Validation accuracy likewise uses only the LAST validation batch.
        predicted_val = (model(img).data > 0.5).float()
        # predictions_val = predicted_val.cpu().numpy()
        accuracy_val = (predicted_val == label.data).float().mean() * 100
        if debug is True:
            tqdm.write('-->LOSS T/V:[{:.6f}/{:.6f}%], ACC T/V:[{:.6f}/{:.6f}%]'.format(running_loss / (len(trainset)),
                                                                                       eval_loss / (len(testset)),
                                                                                       accuracy_tr, accuracy_val))
        if args.use_tensorboard:
            exp.add_scalar_value('tr_epoch_loss', running_loss / (len(trainset)), step=epoch)
            exp.add_scalar_value('tr_epoch_acc', accuracy_tr, step=epoch)
            exp.add_scalar_value('val_epoch_loss', eval_loss / (len(testset)), step=epoch)
            exp.add_scalar_value('val_epoch_acc', accuracy_val, step=epoch)
        val_result = float('{:.6f}'.format(eval_loss / (len(testset))))
        train_result = float('{:.6f}'.format(running_loss / (len(trainset))))
        recorder.update(epoch, train_result, accuracy_tr, val_result, accuracy_val)
        mPath = args.save_path_model + '/'
        if not os.path.isdir(mPath):
            os.makedirs(mPath)
        recorder.plot_curve(os.path.join(mPath, model_name + '_' + runId + '.png'), args, model)
        logger.append([state['lr'], train_result, val_result, accuracy_tr, accuracy_val])
        # "Early stopping": once both losses drop below 0.175, dump this
        # epoch's predictions and skip the LR-schedule update (`continue`)
        # — training itself keeps going (the `break` is commented out).
        if (float(val_result) < float(0.175) and float(train_result) < float(0.175)):
            print_log("=>>EARLY STOPPING", log)
            df_pred = BinaryInference(model, args)
            savePred(df_pred, model, str(val_result) + '_' + str(epoch), train_result, args.save_path_model)
            # break
            continue
        adjust_learning_rate(optimizer, epoch, args)
    tqdm.write('TRAIN Loss: {:.6f}'.format(running_loss / (len(trainset))), log)
    tqdm.write('VALIDATION Loss: {:.6f}'.format(eval_loss / (len(testset))), log)
    val_result = '{:.6f}'.format(eval_loss / (len(testset)))
    train_result = '{:.6f}'.format(running_loss / (len(trainset)))
    return val_result, train_result
def loadDB(args):
    """Build data loaders for the dataset named in ``args.dataset``.

    Side effects: for 'statoil' this forces args.num_classes = 1 and
    args.imgDim = 2 before constructing the loaders.

    Returns (trainloader, testloader, trainset, testset).

    Raises:
        ValueError: for an unsupported dataset name. (Previously an unknown
        name fell through to an UnboundLocalError on the return statement.)
    """
    print('==> Preparing dataset %s' % args.dataset)
    if args.dataset != 'statoil':
        raise ValueError('Unsupported dataset: %s' % args.dataset)
    args.num_classes = 1
    args.imgDim = 2
    trainloader, testloader, trainset, testset = getStatoilTrainValLoaders(args)
    return trainloader, testloader, trainset, testset
if __name__ == '__main__':
    # Driver: repeatedly trains fresh model instances (ensemble members),
    # logging each run to disk and optionally to a Crayon/TensorBoard server.
    if args.use_tensorboard == True:
        cc = CrayonClient(hostname=args.tensorboard_ip)
        cc.remove_all_experiments()

    trainloader, testloader, trainset, testset = loadDB(args)
    for i in range(0, 50):
        models = ['senet']
        for m in models:
            runId = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            fixSeed(args)
            model = selectModel(args, m)
            model_name = (type(model).__name__)
            exp_name = datetime.datetime.now().strftime(model_name + '_' + args.dataset + '_%Y-%m-%d_%H-%M-%S')
            if args.use_tensorboard == True:
                exp = cc.create_experiment(exp_name)
            # Per-run output directory: <save_path>/<dataset>/<model>/
            mPath = args.save_path + '/' + args.dataset + '/' + model_name + '/'
            args.save_path_model = mPath
            if not os.path.isdir(args.save_path_model):
                mkdir_p(args.save_path_model)
            log = open(os.path.join(args.save_path_model, 'log_seed_{}_{}.txt'.format(args.manualSeed, runId)), 'w')
            print_log('Save path : {}'.format(args.save_path_model), log)
            print_log(state, log)
            print_log("Random Seed: {}".format(args.manualSeed), log)
            print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
            print_log("torch version : {}".format(torch.__version__), log)
            print_log("cudnn version : {}".format(torch.backends.cudnn.version()), log)
            print_log("LR :" + str(args.lr), log)
            print_log("Available models:" + str(model_names), log)
            print_log("=> Final model name: '{}'".format(model_name), log)
            print_log("=> Log to TENSORBOARD: '{}'".format(args.use_tensorboard), log)
            print_log("=> TENSORBOARD ip:'{}'".format(args.tensorboard_ip), log)
            model.cuda()
            cudnn.benchmark = True
            print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
            # Binary task -> BCE over a single output; multi-class -> CE.
            if args.num_classes == 1:
                criterion = torch.nn.BCELoss()
            else:
                criterion = torch.nn.CrossEntropyLoss()
            # BUGFIX: was `args.optim is 'adam'` — identity comparison on a
            # CLI-supplied string is unreliable; compare by value.
            if args.optim == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), args.lr)  # L2 regularization
            else:
                # BUGFIX: `state` has no 'weight_decay' key (the option is
                # '--decay'), so this branch previously raised KeyError.
                optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=state['momentum'],
                                            weight_decay=state['decay'], nesterov=True)
            title = model_name
            logger = Logger(os.path.join(args.save_path_model, runId + '_log.txt'), title=title)
            logger.set_names(['LearningRate', 'TrainLoss', 'ValidLoss', 'TrainAcc.', 'ValidAcc.'])
            recorder = RecorderMeter(args.epochs)  # epoc is updated
            val_result, train_result = BinaryTrainAndValidate(model, criterion, optimizer, runId, debug=True)
            # Persist predictions only for sufficiently good runs.
            if (float(val_result) < float(0.165) and float(train_result) < float(0.165)):
                df_pred = BinaryInference(model)
                savePred(df_pred, model, val_result, train_result, args.save_path_model)
            logger.close()
            logger.plot()
| 49.605364 | 142 | 0.613347 |
4e90a71ddb4fb9bab8399c16b66aba01ebf3940a | 150 | py | Python | gastarme/wallets/apps.py | rafaeltardivo/gastarme | 7e97e611118ab61885f5088a0df7ab1792b71e6e | [
"MIT"
] | null | null | null | gastarme/wallets/apps.py | rafaeltardivo/gastarme | 7e97e611118ab61885f5088a0df7ab1792b71e6e | [
"MIT"
] | null | null | null | gastarme/wallets/apps.py | rafaeltardivo/gastarme | 7e97e611118ab61885f5088a0df7ab1792b71e6e | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class WalletsConfig(AppConfig):
    """Django application configuration for the ``wallets`` app."""
    name = 'wallets'
    def ready(self):
        # Imported for its side effects: registers the app's signal handlers
        # once the Django app registry is fully loaded.
        import wallets.signals  # noqa
| 16.666667 | 38 | 0.686667 |
bd38ffed0791a1b5a79f8c77051784fab091a7ec | 1,202 | py | Python | imperative/python/gen_version.py | MegChai/MegEngine | edfd38befdd15d2108209ef44b1f92c840fd0cee | [
"Apache-2.0"
] | null | null | null | imperative/python/gen_version.py | MegChai/MegEngine | edfd38befdd15d2108209ef44b1f92c840fd0cee | [
"Apache-2.0"
] | null | null | null | imperative/python/gen_version.py | MegChai/MegEngine | edfd38befdd15d2108209ef44b1f92c840fd0cee | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import subprocess
def get_git_commit(src_dir):
    """Return the git HEAD commit hash of *src_dir*.

    Falls back to the string ``'unknown'`` when the directory is not a git
    checkout or git itself cannot be run.
    """
    try:
        output = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=src_dir)
        return output.decode('ascii').strip()
    except Exception:
        return 'unknown'
def get_mge_version(version_txt_path):
    """Execute the version file and return the resulting namespace as a dict."""
    namespace = {}
    with open(version_txt_path) as fp:
        source = fp.read()
    exec(source, namespace)
    return namespace
if __name__ == "__main__":
    # CLI entry point: writes a generated version.py (version string, git
    # commit, optional internal flag) to the requested build path.
    parser = argparse.ArgumentParser(description="generate version.py to build path")
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--major", type=int, required=True)
    parser.add_argument("--minor", type=int, required=True)
    parser.add_argument("--patch", type=int, required=True)
    parser.add_argument("--internal", action='store_true')
    args = parser.parse_args()
    # The commit is taken from the checkout containing this script.
    python_dir = os.path.dirname(__file__)
    commit_id = get_git_commit(python_dir)
    mge_ver = str(args.major) + "." + str(args.minor) + "." + str(args.patch)
    with open(args.output, 'w') as f:
        f.write("__version__ = '{}'\n".format(mge_ver))
        f.write("git_version = {}\n".format(repr(commit_id)))
        if args.internal:
            f.write("__internal__ = True\n")
| 36.424242 | 105 | 0.659734 |
973cf4b8c263ecb6ae38f083c76f00e46df19f5d | 2,075 | py | Python | mistral/notifiers/base.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 205 | 2015-06-21T11:51:47.000Z | 2022-03-05T04:00:04.000Z | mistral/notifiers/base.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 8 | 2015-06-23T14:47:58.000Z | 2021-01-28T06:06:44.000Z | mistral/notifiers/base.py | shubhamdang/mistral | 3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d | [
"Apache-2.0"
] | 110 | 2015-06-14T03:34:38.000Z | 2021-11-11T12:12:56.000Z | # Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_log import log as logging
from stevedore import driver
LOG = logging.getLogger(__name__)
# Module-level caches of loaded stevedore drivers, keyed by entry-point name;
# populated lazily by get_notifier()/get_notification_publisher(), reset by cleanup().
_NOTIFIERS = {}
_NOTIFICATION_PUBLISHERS = {}
def cleanup():
    """Reset the notifier and publisher caches (e.g. between test runs)."""
    global _NOTIFIERS, _NOTIFICATION_PUBLISHERS

    _NOTIFIERS = {}
    _NOTIFICATION_PUBLISHERS = {}
def get_notifier(notifier_name):
    """Look up (and lazily load + cache) the notifier named *notifier_name*.

    The driver is loaded once through the ``mistral.notifiers`` stevedore
    entry point; later calls return the cached instance.
    """
    global _NOTIFIERS

    if not _NOTIFIERS.get(notifier_name):
        manager = driver.DriverManager(
            'mistral.notifiers',
            notifier_name,
            invoke_on_load=True
        )

        _NOTIFIERS[notifier_name] = manager.driver

    return _NOTIFIERS[notifier_name]
def get_notification_publisher(publisher_name):
    """Look up (and lazily load + cache) the publisher named *publisher_name*.

    The driver is loaded once through the ``mistral.notification.publishers``
    stevedore entry point; later calls return the cached instance.
    """
    global _NOTIFICATION_PUBLISHERS

    if not _NOTIFICATION_PUBLISHERS.get(publisher_name):
        manager = driver.DriverManager(
            'mistral.notification.publishers',
            publisher_name,
            invoke_on_load=True
        )

        _NOTIFICATION_PUBLISHERS[publisher_name] = manager.driver

    return _NOTIFICATION_PUBLISHERS[publisher_name]
class Notifier(object, metaclass=abc.ABCMeta):
    """Notifier interface.

    Concrete implementations deliver a single execution event to an
    external consumer.
    """
    @abc.abstractmethod
    def notify(self, ex_id, data, event, timestamp, **kwargs):
        # ex_id: id of the execution the event refers to; data: event payload;
        # event: event type identifier; timestamp: when the event occurred.
        raise NotImplementedError()
class NotificationPublisher(object, metaclass=abc.ABCMeta):
    """Notifier plugin interface.

    Concrete publishers push a notification to a concrete transport
    (e.g. an HTTP endpoint or a message queue).
    """
    @abc.abstractmethod
    def publish(self, ctx, ex_id, data, event, timestamp, **kwargs):
        # ctx: presumably the request/security context - confirm with callers.
        raise NotImplementedError()
| 26.265823 | 77 | 0.703133 |
2dbe6ab08c60b639010e8708293e7460bf13513b | 907 | py | Python | aoc_wim/aoc2016/q13.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 20 | 2019-10-15T07:33:13.000Z | 2022-01-19T13:40:36.000Z | aoc_wim/aoc2016/q13.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 5 | 2019-02-01T23:31:27.000Z | 2021-12-03T06:55:58.000Z | aoc_wim/aoc2016/q13.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 8 | 2019-12-03T15:41:23.000Z | 2021-12-06T17:13:57.000Z | """
--- Day 13: A Maze of Twisty Little Cubicles ---
https://adventofcode.com/2016/day/13
"""
from aoc_wim.zgrid import ZGrid
from aocd import data
class WallMap:
    """Callable maze oracle for AoC 2016 day 13.

    ``wall_map(z)`` returns ``"#"`` when the cell at complex coordinate ``z``
    is a wall (or out of bounds, i.e. negative x/y) and ``"."`` when it is
    open corridor.
    """

    def __init__(self, fav_number=None):
        # Default to the puzzle input when no designer's number is supplied.
        if fav_number is None:
            fav_number = int(data)
        self.fav_number = fav_number

    def __call__(self, z):
        # Everything left of / above the origin counts as wall.
        if z.real < 0 or z.imag < 0:
            return "#"
        x, y = z.real, z.imag
        polynomial = x * x + 3 * x + 2 * x * y + y + y * y
        ones = bin(int(polynomial) + self.fav_number).count("1")
        # Even popcount -> open space, odd popcount -> wall.
        return "." if ones % 2 == 0 else "#"
z0 = 1 + 1j  # start position (x=1, y=1)
target = 31 + 39j  # part a destination (x=31, y=39)
if __name__ == "__main__":
    # BFS over the implicitly defined maze; WallMap() reads the puzzle input.
    grid = ZGrid(WallMap(), on=".", off="#")
    depths = grid.bfs(target=target, z0=z0)
    print("part a:", depths[target])
    # Part b: count of distinct cells reachable within 50 steps.
    depths = grid.bfs(z0=z0, max_depth=50)
    print("part b:", len(depths))
    grid.draw_path(z=target, z0=z0)
| 25.194444 | 60 | 0.566703 |
491f367727670abd65a488be8bdf49906b7140a5 | 4,859 | py | Python | lib/galaxy_utils/sequence/vcf.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 2 | 2016-02-23T00:09:14.000Z | 2019-02-11T07:48:44.000Z | lib/galaxy_utils/sequence/vcf.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy_utils/sequence/vcf.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 6 | 2015-05-27T13:09:50.000Z | 2019-02-11T07:48:46.000Z | #Dan Blankenberg
# See http://www.1000genomes.org/wiki/Analysis/variant-call-format
NOT_A_NUMBER = float( 'NaN' )  # sentinel for missing QUAL values ('.' in VCF)
class VariantCall( object ):
    """Abstract base class for version-specific VCF data-line parsers."""
    # Subclasses fill these in with their version string and header layout.
    version = None
    header_startswith = None
    required_header_fields = None
    required_header_length = None
    @classmethod
    def get_class_by_format( cls, format ):
        # Map a ``##fileformat`` value (e.g. 'VCFv4.0') to its parser class.
        assert format in VCF_FORMATS, 'Unknown format type specified: %s' % format
        return VCF_FORMATS[ format ]
    def __init__( self, vcf_line, metadata, sample_names ):
        raise Exception( 'Abstract Method' )
class VariantCall33( VariantCall ):
    """Parser for a single VCFv3.3 data line."""
    version = 'VCFv3.3'
    header_startswith = '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO'
    required_header_fields = header_startswith.split( '\t' )
    required_header_length = len( required_header_fields )
    def __init__( self, vcf_line, metadata, sample_names ):
        """Parse *vcf_line* into typed attributes.

        metadata -- dict of ``##key=value`` headers collected by Reader.
        sample_names -- per-sample column names from the ``#CHROM`` header.
        """
        # Raw line is needed for indexing file.
        self.raw_line = vcf_line
        self.line = vcf_line.rstrip( '\n\r' )
        self.metadata = metadata
        self.sample_names = sample_names
        self.format = None
        self.sample_values = []
        #parse line
        self.fields = self.line.split( '\t' )
        if sample_names:
            # With samples there is an extra FORMAT column before the samples.
            assert len( self.fields ) == self.required_header_length + len( sample_names ) + 1, 'Provided VCF line (%s) has wrong length (expected: %i)' % ( self.line, self.required_header_length + len( sample_names ) + 1 )
        else:
            assert len( self.fields ) == self.required_header_length, 'Provided VCF line (%s) has wrong length (expected: %i)' % ( self.line, self.required_header_length)
        self.chrom, self.pos, self.id, self.ref, self.alt, self.qual, self.filter, self.info = self.fields[ :self.required_header_length ]
        self.pos = int( self.pos )
        self.alt = self.alt.split( ',' )
        try:
            self.qual = float( self.qual )
        except ValueError:
            # Missing data is denoted by '.'. BUG FIX: this was a bare
            # "except:", which also swallowed KeyboardInterrupt/SystemExit
            # and programming errors; only float() parse failures belong here.
            self.qual = NOT_A_NUMBER
        if len( self.fields ) > self.required_header_length:
            # Optional FORMAT column plus one colon-separated column per sample.
            self.format = self.fields[ self.required_header_length ].split( ':' )
            for sample_value in self.fields[ self.required_header_length + 1: ]:
                self.sample_values.append( sample_value.split( ':' ) )
class VariantCall40( VariantCall33 ):
    """Parser for VCFv4.0 data lines (field layout shared with v3.3)."""
    version = 'VCFv4.0'
    def __init__( self, vcf_line, metadata, sample_names ):
        VariantCall33.__init__( self, vcf_line, metadata, sample_names)
class VariantCall41( VariantCall40 ):
    """Parser for VCFv4.1 data lines (identical parsing to v4.0)."""
    version = 'VCFv4.1'
# VCF format version lookup dict, keyed by the ``##fileformat`` header value.
# FIX: the original loop variable was named ``format``, shadowing the builtin
# and leaking into the module namespace after the loop; use a private name.
VCF_FORMATS = {}
for _vcf_class in [ VariantCall33, VariantCall40, VariantCall41 ]:
    VCF_FORMATS[_vcf_class.version] = _vcf_class
class Reader( object ):
    """Iterator over the data lines of a VCF file.

    Parses the ``##`` metadata block and the ``#CHROM`` header up front, then
    yields one VariantCall (of the version announced by ``##fileformat``) per
    remaining line.
    """
    def __init__( self, fh ):
        self.vcf_file = fh
        self.metadata = {}
        self.header_fields = None
        self.metadata_len = 0  # byte offset where data lines start (used for indexing)
        self.sample_names = []
        self.vcf_class = None
        # Read file metadata.
        while True:
            line = self.vcf_file.readline()
            self.metadata_len += len( line )
            assert line, 'Invalid VCF file provided.'
            line = line.rstrip( '\r\n' )
            if self.vcf_class and line.startswith( self.vcf_class.header_startswith ):
                # read the header fields, ignoring any blank tabs, which GATK
                # VCF produces after the sample
                self.header_fields = [l for l in line.split( '\t' ) if l]
                if len( self.header_fields ) > self.vcf_class.required_header_length:
                    for sample_name in self.header_fields[ self.vcf_class.required_header_length + 1 : ]:
                        self.sample_names.append( sample_name )
                break
            assert line.startswith( '##' ), 'Non-metadata line found before header'
            line = line[2:] #strip ##
            metadata = line.split( '=', 1 )
            metadata_name = metadata[ 0 ]
            if len( metadata ) == 2:
                metadata_value = metadata[ 1 ]
            else:
                metadata_value = None
            if metadata_name in self.metadata:
                # Repeated keys (e.g. multiple INFO definitions) accumulate in a list.
                if not isinstance( self.metadata[ metadata_name ], list ):
                    self.metadata[ metadata_name ] = [ self.metadata[ metadata_name ] ]
                self.metadata[ metadata_name ].append( metadata_value )
            else:
                self.metadata[ metadata_name ] = metadata_value
            if metadata_name == 'fileformat':
                self.vcf_class = VariantCall.get_class_by_format( metadata_value )
    def next( self ):
        line = self.vcf_file.readline()
        if not line:
            raise StopIteration
        return self.vcf_class( line, self.metadata, self.sample_names )
    # BUG FIX (Python 3): the class only defined Python-2-style ``next`` and a
    # generator ``__iter__`` that re-raised StopIteration from self.next();
    # under PEP 479 (Python 3.7+) that StopIteration is converted into a
    # RuntimeError at end of file. Exposing ``__next__`` and returning self
    # from ``__iter__`` gives identical iteration semantics on Python 3.
    __next__ = next
    def __iter__( self ):
        return self
| 43 | 223 | 0.613707 |
4bbe86de84079b4cd4396e29b592a53fb5dd2755 | 8,245 | py | Python | python/pyarrow/tests/strategies.py | Root-App/arrow | 793c60a276171595dd6cc80845106aaeb9b37905 | [
"Apache-2.0"
] | 2 | 2019-08-22T10:40:17.000Z | 2021-06-30T11:43:38.000Z | python/pyarrow/tests/strategies.py | dewott-technologies/arrow | 5fa694ba787facfb22f6471262784569cdcbbe28 | [
"Apache-2.0"
] | 7 | 2020-01-31T18:29:09.000Z | 2021-12-14T21:41:59.000Z | python/pyarrow/tests/strategies.py | zettaflowio/arrow | 3a53c9b334f8a2bfa2141fa62336d5f556c9277f | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | 1 | 2019-10-14T01:44:35.000Z | 2019-10-14T01:44:35.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
import hypothesis.extra.pytz as tzst
import numpy as np
import pyarrow as pa
# TODO(kszucs): alphanum_text, surrogate_text
# Strategy for printable-ASCII names ('A'..'~'); used for field names.
custom_text = st.text(
    alphabet=st.characters(
        min_codepoint=0x41,
        max_codepoint=0x7E
    )
)
# Single-value strategies for each non-parameterized Arrow type.
null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())
binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
signed_integer_types = st.sampled_from([
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
unsigned_integer_types = st.sampled_from([
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64()
])
integer_types = st.one_of(signed_integer_types, unsigned_integer_types)
floating_types = st.sampled_from([
    pa.float16(),
    pa.float32(),
    pa.float64()
])
# decimal128 with precision/scale across Arrow's supported range.
decimal_type = st.builds(
    pa.decimal128,
    precision=st.integers(min_value=1, max_value=38),
    scale=st.integers(min_value=1, max_value=38)
)
numeric_types = st.one_of(integer_types, floating_types, decimal_type)
date_types = st.sampled_from([
    pa.date32(),
    pa.date64()
])
time_types = st.sampled_from([
    pa.time32('s'),
    pa.time32('ms'),
    pa.time64('us'),
    pa.time64('ns')
])
# Timestamps with any unit and any pytz timezone.
timestamp_types = st.builds(
    pa.timestamp,
    unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
    tz=tzst.timezones()
)
temporal_types = st.one_of(date_types, time_types, timestamp_types)
# Union of all non-nested Arrow types defined above.
primitive_types = st.one_of(
    null_type,
    bool_type,
    binary_type,
    string_type,
    large_binary_type,
    large_string_type,
    numeric_types,
    temporal_types
)
# Strategy for field/schema metadata dictionaries.
metadata = st.dictionaries(st.text(), st.text())
def fields(type_strategy=primitive_types):
    """Strategy producing ``pa.field`` objects with random name, type,
    nullability and metadata drawn from *type_strategy*."""
    return st.builds(
        pa.field,
        name=custom_text,
        type=type_strategy,
        nullable=st.booleans(),
        metadata=metadata,
    )
def list_types(item_strategy=primitive_types):
    """Strategy for ``list_`` / ``large_list`` types over *item_strategy*."""
    regular = st.builds(pa.list_, item_strategy)
    large = st.builds(pa.large_list, item_strategy)
    return regular | large
def struct_types(item_strategy=primitive_types):
    """Strategy for struct types whose child fields draw from *item_strategy*."""
    child_fields = st.lists(fields(item_strategy))
    return st.builds(pa.struct, child_fields)
def complex_types(inner_strategy=primitive_types):
    """Union of list-like and struct types built over *inner_strategy*."""
    return st.one_of(list_types(inner_strategy), struct_types(inner_strategy))
def nested_list_types(item_strategy=primitive_types, max_leaves=3):
    """Recursively nested list types with at most *max_leaves* leaf draws."""
    return st.recursive(base=item_strategy, extend=list_types,
                        max_leaves=max_leaves)
def nested_struct_types(item_strategy=primitive_types, max_leaves=3):
    """Recursively nested struct types with at most *max_leaves* leaf draws."""
    return st.recursive(base=item_strategy, extend=struct_types,
                        max_leaves=max_leaves)
def nested_complex_types(inner_strategy=primitive_types, max_leaves=3):
    """Recursively nested list/struct types with at most *max_leaves* leaf draws."""
    return st.recursive(base=inner_strategy, extend=complex_types,
                        max_leaves=max_leaves)
def schemas(type_strategy=primitive_types, max_fields=None):
    """Strategy for ``pa.schema`` objects with up to *max_fields* fields."""
    field_lists = st.lists(fields(type_strategy), max_size=max_fields)
    return st.builds(pa.schema, field_lists)
complex_schemas = schemas(complex_types())
# Catch-all strategies covering primitive, complex and nested types.
all_types = st.one_of(primitive_types, complex_types(), nested_complex_types())
all_fields = fields(all_types)
all_schemas = schemas(all_types)
# Default element count for generated arrays (kept small for test speed).
_default_array_sizes = st.integers(min_value=0, max_value=20)
@st.composite
def arrays(draw, type, size=None):
    """Composite strategy drawing a ``pa.array`` of the given type and size.

    ``type`` and ``size`` may themselves be strategies, in which case a
    concrete value is drawn from them first.
    """
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')
    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')
    shape = (size,)
    if pa.types.is_list(type) or pa.types.is_large_list(type):
        # Random monotonically non-decreasing offsets; the child values array
        # is sized to the final offset so the list layout is always valid.
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        array_type = (
            pa.LargeListArray if pa.types.is_large_list(type)
            else pa.ListArray)
        return array_type.from_arrays(offsets, values)
    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        # One child array per struct field, each of the same length.
        fields, child_arrays = [], []
        for field in type:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)
    if (pa.types.is_boolean(type) or pa.types.is_integer(type) or
            pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size,))
        np_arr = draw(values)
        if pa.types.is_floating(type):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=type)
    # Remaining types: draw a plain Python list of suitable values.
    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_binary(type) or pa.types.is_large_binary(type):
        value = st.binary()
    elif pa.types.is_string(type) or pa.types.is_large_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)
    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
@st.composite
def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
    """Composite strategy drawing a ``pa.chunked_array`` of chunks of *type*."""
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    # TODO(kszucs): remove it, field metadata is not kept
    h.assume(not pa.types.is_struct(type))
    chunk = arrays(type, size=chunk_size)
    chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)
    return pa.chunked_array(draw(chunks), type=type)
@st.composite
def record_batches(draw, type, rows=None, max_fields=None):
    """Composite strategy drawing a ``pa.RecordBatch`` with *rows* rows."""
    if isinstance(rows, st.SearchStrategy):
        rows = draw(rows)
    elif rows is None:
        rows = draw(_default_array_sizes)
    elif not isinstance(rows, int):
        raise TypeError('Rows must be an integer')
    schema = draw(schemas(type, max_fields=max_fields))
    # One equally-sized column per schema field.
    children = [draw(arrays(field.type, size=rows)) for field in schema]
    # TODO(kszucs): the names and schame arguments are not consistent with
    # Table.from_array's arguments
    return pa.RecordBatch.from_arrays(children, names=schema)
@st.composite
def tables(draw, type, rows=None, max_fields=None):
    """Composite strategy drawing a ``pa.Table`` with *rows* rows."""
    if isinstance(rows, st.SearchStrategy):
        rows = draw(rows)
    elif rows is None:
        rows = draw(_default_array_sizes)
    elif not isinstance(rows, int):
        raise TypeError('Rows must be an integer')
    schema = draw(schemas(type, max_fields=max_fields))
    # One equally-sized column per schema field.
    children = [draw(arrays(field.type, size=rows)) for field in schema]
    return pa.Table.from_arrays(children, schema=schema)
# Pre-built strategies over every supported type, for direct use in @given.
all_arrays = arrays(all_types)
all_chunked_arrays = chunked_arrays(all_types)
all_record_batches = record_batches(all_types)
all_tables = tables(all_types)
| 31.231061 | 79 | 0.699454 |
0dd1943032afae16ac1ad240e84e949ac71b0b2a | 3,572 | py | Python | django/docs/releases/1.6.10.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | django/docs/releases/1.6.10.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | django/docs/releases/1.6.10.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXX XXX XXXXX
XXXXXX XXXXXX XXXXX XXXXXXX XXXXXXXX XXXXXX XX XXXXXX
XXXX XXXXXX XXXXXXXX XXX XXXXXXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXX XXXXXXX XXX XXXXXX XXXX XXX XXXX XXXXXXXX XXXX XXX XXXXXXXXXX XX
XXXXXXXXXX XX XXXXXXXXXX XXXXXXXXXX XXX XXXXXX XX XXXXXXXXXXXX XXX XXXXXXXXXX
XXXXXXXXXX XXX XXXXXXXXX X XXXXXX XXXXXXXXXXXXXXX XXXXX XXXXXX
XXXXXXXXXXXXXXXXXXXX XX XXX XXXX XXXXXXX XXXX XXXX XXXX XX XXXXXXXX
XXXXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXX XXXX XXXXX XXXX XXX XXXX XXXXXXX XXXXXX XXXXXXXXXXX XXXXXXX
XXXXXXX XXXXXXXXXX XXXXXX XXX XXXXXXX XXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXX XXXX XXXXXX XXXXXXXXXXXXXXXXXXXXX XXXX XXXXX XXXX XX X
XXXXXX XX XXXX XX X XXXXXXXXXXXXXXXXXX XXX XXXX XXXXXXXXX XXXXXXX
XXXXXXXXXXXXXX XXXXXXXXXXX XXXXX XXXX X XXXXXXXXX XXXXXXX XXXX XX XXX XXXXX
XXXXXXXXX XXXXXX XXX XXXXXXXX XXXXX XXX XXXXXXXXXXXXXXXX XX XXXXXXXX XXX XX
XXXX XX XXXXXXX XX XXXXXXXXXXXXXXX XXXXXX XXXXX XXXXXXXXXXX XXX XXXXXX XXXX
XXXXXXXXXXX
XX XXXXX XX XXXXXXX XXXX XXXXXXXX XXXX XXXXX XXX XXXXXX XXXX XXXXX XXX XXXXXXX
XXXXXXXXXX XXXXXXXXXXX XXXX XXXXXXXX XXXXXXXX XX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXXXXX XXXXXX XXX XXXX XXX XXXXX XXXXXXXX XXXXXXXXXXX XXXXXX XX XXX
XXXXXXXXXXX XXX XXXXXXXXXX XXXX XXX XXXXXXXX XXX XXXXXXXX XX XXXXXX XXXXXXXXXX
XXXXXXX XXXXXXX XXX XXXXXXX XXXX XXX XXXXXXXX XXXXXXX XXXXXX XXXXXXXXXXX
XXXXXXXXX XXXXXXXX XXX XXXXXX XXX XXXXXXXXXXXXX XXXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXX XX XXXX XXXXX XX XXXX XXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXX XXXX XX XX XXX XXXXXXXX XXXX XXX XXXXXXXX XXXXXX XXX XXXXX
XXXXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXX XXXXXXX
XXXXXXXXXX XX XXX XXXXXX XXX XXX XX XXXX XXXXXXXXXX XXXX XXXX
XXXXXXXXXXXXXXXXXXXX XXXXX XX X XXXXXXXXX XXXXXX XX XXXXXXXXXXXXXXXXX XX
XXXXXXX XXXX XXXXXXXX XXXXXXX XXX XXX XXXX X XXX XXXX X XXXXX XXXX XXXXX XXXXXX
XXXX X XXX XXXXXXX XXXX XXX XXXXXXX XXXXXX XXXXXX XXXXXXXXXX XXXXX XX XXXX XXX
XXXX XXX XXXX XXX XXXXXXXXXXXX XXXXXXXX XXXXXX XXX XXXXXXXX XXXX XX XXXXXX
XXXXXXXXXX XXXXXX
XXXXXXXXXXXXXXXXX XXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XXXXXXXX XX XXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXX
XXX XXXXX XX XXXXXX XXX XXXX XX X XXXXX XXXXXXXXXX X XXX XXXX XXXX XX XXXXXXXX
XXXXX XXXXXX XX XXXXXX XXXXX XXXXX XX XXX XXXX XX XXXX XXXXX XX XXXXXXXX XXXXX
XXXXXXX XXXX XXX XXXXXX X XXXXXXXXXXXXXXXXX XXXXXX XX XXXXXXXXXXXXXX XXXXXXXXXX
XXXX XXXXX XXXXXX XXXX XXXX XXX XXXXX XXX XXXX XX XXXXXX XX XXXXXXX XXXXX
XXXXXX XXXXXX
XXXXX XXXXXXXX XXXX XXXX XXXX XXX XXXXXX XXXXXXX X XXXXXXX XXXX XX XX XXX
XXXXXXXX XXX XXXXXXXXXX XXX XXX XXXXXX XX XXXX XXXX XX X XXXXXXXXXXX XXXX XXX
XXX XX X XXXX XXXX XX XXXXX XXXX XXXXXXX XXX XXXXX XXXX XXXXX XX XXXXXXXXXX
XXXXX X XXXX XXXXXXXXX XXX XXXXXX XX XXX XXX XXX XXXXX XXX
XXXXXXXX XXXXXXXXXXXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX X XXXX XXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX X XXXXXXXXXX XXXXX XX XXX XXXXXXXX XXX X XXXX
XX XXXXX XX XXXXXXXXXXXX XXXXXX XX XXX XXXXXXX XX XXXXXXXXXX XXXXXXXXX XXXXXX
XXX XXX XXXXXXX XXXXX XXX XXXXXXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXX XXXXXXXXX XXXXXX XX XXXXXXX XXXX XXXXXX
| 51.028571 | 79 | 0.862822 |
3afc7639dfde295451edef13273cd0f33030f2d1 | 8,431 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20190701/get_connection_monitor.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20190701/get_connection_monitor.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20190701/get_connection_monitor.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetConnectionMonitorResult',
    'AwaitableGetConnectionMonitorResult',
    'get_connection_monitor',
]
@pulumi.output_type
class GetConnectionMonitorResult:
    """
    Information about the connection monitor.
    """
    # NOTE: generated by the Pulumi SDK generator. __init__ type-checks each
    # argument and stores it via pulumi.set(); the @pulumi.getter properties
    # below read the values back with pulumi.get().
    def __init__(__self__, auto_start=None, destination=None, etag=None, id=None, location=None, monitoring_interval_in_seconds=None, monitoring_status=None, name=None, provisioning_state=None, source=None, start_time=None, tags=None, type=None):
        if auto_start and not isinstance(auto_start, bool):
            raise TypeError("Expected argument 'auto_start' to be a bool")
        pulumi.set(__self__, "auto_start", auto_start)
        if destination and not isinstance(destination, dict):
            raise TypeError("Expected argument 'destination' to be a dict")
        pulumi.set(__self__, "destination", destination)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if monitoring_interval_in_seconds and not isinstance(monitoring_interval_in_seconds, int):
            raise TypeError("Expected argument 'monitoring_interval_in_seconds' to be a int")
        pulumi.set(__self__, "monitoring_interval_in_seconds", monitoring_interval_in_seconds)
        if monitoring_status and not isinstance(monitoring_status, str):
            raise TypeError("Expected argument 'monitoring_status' to be a str")
        pulumi.set(__self__, "monitoring_status", monitoring_status)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if source and not isinstance(source, dict):
            raise TypeError("Expected argument 'source' to be a dict")
        pulumi.set(__self__, "source", source)
        if start_time and not isinstance(start_time, str):
            raise TypeError("Expected argument 'start_time' to be a str")
        pulumi.set(__self__, "start_time", start_time)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="autoStart")
    def auto_start(self) -> Optional[bool]:
        """
        Determines if the connection monitor will start automatically once created.
        """
        return pulumi.get(self, "auto_start")
    @property
    @pulumi.getter
    def destination(self) -> 'outputs.ConnectionMonitorDestinationResponse':
        """
        Describes the destination of connection monitor.
        """
        return pulumi.get(self, "destination")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        ID of the connection monitor.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Connection monitor location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="monitoringIntervalInSeconds")
    def monitoring_interval_in_seconds(self) -> Optional[int]:
        """
        Monitoring interval in seconds.
        """
        return pulumi.get(self, "monitoring_interval_in_seconds")
    @property
    @pulumi.getter(name="monitoringStatus")
    def monitoring_status(self) -> Optional[str]:
        """
        The monitoring status of the connection monitor.
        """
        return pulumi.get(self, "monitoring_status")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the connection monitor.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the connection monitor.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def source(self) -> 'outputs.ConnectionMonitorSourceResponse':
        """
        Describes the source of connection monitor.
        """
        return pulumi.get(self, "source")
    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        """
        The date and time when the connection monitor was started.
        """
        return pulumi.get(self, "start_time")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Connection monitor tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Connection monitor type.
        """
        return pulumi.get(self, "type")
class AwaitableGetConnectionMonitorResult(GetConnectionMonitorResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead ``yield`` marks this method as a generator, which makes
        # the object awaitable; awaiting it immediately produces a plain
        # (non-awaitable) copy of the result.
        if False:
            yield self
        return GetConnectionMonitorResult(
            auto_start=self.auto_start,
            destination=self.destination,
            etag=self.etag,
            id=self.id,
            location=self.location,
            monitoring_interval_in_seconds=self.monitoring_interval_in_seconds,
            monitoring_status=self.monitoring_status,
            name=self.name,
            provisioning_state=self.provisioning_state,
            source=self.source,
            start_time=self.start_time,
            tags=self.tags,
            type=self.type)
def get_connection_monitor(connection_monitor_name: Optional[str] = None,
                           network_watcher_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionMonitorResult:
    """
    Information about the connection monitor.

    :param str connection_monitor_name: The name of the connection monitor.
    :param str network_watcher_name: The name of the Network Watcher resource.
    :param str resource_group_name: The name of the resource group containing Network Watcher.
    :param pulumi.InvokeOptions opts: Options for the underlying invoke call (version, provider, ...).
    """
    __args__ = dict()
    __args__['connectionMonitorName'] = connection_monitor_name
    __args__['networkWatcherName'] = network_watcher_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous RPC into the Azure provider; ``.value`` carries the typed result.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20190701:getConnectionMonitor', __args__, opts=opts, typ=GetConnectionMonitorResult).value
    return AwaitableGetConnectionMonitorResult(
        auto_start=__ret__.auto_start,
        destination=__ret__.destination,
        etag=__ret__.etag,
        id=__ret__.id,
        location=__ret__.location,
        monitoring_interval_in_seconds=__ret__.monitoring_interval_in_seconds,
        monitoring_status=__ret__.monitoring_status,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        source=__ret__.source,
        start_time=__ret__.start_time,
        tags=__ret__.tags,
        type=__ret__.type)
9a312df30ea37c34cdfe096f401ba98b0cb835bf | 1,571 | py | Python | utils/pretrained_utils.py | EliasKassapis/CAR | ff7ec86aab68c4b9ff8aea171244991bd132d487 | [
"Apache-2.0"
] | 17 | 2020-06-23T20:59:07.000Z | 2021-01-06T11:36:05.000Z | utils/pretrained_utils.py | EliasKassapis/CAR | ff7ec86aab68c4b9ff8aea171244991bd132d487 | [
"Apache-2.0"
] | 5 | 2021-03-19T13:28:40.000Z | 2022-03-12T00:34:32.000Z | utils/pretrained_utils.py | EliasKassapis/CAR | ff7ec86aab68c4b9ff8aea171244991bd132d487 | [
"Apache-2.0"
] | 1 | 2020-07-05T20:59:05.000Z | 2020-07-05T20:59:05.000Z | from utils.constants import *
import torch.nn as nn
import torchvision.models.vgg as vgg
import torchvision
def deeplabv3_segmentation(n_classes, pretrained=False):
    """DeepLabV3 with a ResNet-101 backbone for semantic segmentation, on DEVICE.

    n_classes -- number of output segmentation channels.
    pretrained -- load torchvision pretrained weights. Defaults to False,
    matching the previously hard-coded value; the new parameter makes this
    factory consistent with resnet101_segmentation/resnet50/googlenet.
    """
    model = torchvision.models.segmentation.deeplabv3_resnet101(
        pretrained=pretrained, progress=True, num_classes=n_classes, aux_loss=None)
    model = model.to(DEVICE)
    return model
def resnet50_segmentation(n_classes, pretrained=False):
    """FCN with a ResNet-50 backbone for semantic segmentation, on DEVICE.

    n_classes -- number of output segmentation channels.
    pretrained -- load torchvision pretrained weights. Defaults to False,
    matching the previously hard-coded value; the new parameter makes this
    factory consistent with resnet101_segmentation/resnet50/googlenet.
    """
    model = torchvision.models.segmentation.fcn_resnet50(
        pretrained=pretrained, progress=True, num_classes=n_classes, aux_loss=None)
    model = model.to(DEVICE)
    return model
def resnet101_segmentation(n_classes, pretrained=False):
    """FCN with a ResNet-101 backbone, *n_classes* output channels, on DEVICE."""
    net = torchvision.models.segmentation.fcn_resnet101(
        pretrained=pretrained, progress=True, num_classes=n_classes, aux_loss=None)
    return net.to(DEVICE)
def resnet50(pretrained=False):
    """Plain torchvision ResNet-50 classifier, moved to DEVICE."""
    return torchvision.models.resnet50(pretrained=pretrained, progress=True).to(DEVICE)
def googlenet(pretrained=False):
    """Torchvision GoogLeNet classifier, moved to DEVICE."""
    return torchvision.models.googlenet(pretrained=pretrained, progress=True).to(DEVICE)
def VGG_19():
    """Pretrained VGG-19, on DEVICE, switched to eval mode (feature extraction)."""
    network = vgg.vgg19(pretrained=True).to(DEVICE)
    return network.eval()
def inceptionv3(pretrained=False):
    """Torchvision Inception-v3 classifier, moved to DEVICE."""
    return torchvision.models.inception_v3(pretrained=pretrained, progress=True).to(DEVICE)
if __name__ == '__main__':
    # Smoke test: build GoogLeNet and widen its stem conv to 17 input channels.
    model = googlenet(pretrained=False)
    model.conv1.conv = nn.Conv2d(17, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(DEVICE)
    print(model)
| 28.563636 | 134 | 0.745385 |
215301f912708d81c7145426d9438837cae4c3b2 | 2,598 | py | Python | Statistical_Machine_Learning/6/SML6_python/1.py | yoshi-ki/BACHELOR | 65d01c62ab2ea4a6d2616a6b6c535bd4f1645630 | [
"MIT"
] | null | null | null | Statistical_Machine_Learning/6/SML6_python/1.py | yoshi-ki/BACHELOR | 65d01c62ab2ea4a6d2616a6b6c535bd4f1645630 | [
"MIT"
] | null | null | null | Statistical_Machine_Learning/6/SML6_python/1.py | yoshi-ki/BACHELOR | 65d01c62ab2ea4a6d2616a6b6c535bd4f1645630 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
np.random.seed(0)
def data_generate(n=3000):
    """Draw *n* samples from the piecewise target density on [0, 5] by
    inverse-CDF sampling of uniform variates."""
    samples = np.zeros(n)
    u = np.random.rand(n)
    # Each branch inverts the CDF on one sub-interval of u.
    m = (0 <= u) & (u < 1 / 8)
    samples[m] = np.sqrt(8 * u[m])
    m = (1 / 8 <= u) & (u < 1 / 4)
    samples[m] = 2 - np.sqrt(2 - 8 * u[m])
    m = (1 / 4 <= u) & (u < 1 / 2)
    samples[m] = 1 + 4 * u[m]
    m = (1 / 2 <= u) & (u < 3 / 4)
    samples[m] = 3 + np.sqrt(4 * u[m] - 2)
    m = (3 / 4 <= u) & (u <= 1)
    samples[m] = 5 - np.sqrt(4 - 4 * u[m])
    return samples
def kernel_function(x):
    """Standard normal (Gaussian) kernel evaluated at *x* (scalar or array)."""
    return np.exp(-0.5 * x * x) / np.sqrt(2.0 * np.pi)
def p_hat(x, train_data, h):
    """Kernel density estimate at *x* built from *train_data* with
    bandwidth *h* (x may be a scalar or an array; broadcasting applies)."""
    total = sum(kernel_function((x - xi) / h) for xi in train_data)
    return total / (len(train_data) * h)
def kernel_density_method(data, h):
    """Overlay the bandwidth-*h* KDE of *data* on its histogram and save
    the figure to h<100*h>.png."""
    plt.figure()
    grid = np.arange(0, 5, h)
    density = p_hat(grid, data, h)
    plt.hist(data, bins=30, density=True)
    plt.plot(grid, density)
    plt.savefig("h" + str(int(h * 100)) + ".png")
    return
def cross_validation(data, n):
    """Choose a KDE bandwidth by *n*-fold cross-validated log-likelihood.

    For each candidate bandwidth, the shuffled data is split into n folds;
    each fold in turn is held out, the KDE is fit on the rest, and the mean
    log-density of the held-out points is accumulated. The per-bandwidth
    scores are plotted and saved to cross_validation.png.
    """
    hs = [0.01, 0.1, 0.5]
    hscore = np.array([])
    # Shuffle once with fancy indexing. The original appended one element
    # per iteration with np.append, which reallocates the array every time
    # and is O(len(data)^2); data[permutation] is the O(n) equivalent.
    permutation = np.random.permutation(len(data))
    ndata = np.asarray(data)[permutation]
    subset_size = int(len(ndata) / n)
    for h in hs:
        score = 0
        for i in range(n):
            # Fold i is the test set; everything else is the train set.
            train_data = np.append(ndata[0:(i * subset_size)], ndata[((i + 1) * subset_size):])
            test_data = ndata[(i * subset_size):((i + 1) * subset_size)]
            # Mean held-out log-likelihood for this fold.
            tempscore = 0
            for x in test_data:
                tempscore = tempscore + np.log(p_hat(x, train_data, h))
            tempscore = tempscore / len(test_data)
            score = score + tempscore
        score = score / n
        hscore = np.append(hscore, score)
    # Plot score vs. bandwidth so the best h can be read off.
    plt.figure()
    plt.plot(hs, hscore)
    plt.savefig("cross_validation.png")
    return
if __name__ == "__main__":
n = 3000
data = data_generate(n)
kernel_density_method(data,0.01)
kernel_density_method(data,0.1)
kernel_density_method(data,0.5)
cross_validation(data,5)
| 27.347368 | 88 | 0.573133 |
3409b921156601ac6a3df41d8b6106abb20ac55e | 9,244 | py | Python | sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_takebag_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 5 | 2018-05-07T19:58:08.000Z | 2021-04-21T10:49:05.000Z | sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_takebag_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 21 | 2017-05-26T01:20:06.000Z | 2021-01-26T23:03:36.000Z | sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_takebag_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 2 | 2019-07-22T07:21:20.000Z | 2019-11-11T20:49:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.set_gripper_state import SetGripperState
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_states.run_trajectory import RunTrajectory
from sara_flexbe_states.torque_reader import ReadTorque
from sara_flexbe_states.sara_say import SaraSay
from flexbe_states.wait_state import WaitState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu May 16 2019
@author: Quentin Gaillot
'''
class Action_TakeBagSM(Behavior):
    """FlexBE behavior: grab the garbage bag for the 'take out the garbage'
    scenario.

    Auto-generated state machine (see the file header warning): only edit
    inside the [MANUAL_*] tags. The top-level flow is:
    head down -> open gripper -> lower arm until shoulder torque threshold
    -> close gripper -> lift arm -> verify the bag is held (elbow torque
    during a test trajectory) -> move to transport pose; on a miss, retry
    from 'open gripper'.
    """

    def __init__(self):
        super(Action_TakeBagSM, self).__init__()
        self.name = 'Action_TakeBag'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Build and return the hierarchical state machine.

        The nested machines are created bottom-up: leaf trajectory/torque
        machines first, then the concurrency containers that combine them,
        and finally the top-level machine. The `# x:.. y:..` comments are
        FlexBE editor layout coordinates — keep them for the GUI.
        """
        # x:825 y:465, x:801 y:48
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Waits, then blocks until the shoulder-pitch torque shows the arm
        # has pressed down onto something.
        # x:30 y:458, x:130 y:458
        _sm_torque_control_0 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_torque_control_0:
            # x:126 y:51
            OperatableStateMachine.add('w2',
                                       WaitState(wait_time=5),
                                       transitions={'done': 'check torque'},
                                       autonomy={'done': Autonomy.Off})

            # x:120 y:195
            OperatableStateMachine.add('check torque',
                                       ReadTorque(watchdog=20, Joint="right_shoulder_pitch_joint", Threshold=2.5, min_time=0.5),
                                       transitions={'threshold': 'finished', 'watchdog': 'check torque', 'fail': 'check torque'},
                                       autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'torque': 'torque'})

        # Arm-lowering approach trajectory.
        # x:30 y:458, x:130 y:458
        _sm_trajectory_down_1 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_trajectory_down_1:
            # x:68 y:158
            OperatableStateMachine.add('run down',
                                       RunTrajectory(file="poubelle_app", duration=16),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

        # Grasp check: run a test trajectory while watching elbow torque;
        # 'finished' means the elbow felt the bag's load.
        # x:30 y:458, x:130 y:458, x:230 y:458, x:330 y:458, x:430 y:458, x:530 y:458
        _sm_container_2 = ConcurrencyContainer(outcomes=['finished', 'failed'], conditions=[
                                               ('failed', [('test', 'done')]),
                                               ('finished', [('tor', 'threshold')]),
                                               ('failed', [('tor', 'watchdog')]),
                                               ('failed', [('tor', 'fail')])
                                               ])

        with _sm_container_2:
            # x:67 y:108
            OperatableStateMachine.add('test',
                                       RunTrajectory(file="poubelle_valide", duration=7),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off})

            # x:327 y:130
            OperatableStateMachine.add('tor',
                                       ReadTorque(watchdog=7, Joint="right_elbow_yaw_joint", Threshold=1.6, min_time=1),
                                       transitions={'threshold': 'finished', 'watchdog': 'failed', 'fail': 'failed'},
                                       autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'torque': 'torque'})

        # x:30 y:365, x:130 y:365
        _sm_trajectory_to_transport_pose_3 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_trajectory_to_transport_pose_3:
            # x:30 y:40
            OperatableStateMachine.add('trajectory to transport pose',
                                       RunTrajectory(file="poubelle_transport", duration=0),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

        # Lower the arm while monitoring shoulder torque in parallel;
        # whichever branch finishes first decides the outcome.
        # x:479 y:236, x:469 y:51, x:462 y:111, x:471 y:160, x:430 y:365, x:530 y:365
        _sm_trajectory_down_with_torque_limit_4 = ConcurrencyContainer(outcomes=['finished', 'failed'], conditions=[
                                                  ('finished', [('torque control', 'finished')]),
                                                  ('finished', [('trajectory down', 'finished')]),
                                                  ('failed', [('trajectory down', 'failed')]),
                                                  ('failed', [('torque control', 'failed')])
                                                  ])

        with _sm_trajectory_down_with_torque_limit_4:
            # x:109 y:63
            OperatableStateMachine.add('trajectory down',
                                       _sm_trajectory_down_1,
                                       transitions={'finished': 'finished', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:102 y:211
            OperatableStateMachine.add('torque control',
                                       _sm_torque_control_0,
                                       transitions={'finished': 'finished', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

        # Grasp verification plus spoken feedback.
        # x:30 y:458, x:130 y:458
        _sm_has_bag_in_gripper_5 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_has_bag_in_gripper_5:
            # x:191 y:132
            OperatableStateMachine.add('Container',
                                       _sm_container_2,
                                       transitions={'finished': 'ok', 'failed': 'bad'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:18 y:266
            OperatableStateMachine.add('ok',
                                       SaraSay(sentence="Yay! I got it!", input_keys=[], emotion=6, block=True),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

            # x:238 y:279
            OperatableStateMachine.add('bad',
                                       SaraSay(sentence="Woops! I missed!", input_keys=[], emotion=3, block=True),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off})

        # x:30 y:458, x:130 y:458
        _sm_trajectory_up_6 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_trajectory_up_6:
            # x:71 y:122
            OperatableStateMachine.add('trajectory up',
                                       RunTrajectory(file="poubelle_eloigne", duration=10),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

        # Close the gripper; retry once if nothing was detected between the
        # fingers.
        # x:30 y:458, x:130 y:458
        _sm_close_gripper_7 = OperatableStateMachine(outcomes=['finished', 'failed'])

        with _sm_close_gripper_7:
            # x:79 y:177
            OperatableStateMachine.add('close gripper',
                                       SetGripperState(width=0, effort=1),
                                       transitions={'object': 'finished', 'no_object': 'retry close'},
                                       autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
                                       remapping={'object_size': 'object_size'})

            # x:272 y:237
            OperatableStateMachine.add('retry close',
                                       ForLoop(repeat=1),
                                       transitions={'do': 'close gripper', 'end': 'failed'},
                                       autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
                                       remapping={'index': 'index'})

        with _state_machine:
            # x:89 y:35
            OperatableStateMachine.add('head down',
                                       SaraSetHeadAngle(pitch=0.8, yaw=0),
                                       transitions={'done': 'open gripper'},
                                       autonomy={'done': Autonomy.Off})

            # x:484 y:164
            OperatableStateMachine.add('close gripper',
                                       _sm_close_gripper_7,
                                       transitions={'finished': 'trajectory up', 'failed': 'trajectory up'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:473 y:300
            OperatableStateMachine.add('trajectory up',
                                       _sm_trajectory_up_6,
                                       transitions={'finished': 'wait a little bit', 'failed': 'wait a little bit'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:232 y:434
            OperatableStateMachine.add('has bag in gripper',
                                       _sm_has_bag_in_gripper_5,
                                       transitions={'finished': 'trajectory to transport pose', 'failed': 'open gripper'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:467 y:33
            OperatableStateMachine.add('trajectory down with torque limit',
                                       _sm_trajectory_down_with_torque_limit_4,
                                       transitions={'finished': 'close gripper', 'failed': 'head down'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:283 y:83
            OperatableStateMachine.add('open gripper',
                                       SetGripperState(width=0.12, effort=0),
                                       transitions={'object': 'trajectory down with torque limit', 'no_object': 'trajectory down with torque limit'},
                                       autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
                                       remapping={'object_size': 'object_size'})

            # x:557 y:454
            OperatableStateMachine.add('trajectory to transport pose',
                                       _sm_trajectory_to_transport_pose_3,
                                       transitions={'finished': 'finished', 'failed': 'finished'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:356 y:357
            OperatableStateMachine.add('wait a little bit',
                                       WaitState(wait_time=1),
                                       transitions={'done': 'has bag in gripper'},
                                       autonomy={'done': Autonomy.Off})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| 35.968872 | 120 | 0.643444 |
61202e0ae8222260ffb41579a8e0c773f80abd9b | 30 | py | Python | ecs/components/healingpotion.py | joehowells/7drl2020 | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
] | null | null | null | ecs/components/healingpotion.py | joehowells/7drl2020 | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
] | 2 | 2020-03-25T10:30:31.000Z | 2020-03-25T20:13:43.000Z | ecs/components/healingpotion.py | joehowells/two-button-berserker | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
] | null | null | null | class HealingPotion:
pass
| 10 | 20 | 0.733333 |
2d4bd0e59b872fcd5c72efcecc82578c4dd53fa2 | 499 | py | Python | library/opcode.py | creativemindplus/skybison | d1740e08d8de85a0a56b650675717da67de171a0 | [
"CNRI-Python-GPL-Compatible"
] | 278 | 2021-08-31T00:46:51.000Z | 2022-02-13T19:43:28.000Z | library/opcode.py | creativemindplus/skybison | d1740e08d8de85a0a56b650675717da67de171a0 | [
"CNRI-Python-GPL-Compatible"
] | 9 | 2021-11-05T22:28:43.000Z | 2021-11-23T08:39:04.000Z | library/opcode.py | tekknolagi/skybison | bea8fc2af0a70e7203b4c19f36c14a745512a335 | [
"CNRI-Python-GPL-Compatible"
] | 12 | 2021-08-31T07:49:54.000Z | 2021-10-08T01:09:01.000Z | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# Public API: this stub mirrors CPython's `opcode` module interface by
# re-exporting everything from the runtime's _compiler_opcode module.
__all__ = [  # noqa: F405
    "cmp_op",
    "hasconst",
    "hasname",
    "hasjrel",
    "hasjabs",
    "haslocal",
    "hascompare",
    "hasfree",
    "opname",
    "opmap",
    "HAVE_ARGUMENT",
    "EXTENDED_ARG",
]
# _compiler_opcode is provided by the host runtime (not the stdlib).
from _compiler_opcode import (
    cmp_op,
    hasconst,
    hasname,
    hasjrel,
    hasjabs,
    haslocal,
    hascompare,
    hasfree,
    opname,
    opmap,
    HAVE_ARGUMENT,
    EXTENDED_ARG,
)
| 16.096774 | 76 | 0.577154 |
0750a8ed13880d09cbd7b4e84636a76f2d4adea1 | 7,596 | py | Python | tests/normal/models/sir_test.py | DarkShadeKnigh/chime_sims | 1e2994a9f48d17e531a6d9b5129392dcdfe8b6b7 | [
"MIT"
] | 24 | 2020-04-14T01:05:37.000Z | 2021-07-12T17:50:36.000Z | tests/normal/models/sir_test.py | DarkShadeKnigh/chime_sims | 1e2994a9f48d17e531a6d9b5129392dcdfe8b6b7 | [
"MIT"
] | 44 | 2020-04-09T17:46:36.000Z | 2020-12-18T22:22:16.000Z | tests/normal/models/sir_test.py | DarkShadeKnigh/chime_sims | 1e2994a9f48d17e531a6d9b5129392dcdfe8b6b7 | [
"MIT"
] | 23 | 2020-04-08T22:41:36.000Z | 2021-03-02T21:34:29.000Z | """Tests for SIR model in this repo
* Compares conserved quantities
* Compares model against Penn CHIME w/wo social policies
* Checks logistic policies in extreme limit
"""
from typing import Tuple
from datetime import date, timedelta
from pytest import fixture
from pandas import DataFrame, Series, DatetimeIndex
from pandas.testing import assert_frame_equal, assert_series_equal
from penn_chime.model.parameters import Parameters, Disposition
from penn_chime.model.sir import (
Sir,
sim_sir,
calculate_dispositions,
calculate_admits,
calculate_census,
)
from bayes_chime.normal.models import SIRModel
from bayes_chime.normal.utilities import one_minus_logistic_fcn
PENN_CHIME_COMMIT = "188c35be9561164bedded4a8071a320cbde0d2bc"
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
"hospital_admits",
# Does not compare census as this repo uses the exponential distribution
]
COLUMN_MAP = {
"hospitalized": "hospital_admits",
}
@fixture(name="penn_chime_setup")
def fixture_penn_chime_setup() -> Tuple[Parameters, Sir]:
"""Initializes penn_chime parameters and SIR model
"""
p = Parameters(
current_hospitalized=69,
date_first_hospitalized=date(2020, 3, 7),
doubling_time=None,
hospitalized=Disposition.create(days=7, rate=0.025),
icu=Disposition.create(days=9, rate=0.0075),
infectious_days=14,
market_share=0.15,
n_days=100,
population=3600000,
recovered=0,
relative_contact_rate=0.3,
ventilated=Disposition.create(days=10, rate=0.005),
)
return p, Sir(p)
@fixture(name="penn_chime_raw_df_no_policy")
def fixture_penn_chime_raw_df_no_policy(penn_chime_setup) -> DataFrame:
"""Runs penn_chime SIR model for no social policies
"""
p, simsir = penn_chime_setup
n_days = simsir.raw_df.day.max() - simsir.raw_df.day.min()
policies = [(simsir.beta, n_days)]
raw = sim_sir(
simsir.susceptible,
simsir.infected,
p.recovered,
simsir.gamma,
-simsir.i_day,
policies,
)
calculate_dispositions(raw, simsir.rates, market_share=p.market_share)
calculate_admits(raw, simsir.rates)
calculate_census(raw, simsir.days)
raw_df = DataFrame(raw)
raw_df.index = simsir.raw_df.date
return raw_df.fillna(0)
@fixture(name="sir_data_wo_policy")
def fixture_sir_data_wo_policy(penn_chime_setup, penn_chime_raw_df_no_policy):
"""Provides data for local sir module
"""
p, simsir = penn_chime_setup
raw_df = penn_chime_raw_df_no_policy
day0 = raw_df.iloc[0].fillna(0)
total = day0.susceptible + day0.infected + day0.recovered
pars = {
"beta": simsir.beta * total, # This repo uses S/total in sir
"gamma": simsir.gamma,
"initial_susceptible": day0.susceptible,
"initial_infected": day0.infected,
"initial_hospital": day0.hospitalized,
"initial_recovered": day0.recovered,
"hospital_probability": simsir.rates["hospitalized"],
}
x = {
"dates": DatetimeIndex(raw_df.index),
"hospital_length_of_stay": p.dispositions["hospitalized"].days,
"market_share": p.market_share,
}
return x, pars
@fixture(name="sir_data_w_policy")
def fixture_sir_data_w_policy(penn_chime_setup):
"""Provides data for local sir module with implemented policies
"""
p, simsir = penn_chime_setup
raw_df = simsir.raw_df.set_index("date")
day0 = raw_df.iloc[0].fillna(0)
total = day0.susceptible + day0.infected + day0.recovered
pars = {
"beta": simsir.beta * total, # This repo uses S/total in sir
"gamma": simsir.gamma,
"initial_susceptible": day0.susceptible,
"initial_infected": day0.infected,
"initial_hospital": day0.hospitalized,
"initial_recovered": day0.recovered,
"hospital_probability": simsir.rates["hospitalized"],
}
x = {
"dates": DatetimeIndex(raw_df.index),
"hospital_length_of_stay": p.dispositions["hospitalized"].days,
"market_share": p.market_share,
}
return x, pars
def test_conserved_n(sir_data_wo_policy):
    """The total population S + I + R must be constant across the run."""
    x, pars = sir_data_wo_policy

    model = SIRModel()
    total = 0
    for name in model.compartments:
        total += pars[f"initial_{name}"]

    predictions = model.propagate_uncertainties(x, pars)
    computed = predictions[model.compartments].sum(axis=1)
    expected = Series(data=[total] * len(computed), index=computed.index)

    assert_series_equal(expected, computed)
def test_sir_vs_penn_chime_no_policies(penn_chime_raw_df_no_policy, sir_data_wo_policy):
    """The local SIR must reproduce penn_chime's SIR without social policies."""
    x, pars = sir_data_wo_policy

    predictions = SIRModel().propagate_uncertainties(x, pars)
    expected = penn_chime_raw_df_no_policy.rename(columns=COLUMN_MAP)[COLS_TO_COMPARE]

    assert_frame_equal(expected, predictions[COLS_TO_COMPARE])
def test_sir_vs_penn_chime_w_policies(penn_chime_setup, sir_data_w_policy):
    """Compares local SIR against penn_chime SIR with social policies.

    penn_chime encodes policies as (beta, n_days) tuples; here the
    equivalent is an update_parameters callback that switches beta once the
    policy start date is reached.
    """
    p, sir = penn_chime_setup
    x, pars = sir_data_w_policy
    policies = sir.gen_policy(p)

    # Date the second policy kicks in, and the betas before/after it.
    new_policy_date = x["dates"][0] + timedelta(days=policies[0][1])
    beta0, beta1 = policies[0][0], policies[1][0]

    def update_parameters(ddate, **pars):  # pylint: disable=W0613
        # Rescale by population because the local SIR uses S/total.
        pars["beta"] = (beta0 if ddate < new_policy_date else beta1) * p.population
        return pars

    sir_model = SIRModel(update_parameters=update_parameters)
    predictions = sir_model.propagate_uncertainties(x, pars)

    assert_frame_equal(
        sir.raw_df.set_index("date")
        .fillna(0)
        .rename(columns=COLUMN_MAP)[COLS_TO_COMPARE],
        predictions[COLS_TO_COMPARE],
    )
def test_sir_logistic_policy(penn_chime_setup, sir_data_w_policy):
    """Compares local SIR against penn_chime SIR for implemented social
    policies where policies are implemented as a logistic function.

    A step-like change in beta is approximated by a one-minus-logistic
    curve with a very sharp slope (large k), so the result must match the
    hard policy switch to within float precision.
    """
    p, sir = penn_chime_setup
    x, pars = sir_data_w_policy
    policies = sir.gen_policy(p)

    # Set up logistic function to match policies (sharp decay).
    pars["beta"] = policies[0][0] * p.population
    ## These are new parameters needed by one_minus_logistic_fcn
    pars["L"] = 1 - policies[1][0] / policies[0][0]  # relative drop in beta
    pars["x0"] = policies[0][1] - 0.5                # switch midpoint (days)
    pars["k"] = 1.0e7                                # slope: ~step function

    def update_parameters(ddate, **kwargs):
        # Day offset since the start of the simulation.
        xx = (ddate - x["dates"][0]).days
        ppars = kwargs.copy()
        ppars["beta"] = kwargs["beta"] * one_minus_logistic_fcn(
            xx, L=kwargs["L"], k=kwargs["k"], x0=kwargs["x0"],
        )
        return ppars

    sir_model = SIRModel(update_parameters=update_parameters)
    predictions = sir_model.propagate_uncertainties(x, pars)

    assert_frame_equal(
        sir.raw_df.set_index("date")
        .rename(columns=COLUMN_MAP)[COLS_TO_COMPARE]
        .fillna(0),
        predictions[COLS_TO_COMPARE],
    )
def test_sir_type_conversion(sir_data_w_policy):
    """Parametrizing by gamma or by recovery_days = 1/gamma must be identical."""
    x, pars = sir_data_w_policy

    model = SIRModel()
    baseline = model.propagate_uncertainties(x, pars)

    pars["recovery_days"] = 1 / pars.pop("gamma")
    converted = model.propagate_uncertainties(x, pars)

    assert_frame_equal(baseline, converted)
| 30.753036 | 88 | 0.689968 |
6b86428b6c45b9eed93787de0995366ae4e45667 | 23,690 | py | Python | gallery/tutorial/tensorIR_cpu_HPCG.py | miraclezqc/tvm | 1c10775fea211ec326d1516b5193766fd339d8b5 | [
"Apache-2.0"
] | null | null | null | gallery/tutorial/tensorIR_cpu_HPCG.py | miraclezqc/tvm | 1c10775fea211ec326d1516b5193766fd339d8b5 | [
"Apache-2.0"
] | null | null | null | gallery/tutorial/tensorIR_cpu_HPCG.py | miraclezqc/tvm | 1c10775fea211ec326d1516b5193766fd339d8b5 | [
"Apache-2.0"
] | null | null | null | import os
import time
import tvm
import math
import numpy as np
from tvm.script import tir as T
@T.prim_func
def GenerateProblem(Aval: T.handle, Aidx: T.handle, B: T.handle, X: T.handle, n: T.int32, nnzinrow: T.int32):
    # Build the HPCG 27-point stencil system on an n^3 grid in ELLPACK-like
    # storage: aval[row, k] / aidx[row, k] hold up to nnzinrow (=27) entries
    # per row. Also fills the RHS b and zeroes the initial guess x.
    T.func_attr({"global_symbol": "GenerateProblem", "tir.noalias": True})
    aval = T.match_buffer(Aval, (n*n*n, nnzinrow), dtype="float64")
    aidx = T.match_buffer(Aidx, (n*n*n, nnzinrow), dtype="int32")
    b = T.match_buffer(B, (n*n*n), dtype="float64")
    x = T.match_buffer(X, (n*n*n), dtype="float64")
    for i in range(n*n*n):
        with T.block("outer"):
            vi = T.axis.spatial(n*n*n, i)
            # Decode the linear row index into (z, y, x) grid coordinates.
            idx = T.alloc_buffer((3), dtype="int32")
            idx[0] = vi // (n * n)
            idx[1] = (vi - idx[0] * (n * n)) // n
            idx[2] = vi % n
            nnz = T.alloc_buffer((1), dtype="int32")
            num_boundry = T.alloc_buffer((1), dtype="int32")
            num_boundry[0] = 0
            nnz[0] = 0
            # 3x3x3 neighborhood; (bz, by, bx) == (1, 1, 1) is the row itself.
            for bz in range(3):
                for by in range(3):
                    for bx in range(3):
                        with T.block("inner"):
                            vz, vy, vx = T.axis.remap("SSS", [bz, by, bx])
                            cur_col = T.alloc_buffer((1), dtype="int32")
                            cur_col[0] = vi + (bz-1) * (n*n) + (by-1) * n + (bx-1)
                            if cur_col[0] == vi:
                                # Diagonal entry of the 27-point operator.
                                aval[vi, nnz[0]] = T.float64(26.0)
                                aidx[vi, nnz[0]] = cur_col[0]
                            elif idx[0] + vz == 0 or idx[0] + vz == n+1 or idx[1] + vy == 0 or idx[1] + vy == n+1 or idx[2] + vx == 0 or idx[2] + vx == n+1:
                                # Neighbor falls outside the grid: store an
                                # explicit zero so every row has nnzinrow slots.
                                aval[vi, nnz[0]] = T.float64(0.0)
                                aidx[vi, nnz[0]] = 0
                                num_boundry[0] += 1
                            else:
                                aval[vi, nnz[0]] = T.float64(-1.0)
                                aidx[vi, nnz[0]] = cur_col[0]
                            nnz[0] += 1
            # RHS chosen so the exact solution is the all-ones vector
            # (diagonal minus the number of interior off-diagonal entries).
            b[vi] = T.float64(26.0) - (T.float64(nnz[0] - 1 - num_boundry[0]))
            x[vi] = T.float64(0)
@T.prim_func
def CopyVec(From: T.handle, To: T.handle, len: T.int32):
    # Element-wise copy: to_vec[i] = from_vec[i] for i in [0, len).
    T.func_attr({"global_symbol": "CopyVec", "tir.noalias": True})
    from_vec = T.match_buffer(From, (len), dtype="float64")
    to_vec = T.match_buffer(To, (len), dtype="float64")
    for i in range(len):
        with T.block("outer"):
            vi = T.axis.spatial(len, i)
            to_vec[vi] = from_vec[vi]
@T.prim_func
def ZeroVec(X: T.handle, len: T.int32):
    # Set every element of x to 0.0.
    T.func_attr({"global_symbol": "ZeroVec", "tir.noalias": True})
    x = T.match_buffer(X, (len), dtype="float64")
    for i in range(len):
        with T.block("outer"):
            vi = T.axis.S(len, i)
            x[vi] = T.float64(0.0)
@T.prim_func
def Waxpby(alpha: T.float64, A: T.handle, beta: T.float64, B: T.handle, W: T.handle, len: T.int32):
    # Scaled vector sum: w = alpha * a + beta * b (HPCG's WAXPBY kernel).
    # Callers in this file also pass W aliased with A or B, despite the
    # "tir.noalias" attribute — NOTE(review): confirm this is safe for the
    # generated schedules.
    T.func_attr({"global_symbol": "Waxpby", "tir.noalias": True})
    a = T.match_buffer(A, (len), dtype="float64")
    b = T.match_buffer(B, (len), dtype="float64")
    w = T.match_buffer(W, (len), dtype="float64")
    for i in range(len):
        with T.block("outer"):
            vi = T.axis.spatial(len, i)
            w[vi] = alpha * a[vi] + beta * b[vi]
@T.prim_func
def DotProduct(A: T.handle, B: T.handle, Res: T.handle, num_threads: T.int32, len: T.int32):
    # Dot product with an explicit two-stage reduction: each of num_threads
    # chunks accumulates a partial sum, then the partials are summed serially.
    # Assumes len is divisible by num_threads — TODO confirm at call sites.
    T.func_attr({"global_symbol": "DotProduct", "tir.noalias": True})
    a = T.match_buffer(A, (len), dtype="float64")
    b = T.match_buffer(B, (len), dtype="float64")
    r = T.match_buffer(Res, (1), dtype="float64")
    res = T.alloc_buffer((num_threads), dtype="float64")
    for t in range(num_threads):
        with T.block("outer"):
            vt = T.axis.S(num_threads, t)
            tmp = T.alloc_buffer((1), dtype="float64")
            tmp[0] = T.float64(0)
            for i in range(len/num_threads):
                with T.block("inner"):
                    vi = T.axis.S(len/num_threads, i)
                    tmp[0] += a[vt * len/num_threads + vi] * b[vt * len/num_threads + vi]
            res[vt] = tmp[0]
    # Serial reduction of the per-chunk partials.
    r[0] = T.float64(0)
    for j in range(num_threads):
        r[0] += res[j]
@T.prim_func
def DotProduct_rfactor(A: T.handle, B: T.handle, Res: T.handle, len: T.int32):
    # Dot product written as a single TIR reduction block, suitable for the
    # rfactor scheduling primitive (hence the name).
    T.func_attr({"global_symbol": "DotProduct_rfactor", "tir.noalias": True})
    a = T.match_buffer(A, (len), dtype="float64")
    b = T.match_buffer(B, (len), dtype="float64")
    r = T.match_buffer(Res, (1), dtype="float64")
    for i in range(len):
        with T.block("outer"):
            vi = T.axis.reduce(len, i)
            with T.init():
                r[0] = T.float64(0.0)
            r[0] += a[vi] * b[vi]
@T.prim_func
def Spmv(Aval: T.handle, Aidx: T.handle, X: T.handle, Y: T.handle, n: T.int32, nnzinrow: T.int32):
    # Sparse matrix-vector product y = A @ x with A in the ELLPACK-like
    # layout produced by GenerateProblem (nnzinrow entries per row; padded
    # slots have value 0.0, so they contribute nothing).
    T.func_attr({"global_symbol": "Spmv", "tir.noalias": True})
    aval = T.match_buffer(Aval, (n*n*n, nnzinrow), dtype="float64")
    aidx = T.match_buffer(Aidx, (n*n*n, nnzinrow), dtype="int32")
    x = T.match_buffer(X, (n*n*n), dtype="float64")
    y = T.match_buffer(Y, (n*n*n), dtype="float64")
    for i in range(n*n*n):
        with T.block("outer"):
            vi = T.axis.spatial(n*n*n, i)
            sum = T.alloc_buffer((1), dtype="float64")
            sum[0] = T.float64(0)
            for j in range(nnzinrow):
                with T.block("inner"):
                    vj = T.axis.spatial(nnzinrow, j)
                    sum[0] += aval[vi,vj] * x[aidx[vi,vj]]
            y[vi] = sum[0]
@T.prim_func
def Symgs(Aval: T.handle, Aidx: T.handle, X: T.handle, R: T.handle, n: T.int32, nnzinrow: T.int32):
    # Symmetric Gauss-Seidel smoother: one forward sweep (ascending rows)
    # followed by one backward sweep (descending rows). x is updated in
    # place; each row update uses the latest values of its neighbors, so
    # the row order is essential and the loops are inherently sequential.
    T.func_attr({"global_symbol": "Symgs", "tir.noalias": True})
    aval = T.match_buffer(Aval, (n*n*n, nnzinrow), dtype="float64")
    aidx = T.match_buffer(Aidx, (n*n*n, nnzinrow), dtype="int32")
    x = T.match_buffer(X, (n*n*n), dtype="float64")
    r = T.match_buffer(R, (n*n*n), dtype="float64")
    for i in range(n*n*n):
        with T.block("forward_outer"):
            vi = T.axis.spatial(n*n*n, i)
            sum = T.alloc_buffer((1), dtype="float64")
            sum[0] = r[vi]
            for j in range(nnzinrow):
                with T.block("forward_inner"):
                    vj = T.axis.spatial(nnzinrow, j)
                    sum[0] -= aval[vi,vj] * x[aidx[vi,vj]]
            # Add the diagonal term back (it was subtracted in the loop),
            # then solve the row for x[vi].
            sum[0] += aval[vi, nnzinrow/2] * x[vi]
            x[vi] = sum[0] / aval[vi, nnzinrow/2]
    for i in range(n*n*n):
        with T.block("backward_outer"):
            # Backward sweep: rows visited in reverse order.
            vi = T.axis.spatial(n*n*n, n*n*n - 1 - i)
            sum1 = T.alloc_buffer((1), dtype="float64")
            sum1[0] = r[vi]
            for j in range(nnzinrow):
                with T.block("backward_inner"):
                    vj = T.axis.spatial(nnzinrow, j)
                    sum1[0] -= aval[vi,vj] * x[aidx[vi,vj]]
            sum1[0] += aval[vi, nnzinrow/2] * x[vi]
            x[vi] = sum1[0] / aval[vi, nnzinrow/2]
@T.prim_func
def Symgs_ls(Aval: T.handle, Aidx: T.handle, X: T.handle, R: T.handle, n: T.int32, nnzinrow: T.int32):
    # Variant of Symgs intended for level-scheduling ("ls"): both sweeps
    # iterate rows in ascending order, so it is only equivalent to the true
    # backward sweep after a level-schedule reordering is applied by the
    # caller/schedule. NOTE(review): global_symbol is "Symgs", identical to
    # the plain Symgs prim_func above — confirm this duplication is intended.
    T.func_attr({"global_symbol": "Symgs", "tir.noalias": True})
    aval = T.match_buffer(Aval, (n*n*n, nnzinrow), dtype="float64")
    aidx = T.match_buffer(Aidx, (n*n*n, nnzinrow), dtype="int32")
    x = T.match_buffer(X, (n*n*n), dtype="float64")
    r = T.match_buffer(R, (n*n*n), dtype="float64")
    for i in range(n*n*n):
        with T.block("forward_outer"):
            vi = T.axis.spatial(n*n*n, i)
            sum = T.alloc_buffer((1), dtype="float64")
            sum[0] = r[vi]
            for j in range(nnzinrow):
                with T.block("forward_inner"):
                    vj = T.axis.spatial(nnzinrow, j)
                    sum[0] -= aval[vi,vj] * x[aidx[vi,vj]]
            sum[0] += aval[vi, nnzinrow/2] * x[vi]
            x[vi] = sum[0] / aval[vi, nnzinrow/2]
    for i in range(n*n*n):
        with T.block("backward_outer"):
            # Ascending order here (unlike Symgs), by design for level
            # scheduling.
            vi = T.axis.spatial(n*n*n, i)
            sum1 = T.alloc_buffer((1), dtype="float64")
            sum1[0] = r[vi]
            for j in range(nnzinrow):
                with T.block("backward_inner"):
                    vj = T.axis.spatial(nnzinrow, j)
                    sum1[0] -= aval[vi,vj] * x[aidx[vi,vj]]
            sum1[0] += aval[vi, nnzinrow/2] * x[vi]
            x[vi] = sum1[0] / aval[vi, nnzinrow/2]
@T.prim_func
def Restriction(Rc: T.handle, R: T.handle, Axf: T.handle, nc: T.int32):
    # Multigrid restriction by injection: the coarse residual rc is the
    # fine-grid residual (r - A@xf, with A@xf precomputed in axf) sampled at
    # every other point in each dimension.
    T.func_attr({"global_symbol": "Restriction", "tir.noalias": True})
    rc = T.match_buffer(Rc, (nc*nc*nc), dtype="float64")
    r = T.match_buffer(R, (nc*2*nc*2*nc*2), dtype="float64")
    axf = T.match_buffer(Axf, (nc*2*nc*2*nc*2), dtype="float64")
    for i,j,k in T.grid(nc,nc,nc):
        with T.block("outer"):
            vi,vj,vk = T.axis.remap("SSS",[i,j,k])
            rc[vi*nc*nc + vj*nc + vk] = r[(2*vi)*(nc*2)*(nc*2)+(2*vj)*(nc*2)+(2*vk)] - axf[(2*vi)*(nc*2)*(nc*2)+(2*vj)*(nc*2)+(2*vk)]
@T.prim_func
def Prolongation(Xf: T.handle, Xc: T.handle, nc: T.int32):
    # Multigrid prolongation by injection: add each coarse-grid correction
    # xc to the corresponding even-indexed fine-grid point of xf.
    T.func_attr({"global_symbol": "Prolongation", "tir.noalias": True})
    xf = T.match_buffer(Xf, (nc*2*nc*2*nc*2), dtype="float64")
    xc = T.match_buffer(Xc, (nc*nc*nc), dtype="float64")
    for i,j,k in T.grid(nc,nc,nc):
        with T.block("outer"):
            vi,vj,vk = T.axis.remap("SSS",[i,j,k])
            # NOTE(review): vc/vf are declared as extra spatial axes derived
            # from (vi, vj, vk) rather than plain let-bindings — confirm this
            # is the intended TVMScript idiom here.
            vc = T.axis.S(nc*nc*nc, (vi)*nc*nc+(vj)*nc+(vk))
            vf = T.axis.S(nc*2*nc*2*nc*2, (vi*2)*(nc*2)*(nc*2)+(vj*2)*(nc*2)+(vk*2))
            xf[vf] = xf[vf] + xc[vc]
class HPCG_Example():
    def __init__(self, dim=3, N=256, n_mg_levels=4, real="float64", ctx = tvm.cpu()):
        """Allocate all vectors and per-level matrices for an N^3 HPCG run.

        Args:
            dim: spatial dimension; only dim == 3 cubes are supported.
            N: fine-grid points per dimension (halved at each MG level).
            n_mg_levels: number of multigrid levels.
            real: floating-point dtype used for all vectors/matrices.
            ctx: TVM device. NOTE: a mutable default evaluated at class
                definition time — all default-constructed instances share
                the same tvm.cpu() handle.
        """
        # grid parameters
        self.use_multigrid = True
        self.ls_opt = True          # use the level-scheduled SYMGS on the fine level
        self.ctx = ctx
        self.num_threads = 32

        # only apply 3-dim cube problem temporarily
        self.N = N
        self.n_mg_levels = n_mg_levels
        self.dim = dim
        self.real = real
        self.nrow = N**dim          # rows on the fine grid
        self.nnzinrow = 27          # 27-point stencil: max nonzeros per row

        self.N_ext = self.N // 2  # number of ext cells set so that that total grid size is still power of 2
        self.N_tot = 2 * self.N

        self.pre_and_post_smoothing = 2
        self.bottom_smoothing = 50

        # CG work vectors on the fine grid.
        self.b = tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)
        self.x = tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)
        self.r = tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)
        self.z = tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)
        self.p = tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)
        self.Ap= tvm.nd.empty((self.N**3,), dtype=self.real, device=self.ctx)

        # Per-level operator (ELLPACK values/indices) plus coarse residual,
        # coarse correction and A@xf scratch vectors.
        self.Aval = []
        self.Aidx = []
        self.rc = []
        self.xc = []
        self.Axf = []
        for level in range(self.n_mg_levels):
            self.Aval.append(tvm.nd.empty(((self.N//(2**level))**3, self.nnzinrow), dtype=self.real, device=self.ctx))
            self.Aidx.append(tvm.nd.empty(((self.N//(2**level))**3, self.nnzinrow), dtype="int32", device=self.ctx))
            if level > 0:
                self.rc.append(tvm.nd.empty(((self.N//(2**level))**3, ), dtype=self.real, device=self.ctx))
                self.xc.append(tvm.nd.empty(((self.N//(2**level))**3, ), dtype=self.real, device=self.ctx))
            if level != (self.n_mg_levels-1) :
                self.Axf.append(tvm.nd.empty(((self.N//(2**level))**3, ), dtype=self.real, device=self.ctx))

        # Level-scheduling metadata for the ls-optimized SYMGS.
        self.stencil_list = []
        self.level_number = 0
        self.level_num = tvm.nd.NDArray
        self.level_idx = tvm.nd.empty((self.nrow,), dtype="int32", device=self.ctx)

        # Compiled TVM modules; populated by generate_module().
        self.symgs_ls_mod = tvm.runtime.Module
        self.generate_problem_mod = tvm.runtime.Module
        self.copy_vec_mod = tvm.runtime.Module
        self.zero_vec_mod = tvm.runtime.Module
        self.dot_mod = tvm.runtime.Module
        self.waxpby_mod = tvm.runtime.Module
        self.spmv_mod = tvm.runtime.Module
        self.symgs_mod = tvm.runtime.Module
        self.restric_mod = tvm.runtime.Module
        self.prolongation_mod = tvm.runtime.Module
    def generate_problem(self, Aval, Aidx, B, X, n):
        # Fill (Aval, Aidx, B, X) for an n^3 grid via the compiled
        # GenerateProblem kernel.
        self.generate_problem_mod(Aval, Aidx, B, X, n, self.nnzinrow)
    def copy(self, from_vec, to_vec):
        # Fine-grid vector copy: to_vec[:] = from_vec[:].
        self.copy_vec_mod(from_vec, to_vec, self.nrow)
    def zero_vec(self, vec, level):
        # Zero a vector sized for multigrid level `level`.
        self.zero_vec_mod(vec, (self.N//(2**level))**self.dim)
    def waxpby(self, alpha, x, beta, y, w):
        # Fine-grid w = alpha * x + beta * y.
        self.waxpby_mod(alpha, x, beta, y, w, self.nrow)
    def dot_rfactor(self, x, y):
        # Fine-grid dot product via the rfactor-scheduled kernel; the scalar
        # result is copied back to host as a Python float.
        res = tvm.nd.empty((1,), dtype=self.real, device=self.ctx)
        self.dot_mod(x, y, res, self.nrow)
        return res.asnumpy()[0]
    def spmv(self, Aval, Aidx, x, y, level):
        # y = A @ x on multigrid level `level`.
        self.spmv_mod(Aval, Aidx, x, y, self.N//(2**level), self.nnzinrow)
    def symgs(self, Aval, Aidx, x, r, level):
        # One symmetric Gauss-Seidel sweep on level `level` (x updated in place).
        self.symgs_mod(Aval, Aidx, x, r, self.N//(2**level), self.nnzinrow)
    def restrication(self, rc, rf, Axf, level):
        # Restrict the fine residual (rf - Axf) from `level` to `level + 1`.
        # NOTE(review): method name is a typo for "restriction"; kept because
        # callers use it.
        self.restric_mod(rc, rf, Axf, self.N//(2**level)//2)
    def prolongation(self, xf, xc, level):
        # Add the coarse correction xc (level + 1) into xf (level).
        self.prolongation_mod(xf, xc, self.N//(2**level)//2)
    def MG(self, r, x):
        """One multigrid V-cycle solving A z ≈ r, writing the result into x.

        Down-leg: pre-smooth (level-scheduled SYMGS, currently only on the
        finest level), compute A@xf, restrict the residual. Up-leg:
        prolongate the coarse correction and post-smooth. The bottom-level
        and non-ls smoother calls are commented out, so intermediate levels
        currently do no smoothing at all — NOTE(review): confirm intended.
        """
        rf = r
        xf = x
        for level in range(self.n_mg_levels-1):
            if level != 0 :
                rf = self.rc[level-1]
                xf = self.xc[level-1]
            self.zero_vec(xf, level)  # zero initial guess on this level
            if self.ls_opt and level == 0:
                self.symgs_ls_mod(self.Aval[level], self.Aidx[level], xf, rf, self.N, self.nnzinrow, self.level_num, self.level_idx)
            # else:
            #     self.symgs(self.Aval[level], self.Aidx[level], xf, rf, level)
            self.spmv(self.Aval[level], self.Aidx[level], xf, self.Axf[level], level)
            self.restrication(self.rc[level], rf, self.Axf[level], level)
        # Coarsest level: solve (smoother currently disabled).
        xf = self.xc[-1]
        rf = self.rc[-1]
        bot = self.n_mg_levels-1
        self.zero_vec(xf, bot)
        # self.symgs(self.Aval[bot], self.Aidx[bot], xf, rf, bot)
        for level in reversed(range(self.n_mg_levels-1)):
            if level != 0 :
                rf = self.rc[level-1]
                xf = self.xc[level-1]
            else :
                rf = r
                xf = x
            self.prolongation(xf, self.xc[level], level)
            if self.ls_opt and level == 0:
                self.symgs_ls_mod(self.Aval[level], self.Aidx[level], xf, rf, self.N, self.nnzinrow, self.level_num, self.level_idx)
            # else:
            #     self.symgs(self.Aval[level], self.Aidx[level], xf, rf, level)
    def init(self):
        """Generate the system matrix for every multigrid level.

        Level 0 uses the real rhs/solution vectors; coarser levels only need
        the matrix, so a throwaway vector is passed for both b and x.
        """
        for level in range(self.n_mg_levels):
            if level == 0 :
                self.generate_problem(self.Aval[level], self.Aidx[level], self.b, self.x, self.N)
            else:
                tmp = tvm.nd.empty(((self.N//(2**level))**3,), dtype=self.real, device=self.ctx)
                self.generate_problem(self.Aval[level], self.Aidx[level], tmp, tmp, self.N//(2**level))
def slove(self,
max_iters=50,
eps=1e-12,
abs_tol=1e-12,
rel_tol=1e-12,
verbose=False):
t = time.time()
normr = 0.0
rtz = 0.0
oldrtz = 0.0
iter = 0
self.copy(self.x, self.p)
self.spmv(self.Aval[0], self.Aidx[0], self.p, self.Ap, 0)
self.waxpby(1.0, self.b, -1.0, self.Ap, self.r)
normr = math.sqrt(self.dot_rfactor(self.r, self.r))
normr0 = normr
while iter < max_iters and normr/normr0 > abs_tol:
if self.use_multigrid == False:
self.copy(self.r, self.z)
else:
self.MG(self.r, self.z)
if iter == 0:
self.copy(self.z, self.p)
rtz = self.dot_rfactor(self.r, self.z)
else:
oldrtz = rtz
rtz = self.dot_rfactor(self.r, self.z)
beta = rtz/oldrtz
self.waxpby(1.0, self.z, beta, self.p, self.p)
self.spmv(self.Aval[0], self.Aidx[0], self.p, self.Ap, 0)
alpha = rtz / self.dot_rfactor(self.p, self.Ap)
self.waxpby(1.0, self.x, alpha, self.p, self.x)
self.waxpby(1.0, self.r, -alpha, self.Ap, self.r)
normr = math.sqrt(self.dot_rfactor(self.r, self.r))
iter += 1
print("iter",iter, " norm=", normr/normr0, "used time=", f'{time.time() - t:.3f} s')
    def generate_module(self):
        """Schedule and compile every TVM kernel the solver needs.

        Each kernel's outermost loop is split by ``self.num_threads`` and
        parallelized; the dot product additionally uses rfactor so the
        reduction can be parallelized safely.
        """
        # GenerateProblem
        sch = tvm.tir.Schedule(GenerateProblem)
        i, = sch.get_loops(sch.get_block("outer"))
        spl_idx = sch.split(i, factors=[self.num_threads,None])
        sch.parallel(spl_idx[0])
        # Fully unroll the small stencil loops of the inner block.
        bz,by,bx = sch.get_loops(sch.get_block("inner"))
        sch.unroll(bz)
        sch.unroll(by)
        sch.unroll(bx)
        # print(sch.mod.script())
        # mod = tvm.build(sch.mod, target="c --unpacked-api")
        self.generate_problem_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # print(mod.get_source())
        # CopyVec
        sch = tvm.tir.Schedule(CopyVec)
        i, = sch.get_loops(sch.get_block("outer"))
        spl_idx = sch.split(i, factors=[self.num_threads, None])
        sch.parallel(spl_idx[0])
        self.copy_vec_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # ZeroVec
        sch = tvm.tir.Schedule(ZeroVec)
        i, = sch.get_loops(sch.get_block("outer"))
        spl_idx = sch.split(i, factors=[self.num_threads, None])
        sch.parallel(spl_idx[0])
        self.zero_vec_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # DotProduct: rfactor splits the reduction so each thread owns a
        # partial sum that is combined afterwards.
        sch = tvm.tir.Schedule(DotProduct_rfactor)
        i, = sch.get_loops(sch.get_block("outer"))
        i_out, i_in = sch.split(i, factors=[self.num_threads,None])
        rf_block = sch.rfactor(i_out, 0)
        ii, jj = sch.get_loops(rf_block)
        sch.parallel(ii)
        self.dot_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # Waxpby
        sch = tvm.tir.Schedule(Waxpby)
        i, = sch.get_loops(sch.get_block("outer"))
        spl_idx = sch.split(i, factors=[self.num_threads,None])
        sch.parallel(spl_idx[0])
        self.waxpby_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # Spmv
        sch = tvm.tir.Schedule(Spmv)
        i, = sch.get_loops(sch.get_block("outer"))
        spl_idx = sch.split(i, factors=[self.num_threads,None])
        sch.parallel(spl_idx[0])
        self.spmv_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # Symgs: left sequential — Gauss-Seidel carries loop dependences
        # (the parallel variant is built in level_schedule_analysis).
        sch = tvm.tir.Schedule(Symgs)
        self.symgs_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # Restrication
        sch = tvm.tir.Schedule(Restriction)
        i,j,k = sch.get_loops(sch.get_block("outer"))
        fuse = sch.fuse(i,j,k)
        spl_idx = sch.split(fuse, factors=[self.num_threads,None])
        sch.parallel(spl_idx[0])
        self.restric_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
        # Prolongation
        sch = tvm.tir.Schedule(Prolongation)
        i,j,k = sch.get_loops(sch.get_block("outer"))
        fuse = sch.fuse(i,j,k)
        spl_idx = sch.split(fuse, factors=[self.num_threads,None])
        sch.parallel(spl_idx[0], force = 1)
        self.prolongation_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
    def level_schedule_analysis(self):
        """Compute (or load cached) level-scheduling metadata for parallel SymGS.

        Rows that only depend on rows of lower levels can be processed in
        parallel within a level.  Results are cached to
        ``level_num<N>.npy`` / ``level_idx<N>.npy`` in the working directory.
        """
        if os.path.exists("level_num"+str(self.N)+".npy") and os.path.exists("level_idx"+str(self.N)+".npy"):
            # Fast path: reuse the cached analysis for this grid size.
            self.level_idx = tvm.nd.array(np.load("level_idx"+str(self.N)+".npy"), self.ctx)
            self.level_num = tvm.nd.array(np.load("level_num"+str(self.N)+".npy"), self.ctx)
            self.level_number = len(np.load("level_num"+str(self.N)+".npy")) - 1
        else :
            # Build the 27-point stencil offset list (includes (0,0,0)).
            for i in range(-1,2,1):
                for j in range(-1,2,1):
                    for k in range(-1,2,1):
                        self.stencil_list.append((i,j,k))
            print(self.stencil_list)
            # level_idx[:,0] = row index, level_idx[:,1] = assigned level.
            level_idx = np.zeros((self.nrow,2), dtype="int32")
            level_idx[:,0] = np.arange(self.nrow)
            t0 = time.time()
            for i in range(self.N):
                for j in range(self.N):
                    for k in range(self.N):
                        cur_idx = i * (self.N ** 2) + j * self.N + k
                        level_idx[cur_idx][1] = 0
                        max_level = -1
                        # A row's level is 1 + max level of its earlier neighbours.
                        for n in range(len(self.stencil_list)):
                            bi = self.stencil_list[n][0]
                            bj = self.stencil_list[n][1]
                            bk = self.stencil_list[n][2]
                            if i+bi >=0 and i+bi < self.N and j + bj >=0 and j + bj < self.N and k + bk >=0 and k + bk < self.N:
                                idx = (i+bi) * (self.N ** 2) + (j + bj) * self.N + k + bk
                                if idx < cur_idx and level_idx[idx][1] > max_level:
                                    max_level = level_idx[idx][1]
                        level_idx[cur_idx][1] = max_level + 1
            self.level_number = level_idx[-1][1] + 1
            # level_num[l] = first row index of level l after sorting by level.
            level_num = np.zeros((self.level_number+1), dtype="int32")
            sort = level_idx[level_idx[:,1].argsort()]
            cur_idx = 1
            for i in range(1, self.nrow):
                if(sort[i-1][1] != sort[i][1]):
                    level_num[cur_idx] = i
                    cur_idx += 1
            level_num[cur_idx] = self.nrow
            self.level_idx = tvm.nd.array(sort[:,0].astype("int32"), self.ctx)
            self.level_num = tvm.nd.array(level_num.astype("int32"), self.ctx)
            np.save("level_num"+str(self.N)+".npy", level_num.astype("int32"))
            np.save("level_idx"+str(self.N)+".npy", sort[:,0].astype("int32"))
        # level schedule primitive
        append_list = [tvm.tir.decl_buffer((self.level_number+1), name="level_num", dtype="int32"), tvm.tir.decl_buffer((self.nrow), name="level_idx", dtype="int32")]
        func = Symgs_ls.append_params(append_list)
        sch = tvm.tir.Schedule(func)
        loop, = sch.get_loops(sch.get_block("forward_outer"))
        sch.level_schedule(loop, self.level_number, append_list[0], append_list[1])
        level, level_num = sch.get_loops(sch.get_block("forward_outer"))
        sch.parallel(level_num, force = 1)
        loop, = sch.get_loops(sch.get_block("backward_outer"))
        sch.level_schedule(loop, -self.level_number, append_list[0], append_list[1]) # negative number for reversed loop
        level, level_num = sch.get_loops(sch.get_block("backward_outer"))
        sch.parallel(level_num, force = 1)
        print(sch.mod.script())
        # mod = tvm.build(sch.mod, target="c --unpacked-api")
        # print(mod.get_source())
        self.symgs_ls_mod = tvm.build(sch.mod, target="llvm -opt-level=3")
def run(self, verbose=False):
t0 = time.time()
self.generate_module()
print(f'Generate_module time: {time.time() - t0:.3f} s')
t1 = time.time()
self.init()
if self.ls_opt:
self.level_schedule_analysis()
print(f'Init and level schedule analysis time: {time.time() - t1:.3f} s')
t2 = time.time()
self.slove(max_iters=50, verbose=verbose)
print(f'Slove time: {time.time() - t2:.3f} s')
if __name__ == '__main__':
    # Entry point: solve a 128^3 HPCG-style problem and report total time.
    solver = HPCG_Example(dim=3, N=128)
    t = time.time()
    solver.run(verbose=True)
    print(f'Total solver time: {time.time() - t:.3f} s')
| 41.781305 | 167 | 0.535036 |
0896f95f94d69e1822c21e27f95e9752005b86b8 | 5,239 | py | Python | tensorflow/python/keras/optimizer_v2/adagrad.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 2 | 2018-12-05T10:58:40.000Z | 2019-01-24T11:36:01.000Z | tensorflow/python/keras/optimizer_v2/adagrad.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/optimizer_v2/adagrad.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
class Adagrad(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adagrad algorithm.
  Adagrad is an optimizer with parameter-specific learning rates,
  which are adapted relative to how frequently a parameter gets
  updated during training. The more updates a parameter receives,
  the smaller the updates.
  Initialization:
  $$accum_g_0 := initial_accumulator_value$$
  $$t := t + 1$$
  $$accum_g_t := accum_g_{t-1} + g * g$$
  $$theta_t := theta_{t-1} - lr * g / (\sqrt{accum_g_t} + \epsilon)$$
  References
    See [paper]
      (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    or this
      [intro](https://ppasupat.github.io/a9online/uploads/proximal_notes.pdf).
  """
  def __init__(self,
               learning_rate=0.001,
               initial_accumulator_value=0.1,
               epsilon=1e-7,
               name='Adagrad',
               **kwargs):
    """Construct a new Adagrad optimizer.
    Args:
      learning_rate: A `Tensor` or a floating point value. The learning rate.
      initial_accumulator_value: A floating point value.
        Starting value for the accumulators, must be positive.
      epsilon: A small floating point value added to the denominator for
        numerical stability; must be at least 1e-7.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Adagrad".
      **kwargs: keyword arguments. Allowed to be {`decay`}
    Raises:
      ValueError: If the `initial_accumulator_value` or `epsilon` is invalid.
    @compatibility(eager)
    When eager execution is enabled, `learning_rate` can be a callable that
    takes no arguments and returns the actual value to use. This can be useful
    for changing these values across different invocations of optimizer
    functions.
    @end_compatibility
    """
    if initial_accumulator_value <= 0.0:
      raise ValueError('initial_accumulator_value must be positive: %s' %
                       initial_accumulator_value)
    if epsilon < 1e-7:
      raise ValueError('epsilon must be larger than 1e-7: %s' % epsilon)
    super(Adagrad, self).__init__(name, **kwargs)
    self._set_hyper('learning_rate', learning_rate)
    self._set_hyper('decay', self._initial_decay)
    self._initial_accumulator_value = initial_accumulator_value
    self._set_hyper('epsilon', epsilon)
  def _create_slots(self, var_list):
    # One 'accumulator' slot per variable, pre-filled with the configured
    # initial value (matches accum_g_0 in the class docstring).
    for var in var_list:
      dtype = var.dtype.base_dtype
      init = init_ops.constant_initializer(
          self._initial_accumulator_value, dtype=dtype)
      self.add_slot(var, 'accumulator', init)
  def _resource_apply_dense(self, grad, var):
    # accum += grad^2; var -= lr * grad / (sqrt(accum) + epsilon)
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    epsilon = self._get_hyper('epsilon', var_dtype)
    acc = self.get_slot(var, 'accumulator')
    acc_t = state_ops.assign_add(
        acc, math_ops.square(grad), use_locking=self._use_locking)
    var_update = state_ops.assign_sub(
        var, lr_t * grad / (math_ops.sqrt(acc_t) + epsilon))
    return var_update
  def _resource_apply_sparse(self, grad, var, indices):
    # Sparse variant: only the rows named by `indices` are touched.
    def _resource_scatter_add(x, i, v):
      with ops.control_dependencies(
          [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
        return x.value()
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    epsilon = self._get_hyper('epsilon', var_dtype)
    acc = self.get_slot(var, 'accumulator')
    acc_t = _resource_scatter_add(acc, indices, math_ops.square(grad))
    acc_t_slice = array_ops.gather(acc_t, indices)
    var_update = _resource_scatter_add(
        var, indices, -lr_t * grad / (math_ops.sqrt(acc_t_slice) + epsilon))
    return var_update
  def get_config(self):
    config = super(Adagrad, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'initial_accumulator_value': self._initial_accumulator_value,
        'epsilon': self._serialize_hyperparameter('epsilon'),
    })
    return config
| 37.963768 | 80 | 0.703569 |
54e453305585016bf999592e6340589aecbdb06a | 447 | py | Python | data/scripts/templates/object/draft_schematic/food/shared_drink_alcohol.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/food/shared_drink_alcohol.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/food/shared_drink_alcohol.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the intangible draft-schematic object for this template."""
	obj = Intangible()
	obj.template = "object/draft_schematic/food/shared_drink_alcohol.iff"
	obj.attribute_template_id = -1
	obj.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return obj
9e21626bc6e123973f054dfb6c0712c0acc1d515 | 650 | py | Python | perma_web/perma/migrations/0010_auto_20150618_1534.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | perma_web/perma/migrations/0010_auto_20150618_1534.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | perma_web/perma/migrations/0010_auto_20150618_1534.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def approve_registrars(apps, schema_editor):
    """Forward data migration: mark every existing Registrar approved.

    Uses the historical model state via ``apps.get_model`` rather than
    importing Registrar directly, because the live model may be newer
    than this migration expects.
    """
    Registrar = apps.get_model("perma", "Registrar")
    for reg in Registrar.objects.all():
        reg.is_approved = True
        reg.save()
class Migration(migrations.Migration):
    # Data migration: approve all registrars that existed before this release.
    dependencies = [
        ('perma', '0009_auto_20150618_1531'),
    ]
    operations = [
        migrations.RunPython(approve_registrars),
    ]
| 27.083333 | 73 | 0.695385 |
1c871bae4e0f303ff8f935b5967f6a3a8788aecf | 23,402 | py | Python | filip/models/ngsi_v2/context.py | dnikolay-ebc/FiLiP | 9a84979da8dff4523cb91e40869070bd02aa91fe | [
"BSD-3-Clause"
] | 6 | 2021-11-21T21:57:38.000Z | 2022-02-22T08:20:30.000Z | filip/models/ngsi_v2/context.py | RWTH-EBC/FiLiP | e294c5ef94b2b6ad9611316e50b5c550bcd77c1b | [
"BSD-3-Clause"
] | 83 | 2021-04-08T18:34:20.000Z | 2022-03-30T12:18:32.000Z | filip/models/ngsi_v2/context.py | dnikolay-ebc/FiLiP | 9a84979da8dff4523cb91e40869070bd02aa91fe | [
"BSD-3-Clause"
] | 5 | 2021-10-04T08:39:21.000Z | 2022-03-30T07:30:57.000Z | """
NGSIv2 models for context broker interaction
"""
import json
from typing import Any, Type, List, Dict, Union, Optional, Set, Tuple
from aenum import Enum
from pydantic import \
BaseModel, \
create_model, \
Field, \
validator
from filip.models.ngsi_v2.base import \
EntityPattern, \
Expression, \
BaseAttribute, \
BaseValueAttribute, \
BaseNameAttribute
from filip.models.base import DataType, FiwareRegex
class GetEntitiesOptions(str, Enum):
    """Representation modes ("options" URI parameter) for entity queries."""
    # aenum: each member is (value, __doc__); the second element becomes the
    # member's docstring.
    _init_ = 'value __doc__'
    NORMALIZED = "normalized", "Normalized message representation"
    KEY_VALUES = "keyValues", "Key value message representation." \
                              "This mode represents the entity " \
                              "attributes by their values only, leaving out " \
                              "the information about type and metadata. " \
                              "See example " \
                              "below." \
                              "Example: " \
                              "{" \
                              " 'id': 'R12345'," \
                              " 'type': 'Room'," \
                              " 'temperature': 22" \
                              "}"
    VALUES = "values", "Key value message representation. " \
                       "This mode represents the entity as an array of " \
                       "attribute values. Information about id and type is " \
                       "left out. See example below. The order of the " \
                       "attributes in the array is specified by the attrs " \
                       "URI param (e.g. attrs=branch,colour,engine). " \
                       "If attrs is not used, the order is arbitrary. " \
                       "Example:" \
                       "[ 'Ford', 'black', 78.3 ]"
    UNIQUE = 'unique', "unique mode. This mode is just like values mode, " \
                       "except that values are not repeated"
class ContextAttribute(BaseAttribute, BaseValueAttribute):
    """
    Model for an attribute is represented by a JSON object with the following
    syntax:
    The attribute value is specified by the value property, whose value may
    be any JSON datatype.
    The attribute NGSI type is specified by the type property, whose value
    is a string containing the NGSI type.
    The attribute metadata is specified by the metadata property. Its value
    is another JSON object which contains a property per metadata element
    defined (the name of the property is the name of the metadata element).
    Each metadata element, in turn, is represented by a JSON object
    containing the following properties:
    Values of entity attributes. For adding it you need to nest it into a
    dict in order to give it a name.
    Example:
        >>> data = {"value": <...>,
                    "type": <...>,
                    "metadata": <...>}
        >>> attr = ContextAttribute(**data)
    """
    # All fields and validation are inherited from the two base mixins;
    # this class only combines them under the NGSIv2 attribute name.
    pass
class NamedContextAttribute(ContextAttribute, BaseNameAttribute):
    """
    Context attributes are properties of context entities. For example, the
    current speed of a car could be modeled as attribute current_speed of entity
    car-104.
    In the NGSI data model, attributes have an attribute name, an attribute type
    an attribute value and metadata.
    """
    # Adds the 'name' field (from BaseNameAttribute) to ContextAttribute so
    # the attribute can stand alone outside an entity mapping.
    pass
class ContextEntityKeyValues(BaseModel):
    """
    Base Model for an entity is represented by a JSON object with the following
    syntax.
    The entity id is specified by the object's id property, whose value
    is a string containing the entity id.
    The entity type is specified by the object's type property, whose value
    is a string containing the entity's type name.
    """
    id: str = Field(
        ...,
        title="Entity Id",
        description="Id of an entity in an NGSI context broker. Allowed "
                    "characters are the ones in the plain ASCII set, except "
                    "the following ones: control characters, "
                    "whitespace, &, ?, / and #.",
        example='Bcn-Welt',
        max_length=256,
        min_length=1,
        regex=FiwareRegex.standard.value,  # Make it FIWARE-Safe
        allow_mutation=False
    )
    type: str = Field(
        ...,
        title="Entity Type",
        # BUGFIX: this description previously duplicated the "Entity Id"
        # text ("Id of an entity...") by copy-paste.
        description="Type of an entity in an NGSI context broker. "
                    "Allowed characters are the ones in the plain ASCII set, "
                    "except the following ones: control characters, "
                    "whitespace, &, ?, / and #.",
        example="Room",
        max_length=256,
        min_length=1,
        regex=FiwareRegex.standard.value,  # Make it FIWARE-Safe
        allow_mutation=False
    )
    class Config:
        """
        Pydantic config
        """
        extra = 'allow'
        validate_all = True
        validate_assignment = True
class PropertyFormat(str, Enum):
    """
    Format to decide if properties of ContextEntity class are returned as
    List of NamedContextAttributes or as Dict of ContextAttributes.
    """
    LIST = 'list'  # -> List[NamedContextAttribute]
    DICT = 'dict'  # -> Dict[str, ContextAttribute]
class ContextEntity(ContextEntityKeyValues):
    """
    Context entities, or simply entities, are the center of gravity in the
    FIWARE NGSI information model. An entity represents a thing, i.e., any
    physical or logical object (e.g., a sensor, a person, a room, an issue in
    a ticketing system, etc.). Each entity has an entity id.
    Furthermore, the type system of FIWARE NGSI enables entities to have an
    entity type. Entity types are semantic types; they are intended to describe
    the type of thing represented by the entity. For example, a context
    entity with id sensor-365 could have the type temperatureSensor.
    Each entity is uniquely identified by the combination of its id and type.
    The entity id is specified by the object's id property, whose value
    is a string containing the entity id.
    The entity type is specified by the object's type property, whose value
    is a string containing the entity's type name.
    Entity attributes are specified by additional properties, whose names are
    the name of the attribute and whose representation is described in the
    "ContextAttribute"-model. Obviously, id and type are
    not allowed to be used as attribute names.
    Example::
        >>> data = {'id': 'MyId',
                    'type': 'MyType',
                    'my_attr': {'value': 20, 'type': 'Number'}}
        >>> entity = ContextEntity(**data)
    """
    def __init__(self, id: str, type: str, **data):
        """Create an entity; extra keyword args become ContextAttributes."""
        # There is currently no validation for extra fields
        data.update(self._validate_attributes(data))
        super().__init__(id=id, type=type, **data)
    class Config:
        """
        Pydantic config
        """
        # 'allow' keeps unknown fields so attributes can be stored as extras.
        extra = 'allow'
        validate_all = True
        validate_assignment = True
    @classmethod
    def _validate_attributes(cls, data: Dict):
        """Parse every non-reserved key of ``data`` into a ContextAttribute."""
        attrs = {key: ContextAttribute.parse_obj(attr) for key, attr in
                 data.items() if key not in ContextEntity.__fields__}
        return attrs
def add_attributes(self, attrs: Union[Dict[str, ContextAttribute],
List[NamedContextAttribute]]) -> None:
"""
Add attributes (properties, relationships) to entity
Args:
attrs: Dict[str, ContextAttribute]: {NAME for attr : Attribute}
or
List[NamedContextAttribute]
Returns:
None
"""
if isinstance(attrs, list):
attrs = {attr.name: ContextAttribute(**attr.dict(exclude={'name'}))
for attr in attrs}
for key, attr in attrs.items():
self.__setattr__(name=key, value=attr)
    def get_attributes(
            self,
            whitelisted_attribute_types: Optional[List[DataType]] = None,
            blacklisted_attribute_types: Optional[List[DataType]] = None,
            response_format: Union[str, PropertyFormat] = PropertyFormat.LIST) \
            -> Union[List[NamedContextAttribute], Dict[str, ContextAttribute]]:
        """
        Get attributes or a subset from the entity.

        Args:
            whitelisted_attribute_types: Optional list, if given only
                attributes matching one of the types are returned
            blacklisted_attribute_types: Optional list, if given all
                attributes are returned that do not match a list entry
            response_format: Wanted result format,
                    List -> list of NamedContextAttributes
                    Dict -> dict of {name: ContextAttribute}
        Raises:
            AssertionError, if both a white and a black list is given
        Returns:
            List[NamedContextAttribute] or Dict[str, ContextAttribute]
        """
        response_format = PropertyFormat(response_format)
        # White and black list are mutually exclusive filters.
        assert whitelisted_attribute_types is None or \
               blacklisted_attribute_types is None,\
            "Only whitelist or blacklist is allowed"
        if whitelisted_attribute_types is not None:
            attribute_types = whitelisted_attribute_types
        elif blacklisted_attribute_types is not None:
            attribute_types = [att_type for att_type in list(DataType)
                               if att_type not in blacklisted_attribute_types]
        else:
            # No filter given: accept every known DataType.
            attribute_types = [att_type for att_type in list(DataType)]
        # Non-reserved fields whose 'type' matches the filter are attributes.
        if response_format == PropertyFormat.DICT:
            return {key: ContextAttribute(**value)
                    for key, value in self.dict().items()
                    if key not in ContextEntity.__fields__
                    and value.get('type') in
                    [att.value for att in attribute_types]}
        else:
            return [NamedContextAttribute(name=key, **value)
                    for key, value in self.dict().items()
                    if key not in ContextEntity.__fields__
                    and value.get('type') in
                    [att.value for att in attribute_types]]
def update_attribute(self,
attrs: Union[Dict[str, ContextAttribute],
List[NamedContextAttribute]]) -> None:
"""
Update attributes of an entity. Overwrite the current held value
for the attribute with the value contained in the corresponding given
attribute
Args:
attrs: List of NamedContextAttributes,
Dict of {attribute_name: ContextAttribute}
Raises:
NameError, if the attribute does not currently exists in the entity
Returns:
None
"""
if isinstance(attrs, list):
attrs = {attr.name: ContextAttribute(**attr.dict(exclude={'name'}))
for attr in attrs}
existing_attribute_names = self.get_attribute_names()
for key, attr in attrs.items():
if key not in existing_attribute_names:
raise NameError
self.__setattr__(name=key, value=attr)
def get_attribute_names(self) -> Set[str]:
"""
Returns a set with all attribute names of this entity
Returns:
Set[str]
"""
return {key for key in self.dict()
if key not in ContextEntity.__fields__}
    def delete_attributes(self, attrs: Union[Dict[str, ContextAttribute],
                                        List[NamedContextAttribute],
                                        List[str]]):
        """
        Delete the given attributes from the entity

        Args:
            attrs: - Dict {name: ContextAttribute}
                   - List[NamedContextAttribute]
                   - List[str] -> names of attributes
        Raises:
            Exception: if one of the given attrs does not represent an
                       existing argument
        """
        # Normalize all three accepted input shapes to a list of names.
        names: List[str] = []
        if isinstance(attrs, list):
            for entry in attrs:
                if isinstance(entry, str):
                    names.append(entry)
                elif isinstance(entry, NamedContextAttribute):
                    names.append(entry.name)
        else:
            names.extend(list(attrs.keys()))
        for name in names:
            # delattr raises if the attribute does not exist on the entity.
            delattr(self, name)
def get_attribute(self, attribute_name: str) -> NamedContextAttribute:
"""
Get the attribute of the entity with the given name
Args:
attribute_name (str): Name of attribute
Raises:
KeyError, if no attribute with given name exists
Returns:
NamedContextAttribute
"""
for attr in self.get_attributes():
if attr.name == attribute_name:
return attr
raise KeyError
    def get_properties(
            self,
            response_format: Union[str, PropertyFormat] = PropertyFormat.LIST)\
            -> Union[List[NamedContextAttribute], Dict[str, ContextAttribute]]:
        """
        Returns all attributes of the entity that are not of type Relationship,
        and are not auto generated command attributes

        Args:
            response_format: Wanted result format,
                    List -> list of NamedContextAttributes
                    Dict -> dict of {name: ContextAttribute}
        Returns:
            [NamedContextAttribute] or {name: ContextAttribute}
        """
        # Start with everything that is not a relationship ...
        pre_filtered_attrs = self.get_attributes(blacklisted_attribute_types=[
            DataType.RELATIONSHIP], response_format=PropertyFormat.LIST)
        # ... then drop the three attributes that make up each command.
        all_command_attributes_names = set()
        for command in self.get_commands():
            (c, c_status, c_info) = self.get_command_triple(command.name)
            all_command_attributes_names.update([c.name,
                                                 c_status.name,
                                                 c_info.name])
        property_attributes = []
        for attr in pre_filtered_attrs:
            if attr.name not in all_command_attributes_names:
                property_attributes.append(attr)
        if response_format == PropertyFormat.LIST:
            return property_attributes
        else:
            return {p.name: ContextAttribute(**p.dict(exclude={'name'}))
                    for p in property_attributes}
    def get_relationships(
            self,
            response_format: Union[str, PropertyFormat] = PropertyFormat.LIST)\
            -> Union[List[NamedContextAttribute], Dict[str, ContextAttribute]]:
        """
        Get all relationships of the context entity

        Args:
            response_format: Wanted result format,
                    List -> list of NamedContextAttributes
                    Dict -> dict of {name: ContextAttribute}
        Returns:
            [NamedContextAttribute] or {name: ContextAttribute}
        """
        # Relationships are simply attributes whose NGSI type is Relationship.
        return self.get_attributes(whitelisted_attribute_types=[
            DataType.RELATIONSHIP], response_format=response_format)
    def get_commands(
            self,
            response_format: Union[str, PropertyFormat] = PropertyFormat.LIST)\
            -> Union[List[NamedContextAttribute], Dict[str, ContextAttribute]]:
        """
        Get all commands of the context entity. Only works if the commands
        were autogenerated by Fiware from an Device.

        Args:
            response_format: Wanted result format,
                    List -> list of NamedContextAttributes
                    Dict -> dict of {name: ContextAttribute}
        Returns:
            [NamedContextAttribute] or {name: ContextAttribute}
        """
        # if an attribute with name n is a command, its type does not need to
        # be COMMAND.
        # But the attributes name_info (type: commandResult) and
        # name_status(type: commandStatus) need to exist. (Autogenerated)
        # Search all attributes of type commandStatus, check for each if a
        # corresponding _info exists and if also a fitting attribute exists
        # we know: that is a command.
        commands = []
        for status_attribute in self.get_attributes(
                whitelisted_attribute_types=[DataType.COMMAND_STATUS]):
            # '<base>_status' is the naming convention for the status attr.
            if not status_attribute.name.split('_')[-1] == "status":
                continue
            base_name = status_attribute.name[:-7]
            try:
                info_attribute = self.get_attribute(f'{base_name}_info')
                if not info_attribute.type == DataType.COMMAND_RESULT:
                    continue
                attribute = self.get_attribute(base_name)
                commands.append(attribute)
            except KeyError:
                # Missing companion attribute -> not a command triple.
                continue
        if response_format == PropertyFormat.LIST:
            return commands
        else:
            return {c.name: ContextAttribute(**c.dict(exclude={'name'}))
                    for c in commands}
def get_command_triple(self, command_attribute_name: str)\
-> Tuple[NamedContextAttribute, NamedContextAttribute,
NamedContextAttribute]:
"""
Returns for a given command attribute name all three corresponding
attributes as triple
Args:
command_attribute_name: Name of the command attribute
Raises:
KeyError, if the given name does not belong to a command attribute
Returns:
(Command, Command_status, Command_info)
"""
commands = self.get_commands(response_format=PropertyFormat.DICT)
if command_attribute_name not in commands:
raise KeyError
command = self.get_attribute(command_attribute_name)
# as the given name was found as a valid command, we know that the
# status and info attributes exist correctly
command_status = self.get_attribute(f'{command_attribute_name}_status')
command_info = self.get_attribute(f'{command_attribute_name}_info')
return command, command_status, command_info
def create_context_entity_model(name: str = None,
                                data: Dict = None,
                                validators: Dict[str, Any] = None) -> \
        Type['ContextEntity']:
    r"""
    Creates a ContextEntity-Model from a dict:

    Args:
        name: name of the model
        data: dictionary containing the data structure
        validators (optional): validators for the new model
    Example:
        >>> def username_alphanumeric(cls, value):
                assert value.isalnum(), 'must be alphanumeric'
                return value

        >>> model = create_context_entity_model(
                        name='MyModel',
                        data={
                            'id': 'MyId',
                            'type':'MyType',
                            'temp': 'MyProperty'},
                        validators={'validate_test': validator('temp')(
                            username_alphanumeric)})

    Returns:
        ContextEntity
    """
    # Every non-reserved key becomes a required ContextAttribute field.
    properties = {key: (ContextAttribute, ...) for key in data.keys() if
                  key not in ContextEntity.__fields__}
    model = create_model(
        __model_name=name or 'GeneratedContextEntity',
        __base__=ContextEntity,
        __validators__=validators or {},
        **properties
    )
    return model
class Query(BaseModel):
    """
    Model for queries
    """
    entities: List[EntityPattern] = Field(
        description="a list of entities to search for. Each element is "
                    "represented by a JSON object"
    )
    attrs: Optional[List[str]] = Field(
        default=None,
        description="List of attributes to be provided "
                    "(if not specified, all attributes)."
    )
    expression: Optional[Expression] = Field(
        default=None,
        description="An expression composed of q, mq, georel, geometry and "
                    "coords "
    )
    metadata: Optional[List[str]] = Field(
        default=None,
        description='a list of metadata names to include in the response. '
                    'See "Filtering out attributes and metadata" section for '
                    'more detail.'
    )
class ActionType(str, Enum):
    """
    Options for queries
    """
    # aenum: each member is (value, __doc__); the docstring describes the
    # NGSIv2 batch endpoint the action maps to.
    _init_ = 'value __doc__'
    APPEND = "append", "maps to POST /v2/entities (if the entity does not " \
                       "already exist) or POST /v2/entities/<id>/attrs (if " \
                       "the entity already exists). "
    APPEND_STRICT = "appendStrict", "maps to POST /v2/entities (if the " \
                                    "entity does not already exist) or POST " \
                                    "/v2/entities/<id>/attrs?options=append " \
                                    "(if the entity already exists)."
    UPDATE = "update", "maps to PATCH /v2/entities/<id>/attrs."
    DELETE = "delete", "maps to DELETE /v2/entities/<id>/attrs/<attrName> on " \
                       "every attribute included in the entity or to DELETE " \
                       "/v2/entities/<id> if no attribute were included in " \
                       "the entity."
    REPLACE = "replace", "maps to PUT /v2/entities/<id>/attrs"
class Update(BaseModel):
    """
    Model for update action
    """
    action_type: Union[ActionType, str] = Field(
        alias='actionType',
        description="actionType, to specify the kind of update action to do: "
                    "either append, appendStrict, update, delete, or replace. "
    )
    entities: List[ContextEntity] = Field(
        description="an array of entities, each entity specified using the "
                    "JSON entity representation format "
    )
    @validator('action_type')
    def check_action_type(cls, action):
        """
        validates action_type

        Args:
            action: field action_type
        Returns:
            action_type
        """
        # Coerces plain strings to the ActionType enum; raises on bad values.
        return ActionType(action)
class Command(BaseModel):
    """
    Class for sending commands to IoT Devices.
    Note that the command must be registered via an IoT-Agent. Internally
    FIWARE uses its registration mechanism in order to connect the command
    with an IoT-Device
    """
    # `const=True` pins the attribute type: callers cannot override it.
    type: DataType = Field(default=DataType.COMMAND,
                           description="Command must have the type command",
                           const=True)
    # Arbitrary payload; must be JSON-serializable (checked below).
    value: Any = Field(description="Any json serializable command that will "
                                   "be forwarded to the connected IoT device")

    @validator("value")
    def check_value(cls, value):
        """
        Check if value is json serializable

        Args:
            value: value field
        Returns:
            value
        Raises:
            TypeError: if `json.dumps` cannot serialize the value.
        """
        # Serialization is done purely for validation; the result is discarded.
        json.dumps(value)
        return value
class NamedCommand(Command):
    """
    Class for sending command to IoT-Device.
    Extend :class: Command with command Name
    """
    # Command name: 1-256 characters, constrained by
    # `FiwareRegex.string_protect` (defined elsewhere in the package).
    name: str = Field(
        description="Name of the command",
        max_length=256,
        min_length=1,
        regex=FiwareRegex.string_protect.value
    )
| 36.395023 | 80 | 0.58025 |
e0131dcbbaea0684712d83b172147dc815167a7a | 1,634 | py | Python | Chapter09/myshop/cart/views.py | sabin-web/Django-3-by-Example | a0239c954d66fee190014fbd3fa975ddb6eeba17 | [
"MIT"
] | 628 | 2019-11-13T14:13:40.000Z | 2022-03-30T19:02:05.000Z | Chapter09/myshop/cart/views.py | HAKN1999/Django-3-by-Example | a0239c954d66fee190014fbd3fa975ddb6eeba17 | [
"MIT"
] | 96 | 2020-04-17T17:35:33.000Z | 2022-02-17T09:25:06.000Z | Chapter09/myshop/cart/views.py | HAKN1999/Django-3-by-Example | a0239c954d66fee190014fbd3fa975ddb6eeba17 | [
"MIT"
] | 782 | 2019-10-15T07:29:27.000Z | 2022-03-30T17:25:08.000Z | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from shop.recommender import Recommender
from coupons.forms import CouponApplyForm
from .cart import Cart
from .forms import CartAddProductForm
@require_POST
def cart_add(request, product_id):
    """Add the given product to the session cart, then show the cart page."""
    session_cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    submitted = CartAddProductForm(request.POST)
    if submitted.is_valid():
        data = submitted.cleaned_data
        session_cart.add(
            product=product,
            quantity=data['quantity'],
            override_quantity=data['override'],
        )
    return redirect('cart:cart_detail')
@require_POST
def cart_remove(request, product_id):
    """Drop the given product from the session cart, then show the cart page."""
    session_cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    session_cart.remove(product)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the cart page with per-row quantity forms, a coupon form and
    product recommendations."""
    session_cart = Cart(request)

    # Attach a pre-filled quantity form to every cart row.
    for row in session_cart:
        row['update_quantity_form'] = CartAddProductForm(
            initial={'quantity': row['quantity'], 'override': True})

    coupon_form = CouponApplyForm()
    products_in_cart = [row['product'] for row in session_cart]
    suggestions = Recommender().suggest_products_for(
        products_in_cart, max_results=4)

    context = {
        'cart': session_cart,
        'coupon_apply_form': coupon_form,
        'recommended_products': suggestions,
    }
    return render(request, 'cart/detail.html', context)
| 34.765957 | 96 | 0.643207 |
42fbf235bb5c9e3fd727ef4546e839852e2a56ff | 29,428 | py | Python | tensorflow/lite/python/convert.py | exynerve/tensorflow | 60f8979fcdb3ba4b0ed4c45d4ed64d471c619882 | [
"Apache-2.0"
] | 9 | 2019-06-05T06:48:07.000Z | 2020-09-29T07:08:02.000Z | tensorflow/lite/python/convert.py | exynerve/tensorflow | 60f8979fcdb3ba4b0ed4c45d4ed64d471c619882 | [
"Apache-2.0"
] | 3 | 2021-08-25T15:06:34.000Z | 2022-02-10T02:50:24.000Z | tensorflow/lite/python/convert.py | exynerve/tensorflow | 60f8979fcdb3ba4b0ed4c45d4ed64d471c619882 | [
"Apache-2.0"
] | 3 | 2019-06-28T02:28:27.000Z | 2021-07-06T08:16:19.000Z | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.spawn
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
_quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]


def _requires_input_stats(toco_flags):
  """Whether quantization statistics must accompany the model inputs.

  Stats are needed when either the inference type or the inference input
  type is quantized, unless post-training quantization is in effect.
  """
  wants_quantized_io = (
      toco_flags.inference_type in _quantized_inference_types or
      toco_flags.inference_input_type in _quantized_inference_types)
  return wants_quantized_io and not toco_flags.post_training_quantize
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Empty string means "run the converter in-process" (see
  # `toco_convert_protos`, which checks `not _toco_from_proto_bin`).
  _toco_from_proto_bin = ""
else:
  _toco_from_proto_bin = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos")

# Fall back to the console-script name on PATH when the resolved file does
# not exist (e.g. in a pip installation).
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
  _toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return six.ensure_text(output)
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """
  # Convert model using TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"

  # Convert model using TensorFlow ops. Not all TensorFlow ops are available.
  # WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"

  # Convert model using only TensorFlow Lite quantized int8 operations.
  # Specifying this will throw an error for operations that do not yet have
  # quantized implementations.
  TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"

  # Convert model using only TensorFlow Lite operations with quantized int8
  # weights, int16 activations and int64 bias.
  # Specifying this will throw an error for operations that do not yet have
  # quantized implementations.
  # This quantization mode may be used in models for super-resolution,
  # audio signal processing or image de-noising. It improves accuracy
  # significantly, but only slightly increases the model size.
  # WARNING: These ops are currently experimental and have not yet been
  # finalized.
  # They are only compatible with CPU execution, and have not been optimized for
  # production.
  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = \
    "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"

  def __str__(self):
    # The printable form is the member's string value (used by
    # `get_options` below).
    return str(self.value)

  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return [str(option) for option in list(OpsSet)]
class ConverterError(Exception):
  """Raised when an error occurs during model conversion.

  Raised both by the in-process MLIR conversion path and by the
  out-of-process TOCO path (missing binary, non-zero exit code).
  """
  # No body needed beyond the docstring; the redundant `pass` was removed.
def mlir_quantize(input_data_str,
                  disable_per_channel=False,
                  fully_quantize=False,
                  inference_type=_types_pb2.INT8,
                  enable_numeric_verify=False):
  """Quantize `input_data_str` with calibration results.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model with
      calibration results).
    disable_per_channel: Bool indicating whether to do per-channel or per-tensor
      quantization
    fully_quantize: Bool indicating whether to fully quantize the model. Besides
      model body, the input/output will be quantized as well.
    inference_type: Data type for the activations. The default value is int8.
    enable_numeric_verify: Experimental. Subject to change. Bool indicating
      whether to add NumericVerify ops into the debug mode quantized model.

  Returns:
    Quantized model in serialized form (e.g. a TFLITE model) with floating-point
    inputs and outputs.
  """
  # Thin pass-through: all work happens in the native MLIR quantizer;
  # arguments are forwarded positionally in the wrapper's expected order.
  return wrap_toco.wrapped_experimental_mlir_quantize(input_data_str,
                                                      disable_per_channel,
                                                      fully_quantize,
                                                      inference_type,
                                                      enable_numeric_verify)
def mlir_sparsify(input_data_str):
  """Sparsify `input_data_str` to encode sparse tensor with proper format.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model).

  Returns:
    Sparsified model in serialized form (e.g. a TFLITE model).
  """
  # Thin pass-through to the native MLIR sparsifier.
  return wrap_toco.wrapped_experimental_mlir_sparsify(input_data_str)
def register_custom_opdefs(custom_opdefs_list):
  """Register the given custom opdefs to the TensorFlow global op registry.

  Args:
    custom_opdefs_list: String representing the custom ops OpDefs that are
      included in the GraphDef.

  Returns:
    True if the registration is successfully completed.
  """
  # Registration happens in native code; the Python side only forwards the
  # serialized OpDef strings.
  return wrap_toco.wrapped_register_custom_opdefs(custom_opdefs_list)
def toco_convert_protos(model_flags_str,
                        toco_flags_str,
                        input_data_str,
                        debug_info_str=None,
                        enable_mlir_converter=False):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.compat.v1.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information. (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of the default
      TOCO conversion. (default False)

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).

  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # Historically, TOCO conversion failures would trigger a crash, so we would
  # attempt to run the converter out-of-process. The MLIR conversion pipeline
  # surfaces errors instead, and can be safely run in-process.
  if enable_mlir_converter or not _toco_from_proto_bin:
    try:
      model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
                                                 toco_flags_str, input_data_str,
                                                 debug_info_str,
                                                 enable_mlir_converter)
      return model_str
    except Exception as e:
      raise ConverterError(str(e))

  if distutils.spawn.find_executable(_toco_from_proto_bin) is None:
    raise ConverterError("""Could not find toco_from_protos binary, make sure
your virtualenv bin directory or pip local bin directory is in your path.
In particular, if you have installed TensorFlow with --user, make sure you
add the install directory to your path.
For example:
Linux: export PATH=$PATH:~/.local/bin/
Mac: export PATH=$PATH:~/Library/Python/<version#>/bin
Alternative, use virtualenv.""")
  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  # FIX: `debug_filename` is tracked alongside the other temporaries so the
  # debug-info temp file is removed in the `finally` block instead of being
  # leaked on every out-of-process conversion.
  toco_filename, model_filename, input_filename, output_filename = (None, None,
                                                                    None, None)
  debug_filename = None
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_input, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_debug:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      debug_filename = fp_debug.name
      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(six.ensure_binary(input_data_str))
      debug_info_str = debug_info_str if debug_info_str else ""
      # if debug_info_str contains a "string value", then the call to
      # fp_debug.write(debug_info_str) will fail with the following error
      #
      # TypeError: a bytes-like object is required, not 'str'
      #
      # Some of the subtests within the "convert_test" unit-test fail
      # with the error shown above. So watch out for that scenario and
      # convert debug_info_str to bytes where needed
      if not isinstance(debug_info_str, bytes):
        fp_debug.write(debug_info_str.encode("utf-8"))
      else:
        fp_debug.write(debug_info_str)

    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name

    # Run
    cmd = [
        _toco_from_proto_bin,
        model_filename,
        toco_filename,
        input_filename,
        output_filename,
        "--debug_proto_file={}".format(debug_filename),
    ]
    if enable_mlir_converter:
      cmd.append("--enable_mlir_converter")
    cmdline = " ".join(cmd)
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files. TypeError is tolerated because any of
    # the names may still be None if an earlier step failed.
    for filename in [
        toco_filename, input_filename, model_filename, output_filename,
        debug_filename
    ]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        pass
def build_toco_flags(inference_type=dtypes.float32,
                     inference_input_type=None,
                     input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                     output_format=lite_constants.TFLITE,
                     default_ranges_stats=None,
                     drop_control_dependency=True,
                     reorder_across_fake_quant=False,
                     allow_custom_ops=False,
                     custom_opdefs=None,
                     post_training_quantize=False,
                     quantize_to_float16=False,
                     dump_graphviz_dir=None,
                     dump_graphviz_video=False,
                     target_ops=None,
                     conversion_summary_dir=None,
                     select_user_tf_ops=None,
                     **_):
  """Build the TOCO flags object from params.

  All parameters mirror the identically named arguments of
  `build_toco_convert_protos`; see that function's docstring for their
  meaning. The trailing `**_` intentionally swallows any extra keyword
  arguments so callers may pass a superset of flags.

  Returns:
    A populated `_toco_flags_pb2.TocoFlags` protocol buffer.
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    # Default the input type to the overall inference type.
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  if custom_opdefs:
    toco.custom_opdefs.extend(custom_opdefs)
  if select_user_tf_ops:
    toco.select_user_tf_ops.extend(select_user_tf_ops)
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    if OpsSet.SELECT_TF_OPS in set(target_ops):
      toco.enable_select_tf_ops = True
    # Force TF-select only when SELECT_TF_OPS is the *only* requested set.
    if set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.force_select_tf_ops = True
  return toco
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=dtypes.float32,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              custom_opdefs=None,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None,
                              saved_model_dir=None,
                              saved_model_version=0,
                              saved_model_tags=None,
                              saved_model_exported_names=None,
                              select_user_tf_ops=None):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read.
      (default TENSORFLOW_GRAPHDEF, must be in {TENSORFLOW_GRAPHDEF})
    input_shapes: Input array shape. (default None, must be None or a list of
      the same length as `input_tensors`.)
    output_format: Output file format. (default TFLITE, must be in
      {TFLITE, GRAPHVIZ_DOT})
    quantized_input_stats: Map of input tensor names to a tuple of floats
      representing the mean and standard deviation of the training data.
      (e.g., {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8
      or tf.uint8. (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    custom_opdefs: List of strings representing custom ops OpDefs that are
      included in the GraphDef. Required when using custom operations with the
      MLIR-based converter. (default None)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.
    saved_model_dir: Filepath of the saved model to be converted. This value
      will be non-empty only when the saved model import path will be used.
      Otherwises, the graph def-based conversion will be processed.
    saved_model_version: SavedModel file format version of The saved model file
      to be converted. This value will be set only when the SavedModel import
      path will be used.
    saved_model_tags: Set of string saved model tags, formatted in the
      comma-separated value. This value will be set only when the SavedModel
      import path will be used.
    saved_model_exported_names: Names to be exported (default: export all) when
      the saved model import path is on. This value will be set only when the
      SavedModel import path will be used.
    select_user_tf_ops: List of user's defined TensorFlow ops need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = build_toco_flags(inference_type, inference_input_type, input_format,
                          output_format, default_ranges_stats,
                          drop_control_dependency, reorder_across_fake_quant,
                          allow_custom_ops, custom_opdefs,
                          post_training_quantize, quantize_to_float16,
                          dump_graphviz_dir, dump_graphviz_video, target_ops,
                          conversion_summary_dir, select_user_tf_ops)
  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    if saved_model_dir:
      # SavedModel import path: tensor names are used verbatim.
      input_array.name = input_tensor.name
    else:
      input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = util.convert_dtype_to_tflite_type(
        input_tensor.dtype)

    # Attach per-input mean/std only when quantized inference needs them.
    if _requires_input_stats(toco) and quantized_input_stats:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]

    # Explicit `input_shapes` overrides the shapes read from the tensors.
    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]

    if shape.rank is not None:
      # Create shapes with -1 for unknown dimensions.
      dims = []
      for dim in shape:
        if (dim is None or
            (isinstance(dim, tensor_shape.Dimension) and dim.value is None)):
          dims.append(-1)
        else:
          dims.append(int(dim))
      input_array.shape.dims.extend(dims)
      input_array.shape.unknown_rank = False
    else:
      input_array.shape.unknown_rank = True

  for output_tensor in output_tensors:
    if saved_model_dir:
      model.output_arrays.append(output_tensor.name)
    else:
      model.output_arrays.append(util.get_tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays

  if saved_model_dir:
    model.saved_model_dir = saved_model_dir
  model.saved_model_version = saved_model_version
  if saved_model_tags:
    model.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    model.saved_model_exported_names.extend(saved_model_exported_names)

  return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, _ = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)

  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    if _requires_input_stats(toco_flags):
      # Quantized inference (without post-training quantization) requires
      # per-input mean/std statistics.
      if (("quantized_input_stats" not in kwargs) or
          (not kwargs["quantized_input_stats"])):
        raise ValueError(
            "The `quantized_input_stats` flag must be defined when either "
            "`inference_type` flag or `inference_input_type` flag is set to "
            "tf.int8 or tf.uint8.")
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][idx]
    input_array.name = name
    input_array.shape.dims.extend(list(map(int, shape)))

  for name in output_arrays:
    model_flags.output_arrays.append(name)

  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      enable_mlir_converter=enable_mlir_converter)
  return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
                      enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, debug_info = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  # Serialize the debug info only when present; the converter accepts None.
  debug_info_str = debug_info.SerializeToString() if debug_info else None
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      debug_info_str=debug_info_str,
      enable_mlir_converter=enable_mlir_converter)
  return data
def convert_saved_model(saved_model_dir=None,
                        saved_model_version=0,
                        saved_model_tags=None,
                        saved_model_exported_names=None,
                        **kwargs):
  """Converts a saved_model using TF Lite converter.

  Always uses the MLIR-based converter (no graph-def data is passed).

  Args:
    saved_model_dir: Filepath of the saved model to be converted.
    saved_model_version: SavedModel file format version of the saved model.
    saved_model_tags: Set of string saved model tags.
    saved_model_exported_names: Names to be exported (default: export all).
    **kwargs: Forwarded to `build_toco_flags`.

  Returns:
    The converted data (e.g. a TFLITE model flatbuffer) in serialized form.
  """
  model_flags = _model_flags_pb2.ModelFlags()
  if saved_model_dir:
    model_flags.saved_model_dir = saved_model_dir
  model_flags.saved_model_version = saved_model_version
  if saved_model_tags:
    model_flags.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    model_flags.saved_model_exported_names.extend(saved_model_exported_names)
  toco_flags = build_toco_flags(**kwargs)
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      None,  # input_data, unused
      None,  # debug_info_str, unused
      enable_mlir_converter=True)
  return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details). This function has
  been deprecated. Please use `tf.lite.TFLiteConverter` instead.

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Pop (not get) the flag: leaving "enable_mlir_converter" inside **kwargs
  # would make `toco_convert_impl` receive it both positionally and as a
  # keyword, raising "got multiple values for argument" at call time.
  enable_mlir_converter = kwargs.pop("enable_mlir_converter", False)
  return toco_convert_impl(input_data, input_tensors, output_tensors,
                           enable_mlir_converter, *args, **kwargs)
| 42.897959 | 80 | 0.705281 |
0499123883e00444790840196274b421e5bf78de | 1,801 | py | Python | translator/__main__.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | translator/__main__.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | translator/__main__.py | marco-nicola/python-translator | 6a559874c9899e52a4cac9c2954dcca6b638f002 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Marco Nicola
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from concurrent import futures
import grpc
from .config import Config
from .models_manager import ModelsManager
from .servicer import Servicer
from .grpcapi import api_pb2_grpc
def main() -> None:
    """Parse CLI args, load the translation models and serve gRPC requests."""
    arg_parser = argparse.ArgumentParser(description='Run the translator server.')
    arg_parser.add_argument('-c', '--config', dest='config', required=True,
                            help='path to YAML configuration file')
    cli_args = arg_parser.parse_args()

    cfg = Config.from_yaml_file(cli_args.config)
    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        level=cfg.log_level)

    # Load every configured model before accepting any traffic.
    model_manager = ModelsManager(cfg)
    model_manager.load_models()

    grpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=cfg.max_workers))
    api_pb2_grpc.add_ApiServicer_to_server(Servicer(model_manager), grpc_server)

    bind_address = f'{cfg.host}:{cfg.port}'
    grpc_server.add_insecure_port(bind_address)
    logging.info(f'serving on {bind_address}')

    grpc_server.start()
    try:
        grpc_server.wait_for_termination()
    except KeyboardInterrupt:
        logging.info('KeyboardInterrupt')
    logging.info('Bye!')


if __name__ == '__main__':
    main()
| 28.587302 | 78 | 0.71849 |
199043b3b781b077d85fcf10a0c69c0f1bba7765 | 680 | py | Python | socket_utilities.py | kznts9v-1lya/tracert | 73ee8062c525d16e944c0b52e477a5066961e96c | [
"MIT"
] | null | null | null | socket_utilities.py | kznts9v-1lya/tracert | 73ee8062c525d16e944c0b52e477a5066961e96c | [
"MIT"
] | null | null | null | socket_utilities.py | kznts9v-1lya/tracert | 73ee8062c525d16e944c0b52e477a5066961e96c | [
"MIT"
] | null | null | null | import socket
import struct
__all__ = ["init_udp_socket", "init_icmp_socket"]
def init_udp_socket(time_to_live):
    """
    Create a UDP socket whose outgoing packets carry the given IP TTL.
    """
    probe_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                 socket.IPPROTO_UDP)
    probe_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, time_to_live)
    return probe_socket
def init_icmp_socket(port, timeout_sec=5):
    """
    Initializes a raw ICMP socket bound to all interfaces on `port`.

    Requires elevated privileges (root / CAP_NET_RAW) because it opens a
    raw socket.

    Args:
        port: local port number to bind to.
        timeout_sec: receive timeout in whole seconds. Defaults to 5,
            matching the previously hard-coded value.

    Returns:
        The configured, bound socket.
    """
    icmp_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    # SO_RCVTIMEO takes a struct timeval: (seconds, microseconds).
    timeout = struct.pack("ll", timeout_sec, 0)
    icmp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, timeout)
    icmp_socket.bind(("0.0.0.0", port))
    return icmp_socket
| 21.25 | 85 | 0.714706 |
2c8bab53f07f6eed519b8d38f7d265b1ef05afdc | 1,725 | py | Python | tests/integration_tests/test_group_routes.py | jay-aye-see-kay/mealie | 903a9d8861daea1b12a1986cb0bf6a442ec723eb | [
"MIT"
] | null | null | null | tests/integration_tests/test_group_routes.py | jay-aye-see-kay/mealie | 903a9d8861daea1b12a1986cb0bf6a442ec723eb | [
"MIT"
] | null | null | null | tests/integration_tests/test_group_routes.py | jay-aye-see-kay/mealie | 903a9d8861daea1b12a1986cb0bf6a442ec723eb | [
"MIT"
] | null | null | null | import json
import pytest
from fastapi.testclient import TestClient
from tests.app_routes import AppRoutes
@pytest.fixture
def group_data():
    # Minimal valid payload for creating a group through the API.
    return {"name": "Test Group"}
def test_create_group(api_client: TestClient, api_routes: AppRoutes, token):
    """POST /groups with a valid name returns 201 Created."""
    response = api_client.post(api_routes.groups, json={"name": "Test Group"}, headers=token)
    assert response.status_code == 201
def test_get_self_group(api_client: TestClient, api_routes: AppRoutes, token):
    """GET /groups succeeds and returns at least two groups."""
    response = api_client.get(api_routes.groups, headers=token)
    assert response.status_code == 200
    # At least the default/home group plus the group created by the earlier test.
    assert len(json.loads(response.text)) >= 2
def test_update_group(api_client: TestClient, api_routes: AppRoutes, token):
    """PUT /groups/2 applies the new payload and a follow-up GET reflects it."""
    # Full group schema as the API serializes it (camelCase keys).
    new_data = {
        "name": "New Group Name",
        "id": 2,
        "categories": [],
        "webhookUrls": [],
        "webhookTime": "00:00",
        "webhookEnable": False,
        "users": [],
        "mealplans": [],
        "shoppingLists": [],
    }
    # Test Update
    response = api_client.put(api_routes.groups_id(2), json=new_data, headers=token)
    assert response.status_code == 200
    # Validate Changes
    response = api_client.get(api_routes.groups, headers=token)
    all_groups = json.loads(response.text)
    # Pull out the group with id == 2; next() raises StopIteration if it is missing.
    id_2 = filter(lambda x: x["id"] == 2, all_groups)
    assert next(id_2) == new_data
def test_home_group_not_deletable(api_client: TestClient, api_routes: AppRoutes, token):
    """DELETE on the default/home group (id 1) is rejected with 400."""
    response = api_client.delete(api_routes.groups_id(1), headers=token)
    assert response.status_code == 400
def test_delete_group(api_client: TestClient, api_routes: AppRoutes, token):
    """DELETE on a non-default group (id 2) succeeds."""
    response = api_client.delete(api_routes.groups_id(2), headers=token)
    assert response.status_code == 200
| 29.237288 | 93 | 0.696812 |
7d1898af78f4c9f91de9658f61bbfc88d9b209a0 | 2,145 | py | Python | strux/helpers.py | justinchiu/strux | 364183c3d04c96d67dd91cce547cb4dd3661aa10 | [
"MIT"
] | 17 | 2020-09-03T15:42:43.000Z | 2022-03-10T23:34:41.000Z | strux/helpers.py | justinchiu/strux | 364183c3d04c96d67dd91cce547cb4dd3661aa10 | [
"MIT"
] | null | null | null | strux/helpers.py | justinchiu/strux | 364183c3d04c96d67dd91cce547cb4dd3661aa10 | [
"MIT"
] | 2 | 2020-09-18T21:30:53.000Z | 2022-02-16T02:52:19.000Z | import jax.numpy as np
import math
from .semirings import LogSemiring
import jax
class _Struct:
    """Base class for structured-prediction modules built on jax.

    Subclasses override ``_dp`` (the semiring dynamic program) and the
    parts conversion hooks; ``__init__`` wires up batched/jitted versions
    of them.
    """

    # Axes of the log-potential tensor that index sequence length
    # (used by ``resize`` to pad each to a power of two).
    length_axes = (0,)

    def __init__(self, semiring=LogSemiring):
        self.semiring = semiring

        # The ``if False`` branch keeps an un-jitted variant around for
        # debugging; only the ``else`` branch ever runs.
        if False:
            self.sum = (jax.vmap(self._dp, (0, 0)))
            self.marginals = (jax.grad(lambda *args: self.sum(*args).sum(0), 0))
            def fp(*args):
                v, extra = (jax.vmap(self._from_parts, (0,)))(*args)
                return v, extra[0]
            self.from_parts = fp
            self.to_parts = (jax.vmap(self._to_parts, (0, None, 0)))
        else:
            # Batched + jitted partition function; marginals come from its
            # gradient w.r.t. the log potentials (standard semiring trick).
            self.sum = jax.jit(jax.vmap(self._dp, (0, 0)))
            self.marginals = jax.jit(jax.grad(lambda *args: self.sum(*args).sum(0), 0))
            def fp(*args):
                v, extra = jax.jit(jax.vmap(self._from_parts, (0,)))(*args)
                return v, extra[0]
            self.from_parts = fp
            # ``extra`` (arg 1) is static so vmap maps only spans and lengths.
            self.to_parts = jax.jit(jax.vmap(self._to_parts, (0, None, 0)),
                                    static_argnums=1)

    # NOTE(review): defined without ``self`` — once bound, the instance is
    # passed as ``log_potentials``; presumably a stub subclasses replace.
    def _dp(log_potentials, length):
        pass

    @classmethod
    def resize(cls, log_potentials, batch=1):
        """Pad every length axis of *log_potentials* via ``pad_to_pow2``."""
        for j in cls.length_axes:
            log_potentials = pad_to_pow2(log_potentials, batch + j)
        return log_potentials

    # NOTE(review): ``batch_dims=[0]`` is a mutable default argument; it is
    # never mutated here but replacing it with a tuple would be safer.
    def score(self, potentials, parts, batch_dims=[0]):
        """Semiring product of potentials masked by the indicator ``parts``."""
        score = potentials * parts
        batch = tuple((score.shape[b] for b in batch_dims))
        return self.semiring.prod(score.reshape(batch + (-1,)))

    @staticmethod
    def _to_parts(spans, extra, lengths):
        # Identity by default; subclasses convert label structures to parts.
        return spans

    @staticmethod
    def _from_parts(spans):
        # Identity by default; the second element is placeholder "extra" data.
        return spans, 0
def pad_along_axis(array: np.ndarray, target_length: int, axis: int = 0):
    """Zero-pad *array* along *axis* so that dimension reaches *target_length*.

    The input is returned unchanged when it is already long enough.
    """
    extra = target_length - array.shape[axis]
    if extra <= 0:
        return array
    widths = [(0, extra) if dim == axis else (0, 0) for dim in range(array.ndim)]
    return np.pad(array, pad_width=widths, mode='constant', constant_values=0)
def pad_to_pow2(tensor, axis):
    """Zero-pad *tensor* along *axis* up to the next power of two.

    Bug fix: the original computed ``2 ** ceil(size)`` — i.e. two to the
    power of the axis *length* — which grows exponentially and is clearly
    not "the next power of two".  The intended value is
    ``2 ** ceil(log2(size))``.
    """
    size = tensor.shape[axis]
    if size <= 1:
        # log2 is undefined/negative here; 1 is the smallest power of two.
        new_size = 1
    else:
        new_size = int(2 ** math.ceil(math.log2(size)))
    return pad_along_axis(tensor, new_size, axis)
| 32.5 | 87 | 0.582751 |
c8786fe63a6d1bf6c2e6df36ca11b62f4c1f1c83 | 2,767 | py | Python | libs/imagesList.py | ytkachov/LabelVideoWithRecogintion | b2d1705613b135ef35f7ec59992dc832346bf9fb | [
"MIT"
] | null | null | null | libs/imagesList.py | ytkachov/LabelVideoWithRecogintion | b2d1705613b135ef35f7ec59992dc832346bf9fb | [
"MIT"
] | null | null | null | libs/imagesList.py | ytkachov/LabelVideoWithRecogintion | b2d1705613b135ef35f7ec59992dc832346bf9fb | [
"MIT"
] | null | null | null | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import time
import pickle
from libs.threading import *
from libs.utils import *
class ImagesList(QDockWidget):
    """Dock widget showing the image names of an image source.

    Selecting an image (double-click or the navigation methods) loads it
    from the source and broadcasts it via the ``image_changed`` signal.
    """

    # NOTE(review): the ``settings`` parameter is accepted but never used here.
    def __init__(self, title, settings, parent_window):
        super(ImagesList, self).__init__(title, parent_window)
        self._images_source = None       # object providing GetNames/GetIndex/GetImage
        self._current_index = 0          # index of the currently shown image
        self._image_list_widget = QListWidget()
        self._image_list_widget.itemDoubleClicked.connect(self._itemDoubleClicked)
        file_list_layout = QVBoxLayout()
        file_list_layout.setContentsMargins(0, 0, 0, 0)
        file_list_layout.addWidget(self._image_list_widget)
        file_list_container = QWidget()
        file_list_container.setLayout(file_list_layout)
        self.setObjectName(title)
        self.setWidget(file_list_container)
        self.setFeatures(QDockWidget.NoDockWidgetFeatures)
        parent_window.addDockWidget(Qt.RightDockWidgetArea, self)

    # Public methods
    def SetSource(self, images_source):
        """Attach an image source and repopulate the list with its names."""
        self._images_source = images_source
        self._image_list_widget.clear()
        names_list = self._images_source.GetNames()
        for imgname in names_list:
            item = QListWidgetItem(imgname)
            self._image_list_widget.addItem(item)

    def SaveCurrentImage(self) -> str:
        """Delegate saving of the current image to the source; returns its result."""
        return self._images_source.SaveCurrentImage()

    def SetImage(self, imgname):
        """Select, scroll to and load the image named *imgname* (no-op without a source)."""
        if not self._images_source:
            return
        self._current_index = self._images_source.GetIndex(imgname)
        item = self._image_list_widget.item(self._current_index)
        item.setSelected(True)
        self._image_list_widget.scrollToItem(item)
        self._loadItem(item)

    def SetPrevImage(self):
        """Step to the previous image if one exists."""
        if self._current_index - 1 >= 0:
            self._current_index -= 1
            item = self._image_list_widget.item(self._current_index)
            item.setSelected(True)
            self._image_list_widget.scrollToItem(item)
            self._loadItem(item)

    def SetNextImage(self):
        """Step to the next image if one exists."""
        if self._current_index + 1 < self._image_list_widget.count():
            self._current_index += 1
            item = self._image_list_widget.item(self._current_index)
            item.setSelected(True)
            self._image_list_widget.scrollToItem(item)
            self._loadItem(item)

    # signals
    # Emitted with the loaded image object whenever a new image is loaded.
    image_changed = pyqtSignal(object)

    # private methods
    def _itemDoubleClicked(self, item):
        # Double-click handler: sync the index and load the clicked entry.
        self._current_index = self._images_source.GetIndex(ustr(item.text()))
        self._loadItem(item)

    def _loadItem(self, item):
        # Fetch the image from the source; only emit when the fetch succeeded.
        name = ustr(item.text())
        res, image = self._images_source.GetImage(name)
        if res:
            self.image_changed.emit(image)
| 31.089888 | 82 | 0.675822 |
22fcb11114069a201043e1b60c4b97be1176c4f6 | 170 | py | Python | asyncio_fast_portscanner/__init__.py | Razikus/asyncio-fast-portscanner | 634bf125bb31864602b037d39c7ac6a739cc9998 | [
"MIT"
] | 3 | 2019-10-28T06:50:53.000Z | 2022-02-26T23:22:06.000Z | asyncio_fast_portscanner/__init__.py | Razikus/asyncio-fast-portscanner | 634bf125bb31864602b037d39c7ac6a739cc9998 | [
"MIT"
] | null | null | null | asyncio_fast_portscanner/__init__.py | Razikus/asyncio-fast-portscanner | 634bf125bb31864602b037d39c7ac6a739cc9998 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for asyncio-fast-portscanner."""
__author__ = """Adam Raźniewski"""
__email__ = 'adam@razniewski.eu'
__version__ = '0.0.1'
| 21.25 | 53 | 0.664706 |
eefb497492d3d2d1ff8372d353ac9065a101a76c | 4,849 | py | Python | aiida/cmdline/commands/cmd_data/cmd_bands.py | sshepherd637/aiida-core | 99fd841f33a5c2afa6a0c808c5e6ef9eff73a9df | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/cmdline/commands/cmd_data/cmd_bands.py | sshepherd637/aiida-core | 99fd841f33a5c2afa6a0c808c5e6ef9eff73a9df | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/cmdline/commands/cmd_data/cmd_bands.py | sshepherd637/aiida-core | 99fd841f33a5c2afa6a0c808c5e6ef9eff73a9df | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi data bands` command."""
import click
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.commands.cmd_data import cmd_show
from aiida.cmdline.commands.cmd_data.cmd_export import data_export
from aiida.cmdline.commands.cmd_data.cmd_list import list_options
from aiida.cmdline.params import arguments, options, types
from aiida.cmdline.utils import decorators, echo
from aiida.common.utils import Prettifier
# Column headers used by `verdi data bands list`.
LIST_PROJECT_HEADERS = ['ID', 'Formula', 'Ctime', 'Label']
# Output formats accepted by `verdi data bands export`.
EXPORT_FORMATS = [
    'agr', 'agr_batch', 'dat_blocks', 'dat_multicolumn', 'gnuplot', 'json', 'mpl_pdf', 'mpl_png', 'mpl_singlefile',
    'mpl_withjson'
]
VISUALIZATION_FORMATS = ['xmgrace']
@verdi_data.group('bands')
def bands():
    """Manipulate BandsData objects (band structures)."""
    # Click command group: the list/show/export subcommands are attached below.
# pylint: disable=too-many-arguments
@bands.command('list')
@decorators.with_dbenv()
@list_options
@options.WITH_ELEMENTS()
@options.WITH_ELEMENTS_EXCLUSIVE()
@options.FORMULA_MODE()
def bands_list(elements, elements_exclusive, raw, formula_mode, past_days, groups, all_users):
    """List BandsData objects."""
    from tabulate import tabulate
    from argparse import Namespace
    from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure
    # The query helper expects an argparse-style namespace, so we build one.
    args = Namespace()
    args.element = elements
    args.element_only = elements_exclusive
    args.formulamode = formula_mode
    args.past_days = past_days
    args.group_name = None
    if groups is not None:
        args.group_pk = [group.id for group in groups]
    else:
        args.group_pk = None
    args.all_users = all_users
    entry_list = get_bands_and_parents_structure(args)

    counter = 0
    bands_list_data = list()
    if not raw:
        bands_list_data.append(LIST_PROJECT_HEADERS)
    for entry in entry_list:
        # Flatten list-valued cells (e.g. multiple labels) into one string.
        for i, value in enumerate(entry):
            if isinstance(value, list):
                entry[i] = ','.join(value)
        # Pad short rows so every row has one cell per header column.
        for i in range(len(entry), len(LIST_PROJECT_HEADERS)):
            entry.append(None)
        counter += 1
    bands_list_data.extend(entry_list)
    if raw:
        echo.echo(tabulate(bands_list_data, tablefmt='plain'))
    else:
        echo.echo(tabulate(bands_list_data, headers='firstrow'))
        echo.echo(f'\nTotal results: {counter}\n')
@bands.command('show')
@arguments.DATA(type=types.DataParamType(sub_classes=('aiida.data:core.array.bands',)))
@options.VISUALIZATION_FORMAT(type=click.Choice(VISUALIZATION_FORMATS), default='xmgrace')
@decorators.with_dbenv()
def bands_show(data, fmt):
    """Visualize BandsData objects."""
    # Dispatch to the cmd_show helper named after the requested format.
    try:
        show_function = getattr(cmd_show, f'_show_{fmt}')
    except AttributeError:
        # echo_critical aborts the command, so show_function is never unbound here.
        echo.echo_critical(f'visualization format {fmt} is not supported')

    show_function(fmt, data)
@bands.command('export')
@arguments.DATUM(type=types.DataParamType(sub_classes=('aiida.data:core.array.bands',)))
@options.EXPORT_FORMAT(type=click.Choice(EXPORT_FORMATS), default='json')
@click.option(
    '--y-min-lim',
    type=click.FLOAT,
    default=None,
    help='The minimum value for the y axis.'
    ' Default: minimum of all bands'
)
@click.option(
    '--y-max-lim',
    type=click.FLOAT,
    default=None,
    help='The maximum value for the y axis.'
    ' Default: maximum of all bands'
)
@click.option(
    '-o',
    '--output',
    type=click.STRING,
    default=None,
    help='If present, store the output directly on a file '
    'with the given name. It is essential to use this option '
    'if more than one file needs to be created.'
)
@options.FORCE(help='If passed, overwrite files without checking.')
@click.option(
    '--prettify-format',
    default=None,
    type=click.Choice(Prettifier.get_prettifiers()),
    help='The style of labels for the prettifier'
)
@decorators.with_dbenv()
def bands_export(fmt, y_min_lim, y_max_lim, output, force, prettify_format, datum):
    """Export BandsData objects."""
    # Forward only the options the user actually supplied to the exporter.
    args = {}
    if y_min_lim is not None:
        args['y_min_lim'] = y_min_lim
    if y_max_lim is not None:
        args['y_max_lim'] = y_max_lim
    if prettify_format is not None:
        args['prettify_format'] = prettify_format
    data_export(datum, output, fmt, other_args=args, overwrite=force)
| 34.390071 | 115 | 0.660136 |
669bea80897f39699b13d24add05a5be4afee5a8 | 6,898 | py | Python | src/tutorials/craft_adversarial_examples.py | csce585-mlsystems/project | d150afd1d33528f7d91228f9e0b0dbb4b15cc926 | [
"MIT"
] | 2 | 2020-10-01T08:27:13.000Z | 2020-10-01T20:23:04.000Z | src/tutorials/craft_adversarial_examples.py | Jacob-L-Vincent/project-athena | d1d300e375941399f116cbaa4678a9ed7c6652db | [
"MIT"
] | 3 | 2020-09-29T13:57:24.000Z | 2020-10-01T20:26:03.000Z | src/tutorials/craft_adversarial_examples.py | csce585-mlsystems/project | d150afd1d33528f7d91228f9e0b0dbb4b15cc926 | [
"MIT"
] | null | null | null | """
A sample to generate adversarial examples in the context of white-box threat model.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
import sys
sys.path.append("../")
import argparse
import numpy as np
import os
import time
from matplotlib import pyplot as plt
from utils.model import load_lenet, load_pool
from utils.file import load_from_json
from utils.metrics import error_rate
from attacks.attack import generate
from models.athena import Ensemble, ENSEMBLE_STRATEGY
def generate_ae(model, data, labels, attack_configs,
                eot=False,
                save=False, output_dir=None):
    """
    Generate adversarial examples
    :param model: WeakDefense. The targeted model.
    :param data: array. The benign samples to generate adversarial for.
    :param labels: array or list. The true labels.
    :param attack_configs: dictionary. Attacks and corresponding settings.
    :param eot: boolean. True, to craft adaptive attacks using Expectation
        over Transformation when computing the loss.
    :param save: boolean. True, if save the adversarial examples.
    :param output_dir: str or path. Location to save the adversarial examples.
        It cannot be None when save is True.
    :return:
    """
    img_rows, img_cols = data.shape[1], data.shape[2]
    num_attacks = attack_configs.get("num_attacks")
    data_loader = (data, labels)

    # Collapse one-hot labels to class indices for the error-rate computation.
    if len(labels.shape) > 1:
        labels = np.asarray([np.argmax(p) for p in labels])

    # generate attacks one by one
    for id in range(num_attacks):
        # Attack settings are keyed "configs0", "configs1", ... in the json.
        key = "configs{}".format(id)
        attack_args = attack_configs.get(key)
        attack_args["eot"] = eot
        data_adv = generate(model=model,
                            data_loader=data_loader,
                            attack_args=attack_args
                            )
        # predict the adversarial examples
        predictions = model.predict(data_adv)
        predictions = np.asarray([np.argmax(p) for p in predictions])

        err = error_rate(y_pred=predictions, y_true=labels)
        print(">>> error rate:", err)

        # plotting some examples
        # NOTE(review): min(..., 0) is always 0, so plotting is effectively
        # disabled — presumably a leftover debug switch; confirm intent.
        num_plotting = min(data.shape[0], 0)
        for i in range(num_plotting):
            img = data_adv[i].reshape((img_rows, img_cols))
            plt.imshow(img, cmap='gray')
            title = '{}(EOT:{}): {}->{}'.format(attack_configs.get(key).get("description"),
                                                "ON" if eot else "OFF",
                                                labels[i],
                                                predictions[i]
                                                )
            plt.title(title)
            plt.show()
            plt.close()

        # save the adversarial example
        if save:
            if output_dir is None:
                raise ValueError("Cannot save images to a none path.")
            # save with a random name
            file = os.path.join(output_dir, "{}.npy".format(time.monotonic()))
            print("Save the adversarial examples to file [{}].".format(file))
            np.save(file, data_adv)
if __name__ == '__main__':
    # Command-line entry point: build an ensemble target from the configured
    # pool of weak defenses and attack it with and without EOT.
    parser = argparse.ArgumentParser(description="")

    parser.add_argument('-p', '--pool-configs', required=False,
                        default='../configs/demo/athena-mnist.json')
    parser.add_argument('-m', '--model-configs', required=False,
                        default='../configs/demo/model-mnist.json',
                        help='Folder where models stored in.')
    parser.add_argument('-d', '--data-configs', required=False,
                        default='../configs/demo/data-mnist.json',
                        help='Folder where test data stored in.')
    parser.add_argument('-a', '--attack-configs', required=False,
                        default='../configs/demo/attack-zk-mnist.json',
                        help='Folder where test data stored in.')
    parser.add_argument('-o', '--output-root', required=False,
                        default='results',
                        help='Folder for outputs.')
    parser.add_argument('--debug', required=False, default=True)

    args = parser.parse_args()

    print("------AUGMENT SUMMARY-------")
    print("POOL CONFIGS:", args.pool_configs)
    print("MODEL CONFIGS:", args.model_configs)
    print("DATA CONFIGS:", args.data_configs)
    print("ATTACK CONFIGS:", args.attack_configs)
    print("OUTPUT ROOT:", args.output_root)
    print("DEBUGGING MODE:", args.debug)
    print('----------------------------\n')

    # ----------------------------
    # parse configurations (into a dictionary) from json file
    # ----------------------------
    pool_configs = load_from_json(args.pool_configs)
    model_configs = load_from_json(args.model_configs)
    data_configs = load_from_json(args.data_configs)
    attack_configs = load_from_json(args.attack_configs)

    # ---------------------------
    # load the targeted model
    # ---------------------------
    #
    # In the context of the zero-knowledge threat model,
    # we use the undefended model as adversary's target model.
    # model_file = os.path.join(model_configs.get("dir"), model_configs.get("um_file"))
    # target = load_lenet(file=model_file, wrap=True)

    # In the context of the white-box threat model,
    # we use the ensemble as adversary's target model.
    # load weak defenses (in this example, load a tiny pool of 3 weak defenses)
    pool, _ = load_pool(trans_configs=pool_configs,
                        model_configs=model_configs,
                        active_list=True,
                        wrap=True)
    # create an AVEP ensemble as the target model
    wds = list(pool.values())
    target = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)

    # -----------------------
    # Prepare benign samples and corresponding true labels for AE generation
    # -----------------------
    # load the benign samples
    data_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
    data_bs = np.load(data_file)
    # load the corresponding true labels
    label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
    labels = np.load(label_file)

    # ------------------------
    # Generate adversarial examples for a small subset
    # ------------------------
    # Only 5 samples are attacked here to keep the demo fast.
    data_bs = data_bs[:5]
    labels = labels[:5]

    # Normal approach
    # Compute the loss w.r.t. a single input
    # For an ensemble target, averaging the losses of WDs'.
    generate_ae(model=target,
                data=data_bs, labels=labels,
                eot=False,
                attack_configs=attack_configs
                )

    # Adaptive approach (with EOT)
    # Compute the loss expectation over specific distribution.
    # For an ensemble target, averaging the EOT of WDs'.
    generate_ae(model=target,
                data=data_bs, labels=labels,
                eot=True,
                attack_configs=attack_configs
                )
| 39.193182 | 91 | 0.587127 |
a7d94c9ef044354c96e893cf97cc7029e4d84c05 | 4,078 | py | Python | scripts/diffusionEq1D_ForwardEuler.py | RyanClement/DiffusionEquationSolvers | b698e1aa1426e02d29cf5bf1ee2c3cb293046680 | [
"MIT"
] | 1 | 2021-02-25T16:55:00.000Z | 2021-02-25T16:55:00.000Z | scripts/diffusionEq1D_ForwardEuler.py | RyanClement/DiffusionEquationSolvers | b698e1aa1426e02d29cf5bf1ee2c3cb293046680 | [
"MIT"
] | null | null | null | scripts/diffusionEq1D_ForwardEuler.py | RyanClement/DiffusionEquationSolvers | b698e1aa1426e02d29cf5bf1ee2c3cb293046680 | [
"MIT"
] | 1 | 2021-02-25T16:55:01.000Z | 2021-02-25T16:55:01.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program: diffusionEq1D_ForwardEuler
Created: Aug 2020
@author: Ryan Clement (RRCC)
scisoft@outlook.com
Purpose: Solve the
u_t = alpha * u_xx
on the interval (0,L) with boundary conditions
u = 10 for x = 0
and
u = 0 for x = 1.0
and initial condition
u(x,0) = 0.0
"""
### IMPORTS
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
### FUNCTIONS
def plotComputedSteadyStateSolution():
    """
    Plots computed steady state solution.

    Advances the forward-Euler scheme on the module-level arrays ``u`` and
    ``uO`` until successive time steps differ by less than ``tol`` at every
    grid point, then plots the converged profile.  Reads and mutates the
    module globals (u, uO, t0, dt, dp, xPts, lBC, rBC, x).

    Returns
    -------
    None.
    """
    t = t0
    stop = 1
    tol = 1e-5          # convergence tolerance on the max point-wise change
    # Advance several time steps
    # (warm-up iterations before starting the convergence test)
    for j in range(3):
        for i in range(1,xPts-1):
            # Forward-Euler update: u_new = u + dp*(second difference)
            u[i] = uO[i] + dp*(uO[i-1] -2.0*uO[i] + uO[i+1])
        # Enforce boundary conditions
        u[0] = lBC
        u[-1] = rBC
        uO[:] = u
    # Advance solution until tolerance is met.
    while stop:
        maxDif = 0.0
        # Advance solution one time step
        for i in range(1,xPts-1):
            u[i] = uO[i] + dp*(uO[i-1] -2.0*uO[i] + uO[i+1])
            dif = abs(u[i] - uO[i])
            if maxDif < dif:
                maxDif = dif
        # Enforce boundary conditions
        u[0] = lBC
        u[-1] = rBC
        if maxDif > tol:
            # Update previous solution
            uO[:] = u
            # Update simulation time
            t += dt
        else:
            stop = 0
    # Plot solution
    plt.plot(x,u,color='blue')
    plt.title("Computed Steady State Solution")
    plt.ylabel("u")
    plt.xlabel("x")
    plt.grid(True)
    plt.text(0.62,8.2,r'$u_t=\alpha\cdot u_{xx}$')
def initAnim():
    """
    Initialization function for matplotlib.animation.FuncAnimation "init_func" argument.

    Resets the plotted line to the previous-step solution ``uO``.
    NOTE(review): this function is defined but the FuncAnimation call below
    does not pass it as init_func — confirm whether it is still needed.

    Returns
    -------
    solPlt : matplotlib line artist (1-tuple)
        The artist to redraw, as FuncAnimation expects.
    """
    solPlt.set_data(x,uO)
    return solPlt,
def animate(n):
    """
    Animation function for matplotlib.animation.FuncAnimation "func" argument.

    Advances the forward-Euler diffusion update by one time step on the
    module-level arrays ``u``/``uO`` and refreshes the plotted line.

    Parameters
    ----------
    n : dummy
        Not used.

    Returns
    -------
    solPlt : axis.plt
        Plot object for animation.
    """
    for i in range(1,xPts-1):
        # Forward-Euler update of the interior points.
        u[i] = uO[i] + dp*(uO[i-1] -2.0*uO[i] + uO[i+1])
    # Enforce boundary conditions
    u[0] = lBC
    u[-1] = rBC
    uO[:] = u
    solPlt.set_data(x,u)
    return solPlt,
if __name__ == '__main__':
    ### VARIABLES
    x0 = 0.0                      # Minimum x-position (left boundary)
    xN = 1.0                      # Maximum x-position (right boundary)
    xPts = 21                     # Number of spatial mesh points
    t0 = 0.0                      # Simulation start time
    dc = 0.01                     # Diffusion coefficient
    dx = (xN - x0)/(xPts - 1)     # Spatial mesh interval (distance between grid points)
    dt = dx**2/(4*dc)             # Temporal mesh interval (chosen within the explicit stability limit)
    dp = dc*dt/dx**2              # Dimensionless parameter
    x = np.linspace(x0,xN,xPts)   # Spatial mesh array
    u = np.zeros(xPts)            # Updated solution array
    uO = np.zeros(xPts)           # Previous time-step value array
    lBC = 10.0                    # Left boundary condition
    rBC = 0.0                     # Right boundary condition
    # Option 1: Plot Computed Steady State Solution
    # Option 2: Movie
    option = 2
    if 1 == option:
        plotComputedSteadyStateSolution()
    else:
        fig, ax = plt.subplots()
        ax.set_title(r'$u_t=\alpha\cdot u_{xx}$')
        ax.set_xlabel("x")
        ax.set_ylabel("u")
        ax.set_xlim(x0,xN)
        ax.set_ylim(rBC,lBC)
        ax.grid(True)
        solPlt, = ax.plot([],[],color='red')
        # Keep a reference to the animation object so it is not garbage-collected.
        anim = animation.FuncAnimation(fig,
                                       animate,
                                       frames = 100,
                                       interval = 100,
                                       repeat_delay = 1000)
        plt.show()
| 26.480519 | 94 | 0.502943 |
ae0fd271e370c0ed63bc65d8c3e3318d2d93f649 | 439 | py | Python | openff/bespokefit/fragmentation/__init__.py | openforcefield/openff-bespokefit | 85c92a51055a5a82e5d50fee1668a7de4ce2b1d4 | [
"MIT"
] | 7 | 2021-12-10T20:56:36.000Z | 2022-03-15T22:20:27.000Z | openff/bespokefit/fragmentation/__init__.py | openforcefield/openff-bespokefit | 85c92a51055a5a82e5d50fee1668a7de4ce2b1d4 | [
"MIT"
] | 44 | 2021-12-07T16:57:28.000Z | 2022-03-31T07:26:11.000Z | openff/bespokefit/fragmentation/__init__.py | openforcefield/openff-bespokefit | 85c92a51055a5a82e5d50fee1668a7de4ce2b1d4 | [
"MIT"
] | null | null | null | """Support for molecular fragmentation"""
from openff.bespokefit.fragmentation.base import (
FragmentationEngine,
deregister_fragmentation_engine,
get_fragmentation_engine,
list_fragmentation_engines,
register_fragmentation_engine,
)
__all__ = [
"FragmentationEngine",
"deregister_fragmentation_engine",
"get_fragmentation_engine",
"list_fragmentation_engines",
"register_fragmentation_engine",
]
| 24.388889 | 50 | 0.774487 |
3f4187965148f1975fb792afa5a194eeb63bb49b | 3,046 | py | Python | src/traders/fts_option_trader.py | pdkary/black-scholes-plus | 1df6d0e18416900ce9380e5428da58af6bb785fa | [
"MIT"
] | 2 | 2021-02-18T04:22:55.000Z | 2021-02-20T23:40:29.000Z | src/traders/fts_option_trader.py | pdkary/black-scholes-plus | 1df6d0e18416900ce9380e5428da58af6bb785fa | [
"MIT"
] | null | null | null | src/traders/fts_option_trader.py | pdkary/black-scholes-plus | 1df6d0e18416900ce9380e5428da58af6bb785fa | [
"MIT"
] | null | null | null | from datetime import datetime
from src.report_generator import ReportGenerator
class FTSOptionTrader:
@staticmethod
def create_call_bear_spread(tkr, expr, strike_low, strike_high, quantity):
returnString = dict()
key_low = FTSOptionTrader.get_fts_option_key(
tkr, 'CALL', expr, strike_low)
key_high = FTSOptionTrader.get_fts_option_key(
tkr, 'CALL', expr, strike_high)
returnString[key_high] = 'shortsale/' + str(quantity)
returnString[key_low] = 'cashbuy/'+str(quantity)
return returnString
@staticmethod
def create_call_bull_spread(tkr, expr, strike_low, strike_high, quantity):
returnString = dict()
key_low = FTSOptionTrader.get_fts_option_key(
tkr, 'CALL', expr, strike_low)
key_high = FTSOptionTrader.get_fts_option_key(
tkr, 'CALL', expr, strike_high)
returnString[key_high] = 'cashbuy/' + str(quantity)
returnString[key_low] = 'shortsale/'+str(quantity)
return returnString
@staticmethod
def create_put_bear_spread(tkr, expr, strike_low, strike_high, quantity):
returnString = dict()
key_low = FTSOptionTrader.get_fts_option_key(
tkr, 'PUT', expr, strike_low)
key_high = FTSOptionTrader.get_fts_option_key(
tkr, 'PUT', expr, strike_high)
returnString[key_high] = 'cashbuy/' + str(quantity)
returnString[key_low] = 'shortsale/'+str(quantity)
return returnString
@staticmethod
def create_put_bull_spread(tkr, expr, strike_low, strike_high, quantity):
returnString = dict()
key_low = FTSOptionTrader.get_fts_option_key(
tkr, 'PUT', expr, strike_low)
key_high = FTSOptionTrader.get_fts_option_key(
tkr, 'PUT', expr, strike_high)
returnString[key_high] = 'shortsale/' + str(quantity)
returnString[key_low] = 'cashbuy/'+str(quantity)
return returnString
@staticmethod
def create_iron_condor(tkr, expr, call_low, call_high, put_low, put_high, quantity):
returnStr = dict()
bear_call_spread = FTSOptionTrader.create_call_bear_spread(
tkr, expr, call_low, call_high, quantity)
bull_put_spread = FTSOptionTrader.create_put_bull_spread(
tkr, expr, put_low, put_high, quantity)
returnStr.update(bear_call_spread)
returnStr.update(bull_put_spread)
return returnStr
@staticmethod
def get_fts_option_key(tkr, type, expr, strike):
type_id = "D" if type == "CALL" else "P"
expr_date = datetime.strptime(expr_date, "%Y-%m-%d")
yr = str(datetime.year())[2:]
strike_str = str(strike)
if strike < 100:
strike_str = "0"+strike_str
if strike < 1000:
strike_str = strike_str + "00"
if strike > 1000:
strike_str = strike_str+"0"
return tkr+type_id+expr_date.day()+strike_str
| 41.162162 | 89 | 0.637229 |
18f2536a7992ac17c5202217498b7d25ab4a9808 | 10,441 | py | Python | Music Generator/generator pre-ui form.py | sambowyer/musicgenerator | 8d4b74e136e9a1df944f7125ab1b1d39c74224b5 | [
"MIT"
] | null | null | null | Music Generator/generator pre-ui form.py | sambowyer/musicgenerator | 8d4b74e136e9a1df944f7125ab1b1d39c74224b5 | [
"MIT"
] | null | null | null | Music Generator/generator pre-ui form.py | sambowyer/musicgenerator | 8d4b74e136e9a1df944f7125ab1b1d39c74224b5 | [
"MIT"
] | null | null | null | from midiutil.MidiFile import MIDIFile
import random
"""
MIDI Standards:
Tone-numbers - all relative to Middle C (C5) which is 60
- int (0-127)
Duration - (of notes) measured in beats, length of which is defined by tempo (bpm)
- float (usually power of 2 (even eg. 0.25 - semi-demi-quaver)
"""
#MIDI Setup
midi=MIDIFile(2, adjust_origin=False) #create MIDI file with 2 tracks (for chords and for melody)
#Global Variables
tempo = random.randint(80,120)
key = "C" #major
tonicTone = 48 #C4
majorScaleIntervals = [2,2,1,2,2,2,1]
diatonicTones= [0]
for i in range(70):
diatonicTones.append(diatonicTones[-1]+majorScaleIntervals[i%7])
#Track Setup
#Used to store track data thaty can be used to input into midi file at end of program
class Track():
def __init__(self, trackName, trackNo, channelNo, programNo):
self.trackName = trackName
self.trackNo = trackNo
self.channelNo = channelNo
self.programNo = programNo
midi.addTempo(trackNo, channelNo, tempo)
midi.addTrackName(trackNo, 0, trackName)
midi.addProgramChange(trackNo, channelNo, 0, programNo)
chordTrack = Track("Chords [Bright Acoustic Piano]", 0, 0, 1)
melodyTrack = Track("Melody [Electric Guitar (Jazz)]", 1, 0, 26)
def choose(items):
"""
Given a 2D list of options and respective probabilities, in form [option, prob]
this will return a random option based on the given probabilities.
"""
sum=0
for i in items:
sum += i[1]
rnd = random.random() * sum
for i, w in items:
rnd -= w
if rnd < 0:
return i
#Music Theory Setup
def createRhythm(durations, length, overflow):
"""
durations - 2D [option, prob] list
length - length of time to be covered in beats (float)
overflow - whether chosen duration lengths can sum to more than length (bool)
"""
beatsLeft = length
durationOptions = durations[:] #creates new copy of 'durations', not just another name to reference 'durations' with
rhythm = []
while beatsLeft > 0:
if not overflow:
for i in durationOptions:
if i[0] > beatsLeft:
durationOptions.remove(i)
durationValue = choose(durationOptions)
rhythm.append(durationValue)
beatsLeft -= durationValue
return rhythm
"""
CHORDS
"""
#Chord Function class to store info about chord functions and the probability changes they incur
class ChordFuntion():
def __init__(self, scaleTones, displayName):
self.scaleTones = scaleTones
self.displayName = displayName
def updateNewOptions(self, newOptions):
self.newOptions = newOptions
T = ChordFuntion([[1,1],[3,1],[6,1]], "Tonic")
D = ChordFuntion([[5,1],[7,1]], "Dominant")
S = ChordFuntion([[2,1],[4,1]], "Subdominant")
T.updateNewOptions([[T, 10], [D, 20], [S, 40]]) #Tonic --> Subdominant
D.updateNewOptions([[T, 40], [D, 10], [S, 20]]) #Dominant --> Tonic
S.updateNewOptions([[T, 20], [D, 40], [S, 10]]) #Subdominant --> Dominant
#Chord Class for storing info of chords in chord progression
class Chord():
def __init__(self, notes,duration, function):
self.notes = notes #list of MIDI notes
self.duration = duration #chord duration
self.function = function #chord function
"""DONT LEAVE IN FUNCTION/THIS FORMAT - MOVE TO MAIN CODE FLOW (TEMPORARILY IN DEF B/C EASY FORMAT FOR NOW)"""
def createChordProgression(rhythm):
chordProgression = []
initialChordFunctionOptions = [[T, 10], [D, 10], [S, 10]]
chordFunctionOptions = initialChordFunctionOptions[:]
#AS INPUT \/\/\/
jazziness = 0.3
chordSizeOptions = [[3,None],[4,None],[5,None],[6,None],[7,None]]
#how many notes in the chord (3-triad, 4-7th, 5-9th)
#to incorporate jazziness, create exponential(?) function to multiply
#each weighting by, changing the minimum/maximum of curve for different values for no. of notes
for i in chordSizeOptions:
i[1] = (0.5+jazziness)**i[0]
for i in chordRhythm:
currentChordFunction = choose(chordFunctionOptions) #Choose chord function
chordFunctionOptions = currentChordFunction.newOptions #Update probabilities of the next function based on what they all lead to
currentScaleTone = choose(currentChordFunction.scaleTones) #Choose root note of diatonic chord that fits that function
currentChordSize = choose(chordSizeOptions) #Choose chord size
root = diatonicTones[diatonicTones.index(tonicTone)+currentScaleTone-1] #find index of key centre, add new root note, minus one (b/c scale numbers start on 1)
chordTones = [root]
for note in range(currentChordSize-1):
chordTones.append(diatonicTones[diatonicTones.index(chordTones[note])+2]) #add two to the last note to get a diatonic third to build up the chord
chordProgression.append(Chord(chordTones, i, currentChordFunction)) #Add Chord object to list chordProgression
return chordProgression
"""
MELODY
"""
#Note Class for storing info for each note in the melody
class Note():
def __init__(self, tone, duration):
self.tone = tone
self.duration = duration
def createMelody(chordProgression, chordRhythm):
melody = []
initialMelodyDurations = [[0.125,10],[0.25,30],[0.5,30],[1.0,25],[2.0,5], [3.0, 5]]
melodyDurations = initialMelodyDurations[:]
toneOptions = [[i, 0] for i in range(60, 85)]
#2 octaves of chromatic scale - will add a constant amount of weight to diatonic tones
#will increase chromatic weight if duration == 0.125
def toneOptionsUpdate(addedWeight, mustBelongTo):
"""
updating toneOptions so many times decided to safe space by writing a functio to do it
addedWeight - increases probably weighting of certain tones by this amount (float)
mustBelongTo - but only with tones that are in this particular list
"""
for tone in toneOptions:
if tone[1] in mustBelongTo:
tone[1] += addedWeight
toneOptionsUpdate(15, diatonicTones) #+15 if in major scale
diatonicToneOptions = toneOptions[:] #snapshot of toneOptions when notes that are: **in the scale** have been given weight
overflow = 0
for chord in chordProgression:
melodyRhythm = createRhythm(melodyDurations, chord.duration - overflow, True)
overflow = sum(melodyRhythm) - chord.duration
chordTones = []
for i in chordNotes:
chordTones.append((i%12)+60) #add weighting to chord tones in any octave
chordTones.append((i%12)+72) # ^^^
toneOptionsUpdate(15, chordTones) #+15 if in chord
harmonicToneoptions = toneOptions #snapshot of toneOptions when notes that are: **in the scale AND in the chord** have been given weight
for note in melodyRhythm:
if note == 0.125:
toneOptionsUpdate(5, range(60, 85)) #+5 to all (even chromatic notes) if a semi-demi-hemi-quaver (dont want to stay on chromatic note too long)
if len(melody != 0):
lastNote = melody[len(melody)-1].tone
"""
#################################### TODO (DONE) ######################################
Add function to increase weighting of conjunct tones
Maybe use negative parabola with maximum's x value on lastNote
-(0.25x-(lastNote-72))^2 + 25 ???
maybe change c or 0.?x (a) to decrease disparity in added weighting (makes it flatter)
Use another script to find the optimum a & c of the equation
#######################################################################################
"""
for i in toneOptions:
if i[0] in diatonicTones:
addedWeight = -0.6*(i-lastNote)**2 + 45
if i == lastNote:
addedWeight *= 0.5
i[1] += addedWeight
if i[1] < 0: #addedWeight can go -ve so need to check prob >= 0 or choose() can be broken
i[1] = 0
melody.append(Note(choose(toneOptions), choose(melodyDurations))) #append a new Note object to the melody
toneOptions = harmonicToneoptions #Update tone options to before chromatic notes could've been given any weight
toneOptions = diatonicToneOptions #Update toneOptions to before harmonic notes (in the chord) have been given weight so the weights can be added for the notes in the next chord
return melody
#midiConvert: render the generated chords and melody into a MIDI file.
#chord
chordDurations = [[1.0, 20], [2.0, 25], [4.0, 15]] #[duration (in beats), probability (arbitrary relative units)]
chordRhythm = createRhythm(chordDurations, 16, False)
chordProgression = createChordProgression(chordRhythm)
repeats = 4 #input
chordProgression *= repeats #repeat the chord in chordProgression for **repeats** amount of times
time = 0 #time since start (in beats)
for chord in chordProgression:
    for note in chord.notes:
        # Velocity is randomised in 80-90 for the chord track.
        midi.addNote(chordTrack.trackNo, chordTrack.channelNo, note, time, chord.duration, random.randint(80, 90)) #add each note of the current chord to the midi file
    time += chord.duration
#melody
melody = createMelody(chordProgression, chordRhythm)
time = 0
for note in melody:
    # Melody velocities (95-105) sit above the chord velocities so the melody is louder.
    midi.addNote(melodyTrack.trackNo, melodyTrack.channelNo, note.tone, time, note.duration, random.randint(95, 105)) #add each note of the melody one-by-one to the midi file
    time += note.duration
testNo = 0
with open("testNo.txt","r") as f:
    testNo = int(f.read()) #find out what number test this is
with open("Test%s.mid" % (testNo), "wb") as output_file: #name the output midi file, including the test number in the file name
    midi.writeFile(output_file) #finally write the output midi file
with open("testNo.txt","w") as f:
    f.write(str(testNo+1)) #add one to the testNo text file so the next midi file can be named with a test number that is one larger than the one just created
input() # presumably keeps the console window open until Enter -- confirm
| 39.104869 | 187 | 0.63289 |
fc2103237eb2f96adcfefbf8bd12fc6460c33c6f | 2,277 | py | Python | setup.py | ruohola/pgcli | 87ffae295edf4fb2a9c33c552b12f09921def29f | [
"BSD-3-Clause"
] | 9,135 | 2015-04-19T06:09:48.000Z | 2022-03-30T22:21:05.000Z | setup.py | ruohola/pgcli | 87ffae295edf4fb2a9c33c552b12f09921def29f | [
"BSD-3-Clause"
] | 1,121 | 2015-04-19T07:34:10.000Z | 2022-03-30T23:46:06.000Z | setup.py | ruohola/pgcli | 87ffae295edf4fb2a9c33c552b12f09921def29f | [
"BSD-3-Clause"
] | 533 | 2015-04-19T05:27:51.000Z | 2022-03-31T02:23:06.000Z | import platform
from setuptools import setup, find_packages
from pgcli import __version__
description = "CLI for Postgres Database. With auto-completion and syntax highlighting."
install_requirements = [
"pgspecial>=1.11.8",
"click >= 4.1",
"Pygments >= 2.0", # Pygments has to be Capitalcased. WTF?
# We still need to use pt-2 unless pt-3 released on Fedora32
# see: https://github.com/dbcli/pgcli/pull/1197
"prompt_toolkit>=2.0.6,<4.0.0",
"psycopg2 >= 2.8",
"sqlparse >=0.3.0,<0.5",
"configobj >= 5.0.6",
"pendulum>=2.1.0",
"cli_helpers[styles] >= 2.0.0",
]
# setproctitle is used to mask the password when running `ps` in command line.
# But this is not necessary in Windows since the password is never shown in the
# task manager. Also setproctitle is a hard dependency to install in Windows,
# so we'll only install it if we're not in Windows.
if platform.system() != "Windows" and not platform.system().startswith("CYGWIN"):
install_requirements.append("setproctitle >= 1.1.9")
setup(
name="pgcli",
author="Pgcli Core Team",
author_email="pgcli-dev@googlegroups.com",
version=__version__,
license="BSD",
url="http://pgcli.com",
packages=find_packages(),
package_data={"pgcli": ["pgclirc", "packages/pgliterals/pgliterals.json"]},
description=description,
long_description=open("README.rst").read(),
install_requires=install_requirements,
extras_require={"keyring": ["keyring >= 12.2.0"]},
python_requires=">=3.6",
entry_points="""
[console_scripts]
pgcli=pgcli.main:cli
""",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: SQL",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 35.030769 | 88 | 0.642073 |
063c9d16de200243363a2b290aff0ca3b760d7fa | 1,405 | py | Python | oanda-api-v20-master/tests/test_contrib_generic.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 389 | 2016-07-22T17:19:17.000Z | 2022-03-18T21:14:55.000Z | oanda-api-v20-master/tests/test_contrib_generic.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 162 | 2016-10-04T18:17:48.000Z | 2021-12-22T10:53:54.000Z | oanda-api-v20-master/tests/test_contrib_generic.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 120 | 2016-08-08T18:52:41.000Z | 2022-03-24T06:53:38.000Z | import unittest
try:
from nose_parameterized import parameterized
except ImportError:
print("*** Please install 'nose_parameterized' to run these tests ***")
exit(0)
import oandapyV20.contrib.generic as gen
class TestContribGeneric(unittest.TestCase):
    """Tests regarding contrib generic."""
    def test__secs2time(self):
        """secs2time converts an epoch timestamp into a datetime."""
        d = gen.secs2time(1497499200)
        self.assertTrue(d.strftime("%Y%m%d-%H:%M:%S") == '20170615-04:00:00')
    @parameterized.expand([
        (gen.granularity_to_time, "M1", 1*60),
        (gen.granularity_to_time, "M2", 2*60),
        (gen.granularity_to_time, "M5", 5*60),
        (gen.granularity_to_time, "M15", 15*60),
        (gen.granularity_to_time, "H1", 3600),
        (gen.granularity_to_time, "H4", 4*3600),
        (gen.granularity_to_time, "D", 86400),
        (gen.granularity_to_time, "D1", 86400),
        (gen.granularity_to_time, "W", 604800),
        (gen.granularity_to_time, "W1", 604800),
        # "K1" is not a valid granularity: a ValueError is expected.
        (gen.granularity_to_time, "K1", 86400, ValueError),
    ])
    def test__granularity_to_time(self, meth, granularity, refval, exc=None):
        """granularity_to_time: maps a granularity code to seconds."""
        if not exc:
            # run the factory
            r = meth(granularity)
            self.assertTrue(r == refval)
        else:
            with self.assertRaises(exc):
                r = meth(granularity)
if __name__ == "__main__":
    unittest.main()
262c5c62af896029e1d7bb3bb9a9788b6150d6b0 | 897 | py | Python | tests/backends/vk_test.py | Diolor/python-social-auth | ba4e30d4a11b2e188954770bae4df9426d61a470 | [
"BSD-3-Clause"
] | 1 | 2020-09-06T09:30:02.000Z | 2020-09-06T09:30:02.000Z | tests/backends/vk_test.py | cyroxx/python-social-auth | f6c0fa22524ef7c9ade4c5c323cf13ace86a247b | [
"BSD-3-Clause"
] | null | null | null | tests/backends/vk_test.py | cyroxx/python-social-auth | f6c0fa22524ef7c9ade4c5c323cf13ace86a247b | [
"BSD-3-Clause"
] | 1 | 2020-05-23T05:49:36.000Z | 2020-05-23T05:49:36.000Z | #coding: utf-8
from __future__ import unicode_literals
from social.exceptions import AuthUnknownError
from social.p3 import urlencode
from tests.oauth import OAuth2Test
import json
class VKOAuth2Test(OAuth2Test):
    """Login tests for the VK OAuth2 backend using canned HTTP bodies."""
    # Backend under test and the VK endpoint it fetches user data from.
    backend_path = 'social.backends.vk.VKOAuth2'
    user_data_url = 'https://api.vk.com/method/users.get'
    expected_username = 'durov'
    # Canned OAuth2 access-token exchange response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned users.get response used to stub the user-data request.
    user_data_body = json.dumps({
        'response': [{
            'uid': '1',
            'first_name': 'Павел',
            'last_name': 'Дуров',
            'screen_name': 'durov',
            'nickname': '',
            'photo': "http:\/\/cs7003.vk.me\/v7003815\/22a1\/xgG9fb-IJ3Y.jpg"
        }]
    })
    def test_login(self):
        """The full login flow succeeds against the stubbed endpoints."""
        self.do_login()
    def test_partial_pipeline(self):
        """Login resumes correctly from a partially-completed pipeline."""
        self.do_partial_pipeline()
| 27.181818 | 77 | 0.615385 |
cb306d42206176160d73bca0b9f69c20ed4ffacf | 981 | py | Python | rejected_peps/pep336.py | wyz23x2/rejected-peps | 2000797a7ea9dea58e60b2d83267a7fbb57c0ca4 | [
"MIT"
] | 1 | 2021-08-08T10:48:53.000Z | 2021-08-08T10:48:53.000Z | rejected_peps/pep336.py | wyz23x2/rejected-peps | 2000797a7ea9dea58e60b2d83267a7fbb57c0ca4 | [
"MIT"
] | null | null | null | rejected_peps/pep336.py | wyz23x2/rejected-peps | 2000797a7ea9dea58e60b2d83267a7fbb57c0ca4 | [
"MIT"
] | null | null | null | """\
PEP INFO
PEP 336 -- Make None callable
Status: Rejected
Created: 2004-10-28
MODULE INFO
This module adds NoneType that makes None callable,
and instance none (None is a keyword). This breaks the `x is None` usage,
so `isNone(obj)` checks if obj is None or none.
REFERENCES
PEP 336: <https://www.python.org/dev/peps/pep-0336/>
"""
PEP = 336
class _singleton(type):
    """Metaclass caching exactly one instance per class (None is a singleton)."""
    _instances = {}
    def __call__(cls, *args, **kwargs):
        # Return the cached instance when one exists; otherwise build it
        # once via the normal construction path and remember it.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class NoneType(metaclass=_singleton):
    """Callable stand-in for ``None`` (PEP 336 behaviour).

    Calling an instance is a no-op that returns the instance itself, and
    instances compare equal to other ``NoneType`` instances and to the
    builtin ``None``.
    """
    def __repr__(self) -> str:
        return 'None'
    __str__ = __repr__
    def __call__(self, *args, **kwargs) -> 'NoneType':
        # Calling "none" simply yields the singleton again, any arguments.
        return self
    def __eq__(self, other) -> bool:
        return isinstance(other, type(self)) or other is None
    def __hash__(self) -> int:
        # FIX: defining __eq__ implicitly set __hash__ to None, making the
        # singleton unhashable.  Hash like the builtin None so that objects
        # that compare equal also hash equal (dict/set invariant).
        return hash(None)
none = NoneType()
def isNone(object) -> bool:
    """Return True when *object* is the ``none`` singleton or builtin ``None``."""
    if object is None:
        return True
    return object is none
| 25.815789 | 74 | 0.662589 |
53c59f66cbf6093baf0ec1765345e12cc6b0f177 | 9,478 | py | Python | tests/integration/sts/topology/sts_switches_manager_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | null | null | null | tests/integration/sts/topology/sts_switches_manager_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | null | null | null | tests/integration/sts/topology/sts_switches_manager_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Ahmed El-Hassany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import unittest
from pox.lib.util import connect_socket_with_backoff
from sts.entities.controllers import ControllerConfig
from sts.entities.controllers import POXController
from sts.entities.sts_entities import FuzzSoftwareSwitch
from sts.entities.sts_entities import DeferredOFConnection
from sts.openflow_buffer import OpenFlowBuffer
from sts.util.io_master import IOMaster
from sts.util.deferred_io import DeferredIOWorker
from sts.topology.controllers_manager import ControllersManager
from sts.topology.sts_switches_manager import STSSwitchesManager
class STSSwitchesManagerTest(unittest.TestCase):
def initialize_io_loop(self):
io_master = IOMaster()
return io_master
def create_connection(self, controller_info, switch,
max_backoff_seconds=1024):
"""Connect switches to controllers. May raise a TimeoutError"""
socket_ctor = socket.socket
sock = connect_socket_with_backoff(controller_info.config.address,
controller_info.config.port,
max_backoff_seconds=max_backoff_seconds,
socket_ctor=socket_ctor)
# Set non-blocking
sock.setblocking(0)
io_worker = DeferredIOWorker(self.io_master.create_worker_for_socket(sock))
connection = DeferredOFConnection(io_worker, controller_info.cid,
switch.dpid, self.openflow_buffer)
return connection
def get_controller_config(self, cid, address='127.0.0.1', port=6633):
start_cmd = ("./pox.py --verbose --no-cli sts.syncproto.pox_syncer "
"--blocking=False openflow.of_01 --address=__address__ "
"--port=__port__")
kill_cmd = ""
cwd = "pox"
config = ControllerConfig(start_cmd=start_cmd, kill_cmd=kill_cmd, cwd=cwd,
address=address, port=port, cid=cid)
return config
def get_controller(self, cid, address='127.0.0.1', port=6633):
config = self.get_controller_config(cid, address, port)
ctrl = POXController(controller_config=config)
return ctrl
def setUp(self):
self.io_master = self.initialize_io_loop()
self.openflow_buffer = OpenFlowBuffer()
def test_add_switch(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
# Act
manager.add_switch(sw1)
manager.add_switch(sw2)
# Assert
self.assertIn(sw1, manager.live_switches)
self.assertIn(sw2, manager.live_switches)
def test_create_switch(self):
# Arrange
manager = STSSwitchesManager(self.create_connection)
# Act
switch = manager.create_switch(1, 2, True)
# Assert
self.assertIsInstance(switch, FuzzSoftwareSwitch)
def test_crash_switch(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
# Act
manager.crash_switch(sw1)
# Assert
self.assertNotIn(sw1, manager.live_switches)
self.assertIn(sw1, manager.failed_switches)
self.assertIn(sw2, manager.live_switches)
def test_recover_switch(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
c1 = self.get_controller(1, port=6633)
c1.start()
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
manager.connect_to_controllers(sw1, c1)
manager.crash_switch(sw1)
manager.crash_switch(sw2)
# Act
manager.recover_switch(sw1)
# Assert
self.assertNotIn(sw1, manager.failed_switches)
self.assertIn(sw1, manager.live_switches)
self.assertIn(sw2, manager.failed_switches)
def test_switches(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
manager.crash_switch(sw1)
# Act
switches = manager.switches
# Assert
self.assertIn(sw1, switches)
self.assertIn(sw2, switches)
def test_connect(self):
# Arrange
manager = STSSwitchesManager(self.create_connection)
s1 = manager.create_switch(1, 2, True)
s2 = manager.create_switch(2, 2, True)
manager.add_switch(s1)
manager.add_switch(s2)
c1 = self.get_controller(1, port=6633)
c1.start()
c2 = self.get_controller(2,port=6644)
c2.start()
c3 = self.get_controller(3, port=6655)
c3.start()
# Act
manager.connect_to_controllers(s1, c1)
manager.connect_to_controllers(s2, [c1, c2])
# Assert
self.assertTrue(s1.is_connected_to(c1.cid))
self.assertTrue(s2.is_connected_to(c2.cid))
self.assertFalse(s1.is_connected_to(c3.cid))
self.assertFalse(s2.is_connected_to(c3.cid))
def test_get_connected_controllers(self):
# Arrange
manager = STSSwitchesManager(self.create_connection)
s1 = manager.create_switch(1, 2, True)
s2 = manager.create_switch(2, 2, True)
manager.add_switch(s1)
manager.add_switch(s2)
c1 = self.get_controller(1, port=6633)
c1.start()
c2 = self.get_controller(2,port=6644)
c2.start()
c3 = self.get_controller(3, port=6655)
c3.start()
c_mgm = ControllersManager()
c_mgm.add_controller(c1)
c_mgm.add_controller(c2)
c_mgm.add_controller(c3)
manager.connect_to_controllers(s1, c1)
manager.connect_to_controllers(s2, [c1, c2])
# Act
s1_controllers = manager.get_connected_controllers(s1, c_mgm)
s2_controllers = manager.get_connected_controllers(s2, c_mgm)
# Assert
self.assertTrue(s1.is_connected_to(c1.cid))
self.assertTrue(s2.is_connected_to(c2.cid))
self.assertFalse(s1.is_connected_to(c3.cid))
self.assertFalse(s2.is_connected_to(c3.cid))
self.assertItemsEqual([c1], s1_controllers)
self.assertItemsEqual([c1, c2], s2_controllers)
def test_disconnect_controllers(self):
# Arrange
manager = STSSwitchesManager(self.create_connection)
s1 = manager.create_switch(1, 2, True)
manager.add_switch(s1)
c1 = self.get_controller(1, port=6633)
c1.start()
c2 = self.get_controller(2,port=6644)
c2.start()
c_mgm = ControllersManager()
c_mgm.add_controller(c1)
c_mgm.add_controller(c2)
manager.connect_to_controllers(s1, [c1, c2])
# Act
manager.disconnect_controllers(s1)
# Assert
self.assertFalse(s1.is_connected_to(c1.cid))
self.assertFalse(s1.is_connected_to(c2.cid))
self.assertEquals(manager.get_connected_controllers(s1, c_mgm), [])
def test_get_switch(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
# Act
get_s1 = manager.get_switch('s1')
get_s2 = manager.get_switch('s2')
get_s3 = manager.get_switch('s3')
# Assert
self.assertEquals(sw1, get_s1)
self.assertEquals(sw2, get_s2)
self.assertIsNone(get_s3)
def test_has_switch(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
# Act
has_s1 = manager.has_switch('s1')
has_s2 = manager.has_switch('s2')
has_s3 = manager.has_switch('s3')
# Assert
self.assertTrue(has_s1)
self.assertTrue(has_s2)
self.assertFalse(has_s3)
def test_get_switch_dpid(self):
# Arrange
sw1 = FuzzSoftwareSwitch(1, 's1', ports=1)
sw2 = FuzzSoftwareSwitch(2, 's2', ports=1)
manager = STSSwitchesManager(self.create_connection)
manager.add_switch(sw1)
manager.add_switch(sw2)
# Act
get_s1 = manager.get_switch_dpid(1)
get_s2 = manager.get_switch_dpid(2)
get_s3 = manager.get_switch_dpid(3)
# Assert
self.assertEquals(sw1, get_s1)
self.assertEquals(sw2, get_s2)
self.assertIsNone(get_s3)
def test_edge_switches(self):
# Arrange
manager = STSSwitchesManager(self.create_connection)
sw1 = manager.create_switch(1, 2, can_connect_to_endhosts=True)
sw2 = manager.create_switch(1, 2, can_connect_to_endhosts=True)
sw3 = manager.create_switch(1, 2, can_connect_to_endhosts=False)
manager.add_switch(sw1)
manager.add_switch(sw2)
manager.add_switch(sw3)
manager.crash_switch(sw2)
# Act
edge_switches = manager.edge_switches
live_edge_switches = manager.live_edge_switches
# Assert
self.assertItemsEqual([sw1, sw2], edge_switches)
self.assertItemsEqual([sw1], live_edge_switches)
| 34.465455 | 79 | 0.707322 |
bf5e649f1784c0dca447d724f565bac5f1c780b1 | 1,266 | py | Python | src/capture.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | [
"MIT"
] | 1 | 2018-05-08T15:34:50.000Z | 2018-05-08T15:34:50.000Z | src/capture.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | [
"MIT"
] | 9 | 2018-04-25T09:09:08.000Z | 2022-03-11T23:24:27.000Z | src/capture.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | [
"MIT"
] | 1 | 2018-06-14T08:44:48.000Z | 2018-06-14T08:44:48.000Z | import numpy as np
import cv2
# Capture frames from the default webcam and show a thresholded view of a
# 200x200 region of interest (used for collecting hand-gesture images).
cap = cv2.VideoCapture(0)
count = 1
# NOTE(review): count is never incremented (the increment is commented out
# below), so this loop only ends via the ESC key -- confirm that is intended.
while count != 1001:
    ret, frame = cap.read()
    # Draw the region-of-interest box the user should keep their hand inside.
    cv2.rectangle(frame, (400,400), (200,200), (0,255,0),0)
    crop_img = frame[200:400, 200:400]
    value = (33, 33)  # NOTE(review): unused variable
    # Despite the name, this converts to grayscale (COLOR_BGR2GRAY), not HSV.
    hsv = cv2.cvtColor(crop_img,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(hsv, (5, 5), 0)
    thres = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
    # Otsu thresholding on top of the adaptive threshold output.
    ret, res = cv2.threshold(thres, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    # mask = cv2.inRange(hsv, np.array([0, 5, 3]), np.array([60, 255, 255]))
    # gaussian = cv2.GaussianBlur(mask, (11,11), 0)
    # Morphological clean-up followed by a median blur to remove speckle.
    erosion = cv2.erode(res, None, iterations = 1)
    dilated = cv2.dilate(erosion,None,iterations = 1)
    median = cv2.medianBlur(dilated, 7)
    # median = cv2.medianBlur(dilated, 7)
    cv2.putText(frame, "Keep hand in box", (200,200), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow('cropped', frame)
    cv2.imshow('mask', median)
    # #
    # write_img = cv2.resize(median, (50,50))
    # cv2.imwrite('images_data/peace/'+str(count)+'.jpg',write_img)
    # print count
    # count += 1
    # ESC (27) exits the capture loop.
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
cap.release()
| 36.171429 | 112 | 0.64376 |
a9c2808382f784cb0d01f035b6c4c9f1d02b4179 | 91 | py | Python | src/apps/core/purpleserver/providers/apps.py | blueprin4/purplship-server | e4817d6f6fb358adb10eab81153cf564fdcbc784 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/apps/core/purpleserver/providers/apps.py | blueprin4/purplship-server | e4817d6f6fb358adb10eab81153cf564fdcbc784 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/apps/core/purpleserver/providers/apps.py | blueprin4/purplship-server | e4817d6f6fb358adb10eab81153cf564fdcbc784 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class CarriersConfig(AppConfig):
    """Django application configuration for the 'carriers' app."""
    name = 'carriers'
| 15.166667 | 33 | 0.758242 |
c1866a48351eee304e3f1eb108f8cec8ea987c03 | 3,920 | py | Python | graphgallery/gallery/nodeclas/pytorch/gcn.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | graphgallery/gallery/nodeclas/pytorch/gcn.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | graphgallery/gallery/nodeclas/pytorch/gcn.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | from graphgallery.data.sequence import FullBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import PyTorch
from graphgallery.gallery import Trainer
from graphgallery.nn.models import get_model
@PyTorch.register()
class GCN(Trainer):
    """
    Implementation of Graph Convolutional Networks (GCN).
    `Semi-Supervised Classification with Graph Convolutional Networks
    <https://arxiv.org/abs/1609.02907>`
    Tensorflow 1.x implementation: <https://github.com/tkipf/gcn>
    Pytorch implementation: <https://github.com/tkipf/pygcn>
    """
    def data_step(self,
                  adj_transform="normalize_adj",
                  attr_transform=None):
        """Transform the graph's adjacency matrix and node attributes,
        convert them to tensors, and cache them as ``A`` and ``X``."""
        graph = self.graph
        adj_matrix = gf.get(adj_transform)(graph.adj_matrix)
        node_attr = gf.get(attr_transform)(graph.node_attr)
        X, A = gf.astensors(node_attr, adj_matrix, device=self.data_device)
        # ``A`` and ``X`` are cached for later use
        self.register_cache(X=X, A=A)
    def model_step(self,
                   hids=[16],
                   acts=['relu'],
                   dropout=0.5,
                   weight_decay=5e-4,
                   lr=0.01,
                   bias=False):
        """Build the backend-specific GCN model with the given hidden
        sizes, activations and optimiser hyperparameters.

        NOTE(review): hids/acts are mutable default arguments; safe only
        if the model constructor does not mutate them -- confirm.
        """
        model = get_model("GCN", self.backend)
        model = model(self.graph.num_node_attrs,
                      self.graph.num_node_classes,
                      hids=hids,
                      acts=acts,
                      dropout=dropout,
                      weight_decay=weight_decay,
                      lr=lr,
                      bias=bias)
        return model
    def train_loader(self, index):
        """Return a FullBatchSequence over the cached (X, A) tensors,
        emitting labels for (and restricting output to) nodes in *index*."""
        labels = self.graph.node_label[index]
        sequence = FullBatchSequence(inputs=[self.cache.X, self.cache.A],
                                     y=labels,
                                     out_index=index,
                                     device=self.data_device)
        return sequence
# @PyTorch.register()
# class DropEdge(Trainer):
# """
# Implementation of Graph Convolutional Networks (GCN) with DropEdge.
# """
# def data_step(self,
# adj_transform="normalize_adj",
# attr_transform=None):
# graph = self.graph
# adj_matrix = gf.get(adj_transform)(graph.adj_matrix)
# node_attr = gf.get(attr_transform)(graph.node_attr)
# X, A = gf.astensors(node_attr, adj_matrix, device=self.data_device)
# # ``A`` and ``X`` are cached for later use
# self.register_cache(X=X, A=A)
# def model_step(self,
# p=0.05,
# hids=[16],
# acts=['relu'],
# dropout=0.5,
# weight_decay=5e-4,
# lr=0.01,
# bias=False):
# model = get_model("DropEdge", self.backend)
# model = model(self.graph.num_node_attrs,
# self.graph.num_node_classes,
# p=p,
# hids=hids,
# acts=acts,
# dropout=dropout,
# weight_decay=weight_decay,
# lr=lr,
# bias=bias)
# return model
# def train_loader(self, index):
# labels = self.graph.node_label[index]
# sequence = FullBatchSequence(inputs=[self.cache.X, self.cache.A],
# y=labels,
# out_index=index,
# device=self.data_device)
# return sequence
@PyTorch.register()
class DenseGCN(GCN):
    """This model is not really dense version of GCN, but it works for dense adjacency matrix tensor"""
    # Behaviour is inherited unchanged from GCN; this subclass exists so a
    # separate "DenseGCN" name is registered with the PyTorch backend.
    ...
| 33.220339 | 104 | 0.502806 |
be25287823641613cabd767d3bbf0a081db3be25 | 131 | py | Python | recbole/__init__.py | Ahren09/RecBole | b3921818dfbc1b81f9eda8d5e9f05bc9d9114089 | [
"MIT"
] | 16 | 2021-11-03T02:12:49.000Z | 2022-03-27T05:48:19.000Z | recbole/__init__.py | Ahren09/RecBole | b3921818dfbc1b81f9eda8d5e9f05bc9d9114089 | [
"MIT"
] | 2 | 2021-11-21T14:12:25.000Z | 2022-03-11T03:00:04.000Z | recbole/__init__.py | Ahren09/RecBole | b3921818dfbc1b81f9eda8d5e9f05bc9d9114089 | [
"MIT"
] | 4 | 2021-11-25T09:23:41.000Z | 2022-03-26T11:23:26.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__version__ = '0.2.1' | 26.2 | 38 | 0.847328 |
88691586f499a899057ae36d5c53a790aa771e64 | 149 | py | Python | source/__init__.py | wasiahmad/SumGenToBT | 84cc82a23c3207811a6b17dfd408570e94009c29 | [
"MIT"
] | 1 | 2022-02-25T11:19:35.000Z | 2022-02-25T11:19:35.000Z | source/__init__.py | wasiahmad/SumGenToBT | 84cc82a23c3207811a6b17dfd408570e94009c29 | [
"MIT"
] | null | null | null | source/__init__.py | wasiahmad/SumGenToBT | 84cc82a23c3207811a6b17dfd408570e94009c29 | [
"MIT"
] | null | null | null | import os
import sys
# Put this package's directory on sys.path so its modules resolve even when
# the package is executed from a different working directory.
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
# Re-export the package's submodules.
from . import multi_translation
from . import backtranslation
| 18.625 | 63 | 0.791946 |
5104b247da2dce85b7c983fcd39d75ca58e8b938 | 20,779 | py | Python | auto_lights/auto_lights.py | dermotduffy/appdaemon-apps | 117ee558e3e9dfbcc1e02f7f9538626d52fb0b47 | [
"MIT"
] | null | null | null | auto_lights/auto_lights.py | dermotduffy/appdaemon-apps | 117ee558e3e9dfbcc1e02f7f9538626d52fb0b47 | [
"MIT"
] | null | null | null | auto_lights/auto_lights.py | dermotduffy/appdaemon-apps | 117ee558e3e9dfbcc1e02f7f9538626d52fb0b47 | [
"MIT"
] | null | null | null | import datetime
import functools
import os
import traceback
import appdaemon.plugins.hass.hassapi as hass
import voluptuous as vol
import conditions
# Configuration keys accepted in this app's YAML config (validated by
# CONFIG_SCHEMA below).
CONF_TRIGGER_ACTIVATE_CONDITION = 'trigger_activate_condition'
CONF_TRIGGER_DEACTIVATE_CONDITION = 'trigger_deactivate_condition'
CONF_EXTEND_CONDITION = 'extend_condition'
CONF_DISABLE_CONDITION = 'disable_condition'
CONF_CONDITION = 'condition'
CONF_ACTIVATE_ENTITIES = 'activate_entities'
CONF_DEACTIVATE_ENTITIES = 'deactivate_entities'
CONF_STATE_ENTITIES = 'state_entities'
CONF_SOFT_TIMEOUT = 'soft_timeout'
CONF_HARD_TIMEOUT = 'hard_timeout'
CONF_GRACE_PERIOD_TIMEOUT = 'grace_timeout'
CONF_OUTPUT = 'output'
CONF_ENTITY_ID = 'entity_id'
CONF_SERVICE = 'service'
CONF_ON_STATE = 'on_state'
CONF_STATUS_VAR = 'status_var'
CONF_MAX_ACTIONS_PER_MIN = 'max_actions_per_min'
CONF_SERVICE_DATA = 'service_data'
# Defaults (seconds unless noted) used when the config omits a value.
DEFAULT_SOFT_TIMEOUT = 60*15
DEFAULT_HARD_TIMEOUT = 60*60*3
DEFAULT_GRACE_PERIOD_TIMEOUT = 10
DEFAULT_ON_STATE = 'on'
DEFAULT_STATE_UPDATE_TIMEOUT=3
DEFAULT_MAX_ACTIONS_PER_MIN=4
KEY_FRIENDLY_NAME = 'friendly_name'
KEY_ACTIVATE = 'activate'
KEY_DEACTIVATE = 'deactivate'
# How often the optional status variable is refreshed.
STATUS_VAR_UPDATE_SECONDS = 10
# States reported via the optional status variable.
STATUS_VAR_STATE_MANUAL = 'manual'
STATUS_VAR_STATE_ACTIVE_TIMER = 'active_timer'
STATUS_VAR_STATE_WAITING = 'waiting'
STATUS_VAR_STATE_PAUSED = 'paused'
STATUS_VAR_STATE_DISABLED = 'disabled'
# Attribute names/values attached to the status variable.
STATUS_VAR_ATTR_NA = 'N/A'
STATUS_VAR_ATTR_NONE = 'None'
STATUS_VAR_ATTR_TIME_REMAINING = 'light_timeout'
STATUS_VAR_ATTR_LAST_TRIGGER = 'last_trigger_%s'
STATUS_VAR_ATTR_EXTEND = 'will_extend'
STATUS_VAR_ATTR_EXTEND_NEVER = 'never'
STATUS_VAR_ATTR_NO = 'no'
STATUS_VAR_ATTR_YES = 'yes'
STATUS_VAR_ATTR_DISABLED = 'disabled'
STATUS_VAR_ATTR_ICON = 'icon'
# Material Design icon shown for each status state.
STATUS_VAR_ICONS = {
    STATUS_VAR_STATE_MANUAL: 'mdi:hand-left',
    STATUS_VAR_STATE_ACTIVE_TIMER: 'mdi:timer-outline',
    STATUS_VAR_STATE_WAITING: 'mdi:sleep',
    STATUS_VAR_STATE_PAUSED: 'mdi:pause',
    STATUS_VAR_STATE_DISABLED: 'mdi:block-helper',
}
# A condition is a list validated by the shared base schema from conditions.py.
CONFIG_CONDITION_SCHEMA = vol.Schema(
    [conditions.CONFIG_CONDITION_BASE_SCHEMA],
    extra=vol.PREVENT_EXTRA)
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
VALID_SERVICES = (SERVICE_TURN_ON, SERVICE_TURN_OFF)
# Arbitrary extra service data is passed through untouched.
SERVICE_DATA = vol.Schema({
}, extra=vol.ALLOW_EXTRA)
# Base entity reference: an entity id plus the state considered "on".
ENTITY_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): str,
    vol.Optional(CONF_ON_STATE, default=DEFAULT_ON_STATE): str,
}, extra=vol.PREVENT_EXTRA)
# Activation defaults to turn_on; deactivation defaults to turn_off.
ACTIVATE_ENTITIES = ENTITY_SCHEMA.extend({
    vol.Optional(CONF_SERVICE, default=SERVICE_TURN_ON): vol.In(VALID_SERVICES),
    vol.Optional(CONF_SERVICE_DATA, default={}): SERVICE_DATA,
}, extra=vol.ALLOW_EXTRA)
DEACTIVATE_ENTITIES = ENTITY_SCHEMA.extend({
    vol.Optional(CONF_SERVICE, default=SERVICE_TURN_OFF): vol.In(VALID_SERVICES),
    vol.Optional(CONF_SERVICE_DATA, default={}): SERVICE_DATA,
}, extra=vol.ALLOW_EXTRA)
# Each output: an optional gating condition plus entities to (de)activate.
OUTPUT_SCHEMA = vol.Schema([{
    vol.Optional(CONF_CONDITION, default=[]): CONFIG_CONDITION_SCHEMA,
    vol.Required(CONF_ACTIVATE_ENTITIES): [ACTIVATE_ENTITIES],
    vol.Optional(CONF_DEACTIVATE_ENTITIES, default=[]): [DEACTIVATE_ENTITIES],
}])
# Top-level app configuration; extra keys are allowed for Appdaemon's own use.
CONFIG_SCHEMA = vol.Schema({
    vol.Optional(CONF_STATUS_VAR): str,
    vol.Optional(CONF_TRIGGER_ACTIVATE_CONDITION,
                 default=[]): CONFIG_CONDITION_SCHEMA,
    vol.Optional(CONF_TRIGGER_DEACTIVATE_CONDITION,
                 default=[]): CONFIG_CONDITION_SCHEMA,
    vol.Optional(CONF_EXTEND_CONDITION, default=[]): CONFIG_CONDITION_SCHEMA,
    vol.Optional(CONF_DISABLE_CONDITION, default=[]): CONFIG_CONDITION_SCHEMA,
    vol.Optional(CONF_STATE_ENTITIES): [ENTITY_SCHEMA],
    vol.Optional(CONF_SOFT_TIMEOUT,
                 default=DEFAULT_SOFT_TIMEOUT): vol.Range(min=60),
    vol.Optional(CONF_HARD_TIMEOUT,
                 default=DEFAULT_HARD_TIMEOUT): vol.Range(min=300),
    vol.Optional(CONF_GRACE_PERIOD_TIMEOUT,
                 default=DEFAULT_GRACE_PERIOD_TIMEOUT): vol.Range(min=0),
    vol.Optional(CONF_MAX_ACTIONS_PER_MIN,
                 default=DEFAULT_MAX_ACTIONS_PER_MIN): vol.Range(min=0),
    vol.Required(CONF_OUTPUT): OUTPUT_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
def timedelta_to_str(td):
  """Render a timedelta as a zero-padded HH:MM:SS string."""
  hours, rem = divmod(td.total_seconds(), 60 * 60)
  minutes, seconds = divmod(rem, 60)
  return f'{int(hours):02}:{int(minutes):02}:{int(seconds):02}'
@functools.total_ordering
class Timer(object):
  """A cancellable Appdaemon timer, ordered by expiry time.

  Wraps ``app.run_in`` so the owning app can create, cancel, and compare
  pending timers.  Ordering: an earlier expiry sorts first; a timer with no
  expiry set sorts after every armed timer.

  NOTE: defining __eq__ removes the default __hash__, so Timer instances
  are unhashable and must not be used as dict/set keys.
  """
  def __init__(self, app, func=None, seconds=None, name='timer', kwargs=None):
    """Args:
      app: the Appdaemon app (provides run_in/cancel_timer/datetime/log).
      func: callback invoked when the timer fires.
      seconds: default duration used by create() when none is given.
      name: label used in log messages.
      kwargs: argument passed through to ``func`` when the timer fires.
    """
    self._app = app
    self._func = func
    self._seconds = seconds
    self._name = name
    self._kwargs = kwargs
    self._handle = None
    self._expire_datetime = None
  def create(self, seconds=None):
    """(Re)arm the timer for ``seconds``, falling back to the default."""
    if seconds is None:
      seconds = self._seconds
    if seconds is None:
      raise RuntimeError('Failed to specify timer \'seconds\'')
    if self._handle is not None:
      self.cancel()
    self._expire_datetime = self._app.datetime() + datetime.timedelta(
        seconds=seconds)
    self._handle = self._app.run_in(
        lambda kwargs: self._log_wrap(self._func, self._kwargs),
        seconds)
    self._app.log('Created timer: (%s, %s) for %i seconds' % (
        self._name, self._handle, seconds))
  def cancel(self):
    """Cancel any pending callback and reset internal state."""
    if self._handle:
      self._app.log('Cancel timer: (%s, %s)' % (self._name, self._handle))
      self._app.cancel_timer(self._handle)
    self._raw_reset()
  def _raw_reset(self):
    # Forget the scheduler handle and expiry without touching the scheduler.
    self._handle = None
    self._expire_datetime = None
  def get_time_until_expire_string(self):
    """Return the time until expiry as HH:MM:SS ('00:00:00' if not armed)."""
    if self._expire_datetime is None:
      return timedelta_to_str(datetime.timedelta(0))
    return timedelta_to_str(self._expire_datetime - self._app.datetime())
  def _log_wrap(self, func, kwargs):
    """Run ``func``, funnelling any exception through the Appdaemon logger."""
    try:
      # Reset internals first so callbacks can see that timer has finished.
      self._raw_reset()
      if func:
        func(kwargs)
    except Exception as e:
      # Funnel exceptions through the Appdaemon logger (otherwise we won't
      # see them at all).
      stack_trace = traceback.format_exc()
      self._app.log('%s%s%s' % (e, os.linesep, stack_trace), level="ERROR")
  def __eq__(self, other):
    return self._expire_datetime == other._expire_datetime
  def __lt__(self, other):
    # FIX: previously an unarmed timer compared "less than" another unarmed
    # timer (so t < t was True), violating the strict-ordering invariant
    # that functools.total_ordering relies on.  Semantics otherwise kept:
    # earlier expiry sorts first; no expiry sorts last.
    if self._expire_datetime is None:
      return False
    if other._expire_datetime is None:
      return True
    return self._expire_datetime < other._expire_datetime
  def __bool__(self):
    # Truthy while a callback is scheduled.
    return self._handle is not None
  def __repr__(self):
    return '<Timer:%s,%s,%s>' % (self._name,
        self.get_time_until_expire_string(), self._handle)
# A note on state: As much as possible, attempt to store the authoritative
# state in HA (retrieved via Appdaemon get_state(), not here.
class AutoLights(hass.Hass):
    """Automated lighting controller (Appdaemon app).

    Activates/deactivates a configured 'output' in response to trigger
    conditions, with soft/hard timeouts, extend/disable conditions,
    manual-mode detection and flap protection.
    """

    def initialize(self):
        """Appdaemon entry point: validate config, wire up listeners/timers."""
        self._manual_mode = False
        # Most-recent-first history of (datetime, activate, output) actions.
        self._last_actions = []
        # Friendly name of the entity that last triggered each direction.
        self._last_trigger = {
            KEY_ACTIVATE: None,
            KEY_DEACTIVATE: None
        }
        self._config = CONFIG_SCHEMA(self.args)
        self._status_var = self._config.get(CONF_STATUS_VAR)
        # Main timer: its expiry deactivates the matching output.
        self._main_timer = Timer(self, self._main_timer_expire, name='main')
        # While the pause timer is pending, all triggers are ignored.
        self._pause_timer = Timer(self, self._pause_timer_expire, name='pause')
        # Marks a short window after this app changed entities itself, so the
        # resulting state callbacks are not mistaken for user actions.
        self._state_update_timer = Timer(self,
            seconds=DEFAULT_STATE_UPDATE_TIMEOUT, name='state_update')
        self._listen_condition('activate', CONF_TRIGGER_ACTIVATE_CONDITION,
            self._trigger_callback, activate=True)
        self._listen_condition('deactivate', CONF_TRIGGER_DEACTIVATE_CONDITION,
            self._trigger_callback, activate=False)
        self._listen_condition('extend', CONF_EXTEND_CONDITION,
            self._extend_callback)
        self._listen_condition('disable', CONF_DISABLE_CONDITION,
            self._disable_callback)
        self._state_entities = self._get_state_entities()
        self._listen_entities('state',
            [entity[CONF_ENTITY_ID] for entity in self._state_entities],
            self._state_callback)
        if self._has_on_state_entity():
            # Something is already on at startup: start the countdown.
            self._main_timer.create(seconds=self._get_soft_timeout())
        if self._status_var:
            self.run_every(
                self._update_status,
                'now',
                STATUS_VAR_UPDATE_SECONDS)

    def _get_soft_timeout(self):
        """Timeout (seconds) used for trigger-driven activations."""
        return self._config.get(CONF_SOFT_TIMEOUT)

    def _get_hard_timeout(self):
        """Timeout (seconds) used when state changes arrive outside of
        triggers (see _state_callback)."""
        return self._config.get(CONF_HARD_TIMEOUT)

    def _listen_condition(self, name, conf_condition, func, **kwargs):
        """Listen to every entity referenced by the given condition config."""
        entities = conditions.extract_entities_from_condition(
            self._config.get(conf_condition))
        return self._listen_entities(name, entities, func, **kwargs)

    def _listen_entities(self, name, entities, func, **kwargs):
        """Register ``func`` as a state listener for each entity id."""
        self.log('Listening to %s entities -> %s' % (name, entities))
        for entity_id in entities:
            self.listen_state(func, entity_id, **kwargs)
        return entities

    def _get_state_entities(self):
        """Entities whose state means 'output active': the explicit config
        value if present, otherwise the union of every output's
        activate/deactivate entities."""
        if CONF_STATE_ENTITIES in self._config:
            return self._config[CONF_STATE_ENTITIES]
        state_entities = []
        for output in self._config[CONF_OUTPUT]:
            for activate_entity in output[CONF_ACTIVATE_ENTITIES]:
                state_entities.append(activate_entity)
            for deactivate_entity in output[CONF_DEACTIVATE_ENTITIES]:
                state_entities.append(deactivate_entity)
        return state_entities

    def _get_best_matching_output(self, triggers=None):
        """Return the first output whose condition matches now, or None."""
        for output in self._config.get(CONF_OUTPUT):
            if conditions.evaluate_condition(
                    self, self.datetime(), output.get(CONF_CONDITION),
                    triggers=triggers):
                return output
        return None

    def _update_status(self, kwargs=None):
        """Publish state and attributes to the optional status sensor."""
        if self._status_var:
            state = STATUS_VAR_STATE_WAITING
            attributes = {
                STATUS_VAR_ATTR_TIME_REMAINING: STATUS_VAR_ATTR_NA,
                STATUS_VAR_ATTR_LAST_TRIGGER % KEY_ACTIVATE: STATUS_VAR_ATTR_NONE,
                STATUS_VAR_ATTR_LAST_TRIGGER % KEY_DEACTIVATE: STATUS_VAR_ATTR_NONE,
                STATUS_VAR_ATTR_EXTEND: STATUS_VAR_ATTR_EXTEND_NEVER,
                STATUS_VAR_ATTR_DISABLED: STATUS_VAR_ATTR_NO,
            }
            # Display precedence: disabled > paused > manual > active > waiting.
            if self._is_disabled():
                state = STATUS_VAR_STATE_DISABLED
                attributes[STATUS_VAR_ATTR_DISABLED] = STATUS_VAR_ATTR_YES
            elif self._pause_timer:
                state = STATUS_VAR_STATE_PAUSED
            elif self._manual_mode:
                state = STATUS_VAR_STATE_MANUAL
            elif self._main_timer:
                state = STATUS_VAR_STATE_ACTIVE_TIMER
            attributes[STATUS_VAR_ATTR_ICON] = STATUS_VAR_ICONS[state]
            if self._main_timer:
                attributes[STATUS_VAR_ATTR_TIME_REMAINING] = (
                    self._main_timer.get_time_until_expire_string())
            for key in (KEY_ACTIVATE, KEY_DEACTIVATE):
                if self._last_trigger[key]:
                    attributes[STATUS_VAR_ATTR_LAST_TRIGGER % key] = (
                        self._last_trigger[key])
            if self._config.get(CONF_EXTEND_CONDITION):
                if self._should_extend():
                    attributes[STATUS_VAR_ATTR_EXTEND] = STATUS_VAR_ATTR_YES
                else:
                    attributes[STATUS_VAR_ATTR_EXTEND] = STATUS_VAR_ATTR_NO
            self.set_state(self._status_var, state=state, attributes=attributes)

    def _should_extend(self):
        """True if an extend condition is configured and currently met."""
        return (self._config.get(CONF_EXTEND_CONDITION) and
                conditions.evaluate_condition(
                    self, self.datetime(), self._config.get(CONF_EXTEND_CONDITION)))

    def _is_disabled(self):
        """True if a disable condition is configured and currently met."""
        return (self._config.get(CONF_DISABLE_CONDITION) and
                conditions.evaluate_condition(
                    self, self.datetime(), self._config.get(CONF_DISABLE_CONDITION)))

    def _main_timer_expire(self, kwargs):
        """Main timer expiry: extend if warranted, else deactivate output."""
        self.log('Main timer expired at %s' % self.datetime())
        if self._should_extend():
            self.log('Extending main timer ...')
            self._main_timer.create(self._get_soft_timeout())
            return
        output = self._get_best_matching_output()
        if output:
            self._deactivate(output)

    def _deactivate(self, output):
        """Convenience wrapper: _activate() in the 'off' direction."""
        return self._activate(output, activate=False)

    def _activate(self, output, activate=True):
        """Apply an output: call turn_on/turn_off on each of its entities."""
        self.log('%s output: %s' % (
            'Activating' if activate else 'Deactivating', output))
        override_service = None
        override_data = None
        if activate:
            entities = output[CONF_ACTIVATE_ENTITIES]
        else:
            if output[CONF_DEACTIVATE_ENTITIES]:
                entities = output[CONF_DEACTIVATE_ENTITIES]
            else:
                # If deactivation entities are not provided, go with the
                # activation entities, however override the service to be
                # turn_off, and remove the data (as it will otherwise cause
                # an off call to fail).
                entities = output[CONF_ACTIVATE_ENTITIES]
                override_service = SERVICE_TURN_OFF
                override_data = {}
        if entities:
            # Open the "our own change" window before touching anything, so
            # the resulting state callbacks are attributed to this app.
            self._state_update_timer.create()
        for entity in entities:
            data = (override_data if override_data is not None else
                    entity[CONF_SERVICE_DATA])
            service = (override_service if override_service is not None else
                       entity[CONF_SERVICE])
            if service == SERVICE_TURN_ON:
                self.turn_on(entity[CONF_ENTITY_ID], **data)
            else:
                self.turn_off(entity[CONF_ENTITY_ID], **data)
        # Record most-recent-first for _prune/_opposing_last_actions.
        self._last_actions.insert(0, (self.datetime(), activate, output))

    def _prune_last_actions(self):
        # Retain every action from the last minute, or minimum of 1 action
        # (regardless of time).
        out_last_actions = []
        for tpl in self._last_actions:
            if self._seconds_since_dt(tpl[0]) < 60:
                out_last_actions.append(tpl)
        if not out_last_actions and self._last_actions:
            out_last_actions.append(self._last_actions[0])
        self._last_actions = out_last_actions

    def _opposing_last_actions(self):
        """Return how many distinct last actions (e.g. on->off->on == 3)."""
        last_activate = None
        distinct = 0
        for (dt, activate, output) in self._last_actions:
            if last_activate is None:
                distinct = 1
            elif last_activate != activate:
                distinct += 1
            last_activate = activate
        return distinct

    def _has_on_state_entity(self):
        """True if any state entity currently reports its 'on' state."""
        for entity in self._state_entities:
            if self.get_state(entity[CONF_ENTITY_ID]) == entity[CONF_ON_STATE]:
                return True
        return False

    def _state_callback(self, entity, attribute, old, new, kwargs):
        """Handle a state change of one of the tracked state entities."""
        self.log('State callback: %s (old: %s, new: %s)' % (entity, old, new))
        if self._is_disabled():
            self.log('Disabled: Ignoring state for: %s' % entity)
            return
        # A note on manual mode: Manual mode is not enabled when any
        # state change happens during automated lighting. The assumption is that
        # automated lighting will be the norm for a room, and so automations that
        # impact that lighting do not constitute conversion to manual mode (e.g.
        # status controller events). Automations that work outside of automated
        # lighting times will indeed convert this to manual mode.
        if self._has_on_state_entity():
            if not self._state_update_timer and not self._main_timer:
                # If there's a light on, but there was not a change made by this app,
                # change to manual mode. We cannot use the expiry timers here, as there
                # may be a time delay between those timers expiring and the new state
                # arriving here (which is exactly what the state timer is designed to
                # work around).
                self.log('Changed to manual mode: %s (%s->%s)' % (entity, old, new))
                self._manual_mode = True
            # A changing state entity resets the timer.
            # NOTE(review): this re-arms to the *hard* timeout on every state
            # change while anything is on, including changes made during
            # automated runs -- confirm that overriding the soft timeout here
            # is intended.
            self._main_timer.create(seconds=self._get_hard_timeout())
        else:
            self._manual_mode = False
            if not self._state_update_timer:
                # Condition this section on the state update timer not existing.
                # Without this, it is possible that this apps own actions (e.g.
                # activating scene B, when scene A is already activated -- with no
                # overlap in entities between A & B -- will result in HASS firing state
                # callbacks that show all entities being off (very briefly) -- but that
                # still triggers this timer cancel, which may result unintentional
                # re-activations of the same output, which may override changes the
                # user has manually made to the scene). The tradeoff is that there is a
                # tiny window after this app makes changes where if the user, at the
                # same time, deactivates all outputs, then the timer may not get
                # canceled (when the timer expired the deactivation should have limited
                # impact anyway).
                self._main_timer.cancel()
        # If this state change was not due to an action invoked from this app, then
        # pause triggers for <grace_period>.
        if (not self._state_update_timer and
                self._config.get(CONF_GRACE_PERIOD_TIMEOUT) > 0):
            self.log('Pausing due externally applied state change: %s (%s->%s)' % (
                entity, old, new))
            self._pause_timer.create(
                seconds=self._config.get(CONF_GRACE_PERIOD_TIMEOUT))
        self._update_status()

    def _seconds_since_dt(self, dt):
        """Seconds elapsed between ``dt`` and the scheduler's current time."""
        return (self.datetime() - dt).total_seconds()

    def _within_window(self, dt, window):
        """True if ``dt`` is less than ``window`` seconds in the past."""
        return self._seconds_since_dt(dt) < window

    def _trigger_callback(self, entity, attribute, old, new, kwargs):
        """Handle a state change of an (de)activation trigger entity."""
        activate = kwargs[KEY_ACTIVATE]
        self.log('Trigger callback (activate=%s): %s (old: %s, new: %s)' % (
            activate, entity, old, new))
        if old == 'unavailable':
            # Coming back from 'unavailable' is a restore, not a real trigger.
            self.log('Unavailable: Skipping previously unavailable state for: %s' % entity)
            return
        if self._is_disabled():
            self.log('Disabled: Skipping trigger for: %s' % entity)
            return
        elif self._pause_timer:
            self.log('Paused: Skipping trigger for: %s' % entity)
            return
        elif self._manual_mode:
            self.log('Manual mode: Skipping trigger for: %s' % entity)
            return
        triggers = {entity: new}
        condition = self._config.get(
            CONF_TRIGGER_ACTIVATE_CONDITION if activate
            else CONF_TRIGGER_DEACTIVATE_CONDITION)
        triggered = conditions.evaluate_condition(self, self.datetime(),
            condition, triggers=triggers)
        activate_key = KEY_ACTIVATE if activate else KEY_DEACTIVATE
        if triggered:
            output = self._get_best_matching_output(triggers=triggers)
            if output:
                # Prune last actions list.
                self._prune_last_actions()
                self.log('Last-actions: %s' % self._last_actions)
                # Safety precaution: Pause changes if more distinct actions than
                # max_actions_per_min (avoid lights flapping due to more configuration
                # choices). (e.g. imagine a trigger than turns lights on when
                # brightness dips below X, but turns them off when it rises above X: a
                # poorly configured instance could cause the lights to flap)
                # Implicitly, this is allowing multiple repitions of the same action
                # with no pauseing (e.g. repeatedly turning on the same light due to
                # walking past multiple motion sensors is just fine).
                max_actions_per_min = self._config.get(CONF_MAX_ACTIONS_PER_MIN)
                if self._opposing_last_actions() >= max_actions_per_min:
                    self.log('Pausing attempts to %s output as >%i (%s) distinct '
                             'actions have been executed in the last minute: %s' % (
                                 activate_key,
                                 max_actions_per_min,
                                 CONF_MAX_ACTIONS_PER_MIN,
                                 output))
                    # Pause for 1 minute (it's max actions per minute).
                    self._pause_timer.create(seconds=1*60)
                    self._update_status()
                    return
                # If this would just activate the exact same output, just reset
                # the timer rather than re-activating (as otherwise we lose custom
                # adjustments made to the lighting).
                if (activate and self._main_timer and self._last_actions and
                        self._last_actions[0][1] == activate and
                        self._last_actions[0][2] == output):
                    self.log('Same output triggered by %s. '
                             'Resetting timer only.' % entity)
                    self._main_timer.create(self._get_soft_timeout())
                else:
                    if activate:
                        self._main_timer.create(self._get_soft_timeout())
                    else:
                        self._main_timer.cancel()
                    self._activate(output, activate=activate)
            # Record the trigger's friendly name for the status attributes.
            self._last_trigger[activate_key] = self.get_state(
                entity, attribute=KEY_FRIENDLY_NAME)
        self._update_status()

    def _pause_timer_expire(self, kwargs):
        """Pause window ended: refresh the published status."""
        self._update_status()

    def _extend_callback(self, entity, attribute, old, new, kwargs):
        """An extend-condition entity changed: refresh the published status."""
        self._update_status()

    def _disable_callback(self, entity, attribute, old, new, kwargs):
        """A disable-condition entity changed: stop or resume operation."""
        if self._is_disabled():
            self.log('Disabled: Triggered by %s (%s->%s)' % (entity, old, new))
            self._main_timer.cancel()
        else:
            self.log('Enabled: Triggered by %s (%s->%s)' % (entity, old, new))
            if self._has_on_state_entity() and not self._main_timer:
                self._main_timer.create(self._get_soft_timeout())
            self._manual_mode = False
        self._update_status()
| 37.575045 | 85 | 0.705472 |
90433bebb71e32b847a29c824b1e39420d5c7b6b | 5,169 | py | Python | tests/test_dsl.py | KeshavInamdar/rasa_core | 432638e9df53e2b5d68771ea5cf6af6ef1ac6b72 | [
"Apache-2.0"
] | null | null | null | tests/test_dsl.py | KeshavInamdar/rasa_core | 432638e9df53e2b5d68771ea5cf6af6ef1ac6b72 | [
"Apache-2.0"
] | null | null | null | tests/test_dsl.py | KeshavInamdar/rasa_core | 432638e9df53e2b5d68771ea5cf6af6ef1ac6b72 | [
"Apache-2.0"
] | 1 | 2018-05-09T06:57:22.000Z | 2018-05-09T06:57:22.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import numpy as np
from rasa_core.events import ActionExecuted, UserUttered
from rasa_core.featurizers import BinaryFeaturizer
from rasa_core.training import (
extract_trackers_from_file,
extract_story_graph_from_file, extract_training_data_from_file)
from rasa_core.training.structures import Story
def test_can_read_test_story(default_domain):
    """Parse the markdown stories file and verify one tracker's exact events."""
    trackers = extract_trackers_from_file("data/test_stories/stories.md",
                                          default_domain,
                                          featurizer=BinaryFeaturizer())
    assert len(trackers) == 7
    # this should be the story simple_story_with_only_end -> show_it_all
    # the generated stories are in a non stable order - therefore we need to
    # do some trickery to find the one we want to test
    tracker = [t for t in trackers if len(t.events) == 5][0]
    assert tracker.events[0] == ActionExecuted("action_listen")
    assert tracker.events[1] == UserUttered(
        "simple",
        intent={"name": "simple", "confidence": 1.0},
        parse_data={'text': 'simple',
                    'intent_ranking': [{'confidence': 1.0,
                                        'name': 'simple'}],
                    'intent': {'confidence': 1.0, 'name': 'simple'},
                    'entities': []})
    assert tracker.events[2] == ActionExecuted("utter_default")
    assert tracker.events[3] == ActionExecuted("utter_greet")
    assert tracker.events[4] == ActionExecuted("action_listen")
def test_persist_and_read_test_story_graph(tmpdir, default_domain):
    """Round-trip a story graph through its string form; trackers must match."""
    graph = extract_story_graph_from_file("data/test_stories/stories.md",
                                          default_domain)
    out_path = tmpdir.join("persisted_story.md")
    with io.open(out_path.strpath, "w") as f:
        f.write(graph.as_story_string())
    recovered_trackers = extract_trackers_from_file(out_path.strpath,
                                                    default_domain,
                                                    BinaryFeaturizer())
    existing_trackers = extract_trackers_from_file(
        "data/test_stories/stories.md",
        default_domain,
        BinaryFeaturizer())
    existing_stories = {t.export_stories() for t in existing_trackers}
    for t in recovered_trackers:
        story_str = t.export_stories()
        # Every recovered story must match exactly one original story
        # (discard prevents double-matching).
        assert story_str in existing_stories
        existing_stories.discard(story_str)
def test_persist_and_read_test_story(tmpdir, default_domain):
    """Round-trip via Story.dump_to_file (instead of as_story_string)."""
    graph = extract_story_graph_from_file("data/test_stories/stories.md",
                                          default_domain)
    out_path = tmpdir.join("persisted_story.md")
    Story(graph.story_steps).dump_to_file(out_path.strpath)
    recovered_trackers = extract_trackers_from_file(out_path.strpath,
                                                    default_domain,
                                                    BinaryFeaturizer())
    existing_trackers = extract_trackers_from_file(
        "data/test_stories/stories.md",
        default_domain,
        BinaryFeaturizer())
    existing_stories = {t.export_stories() for t in existing_trackers}
    for t in recovered_trackers:
        story_str = t.export_stories()
        # Each recovered story must match exactly one original story.
        assert story_str in existing_stories
        existing_stories.discard(story_str)
def test_read_story_file_with_cycles(tmpdir, default_domain):
    """Cycle removal keeps all story steps but strips the cyclic edges."""
    graph = extract_story_graph_from_file(
        "data/test_stories/stories_with_cycle.md",
        default_domain)
    assert len(graph.story_steps) == 5
    graph_without_cycles = graph.with_cycles_removed()
    # The original graph is cyclic; the processed one must not be.
    assert graph.cyclic_edge_ids != set()
    assert graph_without_cycles.cyclic_edge_ids == set()
    assert len(graph.story_steps) == len(graph_without_cycles.story_steps) == 5
    assert len(graph_without_cycles.story_end_checkpoints) == 2
def test_generate_training_data_with_cycles(tmpdir, default_domain):
    """Training-data extraction from a cyclic story file yields stable labels."""
    featurizer = BinaryFeaturizer()
    training_data = extract_training_data_from_file(
        "data/test_stories/stories_with_cycle.md",
        default_domain,
        featurizer,
        augmentation_factor=0,  # no augmentation, so counts are deterministic
        max_history=4)
    assert training_data.num_examples() == 15
    np.testing.assert_array_equal(
        training_data.y,
        [2, 4, 0, 2, 4, 0, 1, 0, 2, 4, 0, 1, 0, 0, 3])
def test_visualize_training_data_graph(tmpdir, default_domain):
    """Visualization writes an image and produces a sane networkx graph."""
    graph = extract_story_graph_from_file(
        "data/test_stories/stories_with_cycle.md",
        default_domain)
    graph = graph.with_cycles_removed()
    out_path = tmpdir.join("graph.png").strpath
    # this will be the plotted networkx graph
    G = graph.visualize(out_path)
    assert os.path.exists(out_path)
    # we can't check the exact topology - but this should be enough to ensure
    # the visualisation created a sane graph
    assert set(G.nodes()) == set(range(-1, 14))
    assert len(G.edges()) == 16
| 38.288889 | 79 | 0.65177 |
a0bd85e5f78dc3281ce6e26b7b05a5f4846339f4 | 375 | py | Python | experiments/heat-3d/tmp_files/6467.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/heat-3d/tmp_files/6467.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/heat-3d/tmp_files/6467.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
# CHiLL transformation recipe (auto-generated for an autotuning experiment):
# apply loop tiling to the PolyBench heat-3d stencil kernel.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/6467.c')
procedure('kernel_heat_3d')
loop(0)
# Tile the three nesting levels of each of the two statements with sizes
# 16/16/32.  Argument order presumably (stmt, loop_level, tile_size,
# control_loop_level) per CHiLL's tile() command -- confirm against the
# CHiLL manual.
tile(0,2,16,2)
tile(0,4,16,4)
tile(0,6,32,6)
tile(1,2,16,2)
tile(1,4,16,4)
tile(1,6,32,6)
6f6beed60da9d3222f7e6c8766086213570d3a23 | 13,855 | py | Python | sdk/python/pulumi_azure_native/documentdb/v20160331/database_account_mongo_db_database.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20160331/database_account_mongo_db_database.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20160331/database_account_mongo_db_database.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._inputs import *
__all__ = ['DatabaseAccountMongoDBDatabaseArgs', 'DatabaseAccountMongoDBDatabase']
@pulumi.input_type
class DatabaseAccountMongoDBDatabaseArgs:
    """Input properties for a DatabaseAccountMongoDBDatabase resource.

    NOTE: generated by the Pulumi SDK generator -- do not hand-edit.
    """

    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 options: pulumi.Input[Mapping[str, pulumi.Input[str]]],
                 resource: pulumi.Input['MongoDBDatabaseResourceArgs'],
                 resource_group_name: pulumi.Input[str],
                 database_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DatabaseAccountMongoDBDatabase resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input['MongoDBDatabaseResourceArgs'] resource: The standard JSON format of a MongoDB database
        :param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "options", options)
        pulumi.set(__self__, "resource", resource)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # database_name is optional; only set when supplied.
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database account name.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter
    def options(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
        """
        A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        """
        return pulumi.get(self, "options")

    @options.setter
    def options(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
        pulumi.set(self, "options", value)

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input['MongoDBDatabaseResourceArgs']:
        """
        The standard JSON format of a MongoDB database
        """
        return pulumi.get(self, "resource")

    @resource.setter
    def resource(self, value: pulumi.Input['MongoDBDatabaseResourceArgs']):
        pulumi.set(self, "resource", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of an Azure resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[pulumi.Input[str]]:
        """
        Cosmos DB database name.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_name", value)
class DatabaseAccountMongoDBDatabase(pulumi.CustomResource):
    """An Azure Cosmos DB MongoDB database (API version 2016-03-31).

    NOTE: generated by the Pulumi SDK generator -- do not hand-edit.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['MongoDBDatabaseResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An Azure Cosmos DB MongoDB database.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['MongoDBDatabaseResourceArgs']] resource: The standard JSON format of a MongoDB database
        :param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DatabaseAccountMongoDBDatabaseArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure Cosmos DB MongoDB database.

        :param str resource_name: The name of the resource.
        :param DatabaseAccountMongoDBDatabaseArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object, opts) and keyword-argument forms.
        resource_args, opts = _utilities.get_resource_args_opts(DatabaseAccountMongoDBDatabaseArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['MongoDBDatabaseResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id):
            # required inputs must be present unless a URN is supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DatabaseAccountMongoDBDatabaseArgs.__new__(DatabaseAccountMongoDBDatabaseArgs)

            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["database_name"] = database_name
            if options is None and not opts.urn:
                raise TypeError("Missing required property 'options'")
            __props__.__dict__["options"] = options
            if resource is None and not opts.urn:
                raise TypeError("Missing required property 'resource'")
            __props__.__dict__["resource"] = resource
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            # Output-only properties start unset.
            __props__.__dict__["location"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["tags"] = None
            __props__.__dict__["type"] = None
        # Aliases cover every other API version of this resource type so that
        # version migrations do not force a replace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20150401:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20150408:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20151106:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20160319:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20190801:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20191212:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20200301:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20200401:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20200901:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20210115:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20210315:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-native:documentdb/v20210415:DatabaseAccountMongoDBDatabase"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:DatabaseAccountMongoDBDatabase")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(DatabaseAccountMongoDBDatabase, __self__).__init__(
            'azure-native:documentdb/v20160331:DatabaseAccountMongoDBDatabase',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccountMongoDBDatabase':
        """
        Get an existing DatabaseAccountMongoDBDatabase resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = DatabaseAccountMongoDBDatabaseArgs.__new__(DatabaseAccountMongoDBDatabaseArgs)

        # Only output properties are populated; values come from the provider.
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return DatabaseAccountMongoDBDatabase(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the database account.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
| 57.729167 | 2,996 | 0.702346 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.