hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1afb84941b693d8555e01b585defdf5fe070f8ff | 7,359 | py | Python | facenet/config.py | sMedX/facenet | 2a86255bded9da94031488b41250a6cef703e05b | [
"MIT"
] | 2 | 2020-01-11T07:48:23.000Z | 2021-05-25T01:25:05.000Z | facenet/config.py | RuslanKosarev/FaceNet | 53aad2497982b1543e3a566c621f9f1d98fca58c | [
"MIT"
] | null | null | null | facenet/config.py | RuslanKosarev/FaceNet | 53aad2497982b1543e3a566c621f9f1d98fca58c | [
"MIT"
] | 2 | 2021-01-04T21:14:56.000Z | 2021-06-30T09:47:48.000Z | # coding: utf-8
__author__ = 'Ruslan N. Kosarev'
import sys
from pathlib import Path
from datetime import datetime
from omegaconf import OmegaConf
import random
import numpy as np
import tensorflow as tf
from facenet import ioutils
# directory for default configs (shipped with the package under apps/configs)
default_config_dir = Path(__file__).parents[0].joinpath('apps', 'configs')
default_config = default_config_dir.joinpath('config.yaml')

# directory for user's configs (repository-level configs/ directory)
user_config_dir = Path(__file__).parents[1].joinpath('configs')
user_config = user_config_dir.joinpath('config.yaml')

# directory for default trained model
default_model_path = Path(__file__).parents[1].joinpath('models/default')
def subdir():
    """Return a timestamp string (YYYYmmdd-HHMMSS) for naming run sub-directories."""
    return datetime.now().strftime('%Y%m%d-%H%M%S')
def config_paths(app_file_name, custom_config_file):
    """Build the ordered tuple of candidate config files, most generic first.

    The order is: default config, default app-specific config, user config,
    user app-specific config, and finally the custom file (when given).
    Later entries are expected to override earlier ones during merging.
    """
    app_config_name = Path(app_file_name).stem + '.yaml'
    candidates = [
        default_config,
        default_config_dir.joinpath(app_config_name),
        user_config,
        user_config_dir.joinpath(app_config_name),
    ]
    if custom_config_file is not None:
        candidates.append(custom_config_file)
    return tuple(candidates)
def set_seed(seed):
    """Seed the Python, NumPy and TensorFlow random generators for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
class Config:
    """Dict-like view over YAML settings where keys become attributes.

    Nested dicts are wrapped recursively, so ``cfg.section.option`` works.
    Accessing a missing attribute returns an empty (falsy) ``Config`` rather
    than raising, which lets callers safely write ``if cfg.model.path:``.
    """

    def __init__(self, dct=None):
        """Populate attributes from ``dct``; nested dicts become Config objects.

        :param dct: mapping of settings, or None for an empty config
        """
        if dct is None:
            dct = {}
        for key, item in dct.items():
            setattr(self, key, Config(item) if isinstance(item, dict) else item)

    def __repr__(self):
        shift = 3 * ' '

        def render(obj, ident=''):
            text = ''
            for key, item in obj.items():
                if isinstance(item, Config):
                    # nested sections: key on its own line, children indented
                    text += f'{ident}{key}: \n{render(item, ident=ident + shift)}'
                else:
                    text += f'{ident}{key}: {str(item)}\n'
            return text

        return render(self)

    def __getattr__(self, name):
        # Missing attributes resolve to an empty Config so chained access
        # (e.g. cfg.a.b.c) never raises; the result is falsy.
        return self.__dict__.get(name, Config())

    def __bool__(self):
        # An empty config is falsy; any stored setting makes it truthy.
        return bool(self.__dict__)

    @property
    def as_dict(self):
        """Plain-dict representation (recursively unwraps nested Config objects)."""
        def unwrap(obj):
            return {key: unwrap(item) if isinstance(item, Config) else item
                    for key, item in obj.items()}
        return unwrap(self)

    def items(self):
        """Iterate over (name, value) pairs like dict.items()."""
        return self.__dict__.items()

    def exists(self, name):
        """Return True if the setting ``name`` is explicitly present."""
        return name in self.__dict__
class LoadConfigError(Exception):
    """Raised when the configuration could not be loaded or parsed."""
    pass
def load_config(app_file_name, options):
    """Load and merge configuration from the cascade of config files.

    Files later in the cascade override values from earlier ones
    (default -> default app-specific -> user -> user app-specific -> custom).

    :param app_file_name: application script name; its stem selects app configs
    :param options: dict with optional 'config' key pointing to a custom file
    :return: the merged configuration as a Config instance
    :raises LoadConfigError: if no file could be loaded, or one failed to parse
    """
    paths = config_paths(app_file_name, options['config'])
    cfg = OmegaConf.create()
    loaded_any = False
    for config_path in paths:
        if not config_path.is_file():
            continue
        try:
            cfg = OmegaConf.merge(cfg, OmegaConf.load(config_path))
            loaded_any = True
        except Exception as err:
            # chain the original error so parse failures keep their traceback
            raise LoadConfigError(
                f"Cannot load configuration from '{config_path}'\n{err}") from err
    if not loaded_any:
        raise LoadConfigError("The configuration has not been loaded.")
    return Config(OmegaConf.to_container(cfg))
def extract_faces(app_file_name, options):
    """Prepare config for the face-extraction app.

    Derives the output/log/statistics paths from the dataset path and image
    size (unless an explicit outdir is configured), seeds the RNGs and writes
    the effective arguments plus git revision info to the log directory.

    :param app_file_name: application script name (selects app-specific configs)
    :param options: dict with optional 'config' key for a custom config file
    :return: prepared Config instance
    """
    cfg = load_config(app_file_name, options)
    if not cfg.outdir:
        # default output dir sits next to the dataset, e.g. <dataset>_extracted_<size>
        cfg.outdir = f'{Path(cfg.dataset.path)}_extracted_{cfg.image.size}'
    cfg.outdir = Path(cfg.outdir).expanduser()
    cfg.logdir = cfg.outdir
    cfg.logfile = cfg.outdir / 'log.txt'
    cfg.h5file = cfg.outdir / 'statistics.h5'
    # set seed for random number generators
    set_seed(cfg.seed)
    # write arguments and store some git revision info in a text files in the log directory
    ioutils.write_arguments(cfg, cfg.logdir.joinpath(Path(app_file_name).stem + '.yaml'))
    ioutils.store_revision_info(cfg.logdir)
    return cfg
def train_softmax(options):
    """Prepare config for the softmax-training app.

    Note: unlike the sibling apps, this reads the app name from sys.argv[0].

    :param options: dict with optional 'config' key for a custom config file
    :return: prepared Config instance
    """
    app_file_name = sys.argv[0]
    cfg = load_config(app_file_name, options)
    # each training run gets its own timestamped model directory
    path = Path(cfg.model.path).expanduser()
    cfg.model.path = path / subdir()
    cfg.logs = Config()
    cfg.logs.dir = cfg.model.path / 'logs'
    cfg.logs.file = cfg.model.path.stem + '.log'
    if cfg.model.checkpoint:
        cfg.model.checkpoint = Path(cfg.model.checkpoint).expanduser()
    if not cfg.train.epoch.max_nrof_epochs:
        # default the epoch count to the last learning-rate schedule boundary
        cfg.train.epoch.max_nrof_epochs = cfg.train.learning_rate.schedule[-1][0]
    if cfg.validate:
        # validation inherits batch size and image settings from training
        cfg.validate.batch_size = cfg.batch_size
        cfg.validate.image.size = cfg.image.size
        cfg.validate.image.standardization = cfg.image.standardization
    # set seed for random number generators
    set_seed(cfg.seed)
    # write arguments and store some git revision info in a text files in the log directory
    ioutils.write_arguments(cfg, cfg.logs.dir)
    ioutils.store_revision_info(cfg.logs.dir)
    return cfg
def embeddings(app_file_name, options):
    """Prepare config for the embeddings-extraction app.

    Falls back to the default trained model when none is configured, checks
    that the requested output format is supported, and derives output paths.

    :param app_file_name: application script name (selects app-specific configs)
    :param options: dict with optional 'config' key for a custom config file
    :return: prepared Config instance
    :raises ValueError: if cfg.suffix is not '.h5' or '.tfrecord'
    """
    cfg = load_config(app_file_name, options)
    if not cfg.model.path:
        cfg.model.path = default_model_path
    if cfg.suffix not in ('.h5', '.tfrecord'):
        raise ValueError('Invalid suffix for output file, must either be h5 or tfrecord.')
    # output dir combines the dataset path and the model name
    cfg.outdir = Path(cfg.dataset.path + '_' + Path(cfg.model.path).stem)
    cfg.outdir = Path(cfg.outdir).expanduser()
    cfg.logdir = cfg.outdir
    cfg.logfile = cfg.outdir.joinpath('log.txt')
    cfg.outfile = cfg.outdir.joinpath('embeddings').with_suffix(cfg.suffix)
    # set seed for random number generators
    set_seed(cfg.seed)
    # write arguments and store some git revision info in a text files in the log directory
    ioutils.write_arguments(cfg, cfg.logdir.joinpath(Path(app_file_name).stem + '.yaml'))
    ioutils.store_revision_info(cfg.logdir)
    return cfg
def validate(app_file_name, options):
    """Prepare config for the validation app.

    Falls back to the default trained model when none is configured and
    derives the output/log paths from the dataset and model names.

    :param app_file_name: application script name (selects app-specific configs)
    :param options: dict with optional 'config' key for a custom config file
    :return: prepared Config instance
    """
    cfg = load_config(app_file_name, options)
    if not cfg.model.path:
        cfg.model.path = default_model_path
    cfg.outdir = Path(cfg.dataset.path + '_' + Path(cfg.model.path).stem)
    cfg.outdir = Path(cfg.outdir).expanduser()
    cfg.logdir = cfg.outdir
    cfg.logfile = cfg.outdir.joinpath('validate.txt')
    # set seed for random number generators
    set_seed(cfg.seed)
    # write arguments and store some git revision info in a text files in the log directory
    ioutils.write_arguments(cfg, cfg.logdir.joinpath(Path(app_file_name).stem + '.yaml'))
    ioutils.store_revision_info(cfg.logdir)
    return cfg
def train_classifier(app_file_name, options):
    """Prepare config for the classifier-training app.

    Creates a timestamped sub-directory under the configured classifier path
    and points logging there.

    :param app_file_name: application script name (selects app-specific configs)
    :param options: dict with optional 'config' key for a custom config file
    :return: prepared Config instance
    """
    cfg = load_config(app_file_name, options)
    cfg.classifier.path = Path(cfg.classifier.path).expanduser() / subdir()
    cfg.logdir = cfg.classifier.path
    cfg.logfile = cfg.logdir / 'log.txt'
    # set seed for random number generators
    set_seed(cfg.seed)
    # write arguments and store some git revision info in a text files in the log directory
    ioutils.write_arguments(cfg, cfg.logdir.joinpath(Path(app_file_name).stem + '.yaml'))
    ioutils.store_revision_info(cfg.logdir)
    return cfg
| 27.561798 | 91 | 0.66259 | 1,012 | 7,359 | 4.641304 | 0.177866 | 0.028316 | 0.044496 | 0.042155 | 0.483287 | 0.444326 | 0.433468 | 0.385991 | 0.373217 | 0.373217 | 0 | 0.002124 | 0.232233 | 7,359 | 266 | 92 | 27.665414 | 0.829204 | 0.141459 | 0 | 0.282051 | 0 | 0 | 0.076308 | 0.011678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0.00641 | 0.051282 | 0.032051 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1afbe1138d8039a8a05a95b59450d11623814fb8 | 1,861 | py | Python | final_plots/dockless_geographic_overlap.py | georgetown-analytics/DC-Bikeshare | 9f5a6a3256cff15a29f0dca6e9a9d8098ab2df28 | [
"MIT"
] | 11 | 2018-07-01T16:43:05.000Z | 2020-07-17T19:08:16.000Z | final_plots/dockless_geographic_overlap.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2021-02-08T20:21:12.000Z | 2021-12-13T19:47:04.000Z | final_plots/dockless_geographic_overlap.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2018-10-05T19:54:20.000Z | 2020-10-27T11:54:09.000Z | from dockless_exploration_graphs import *
if __name__ == '__main__':
    conn = read_only_connect_aws()

    # Geographic Overlap by Operator Over Time
    # ensure a local folder exists for the rendered graphs
    try:
        os.mkdir('./Load Graphs')
    except FileExistsError:
        pass
    load_path = './Load Graphs/'
    google_drive_location = '1LRJWj6wLBWvyBJbN93jXA2dpgF3BLrN3'

    # pull per-day overlap percentages for each dockless operator
    df = pd.read_sql("""select distinct
    date,
    /* % of trips that Start within quarter mile of CaBi Station*/
    dless_geo_start_jump,
    dless_geo_start_lime,
    dless_geo_start_mobike,
    dless_geo_start_ofo,
    dless_geo_start_spin,
    /* % of trips that End within quarter mile of CaBi Station*/
    dless_geo_end_jump,
    dless_geo_end_lime,
    dless_geo_end_mobike,
    dless_geo_end_ofo,
    dless_geo_end_spin
    from final_db
    where dless_trips_all > 0
    """, con=conn)
    print(df.tail())

    # Open google drive connection
    dr = open_drive()

    # Reshaping the data to derive operators from the category
    # columns such as "dless_geo_start_jump" melt into operator="jump", time="start"
    df_2 = pd.melt(
        df, id_vars=['date'], var_name='Category',
        value_name='pct_total_trips')
    df_2['operator'] = df_2['Category'].str.split('_').str.get(3)
    df_2['time'] = df_2['Category'].str.split('_').str.get(2)

    # box plot comparing start vs end overlap per operator (outliers hidden)
    sns.boxplot(
        x='operator', y='pct_total_trips', hue='time', data=df_2,
        showfliers=False)
    plt.title('Geographic Overlap by Operator Over Time')
    all_in_one_save(
        "Geo Overlap", load_path, dr,
        google_drive_location)

    # Delete Graphs from Directory
    shutil.rmtree(load_path)
| 38.770833 | 86 | 0.554541 | 212 | 1,861 | 4.523585 | 0.471698 | 0.08342 | 0.067779 | 0.056309 | 0.20438 | 0.20438 | 0.131387 | 0.079249 | 0 | 0 | 0 | 0.013412 | 0.358947 | 1,861 | 47 | 87 | 39.595745 | 0.790444 | 0.083289 | 0 | 0 | 0 | 0 | 0.578483 | 0.082305 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02439 | 0.02439 | 0 | 0.02439 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1afce6c555ebff3d7d4b28bd273f9016e9d8daa2 | 744 | py | Python | test/demo1.py | runawayhorse001/PySparkAudit | a7981bff0d3d8ab4568bbd4c61c57c4b30f74c2f | [
"MIT"
] | 7 | 2019-07-11T02:30:31.000Z | 2022-01-27T08:07:22.000Z | test/demo1.py | runawayhorse001/PySparkAudit | a7981bff0d3d8ab4568bbd4c61c57c4b30f74c2f | [
"MIT"
] | null | null | null | test/demo1.py | runawayhorse001/PySparkAudit | a7981bff0d3d8ab4568bbd4c61c57c4b30f74c2f | [
"MIT"
] | 6 | 2020-10-19T08:46:17.000Z | 2022-02-09T01:32:38.000Z | from pyspark.sql import SparkSession
# build (or reuse) a local Spark session for the audit demo
spark = SparkSession \
    .builder \
    .appName("Python Spark regression example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

# import PySpark Audit functions
from PySparkAudit import data_types, hist_plot, bar_plot, freq_items, feature_len
from PySparkAudit import dataset_summary, rates
from PySparkAudit import trend_plot, auditing

# load dataset (CSV with header; column types inferred by Spark)
data = spark.read.csv(path='Heart.csv',
                      sep=',', encoding='UTF-8', comment=None, header=True, inferSchema=True)

# audit function by function
# data types
print(data_types(data))
# check frequent items
print(freq_items(data))
# bar plot for categorical features
bar_plot(data, display=True)
| 25.655172 | 93 | 0.735215 | 96 | 744 | 5.59375 | 0.583333 | 0.089385 | 0.122905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001605 | 0.162634 | 744 | 28 | 94 | 26.571429 | 0.860353 | 0.182796 | 0 | 0 | 0 | 0 | 0.133111 | 0.039933 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aff15182dcbf6a79732990cf8083b8988bcdd75 | 2,554 | py | Python | setup.py | JarryShaw/ptyng | b15747114ffaf7eeae92f7508fbefc4db4fab11e | [
"PSF-2.0"
] | 1 | 2019-06-27T12:28:25.000Z | 2019-06-27T12:28:25.000Z | setup.py | JarryShaw/ptyng | b15747114ffaf7eeae92f7508fbefc4db4fab11e | [
"PSF-2.0"
] | 1 | 2020-05-31T08:52:00.000Z | 2020-05-31T08:55:47.000Z | setup.py | JarryShaw/ptyng | b15747114ffaf7eeae92f7508fbefc4db4fab11e | [
"PSF-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# subprocess32 backports Python 3 subprocess features (e.g. DEVNULL) to < 3.4
if sys.version_info[:2] < (3, 4):
    import subprocess32 as subprocess
else:
    import subprocess

# ptyng only works where the stdlib pty module exists (Unix-like systems)
try:
    import pty  # pylint: disable=unused-import
except ImportError:
    sys.exit('Unsupported operating system!')

# version string
__version__ = '0.3.3.post1'

# install requires
# Probe whether the system `ps`/`pgrep` tools work here; if either fails,
# fall back to requiring psutil for process inspection.
try:
    subprocess.check_call(['ps', 'axo', 'pid=,stat='],
                          stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    with subprocess.Popen(['yes'], stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL) as pipe:
        # `yes` gives pgrep a live child process of this interpreter to find
        subprocess.check_call(['pgrep', '-P', str(os.getpid())],
                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        pipe.terminate()
        pipe.kill()
except subprocess.CalledProcessError:
    requirements = ['psutil']
else:
    requirements = None

# README
with open('README.rst', 'rb') as file:
    long_desc = file.read().decode('utf-8')

# set-up script for pip distribution
setup(
    name='ptyng',
    version=__version__,
    author='Jarry Shaw',
    author_email='jarryshaw@icloud.com',
    url='https://github.com/JarryShaw/ptyng',
    license='Python Software Foundation License',
    description='Pseudo-terminal utilities.',
    long_description=long_desc,
    long_description_content_type='text/x-rst',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    include_package_data=True,
    install_requires=requirements,
    extras_require={
        # backports needed only on very old Pythons
        ':python_version <= "3.3"': [
            'backports.shutil_which>=3.5.2',
            'subprocess32>=3.5.3'
        ],
    },
    py_modules=['ptyng'],
    package_data={
        '': [
            'LICENSE',
            'README.md',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Python Software Foundation License',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries',
        'Topic :: Terminals',
    ]
)
| 28.696629 | 83 | 0.608849 | 273 | 2,554 | 5.578755 | 0.490842 | 0.087328 | 0.114905 | 0.085358 | 0.060407 | 0.060407 | 0 | 0 | 0 | 0 | 0 | 0.022408 | 0.24863 | 2,554 | 88 | 84 | 29.022727 | 0.771235 | 0.048943 | 0 | 0.150685 | 0 | 0.013699 | 0.342692 | 0.011974 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136986 | 0 | 0.136986 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21012dbb9efba5f433744edaaad9dbf69569d85a | 9,118 | py | Python | mtdata/main.py | thammegowda/mtdata | 9064629d8e4dd46cf0f7f793dbc17fd0f770649f | [
"Apache-2.0"
] | 81 | 2020-04-07T02:55:39.000Z | 2022-03-30T05:28:58.000Z | mtdata/main.py | thammegowda/mtdata | 9064629d8e4dd46cf0f7f793dbc17fd0f770649f | [
"Apache-2.0"
] | 77 | 2020-04-07T19:53:48.000Z | 2022-03-22T18:41:08.000Z | mtdata/main.py | thammegowda/mtdata | 9064629d8e4dd46cf0f7f793dbc17fd0f770649f | [
"Apache-2.0"
] | 6 | 2020-04-16T22:21:19.000Z | 2022-02-07T20:52:15.000Z | #!/usr/bin/env python
#
# Author: Thamme Gowda [tg (at) isi (dot) edu]
# Created: 4/4/20
import argparse
from pathlib import Path
from collections import defaultdict
import mtdata
from mtdata import log, __version__, cache_dir as CACHE_DIR, cached_index_file
from mtdata.entry import DatasetId, LangPair
from mtdata.utils import IO
from mtdata.iso.bcp47 import bcp47
def list_data(langs, names, not_names=None, full=False):
    """Print matching dataset entries, one per line (tab separated).

    With ``full``, each entry is followed by its citation (or a placeholder).
    """
    from mtdata.index import get_entries
    matched = get_entries(langs, names, not_names, fuzzy_match=True)
    log.info(f"Found {len(matched)}")
    for ent in matched:
        print(ent.format(delim='\t'))
        if full:
            print(ent.cite or "CITATION_NOT_LISTED", end='\n\n')
    print(f"Total {len(matched)} entries")
def get_data(langs, out_dir, train_dids=None, test_dids=None, dev_did=None, merge_train=False, compress=False,
             drop_dupes=False, drop_tests=False, **kwargs):
    """Download and prepare the requested datasets under ``out_dir``.

    Also appends an ``mtdata.signature.txt`` with the equivalent CLI invocation
    so the prepared dataset can be reproduced later.

    :param langs: language pair (tuple of two language codes)
    :param out_dir: output directory for the prepared dataset
    :param train_dids: dataset IDs to be used for training
    :param test_dids: dataset IDs to be used for testing
    :param dev_did: dataset ID to be used for validation
    :param merge_train: concatenate all training sets into a single file
    :param compress: keep the prepared files compressed
    :param drop_dupes: drop duplicate (src, tgt) training pairs (with merge)
    :param drop_tests: drop dev/test sentences from training sets (with merge)
    :param kwargs: ignored extras (logged with a warning)
    """
    if kwargs:
        log.warning(f"Args are ignored: {kwargs}")
    from mtdata.data import Dataset
    assert train_dids or test_dids, 'Required --train or --test or both'
    dataset = Dataset.prepare(
        langs, train_dids=train_dids, test_dids=test_dids, out_dir=out_dir,
        dev_did=dev_did, cache_dir=CACHE_DIR, merge_train=merge_train, compress=compress,
        drop_dupes=drop_dupes, drop_tests=drop_tests)
    # reconstruct the equivalent command line for reproducibility
    cli_sig = f'-l {"-".join(str(l) for l in langs)}'
    if train_dids:
        cli_sig += f' -tr {" ".join(str(d) for d in train_dids)}'
    if test_dids:
        cli_sig += f' -ts {" ".join(str(d) for d in test_dids)}'
    if dev_did:
        cli_sig += f' -dv {dev_did}'
    for flag, val in [('--merge', merge_train), ('--compress', compress), ('-dd', drop_dupes), ('-dt', drop_tests)]:
        if val:
            cli_sig += ' ' + flag
    sig = f'mtdata get {cli_sig} -o <out-dir>\nmtdata version {mtdata.__version__}\n'
    log.info(f'Dataset is ready at {dataset.dir}')
    log.info(f'mtdata args for reproducing this dataset:\n {sig}')
    with IO.writer(out_dir / 'mtdata.signature.txt', append=True) as w:
        w.write(sig)
def generate_report(langs, names, not_names=None, format='plain'):
    """Print counts of matching entries grouped by language pair, dataset name and group."""
    from mtdata.index import get_entries
    lang_stats = defaultdict(int)
    name_stats = defaultdict(int)
    group_stats = defaultdict(int)
    for ent in get_entries(langs, names, not_names):
        lang_stats[ent.lang_str] += 1
        name_stats[ent.did.name] += 1
        group_stats[ent.did.group] += 1
    # one section per grouping, each line "<key>\t<count>"
    sections = [("Languages:", lang_stats), ("\nNames:", name_stats), ("\nGroups:", group_stats)]
    for title, stats in sections:
        print(title)
        for key, val in stats.items():
            print(f'{key}\t{val:,}')
def list_experiments(args):
    """List known experiments and their datasets (not yet supported).

    :raises NotImplementedError: always; this feature is not implemented yet
    """
    # NotImplementedError (an Exception subclass) is the idiomatic signal for
    # unimplemented functionality; existing `except Exception` handlers still work.
    raise NotImplementedError("Not implemented yet")
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
    """Help formatter that keeps explicit line breaks in help text starting with "R|"."""

    def _split_lines(self, text, width: int):
        # An "R|" prefix opts the help text out of argparse's re-wrapping;
        # everything else falls through to the default wrapping behaviour.
        if not text.startswith("R|"):
            return super()._split_lines(text, width)
        return text[2:].splitlines()
def lang_pair(string) -> LangPair:
    """argparse type: parse "xxx-yyy" into a pair of standardized BCP-47 codes.

    :raises argparse.ArgumentTypeError: if the value is not of the form L1-L2
    """
    parts = string.split('-')
    if len(parts) != 2:
        msg = f'expected value of form "xxx-yyz" eg "deu-eng"; given {string}'
        raise argparse.ArgumentTypeError(msg)
    std_codes = tuple(bcp47(part) for part in parts)
    std_form = '-'.join(str(code) for code in std_codes)
    if std_form != string:
        # nudge users towards the standardized spelling without rejecting input
        log.info(f"Suggestion: Use codes {std_form} instead of {string}."
                 f" Let's make a little space for all languages of our planet 😢.")
    return std_codes
def add_boolean_arg(parser: argparse.ArgumentParser, name, dest=None, default=False, help=''):
    """Add mutually exclusive --<name>/--no-<name> flags writing a bool to ``dest``.

    When neither flag is given, ``dest`` keeps ``default``.
    """
    dest = dest or name
    flags = parser.add_mutually_exclusive_group()
    flags.add_argument(f'--{name}', dest=dest, action='store_true', default=default, help=help)
    flags.add_argument(f'--no-{name}', dest=dest, action='store_false', default=not default,
                       help='Do not ' + help)
def parse_args():
    """Define and parse the mtdata command line interface.

    Builds the 'list', 'get' and 'report' subcommands, enables debug logging
    when -vv is given, and returns the parsed argparse namespace.
    """
    p = argparse.ArgumentParser(formatter_class=MyFormatter, epilog=f'Loaded from {__file__} (v{__version__})')
    p.add_argument('-vv', '--verbose', action='store_true', help='verbose mode')
    p.add_argument('-v', '--version', action='version', version=f'%(prog)s {__version__}')
    p.add_argument('-ri', '--reindex', action='store_true',
                   help=f"Invalidate index of entries and recreate it. This deletes"
                        f" {cached_index_file} only and not the downloaded files. "
                        f"Use this if you're using in developer mode and modifying mtdata index.")
    sub_ps = p.add_subparsers(required=True, dest='task',
                              help='''R|
"list" - List the available entries
"get" - Downloads the entry files and prepares them for experiment
"list-exp" - List the (well) known papers and datasets used in their experiments
"get-exp" - Get the datasets used in the specified experiment from "list-exp"
''')
    # 'list' subcommand: browse the dataset index
    list_p = sub_ps.add_parser('list', formatter_class=MyFormatter)
    list_p.add_argument('-l', '--langs', metavar='L1-L2', type=lang_pair,
                        help='Language pairs; e.g.: deu-eng')
    list_p.add_argument('-n', '--names', metavar='NAME', nargs='*',
                        help='Name of dataset set; eg europarl_v9.')
    list_p.add_argument('-nn', '--not-names', metavar='NAME', nargs='*', help='Exclude these names')
    list_p.add_argument('-f', '--full', action='store_true', help='Show Full Citation')
    list_p.add_argument('-o', '--out', type=Path, help='This arg is ignored. '
                                                       'Only used in "get" subcommand.')

    # 'get' subcommand: download and prepare datasets
    get_p = sub_ps.add_parser('get', formatter_class=MyFormatter)
    get_p.add_argument('-l', '--langs', metavar='L1-L2', type=lang_pair,
                       help='Language pairs; e.g.: deu-eng', required=True)
    get_p.add_argument('-tr', '--train', metavar='ID', dest='train_dids', nargs='*', type=DatasetId.parse,
                       help='''R|Names of datasets separated by space, to be used for *training*.
e.g. -tr Statmt-news_commentary-16-deu-eng europarl_v9 .
To concatenate all these into a single train file, set --merge flag.''')
    get_p.add_argument('-ts', '--test', metavar='ID', dest='test_dids', nargs='*', type=DatasetId.parse,
                       help='''R|Names of datasets separated by space, to be used for *testing*.
e.g. "-ts Statmt-newstest_deen-2019-deu-eng Statmt-newstest_deen-2020-deu-eng ".
You may also use shell expansion if your shell supports it.
e.g. "-ts Statmt-newstest_deen-20{19,20}-deu-eng" ''')
    get_p.add_argument('-dv', '--dev', metavar='ID', dest='dev_did', type=DatasetId.parse, required=False,
                       help='''R|Dataset to be used for development (aka validation).
e.g. "-dev Statmt-newstest_deen-2017-deu-eng"''')
    add_boolean_arg(get_p, 'merge', dest='merge_train', default=False, help='Merge train into a single file')
    get_p.add_argument(f'--compress', action='store_true', default=False, help="Keep the files compressed")
    get_p.add_argument('-dd', f'--dedupe', '--drop-dupes', dest='drop_dupes', action='store_true', default=False,
                       help="Remove duplicate (src, tgt) pairs in training (if any); valid when --merge. "
                            "Not recommended for large datasets. ")
    get_p.add_argument('-dt', f'--drop-tests', dest='drop_tests', action='store_true', default=False,
                       help="Remove dev/test sentences from training sets (if any); valid when --merge")
    get_p.add_argument('-o', '--out', dest='out_dir', type=Path, required=True, help='Output directory name')

    # 'report' subcommand: summary statistics over matching entries
    report_p = sub_ps.add_parser('report', formatter_class=MyFormatter)
    report_p.add_argument('-l', '--langs', metavar='L1-L2', type=lang_pair,
                          help='Language pairs; e.g.: deu-eng')
    report_p.add_argument('-n', '--names', metavar='NAME', nargs='*',
                          help='Name of dataset set; eg europarl_v9.')
    report_p.add_argument('-nn', '--not-names', metavar='NAME', nargs='*', help='Exclude these names')

    args = p.parse_args()
    if args.verbose:
        # -vv switches the package-wide logger into debug mode
        log.getLogger().setLevel(level=log.DEBUG)
        mtdata.debug_mode = True
    return args
def main():
    """CLI entry point: parse args, optionally rebuild the index, dispatch the task."""
    args = parse_args()
    if args.reindex and cached_index_file.exists():
        # keep the old index around as a .bak instead of deleting it outright
        bak_file = cached_index_file.with_suffix(".bak")
        log.info(f"Invalidate index: {cached_index_file} -> {bak_file}")
        cached_index_file.rename(bak_file)
    if args.task == 'list':
        list_data(args.langs, args.names, not_names=args.not_names, full=args.full)
    elif args.task == 'get':
        get_data(**vars(args))
    elif args.task == 'list_exp':
        list_experiments(args)
    elif args.task == 'report':
        generate_report(args.langs, names=args.names, not_names=args.not_names)
    else:
        raise Exception(f'{args.task} not implemented')


if __name__ == '__main__':
    main()
| 46.284264 | 116 | 0.641259 | 1,296 | 9,118 | 4.341049 | 0.243827 | 0.041059 | 0.040526 | 0.02133 | 0.221294 | 0.185212 | 0.166904 | 0.139353 | 0.129399 | 0.129399 | 0 | 0.006674 | 0.211231 | 9,118 | 196 | 117 | 46.520408 | 0.775445 | 0.008993 | 0 | 0.055901 | 0 | 0.006211 | 0.323738 | 0.021479 | 0 | 0 | 0 | 0 | 0.006211 | 1 | 0.055901 | false | 0 | 0.068323 | 0 | 0.15528 | 0.055901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21031888ae60afb1eb930da2ee6539160ee0da47 | 3,767 | py | Python | ch6/02-himmelblau.py | mbassale/genetic-algorithms-books | 7c2c5f78d715b1b8b345ef4512a9dcfac1889d92 | [
"MIT"
] | 1 | 2021-10-14T03:37:07.000Z | 2021-10-14T03:37:07.000Z | ch6/02-himmelblau.py | mbassale/genetic-algorithms-books | 7c2c5f78d715b1b8b345ef4512a9dcfac1889d92 | [
"MIT"
] | null | null | null | ch6/02-himmelblau.py | mbassale/genetic-algorithms-books | 7c2c5f78d715b1b8b345ef4512a9dcfac1889d92 | [
"MIT"
] | null | null | null | from deap import base
from deap import creator
from deap import tools
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import elitism
# problem constants
DIMENSIONS = 2  # the objective is defined over two variables (x, y)
BOUND_LOW, BOUND_HIGH = -5.0, 5.0  # search-space box for each dimension

# Genetic Algorithm Constants
POPULATION_SIZE = 300
P_CROSSOVER = 0.9  # probability with which two individuals are mated
P_MUTATION = 0.5  # probability with which an individual is mutated
MAX_GENERATIONS = 300
HALL_OF_FAME_SIZE = 30
CROWDING_FACTOR = 20.0  # eta for bounded SBX crossover / polynomial mutation

# set the random seed (fixed for reproducible runs)
RANDOM_SEED = 17
random.seed(RANDOM_SEED)

toolbox = base.Toolbox()

# define a single objective, minimizing fitness strategy
creator.create('FitnessMin', base.Fitness, weights=(-1.0,))

# create the Individual class based on list
creator.create('Individual', list, fitness=creator.FitnessMin)
# helper function for creating float numbers uniformly distributed within a given range [low, high]
def randomFloat(low, high):
    """Return a list of DIMENSIONS floats drawn uniformly from [low, high]."""
    bounds = zip([low] * DIMENSIONS, [high] * DIMENSIONS)
    return [random.uniform(lo, hi) for lo, hi in bounds]
# create an operator that randomly return a float in the desired range and dimension
toolbox.register('attrFloat', randomFloat, BOUND_LOW, BOUND_HIGH)

# create the individual operator to fill up an Individual instance
toolbox.register('individualCreator', tools.initIterate, creator.Individual, toolbox.attrFloat)

# create the population operator to generate a list of individuals
toolbox.register('populationCreator', tools.initRepeat, list, toolbox.individualCreator)
# Himmelblau function as the given individual's fitness
def himmelblau(individual):
    """Evaluate Himmelblau's function at the individual's (x, y) point.

    Returns a one-element tuple, as DEAP expects fitness values to be tuples.
    """
    x, y = individual[0], individual[1]
    term1 = (x ** 2 + y - 11) ** 2
    term2 = (x + y ** 2 - 7) ** 2
    return term1 + term2,  # trailing comma -> single-value tuple
toolbox.register('evaluate', himmelblau)

# genetic operators
toolbox.register('select', tools.selTournament, tournsize=2)
# bounded SBX crossover and polynomial mutation keep offspring inside the search box
toolbox.register('mate', tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_HIGH, eta=CROWDING_FACTOR)
toolbox.register('mutate', tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_HIGH, eta=CROWDING_FACTOR,
                 indpb=1.0 / DIMENSIONS)
# Genetic Algorithm flow:
def main():
    """Run the elitist GA on Himmelblau's function and plot the results."""
    # generation 0
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # per-generation statistics
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("avg", np.mean)

    # hall of fame keeps the best individuals ever seen (used for elitism)
    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # run the GA flow with elitism
    population, logbook = elitism.eaSimpleWithElitism(
        population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
        ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)

    # report the best solutions found
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])
    print("- Best solutions are:")
    for i in range(HALL_OF_FAME_SIZE):
        print(i, ": ", hof.items[i].fitness.values[0], " -> ", hof.items[i])

    # figure 1: final population vs the four known global minima on the x-y plane
    plt.figure(1)
    globalMinima = [[3.0, 2.0], [-2.805118, 3.131312],
                    [-3.779310, -3.283186], [3.584458, -1.848126]]
    plt.scatter(*zip(*globalMinima), marker='X', color='red', zorder=1)
    plt.scatter(*zip(*population), marker='.', color='blue', zorder=0)

    # figure 2: min / average fitness per generation
    minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
    plt.figure(2)
    sns.set_style("whitegrid")
    plt.plot(minFitnessValues, color='red')
    plt.plot(meanFitnessValues, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
    plt.show()

if __name__ == "__main__":
    main()
| 31.923729 | 118 | 0.70799 | 497 | 3,767 | 5.287726 | 0.376258 | 0.039954 | 0.015221 | 0.015982 | 0.02968 | 0.02968 | 0.02968 | 0.02968 | 0.02968 | 0 | 0 | 0.028939 | 0.174409 | 3,767 | 117 | 119 | 32.196581 | 0.816077 | 0.225378 | 0 | 0 | 0 | 0 | 0.093232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0.123077 | 0.015385 | 0.2 | 0.061538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21051731e91dcd2bb0223e9b235777805c632b49 | 20,025 | py | Python | utils.py | QuickLearner171998/Retrosynthesis-Reaction-Pathway | 78011c94a9bfd9630b0240c966fe358efa183817 | [
"MIT"
] | 4 | 2020-06-23T20:47:31.000Z | 2022-01-06T07:01:06.000Z | utils.py | QuickLearner171998/Retrosynthesis-Reaction-Pathway | 78011c94a9bfd9630b0240c966fe358efa183817 | [
"MIT"
] | null | null | null | utils.py | QuickLearner171998/Retrosynthesis-Reaction-Pathway | 78011c94a9bfd9630b0240c966fe358efa183817 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from itertools import repeat
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
def translate_file(input_filename, output_filename):
    """Translate the product SMILES in `input_filename` with the OpenNMT model.

    Writes the raw predictions to `output_filename` and returns
    (scores, predictions): per-input lists of model scores and n-best
    predicted reactant SMILES.
    """
    parser = ArgumentParser(description='translation')
    opts.config_opts(parser)
    opts.translate_opts(parser)
    # Bug fix: -src and -output were hard-coded file names, silently
    # ignoring the input_filename/output_filename arguments.
    args = f'''-model m16_step_44000.pt
        -src {input_filename}
        -output {output_filename}
        -batch_size 128
        -replace_unk
        -max_length 200
        -verbose
        -beam_size 50
        -n_best 10
        -min_length 5'''
    opt = parser.parse_args(args)
    translator = build_translator(opt, report_score=True)
    src_shards = split_corpus(opt.src, opt.shard_size)
    tgt_shards = repeat(None)
    shard_pairs = zip(src_shards, tgt_shards)
    # NOTE(review): only the last shard's results are returned — confirm
    # shard_size keeps everything in a single shard for these inputs.
    for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
        scores, predictions = translator.translate(
            src=src_shard,
            tgt=tgt_shard,
            src_dir=opt.src_dir,
            batch_size=opt.batch_size,
            attn_debug=opt.attn_debug
        )
    return scores, predictions
def create_reaction_variants(molecule):
    """Prepend each of the 10 reaction-class tokens to `molecule`.

    Returns one tokenized model input string per reaction type, in
    '<RX_1>' .. '<RX_10>' order.
    """
    reaction_tokens = [f'<RX_{n}>' for n in range(1, 11)]
    return [f'{token} {molecule}' for token in reaction_tokens]
def molecules_to_file(molecules, filename):
    """Write one SMILES string per line to `filename`."""
    with open(filename, 'w') as out:
        out.write(''.join(mol + '\n' for mol in molecules))
def smile_valid(smile):
    """Return True if a tokenized SMILES string parses to a valid molecule."""
    # join the space-separated tokens back into a plain SMILES string
    plain = ''.join(smile.split(' '))
    # MolFromSmiles returns None for unparseable SMILES; the previous
    # if/else returning True/False is equivalent to this direct check
    return AllChem.MolFromSmiles(plain) is not None
def canonicalize_smiles(smiles):
    """Return the canonical (isomeric) form of `smiles`, or '' if invalid."""
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return ''
    return Chem.MolToSmiles(mol, isomericSmiles=True)
def process_smile(smile):
    """Convert a tokenized SMILES string into a form RDKit accepts.

    Strips a leading reaction token (e.g. '<RX_1> ') when present, then
    removes the token-separating spaces.
    """
    if '> ' in smile:
        smile = smile.split('> ')[1]
    return ''.join(smile.split(' '))
def smile_to_mol(smile):
    """Parse a plain (untokenized) SMILES string into an RDKit Mol.

    Returns None when the SMILES is invalid, mirroring Chem.MolFromSmiles.
    """
    return Chem.MolFromSmiles(smile)
def canonicalize_prediction(smiles):
    """Canonicalize a tokenized predicted SMILES string.

    Returns the canonical SMILES re-tokenized with a space between every
    character, or '' when the prediction is not valid SMILES.
    """
    canonical = canonicalize_smiles(process_smile(smiles))
    # ' '.join('') == '', so the empty (invalid) case falls through
    # correctly; the previous explicit branch and per-character list
    # comprehension were redundant.
    return ' '.join(canonical)
def create_prediction_df(molecule_variants, predictions, scores):
# Create dataframe of predictions
# molecule variants - variants of the source product with different reaction tokens
# predictions - list of lists of predicted SMILES for each molecule variant
# scores - list of lists for scores corresponding to predictions
dfs = []
# for ech variant, create dataframe of input, products, mechanisms and predictions
for i in range(len(molecule_variants)):
df_iter = pd.DataFrame({f'Prediction': predictions[i], f'Score': scores[i]})
df_iter[f'Score'] = df_iter[f'Score'].map(lambda x: x.item())
df_iter['Input'] = molecule_variants[i]
df_iter['Product_Molecule'] = df_iter.Input.map(lambda x: x.split('> ')[1])
df_iter['Mechanism'] = df_iter.Input.map(lambda x: x.split(' ')[0])
dfs.append(df_iter)
df = pd.concat(dfs, axis=0)
df = df.reset_index(drop=True)
return df
def clean_predictions(df):
    """Filter and canonicalize a prediction DataFrame.

    Drops invalid SMILES, canonicalizes the rest, enforces stoichiometry
    (every product atom must be available among the reactants), and removes
    trivial predictions that just echo the product molecule.
    """
    # Fix: removed no-op f'...' prefixes and lambda wrappers around
    # single-argument functions.
    # keep only predictions that parse as valid SMILES
    df = df[df['Prediction'].map(smile_valid)]
    df = df.reset_index(drop=True)
    # canonicalize the surviving predictions
    df['Prediction'] = df['Prediction'].map(canonicalize_prediction)
    # every product atom must be covered by the predicted reactants
    df = df[df.apply(lambda row: check_stoichiometry(row['Prediction'], row['Product_Molecule']), axis=1)]
    # drop trivial predictions where the product appears verbatim in the reactants
    df = df[~df.apply(lambda row: row['Product_Molecule'] in row['Prediction'], axis=1)]
    return df.reset_index(drop=True)
def check_stoichiometry(reactants, product):
    """Return True when every atom of `product` is covered by `reactants`.

    Counts atoms by atomic number on both sides; the prediction is valid
    only if the reactants contain at least as many atoms of each element
    as the product requires.
    """
    from collections import Counter  # local import keeps module deps unchanged
    reactant_mol = smile_to_mol(process_smile(reactants))
    product_mol = smile_to_mol(process_smile(product))
    # Counter replaces the previous hand-rolled dict counting loops
    reactant_counts = Counter(atom.GetAtomicNum() for atom in reactant_mol.GetAtoms())
    product_counts = Counter(atom.GetAtomicNum() for atom in product_mol.GetAtoms())
    # Counter subtraction keeps only positive deficits, so the result is
    # empty exactly when the reactants cover every product atom
    return not (product_counts - reactant_counts)
def process_predictions(df):
    """Clean a raw prediction DataFrame and attach heuristic scores."""
    cleaned = clean_predictions(df)
    cleaned.reset_index(inplace=True, drop=True)
    return score_predictions(cleaned)
def score_predictions(df):
    """Add a Prediction_Score column computed by `heuristic_scoring`.

    Mutates and returns `df`.
    """
    # Fix: removed no-op f'...' prefixes on constant column names.
    df['Prediction_Score'] = df.apply(
        lambda row: heuristic_scoring(row['Product_Molecule'],
                                      row['Prediction'],
                                      row['Score']),
        axis=1)
    return df
def calc_ring_change(product, predicted_reactant):
    """Return (#rings in product) - (#rings in predicted reactants).

    Positive values mean the prediction broke rings open, i.e. it made
    the molecule simpler — the direction we want to reward when selecting
    reactant predictions.
    """
    product_rings = Chem.rdMolDescriptors.CalcNumRings(smile_to_mol(process_smile(product)))
    reactant_rings = Chem.rdMolDescriptors.CalcNumRings(smile_to_mol(process_smile(predicted_reactant)))
    return product_rings - reactant_rings
def average_compound_size(smile, ret_max=False):
    """Average (or maximum, when `ret_max`) token count per molecule.

    A '.'-separated SMILES string is treated as multiple molecules; each
    molecule's size is its number of space-separated tokens.
    """
    compounds = smile.split('.') if '.' in smile else [smile]
    lengths = [len(c.split(' ')) for c in compounds]
    return max(lengths) if ret_max else sum(lengths) / len(lengths)
def compound_size_change(product, predicted_reactant):
    """Product token size minus the largest predicted-reactant size.

    Positive when every reactant is smaller than the product, which is the
    desirable direction for a retrosynthesis step.
    """
    largest_reactant = average_compound_size(predicted_reactant, ret_max=True)
    return average_compound_size(product) - largest_reactant
def heuristic_scoring(product, predicted_reactant, model_score, a=100, b=10, c=3):
    """Score a prediction from model confidence plus complexity reduction.

    The exponentiated model score (weighted by `a`) is always included.
    Ring-change and size-change terms (weighted by `b` and `c`) are added
    only for confident predictions (model_score > -2), because occasional
    low-confidence single-atom outputs inflate those terms.
    """
    model_confidence = a * np.exp(model_score)
    ring_term = b * calc_ring_change(product, predicted_reactant)
    size_term = c * compound_size_change(product, predicted_reactant)
    if model_score > -2:
        return model_confidence + ring_term + size_term
    return model_confidence
def check_terminal(smile):
    """Return True when a product molecule should not be broken down further.

    Terminal cases: Grignard reagents (Grignard-forming reactions are absent
    from the training data, so they cannot be predicted), molecules with
    fewer than 11 atoms, and SMILES strings of 10 characters or fewer.
    """
    smile = process_smile(smile)
    if 'Mg' in smile:
        return True
    if smile_to_mol(smile).GetNumAtoms() < 11:
        return True
    # Fix: the trailing `if ...: return True else: return False` collapses
    # to returning the condition directly.
    return len(smile) <= 10
# dictionary mapping reaction-class tokens to human-readable mechanism names
# (used for plot legends and __repr__ output)
rxn_dict = {
'<RX_1>': 'Heteroatom Alkylation and Arylation',
'<RX_2>': 'Acylation and Related Processes',
'<RX_3>': 'C-C Bond Formation',
'<RX_4>': 'Heterocycle Formation',
'<RX_5>': 'Protection',
'<RX_6>': 'Deprotection',
'<RX_7>': 'Reduction',
'<RX_8>': 'Oxidation',
'<RX_9>': 'Functional Group Interconversion',
'<RX_10>': 'Functional Group Addition'
}
class Reaction():
    """A single retrosynthesis step: one product and its predicted reactants.

    Instantiated with the tokenized SMILES of the product molecule and a
    score threshold for filtering predictions.  The RetroSynthesis driver
    supplies a DataFrame of reactant predictions via add_prediction(); this
    class filters them, keeps the single best (mechanism, reactants) pair,
    and records whether the node is terminal.  Nodes link into a tree via
    the `parent` and `children` attributes.
    """

    def __init__(self, product, score_threshold=36):
        self.product = product
        # One input variant per reaction-mechanism token (10 mechanisms).
        # Each variant is translated separately (10 beam-search predictions
        # per mechanism) and the single best prediction is selected.
        self.product_variants = create_reaction_variants(product)
        self.reactants = None
        self.children = []
        self.parent = None
        self.terminal = False
        self.process_product(self.product)
        self.score_threshold = score_threshold

    def add_prediction(self, df):
        """Attach the prediction DataFrame for this product and pick the best row.

        Prediction itself is run in batch by the RetroSynthesis class.
        """
        self.prediction_df = df
        if self.parent:
            # Drop predictions containing the parent molecule: this prevents
            # recursive loops where two molecules predict one another forever.
            parent_mask = self.prediction_df.apply(
                lambda row: self.parent.product in row['Prediction'], axis=1)
            self.prediction_df = self.prediction_df[~parent_mask].reset_index(drop=True)
        if self.prediction_df.shape[0] == 0:
            # No valid, unique predictions survived: this node is terminal.
            self.terminal = True
            return
        self.top_prediction = self.prediction_df.nlargest(1, 'Prediction_Score')
        if self.top_prediction.Prediction_Score.values[0] < self.score_threshold:
            # Best prediction is below the confidence threshold: terminal.
            self.terminal = True
        else:
            self.add_reactants(self.top_prediction.Prediction.values[0],
                               self.top_prediction.Mechanism.values[0])

    def process_product(self, product):
        """Mark the node terminal if the product itself should not be split."""
        if check_terminal(product):
            self.terminal = True

    def add_reactants(self, reactant, mechanism):
        """Record the chosen reactant string, its molecules and the mechanism.

        `reactants` keeps the exact predicted string (possibly several
        molecules); `individual_reactants` splits it into single molecules.
        """
        self.reactants = reactant
        if '.' in reactant:
            self.individual_reactants = reactant.split(' . ')
        else:
            self.individual_reactants = [reactant]
        self.reaction_mechanism = mechanism

    def display_reaction(self, img_size=(400, 400)):
        """Draw the product molecule alongside its reactants, if predicted."""
        mols = [smile_to_mol(process_smile(self.product))]
        legend = ['Product']
        if self.reactants:
            mols.append(smile_to_mol(process_smile(self.reactants)))
            legend.append(f'Reactants ({rxn_dict[self.reaction_mechanism]})')
        return Draw.MolsToGridImage(mols, subImgSize=img_size, legends=legend)

    def display_prediction(self, idx, img_size=(400, 400)):
        """Draw prediction `idx` from prediction_df.

        Useful for examining predictions that did not get selected.
        """
        row = self.prediction_df.iloc[idx]
        prod = smile_to_mol(process_smile(row.Product_Molecule))
        pred = smile_to_mol(process_smile(row.Prediction))
        legend = [f'Reactants ({rxn_dict[row.Mechanism]})', 'Product']
        return Draw.MolsToGridImage([pred, prod], legends=legend, subImgSize=img_size)

    def __repr__(self):
        """Product SMILES, reactants when known, and terminal status."""
        s = 'Product: ' + process_smile(self.product) + '\n'
        if self.reactants:
            s += 'Reactants: ' + process_smile(self.reactants) + '\n'
            s += 'Reaction Mechanism: ' + rxn_dict[self.reaction_mechanism]
        elif self.terminal:
            s += 'Terminal Reactant'
        else:
            s += 'Reactants not yet identified'
        s += '\n'
        return s
class RetroSynthesis():
    """Drives a full retrosynthesis search for one target molecule.

    Owns a tree of Reaction nodes and grows it by repeatedly running
    batched reactant prediction until every node is terminal or resolved,
    or `max_depth` iterations have been performed.
    """

    def __init__(self, molecule, max_depth=15):
        # `max_depth` bounds the recursion to avoid infinite expansion.
        self.molecule = molecule
        self.smile = process_smile(molecule)
        self.max_depth = max_depth
        self.depth = 0
        self.reactions = []
        self.nodes = [Reaction(molecule)]

    def batch_tree_prediction(self):
        """Predict reactants for every active node in one batch, then recurse.

        An active node is one that is neither terminal nor already resolved.
        Translating all active products at once is much more efficient than
        predicting one node at a time.
        """
        active_nodes = [node for node in self.nodes if self.is_active(node)]
        if not active_nodes or self.depth > self.max_depth:
            return
        source_inputs = []
        for node in active_nodes:
            # every reaction-token variant of every active product
            source_inputs += node.product_variants
        # all variants go to one txt file for a single translation run
        molecules_to_file(source_inputs, 'source_products_16.txt')
        scores, predictions = translate_file('source_products_16.txt', 'op_16x_3800_50_10.txt')
        prediction_df = create_prediction_df(source_inputs, predictions, scores)
        # filter invalid SMILES and attach heuristic scores
        prediction_df = process_predictions(prediction_df)
        for node in active_nodes:
            # hand each node only the rows for its own product molecule
            node_df = prediction_df[prediction_df.Product_Molecule == node.product]
            node.add_prediction(node_df.reset_index(drop=True))
            # grow the tree from this node's predicted reactants
            self.create_children(node)
        self.depth += 1
        # keep expanding until nothing is active or max_depth is reached
        self.batch_tree_prediction()

    def create_children(self, node):
        """Create a child Reaction node for each predicted reactant of `node`."""
        if node.reactants and not node.terminal:
            for reactant in node.individual_reactants:
                child_node = Reaction(reactant)
                # wire up the tree in both directions
                child_node.parent = node
                node.children.append(child_node)
                self.nodes.append(child_node)

    def is_active(self, node):
        """A node is active when it has no reactants yet and is not terminal."""
        return not (node.reactants or node.terminal)

    def extract_reaction(self, node):
        """Depth-first collection of all predicted reactions under `node`."""
        if node.reactants:
            self.reactions.append([node.product, node.reaction_mechanism,
                                   node.reactants,
                                   node.top_prediction.Prediction_Score.values[0]])
        for child_node in node.children:
            self.extract_reaction(child_node)

    def display_synthesis(self, img_size=(400, 400)):
        """Draw the predicted reactions in reverse (forward-synthesis) order."""
        reaction_mols = []
        legend = []
        for product, mechanism, reactants, score in reversed(self.reactions):
            reaction_mols += [smile_to_mol(process_smile(reactants)),
                              smile_to_mol(process_smile(product))]
            legend += [f'Reactant ({rxn_dict[mechanism]})', f'Product ({score:.4})']
        return Draw.MolsToGridImage(reaction_mols, legends=legend,
                                    molsPerRow=2, subImgSize=img_size)

    def run_retrosynthesis(self):
        """Build the full tree, then collect reactions from the root node."""
        self.batch_tree_prediction()
        self.extract_reaction(self.nodes[0])

    def __repr__(self):
        return f'Retrosynthesis for {self.smile}'
| 37.640977 | 141 | 0.661923 | 2,522 | 20,025 | 5.109437 | 0.190325 | 0.018625 | 0.009312 | 0.013193 | 0.131228 | 0.088856 | 0.045088 | 0.023902 | 0.019711 | 0.014589 | 0 | 0.009146 | 0.262871 | 20,025 | 531 | 142 | 37.711864 | 0.86383 | 0.313458 | 0 | 0.154605 | 0 | 0 | 0.092101 | 0.016266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108553 | false | 0 | 0.039474 | 0.006579 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21056749b82e52d6799015dcaa1da9696577b10d | 2,051 | py | Python | src/278.first-bad-version.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/278.first-bad-version.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/278.first-bad-version.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=278 lang=python3
#
# [278] First Bad Version
#
# https://leetcode.com/problems/first-bad-version/description/
#
# algorithms
# Easy (38.48%)
# Likes: 2518
# Dislikes: 907
# Total Accepted: 609.3K
# Total Submissions: 1.6M
# Testcase Example: '5\n4'
#
# You are a product manager and currently leading a team to develop a new
# product. Unfortunately, the latest version of your product fails the quality
# check. Since each version is developed based on the previous version, all the
# versions after a bad version are also bad.
#
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first
# bad one, which causes all the following ones to be bad.
#
# You are given an API bool isBadVersion(version) which returns whether version
# is bad. Implement a function to find the first bad version. You should
# minimize the number of calls to the API.
#
#
# Example 1:
#
#
# Input: n = 5, bad = 4
# Output: 4
# Explanation:
# call isBadVersion(3) -> false
# call isBadVersion(5) -> true
# call isBadVersion(4) -> true
# Then 4 is the first bad version.
#
#
# Example 2:
#
#
# Input: n = 1, bad = 1
# Output: 1
#
#
#
# Constraints:
#
#
# 1 <= bad <= n <= 2^31 - 1
#
#
#
# @lc code=start
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Bug fix: the class previously defined firstBadVersion twice, so
        # the first (half-open interval) implementation was silently
        # shadowed dead code.  A single closed-interval [lo, hi] binary
        # search is kept; behavior is that of the definition that won.
        lo, hi = 1, n
        while lo <= hi:
            # midpoint written to avoid overflow in fixed-width languages
            mid = lo + (hi - lo) // 2
            if not isBadVersion(mid):
                # mid is good: first bad version is strictly after mid
                lo = mid + 1
            else:
                # mid is bad: first bad version is mid or earlier
                hi = mid - 1
        return lo  # loop ends with lo at the first bad version
# @lc code=end
| 21.819149 | 79 | 0.577279 | 288 | 2,051 | 4.111111 | 0.444444 | 0.027027 | 0.050676 | 0.030405 | 0.106419 | 0.091216 | 0.091216 | 0.091216 | 0.091216 | 0.091216 | 0 | 0.038163 | 0.310093 | 2,051 | 93 | 80 | 22.053763 | 0.798587 | 0.648952 | 0 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21087231d394eadd113671e581a3ce0916c09477 | 711 | py | Python | checkin.py | PPKunOffical/SAuto_Checkin | 0cb4b3dce4483d9754fa13e1cdce613c5aaa861a | [
"MIT"
] | 1 | 2020-07-10T04:34:20.000Z | 2020-07-10T04:34:20.000Z | checkin.py | PPKunOffical/SAuto_Checkin | 0cb4b3dce4483d9754fa13e1cdce613c5aaa861a | [
"MIT"
] | null | null | null | checkin.py | PPKunOffical/SAuto_Checkin | 0cb4b3dce4483d9754fa13e1cdce613c5aaa861a | [
"MIT"
] | null | null | null | from fake_useragent import UserAgent
import requests,json
accouts=json.loads(open("./accouts.json", mode='r').read())
ua = UserAgent()
for i in accouts["sp"]:
print("--------"+i["name"]+"----------")
checkin_url=i["url"]["cu"]
login_url=i["url"]["lu"]
login_data={
"email":i["ac"]["u"],
"passwd":i["ac"]["p"]
}
headers={
"user-agent":ua.random
}
print("User-Agent:",headers)
lose=requests.Session()
try:
login_res=lose.post(login_url,data=login_data,headers=headers).content.decode("unicode-escape")
checkin_res=lose.post(checkin_url,headers=headers).content.decode("unicode-escape")
data_checkin=checkin_res
print("Return Json:",data_checkin,"\n")
except:
print("Error!")
| 27.346154 | 97 | 0.663854 | 100 | 711 | 4.6 | 0.47 | 0.065217 | 0.030435 | 0.117391 | 0.173913 | 0.173913 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102672 | 711 | 25 | 98 | 28.44 | 0.721003 | 0 | 0 | 0 | 0 | 0 | 0.189873 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.041667 | 0.083333 | 0 | 0.083333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2109339d104ffd306d701e278e5cda2613de7b4e | 5,275 | py | Python | tests/other/region_counts_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 4 | 2020-01-14T01:01:36.000Z | 2022-03-21T16:30:24.000Z | tests/other/region_counts_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 9 | 2019-12-17T20:45:08.000Z | 2021-12-15T22:34:06.000Z | tests/other/region_counts_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 3 | 2019-12-14T17:51:53.000Z | 2022-01-12T16:09:45.000Z | # -*- coding: utf-8 -*-
import unittest
import os
from io import StringIO, BytesIO
import numpy as np
import h5py
from ribopy import create
from ribopy.core.coverage import find_coverage
from ribopy.core.region_counts import get_extended_boundaries,\
find_region_counts
from ribopy.core.get_gadgets import get_reference_names,\
get_reference_lengths,\
get_region_boundaries
from ribopy.settings import *
import sys
test_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(test_dir)
from test_data import *
###########################################
# Test fixtures: transcript lengths, region annotation (UTR5/CDS/UTR3 per
# transcript) and a synthetic read set, in whitespace-separated BED-like
# columns (name, start, end, label, score, strand).
TRANSCRIPT_LENGTHS=\
"""GAPDH 90
VEGFA 12
FLT 40
MYC 1
P53 85"""
TRANSCRIPT_ANNOTATION=\
"""GAPDH 0 20 UTR5 0 -
GAPDH 20 50 CDS 0 -
GAPDH 50 90 UTR3 0 -
VEGFA 0 10 CDS 0 +
VEGFA 10 12 UTR3 0 +
FLT 0 15 UTR5 0 +
FLT 15 40 CDS 0 +
MYC 0 1 CDS 0 -
MYC 1 1 UTR3 0 -
P53 0 20 UTR5 0 +
P53 20 55 CDS 0 +
P53 55 85 UTR3 0 +"""
READ_SET_1=\
"""GAPDH 2 4 read_UTR5_1 0 +
GAPDH 3 5 read_UTR5_2 0 +
GAPDH 10 14 read_UTR5_3 0 +
GAPDH 10 14 read_UTR5_4 0 +
GAPDH 11 14 read_UTR5_5 0 +
GAPDH 12 14 read_UTR5_6 0 +
GAPDH 14 25 read_UTR5_7 0 +
GAPDH 15 20 read_UTR5_junc_1 0 +
GAPDH 16 20 read_UTR5_junc_2 0 +
GAPDH 17 20 read_UTR5_junc_3 0 +
GAPDH 20 25 read_UTR5_junc_4 0 +
GAPDH 23 25 read_UTR5_junc_5 0 +
GAPDH 24 30 read_CDS_1 0 +
GAPDH 30 35 read_CDS_2 0 +
GAPDH 44 46 read_CDS_3 0 +
GAPDH 45 55 read_UTR3_junc_1 0 +
GAPDH 53 57 read_UTR3_junc_2 0 +
GAPDH 54 57 read_UTR3_1 0 +
VEGFA 0 1 read_UTR5_junc_1 0 +
VEGFA 3 5 read_UTR5_junc_2 0 +
VEGFA 4 11 read_CDS_1 0 +
VEGFA 4 5 read_CDS_2 0 +
VEGFA 11 12 read_UTR3_junc_1 0 +
FLT 36 40 read_UTR3_junc_1 0 +
MYC 0 1 read_UTR5_junc_1 0 +
P53 5 15 read_UTR5_1 0 +
P53 25 35 read_CDS_1 0 +"""
#########################################
##### Expected EXTENDED ANNOTATION
# Five (start, stop) regions per transcript, in the order:
# UTR5, UTR5_junction, CDS, UTR3_junction, UTR3.
GAPDH_regions = ( (0, 15), (15, 24), (24, 45), (45, 54), (54, 90) )
VEGFA_regions = ( (0, 0), (0, 4), (4, 5), (5, 12), (12, 12) )
FLT_regions = ( (0, 10), (10, 19), (19, 35), (35, 40), (40, 40) )
MYC_regions = ( (0, 0), (0, 1), (1, 1), (1, 1), (1, 1) )
P53_regions = ( (0, 15), (15, 24), (24, 50), (50, 59), (59, 85) )
extended_boundary_regions = ( GAPDH_regions, VEGFA_regions,
FLT_regions, MYC_regions, P53_regions )
#########################################
#########################################
### Expected Region Counts
# Order is UTR5, UTR5_junc, CDS, UTR3_junc, UTR3
GAPDH_counts = ( 7, 5, 3, 2, 1 )
VEGFA_counts = ( 0, 2, 2, 1, 0 )
FLT_counts = ( 0, 0, 0, 1, 0 )
MYC_counts = ( 0, 1, 0, 0, 0 )
P53_counts = ( 1, 0, 1, 0, 0 )
expected_counts = ( GAPDH_counts, VEGFA_counts, FLT_counts,
MYC_counts, P53_counts )
#########################################
# Junction extension around each CDS boundary: LEFT_SPAN positions into the
# neighbouring UTR and RIGHT_SPAN positions into the CDS.
LEFT_SPAN = 5
RIGHT_SPAN = 3
# Per-transcript (UTR5, CDS, UTR3) boundaries matching TRANSCRIPT_ANNOTATION.
ANNOTATION = [\
([0, 20], [20, 50], [50, 90]),\
([0, 0], [0, 10], [10, 12]),\
([0, 15], [15, 40], [40, 40]),\
([0, 0], [0, 1], [1, 1]),\
([0, 20], [20, 55], [55, 85])
]
def _get_transcripts( file_in_string ):
rows = file_in_string.split("\n")
pairs = tuple( map( lambda x: x.split(), rows ) )
ref_names = tuple( map( lambda x: x[0], pairs ) )
ref_lengths = tuple( map( lambda x: x[1], pairs ) )
ref_lengths = tuple (map( int, ref_lengths ) )
return (ref_names, ref_lengths)
class TestCreate(unittest.TestCase):
    """Unit tests for extended region boundaries and region counting."""

    def setUp(self):
        # in-memory stand-ins for the reference/annotation/alignment files
        self.ref_len_file = StringIO(TRANSCRIPT_LENGTHS)
        self.annotation_file = StringIO(TRANSCRIPT_ANNOTATION)
        self.alignment_file = StringIO(READ_SET_1)
        self.handle = h5py.File(BytesIO(), "w")

    def tearDown(self):
        self.handle.close()

    def test_get_extended_boundaries(self):
        """Extended boundaries must match the hand-computed fixtures."""
        input_stream = StringIO(READ_SET_1)
        ref_names, ref_lengths = _get_transcripts(TRANSCRIPT_LENGTHS)
        coverage = find_coverage(input_stream, ref_names, ref_lengths)
        extended_boundaries = get_extended_boundaries(
            annotation=ANNOTATION,
            left_span=LEFT_SPAN,
            right_span=RIGHT_SPAN)
        for computed, expected in zip(extended_boundaries,
                                      extended_boundary_regions):
            self.assertTrue(computed == expected)

    def test_find_region_counts(self):
        """Per-region read counts must match the expected table."""
        input_stream = StringIO(READ_SET_1)
        ref_names, ref_lengths = _get_transcripts(TRANSCRIPT_LENGTHS)
        coverage = find_coverage(input_stream, ref_names, ref_lengths)
        rg = find_region_counts(coverage=coverage,
                                annotation=ANNOTATION,
                                left_span=LEFT_SPAN,
                                right_span=RIGHT_SPAN)
        for i in range(5):
            comparison = (rg[i, :] == expected_counts[i])
            self.assertTrue(np.all(comparison))
if __name__ == '__main__':
unittest.main()
| 30.316092 | 73 | 0.558104 | 743 | 5,275 | 3.703903 | 0.18035 | 0.041424 | 0.034884 | 0.032703 | 0.239099 | 0.177326 | 0.146802 | 0.135174 | 0.135174 | 0.135174 | 0 | 0.11595 | 0.303507 | 5,275 | 173 | 74 | 30.491329 | 0.633097 | 0.022938 | 0 | 0.121951 | 0 | 0 | 0.002992 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.060976 | false | 0 | 0.146341 | 0 | 0.231707 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
210ba069336ddf00cd66106d81ef2dad2dadafb5 | 5,974 | py | Python | src/pg_cluster_monitor/monitor/cluster_monitor.py | treshnikov/pgClusterMonitor | e8fd765f08e5813805af0c23ce32e4cc92b5c862 | [
"MIT"
] | 1 | 2020-10-11T17:17:44.000Z | 2020-10-11T17:17:44.000Z | src/pg_cluster_monitor/monitor/cluster_monitor.py | treshnikov/pgClusterMonitor | e8fd765f08e5813805af0c23ce32e4cc92b5c862 | [
"MIT"
] | null | null | null | src/pg_cluster_monitor/monitor/cluster_monitor.py | treshnikov/pgClusterMonitor | e8fd765f08e5813805af0c23ce32e4cc92b5c862 | [
"MIT"
] | 1 | 2020-12-06T19:50:45.000Z | 2020-12-06T19:50:45.000Z | import time
import logging
from cluster.cluster import DbCluster
from monitor.master_db_handler import MasterDbHandler
from monitor.standby_db_handler import StandbyDbHandler
from cluster.cluster_node_role import DbRole
from monitor.webserver import WebServer
from utils import shell
from threading import Lock
class DbClusterMonitor:
"""Class monitors DB nodes of the cluster, performs auto-failover command,
handles the cases when a standby DB is down and when the cluster has two master DB."""
def __init__(self, config):
self.logger = logging.getLogger("logger")
self.logger.info(f"DbClusterMonitor started with config {config._sections}")
main_config_section = config["main"]
self.local_node_host_name = main_config_section["local_node_host_name"]
self.db_cluster = DbCluster(config.items("cluster"))
self.cluster_scan_period_sec = main_config_section.getint("cluster_scan_period_sec")
self.get_network_status_string_command = main_config_section["cmd_get_network_status_string"]
self.success_network_status_string = main_config_section["cmd_success_network_status_string"]
self.timeout_to_failover_sec = main_config_section.getint("timeout_to_failover_sec")
self.timeout_to_downgrade_master_sec = main_config_section.getint("timeout_to_downgrade_master_sec")
self.promote_command = main_config_section["cmd_promote_standby_to_master"]
self.get_db_status_string_command = main_config_section["cmd_get_db_status_string"]
self.success_db_status_string = main_config_section["cmd_success_db_status_string"]
self.start_db_command = main_config_section["cmd_start_db"]
self.stop_db_command = main_config_section["cmd_stop_db"]
self.isRunning = None
self.replication_slot_name = main_config_section["replication_slot_name"]
self.pg_rewind_command = main_config_section["cmd_pg_rewind_command"]
self.pg_basebackup_command = main_config_section["cmd_pg_basebackup_command"]
self.pg_data_path = main_config_section["pg_data_path"]
self.create_db_directories_command = main_config_section["cmd_create_db_directories"]
self.remove_db_directories_command = main_config_section["cmd_remove_db_directories"]
self.get_cluster_state_lock = Lock()
self.webserver = WebServer(self.get_cluster_state, main_config_section["webserver_address"], int(main_config_section["webserver_port"]))
self.timeout_to_check_replication_status_after_start_sec = main_config_section.getint("timeout_to_check_replication_status_after_start_sec")
def check_local_postgre_sql_server_status(self):
    """Verify the local PostgreSQL server is up; try to start it if not.

    Returns True when the status command reports success. Otherwise
    issues the start command and returns False so the caller can retry
    on the next monitoring cycle.
    """
    self.logger.debug("Check that the local server of PostgreSQL is running.")
    status_output = shell.execute_cmd(self.get_db_status_string_command)
    if self.success_db_status_string in status_output:
        self.logger.debug("Local PostgreSQL server is running.")
        return True
    self.logger.critical("Local PostgreSQL server is not running, trying to start it.")
    shell.execute_cmd(self.start_db_command)
    return False
def get_cluster_state(self):
    """Return the current cluster nodes mapping (thread-safe)."""
    lock = self.get_cluster_state_lock
    lock.acquire()
    try:
        # Serialized against the monitoring loop updating the cluster.
        return self.db_cluster.nodes
    finally:
        lock.release()
def analyze_cluster(self):
    """Main procedure which performs one cluster-monitoring pass."""
    # Nothing else can be inspected while the local server is down.
    if not self.check_local_postgre_sql_server_status():
        return
    # Refresh the view of every node in the cluster.
    self.db_cluster.update()
    if self.local_node_host_name not in self.db_cluster.nodes:
        self.logger.error(f"Local DB with host name {self.local_node_host_name} is not in the cluster.")
        return
    # Require a live connection to the local DB before acting.
    node_info = self.db_cluster.nodes[self.local_node_host_name]
    if not node_info.connected:
        self.logger.warning(f"There is no connection with {node_info.host_name}. Keep waiting for the connection.")
        return
    # Dispatch to the handler matching the local node's current role.
    handler = None
    if node_info.state.db_role == DbRole.MASTER:
        handler = MasterDbHandler(self.local_node_host_name, self.start_db_command, self.stop_db_command,
                                  self.pg_rewind_command, self.pg_basebackup_command, self.pg_data_path,
                                  self.replication_slot_name, self.create_db_directories_command,
                                  self.remove_db_directories_command, self.timeout_to_downgrade_master_sec,
                                  self.timeout_to_check_replication_status_after_start_sec)
    elif node_info.state.db_role == DbRole.STANDBY:
        handler = StandbyDbHandler(self.local_node_host_name, self.get_network_status_string_command,
                                   self.timeout_to_failover_sec, self.promote_command,
                                   self.replication_slot_name, self.success_network_status_string)
    if handler is not None:
        handler.handle_cluster_state(self.db_cluster)
def stop(self):
    """Stop service: shut down the web server and end the main cycle."""
    # Stop the status web server first, then signal the loop in start().
    self.webserver.stop()
    self.logger.info("Service has received a stop command.")
    self.isRunning = False
def start(self):
    """Start service and run the main monitoring cycle of the DB cluster.

    Blocks until stop() flips ``isRunning``; every iteration runs one
    analyze_cluster() pass and then sleeps ``cluster_scan_period_sec``.
    """
    self.logger.info("Service is starting.")
    self.isRunning = True
    self.webserver.start()
    while self.isRunning:
        try:
            self.analyze_cluster()
        except Exception as ex:
            # Keep the monitor alive on any single-pass failure; log with traceback.
            self.logger.exception(f"Main cycle: {ex}")
        # Sleep runs even after an exception, so failures are also rate-limited.
        time.sleep(self.cluster_scan_period_sec)
    self.logger.info("The service main cycle has been finished.")
| 51.5 | 148 | 0.713927 | 784 | 5,974 | 5.084184 | 0.1875 | 0.052684 | 0.089563 | 0.055193 | 0.435273 | 0.275464 | 0.163573 | 0.056197 | 0.024084 | 0 | 0 | 0 | 0.21376 | 5,974 | 115 | 149 | 51.947826 | 0.848627 | 0.101942 | 0 | 0.034884 | 0 | 0 | 0.181049 | 0.082158 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.104651 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2118073a43ee9cd6d977b43ed7ee65bc1f3bd9e9 | 5,668 | py | Python | utils/reddit_scraper.py | azsry/orb | e76f581a16a1587e2c1d6c4a1dffe50291deb706 | [
"MIT"
] | 3 | 2019-05-31T03:27:27.000Z | 2020-03-24T07:58:19.000Z | utils/reddit_scraper.py | azsry/orb | e76f581a16a1587e2c1d6c4a1dffe50291deb706 | [
"MIT"
] | 6 | 2019-05-20T14:13:31.000Z | 2021-11-24T02:35:03.000Z | utils/reddit_scraper.py | azsry/orb | e76f581a16a1587e2c1d6c4a1dffe50291deb706 | [
"MIT"
] | 4 | 2019-05-15T01:31:13.000Z | 2021-03-21T08:33:45.000Z | import discord
import mimetypes
import requests
import random
from io import BytesIO
from utils import permissions, http
from discord.ext.commands import errors
class RedditPost:
    """
    Represents a single reddit post.

    Attributes:
        subreddit (str): the subreddit that the post belongs to
        title (str): the title of the post
        author (str): the author of the post
        url (str): link to the image within the reddit post
        nsfw (bool): whether or not the post is flagged for NSFW content
        upvotes (int): the number of upvotes
    """

    def __init__(self, subreddit, title, author, url, nsfw, upvotes):
        self.subreddit = subreddit
        self.title = title
        self.author = author
        self.url = url
        self.nsfw = nsfw
        self.upvotes = upvotes

    def get_subreddit(self):
        """Return the subreddit this post belongs to."""
        return self.subreddit

    def get_title(self):
        """Return the post title."""
        return self.title

    def get_author(self):
        """Return the post author's username."""
        return self.author

    def get_url(self):
        """Return the linked image URL."""
        return self.url

    def get_nsfw(self):
        """Return True if the post is flagged NSFW."""
        return self.nsfw

    def get_upvotes(self):
        """Return the upvote count."""
        return self.upvotes

    def __str__(self):
        return f'{self.subreddit} {self.title} {self.author} {self.url} {self.nsfw} {self.upvotes:d}'
def _get_rand_post(json_url):
    """Parse the subreddit JSON listing and return a random RedditPost.

    Params:
        (str) json_url: URL of the subreddit's JSON listing
    Returns:
        (RedditPost): a randomly chosen, successfully parsed post
    Raises:
        ValueError: if the listing is empty or no post could be parsed
    """
    all_post_data = requests.get(url=json_url,
                                 headers={'user-agent': 'created by Paigekins'}
                                 ).json()['data']['children']
    # An empty listing is `[]`, not None — the old `is None` check never
    # fired and random.choice then crashed with IndexError.
    if not all_post_data:
        raise ValueError('No posts in subreddit.')
    # Sample without replacement: the old retry loop spun forever when
    # every post was missing an expected key.
    candidates = list(all_post_data)
    random.shuffle(candidates)
    for candidate in candidates:
        try:
            rand_post = candidate['data']
            return RedditPost(
                rand_post['subreddit'],
                rand_post['title'],
                rand_post['author'],
                rand_post['url'],
                rand_post['over_18'],
                rand_post['ups']
            )
        except KeyError:
            print('Post could not be parsed. Finding another...')
    raise ValueError('No posts in subreddit.')
def _is_image(url):
"""
Checks whether a normal url contains an image.
Params:
(str) url: the url for the image
Returns:
(bool): True if url contains an image, False otherwise.
"""
mimetype, encoding = mimetypes.guess_type(url)
return (mimetype and mimetype.startswith('image'))
def _is_gif(url):
"""Checks if the url contains a gif (not implemented yet)"""
content_type = requests.head(url).headers['Content-Type']
pass
def most_upvoted(all_post_data):
    """
    Finds the most upvoted post from some data containing posts (not implemented).

    Params:
        (dict) all_post_data: data on one or more posts
    Returns:
        (dict): data on the most highly upvoted post
    """
    # NOTE(review): still a stub — currently returns None for every input.
    # The exact shape of all_post_data (raw reddit JSON vs. parsed posts)
    # should be pinned down before implementing.
    pass
async def _create_embed(ctx, post: RedditPost):
    """Build the Discord embed for *post* and return (embed, image url)."""
    try:
        requester = ctx.message.author
        rand_url = post.get_url()
        # Discord rejects embed titles of 256+ characters.
        title_text = post.get_title()
        if len(title_text) >= 256:
            title_text = 'Title too long to display ;A;'
        description_text = (
            '**OP**: ' + '/u/' + post.get_author()
            + f'\n **Updoots**: {str(post.get_upvotes())} \n'
        )
        embed = discord.Embed(
            title=title_text,
            description=description_text,
            url=post.get_url(),
            colour=ctx.me.top_role.colour,
        )
        embed.set_footer(text=f'Requested by {requester.name}, and retrieved from /r/ {post.get_subreddit()}.')
        return embed, rand_url
    except AttributeError:
        raise AttributeError('Post parameter must be a RedditPost object.')
async def reddit_imgscrape(ctx, url):
    """
    Randomly selects an image from a subreddit corresponding to a json url and sends it to the channel.
    Checks for permissions, NSFW-mode.

    Params:
        (commands.Context): context
        (str): json url
    """
    current_channel = ctx.message.channel
    author = ctx.message.author
    try:
        await ctx.trigger_typing()
        rand_post = _get_rand_post(url)  # RedditPost object
        embed, rand_url = await _create_embed(ctx, rand_post)
        if not permissions.can_attach(ctx):
            await current_channel.send('I cannot upload images/GIFs here ;w;')
        elif not permissions.is_nsfw(ctx) and rand_post.get_nsfw():
            await current_channel.send(f'L-lewd {author.name}! NSFW commands go in NSFW channels!! >///<')
        else:
            try:
                if _is_image(rand_url):
                    # Download the image and re-upload it as an attachment.
                    bio = BytesIO(await http.get(rand_url, res_method='read'))
                    extension = rand_url.split('.')[-1]
                    await current_channel.send(embed=embed)
                    await current_channel.send(file=discord.File(bio, filename=f'image.{extension}'))
                else:
                    # Not a recognizable image: let Discord preview the raw url.
                    await current_channel.send(embed=embed)
                    await current_channel.send(rand_url)
            except KeyError:
                await current_channel.send('That didn\'t work ;o; please try the command again.')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / task
        # cancellation still propagate instead of being swallowed.
        await current_channel.send("Something went wrong, but I'm not sure what. Please slow down and try again")
| 31.142857 | 114 | 0.582216 | 694 | 5,668 | 4.619597 | 0.301153 | 0.029944 | 0.047411 | 0.057392 | 0.034935 | 0.034935 | 0.034935 | 0.034935 | 0.034935 | 0.034935 | 0 | 0.001566 | 0.323924 | 5,668 | 181 | 115 | 31.314917 | 0.835073 | 0.152611 | 0 | 0.130841 | 0 | 0 | 0.139027 | 0.011388 | 0.009346 | 0 | 0 | 0 | 0 | 1 | 0.11215 | false | 0.018692 | 0.065421 | 0.065421 | 0.280374 | 0.009346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
211b0c35bf8dc4c4edb3279e90482b7dda9c860a | 10,321 | py | Python | loris/app/views/main.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | 1 | 2021-08-01T02:02:54.000Z | 2021-08-01T02:02:54.000Z | loris/app/views/main.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | null | null | null | loris/app/views/main.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | 3 | 2020-03-31T10:26:46.000Z | 2021-08-02T00:12:54.000Z | """views
"""
from flask import render_template, request, flash, url_for, redirect, \
send_from_directory, session
from functools import wraps
from flask_login import current_user, login_user, login_required, logout_user
import datajoint as dj
import pandas as pd
from loris import config
from loris.app.app import app
from loris.app.templates import form_template, joined_table_template
from loris.app.forms.dynamic_form import DynamicForm
from loris.app.forms.fixed import (
dynamic_jointablesform, dynamic_settingstableform, LoginForm,
PasswordForm, dynamic_tablecreationform
)
from loris.app.utils import draw_helper, get_jsontable, user_has_permission
from loris.utils import save_join
from loris.app.login import User
from loris.database.users import grantuser, change_password, grantprivileges
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    Rejects root, bad credentials and inactive members; users still on
    the shared standard password are logged in but forced to the
    change-password page first.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User(form.user_name.data)
        if user.user_name == 'root':
            flash('Cannot login as root', 'error')
        elif not user.user_exists or not user.check_password(form.password.data):
            flash('Invalid username or password', 'error')
        elif not user.is_active:
            flash(f'User {user.user_name} is an inactive member', 'error')
        elif form.password.data == config['standard_password']:
            # Default password: log in but push straight to /change.
            flash('Please change your password', 'warning')
            login_user(user)
            return redirect(url_for('change'))
        else:
            login_user(user)
            # Honor an optional ?target=<url> post-login redirect.
            # NOTE(review): target is not validated — possible open redirect; confirm.
            redirect_url = request.args.get('target', None)
            if redirect_url is None:
                return redirect(url_for('home'))
            else:
                return redirect(redirect_url)
    return render_template(
        'pages/login.html',
        form=form,
    )
@app.route("/logout")
@login_required
def logout():
    """Terminate the current session and return to the login page."""
    logout_user()
    flash('Successful logout!')
    login_page = url_for('login')
    return redirect(login_page)
@app.route("/change", methods=['GET', 'POST'])
@login_required
def change():
    """Change the current user's password.

    Verifies the old password, rejects a no-op change, then updates the
    DB credential and re-authenticates the user.
    """
    form = PasswordForm()
    if form.validate_on_submit():
        user = User(current_user.user_name)
        if (
            not user.user_exists
            or not user.check_password(form.old_password.data)
        ):
            flash('Old password incorrect', 'error')
        elif form.old_password.data == form.new_password.data:
            flash('New and old password match', 'error')
        else:
            change_password(current_user.user_name, form.new_password.data)
            flash('Successfully changed password and logged in')
            # Re-login so the session reflects the new credential.
            login_user(user)
            # Honor an optional ?target=<url> redirect after the change.
            # NOTE(review): target is not validated — possible open redirect; confirm.
            redirect_url = request.args.get('target', None)
            if redirect_url is None:
                return redirect(url_for('home'))
            else:
                return redirect(redirect_url)
    return render_template(
        'pages/change.html',
        form=form
    )
@app.route('/')
@login_required
def home():
    """Render the landing page for the signed-in user."""
    # Visiting the home page keeps the server-side session alive.
    app.session_refresh()
    return render_template('pages/home.html', user=current_user.user_name)
@app.route('/about')
@login_required
def about():
    """Render the static about page."""
    template = 'pages/about.html'
    return render_template(template)
@app.route('/register', methods=['GET', 'POST'])
@login_required
def register():
    """Register a new user (administrators only).

    Non-admins are redirected to the edit view of their own user entry.
    On a valid POST, inserts the new row and grants a matching DB account.
    """
    if current_user.user_name not in config['administrators']:
        flash("Only administrators are allowed to register users", "warning")
        return redirect(
            url_for(
                'table',
                schema=config['user_schema'],
                table=config['user_table'],
                edit="True",
                _id={config['user_name']: current_user.user_name}
            )
        )
    user_class = config.user_table
    dynamicform, form = config.get_dynamicform(
        f'{config["user_schema"]}.{config["user_table"]}',
        user_class, DynamicForm
    )
    if request.method == 'POST':
        submit = request.form.get('submit', None)
        # Hidden entries are UI helpers; strip them before validation.
        form.rm_hidden_entries()
        if submit == 'Register':
            if form.validate_on_submit():
                try:
                    dynamicform.insert(form)
                except dj.DataJointError as e:
                    flash(f"{e}", 'error')
                else:
                    dynamicform.reset()
                    formatted_dict = form.get_formatted()
                    # Create the matching database account for the new user.
                    grantuser(
                        formatted_dict[config['user_name']],
                        adduser=True
                    )
                    flash("User created", 'success')
        form.append_hidden_entries()
    edit_url = url_for(
        'table', schema=config['user_schema'], table=config['user_table'])
    delete_url = url_for(
        'delete', schema=config['user_schema'], table=config['user_table'])
    data = dynamicform.get_jsontable(
        edit_url, delete_url,
    )
    return render_template(
        'pages/register.html',
        form=form,
        data=data,
        toggle_off_keys=[0]
    )
@app.route('/registeredusers', methods=['GET', 'POST'])
@login_required
def registeredusers():
    """Show the table of all registered users."""
    edit_url = url_for(
        'table',
        schema=config['user_schema'],
        table=config['user_table'],
        subtable=None,
    )
    delete_url = url_for(
        'delete',
        schema=config['user_schema'],
        table=config['user_table'],
        subtable=None,
    )
    return joined_table_template(
        [config.user_table],
        'Registered Users',
        edit_url=edit_url,
        delete_url=delete_url,
    )
@app.route('/emergencycontacts', methods=['GET', 'POST'])
@login_required
def emergencycontacts():
    """Show the emergency-contact part table, if the user table has one."""
    if not hasattr(config.user_table, 'EmergencyContact'):
        # No EmergencyContact part table defined; fall back to the user list.
        return redirect(url_for('registeredusers'))
    edit_url = url_for(
        'table',
        schema=config['user_schema'],
        table=config['user_table'],
        subtable=None,
    )
    delete_url = url_for(
        'delete',
        schema=config['user_schema'],
        table=config['user_table'],
        subtable=None,
    )
    return joined_table_template(
        [config.user_table.EmergencyContact],
        'Emergency Contacts',
        edit_url=edit_url,
        delete_url=delete_url,
    )
@app.route('/registergroup', methods=['GET', 'POST'])
@login_required
def registergroup():
    """Set up a project group (administrators only).

    On a valid POST, inserts the group row and creates the group's
    database schemas.
    """
    if current_user.user_name not in config['administrators']:
        flash("Only administrators are allowed to register groups", "warning")
        return redirect(url_for('home'))
    group_class = config.group_table
    dynamicform, form = config.get_dynamicform(
        f'{config["group_schema"]}.{config["group_table"]}',
        group_class, DynamicForm
    )
    if request.method == 'POST':
        submit = request.form.get('submit', None)
        # Hidden entries are UI helpers; strip them before validation.
        form.rm_hidden_entries()
        if submit == 'Register':
            if form.validate_on_submit():
                try:
                    dynamicform.insert(form)
                except dj.DataJointError as e:
                    flash(f"{e}", 'error')
                else:
                    dynamicform.reset()
                    # Materialize the new group's schemas right away.
                    config.create_group_schemas()
                    flash("Project group created", 'success')
        form.append_hidden_entries()
    edit_url = url_for(
        'table', schema=config['group_schema'], table=config['group_table'])
    delete_url = url_for(
        'delete', schema=config['group_schema'], table=config['group_table'])
    data = dynamicform.get_jsontable(
        edit_url, delete_url,
    )
    return render_template(
        'pages/group.html',
        form=form,
        data=data,
        toggle_off_keys=[0]
    )
@app.route('/assigngroup', methods=['GET', 'POST'])
@login_required
def assigngroup():
    """Assign a user to a project group (administrators only).

    On a valid POST, inserts the assignment, refreshes schemas and
    permissions, and grants the user full privileges on the group schema.
    """
    if current_user.user_name not in config['administrators']:
        flash("Only administrators are allowed to assign groups", "warning")
        return redirect(url_for('home'))
    # only assign yourself to a group
    # TODO what should the authorization behavior be?
    kwargs = {
        config['user_name']: current_user.user_name
    }
    # NOTE(review): the else branch below is unreachable — non-admins
    # already returned above, so readonly is always []. Confirm intent.
    if current_user.user_name in config['administrators']:
        readonly = []
    else:
        readonly = [config['user_name']]
    group_class = config.assigned_table
    dynamicform, form = config.get_dynamicform(
        f'{config["assignedgroup_schema"]}.{config["assignedgroup_table"]}',
        group_class, DynamicForm, **kwargs
    )
    if request.method == 'POST':
        submit = request.form.get('submit', None)
        # Hidden entries are UI helpers; strip them before validation.
        form.rm_hidden_entries()
        if submit == 'Register':
            if form.validate_on_submit():
                try:
                    formatted_dict = dynamicform.insert(form)
                except dj.DataJointError as e:
                    flash(f"{e}", 'error')
                else:
                    dynamicform.reset()
                    config.create_group_schemas()
                    config.refresh_permissions()
                    user = formatted_dict[config['user_name']]
                    group = formatted_dict[config['group_name']]
                    # Grant the assigned user full access to the group's schema.
                    grantprivileges(
                        user,
                        config['connection'],
                        {f'{group}.*': 'ALL PRIVILEGES'}
                    )
                    flash(
                        f"Project {group} now includes user {user}",
                        'success'
                    )
        form.append_hidden_entries()
    delete_url = url_for(
        'delete',
        schema=config["assignedgroup_schema"],
        table=config["assignedgroup_table"])
    data = dynamicform.get_jsontable(
        delete_url=delete_url,
    )
    return render_template(
        'pages/assigngroup.html',
        form=form,
        data=data,
        toggle_off_keys=[0],
        readonly=readonly
    )
@app.route(f"{config['tmp_folder']}/<path:filename>")
@login_required
def tmpfile(filename):
    """Serve a file from the configured temporary folder (login required)."""
    tmp_dir = config['tmp_folder']
    return send_from_directory(tmp_dir, filename)
| 28.354396 | 81 | 0.590447 | 1,101 | 10,321 | 5.333333 | 0.166213 | 0.042575 | 0.024523 | 0.032357 | 0.560286 | 0.506812 | 0.476158 | 0.430177 | 0.390497 | 0.384707 | 0 | 0.000413 | 0.295611 | 10,321 | 363 | 82 | 28.432507 | 0.80729 | 0.024416 | 0 | 0.462366 | 0 | 0 | 0.162946 | 0.021726 | 0 | 0 | 0 | 0.002755 | 0 | 1 | 0.039427 | false | 0.046595 | 0.050179 | 0.007168 | 0.164875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
211c72da1f964ad54208029bc2d732797cb76a09 | 11,191 | py | Python | stanCode_Projects/break_out_game/breakoutgraphics_extension.py | YuHaoHan/MystanCodeProjects | 382631f70392ecc714a21aac89af723ea5a608f0 | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/breakoutgraphics_extension.py | YuHaoHan/MystanCodeProjects | 382631f70392ecc714a21aac89af723ea5a608f0 | [
"MIT"
] | null | null | null | stanCode_Projects/break_out_game/breakoutgraphics_extension.py | YuHaoHan/MystanCodeProjects | 382631f70392ecc714a21aac89af723ea5a608f0 | [
"MIT"
] | null | null | null | """
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
This program set up a class to help construct a breakout game.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5      # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15      # Height of a brick (in pixels).
BRICK_ROWS = 10        # Number of rows of bricks.
BRICK_COLS = 10        # Number of columns of bricks.
BRICK_OFFSET = 50      # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10       # Radius of the ball (in pixels).
PADDLE_WIDTH = 75      # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15     # Height of the paddle (in pixels).
PADDLE_OFFSET = 50     # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7    # Initial vertical speed for the ball.
MAX_X_SPEED = 5        # Maximum initial horizontal speed for the ball.
NUM_LIVES = 3          # Number of lives.
class BreakoutGraphics:
    """Owns every graphical object of the breakout game — window, paddle,
    ball, bricks, score/lives labels and the start screen — plus the
    collision and scoring helpers used by the game loop."""

    def __init__(self, num_lives = NUM_LIVES, ball_radius = BALL_RADIUS, paddle_width = PADDLE_WIDTH,
                 paddle_height = PADDLE_HEIGHT, paddle_offset = PADDLE_OFFSET,
                 brick_rows = BRICK_ROWS, brick_cols = BRICK_COLS,
                 brick_width = BRICK_WIDTH, brick_height = BRICK_HEIGHT,
                 brick_offset = BRICK_OFFSET, brick_spacing = BRICK_SPACING,
                 title='Breakout'):
        self.brick_rows = brick_rows
        self.brick_cols = brick_cols
        # Create a graphical window, with some extra space
        window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
        window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
        self.window = GWindow(width=window_width, height=window_height, title=title)
        # Create a paddle, horizontally centered near the window bottom
        self.paddle = GRect(paddle_width, paddle_height)
        self.paddle.filled = True
        self.window.add(self.paddle, (self.window.width-self.paddle.width)/2, self.window.height-paddle_offset)
        # Center a filled ball in the graphical window
        self.ball_radius = ball_radius
        self.ball = GOval(ball_radius*2, ball_radius*2)
        self.ball.filled = True
        # Default initial velocity for the ball (0, 0 means "not started")
        self.__dy = 0
        self.__dx = 0
        # Objects found under the four corners of the ball (set by collide()).
        self.upper_left = None
        self.upper_right = None
        self.lower_left = None
        self.lower_right = None
        # Points
        self.__point = 0
        self.score_label = GLabel("Score: " + str(self.__point))
        self.score_label.color = "green"
        self.score_label.font = "-25"
        self.window.add(self.score_label, 10, self.window.height)
        # Lives
        self.lives = num_lives
        self.lives_label = GLabel("Lives :" + str(self.lives))
        self.lives_label.color = "red"
        self.lives_label.font = "-25"
        self.window.add(self.lives_label, 130, self.window.height)
        # White board used in the end of a game to cover the play field.
        self.white_board = GRect(self.window.width, self.window.height)
        self.white_board.filled = True
        self.white_board.fill_color = "white"
        # Initialize our mouse listeners
        onmouseclicked(self.start_game)
        onmousemoved(self.paddle_move)
        # Draw bricks, colored in bands of two rows each (red → blue).
        # NOTE(review): self.brick keeps a reference only to the last
        # brick created — later hit detection relies on get_object_at.
        for i in range(brick_rows):
            for j in range(brick_cols):
                self.brick = GRect(brick_width, brick_height)
                self.brick.filled = True
                if i == 0 or i == 1:
                    self.brick.fill_color = "red"
                elif i == 2 or i == 3:
                    self.brick.fill_color = "orange"
                elif i == 4 or i == 5:
                    self.brick.fill_color = "yellow"
                elif i == 6 or i == 7:
                    self.brick.fill_color = "green"
                else:
                    self.brick.fill_color = "blue"
                x_spacing = self.brick.width + brick_spacing
                y_spacing = self.brick.height + brick_spacing
                self.window.add(self.brick, x_spacing*j, brick_offset+y_spacing*i)
        # Create starting view (title, author, start button + caption)
        self.title = GLabel("Welcome to Breakout !")
        self.title.font = "-30"
        self.window.add(self.title, (self.window.width-self.title.width)/2, self.window.height//2)
        self.author = GLabel("Author: David Han")
        self.author.font = "-25"
        self.window.add(self.author, (self.window.width-self.author.width)/2, self.window.height//1.8)
        self.start_button = GRect(self.window.width//3, self.window.height//7)
        self.start_button.filled = True
        self.start_button.fill_color = "blue"
        self.window.add(self.start_button, (self.window.width-self.start_button.width)/2, self.window.height//1.5)
        self.start_word = GLabel("Click to start")
        self.start_word.color = "white"
        self.start_word.font = "-20"
        self.window.add(self.start_word, (self.window.width-self.start_word.width)/2,
                        self.start_button.y+self.start_button.height/2+self.start_word.height/2)

    def paddle_move(self, m):
        """
        :param m: the position of player's mouse
        :return: the paddle's midpoint moves with player's mouse
        """
        # Clamp: only follow the mouse while the whole paddle stays on screen.
        if 0 + self.paddle.width/2 <= m.x <= self.window.width-self.paddle.width/2:
            self.paddle.x = m.x-self.paddle.width/2

    def set_ball_speed(self):
        """
        At the beginning of every round, set an initial speed for the ball.
        """
        self.__dy = INITIAL_Y_SPEED
        self.__dx = random.randint(1, MAX_X_SPEED)
        # 50/50 chance to launch the ball to the left.
        if random.random() > 0.5:
            self.__dx = -self.__dx

    def get_dx(self):
        """
        Getter of horizontal velocity
        """
        return self.__dx

    def get_dy(self):
        """
        Getter of vertical velocity
        """
        return self.__dy

    def game_has_started(self):
        """
        Determine if the game has started (i.e. the ball has a velocity).
        """
        if self.__dx == 0 and self.__dy == 0:
            return False
        return True

    def start_game(self, m):
        """
        When the game has not started and player click the start button, start the game.
        """
        # First click removes the start screen...
        if self.window.get_object_at(m.x, m.y) == self.start_button or self.window.get_object_at(m.x, m.y) == self.start_word:
            self.window.remove(self.title)
            self.window.remove(self.author)
            self.window.remove(self.start_button)
            self.window.remove(self.start_word)
        # ...and once the start screen is gone, the ball can be launched.
        if self.window.get_object_at(self.start_button.x, self.start_button.y) is None \
                and not self.game_has_started():
            self.set_ball_speed()

    def collide(self):
        """
        Determine if the ball has collided with bricks or the paddle.
        """
        # Probe the object (if any) under each of the ball's four corners.
        self.upper_left = self.window.get_object_at(self.ball.x, self.ball.y)
        self.upper_right = self.window.get_object_at(self.ball.x + 2 * self.ball_radius, self.ball.y)
        self.lower_left = self.window.get_object_at(self.ball.x, self.ball.y + 2 * self.ball_radius)
        self.lower_right = self.window.get_object_at(self.ball.x + 2 * self.ball_radius,
                                                     self.ball.y + 2 * self.ball_radius)
        # The score/lives labels also live in the window; ignore them.
        if self.upper_left is not None and self.upper_left is not self.score_label and self.upper_left is not self.lives_label:
            return True
        if self.upper_right is not None and self.upper_right is not self.score_label and self.upper_right is not self.lives_label:
            return True
        if self.lower_right is not None and self.lower_right is not self.score_label and self.lower_right is not self.lives_label:
            return True
        if self.lower_left is not None and self.lower_left is not self.score_label and self.lower_left is not self.lives_label:
            return True
        return False

    def is_paddle(self):
        """
        Determine if the ball has collided with the paddle.
        Relies on the corner probes set by the last collide() call.
        """
        if (self.upper_left is self.paddle or self.upper_right is self.paddle
                or self.lower_right is self.paddle or self.lower_left is self.paddle):
            return True
        return False

    def brick_out(self):
        """
        Remove the bricks hit by the ball.
        Each distinct object under a corner is removed once and scores one
        point; assumes the caller already ruled out a paddle hit via
        is_paddle() — TODO confirm.
        """
        if self.upper_left is not None:
            self.window.remove(self.upper_left)
            self.__point += 1
        if self.upper_right is not None and self.upper_right is not self.upper_left:
            self.window.remove(self.upper_right)
            self.__point += 1
        if self.lower_right is not None and self.lower_right is not self.upper_right:
            self.window.remove(self.lower_right)
            self.__point += 1
        if (self.lower_left is not None and self.lower_left is not self.lower_right
                and self.lower_left is not self.upper_left):
            self.window.remove(self.lower_left)
            self.__point += 1
        self.score_label.text = ("Score: " + str(self.__point))

    def set_ball(self):
        """
        Set ball position (centered in the window).
        """
        self.window.add(self.ball, (self.window.width - self.ball.width) / 2,
                        (self.window.height - self.ball.height) / 2)

    def remove_ball(self):
        """
        Remove the ball from the window
        """
        self.window.remove(self.ball)

    def reset_ball(self):
        """
        When player lose a round and the number of lives is not zero, reset ball velocity and ball position.
        """
        self.ball.x = (self.window.width - self.ball.width) / 2
        self.ball.y = (self.window.height - self.ball.height) / 2
        # Zero velocity means "waiting for the next launch click".
        self.__dx = 0
        self.__dy = 0

    def get_point(self):
        """
        Getter of points.
        """
        return self.__point

    def win(self):
        """
        Winning condition: every brick has been cleared.
        """
        win_point = self.brick_rows*self.brick_cols
        if self.__point == win_point:
            return True
        return False

    def get_lives(self):
        """
        Getter of number of lives
        """
        return self.lives

    def lose_game(self):
        """
        When a player lose, show "You lose"
        """
        self.remove_ball()
        # Cover the whole play field before showing the message.
        self.window.add(self.white_board)
        lose = GLabel("You lose")
        lose.font = "-50"
        self.window.add(lose, (self.window.width - lose.width) / 2, (self.window.height - lose.height) / 2)

    def win_game(self):
        """
        When a player win, show "You win !"
        """
        self.remove_ball()
        # Cover the whole play field before showing the message.
        self.window.add(self.white_board)
        win = GLabel("You win !")
        win.font = "-50"
        self.window.add(win, (self.window.width - win.width) / 2, (self.window.height - win.height) / 2)
| 39.967857 | 130 | 0.608793 | 1,541 | 11,191 | 4.253731 | 0.12719 | 0.08238 | 0.025782 | 0.028528 | 0.362929 | 0.263921 | 0.233105 | 0.178185 | 0.125095 | 0.097941 | 0 | 0.012137 | 0.293182 | 11,191 | 279 | 131 | 40.111111 | 0.816561 | 0.160039 | 0 | 0.128492 | 0 | 0 | 0.018401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100559 | false | 0 | 0.022346 | 0 | 0.212291 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2120b3f8447118a6674bbb48a00ea1f3e02d7a04 | 10,742 | py | Python | src/lumigo_tracer/wrappers/http/sync_http_wrappers.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | src/lumigo_tracer/wrappers/http/sync_http_wrappers.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | src/lumigo_tracer/wrappers/http/sync_http_wrappers.py | MattBillock/python_tracer | 0bd8ee0400687390820fc0e6c848f04ad71a8a4d | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import http.client
from io import BytesIO
import importlib.util
from typing import Optional
from lumigo_tracer.libs.wrapt import wrap_function_wrapper
from lumigo_tracer.parsing_utils import safe_get_list, recursive_json_join
from lumigo_tracer.lumigo_utils import (
get_logger,
lumigo_safe_execute,
ensure_str,
Configuration,
lumigo_dumps,
get_size_upper_bound,
is_error_code,
)
from lumigo_tracer.spans_container import SpansContainer
from lumigo_tracer.wrappers.http.http_data_classes import HttpRequest, HttpState
from collections import namedtuple
from lumigo_tracer.wrappers.http.http_parser import get_parser, HTTP_TYPE
_BODY_HEADER_SPLITTER = b"\r\n\r\n"  # separates the HTTP header block from the body in raw request bytes
_FLAGS_HEADER_SPLITTER = b"\r\n"  # separates the request line from the header lines
LUMIGO_HEADERS_HOOK_KEY = "_lumigo_headers_hook"  # attribute name used to stash hooked headers on a connection
# Data captured by the headers hook: the parsed headers plus the request path.
HookedData = namedtuple("HookedData", ["headers", "path"])
def add_request_event(parse_params: HttpRequest):
    """Parse an outgoing HTTP request and record it as a new span."""
    parser_cls = get_parser(parse_params.host)
    span_message = parser_cls().parse_request(parse_params)
    # Remember the request so a later unparsed chunk can be merged into it.
    HttpState.previous_request = parse_params
    SpansContainer.get_span().add_span(span_message)
def add_unparsed_request(parse_params: HttpRequest):
    """
    Handle a request that is not fully formatted as we expect,
    i.e. there is no '\\r\\n' in the request data that *logically* splits the headers from the body.
    In that case, we consider it a continuation of the previous request if it targets the same host
    and we have not received any answer yet.
    """
    last_event = SpansContainer.get_span().get_last_span()
    if last_event:
        if last_event and last_event.get("type") == HTTP_TYPE and HttpState.previous_request:
            if last_event.get("info", {}).get("httpInfo", {}).get("host") == parse_params.host:
                if "response" not in last_event["info"]["httpInfo"]:
                    # Same host and no response yet: treat this chunk as more
                    # body for the previous request — merge (bounded by the
                    # size limit) and re-emit the span in place of the old one.
                    SpansContainer.get_span().pop_last_span()
                    body = (HttpState.previous_request.body + parse_params.body)[
                        : get_size_upper_bound()
                    ]
                    add_request_event(HttpState.previous_request.clone(body=body))
                    return
    # Fallback: record it as a fresh request without headers.
    add_request_event(parse_params.clone(headers=None))
def update_event_response(
    host: Optional[str], status_code: int, headers: dict, body: bytes
) -> None:
    """
    Update the last HTTP span with (a chunk of) the response.

    :param host: If None, use the host from the last span; otherwise this is
        the first chunk and we can empty the aggregated response body.
    This function assumes synchronous execution - we update the last http event.
    """
    last_event = SpansContainer.get_span().pop_last_span()
    if last_event:
        http_info = last_event.get("info", {}).get("httpInfo", {})
        if not host:
            host = http_info.get("host", "unknown")
        else:
            # First chunk of a new response: reset the aggregation buffer.
            HttpState.previous_response_body = b""
        has_error = is_error_code(status_code)
        # Error responses are allowed a larger payload on the span.
        max_size = Configuration.get_max_entry_size(has_error)
        headers = {k.lower(): v for k, v in headers.items()} if headers else {}
        parser = get_parser(host, headers)()  # type: ignore
        # Keep aggregating body chunks only until the size cap is reached.
        if len(HttpState.previous_response_body) < max_size:
            HttpState.previous_response_body += body
        if has_error:
            # On errors, re-dump the request data with the larger limit too.
            _update_request_data_increased_size_limit(http_info, max_size)
        update = parser.parse_response(  # type: ignore
            host, status_code, headers, HttpState.previous_response_body  # type: ignore
        )
        SpansContainer.get_span().add_span(recursive_json_join(update, last_event))
def _update_request_data_increased_size_limit(http_info: dict, max_size: int) -> None:
    """Re-dump the stored request body/headers with a larger size limit.

    Called when the response turned out to be an error, so more request
    context than usual should be kept on the span.
    """
    previous = HttpState.previous_request
    request_info = http_info.get("request")
    if not previous or not request_info:
        return
    if previous.body:
        dumped_body = lumigo_dumps(previous.body, max_size)
    else:
        dumped_body = ""
    request_info["body"] = dumped_body
    request_info["headers"] = lumigo_dumps(previous.headers, max_size)
# Wrappers #
def _http_send_wrapper(func, instance, args, kwargs):
    """
    This is the wrapper of the requests. It parses the http message to conclude the url, headers, and body.
    Finally, it adds an event to the span, and runs the wrapped function (http.client.HTTPConnection.send).
    """
    # args[0] is the raw payload given to send(); it may be bytes or a file-like stream.
    data = safe_get_list(args, 0)
    with lumigo_safe_execute("parse requested streams"):
        if hasattr(data, "read"):
            if not hasattr(data, "seek") or not hasattr(data, "tell"):
                # If we will read this data, then we will change the original behavior
                data = ""
            else:
                # Read a bounded prefix for tracing, then rewind so the real send is unaffected.
                current_pos = data.tell()
                data = data.read(get_size_upper_bound())
                args[0].seek(current_pos)
    host, method, headers, body, uri = (
        getattr(instance, "host", None),
        getattr(instance, "_method", None),
        None,
        None,
        None,
    )
    with lumigo_safe_execute("parse request"):
        if isinstance(data, bytes) and _BODY_HEADER_SPLITTER in data:
            headers, body = data.split(_BODY_HEADER_SPLITTER, 1)
            hooked_headers = getattr(instance, LUMIGO_HEADERS_HOOK_KEY, None)
            if hooked_headers and hooked_headers.headers:
                # we will get here only if _headers_reminder_wrapper ran first. remove its traces.
                headers = {k: ensure_str(v) for k, v in hooked_headers.headers.items()}
                uri = f"{host}{hooked_headers.path}"
                setattr(instance, LUMIGO_HEADERS_HOOK_KEY, None)
            elif _FLAGS_HEADER_SPLITTER in headers:
                request_info, headers = headers.split(_FLAGS_HEADER_SPLITTER, 1)
                headers = http.client.parse_headers(BytesIO(headers))
                path_and_query_params = (
                    # Parse path from request info, remove method (GET | POST) and http version (HTTP/1.1)
                    request_info.decode("ascii")
                    .replace(method, "")
                    .replace(instance._http_vsn_str, "")
                    .strip()
                )
                uri = f"{host}{path_and_query_params}"
                host = host or headers.get("Host")
            else:
                # Header section without the expected flags splitter: fall back to the unparsed path.
                headers = None
    with lumigo_safe_execute("add request event"):
        if headers:
            add_request_event(
                HttpRequest(host=host, method=method, uri=uri, headers=headers, body=body)
            )
        else:
            add_unparsed_request(HttpRequest(host=host, method=method, uri=uri, body=data))
    ret_val = func(*args, **kwargs)
    with lumigo_safe_execute("add response event"):
        # Response parsing happens in the getresponse/read wrappers; only stamp the end time here.
        SpansContainer.get_span().update_event_end_time()
    return ret_val
def _headers_reminder_wrapper(func, instance, args, kwargs):
    """
    Wrapper of `http.client.HTTPConnection.request` that captures the headers.
    Remembering the headers helps us improve performance on requests that use this flow.
    """
    with lumigo_safe_execute("add hooked data"):
        hooked = HookedData(headers=kwargs.get("headers"), path=args[1])
        setattr(instance, LUMIGO_HEADERS_HOOK_KEY, hooked)
    return func(*args, **kwargs)
def _requests_wrapper(func, instance, args, kwargs):
    """
    Wrapper of `requests.request`.
    `requests` initializes the connection and parses the response by itself,
    which creates a gap from the traditional http.client wrapping - so we
    record the wall-clock bounds of the whole call here.
    """
    started_at = datetime.now()
    result = func(*args, **kwargs)
    with lumigo_safe_execute("requests wrapper time updates"):
        SpansContainer.get_span().update_event_times(start_time=started_at)
    return result
def _response_wrapper(func, instance, args, kwargs):
    """
    Wrapper of the function that can be called only after the http request was sent.
    We deliberately do not examine the response payload here, since that could
    change the original behaviour (ret_val.peek()).
    """
    response = func(*args, **kwargs)
    with lumigo_safe_execute("parse response"):
        update_event_response(
            instance.host, response.code, dict(response.headers.items()), b""
        )
    return response
def _read_wrapper(func, instance, args, kwargs):
    """
    Wrapper of the function that can be called only after `getresponse` was called.
    Records each non-empty chunk of the response body on the current span.
    """
    chunk = func(*args, **kwargs)
    if chunk:
        with lumigo_safe_execute("parse response.read"):
            update_event_response(
                None, instance.code, dict(instance.headers.items()), chunk
            )
    return chunk
def _read_stream_wrapper(func, instance, args, kwargs):
    # Wrapper of urllib3's read_chunked: delegate to the original call, then
    # route its chunks through our generator so each one lands on the span.
    return _read_stream_wrapper_generator(func(*args, **kwargs), instance)
def _read_stream_wrapper_generator(stream_generator, instance):
    # Record every streamed chunk on the current span before yielding it on,
    # so tracing never interrupts the caller's iteration.
    for chunk in stream_generator:
        with lumigo_safe_execute("parse response.read_chunked"):
            update_event_response(
                None, instance.status, dict(instance.headers.items()), chunk
            )
        yield chunk
def _putheader_wrapper(func, instance, args, kwargs):
    """
    This is the wrapper of `botocore.awsrequest.AWSRequest.__init__`.
    It injects the patched `X-Amzn-Trace-Id` header into outgoing AWS requests,
    so downstream services are correlated with the current transaction.
    (The previous docstring was copied from the response wrapper and did not
    describe this function.)
    """
    # Guarded with lumigo_safe_execute for consistency with the other wrappers:
    # a tracing failure must never break the user's request creation. `headers`
    # may be absent or None, in which case we simply skip the injection
    # (the original unguarded kwargs["headers"][...] access would raise).
    with lumigo_safe_execute("add trace id header"):
        headers = kwargs.get("headers")
        if headers is not None:
            headers["X-Amzn-Trace-Id"] = SpansContainer.get_span().get_patched_root()
    ret_val = func(*args, **kwargs)
    return ret_val
def wrap_http_calls():
    """Install all the http-layer wrappers.

    Each entry is (required package or None, module, target, wrapper); entries
    whose required package is not installed are skipped. Order matters and is
    preserved from the original implementation.
    """
    targets = (
        (None, "http.client", "HTTPConnection.send", _http_send_wrapper),
        (None, "http.client", "HTTPConnection.request", _headers_reminder_wrapper),
        ("botocore", "botocore.awsrequest", "AWSRequest.__init__", _putheader_wrapper),
        (None, "http.client", "HTTPConnection.getresponse", _response_wrapper),
        (None, "http.client", "HTTPResponse.read", _read_wrapper),
        ("urllib3", "urllib3.response", "HTTPResponse.read_chunked", _read_stream_wrapper),
        ("requests", "requests.api", "request", _requests_wrapper),
    )
    with lumigo_safe_execute("wrap http calls"):
        get_logger().debug("wrapping http requests")
        for requirement, module, target, wrapper in targets:
            if requirement is None or importlib.util.find_spec(requirement):
                wrap_function_wrapper(module, target, wrapper)
| 41.157088 | 120 | 0.668684 | 1,362 | 10,742 | 5.030103 | 0.194567 | 0.015764 | 0.027295 | 0.030652 | 0.337469 | 0.232959 | 0.156765 | 0.120274 | 0.107722 | 0.089768 | 0 | 0.001101 | 0.238969 | 10,742 | 260 | 121 | 41.315385 | 0.836942 | 0.191584 | 0 | 0.122222 | 0 | 0 | 0.084429 | 0.017614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072222 | false | 0 | 0.083333 | 0 | 0.205556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21219f971557855728e3021c592f91575bbdcad2 | 2,692 | py | Python | src/main.py | Procope/conceptalign | bc4534257f6dc5c6a50d1d797b3420b6fffc5661 | [
"MIT"
] | null | null | null | src/main.py | Procope/conceptalign | bc4534257f6dc5c6a50d1d797b3420b6fffc5661 | [
"MIT"
] | null | null | null | src/main.py | Procope/conceptalign | bc4534257f6dc5c6a50d1d797b3420b6fffc5661 | [
"MIT"
] | null | null | null | import utils
from talkpages import WikiCorpusReader, WikiCorpus
from alignment import Alignment
# Which Wikipedia-controversial topic to analyse in this run.
TOPIC = 'sports'
# Build the talk-page corpus for the topic from the raw JSON dumps
# (presumably writes a TSV under tokenized_posts/ - confirm in WikiCorpusReader).
corpus_reader = WikiCorpusReader('../../data/controversial/')
tsv_filename = corpus_reader.json_to_tsv('tokenized_posts/', topic_list=[TOPIC])
# tsv_filename = './tsv/WikiControversial-{}.tsv'.format(TOPIC)
corpus = WikiCorpus(tsv_filename)
# corpus = WikiCorpus('tokenized_posts/{}_posts.csv'.format(TOPIC))
corpus.tokenize_posts()
# Load the LIWC-style marker categories used for linguistic alignment.
markers, marker_words = utils.read_convokit_markers('../coord-liwc-patterns.txt')
categories = list(markers.keys())
corpus.count_marker_categories(markers)
print(corpus.posts.describe())
# corpus.count_marker_tokens(marker_words)
# corpus.save('with_counts/{}_posts.csv'.format(TOPIC))
# corpus = WikiCorpus('with_counts/{}_posts.csv'.format(TOPIC))
# Compute categorical alignment (SWAM) over reply pairs and plot
# baseline vs alignment means with their confidence intervals.
pairs = corpus.reply_pairs()
al = Alignment(corpus, markers)
N_base, N_align, C_base, C_align = al.counts(mode='categorical')
means, intervals = al.swam(N_base, N_align, C_base, C_align, verbose=True)
base_means, align_means = means
base_intervals, align_intervals = intervals
utils.plot_baseline_and_alignment(categories,
                                  base_means, align_means,
                                  base_intervals, align_intervals,
                                  filename='plots/swam-{}'.format(TOPIC))
# users = corpus.get_users()
# # print(users.head())
# net = corpus.social_network(prune=False)
# corpus.assign_centrality('eigenvector')
# # print(users.head())
# tie_strengths = []
# for _, pair in pairs.iterrows():
# user_a, user_b = pair['author_name_a'], pair['author_name_b']
# tie_strengths.append(net[user_a][user_b]['weight'])
# pairs['tie_strength'] = tie_strengths
# strong_dyad_filter = ('strong-tie', lambda pair: (pair['tie_strength'] >= 2))
# weak_dyad_filter = ('weak-tie', lambda pair: (pair['tie_strength'] < 2))
# print(len(pairs[(pairs['tie_strength'] < 2)]), len(pairs[(pairs['tie_strength'] >= 2)]))
# for filter_str, group_filter in [strong_dyad_filter, weak_dyad_filter]:
# N_base, N_align, C_base, C_align = al.counts(mode='categorical', group_filter=group_filter)
# means, intervals = al.swam(N_base, N_align, C_base, C_align, verbose=True)
# base_means, align_means = means
# base_intervals, align_intervals = intervals
# utils.plot_baseline_and_alignment(categories,
# base_means, align_means,
# base_intervals, align_intervals,
# filename='plots/swam-{}-{}'.format(TOPIC, filter_str))
# print(pairs.head())
# print(corpus)
# corpus.assign_centrality('betweenness')
# print(users.head()) | 33.234568 | 97 | 0.68685 | 334 | 2,692 | 5.245509 | 0.284431 | 0.037671 | 0.013699 | 0.025114 | 0.421804 | 0.40411 | 0.342466 | 0.309361 | 0.309361 | 0.309361 | 0 | 0.001792 | 0.170877 | 2,692 | 81 | 98 | 33.234568 | 0.783154 | 0.586553 | 0 | 0 | 0 | 0 | 0.089815 | 0.047222 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2121c256c900d4ca0f66bb47e7efbeb7983c6b8e | 383 | py | Python | python-codes/m2_curso_em_video_estruturas_de_controle/ex065.2.py | lucasportella/learning_repo | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | [
"MIT"
] | null | null | null | python-codes/m2_curso_em_video_estruturas_de_controle/ex065.2.py | lucasportella/learning_repo | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | [
"MIT"
] | null | null | null | python-codes/m2_curso_em_video_estruturas_de_controle/ex065.2.py | lucasportella/learning_repo | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | [
"MIT"
] | null | null | null | n = s = c = mai = men = 0
# Read numbers until the user declines to continue, tracking the sum,
# the count, the maximum and the minimum.
perg = 's'
while perg in 'sSSIMsim':
    n = int(input('Digite um número: '))
    c += 1
    s += n
    # On the first number, initialise both extremes to it. The original code
    # used `men == 0` as a "not set yet" sentinel, which wrongly reset the
    # minimum after a 0 was entered, and left `mai` at 0 for all-negative
    # input.
    if c == 1:
        mai = men = n
    else:
        if n > mai:
            mai = n
        if n < men:
            men = n
    perg = str(input('Quer continuar? [s/n] ')).strip()
# Average of all numbers entered (the loop always runs at least once).
m = s/c
print('Soma:',s,'\nQuant. números:',c,'\nMédia: {:.2f}'.format(m),'\nMaior:',mai,'\nMenor:',men) | 25.533333 | 96 | 0.477807 | 62 | 383 | 2.951613 | 0.5 | 0.04918 | 0.043716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01487 | 0.29765 | 383 | 15 | 96 | 25.533333 | 0.665428 | 0 | 0 | 0.133333 | 0 | 0 | 0.265625 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21242d1a4ca530581bd7647266334e5c0492c200 | 720 | py | Python | docs/minimal.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | 2 | 2021-12-13T12:41:54.000Z | 2021-12-16T03:10:24.000Z | docs/minimal.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | docs/minimal.py | MCZhi/SMARTS | 3ef5650b04ac6fb7145cf4e23d5534d73e0929fc | [
"MIT"
] | null | null | null | import gym
from smarts.core.agent import Agent
from smarts.core.agent_interface import AgentInterface, AgentType
from smarts.zoo.agent_spec import AgentSpec
# Single-agent SMARTS example: a lane-keeping agent driving the "loop" scenario.
agent_id = "Agent-007"
# AgentSpec bundles the observation/action interface with the policy builder;
# the policy here ignores its observation and always emits "keep_lane".
agent_spec = AgentSpec(
    interface=AgentInterface.from_type(AgentType.Laner),
    agent_params={"agent_function": lambda _: "keep_lane"},
    agent_builder=Agent.from_function,
)
env = gym.make(
    "smarts.env:hiway-v0",
    scenarios=["scenarios/loop"],
    agent_specs={agent_id: agent_spec},
)
agent = agent_spec.build_agent()
observations = env.reset()
# "__all__" flips to True once every agent in the episode is done.
dones = {"__all__": False}
while not dones["__all__"]:
    # Act on this agent's observation and step the environment with its action.
    action = agent.act(observations[agent_id])
    observations, _, dones, _ = env.step({agent_id: action})
env.close()
| 26.666667 | 65 | 0.7375 | 94 | 720 | 5.351064 | 0.43617 | 0.071571 | 0.055666 | 0.075547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00641 | 0.133333 | 720 | 26 | 66 | 27.692308 | 0.799679 | 0 | 0 | 0 | 0 | 0 | 0.109722 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
212ab4fb98f24f1c674eddbea9f0186ab13f01ff | 411 | py | Python | examples/example1/publish.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | 1 | 2020-08-24T21:23:06.000Z | 2020-08-24T21:23:06.000Z | examples/example1/publish.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | null | null | null | examples/example1/publish.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | null | null | null | import djangofy as dfy
import os
# Locations for this example site; generated files land under PUBLISHED.
PATH = 'examples/example1/'
PUBLISHED = PATH + 'published/'

# Page identifiers with their matching URL slugs (index-aligned lists).
names = ['page1', 'page2', 'page3']
urls = [
    'some-exciting-article',
    'another-exciting-article',
    'you-must-read-this',
]

# Emit the Django url and sitemap modules for the pages declared above.
dfy.make_urls(names, urls, PUBLISHED + 'urls.py')
dfy.make_sitemaps(names, urls, PUBLISHED + 'sitemaps.py')
| 17.869565 | 59 | 0.6618 | 52 | 411 | 5.192308 | 0.557692 | 0.096296 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 0.192214 | 411 | 22 | 60 | 18.681818 | 0.801205 | 0.126521 | 0 | 0 | 0 | 0 | 0.348315 | 0.126404 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
212c3e1ba940543bbeebd1b123e05e0a9ce76a63 | 3,575 | py | Python | mini/utils.py | FiveEyes/CycleGAN | b36e231bd4148d04ac8d04a0cec3b5c139ee9e40 | [
"MIT"
] | null | null | null | mini/utils.py | FiveEyes/CycleGAN | b36e231bd4148d04ac8d04a0cec3b5c139ee9e40 | [
"MIT"
] | null | null | null | mini/utils.py | FiveEyes/CycleGAN | b36e231bd4148d04ac8d04a0cec3b5c139ee9e40 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import numpy as np
import torch
def mkdir(paths):
    """Create every directory in ``paths``, including missing parents.

    :param paths: a single path or a list/tuple of paths. Directories that
        already exist are left untouched.
    """
    if not isinstance(paths, (list, tuple)):
        paths = [paths]
    for path in paths:
        # exist_ok avoids the check-then-create race of the original
        # isdir() + makedirs() pair and is a no-op for existing directories.
        os.makedirs(path, exist_ok=True)
def cuda_devices(gpu_ids):
    """Restrict CUDA to the given GPU ids via the CUDA_VISIBLE_DEVICES env var."""
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(gpu_id) for gpu_id in gpu_ids)
def cuda(xs):
    """Move a tensor/module (or a list/tuple of them) to the GPU if available.

    On CPU-only machines the input is returned unchanged. The original
    implementation implicitly returned ``None`` in that case, which broke
    callers doing ``x = cuda(x)``.

    :param xs: a single object with a ``.cuda()`` method, or a list/tuple of them.
    :return: the same object(s), moved to the GPU when CUDA is available.
    """
    if not torch.cuda.is_available():
        return xs
    if not isinstance(xs, (list, tuple)):
        return xs.cuda()
    return [x.cuda() for x in xs]
def save_checkpoint(state, save_path, is_best=False, max_keep=None):
    """Serialize ``state`` to ``save_path`` and maintain checkpoint bookkeeping.

    A ``latest_checkpoint`` index file in the same directory lists checkpoint
    file names, newest first. When ``max_keep`` is given, checkpoints beyond
    that count are deleted. When ``is_best`` is True the checkpoint is also
    copied to ``best_model.ckpt`` in the same directory.

    :param state: object to serialize (passed straight to ``torch.save``)
    :param save_path: destination file path for this checkpoint
    :param is_best: also copy this checkpoint to ``best_model.ckpt``
    :param max_keep: number of most recent checkpoints to retain (None = keep all)
    """
    # save checkpoint
    torch.save(state, save_path)

    # update the newest-first index file
    save_dir = os.path.dirname(save_path)
    list_path = os.path.join(save_dir, 'latest_checkpoint')
    save_name = os.path.basename(save_path)
    if os.path.exists(list_path):
        with open(list_path) as f:
            ckpt_list = f.readlines()
        ckpt_list = [save_name + '\n'] + ckpt_list
    else:
        ckpt_list = [save_name + '\n']

    # prune checkpoints beyond max_keep
    if max_keep is not None:
        for ckpt in ckpt_list[max_keep:]:
            ckpt = os.path.join(save_dir, ckpt[:-1])
            if os.path.exists(ckpt):
                os.remove(ckpt)
        ckpt_list[max_keep:] = []
    with open(list_path, 'w') as f:
        f.writelines(ckpt_list)

    # copy best
    if is_best:
        # BUG FIX: the original reassigned save_path to its basename, so this
        # copy resolved relative to the CWD instead of the checkpoint directory.
        shutil.copyfile(save_path, os.path.join(save_dir, 'best_model.ckpt'))
def load_checkpoint(ckpt_dir_or_file, map_location=None, load_best=False):
    """Load a checkpoint from a file path, or from a checkpoint directory.

    For a directory, ``best_model.ckpt`` is loaded when ``load_best`` is set;
    otherwise the newest entry listed in the ``latest_checkpoint`` index file.
    """
    if not os.path.isdir(ckpt_dir_or_file):
        ckpt_path = ckpt_dir_or_file
    elif load_best:
        ckpt_path = os.path.join(ckpt_dir_or_file, 'best_model.ckpt')
    else:
        index_path = os.path.join(ckpt_dir_or_file, 'latest_checkpoint')
        with open(index_path) as index_file:
            # first line is the newest checkpoint; strip its trailing newline
            ckpt_path = os.path.join(ckpt_dir_or_file, index_file.readline()[:-1])
    ckpt = torch.load(ckpt_path, map_location=map_location)
    print(' [*] Loading checkpoint from %s succeed!' % ckpt_path)
    return ckpt
def reorganize(dataset_dir):
    """Symlink the trainA/trainB/testA/testB folders of `dataset_dir` into
    single-class ('0') subdirectories, as folder-based image loaders expect.

    :param dataset_dir: root directory containing the four split folders.
    :return: dict mapping each split name to its reorganized directory.
    """
    dirs = {}
    dirs['trainA'] = os.path.join(dataset_dir, 'trainA')
    dirs['trainB'] = os.path.join(dataset_dir, 'trainB')
    dirs['testA'] = os.path.join(dataset_dir, 'testA')
    dirs['testB'] = os.path.join(dataset_dir, 'testB')

    # FIX: pass a real list - mkdir() only unwraps list/tuple, so a Python 3
    # dict_values view fell through to os.path.isdir() and raised TypeError.
    mkdir(list(dirs.values()))

    for key in dirs:
        # Drop any stale link first so os.symlink() cannot fail with
        # FileExistsError; catch only OSError (the original bare `except`
        # also swallowed KeyboardInterrupt/SystemExit).
        try:
            os.remove(os.path.join(dirs[key], '0'))
        except OSError:
            pass
        os.symlink(os.path.abspath(os.path.join(dataset_dir, key)),
                   os.path.join(dirs[key], '0'))
    return dirs
class ItemPool(object):
    """History buffer of previously generated items (a la Shrivastava et al.):
    items are stored until the pool is full; afterwards, with probability 0.5
    an incoming item is swapped with a random stored one, otherwise it passes
    through unchanged."""

    def __init__(self, max_num=50):
        # max_num: pool capacity; a value <= 0 disables pooling entirely.
        self.max_num = max_num
        self.num = 0
        self.items = []

    def __call__(self, in_items):
        """`in_items` is a list of item."""
        if self.max_num <= 0:
            return in_items
        return_items = []
        for in_item in in_items:
            if self.num < self.max_num:
                # Pool not yet full: store the item and return it unchanged.
                self.items.append(in_item)
                self.num = self.num + 1
                return_items.append(in_item)
            else:
                # FIX: np.random.ranf was removed in NumPy 2.0;
                # np.random.random has identical semantics (uniform in [0, 1)).
                if np.random.random() > 0.5:
                    # Swap: emit a stored item and keep the incoming one.
                    idx = np.random.randint(0, self.max_num)
                    tmp = copy.copy(self.items[idx])
                    self.items[idx] = in_item
                    return_items.append(tmp)
                else:
                    return_items.append(in_item)
        return return_items
| 28.830645 | 80 | 0.590769 | 502 | 3,575 | 3.968127 | 0.221116 | 0.060241 | 0.065261 | 0.039157 | 0.180723 | 0.081827 | 0.042671 | 0.031125 | 0.031125 | 0 | 0 | 0.004734 | 0.290909 | 3,575 | 123 | 81 | 29.065041 | 0.781065 | 0.020979 | 0 | 0.085106 | 0 | 0 | 0.050401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0.010638 | 0.085106 | 0 | 0.244681 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
212d4ef4ab99dec5153f7ecca9ef4a401ae09050 | 10,315 | py | Python | occuspytial/utils.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 5 | 2018-11-27T13:41:51.000Z | 2021-08-28T23:50:13.000Z | occuspytial/utils.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 4 | 2020-08-20T16:58:11.000Z | 2021-03-22T20:27:55.000Z | occuspytial/utils.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 1 | 2021-02-01T16:17:46.000Z | 2021-02-01T16:17:46.000Z | import warnings
import numpy as np
from scipy.linalg import pinvh
def get_generator(random_state=None):
    """Get an instance of a numpy random number generator object.

    The generator is backed by the `SFC64` bitgenerator, the fastest one numpy
    offers as of version 1.19; this helper exists so every module seeds and
    constructs its generator the same way.

    Parameters
    ----------
    random_state : {None, int, array_like[ints], numpy.random.SeedSequence}
        A seed to initialize the bitgenerator. Defaults to ``None``.

    Returns
    -------
    numpy.random.Generator
        Instance of numpy's Generator class, which exposes a number of random
        number generating methods.
    """
    return np.random.default_rng(np.random.SFC64(random_state))
def rand_precision_mat(lat_row, lat_col, max_neighbors=8, rho=1):
    """Generate a random spatial precision matrix.

    The matrix is derived from a ``lat_row`` x ``lat_col`` rectangular
    lattice, so both its dimensions equal ``lat_row * lat_col``.

    Parameters
    ----------
    lat_row : int
        Number of rows of the lattice used to generate the matrix.
    lat_col : int
        Number of columns of the lattice used to generate the matrix.
    max_neighbors : {4, 8}, optional
        The maximum number of neighbors for each site. The default is 8.
    rho : float, optional
        Spatial weight parameter in [0, 1]; 0 implies independent random
        effects, while 1 implies strong spatial autocorrelation and is
        equivalent to generating the Intrinsic Autoregressive Model.

    Returns
    -------
    scipy.sparse.coo_matrix
        Spatial precision matrix.

    Raises
    ------
    ValueError
        If `max_neighbors` is any value other than 4 or 8.
    """
    if max_neighbors not in (4, 8):
        raise ValueError('Maximum number of neighbors should be one of {4, 8}')
    criterion = 'queen' if max_neighbors == 8 else 'rook'
    with warnings.catch_warnings():
        # silence the irrelevant "geopandas not available" import warning
        warnings.simplefilter('ignore', UserWarning)
        import libpysal
    weight_mat = libpysal.weights.lat2SW(
        lat_row, lat_col, criterion=criterion, row_st=False
    )
    weight_mat = weight_mat.tocoo()
    # Row sums (neighbor counts) become the diagonal; off-diagonals are -rho.
    neighbor_counts = weight_mat.sum(axis=1).A1
    weight_mat.data = -weight_mat.data * rho
    weight_mat.setdiag(neighbor_counts)
    return weight_mat
def make_data(
    n=150,
    min_v=None,
    max_v=None,
    ns=None,
    p=3,
    q=3,
    tau_range=(0.25, 1.5),
    max_neighbors=8,
    random_state=None,
):
    """Generate random data to use for modelling species occupancy.

    Parameters
    ----------
    n : int, optional
        Number of sites. Defaults to 150.
    min_v : int, optional
        Minimum number of visits per site. If None, it is set to 2.
        Defaults to None.
    max_v : int, optional
        Maximum number of visits per site. If None, it is set to 10% of `n`.
        Defaults to None.
    ns : int, optional
        Number of surveyed sites out of `n`. If None, then this parameter is
        set to 50% of `n`. Defaults to None.
    p : int, optional
        Number of covariates to use for species occupancy. Defaults to 3.
    q : int, optional
        Number of covariates to use for conditional detection. Defaults to 3.
    tau_range : tuple, optional
        The range to randomly sample the precision parameter value from.
        Defaults to (0.25, 1.5).
    max_neighbors : int, optional
        Maximum number of neighbors per site. Should be one of {4, 8}.
        Default is 8.
    random_state : int, optional
        The seed to use for random number generation. Useful for reproducing
        generated data. If None then a random seed is chosen. Defaults to None.

    Returns
    -------
    Q : scipy.sparse.coo_matrix
        Spatial precision matrix.
    W : Dict[int, np.ndarray]
        Dictionary of detection covariates; keys are surveyed site numbers,
        values are the design matrices of the corresponding sites.
    X : np.ndarray
        Design matrix of species occupancy covariates.
    y : Dict[int, np.ndarray]
        Dictionary of survey data; keys are surveyed site numbers, values are
        arrays of 1's ("detection") and 0's ("no detection"), one entry per
        visit of the corresponding site.
    alpha : np.ndarray
        True values of coefficients of detection covariates.
    beta : np.ndarray
        True values of coefficients of occupancy covariates.
    tau : float
        True value of the precision parameter.
    z : np.ndarray
        True occupancy state for all `n` sites.

    Raises
    ------
    ValueError
        When `n` is less than the default 150 sites.
        When `min_v` is less than 1.
        When `max_v` is less than 2 or greater than `n`.
        When `ns` is not a positive integer or greater than `n`.
    """
    rng = get_generator(random_state)
    # Validate / default the sampling configuration.
    if n < 150:
        raise ValueError('n cant be lower than 150')
    if min_v is None:
        min_v = 2
    elif min_v < 1:
        raise ValueError('min_v needs to be at least 1')
    if max_v is None:
        max_v = n // 10
    elif max_v < 2:
        raise ValueError('max_v is too small')
    elif max_v > n:
        raise ValueError('max_v cant be more than n')
    if ns is None:
        ns = n // 2
    elif ns == 0:
        raise ValueError('ns should be positive')
    elif ns > n:
        raise ValueError('ns cant be more than n')
    # Pick which sites are surveyed and how many visits each receives.
    surveyed_sites = rng.choice(range(n), size=ns, replace=False)
    visits_per_site = rng.integers(min_v, max_v, size=ns, endpoint=True)
    # True regression coefficients and precision parameter.
    alpha = rng.standard_normal(q)
    beta = rng.standard_normal(p)
    tau = rng.uniform(*tau_range)
    # Choose lattice dimensions (row, col) such that row * col == n.
    factors = []
    for i in range(3, n):
        if (n % i) == 0:
            factors.append(i)
    row = rng.choice(factors)
    col = n // row
    Q = rand_precision_mat(row, col, max_neighbors=max_neighbors).astype(float)
    # Spatial random effects eta ~ N(0, pinv(Q) / tau); pinvh because Q is
    # singular (an intrinsic CAR precision matrix).
    Q_pinv = pinvh(Q.toarray(), cond=1e-5)
    eta = rng.multivariate_normal(np.zeros(n), Q_pinv / tau, method='eigh')
    # Occupancy design matrix with an intercept column.
    X = rng.uniform(-2, 2, n * p).reshape(n, -1)
    X[:, 0] = 1
    # psi = logistic(X @ beta + eta), computed via -logaddexp for stability.
    psi = np.exp(-np.logaddexp(0, -X @ beta + eta))
    z = rng.binomial(1, p=psi, size=n)
    W, y = {}, {}
    # Per surveyed site: detection design matrix and per-visit observations
    # (detections are possible only where the true occupancy z[i] is 1).
    for i, j in zip(surveyed_sites, visits_per_site):
        _W = rng.uniform(-2, 2, size=j * q).reshape(j, -1)
        _W[:, 0] = 1
        d = np.exp(-np.logaddexp(0, -_W @ alpha))
        W[i] = _W
        y[i] = rng.binomial(1, z[i] * d)
    return Q, W, X, y, alpha, beta, tau, z
| 34.966102 | 79 | 0.552399 | 1,517 | 10,315 | 3.700725 | 0.216875 | 0.038475 | 0.040078 | 0.034913 | 0.226755 | 0.165123 | 0.132348 | 0.103848 | 0.083007 | 0.072854 | 0 | 0.114201 | 0.313233 | 10,315 | 294 | 80 | 35.085034 | 0.678289 | 0.67872 | 0 | 0 | 0 | 0 | 0.080402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.051282 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
213024992027d02f4dffdced7cd69f769559b582 | 1,119 | py | Python | Easy/409.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Easy/409.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Easy/409.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 409. Longest Palindrome
#
# Description:
# Given a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.
# This is case sensitive, for example "Aa" is not considered a palindrome here.
# Note:
# Assume the length of given string will not exceed 1,010.
# Example:
# Input:
# "abccccdd"
# Output:
# 7
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
#
# Version: 1.0
# 06/29/18 by Jianfa
# ------------------------------
class Solution(object):
    def longestPalindrome(self, s):
        """
        Length of the longest palindrome buildable from the letters of s
        (case sensitive). Even character counts contribute fully; odd counts
        contribute count - 1, plus one extra center character if any count
        was odd.
        :type s: str
        :rtype: int
        """
        total = 0
        has_odd = False
        for ch in set(s):
            occurrences = s.count(ch)
            if occurrences % 2:
                total += occurrences - 1
                has_odd = True
            else:
                total += occurrences
        return total + (1 if has_odd else 0)
# Used for testing
if __name__ == "__main__":
    # Instantiate once so the solution above can be exercised manually.
    test = Solution()
# ------------------------------
# Summary:
# | 24.326087 | 147 | 0.526363 | 133 | 1,119 | 4.368421 | 0.616541 | 0.030981 | 0.036145 | 0.048193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029487 | 0.302949 | 1,119 | 46 | 148 | 24.326087 | 0.715385 | 0.559428 | 0 | 0 | 0 | 0 | 0.018223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21330019fbaa4db50ee20e38ed8c5d8aea8c4dbc | 313 | py | Python | codigo_funcoes_exercicios/exemplo3-saque de saldo.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | 1 | 2021-09-26T09:17:36.000Z | 2021-09-26T09:17:36.000Z | codigo_funcoes_exercicios/exemplo3-saque de saldo.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | null | null | null | codigo_funcoes_exercicios/exemplo3-saque de saldo.py | rosacarla/100-days-of-python-code | 3db9e35f861ce933e952cff2dd3a505dfce1b440 | [
"MIT"
] | null | null | null | def sacar(saldo):
montante = float(input("Quanto deseja sacar? "))
saldo = saldo - montante
return saldo
# Read the starting balance and the number of withdrawals, then thread the
# running balance through sacar() once per withdrawal.
saldo = float(input("Informe o saldo total: "))
qtde = int(input("Quantos saques vc deseja? "))
for c in range(qtde):
    saldo = sacar(saldo)
print("Saldo final: ", saldo)
| 10.793103 | 52 | 0.616613 | 40 | 313 | 4.825 | 0.575 | 0.15544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.252396 | 313 | 28 | 53 | 11.178571 | 0.824786 | 0 | 0 | 0 | 0 | 0 | 0.271242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2139e18eede874f8c9984b45680b934dcf1d9148 | 1,267 | py | Python | test/test_del_user_from_group.py | SergKitler/python_training | a5bbc5f4121dc10ef2b5a3bddacaf65595dc948c | [
"Apache-2.0"
] | null | null | null | test/test_del_user_from_group.py | SergKitler/python_training | a5bbc5f4121dc10ef2b5a3bddacaf65595dc948c | [
"Apache-2.0"
] | null | null | null | test/test_del_user_from_group.py | SergKitler/python_training | a5bbc5f4121dc10ef2b5a3bddacaf65595dc948c | [
"Apache-2.0"
] | null | null | null | __author__ = 'sergei'
from model.group import Group
from model.user import User
import random
def test_del_user_from_group(app, orm):
    # Removing a contact from a group through the UI (`app`) must be reflected
    # in the DB (`orm`): prior membership minus the removed user has to equal
    # the membership fetched afterwards.
    if len(orm.get_contact_list()) == 0:
        app.user.add(User(firstname="Jeck", lastname="Antonio"))
    if len(orm.get_group_list()) == 0:
        app.group.create(Group(name="TestGroup"))
    old_group = orm.get_group_list()
    group = random.choice(old_group)
    old_users_in_group = orm.get_contacts_in_group(group)
    if len(old_users_in_group) == 0:
        # Chosen group is empty: create a fresh contact, recover its DB id by
        # diffing the contact lists, and put it into the group first.
        old_users = orm.get_contact_list()
        user_in_group = User(firstname="Jeck2", lastname="Antonio")
        app.user.add(user_in_group)
        new_users = orm.get_contact_list()
        old_id = list(old_users[i].id for i in range(len(old_users)))
        # NOTE(review): `id` shadows the builtin; kept as-is (comments-only change).
        id = list(new_users[i].id for i in range(len(new_users)) if new_users[i].id not in old_id )
        print(id)
        app.user.add_user_by_id_to_group(id[0],group.id)
        user_in_group.id = id[0]
        old_users_in_group.append(user_in_group)
    # Remove a random member and compare DB state against the expectation.
    user = random.choice(old_users_in_group)
    app.user.del_user_by_id_from_group(user.id, group.id)
    new_users_in_group = orm.get_contacts_in_group(group)
    old_users_in_group.remove(user)
    assert old_users_in_group == new_users_in_group
| 39.59375 | 99 | 0.700868 | 212 | 1,267 | 3.825472 | 0.212264 | 0.120838 | 0.118372 | 0.110974 | 0.23058 | 0.147965 | 0.147965 | 0.147965 | 0.093711 | 0 | 0 | 0.005803 | 0.183899 | 1,267 | 31 | 100 | 40.870968 | 0.77853 | 0 | 0 | 0 | 0 | 0 | 0.029992 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
213bcd6bcb089f055fbbea7d10486c9635c089cf | 12,883 | py | Python | BB/bbConfig/bbConfig.py | Laura7089/GOF2BountyBot | 96d678bb3eda116292f4c1575d5e046206aa3cdd | [
"MIT"
] | null | null | null | BB/bbConfig/bbConfig.py | Laura7089/GOF2BountyBot | 96d678bb3eda116292f4c1575d5e046206aa3cdd | [
"MIT"
] | null | null | null | BB/bbConfig/bbConfig.py | Laura7089/GOF2BountyBot | 96d678bb3eda116292f4c1575d5e046206aa3cdd | [
"MIT"
] | null | null | null | # Typing imports
from __future__ import annotations
import math, random, pprint
from ..bbUtil import dumbEmoji
##### UTIL #####
# Number of decimal places to calculate itemTLSpawnChanceForShopTL values to
# Also used as the random-roll resolution in pickRandomShopTL/pickRandomItemTL.
tl_resolution = 3
def truncToRes(num : float) -> float:
    """Truncate the passed float to tl_resolution decimal places.

    :param float num: Float number to truncate
    :return: num, truncated to tl_resolution decimal places
    :rtype: float
    """
    # Shift left by tl_resolution digits, drop the fraction, shift back.
    scale = math.pow(10, tl_resolution)
    return math.trunc(num * scale) / scale
##### DUELS #####
# Amount of time before a duel request expires
duelReqExpiryTime = {"days":1}
# duelReqExpiryTime as a user-friendly string for printing
duelReqExpiryTimeStr = "1 day"
# The amount to vary ship stats (+-) by before executing a duel
duelVariancePercent = 0.05
# Max number of entries that can be printed for a duel log
duelLogMaxLength = 10
# Percentage probability of a user envoking a cloak module in a given timeStep, should they have one equipped
duelCloakChance = 20

##### SHOPS #####
# Amount of time to wait between refreshing stock of all shops
shopRefreshStockPeriod = {"days":0, "hours":6, "minutes":0, "seconds":0}
# The number of ranks to use when randomly picking shop stock
numShipRanks = 10
numWeaponRanks = 10
numModuleRanks = 7
numTurretRanks = 3
# The default number of items shops should generate every shopRefreshStockPeriod
shopDefaultShipsNum = 5
shopDefaultWeaponsNum = 5
shopDefaultModulesNum = 5
shopDefaultTurretsNum = 2
# bbTurret is the only item that has a probability not to be spawned. This metric indicates the percentage chance of turrets being stocked on a given day
turretSpawnProbability = 45
# The range of valid tech levels a shop may spawn at
minTechLevel = 1
maxTechLevel = 10

# The probability of a shop spawning with a given tech level. Tech level = index + 1
cumulativeShopTLChance = [0 for tl in range(minTechLevel, maxTechLevel + 1)]
shopTLChance = [0 for tl in range(minTechLevel, maxTechLevel + 1)]
itemChanceSum = 0

# Calculate spawn chance for each shop TL
for shopTL in range(minTechLevel, maxTechLevel + 1):
    itemChance = truncToRes(1 - math.exp((shopTL - 10.5) / 5))
    cumulativeShopTLChance[shopTL - 1] = itemChance
    itemChanceSum += itemChance

# Scale shop TL probabilities so that they add up to 1
for shopTL in range(minTechLevel, maxTechLevel + 1):
    currentChance = cumulativeShopTLChance[shopTL - 1]
    if currentChance != 0:
        cumulativeShopTLChance[shopTL - 1] = truncToRes(currentChance / itemChanceSum)

# Save non-cumulative probabilities
# BUGFIX: previously each element was assigned the whole cumulativeShopTLChance
# list ("shopTLChance[i] = cumulativeShopTLChance"), so shopTLChance ended up
# as a list of list references rather than scalars. Now copies element-wise,
# matching the parallel per-element copy done for itemTLSpawnChanceForShopTL.
for i in range(len(cumulativeShopTLChance)):
    shopTLChance[i] = cumulativeShopTLChance[i]

# Sum probabilities to give cumulative scale
currentSum = 0
for shopTL in range(minTechLevel, maxTechLevel + 1):
    currentChance = cumulativeShopTLChance[shopTL - 1]
    if currentChance != 0:
        cumulativeShopTLChance[shopTL - 1] = truncToRes(currentSum + currentChance)
        currentSum += currentChance
def pickRandomShopTL() -> int:
    """Pick a random shop techlevel, with probabilities calculated previously in bbConfig.

    :return: An integer between 1 and 10 representing a shop tech level
    :rtype: int
    """
    # Roll a uniform value in (0, 1] at tl_resolution decimal places, then
    # walk the cumulative distribution until the roll is covered.
    resolution = 10 ** tl_resolution
    roll = random.randint(1, resolution) / resolution
    for index, threshold in enumerate(cumulativeShopTLChance):
        if threshold >= roll:
            return index + 1
    return maxTechLevel
# Price ranges by which ships should be ranked into tech levels. 0th index = tech level 1
shipMaxPriceTechLevels = [50000, 100000, 200000, 500000, 1000000, 2000000, 5000000, 7000000, 7500000, 999999999]
# CUMULATIVE probabilities of items of a given tech level spawning in a shop of a given tech level
# Outer dimension is shop tech level
# Inner dimension is item tech level
itemTLSpawnChanceForShopTL = [[0 for i in range(minTechLevel, maxTechLevel + 1)] for i in range(minTechLevel, maxTechLevel + 1)]
cumulativeItemTLSpawnChanceForShopTL = [[0 for i in range(minTechLevel, maxTechLevel + 1)] for i in range(minTechLevel, maxTechLevel + 1)]
# Parameters for itemTLSpawnChanceForShopTL values, using quadratic function: https://www.desmos.com/calculator/n2xfxf8taj
# Original u function by Novahkiin22: https://www.desmos.com/calculator/tnldodey5u
# Original function by Novahkiin22: https://www.desmos.com/calculator/nrshikfmxc
tl_s = 7
tl_o = 2.3
"""def tl_u(x, t):
h = t - tl_s
tl_n = (x - tl_o - h) / tl_s
mid = tl_n * (1 - math.pow(tl_n, 4))
outer = tl_s * mid - (h / 2)
return truncToRes(outer if outer > 0 else 0)"""
def tl_u(x : int, t : int) -> float:
    """mathematical function used when calculating item spawn probabilities.

    :param int x: int representing the item's tech level
    :param int t: int representing the owning shop's tech level
    :return: A partial probability for use in probability generation
    :rtype: float
    """
    # Inverted parabola centred on the shop's TL; clamp negatives to zero.
    raw = 1 - math.pow((x - t) / 1.4, 2)
    probability = truncToRes(raw)
    return probability if probability > 0 else 0
# Build itemTLSpawnChanceForShopTL (per-TL probabilities) and
# cumulativeItemTLSpawnChanceForShopTL (running totals) in place.
# Loop through shop TLs
for shopTL in range(minTechLevel, maxTechLevel + 1):
    tl_h = shopTL - tl_s  # NOTE(review): unused since tl_u was rewritten
    itemChanceSum = 0
    # Calculate spawn chance for each item TL in this shop TL
    for itemTL in range(minTechLevel, maxTechLevel + 1):
        itemChance = tl_u(itemTL, shopTL)
        cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][itemTL - 1] = itemChance
        itemChanceSum += itemChance
    # Scale item TLs so that they add up to 1
    for itemTL in range(minTechLevel, maxTechLevel + 1):
        currentChance = cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][itemTL - 1]
        if currentChance != 0:
            cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][itemTL - 1] = truncToRes(currentChance / itemChanceSum)
    # Save non-cumulative probabilities
    for i in range(len(cumulativeItemTLSpawnChanceForShopTL[shopTL - 1])):
        itemTLSpawnChanceForShopTL[shopTL - 1][i] = cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][i]
    # Sum probabilities to give cumulative scale
    currentSum = 0
    for itemTL in range(minTechLevel, maxTechLevel + 1):
        currentChance = cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][itemTL - 1]
        if currentChance != 0:
            cumulativeItemTLSpawnChanceForShopTL[shopTL - 1][itemTL - 1] = truncToRes(currentSum + currentChance)
            currentSum += currentChance

# Log the generated (non-cumulative) rarity table at import time.
print("[bbConfig] Item rarities generated:")
for shopTL in range(len(itemTLSpawnChanceForShopTL)):
    print("\t• shop TL" + str(shopTL+1) + ": itemTL",end="")
    for itemTL in range(len((itemTLSpawnChanceForShopTL[shopTL]))):
        if itemTLSpawnChanceForShopTL[shopTL][itemTL] != 0:
            print(" " + str(itemTL + 1) + "=" + str(truncToRes(itemTLSpawnChanceForShopTL[shopTL][itemTL]*100)),end="% ")
    print()
def pickRandomItemTL(shopTL : int) -> int:
    """Pick a random item techlevel, with probabilities calculated previously in bbConfig.

    :param int shopTL: int representing the tech level of the shop owning the item
    :return: An integer between 1 and 10 representing a item tech level
    :rtype: int
    """
    # Same scheme as pickRandomShopTL, but against this shop's cumulative row.
    resolution = 10 ** tl_resolution
    roll = random.randint(1, resolution) / resolution
    for index, threshold in enumerate(cumulativeItemTLSpawnChanceForShopTL[shopTL - 1]):
        if threshold >= roll:
            return index + 1
    return maxTechLevel
##### BOUNTIES #####
# Maximum number of simultaneously active bounties per faction
maxBountiesPerFaction = 5
# The maximum number of bounties a player is allowed to win each day
maxDailyBountyWins = 10
# can be "fixed" or "random"
# NOTE(review): the value below is "random-routeScale", which the comment does
# not list — presumably "-routeScale" variants are also valid; confirm against
# the bounty spawner.
newBountyDelayType = "random-routeScale"
### fixed delay config
# only spawn bounties at this time
newBountyFixedDailyTime = {"hours":18, "minutes":40, "seconds":0}
# use the above, or just spawn after every newBountyFixedDelta
newBountyFixedUseDailyTime = False
# time to wait inbetween spawning bounties
# when using fixed-routeScale generation, use this for bounties of route length 1
newBountyFixedDelta = {"days":0, "hours":0, "minutes":1, "seconds":0}
### random delay config
# when using random delay generation, use these min and max points
# when using random-routeScale generation, use these min and max points for bounties of route length 1
newBountyDelayRandomRange = {"min": 5 * 60, "max": 7 * 60}
### routeScale config
newBountyDelayRouteScaleCoefficient = 1
fallbackRouteScale = 5
# The number of credits to award for each bPoint (each system in a criminal route)
bPointsToCreditsRatio = 1000
# time to put users on cooldown between using !bb check
checkCooldown = {"minutes":3}
# number of bounties ahead of a checked system in a route to report a recent criminal spotting (+1)
closeBountyThreshold = 4
# Text to send to a BountyBoardChannel when no bounties are currently active
bbcNoBountiesMsg = "```css\n[ NO ACTIVE BOUNTIES ]\n\nThere are currently no active bounty listings.\nPlease check back later, or use [ $notify bounties ] to be pinged when new ones become available!\n```"
# The number of times to retry BBC listing updates when HTTP exceptions are thrown
bbcHTTPErrRetries = 3
# The number of seconds to wait between BBC listing update retries upon HTTP exception catching
bbcHTTPErrRetryDelaySeconds = 1
##### SAVING #####
# The time to wait inbetween database autosaves.
savePeriod = {"hours":1}
# path to JSON files for database saves
userDBPath = "saveData/users.json"
guildDBPath = "saveData/guilds.json"
bountyDBPath = "saveData/bounties.json"
reactionMenusDBPath = "saveData/reactionMenus.json"
# path to folder to save log txts to
loggingFolderPath = "saveData/logs"
##### SCHEDULING #####
# Whether to execute timedtask checks every timedTaskLatenessThresholdSeconds ("fixed"), or to calculate the delay to wait until the next TimedTask is schedule to expire ("dynamic")
timedTaskCheckingType = "fixed"
# How late a timed task may acceptably be in seconds.
# I.e a scheduled task may expire up to timedTaskLatenessThresholdSeconds seconds after their intended expiration time.
# replaces the depracated 'delayFactor' variable
timedTaskLatenessThresholdSeconds = 10
##### MISC #####
# prefix for bot commands. dont forget a space if you want one!
commandPrefix = "$"
# When a user message prompts a DM to be sent, this emoji will be added to the message reactions.
dmSentEmoji = dumbEmoji(unicode="📬")
# max number of characters accepted by nameShip
maxShipNickLength = 30
# max number of characters accepted by nameShip, when called by a developer
maxDevShipNickLength = 100
# The default emojis to list in a reaction menu
numberEmojis = [dumbEmoji(unicode="0️⃣"), dumbEmoji(unicode="1️⃣"), dumbEmoji(unicode="2️⃣"), dumbEmoji(unicode="3️⃣"), dumbEmoji(unicode="4️⃣"), dumbEmoji(unicode="5️⃣"), dumbEmoji(unicode="6️⃣"), dumbEmoji(unicode="7️⃣"), dumbEmoji(unicode="8️⃣"), dumbEmoji(unicode="9️⃣"), dumbEmoji(unicode="🔟")]
defaultMenuEmojis = numberEmojis
defaultCancelEmoji = dumbEmoji(unicode="🇽")
defaultErrEmoji = dumbEmoji(unicode="❓")
defaultAcceptEmoji = dumbEmoji(unicode="👍")
defaultRejectEmoji = dumbEmoji(unicode="👎")
##### ADMINISTRATION #####
# discord user IDs of all developers
developers = [188618589102669826, 448491245296418817]
# titles to give each type of user when reporting error messages etc
devTitle = "officer"
adminTitle = "commander"
userTitle = "pilot"
# Servers where bountyBot commands are disabled. Currently this is just the emoji servers:
disabledServers = [723704980246233219, 723702782640783361, 723708988830515231, 723704665560055848, 723705817764986900, 723703454635393056, 723708655031156742, 723706906517962814, 723704087962583131, 723704350131748935]
##### HANGARS #####
# The maximum number of items that will be displayed per page of a user's hangar, when all item types are requested
maxItemsPerHangarPageAll = 3
# The maximum number of items that will be displayed per page of a user's hangar, when a single item type is requested
maxItemsPerHangarPageIndividual = 10
# Names to be used when checking input to !bb hangar and bbUser.numInventoryPages
validItemNames = ["ship", "weapon", "module", "turret", "all"]
##### USERS #####
# Default on/off state for each user alert type; True = subscribed by default.
userAlertsIDsDefaults = {   "bounties_new": False,
                            "shop_refresh": False,
                            "duels_challenge_incoming_new": True,
                            "duels_challenge_incoming_cancel": False,
                            "system_updates_major": False,
                            "system_updates_minor": False,
                            "system_misc": False}
##### REACTION MENUS #####
# Default lifetimes for each reaction menu type before auto-expiry.
roleMenuDefaultTimeout = {"days": 1}
duelChallengeMenuDefaultTimeout = {"hours": 2}
pollMenuDefaultTimeout = {"hours": 2}
expiredMenuMsg = "😴 This role menu has now expired."
pollMenuResultsBarLength = 10
maxRoleMenusPerGuild = 10
| 37.126801 | 299 | 0.726772 | 1,586 | 12,883 | 5.895965 | 0.313367 | 0.014223 | 0.026414 | 0.043097 | 0.282857 | 0.24607 | 0.218479 | 0.16918 | 0.146936 | 0.127901 | 0 | 0.045169 | 0.183731 | 12,883 | 346 | 300 | 37.234104 | 0.841385 | 0.397112 | 0 | 0.190141 | 0 | 0.007042 | 0.098713 | 0.014786 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.021127 | 0 | 0.091549 | 0.035211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
213d3e71dd31496bb63acb66ff3f22dd91bbc213 | 8,383 | py | Python | villebot1.0.py | wkpelt/Villebot | d04a7707684babae07ad93d1fd76ee401fc371de | [
"MIT"
] | 1 | 2019-03-10T22:00:18.000Z | 2019-03-10T22:00:18.000Z | villebot1.0.py | wkpelt/Villebot | d04a7707684babae07ad93d1fd76ee401fc371de | [
"MIT"
] | null | null | null | villebot1.0.py | wkpelt/Villebot | d04a7707684babae07ad93d1fd76ee401fc371de | [
"MIT"
] | null | null | null | #VILLEBot v1.0
#Author = Wiljam Peltomaa
#Last updated = 21.04.2019
import PySimpleGUI as sg
import pyautogui
import pyperclip
import string
from time import sleep
def main():
    """Run the VILLEbot GUI event loop.

    Brute-forces a ViLLE multiple-choice exercise set: scrapes each visible
    answer option via drag-select + clipboard copy, clicks untried options,
    records which guesses were right or wrong (green highlight check),
    replays known-correct answers on restart, and finally appends the
    collected answers to vastaukset.txt.

    NOTE(review): behaviour depends entirely on on-screen pixel layout
    (blue.png / green.png / restart.png templates) — screen-resolution
    sensitive by construction.
    """
    print('To stop the bot, drag the cursor to the top-left corner of your screen or press Ctrl-Alt-Del')
    texts = []              # option texts scraped from the current page
    locations = []          # (x, y) click positions of the scraped options
    last = ''               # text of the most recently clicked option
    correct_answers = []    # confirmed correct answer texts, in task order
    wrong = []              # per-task lists of already-tried wrong answers
    task = 0                # index of the task currently being solved
    questions = 0           # total question count, from user input
    assignment_name = "Tehtävä" #default name
    allDone = False
    #GUI
    layout = [[sg.Text('Montako kysymystä?'),
               sg.Text('', key='total_questions', text_color='blue', background_color='white', size=(17, 1))],
              [sg.Input(size=(40, 3), do_not_clear=False, key='_IN_')],
              [sg.Text('Tehtäväsarjan nimi?'),
               sg.Text('', key='assignment_name', text_color='blue', background_color='white', size=(17, 1))],
              [sg.Input(size=(40, 3), do_not_clear=False, key='_IN2_')],
              [sg.Image('villelogo.png')],
              [sg.Output(size=(37, 30))],
              [sg.Text('Tehtävä nr:', size=(8, 1)), sg.Text('', key='current_task', size=(2, 1))],
              [sg.Text('Kerättyjä vastauksia:', size=(15, 1)), sg.Text('', key='collected_answers', size=(2, 1))],
              [sg.Button('OK', button_color=('white', 'green')), sg.Button('F11'), sg.Button('Start'),
               sg.Text('by wkpelt', size=(12, 1)), sg.Exit(button_color=('white', 'red'), size=(5, 1)), ]]
    window = sg.Window('VILLEbot v1.0', auto_size_text=False, default_element_size=(16, 1), keep_on_top=True).Layout(layout)
    window.SetIcon('favicon.ico')

    def scrollDown(n):
        # Scroll the browser page down with n "page down" key presses.
        for i in range(n):
            pyautogui.press('pgdn')

    while True:
        event, values = window.Read(timeout=0)
        if event is None or event == 'Exit':
            break
        if event == 'F11':
            # Click into the browser window to its left, then toggle fullscreen.
            pyautogui.moveRel(-100, 0)
            pyautogui.click()
            pyautogui.press('f11')
            pyautogui.moveRel(100, 0)
        if event == 'OK':
            # Read question count and assignment name from the input fields.
            try:
                questions = int(values['_IN_'])
                for i in range(questions):
                    wrong.append([])
            except ValueError:
                print("Yritätkö edes? Anna numero!")
            assignment_name = str(values['_IN2_'])
            window.FindElement('total_questions').Update(values['_IN_'])
            window.FindElement('assignment_name').Update(values['_IN2_'])
        if event == 'Start':
            #find restart button on screen
            restart = pyautogui.locateCenterOnScreen('restart.png')
            while allDone is False:
                scrollDown(5)
                #update values on gui
                window.FindElement('current_task').Update(task + 1)
                window.FindElement('collected_answers').Update(len(correct_answers))
                window.Read(timeout=0)
                done = False
                #find the x,y positions of all possible answers on screen
                for pos in pyautogui.locateAllOnScreen('blue.png'):
                    if not done:
                        locations.append((pos[0] + 5, pos[1] + 5))
                        #skip if the correct answer for the current task has already been found
                        if len(correct_answers) > task and len(correct_answers) != questions:
                            pyautogui.click(locations[-1])
                            done = True
                        #else scan the whole page
                        else:
                            # Drag-select the option text and copy it to the clipboard.
                            pyautogui.moveTo(pos[0] - 20, pos[1] + 5)
                            pyautogui.dragRel(700, 45, 0.2, button='left')
                            pyautogui.hotkey('ctrl', 'c')
                            copied_text = pyperclip.paste()
                            texts.append(copied_text.strip("\r\n"))
                            last = copied_text.strip("\r\n")
                            #if all the correct answers have been found
                            if len(correct_answers) == questions:
                                if correct_answers[task] == last:
                                    pyautogui.click(locations[-1])
                                    pyautogui.press('enter')
                                    if task+1 == questions:
                                        allDone = True
                                    task += 1
                                    done = True
                            #check if last copied text has already been tried before
                            else:
                                for i in range(len(texts)):
                                    if texts[i] not in wrong[task]:
                                        pyautogui.click(locations[i])
                                        done = True
                texts = []
                locations = []
                window.Read(timeout=0)
                while len(correct_answers) != questions:
                    window.Read(timeout=0)
                    #if the current question is the last question, get rid of the popup
                    if (questions == (task+1)):
                        sleep(0.1)
                        pyautogui.press('tab', interval=0.1)
                        pyautogui.press('tab', interval=0.1)
                        pyautogui.press('enter', interval=0.1)
                    #if the current question's correct answer has already been found, skip
                    if len(correct_answers) > task:
                        pyautogui.press('enter')
                        task += 1
                        print("Skip\n")
                        break
                    green = pyautogui.locateOnScreen('green.png')
                    #if clicked option was the correct answer
                    if green is not None:
                        task += 1
                        #skip if found already
                        if len(correct_answers) >= task:
                            pyautogui.press('enter')
                            scrollDown(5)
                            window.Read(timeout=0)
                            break
                        #new correct answer
                        else:
                            print(f"Oikea vastaus:\n {last} \n")
                            right_answer = last
                            #strip unnecessary regex from the copied text (ty ville)
                            correct_answers.append(right_answer.strip("\r\n"))
                            pyautogui.press('enter')
                            scrollDown(5)
                            window.Read(timeout=0)
                            # All answers collected: restart the set to replay them.
                            if len(correct_answers) == questions:
                                pyautogui.click(restart)
                                task = 0
                            break
                    #if clicked option was the wrong answer
                    elif green is None:
                        print(f"Väärä vastaus:\n {last} \n")
                        wrong[task].append(last)
                        print("Arvattu ", len(wrong[task]), " kertaa tehtävää numero", len(correct_answers) + 1, "\n")
                        task = 0
                        pyautogui.click(restart)
                        window.Read(timeout=0)
                        scrollDown(5)
                        break
            print("Oikeat vastaukset: ")
            #save the correct answers into a .txt file
            nr = 1
            with open("vastaukset.txt", "a", encoding='iso-8859-1') as f:
                f.write("\n" + str(assignment_name) + "\n")
                for i in correct_answers:
                    f.write(str(nr) + ": " + str(i) + "\n")
                    print(nr, ": ", i)
                    nr += 1
            window.Read(timeout=0)
            #clear the variables
            wrong = []
            correct_answers = []
            task = 0
            questions = 0
            allDone = False
            #alert that all the correct answers have been found
            pyautogui.alert(text='Valmis!', title='', button='OK')
    window.Close()


main()
| 44.590426 | 125 | 0.445187 | 810 | 8,383 | 4.535802 | 0.279012 | 0.06478 | 0.041644 | 0.039194 | 0.202776 | 0.1546 | 0.120033 | 0.102069 | 0.084377 | 0.058247 | 0 | 0.026053 | 0.450555 | 8,383 | 187 | 126 | 44.828877 | 0.771602 | 0.094238 | 0 | 0.363014 | 0 | 0.006849 | 0.093039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013699 | false | 0 | 0.034247 | 0 | 0.047945 | 0.054795 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
213db5f35ea6d1846adc243063d3e37e46d77f5c | 2,487 | py | Python | test/test_erc20.py | patractlabs/py-patract | ba36981898d93ad68ab832ea4cdc8843bb545105 | [
"Apache-2.0"
] | 10 | 2021-01-04T01:35:05.000Z | 2021-04-15T19:36:01.000Z | test/test_erc20.py | patractlabs/py-patract | ba36981898d93ad68ab832ea4cdc8843bb545105 | [
"Apache-2.0"
] | 3 | 2021-04-16T10:23:45.000Z | 2021-05-17T21:44:46.000Z | test/test_erc20.py | patractlabs/py-patract | ba36981898d93ad68ab832ea4cdc8843bb545105 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
import logging
from substrateinterface import SubstrateInterface, ContractMetadata, Keypair
from substrateinterface.utils.ss58 import ss58_encode
from patractinterface.unittest.env import SubstrateTestEnv
from patractinterface.contracts.erc20 import ERC20
class ERC20TestCase(unittest.TestCase):
    """Integration tests for the ERC20 contract wrapper, run against a local
    Europa (substrate sandbox) node started by SubstrateTestEnv."""

    @classmethod
    def tearDown(cls):
        # NOTE(review): tearDown normally runs per-test as an instance method;
        # paired with @classmethod and a one-time setUpClass this stops the
        # node after the first test. tearDownClass was presumably intended —
        # harmless here since the class has a single test method.
        cls.env.stop_node()

    @classmethod
    def setUpClass(cls):
        # Start a sandbox node, load erc20 contract metadata/wasm, and deploy
        # the contract with the whole supply (1,000,000 * 10**15) minted to Alice.
        logging.info("init deplay")
        cls.env = SubstrateTestEnv.create_europa(port=39944)
        cls.env.start_node()
        cls.substrate=SubstrateInterface(url=cls.env.url(), type_registry_preset=cls.env.typ(), type_registry=cls.env.types())
        cls.contract_metadata = ContractMetadata.create_from_file(
            metadata_file=os.path.join(os.path.dirname(__file__), 'constracts', 'ink', 'erc20.json'),
            substrate=cls.substrate
        )
        cls.erc20 = ERC20.create_from_contracts(
            substrate= cls.substrate,
            contract_file= os.path.join(os.path.dirname(__file__), 'constracts', 'ink', 'erc20.wasm'),
            metadata_file= os.path.join(os.path.dirname(__file__), 'constracts', 'ink', 'erc20.json')
        )
        cls.alice = Keypair.create_from_uri('//Alice')
        cls.bob = Keypair.create_from_uri('//Bob')
        cls.erc20.instantiate_with_code(cls.alice, 1000000 * (10 ** 15))

    def transfer(self):
        # Helper (no test_ prefix, so not auto-discovered): check total supply,
        # transfer 10000 from Alice to Bob, and verify Bob's balance.
        supply = self.erc20.totalSupply()
        self.assertEqual(supply, 1000000 * (10 ** 15))
        res = self.erc20.transfer(self.alice, self.bob.ss58_address, 10000)
        self.assertTrue(res.is_success)
        self.check_balance_of(self.bob.ss58_address, 10000)

    def transfer_from(self):
        # Helper: delegated transfer spending the allowance set in approve().
        res = self.erc20.transfer_from(self.alice,
                                       from_acc=self.alice.ss58_address,
                                       to_acc=self.bob.ss58_address,
                                       amt=10000)
        self.assertTrue(res.is_success)

    def approve(self):
        # Helper: Alice approves Bob for 10000, then the allowance is read back.
        res = self.erc20.approve(self.alice, spender=self.bob.ss58_address, amt=10000)
        self.assertTrue(res.is_success)
        allowance = self.erc20.allowance(self.alice.ss58_address, self.bob.ss58_address)
        self.assertEqual(allowance, 10000)

    def check_balance_of(self, acc, value):
        # Assert that account `acc`'s token balance equals `value`.
        res = self.erc20.balance_of(acc)
        self.assertEqual(res, value)

    def test_exec_and_read(self):
        # Single entry point; helpers must run in this order because they
        # share on-chain state (transfer_from relies on approve's allowance).
        self.transfer()
        self.approve()
        self.transfer_from()
# Allow running this test module directly (python test_erc20.py).
if __name__ == '__main__':
    unittest.main()
| 35.528571 | 126 | 0.675513 | 302 | 2,487 | 5.350993 | 0.291391 | 0.047649 | 0.034035 | 0.055693 | 0.214728 | 0.189356 | 0.170173 | 0.170173 | 0.170173 | 0.170173 | 0 | 0.050761 | 0.207881 | 2,487 | 69 | 127 | 36.043478 | 0.769543 | 0 | 0 | 0.090909 | 0 | 0 | 0.040209 | 0 | 0 | 0 | 0 | 0 | 0.109091 | 1 | 0.127273 | false | 0 | 0.127273 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
213f46b4589c11995b5f6fb48b27af63bf5048d3 | 33,295 | py | Python | tests/metrics/test_metastatus_lib.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | 2 | 2020-04-09T06:58:46.000Z | 2021-05-03T21:56:03.000Z | tests/metrics/test_metastatus_lib.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | null | null | null | tests/metrics/test_metastatus_lib.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | 1 | 2020-09-29T03:23:02.000Z | 2020-09-29T03:23:02.000Z | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
import mock
from mock import Mock
from mock import patch
from paasta_tools.metrics import metastatus_lib
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
def test_ok_check_threshold():
    """check_threshold passes when usage (10%) is under the limit (30%)."""
    under_limit = metastatus_lib.check_threshold(10, 30)
    assert under_limit


def test_fail_check_threshold():
    """check_threshold fails when usage (80%) exceeds the limit (30%)."""
    over_limit = metastatus_lib.check_threshold(80, 30)
    assert not over_limit
def test_get_mesos_cpu_status():
    # 'maintenance' reservations on slaves are counted as used on top of
    # master/cpus_used: 1 used + 1 reserved = 2 of 3 total, 1 available.
    fake_metrics = {
        'master/cpus_total': 3,
        'master/cpus_used': 1,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'cpus': 1,
                    },
                },
            },
        ],
    }
    total, used, available = metastatus_lib.get_mesos_cpu_status(fake_metrics, fake_mesos_state)
    assert total == 3
    assert used == 2
    assert available == 1
def test_ok_cpu_health():
    # 0.5 used + 0.5 maintenance-reserved = 1.0 of 10 CPUs -> healthy output.
    ok_metrics = {
        'master/cpus_total': 10,
        'master/cpus_used': 0.5,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'cpus': 0.5,
                    },
                },
            },
        ],
    }
    ok_output, ok_health = metastatus_lib.assert_cpu_health(ok_metrics, fake_mesos_state)
    assert ok_health
    assert "CPUs: 1.00 / 10 in use (%s)" % PaastaColors.green("10.00%") in ok_output


def test_bad_cpu_health():
    # 8 used + 1 maintenance-reserved = 9 of 10 CPUs -> under 10% free -> CRITICAL.
    failure_metrics = {
        'master/cpus_total': 10,
        'master/cpus_used': 8,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'cpus': 1,
                    },
                },
            },
        ],
    }
    failure_output, failure_health = metastatus_lib.assert_cpu_health(failure_metrics, fake_mesos_state)
    assert not failure_health
    assert "CRITICAL: Less than 10% CPUs available. (Currently using 90.00% of 10)" in failure_output
def test_assert_memory_health():
    # Memory metrics are in MB; 256 used + 256 reserved = 512MB of 1GB -> healthy.
    ok_metrics = {
        'master/mem_total': 1024,
        'master/mem_used': 256,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'mem': 256,
                    },
                },
            },
        ],
    }
    ok_output, ok_health = metastatus_lib.assert_memory_health(ok_metrics, fake_mesos_state)
    assert ok_health
    assert "Memory: 0.50 / 1.00GB in use (%s)" % PaastaColors.green("50.00%") in ok_output


def test_failing_memory_health():
    # 500 used + 500 reserved = 1000MB of 1024MB (97.66%) -> CRITICAL.
    failure_metrics = {
        'master/mem_total': 1024,
        'master/mem_used': 500,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'mem': 500,
                    },
                },
            },
        ],
    }
    failure_output, failure_health = metastatus_lib.assert_memory_health(failure_metrics, fake_mesos_state)
    assert not failure_health
    assert "CRITICAL: Less than 10% memory available. (Currently using 97.66% of 1.00GB)" in failure_output
def test_assert_disk_health():
    # Disk metrics are in MB; 256 used + 256 reserved = 512MB of 1GB -> healthy.
    ok_metrics = {
        'master/disk_total': 1024,
        'master/disk_used': 256,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'disk': 256,
                    },
                },
            },
        ],
    }
    ok_output, ok_health = metastatus_lib.assert_disk_health(ok_metrics, fake_mesos_state)
    assert ok_health
    assert "Disk: 0.50 / 1.00GB in use (%s)" % PaastaColors.green("50.00%") in ok_output


def test_failing_disk_health():
    # 500 used + 500 reserved = 1000MB of 1024MB (97.66%) -> CRITICAL.
    failure_metrics = {
        'master/disk_total': 1024,
        'master/disk_used': 500,
    }
    fake_mesos_state = {
        'slaves': [
            {
                'reserved_resources': {
                    'maintenance': {
                        'disk': 500,
                    },
                },
            },
        ],
    }
    failure_output, failure_health = metastatus_lib.assert_disk_health(failure_metrics, fake_mesos_state)
    assert not failure_health
    assert "CRITICAL: Less than 10% disk available. (Currently using 97.66%)" in failure_output
def test_assert_gpu_health():
    # 1 of 3 GPUs in use -> healthy, percentage rendered in green.
    ok_metrics = {
        'master/gpus_total': 3,
        'master/gpus_used': 1,
    }
    ok_output, ok_health = metastatus_lib.assert_gpu_health(ok_metrics)
    assert ok_health
    assert "GPUs: 1 / 3 in use (%s)" % PaastaColors.green("33.33%") in ok_output


def test_assert_no_gpu_health():
    # A cluster with zero GPUs is still considered healthy (nothing to check).
    zero_metrics = {
        'master/gpus_total': 0,
        'master/gpus_used': 0,
    }
    zero_output, zero_health = metastatus_lib.assert_gpu_health(zero_metrics)
    assert zero_health
    assert "No gpus found from mesos!" in zero_output


def test_assert_bad_gpu_health():
    # 3 of 4 GPUs used with a custom 50% threshold -> CRITICAL.
    bad_metrics = {
        'master/gpus_total': 4,
        'master/gpus_used': 3,
    }
    bad_output, bad_health = metastatus_lib.assert_gpu_health(bad_metrics, threshold=50)
    assert not bad_health
    assert "CRITICAL: Less than 50% GPUs available. (Currently using 75.00% of 4)" in bad_output
def test_cpu_health_mesos_reports_zero():
    # A zero total from mesos is treated as a read error, not division by zero.
    mesos_metrics = {
        'master/cpus_total': 0,
        'master/cpus_used': 1,
    }
    fake_mesos_state = {'slaves': []}
    failure_output, failure_health = metastatus_lib.assert_cpu_health(mesos_metrics, fake_mesos_state)
    assert failure_output == "Error reading total available cpu from mesos!"
    assert failure_health is False


def test_memory_health_mesos_reports_zero():
    # Same zero-total guard for memory.
    mesos_metrics = {
        'master/mem_total': 0,
        'master/mem_used': 1,
    }
    fake_mesos_state = {'slaves': []}
    failure_output, failure_health = metastatus_lib.assert_memory_health(mesos_metrics, fake_mesos_state)
    assert failure_output == "Error reading total available memory from mesos!"
    assert failure_health is False


def test_disk_health_mesos_reports_zero():
    # Same zero-total guard for disk.
    mesos_metrics = {
        'master/disk_total': 0,
        'master/disk_used': 1,
    }
    fake_mesos_state = {'slaves': []}
    failure_output, failure_health = metastatus_lib.assert_disk_health(mesos_metrics, fake_mesos_state)
    assert failure_output == "Error reading total available disk from mesos!"
    assert failure_health is False
def test_assert_no_duplicate_frameworks():
    # Four distinct frameworks, all in the checked list -> healthy, one line each.
    state = {
        'frameworks': [
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework2',
            },
            {
                'name': 'test_framework3',
            },
            {
                'name': 'test_framework4',
            },
        ],
    }
    output, ok = metastatus_lib.assert_no_duplicate_frameworks(
        state,
        ['test_framework1', 'test_framework2', 'test_framework3', 'test_framework4'],
    )
    expected_output = "\n".join(
        ["Frameworks:"] +
        ['  Framework: %s count: 1' % x['name'] for x in state['frameworks']],
    )
    assert output == expected_output
    assert ok


def test_duplicate_frameworks():
    # test_framework1 connected three times while being checked -> CRITICAL.
    state = {
        'frameworks': [
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework2',
            },
        ],
    }
    output, ok = metastatus_lib.assert_no_duplicate_frameworks(
        state,
        ['test_framework1', 'test_framework2', 'test_framework3', 'test_framework4'],
    )
    assert "    CRITICAL: There are 3 connected test_framework1 frameworks! (Expected 1)" in output
    assert not ok


def test_duplicate_frameworks_not_checked():
    # Duplicates of a framework NOT in the checked list are ignored -> healthy.
    state = {
        'frameworks': [
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework1',
            },
            {
                'name': 'test_framework2',
            },
        ],
    }
    output, ok = metastatus_lib.assert_no_duplicate_frameworks(
        state,
        ['test_framework2', 'test_framework3', 'test_framework4'],
    )
    assert "test_framework2" in output
    assert ok
def test_ok_marathon_apps():
    """assert_marathon_apps counts apps across clients and reports healthy."""
    fake_client = Mock()
    fake_client.list_apps.return_value = ["MarathonApp::1", "MarathonApp::2"]
    output, ok = metastatus_lib.assert_marathon_apps([fake_client])
    assert re.match("marathon apps: +2", output)
    assert ok


def test_no_marathon_apps():
    """Zero running marathon apps is reported as CRITICAL and unhealthy."""
    fake_client = mock.Mock()
    fake_client.list_apps.return_value = []
    output, ok = metastatus_lib.assert_marathon_apps([fake_client])
    assert "CRITICAL: No marathon apps running" in output
    assert not ok


def test_marathon_tasks():
    """assert_marathon_tasks reports the running task count."""
    fake_client = Mock()
    fake_client.list_tasks.return_value = ["MarathonTask:1"]
    output, ok = metastatus_lib.assert_marathon_tasks([fake_client])
    assert re.match("marathon tasks: +1", output)
    assert ok


def test_assert_marathon_deployments():
    """assert_marathon_deployments reports the in-flight deployment count."""
    fake_client = Mock()
    fake_client.list_deployments.return_value = ["MarathonDeployment:1"]
    output, ok = metastatus_lib.assert_marathon_deployments([fake_client])
    assert re.match("marathon deployments: +1", output)
    assert ok


def test_assert_slave_health():
    """assert_slave_health echoes the active/inactive slave counts."""
    fake_slave_info = {
        'master/slaves_active': 10,
        'master/slaves_inactive': 10,
    }
    output, ok = metastatus_lib.assert_slave_health(fake_slave_info)
    assert "Slaves: active: 10 inactive: 10" in output
    assert ok


def test_assert_tasks_running():
    """assert_tasks_running echoes running/staging/starting task counts."""
    fake_tasks_info = {
        'master/tasks_running': 20,
        'master/tasks_staging': 10,
        'master/tasks_starting': 10,
    }
    output, ok = metastatus_lib.assert_tasks_running(fake_tasks_info)
    assert "Tasks: running: 20 staging: 10 starting: 10" in output
    assert ok
@patch('paasta_tools.metrics.metastatus_lib.get_mesos_quorum', autospec=True)
@patch('paasta_tools.metrics.metastatus_lib.get_num_masters', autospec=True)
def test_healthy_asssert_quorum_size(mock_num_masters, mock_quorum_size):
    # NOTE(review): "asssert" typo in the test name; kept so the collected
    # test name does not change.
    # 5 masters with a configured quorum of 3 -> healthy.
    mock_num_masters.return_value = 5
    mock_quorum_size.return_value = 3
    output, health = metastatus_lib.assert_quorum_size()
    assert health
    assert 'Quorum: masters: 5 configured quorum: 3 ' in output


@patch('paasta_tools.metrics.metastatus_lib.get_mesos_quorum', autospec=True)
@patch('paasta_tools.metrics.metastatus_lib.get_num_masters', autospec=True)
def test_unhealthy_asssert_quorum_size(mock_num_masters, mock_quorum_size):
    # 1 master with a configured quorum of 3 -> CRITICAL.
    mock_num_masters.return_value = 1
    mock_quorum_size.return_value = 3
    output, health = metastatus_lib.assert_quorum_size()
    assert not health
    assert "CRITICAL: Number of masters (1) less than configured quorum(3)." in output
def test_get_marathon_status():
    """The aggregated status contains app, deployment and task counts."""
    fake_client = Mock()
    fake_client.list_apps.return_value = ["MarathonApp::1", "MarathonApp::2"]
    fake_client.list_deployments.return_value = ["MarathonDeployment::1"]
    fake_client.list_tasks.return_value = [
        "MarathonTask::1",
        "MarathonTask::2",
        "MarathonTask::3",
    ]
    results = metastatus_lib.get_marathon_status([fake_client])
    for expected in (
        ("marathon apps: 2", True),
        ("marathon deployments: 1", True),
        ("marathon tasks: 3", True),
    ):
        assert expected in results
def test_assert_chronos_scheduled_jobs():
    """Only jobs that are not disabled count as enabled."""
    fake_client = Mock()
    fake_client.list.return_value = [
        {'name': 'myjob', 'disabled': False},
        {'name': 'myjob', 'disabled': True},
    ]
    assert metastatus_lib.assert_chronos_scheduled_jobs(fake_client) == (
        'Enabled chronos jobs: 1',
        True,
    )
def test_assert_chronos_queued_jobs_no_queued():
    """With empty queue gauges the check reports 0 queued jobs (0.0%)."""
    fake_client = Mock()
    fake_client.metrics.return_value = {
        'gauges': {
            metastatus_lib.HIGH_QUEUE_GAUGE: {'value': 0},
            metastatus_lib.QUEUE_GAUGE: {'value': 0},
        },
    }
    fake_client.list.return_value = [
        {'name': 'myjob', 'disabled': False},
        {'name': 'myjob', 'disabled': True},
    ]
    expected = metastatus_lib.HealthCheckResult(
        message="Jobs Queued: 0 (0.0%)",
        healthy=True,
    )
    assert metastatus_lib.assert_chronos_queued_jobs(fake_client) == expected
def test_assert_chronos_queued_jobs_queued():
    """One queued job out of two enabled jobs reports 50.0% queued."""
    fake_client = Mock()
    fake_client.metrics.return_value = {
        'gauges': {
            metastatus_lib.HIGH_QUEUE_GAUGE: {'value': 1},
            metastatus_lib.QUEUE_GAUGE: {'value': 0},
        },
    }
    fake_client.list.return_value = [
        {'name': 'myjob', 'disabled': False},
        {'name': 'myjob', 'disabled': False},
    ]
    expected = metastatus_lib.HealthCheckResult(
        message="Jobs Queued: 1 (50.0%)",
        healthy=True,
    )
    assert metastatus_lib.assert_chronos_queued_jobs(fake_client) == expected
@patch('paasta_tools.metrics.metastatus_lib.assert_chronos_queued_jobs', autospec=True)
@patch('paasta_tools.metrics.metastatus_lib.assert_chronos_scheduled_jobs', autospec=True)
def test_get_chronos_status(mock_scheduled_jobs, mock_queued_jobs):
    """get_chronos_status collects the scheduled-jobs and queued-jobs checks.

    Fix: @patch decorators are applied bottom-up, so the bottom decorator
    (assert_chronos_scheduled_jobs) maps to the FIRST parameter.  The
    original test had the two parameter names swapped, so each mock was
    configured with the other check's result — the assertion only passed
    by coincidence of the expected-list ordering.
    """
    scheduled_result = metastatus_lib.HealthCheckResult(
        message='Enabled chronos jobs: 1',
        healthy=True,
    )
    queued_result = metastatus_lib.HealthCheckResult(
        message="Jobs Queued: 0 (0%)",
        healthy=True,
    )
    mock_scheduled_jobs.return_value = scheduled_result
    mock_queued_jobs.return_value = queued_result
    expected_results = [scheduled_result, queued_result]
    assert metastatus_lib.get_chronos_status(Mock()) == expected_results
def test_status_for_results():
    """status_for_results extracts just the healthy flags, preserving order."""
    checks = [
        metastatus_lib.HealthCheckResult(message='message', healthy=True),
        metastatus_lib.HealthCheckResult(message='message', healthy=False),
    ]
    assert metastatus_lib.status_for_results(checks) == [True, False]
def test_generate_summary_for_results_ok():
    """A healthy check renders a green OK summary line."""
    expected = "Myservice Status: %s" % PaastaColors.green("OK")
    assert metastatus_lib.generate_summary_for_check("Myservice", True) == expected
def test_generate_summary_for_results_critical():
    """An unhealthy check renders a red CRITICAL summary line."""
    expected = "Myservice Status: %s" % PaastaColors.red("CRITICAL")
    assert metastatus_lib.generate_summary_for_check("Myservice", False) == expected
def test_critical_events_in_outputs():
    """Only unhealthy results survive the critical-events filter."""
    healthy = metastatus_lib.HealthCheckResult('myservice', True)
    unhealthy = metastatus_lib.HealthCheckResult('myservice_false', False)
    critical = metastatus_lib.critical_events_in_outputs([healthy, unhealthy])
    assert critical == [('myservice_false', False)]
def test_filter_mesos_state_metrics():
    """Only the canonical lowercase resource keys survive the filter."""
    raw_metrics = {
        'cpus': 0,
        'mem': 1,
        'MEM': 2,           # wrong case: must be dropped
        'garbage_data': 3,  # unknown key: must be dropped
        'disk': 4,
        'gpus': 5,
    }
    filtered = metastatus_lib.filter_mesos_state_metrics(raw_metrics)
    assert filtered == {'cpus': 0, 'mem': 1, 'disk': 4, 'gpus': 5}
def test_filter_slaves():
    """A slave passes only when every attribute filter matches."""
    filters = {"foo": ['one', 'two'], "bar": ['three', 'four']}
    filter_funcs = []
    for attribute, allowed_values in filters.items():
        filter_funcs.append(metastatus_lib.make_filter_slave_func(attribute, allowed_values))
    candidates = [
        {"name": "aaa", "attributes": {"foo": "one", "bar": "three"}},   # both match
        {"name": "bbb", "attributes": {"foo": "one"}},                   # "bar" missing
        {"name": "ccc", "attributes": {"foo": "wrong", "bar": "four"}},  # "foo" mismatch
    ]
    surviving = {slave["name"] for slave in metastatus_lib.filter_slaves(candidates, filter_funcs)}
    assert "aaa" in surviving
    assert "bbb" not in surviving
    assert "ccc" not in surviving
def test_group_slaves_by_key_func():
    """Slaves in distinct habitats end up in distinct groups of one each."""
    slaves = [
        {
            'id': 'somenametest-slave',
            'hostname': 'test.somewhere.www',
            'resources': {'cpus': 75, 'disk': 250, 'mem': 100},
            'attributes': {'habitat': 'somenametest-habitat'},
        },
        {
            'id': 'somenametest-slave2',
            'hostname': 'test2.somewhere.www',
            'resources': {'cpus': 500, 'disk': 200, 'mem': 750},
            'attributes': {'habitat': 'somenametest-habitat-2'},
        },
    ]
    grouped = metastatus_lib.group_slaves_by_key_func(
        lambda slave: slave['attributes']['habitat'],
        slaves,
    )
    assert len(grouped.items()) == 2
    for habitat, members in grouped.items():
        paasta_print(habitat, members)
        assert len(list(members)) == 1
@patch('paasta_tools.metrics.metastatus_lib.group_slaves_by_key_func', autospec=True)
@patch('paasta_tools.metrics.metastatus_lib.calculate_resource_utilization_for_slaves', autospec=True)
@patch('paasta_tools.metrics.metastatus_lib.get_all_tasks_from_state', autospec=True)
def test_get_resource_utilization_by_grouping(
    mock_get_all_tasks_from_state,
    mock_calculate_resource_utilization_for_slaves,
    mock_group_slaves_by_key_func,
):
    """Check the plumbing of get_resource_utilization_by_grouping.

    All collaborators are mocked, so this only verifies that tasks are
    fetched from the state (including orphans) and that every group key
    is paired with the utilization dict returned by the calculator.
    """
    # Two groups; the utilization calculator below returns the same dict
    # for each, so both groups must show identical totals/free values.
    mock_group_slaves_by_key_func.return_value = {
        'somenametest-habitat': [{
            'id': 'abcd',
            'hostname': 'test.somewhere.www',
        }],
        'somenametest-habitat-2': [{
            'id': 'abcd',
            'hostname': 'test2.somewhere.www',
        }],
    }
    mock_calculate_resource_utilization_for_slaves.return_value = {
        'free': metastatus_lib.ResourceInfo(cpus=10, mem=10, disk=10),
        'total': metastatus_lib.ResourceInfo(cpus=20, mem=20, disk=20),
    }
    state = {
        'frameworks': Mock(),
        'slaves': [{'id': 'abcd'}],
    }
    actual = metastatus_lib.get_resource_utilization_by_grouping(
        grouping_func=mock.sentinel.grouping_func,
        mesos_state=state,
    )
    # Orphan tasks must be included when collecting tasks from the state.
    mock_get_all_tasks_from_state.assert_called_with(state, include_orphans=True)
    assert sorted(actual.keys()) == sorted(['somenametest-habitat', 'somenametest-habitat-2'])
    for k, v in actual.items():
        assert v['total'] == metastatus_lib.ResourceInfo(
            cpus=20,
            disk=20,
            mem=20,
        )
        assert v['free'] == metastatus_lib.ResourceInfo(
            cpus=10,
            disk=10,
            mem=10,
        )
def test_get_resource_utilization_by_grouping_correctly_groups():
    """Utilization is accounted per group.

    Slave 'foo' runs one task that uses 1 of its 10 cpus, so grouping by
    slave id must report 9 cpus free for the 'foo' group.
    """
    fake_state = {
        'slaves': [
            {
                'id': 'foo',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'reserved_resources': {},
            },
            {
                'id': 'bar',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'reserved_resources': {},
            },
        ],
        'frameworks': [
            {'tasks': [
                {
                    'state': 'TASK_RUNNING',
                    'resources': {'cpus': 1, 'mem': 10, 'disk': 10},
                    'slave_id': 'foo',
                },
                {
                    'state': 'TASK_RUNNING',
                    'resources': {'cpus': 1, 'mem': 10, 'disk': 10},
                    'slave_id': 'bar',
                },
            ]},
        ],
    }
    # Group slaves by their own id, so each slave forms its own group.
    def grouping_func(x): return x['id']
    free_cpus = metastatus_lib.get_resource_utilization_by_grouping(
        mesos_state=fake_state,
        grouping_func=grouping_func,
    )['foo']['free'].cpus
    assert free_cpus == 9
def test_get_resource_utilization_by_grouping_correctly_multi_groups():
    """Grouping on multiple attributes yields one group per value combination.

    key_func_for_attribute_multi(['one', 'two']) over four slaves that cover
    all four (one, two) yes/no combinations must produce four groups, each
    keyed by a set of two (attribute, value) pairs.
    """
    fake_state = {
        'slaves': [
            {
                'id': 'foo1',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'attributes': {'one': 'yes', 'two': 'yes'},
                'reserved_resources': {},
            },
            {
                'id': 'bar1',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'attributes': {'one': 'yes', 'two': 'no'},
                'reserved_resources': {},
            },
            {
                'id': 'foo2',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'attributes': {'one': 'no', 'two': 'yes'},
                'reserved_resources': {},
            },
            {
                'id': 'bar2',
                'resources': {
                    'disk': 100,
                    'cpus': 10,
                    'mem': 50,
                },
                'attributes': {'one': 'no', 'two': 'no'},
                'reserved_resources': {},
            },
        ],
        'frameworks': [
            {'tasks': [
                {
                    'state': 'TASK_RUNNING',
                    'resources': {'cpus': 1, 'mem': 10, 'disk': 10},
                    'slave_id': 'foo1',
                },
                {
                    'state': 'TASK_RUNNING',
                    'resources': {'cpus': 1, 'mem': 10, 'disk': 10},
                    'slave_id': 'bar1',
                },
            ]},
        ],
    }
    grouping_func = metastatus_lib.key_func_for_attribute_multi(['one', 'two'])
    resp = metastatus_lib.get_resource_utilization_by_grouping(
        mesos_state=fake_state,
        grouping_func=grouping_func,
    )
    # resp should have 4 keys...
    assert(len(resp.keys()) == 4)
    # Each key should be a set with 2 items...
    assert(len(list(resp.keys())[0]) == 2)
    # Each item in the set should have 2 values (original key, value)
    assert(len(list(list(resp.keys())[0])[0]) == 2)
def test_get_resource_utilization_per_slave():
    """Totals sum across slaves; tasks and maintenance reservations reduce free.

    total = 75+500 cpus, 250+200 disk, 100+750 mem.  Free subtracts the two
    running tasks (10 each of cpu/mem/disk) plus the second slave's
    'maintenance' reservation (10 cpus, 0 disk, 150 mem).

    NOTE(review): despite the name, this exercises
    calculate_resource_utilization_for_slaves (same function as the test
    below) — consider renaming for clarity.
    """
    tasks = [
        {
            'resources': {
                'cpus': 10,
                'mem': 10,
                'disk': 10,
            },
            'state': 'TASK_RUNNING',
        },
        {
            'resources': {
                'cpus': 10,
                'mem': 10,
                'disk': 10,
            },
            'state': 'TASK_RUNNING',
        },
    ]
    slaves = [
        {
            'id': 'somenametest-slave',
            'hostname': 'test.somewhere.www',
            'resources': {
                'cpus': 75,
                'disk': 250,
                'mem': 100,
            },
            'reserved_resources': {
            },
            'attributes': {
                'habitat': 'somenametest-habitat',
            },
        },
        {
            'id': 'somenametest-slave2',
            'hostname': 'test2.somewhere.www',
            'resources': {
                'cpus': 500,
                'disk': 200,
                'mem': 750,
            },
            'reserved_resources': {
                'maintenance': {
                    'cpus': 10,
                    'disk': 0,
                    'mem': 150,
                },
            },
            'attributes': {
                'habitat': 'somenametest-habitat-2',
            },
        },
    ]
    actual = metastatus_lib.calculate_resource_utilization_for_slaves(
        slaves=slaves,
        tasks=tasks,
    )
    assert sorted(actual.keys()) == sorted(['total', 'free', 'slave_count'])
    assert actual['total'] == metastatus_lib.ResourceInfo(
        cpus=575,
        disk=450,
        mem=850,
    )
    assert actual['free'] == metastatus_lib.ResourceInfo(
        cpus=545,
        disk=430,
        mem=680,
    )
    assert actual['slave_count'] == 2
def test_calculate_resource_utilization_for_slaves():
    """GPUs are tracked alongside cpu/mem/disk.

    The slave offers 5 gpus; the two running tasks consume 1 + 2, leaving
    2 free (and 480 cpus, 730 mem, 180 disk after their other usage).
    """
    fake_slaves = [
        {
            'id': 'somenametest-slave2',
            'hostname': 'test2.somewhere.www',
            'resources': {
                'cpus': 500,
                'disk': 200,
                'mem': 750,
                'gpus': 5,
            },
            'reserved_resources': {},
            'attributes': {
                'habitat': 'somenametest-habitat-2',
            },
        },
    ]
    tasks = [
        {
            'resources': {
                'cpus': 10,
                'mem': 10,
                'disk': 10,
                'gpus': 1,
            },
            'state': 'TASK_RUNNING',
        },
        {
            'resources': {
                'cpus': 10,
                'mem': 10,
                'disk': 10,
                'gpus': 2,
            },
            'state': 'TASK_RUNNING',
        },
    ]
    free = metastatus_lib.calculate_resource_utilization_for_slaves(
        slaves=fake_slaves,
        tasks=tasks,
    )['free']
    assert free.cpus == 480
    assert free.mem == 730
    assert free.disk == 180
    assert free.gpus == 2
def test_healthcheck_result_for_resource_utilization_ok():
    """50% usage against a 90% threshold is healthy."""
    utilization = metastatus_lib.ResourceUtilization(
        metric='cpus',
        total=10,
        free=5,
    )
    expected = metastatus_lib.HealthCheckResult(
        message='cpus: 5.00/10.00(50.00%) used. Threshold (90.00%)',
        healthy=True,
    )
    assert metastatus_lib.healthcheck_result_for_resource_utilization(
        resource_utilization=utilization,
        threshold=90,
    ) == expected
def test_healthcheck_result_for_resource_utilization_unhealthy():
    """50% usage against a 10% threshold breaches it and is unhealthy."""
    utilization = metastatus_lib.ResourceUtilization(
        metric='cpus',
        total=10,
        free=5,
    )
    expected = metastatus_lib.HealthCheckResult(
        message='cpus: 5.00/10.00(50.00%) used. Threshold (10.00%)',
        healthy=False,
    )
    assert metastatus_lib.healthcheck_result_for_resource_utilization(
        resource_utilization=utilization,
        threshold=10,
    ) == expected
def test_healthcheck_result_for_resource_utilization_zero():
    """Zero total resources must not divide by zero and stays healthy."""
    utilization = metastatus_lib.ResourceUtilization(
        metric='cpus',
        total=0,
        free=0,
    )
    expected = metastatus_lib.HealthCheckResult(
        message='cpus: 0.00/0.00(0.00%) used. Threshold (10.00%)',
        healthy=True,
    )
    assert metastatus_lib.healthcheck_result_for_resource_utilization(
        resource_utilization=utilization,
        threshold=10,
    ) == expected
def test_format_table_column_for_healthcheck_resource_utilization_pair_healthy():
    """A healthy pair renders green, without humanized units."""
    fake_check = Mock()
    fake_check.healthy = True
    fake_usage = Mock()
    fake_usage.free = 10
    fake_usage.total = 20
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        False,
    )
    assert formatted == PaastaColors.green("10/20 (50.00%)")
def test_format_table_column_for_healthcheck_resource_utilization_pair_unhealthy():
    """An unhealthy pair renders red, without humanized units."""
    fake_check = Mock()
    fake_check.healthy = False
    fake_check.metric = 'mem'
    fake_usage = Mock()
    fake_usage.free = 10
    fake_usage.total = 20
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        False,
    )
    assert formatted == PaastaColors.red("10/20 (50.00%)")
def test_format_table_column_for_healthcheck_resource_utilization_pair_zero():
    """Zero total renders as 0/0 (100.00%) in red, without humanized units."""
    fake_check = Mock()
    fake_check.healthy = False
    fake_check.metric = 'mem'
    fake_usage = Mock()
    fake_usage.free = 0
    fake_usage.total = 0
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        False,
    )
    assert formatted == PaastaColors.red("0/0 (100.00%)")
def test_format_table_column_for_healthcheck_resource_utilization_pair_healthy_human():
    """Humanized output formats 'mem' values as megabytes, in green."""
    fake_check = Mock()
    fake_check.healthy = True
    fake_check.metric = 'mem'
    fake_usage = Mock()
    fake_usage.free = 10
    fake_usage.total = 20
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        True,
    )
    assert formatted == PaastaColors.green("10.0M/20.0M (50.00%)")
def test_format_table_column_for_healthcheck_resource_utilization_pair_unhealthy_human():
    """Humanized output formats 'mem' values as megabytes, in red when unhealthy."""
    fake_check = Mock()
    fake_check.healthy = False
    fake_check.metric = 'mem'
    fake_usage = Mock()
    fake_usage.free = 10
    fake_usage.total = 20
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        True,
    )
    assert formatted == PaastaColors.red("10.0M/20.0M (50.00%)")
def test_format_table_column_for_healthcheck_resource_utilization_pair_zero_human():
    """Humanized zero totals render as 0B/0B (100.00%) in red."""
    fake_check = Mock()
    fake_check.healthy = False
    fake_check.metric = 'mem'
    fake_usage = Mock()
    fake_usage.free = 0
    fake_usage.total = 0
    formatted = metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair(
        (fake_check, fake_usage),
        True,
    )
    assert formatted == PaastaColors.red("0B/0B (100.00%)")
@patch(
    'paasta_tools.metrics.metastatus_lib.format_table_column_for_healthcheck_resource_utilization_pair',
    autospec=True,
)
def test_format_row_for_resource_utilization_checks(mock_format_row):
    """Every (check, usage) pair is formatted exactly once."""
    pairs = [(Mock(), Mock()) for _ in range(3)]
    assert metastatus_lib.format_row_for_resource_utilization_healthchecks(pairs, False)
    assert mock_format_row.call_count == len(pairs)
@patch('paasta_tools.metrics.metastatus_lib.format_row_for_resource_utilization_healthchecks', autospec=True)
def test_get_table_rows_for_resource_usage_dict(mock_format_row):
    """The custom row label is prepended to the formatted healthcheck cells."""
    pairs = [(Mock(), Mock()) for _ in range(3)]
    mock_format_row.return_value = ['10/10', '10/10', '10/10']
    rows = metastatus_lib.get_table_rows_for_resource_info_dict(['myhabitat'], pairs, False)
    assert rows == ['myhabitat', '10/10', '10/10', '10/10']
def test_key_func_for_attribute():
    """key_func_for_attribute returns a plain function usable as a grouping key."""
    key_func = metastatus_lib.key_func_for_attribute('habitat')
    assert inspect.isfunction(key_func)
def test_get_mesos_disk_status():
    """Disk status is reported as (total, used, available)."""
    fake_metrics = {
        'master/disk_total': 100,
        'master/disk_used': 50,
    }
    assert metastatus_lib.get_mesos_disk_status(fake_metrics) == (100, 50, 50)
def test_get_mesos_gpu_status():
    """GPU status is reported as (total, used, available)."""
    fake_metrics = {
        'master/gpus_total': 3,
        'master/gpus_used': 1,
    }
    assert metastatus_lib.get_mesos_gpu_status(fake_metrics) == (3, 1, 2)
def test_reserved_maintenence_resources_no_maintenenance():
    """With no reservations every maintenance resource defaults to zero."""
    reserved = metastatus_lib.reserved_maintenence_resources({})
    for resource in ('cpus', 'mem', 'disk'):
        assert reserved[resource] == 0
def test_reserved_maintenence_resources():
    """Resources reserved under the 'maintenance' role are reported as-is."""
    reserved = metastatus_lib.reserved_maintenence_resources({
        'maintenance': {
            'cpus': 5,
            'mem': 5,
            'disk': 5,
        },
    })
    for resource in ('cpus', 'mem', 'disk'):
        assert reserved[resource] == 5
def test_reserved_maintenence_resources_ignores_non_maintenance():
    """Reservations under roles other than 'maintenance' are not counted."""
    reserved = metastatus_lib.reserved_maintenence_resources({
        'maintenance': {
            'cpus': 5,
            'mem': 5,
            'disk': 5,
        },
        'myotherole': {
            'cpus': 5,
            'mem': 5,
            'disk': 5,
        },
    })
    for resource in ('cpus', 'mem', 'disk'):
        assert reserved[resource] == 5
| 30.489927 | 109 | 0.583811 | 3,457 | 33,295 | 5.30431 | 0.094301 | 0.069477 | 0.029012 | 0.019087 | 0.743688 | 0.675956 | 0.600153 | 0.538038 | 0.468888 | 0.409009 | 0 | 0.028035 | 0.300436 | 33,295 | 1,091 | 110 | 30.517874 | 0.759231 | 0.021144 | 0 | 0.446921 | 0 | 0.004246 | 0.178056 | 0.027169 | 0 | 0 | 0 | 0 | 0.14862 | 1 | 0.064756 | false | 0 | 0.008493 | 0.001062 | 0.073248 | 0.002123 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21405a908f0dbb287266e8c8f77371225bd31b1c | 14,337 | py | Python | source/spark_runner/run_pipeline_multiple_files.py | VCCRI/GEOraclePlus | 6595cf449b4e9ead8317bcf56ebd8e173aa78bca | [
"MIT"
] | 3 | 2019-03-21T05:58:23.000Z | 2021-05-21T06:28:07.000Z | source/spark_runner/run_pipeline_multiple_files.py | VCCRI/GEOraclePlus | 6595cf449b4e9ead8317bcf56ebd8e173aa78bca | [
"MIT"
] | null | null | null | source/spark_runner/run_pipeline_multiple_files.py | VCCRI/GEOraclePlus | 6595cf449b4e9ead8317bcf56ebd8e173aa78bca | [
"MIT"
] | null | null | null | import argparse
import os
import shlex
import shutil
from subprocess import Popen, PIPE
from pyspark import SparkContext, SparkConf
import pandas as pd
import subprocess
import boto3
import re
# NOTE(review): "global" at module scope is a no-op; parser_result is just a
# module-level name assigned in the __main__ block below.
global parser_result

# Fixed working directories expected on every worker node.
APPLICATION_FOLDER = "/mnt/app"
GENOME_REFERENCES_FOLDER = "/mnt/ref"
TEMP_OUTPUT_FOLDER = "/mnt/output"

# Metric names harvested from a STAR alignment summary.
star_collected_metrics = ["number of input reads", "uniquely mapped reads number", "number of splices: total",
                          "number of splices: annotated (sjdb)", "number of splices: gt/ag", "number of splices: gc/ag",
                          "number of splices: at/ac", "number of splices: non-canonical",
                          "number of reads mapped to multiple loci", "number of reads mapped to too many loci"]

# Columns read from Picard's CollectRnaSeqMetrics report (see run_picard).
picard_collected_metrics = ['pf_bases', 'pf_aligned_bases', 'ribosomal_bases', 'coding_bases', 'utr_bases',
                            'intronic_bases', 'intergenic_bases', 'ignored_reads', 'correct_strand_reads',
                            'incorrect_strand_reads']

# Patterns for parsing a HISAT2 alignment summary — not referenced anywhere
# in this file's visible code (kept for the HISAT2 aligner option).
hisat_ignore_metrics_pattern = r"^[\d.]+\%"
hisat_extract_metrics_pattern = r"(\d+)\s?(\([\d.]+\%\))?\s?(.*)\:?"
#################################
# File splitting
#################################
def split_interleaved_file(file_prefix, file_content, output_dir):
    """
    Unpacks an interleaved file into the standard FASTQ format.

    A single-end record is 4 tab-separated fields per line; a paired-end
    record is 8 (read one's four fields followed by read two's).  Pairedness
    is decided from the first line only, matching the original behaviour.

    Fix over the original: output files are managed with ``with`` so the
    handles are closed even if a write fails (the original left both files
    open on any exception).

    :param file_prefix: the prefix of the file name
    :param file_content: the lines of content from the input file
    :param output_dir: the location to store the unpacked files
    :return: a tuple with first element being a list of output file names
             (1 for se, 2 for pe); 2nd element a boolean flag - True if pe
             data, False otherwise
    """
    fastq_line_count_se = 4
    fastq_line_count_pe = 8

    lines = file_content.strip().split("\n")
    paired_reads = len(lines[0].strip().split("\t")) == fastq_line_count_pe

    file_prefix = output_dir + "/" + file_prefix
    output_file_names = [file_prefix + "_1.fq"]
    if paired_reads:
        output_file_names.append(file_prefix + "_2.fq")

    if paired_reads:
        with open(output_file_names[0], 'w') as out_one, open(output_file_names[1], 'w') as out_two:
            for line in lines:
                parts = line.strip().split("\t")
                # Skip malformed records rather than writing a broken pair.
                if len(parts) != fastq_line_count_pe:
                    continue
                out_one.write("\n".join(parts[:fastq_line_count_se]) + "\n")
                out_two.write("\n".join(parts[fastq_line_count_se:]) + "\n")
    else:
        with open(output_file_names[0], 'w') as out_one:
            for line in lines:
                # Single-end: tabs separate the 4 FASTQ lines of one record.
                out_one.write(line.strip().replace("\t", "\n") + "\n")

    return output_file_names, paired_reads
def run_kallisto(file_names, output_dir):
    """Quantify transcript abundance for one sample with kallisto.

    :param file_names: list of FASTQ paths (1 entry = single-end, 2 = paired-end)
    :param output_dir: directory in which a per-sample output folder is created
    :return: list of ("<sample>\\t<transcript>", count) tuples
    :raises ValueError: if kallisto exits with a non-zero return code
    """
    print("Aligning reads...")
    paired_read = len(file_names) == 2
    sample_name = os.path.splitext(os.path.basename(file_names[0]))[0]
    kallisto_output_dir = "{}/{}".format(output_dir, sample_name)

    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(kallisto_output_dir, exist_ok=True)

    # Build the kallisto invocation; single-end data needs an explicit
    # fragment-length distribution (-l/-s) since it cannot be estimated.
    aligner_args = "{app_folder}/kallisto quant {aligner_extra_args} -i {index_folder} -o {output_folder} " \
                   "{single_read_args} {fastq_file_names}". \
        format(app_folder=APPLICATION_FOLDER,
               aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
               index_folder=GENOME_REFERENCES_FOLDER + "/kallisto_index/transcripts.idx",
               fastq_file_names=" ".join(file_names),
               output_folder=kallisto_output_dir,
               single_read_args="--single -l 200 -s 20" if not paired_read else "")
    print("Command: " + aligner_args)

    aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
    aligner_out, aligner_error = aligner_process.communicate()

    if aligner_process.returncode != 0:
        # Fixed: the original message used "\A" (not a newline) and mixed
        # "Kallisto"/"Aligner" labels in the same sentence.
        raise ValueError("Kallisto failed to complete (Non-zero return code)!\n"
                         "Kallisto stdout: {std_out}\nKallisto stderr: {std_err}".format(std_out=aligner_out,
                                                                                         std_err=aligner_error))
    print('Completed reads alignment')

    counter_output = []
    with open(kallisto_output_dir + "/abundance.tsv") as f:
        for index, line in enumerate(f):
            if index == 0:  # header row
                continue
            fields = line.strip().split()
            if not fields:
                # Fixed: the original printed the blank line and then crashed
                # on fields[0]; skip blank lines instead.
                continue
            # fields[3] is kallisto's est_counts column -- see the kallisto
            # output documentation.
            gene, count = fields[0], fields[3]
            counter_output.append((sample_name + "\t" + gene, int(float(count))))
    return counter_output
#################################
# Picard tools
#################################
def run_picard(sample_name, aligned_output_filepath, picard_output_dir):
    """Run Picard CollectRnaSeqMetrics on aligned reads and collect QC values.

    :param sample_name: sample identifier used to prefix the emitted metric keys
    :param aligned_output_filepath: path to the aligned reads file (Picard "I=")
    :param picard_output_dir: directory for Picard's output.RNA_Metrics report
    :return: list of ("<sample>\\tQC_picard_<metric>", value) tuples
    :raises ValueError: if the Picard report file was not produced
    """
    print("Getting alignment metrics...")
    picard_args = "java8 -jar {}/picard-tools/picard.jar CollectRnaSeqMetrics I={} O={}/output.RNA_Metrics " \
                  "REF_FLAT={}/refFlat.txt STRAND={} {}". \
        format(APPLICATION_FOLDER, aligned_output_filepath, picard_output_dir, GENOME_REFERENCES_FOLDER + "/genome_ref",
               parser_result.strand_specificity, parser_result.picard_extra_args)
    print("Command: " + picard_args)
    picard_process = Popen(shlex.split(picard_args), stdout=PIPE, stderr=PIPE)
    picard_out, picard_error = picard_process.communicate()
    # Success is detected by the presence of the report file, not the
    # return code.
    if not os.path.isfile(picard_output_dir + "/output.RNA_Metrics"):
        raise ValueError("Picard tools failed to complete (No output file is found)!\n"
                         "Picard tools stdout: {} \nPicard tools stderr: {}".format(picard_out, picard_error))
    picard_qc_output = []
    with open(picard_output_dir + "/output.RNA_Metrics") as picard_qc:
        picard_lines = picard_qc.readlines()
        index = 0
        while index < len(picard_lines):
            current_line = picard_lines[index].strip()
            # The "## METRICS CLASS" section is followed by one header row
            # and one value row; zip them into a name->value mapping.
            if current_line.startswith("##") and current_line[2:].strip().startswith("METRICS CLASS"):
                picard_metric_header = picard_lines[index + 1].strip().lower().split("\t")
                picard_metric_value = picard_lines[index + 2].strip().split("\t")
                metrics = dict(zip(picard_metric_header, picard_metric_value))
                for metric in picard_collected_metrics:
                    # Some columns (e.g. ribosomal_bases) can be empty; only
                    # emit metrics that actually have a value.
                    if metrics[metric] != "":
                        picard_qc_output.append((sample_name + "\t" + "QC_picard_" + metric, int(metrics[metric])))
                # Skip the header/value rows just consumed.
                index += 2
            index += 1
    return picard_qc_output
def sum_gene_counts(cumulative_count, current_count):
    """Reducer: add two partial read counts for the same (sample, gene) key."""
    total = cumulative_count + current_count
    return total
def set_gene_id_as_key(keyval):
    """Re-key a ("sample\\tgene", count) pair as (gene, [(sample, count)])."""
    composite_key, count = keyval
    sample_name, gene_id = composite_key.split("\t")
    if gene_id == "QC_STAR_total_reads":
        # Debug aid carried over from the original pipeline.
        print(keyval)
    return gene_id, [(sample_name, count)]
def merge_count_by_gene_id(file_count_one, file_count_two):
    """Reducer: concatenate the per-sample (sample, count) lists for a gene."""
    merged = list(file_count_one)
    merged.extend(file_count_two)
    return merged
def process_count_by_gene_id(keyval):
    """Turn (gene, [(sample, count), ...]) into a one-row DataFrame indexed by gene."""
    gene_id, sample_counts = keyval
    row = dict(sample_counts)
    return pd.DataFrame(row, index=[gene_id])
def combine_gene_counts(df_one, df_two):
    """Reducer: stack two per-gene count DataFrames into one.

    Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
    and removed entirely in pandas 2.0.
    """
    return pd.concat([df_one, df_two])
#################################
# Main functions
#################################
def alignment_count_step(keyval):
    """Spark map step: rebuild FASTQ file(s) from one interleaved part and quantify it.

    :param keyval: (file_name, file_content) pair from sc.wholeTextFiles
    :return: list of ("<sample>\\t<gene>", count) tuples for this part
    """
    file_name, file_content = keyval
    # "path/sampleX_part3.txt" -> working-dir prefix "sampleX_part3"
    prefix = file_name.rstrip("/").split("/")[-1].split(".")[0]

    # exist_ok replaces the original bare try/except around os.mkdir, which
    # also swallowed unrelated OS errors.
    alignment_dir = TEMP_OUTPUT_FOLDER + "/alignment_" + prefix
    os.makedirs(alignment_dir, exist_ok=True)

    print("Recreating FASTQ file(s)")
    split_file_names, paired_reads = split_interleaved_file(prefix, file_content, alignment_dir)
    print("Recreating FASTQ file(s) complete. Files recreated: {}".format(",".join(split_file_names)))

    alignment_output_dir = alignment_dir + "/aligner_output"
    os.makedirs(alignment_output_dir, exist_ok=True)

    # NOTE(review): the disabled per-sample picard QC step and temp-dir
    # cleanup from the original code were removed here; reintroduce
    # run_picard()/shutil.rmtree() if QC metrics are needed.
    return run_kallisto(split_file_names, alignment_output_dir)
if __name__ == "__main__":
    # Driver: parse CLI options, count the input parts so Spark creates one
    # partition per file, run alignment+counting, then pivot the (sample,
    # gene, count) triples into expression and QC CSV matrices.
    global parser_result
    parser = argparse.ArgumentParser(description='Spark-based RNA-seq Pipeline')
    parser.add_argument('--input', '-i', action="store", dest="input_dir", help="Input directory - HDFS or S3")
    parser.add_argument('--output', '-o', action="store", dest="output_dir", help="Output directory - HDFS or S3")
    parser.add_argument('--annotation', '-a', action="store", dest="annotation_file",
                        help="Name of annotation file to be used")
    parser.add_argument('--strand_specificity', '-ss', action="store", dest="strand_specificity", nargs='?',
                        help="Strand specificity: NONE|FIRST_READ_TRANSCRIPTION_STRAND|SECOND_READ_TRANSCRIPTION_STRAND"
                        , default="NONE")
    parser.add_argument('--run_picard', '-rp', action="store_true", dest="run_picard", help="Run picard")
    parser.add_argument('--aligner_tools', '-at', action="store", dest="aligner", nargs='?',
                        help="Aligner to be used (STAR|HISAT2)", default="STAR")
    parser.add_argument('--aligner_extra_args', '-s', action="store", dest="aligner_extra_args", nargs='?',
                        help="Extra argument to be passed to alignment tool", default="")
    parser.add_argument('--counter_tools', '-ct', action="store", dest="counter", nargs='?',
                        help="Counter to be used (featureCount|StringTie)", default="featureCount")
    parser.add_argument('--counter_extra_args', '-c', action="store", dest="counter_extra_args", nargs='?',
                        help="Extra argument to be passed to quantification tool", default="")
    parser.add_argument('--picard_extra_args', '-p', action="store", dest="picard_extra_args", nargs='?',
                        help="Extra argument to be passed to picard tools", default="")
    parser.add_argument('--region', '-r', action="store", dest="aws_region", help="AWS region")
    parser_result = parser.parse_args()
    split_num = 0
    conf = SparkConf().setAppName("Spark-based RNA-seq Pipeline Multifile")
    sc = SparkContext(conf=conf)
    # Count the input files up front so wholeTextFiles gets one partition
    # per file (split_num below).
    if parser_result.input_dir.startswith("s3://"):  # From S3
        s3_client = boto3.client('s3', region_name=parser_result.aws_region)
        # Get number of input files
        s3_paginator = s3_client.get_paginator('list_objects')
        input_bucket, key_prefix = parser_result.input_dir[5:].strip().split("/", 1)
        input_file_num = 0
        for result in s3_paginator.paginate(Bucket=input_bucket, Prefix=key_prefix):
            for file in result.get("Contents"):
                input_file_num += 1
        if input_file_num == 0:
            raise ValueError("Input directory is invalid or empty!")
        split_num = input_file_num
    else:  # From HDFS
        hdfs_process = Popen(shlex.split("hdfs dfs -count {}".format(parser_result.input_dir)),
                             stdout=PIPE, stderr=PIPE)
        hdfs_out, hdfs_error = hdfs_process.communicate()
        if hdfs_error:
            raise ValueError("Input directory is invalid or empty!")
        dir_count, file_count, size, path = hdfs_out.strip().split()
        split_num = int(file_count)
    input_files = sc.wholeTextFiles(parser_result.input_dir, split_num)
    # ("sample\tgene", count) triples, summed across file parts, then
    # pivoted into one single-row DataFrame per gene and stacked.
    count_output = input_files.flatMap(alignment_count_step).reduceByKey(sum_gene_counts)
    count_by_gene = count_output.map(set_gene_id_as_key).reduceByKey(merge_count_by_gene_id) \
        .map(process_count_by_gene_id)
    count_summary = count_by_gene.reduce(combine_gene_counts)
    # QC rows are keyed with a "QC_" prefix; split them from expression rows.
    count_qc_index = [f.startswith("QC_") for f in count_summary.index]
    count_only_index = [not x for x in count_qc_index]
    count_only_summary = count_summary[count_only_index]
    count_qc_summary = count_summary[count_qc_index]
    # If normalisation is required
    # count_summary = count_summary.apply(lambda x: x / np.sum(x) * 1000000)
    expressions_file = 'samples_expression.csv'
    qc_report_file = 'samples_qc_report.csv'
    count_only_summary = count_only_summary.sort_index()
    count_only_summary.to_csv(expressions_file)
    count_qc_summary = count_qc_summary.sort_index()
    count_qc_summary.to_csv(qc_report_file)
    # NOTE(review): the output destination is chosen from input_dir, not
    # output_dir — mixing an S3 input with an HDFS output (or vice versa)
    # would misroute the results.  Also, s3_client only exists when the
    # S3 input branch above ran.
    if parser_result.input_dir.startswith("s3://"):  # From S3
        output_bucket, key_prefix = parser_result.output_dir.strip().strip("/")[5:].split("/", 1)
        s3_client.upload_file(expressions_file, output_bucket, key_prefix + "/" + expressions_file)
        s3_client.upload_file(qc_report_file, output_bucket, key_prefix + "/" + qc_report_file)
    else:
        subprocess.call(["hdfs", "dfs", "-mkdir", "-p", parser_result.output_dir.rstrip("/")])
        subprocess.call(["hdfs", "dfs", "-put", expressions_file, parser_result.output_dir.rstrip("/") + "/"
                         + expressions_file])
        subprocess.call(["hdfs", "dfs", "-put", qc_report_file, parser_result.output_dir.rstrip("/") + "/"
                         + qc_report_file])
    os.remove(expressions_file)
    os.remove(qc_report_file)
| 41.677326 | 120 | 0.651671 | 1,814 | 14,337 | 4.854465 | 0.189636 | 0.024529 | 0.021236 | 0.011356 | 0.185215 | 0.104247 | 0.066659 | 0.033954 | 0.023734 | 0.014649 | 0 | 0.005801 | 0.218456 | 14,337 | 343 | 121 | 41.798834 | 0.780098 | 0.083979 | 0 | 0.076577 | 0 | 0 | 0.198042 | 0.024474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0.013514 | 0.045045 | 0.013514 | 0.126126 | 0.04955 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2141317fbf5f3992f4c511744ecc5272b889b6f8 | 3,198 | py | Python | Scripts/Webscraping 2.0.py | PatrikLah/VerkonRaapiminen | 7df7f00654250b6f66c449ded6fb9c36fad4f548 | [
"Apache-2.0"
] | null | null | null | Scripts/Webscraping 2.0.py | PatrikLah/VerkonRaapiminen | 7df7f00654250b6f66c449ded6fb9c36fad4f548 | [
"Apache-2.0"
] | null | null | null | Scripts/Webscraping 2.0.py | PatrikLah/VerkonRaapiminen | 7df7f00654250b6f66c449ded6fb9c36fad4f548 | [
"Apache-2.0"
] | null | null | null | from requests_html import HTMLSession
import pandas as pd
import time
#Render Dynamic Pages - Web Scraping Product Links with Python
#Virvoitusjuomat = 1022 | sivuja n. 5
#Mehut = 1018 | sivuja n. 9
#Vedet = 1028 | sivuja n. 4
#Energiajuomat = 1038 | n. 2
category_code = 1008
page_count = 1
category_string = ""
if category_code == 1022:
category_string = "virkkarit"
page_count = 5
elif category_code == 1018:
category_string = "mehut"
page_count = 9
elif category_code == 1028:
category_string = "vedet"
page_count = 4
elif category_code == 1038:
category_string = "energiajuomat"
page_count = 2
url =f"https://www.foodie.fi/products/{kategoria_koodi}"
# One shared HTML session, reused for listing pages and product pages.
s = HTMLSession()
# Accumulates one dict per scraped product; written out by output().
product_info_list = []
def removeFormatting(s):
    """Strip every character outside printable ASCII (codes 32..125)."""
    kept = [ch for ch in s if 31 < ord(ch) < 126]
    return "".join(kept)
def request(url):
    """Fetch a listing page, render its JavaScript, and return the product list.

    Returns the <ul> element located by the XPath below — presumably None
    when the page layout changes and nothing matches (per requests_html's
    first=True semantics; TODO confirm).
    """
    r = s.get(url)
    # render() executes the page's JavaScript; sleep gives it time to settle.
    r.html.render(sleep=1)
    return r.html.xpath("/html/body/div[5]/div[2]/div[7]/div/div[2]/div[2]/div/div[2]/div/ul", first = True)
def parse(products):
    """Scrape the detail page of every product link and record its fields.

    Appends one dict per product to the module-level product_info_list.

    Fixes over the original: all fields are reset at the top of each
    iteration, so a failed lookup can no longer raise NameError on the
    first product or silently reuse the previous product's values; the
    bare except is narrowed to Exception; the unused nutrition_table
    variable is gone.
    """
    for item in products.absolute_links:
        r = s.get(item)
        # Defaults used when a selector fails below.
        gtin = brand = name = price = quantity = ""
        likes = dislikes = country = formatted_nutr_table = ""
        try:
            # Drop the leading label text before the GTIN value
            # (first 18 characters) — TODO confirm the prefix length.
            gtin = (r.html.find('div.aisle', first=True).text)[18:]
            brand = r.html.find('div.col-sm-7 > h2', first=True).text
            name = r.html.find('div.col-sm-7 > h1', first=True).text
            price = r.html.find('div.price', first=True).text
            quantity = r.html.find('div.js-quantity', first=True).text
            likes = r.html.find('span.js-like-count', first=True).text
            dislikes = r.html.find('span.js-dislike-count', first=True).text
            country = r.html.find('#origin > p', first=True).text
            nutr_info = r.html.find('div.nutritions > table.data-table', first=True)
            formatted_nutr_table = removeFormatting(nutr_info.text)
        except Exception:
            print("Tuotteen tietoja puuttui")
        product_info_list.append({
            "GTIN": gtin,
            "Brand": brand,
            "name": name,
            "price": price,
            "quantity": quantity,
            "likes": likes,
            "dislikes": dislikes,
            "country": country,
            "nutrition table": formatted_nutr_table,
        })
def output():
    """Write every product collected so far to a per-category CSV file."""
    frame = pd.DataFrame(product_info_list)
    frame.to_csv(f"prisma_{category_string}.csv", index=False)
    print("Tallennettu")
# Crawl every listing page of the selected category, pausing between pages.
for x in range(1, page_count + 1):
    products = request(f"https://www.foodie.fi/products/{category_code}/page/{x}?main_view=1")
    print(f"Sivulla: {x}")
    parse(products)
    print("Saatiin tuotteita:", len(product_info_list))
    time.sleep(3)
output() | 32.969072 | 108 | 0.590056 | 415 | 3,198 | 4.445783 | 0.337349 | 0.03523 | 0.04878 | 0.045528 | 0.062873 | 0.046612 | 0.019512 | 0 | 0 | 0 | 0 | 0.02827 | 0.258912 | 3,198 | 97 | 109 | 32.969072 | 0.750211 | 0.164478 | 0 | 0 | 0 | 0.014286 | 0.19481 | 0.043625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.042857 | 0.014286 | 0.128571 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21418a1620b85c8b126f4c0005971414afc84bc6 | 3,718 | py | Python | cogs/welcomer.py | Abhiraj441/Superior | 1c597cc367f1fadbabf124c83e4684e35bc36a4e | [
"MIT"
] | null | null | null | cogs/welcomer.py | Abhiraj441/Superior | 1c597cc367f1fadbabf124c83e4684e35bc36a4e | [
"MIT"
] | null | null | null | cogs/welcomer.py | Abhiraj441/Superior | 1c597cc367f1fadbabf124c83e4684e35bc36a4e | [
"MIT"
] | null | null | null | from discord.ext import commands
import discord
import random
import datetime
import sqlite3
class Welcomer(commands.Cog):
    """Per-guild configurable welcome messages.

    Configuration (welcome channel id and message template) is persisted in
    the `main` table of `main.sqlite`, keyed by guild id.
    """

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Post the configured welcome embed when a member joins."""
        db = sqlite3.connect("main.sqlite")
        cursor = db.cursor()
        try:
            # Parameterized queries instead of f-string SQL interpolation.
            cursor.execute(
                "SELECT channel_id FROM main WHERE guild_id = ?", (member.guild.id,)
            )
            result = cursor.fetchone()
            if result is None:
                # Guild has not configured a welcome channel.
                return
            cursor.execute(
                "SELECT msg FROM main WHERE guild_id = ?", (member.guild.id,)
            )
            result1 = cursor.fetchone()
        finally:
            # BUG FIX: the original leaked the connection on every join.
            cursor.close()
            db.close()
        members = len(list(member.guild.members))
        mention = member.mention
        user = member.name
        guild = member.guild
        embed = discord.Embed(
            color=discord.Color.dark_green(),
            description=str(result1[0]).format(
                members=members, mention=mention, user=user, guild=guild
            ),
        )
        embed.set_thumbnail(url=f"{member.avatar_url}")
        embed.set_author(name=f"{member.name}", icon_url=f"{member.avatar_url}")
        embed.set_footer(
            text=f"{member.guild}", icon_url=f"{member.guild.icon_url}"
        )
        # The original also passed a misspelled `timestap=` kwarg to the
        # Embed constructor; the timestamp is set explicitly here anyway.
        embed.timestamp = datetime.datetime.utcnow()
        channel = self.client.get_channel(int(result[0]))
        await channel.send(embed=embed)

    @commands.group(invoke_without_command=True)
    async def welcomer(self, ctx):
        """List the available welcomer setup subcommands."""
        # BUG FIX: the help text advertised `welcomer text`, but the actual
        # subcommand below is `set_text`.
        await ctx.send(
            "**Available Setup Commands:** welcomer set_channel <#channel> & welcomer set_text <text>"
        )

    @welcomer.command()
    async def set_channel(self, ctx, channel: discord.TextChannel):
        """Set or update the guild's welcome channel (requires manage_messages)."""
        if not ctx.message.author.guild_permissions.manage_messages:
            return
        db = sqlite3.connect("main.sqlite")
        cursor = db.cursor()
        cursor.execute(
            "SELECT channel_id FROM main WHERE guild_id = ?", (ctx.guild.id,)
        )
        result = cursor.fetchone()
        if result is None:
            sql = "INSERT INTO main(guild_id, channel_id) VALUES(?, ?)"
            val = (ctx.guild.id, channel.id)
            await ctx.send(f"Welcome channel set to **{channel.mention}**")
        else:
            sql = "UPDATE main SET channel_id = ? WHERE guild_id = ?"
            val = (channel.id, ctx.guild.id)
            await ctx.send(f"Welcome channel updated to **{channel.mention}**")
        cursor.execute(sql, val)
        db.commit()
        cursor.close()
        db.close()

    @welcomer.command()
    async def set_text(self, ctx, *, text):
        """Set or update the guild's welcome message template."""
        if not ctx.message.author.guild_permissions.manage_messages:
            return
        db = sqlite3.connect("main.sqlite")
        cursor = db.cursor()
        cursor.execute("SELECT msg FROM main WHERE guild_id = ?", (ctx.guild.id,))
        result = cursor.fetchone()
        if result is None:
            sql = "INSERT INTO main(guild_id, msg) VALUES(?, ?)"
            val = (ctx.guild.id, text)
            await ctx.send(f"Welcome message set to **{text}**")
        else:
            sql = "UPDATE main SET msg = ? WHERE guild_id = ?"
            val = (text, ctx.guild.id)
            await ctx.send(f"Welcome message updated to **{text}**")
        cursor.execute(sql, val)
        db.commit()
        cursor.close()
        db.close()
def setup(client):
    # Extension entry point called by discord.py when this cog is loaded.
    client.add_cog(Welcomer(client))
| 38.329897 | 98 | 0.563206 | 424 | 3,718 | 4.849057 | 0.233491 | 0.054475 | 0.035019 | 0.038911 | 0.519455 | 0.457198 | 0.440661 | 0.396887 | 0.358463 | 0.304475 | 0 | 0.00716 | 0.32383 | 3,718 | 96 | 99 | 38.729167 | 0.81066 | 0 | 0 | 0.321839 | 0 | 0 | 0.210059 | 0.017483 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022989 | false | 0 | 0.057471 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214429602fec8e745dc0430d164010181f0164da | 1,986 | py | Python | models/srresnet.py | 4tif4nwer/image-super-resolution | 12dd2f0bd601f5733e529d84ccfac204f0ac7bc5 | [
"Apache-2.0"
] | 9 | 2021-09-08T08:42:37.000Z | 2022-03-13T21:22:29.000Z | models/srresnet.py | 4tif4nwer/image-super-resolution | 12dd2f0bd601f5733e529d84ccfac204f0ac7bc5 | [
"Apache-2.0"
] | null | null | null | models/srresnet.py | 4tif4nwer/image-super-resolution | 12dd2f0bd601f5733e529d84ccfac204f0ac7bc5 | [
"Apache-2.0"
] | 5 | 2021-08-20T13:45:37.000Z | 2022-01-29T09:45:26.000Z | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Add, Lambda
from tensorflow.python.keras.layers import PReLU
from utils.normalization import normalize_01, denormalize_m11
# Number of x2 PixelShuffle stages per supported overall upscaling factor
# (2 ** stages == scale).
upsamples_per_scale = {
    2: 1,
    4: 2,
    8: 3
}
# Pretrained weight bundles keyed by model name: download URL of the
# generator weights and the scale they were trained for.
pretrained_srresnet_models = {
    "srresnet_bicubic_x4": {
        "url": "https://image-super-resolution-weights.s3.af-south-1.amazonaws.com/srresnet_bicubic_x4/generator.h5",
        "scale": 4
    }
}
def pixel_shuffle(scale):
    """Return a callable that rearranges channels into space by *scale*."""
    def shuffle(feature_map):
        return tf.nn.depth_to_space(feature_map, scale)
    return shuffle
def upsample(x_in, num_filters):
    """Conv3x3 -> PixelShuffle(x2) -> PReLU upsampling stage."""
    out = Conv2D(num_filters, kernel_size=3, padding='same')(x_in)
    out = Lambda(pixel_shuffle(scale=2))(out)
    out = PReLU(shared_axes=[1, 2])(out)
    return out
def residual_block(block_input, num_filters, momentum=0.8):
    """Two Conv-BN stages (PReLU in between) plus an identity skip connection."""
    out = Conv2D(num_filters, kernel_size=3, padding='same')(block_input)
    out = BatchNormalization(momentum=momentum)(out)
    out = PReLU(shared_axes=[1, 2])(out)
    out = Conv2D(num_filters, kernel_size=3, padding='same')(out)
    out = BatchNormalization(momentum=momentum)(out)
    return Add()([block_input, out])
def build_srresnet(scale=4, num_filters=64, num_res_blocks=16):
    """Build the SRResNet generator for the given upscaling factor.

    Raises ValueError when *scale* is not one of the supported factors.
    """
    if scale not in upsamples_per_scale:
        raise ValueError(f"available scales are: {upsamples_per_scale.keys()}")
    num_upsamples = upsamples_per_scale[scale]
    # Low-resolution input; spatial size is left dynamic.
    lr = Input(shape=(None, None, 3))
    net = Lambda(normalize_01)(lr)
    net = Conv2D(num_filters, kernel_size=9, padding='same')(net)
    # Keep a handle on the pre-residual activation for the long skip below.
    net = skip = PReLU(shared_axes=[1, 2])(net)
    for _ in range(num_res_blocks):
        net = residual_block(net, num_filters)
    net = Conv2D(num_filters, kernel_size=3, padding='same')(net)
    net = BatchNormalization()(net)
    net = Add()([skip, net])
    for _ in range(num_upsamples):
        net = upsample(net, num_filters * 4)
    net = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(net)
    sr = Lambda(denormalize_m11)(net)
    return Model(lr, sr)
| 28.371429 | 117 | 0.685297 | 295 | 1,986 | 4.413559 | 0.315254 | 0.076805 | 0.038402 | 0.065284 | 0.317972 | 0.271121 | 0.166667 | 0.166667 | 0.166667 | 0.136713 | 0 | 0.031902 | 0.179255 | 1,986 | 69 | 118 | 28.782609 | 0.766871 | 0 | 0 | 0.083333 | 0 | 0.020833 | 0.102719 | 0.014099 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.104167 | 0.020833 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21478a5a0f87a81a930087f6511cec8be95258ab | 21,338 | py | Python | config/settings/base.py | BanatIT/oxigen | 40d50d06de0ea4f3a73507ce96c9aa62b74c3608 | [
"MIT"
] | 1 | 2022-02-27T19:32:07.000Z | 2022-02-27T19:32:07.000Z | config/settings/base.py | BanatIT/oxigen | 40d50d06de0ea4f3a73507ce96c9aa62b74c3608 | [
"MIT"
] | 2 | 2020-11-29T12:31:06.000Z | 2022-02-28T17:46:40.000Z | config/settings/base.py | BanatIT/oxigen | 40d50d06de0ea4f3a73507ce96c9aa62b74c3608 | [
"MIT"
] | 1 | 2020-11-26T20:07:07.000Z | 2020-11-26T20:07:07.000Z | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# oxigen_api/
APPS_DIR = ROOT_DIR / "oxigen_api"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"jazzmin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
# "allauth",
# "allauth.account",
# "allauth.socialaccount",
"django_celery_beat",
"django_celery_results",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"ckeditor"
]
LOCAL_APPS = [
"oxigen_api.donors.apps.DonorsConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "oxigen_api.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
# AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
# LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
# LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# CACHEs
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://redis:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# SESSION_CACHE_ALIAS = "default"
CACHE_TTL = 60 * 1
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"oxigen_api.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Costin Bleotu""", "costi@banatit.ro")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
# ACCOUNT_ADAPTER = "oxigen_api.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
# SOCIALACCOUNT_ADAPTER = "oxigen_api.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.AllowAny",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
CORS_ALLOWED_ORIGINS = env.list("CORS_ALLOWED_ORIGINS", default=[
"https://oxigen.primariatm.ro",
"https://oxigen.banatit.ro",
"http://localhost:8080"
])
# django-jazzmin
# -------------------------------------------------------------------------------
# django-jazzmin - https://django-jazzmin.readthedocs.io/configuration/
JAZZMIN_SETTINGS = {
# title of the window
"site_title": "Oxigen pentru Timișoara",
# Title on the brand, and the login screen (19 chars max)
"site_header": "#oxigenTM",
# square logo to use for your site, must be present in static files, used for favicon and brand on top left
"site_logo": "images/O2TM.png",
# Welcome text on the login screen
"welcome_sign": "Welcome",
# Copyright on the footer
"copyright": "AntiCovid-19",
# The model admin to search from the search bar, search bar omitted if excluded
"search_model": "donors.Donor",
# Field name on user model that contains avatar image
"user_avatar": None,
############
# Top Menu #
############
# Links to put along the top menu
"topmenu_links": [
# Url that gets reversed (Permissions can be added)
{"name": "Home", "url": "admin:index", "permissions": ["auth.view_user"]},
# external url that opens in a new window (Permissions can be added)
{"name": "View website", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
# model admin to link to (Permissions checked against model)
{"model": "auth.User"},
],
#############
# User Menu #
#############
# Additional links to include in the user menu on the top right ("app" url type is not allowed)
"usermenu_links": [
{"name": "Support", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
{"model": "auth.user"}
],
#############
# Side Menu #
#############
# Whether to display the side menu
"show_sidebar": True,
# Whether to aut expand the menu
"navigation_expanded": True,
# Hide these apps when generating side menu e.g (auth)
"hide_apps": [],
# Hide these models when generating side menu (e.g auth.user)
"hide_models": [],
# List of apps (and/or models) to base side menu ordering off of (does not need to contain all apps/models)
"order_with_respect_to": ["auth", "books", "books.author", "books.book"],
# Custom links to append to app groups, keyed on app name
"custom_links": {
"books": [{
"name": "Make Messages",
"url": "make_messages",
"icon": "fas fa-comments",
"permissions": ["books.view_book"]
}]
},
# Custom icons for side menu apps/models See https://fontawesome.com/icons?d=gallery&m=free
# for a list of icon classes
"icons": {
"auth": "fas fa-users-cog",
"auth.user": "fas fa-user",
"auth.Group": "fas fa-users",
"donors.Campaign": "fas fa-campground",
"donors.Donor": "fas fa-hand-holding-medical",
"donors.Expense": "fas fa-euro-sign",
"donors.Need": "fas fa-syringe",
"donors.Partner": "fas fa-hands-helping",
"donors.Quote": "fas fa-quote-right",
},
# Icons that are used when one is not manually specified
"default_icon_parents": "fas fa-chevron-circle-right",
"default_icon_children": "fas fa-circle",
#################
# Related Modal #
#################
# Use modals instead of popups
"related_modal_active": False,
#############
# UI Tweaks #
#############
# Relative paths to custom CSS/JS scripts (must be present in static files)
"custom_css": None,
"custom_js": None,
# Whether to show the UI customizer on the sidebar
"show_ui_builder": True,
###############
# Change view #
###############
# Render out the change view as a single form, or in tabs, current options are
# - single
# - horizontal_tabs (default)
# - vertical_tabs
# - collapsible
# - carousel
"changeform_format": "horizontal_tabs",
# override change forms on a per modeladmin basis
"changeform_format_overrides": {"auth.user": "collapsible", "auth.group": "vertical_tabs",},
# Add a language dropdown into the admin
# "language_chooser": True,
}
JAZZMIN_UI_TWEAKS = {
"navbar_small_text": False,
"footer_small_text": False,
"body_small_text": True,
"brand_small_text": False,
"brand_colour": False,
"accent": "accent-primary",
"navbar": "navbar-dark",
"no_navbar_border": False,
"navbar_fixed": True,
"layout_boxed": False,
"footer_fixed": False,
"sidebar_fixed": True,
"sidebar": "sidebar-dark-primary",
"sidebar_nav_small_text": False,
"sidebar_disable_expand": False,
"sidebar_nav_child_indent": True,
"sidebar_nav_compact_style": True,
"sidebar_nav_legacy_style": False,
"sidebar_nav_flat_style": False,
"theme": "default",
"dark_mode_theme": None
}
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono',
# 'skin': 'office2013',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
# {'name': 'document', 'items': ['Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates']},
# {'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
# {'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll']},
# {'name': 'forms',
# 'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton',
# 'HiddenField']},
# '/',
{'name': 'basicstyles',
'items': ['Bold', 'Italic']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
# {'name': 'insert',
# 'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe']},
# '/',
# {'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
# {'name': 'colors', 'items': ['TextColor', 'BGColor']},
# {'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
# {'name': 'about', 'items': ['About']},
# '/', # put this to force next toolbar on new line
# {'name': 'yourcustomtools', 'items': [
# # put the name of your editor.ui.addButton here
# 'Preview',
# 'Maximize',
# ]},
],
'toolbar': 'YourCustomToolbarConfig', # put selected toolbar config here
# 'toolbarGroups': [{ 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ] }],
# 'height': 291,
# 'width': '100%',
# 'filebrowserWindowHeight': 725,
# 'filebrowserWindowWidth': 940,
# 'toolbarCanCollapse': True,
# 'mathJaxLib': '//cdn.mathjax.org/mathjax/2.2-latest/MathJax.js?config=TeX-AMS_HTML',
'tabSpaces': 4,
'extraPlugins': ','.join([
# 'uploadimage', # the upload image feature
# your extra plugins here
# 'div',
'autolink',
'autoembed',
'embedsemantic',
# 'autogrow',
# 'devtools',
# 'widget',
'lineutils',
'clipboard',
# 'dialog',
# 'dialogui',
# 'elementspath'
]),
}
}
| 38.585895 | 122 | 0.608867 | 2,227 | 21,338 | 5.713965 | 0.281096 | 0.03112 | 0.076071 | 0.086444 | 0.284872 | 0.276385 | 0.260039 | 0.247387 | 0.198585 | 0.128251 | 0 | 0.003539 | 0.165761 | 21,338 | 552 | 123 | 38.655797 | 0.711308 | 0.490908 | 0 | 0.029197 | 0 | 0 | 0.476442 | 0.261011 | 0 | 0 | 0 | 0.001812 | 0 | 1 | 0 | false | 0.036496 | 0.007299 | 0 | 0.007299 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2149d6420c3893223a5218dbf2a34414ddcf1d07 | 10,212 | py | Python | opengwas/io_hdf5.py | chengsoonong/opengwas | 2128ba2fd49c62e88130e3b073e804c423bd700f | [
"BSD-2-Clause"
] | 5 | 2015-04-13T01:59:43.000Z | 2018-07-04T06:22:27.000Z | opengwas/io_hdf5.py | chengsoonong/opengwas | 2128ba2fd49c62e88130e3b073e804c423bd700f | [
"BSD-2-Clause"
] | null | null | null | opengwas/io_hdf5.py | chengsoonong/opengwas | 2128ba2fd49c62e88130e3b073e804c423bd700f | [
"BSD-2-Clause"
] | 1 | 2016-11-23T00:45:23.000Z | 2016-11-23T00:45:23.000Z | """Interface with HDF5 file containing PLINK data.
The convention used in the container
Each probe is a row in the genotype array.
Each individual is a column in the genotype array.
import tables
raw_data = tables.openFile('test.h5', 'r')
raw_data.root.probes[1:10]['ID']
raw_data.root.individuals[3:6]
raw_data.root.genotype[:10,29:35]
raw_data.root.phenotype[:10]
To access in matlab, note that matrices are transposed:
probes = h5read('test.h5','/probes');
probes.ID(:,2:10)'
indiv = h5read('test.h5','/individuals');
indiv.individual(:,4:6)'
data = h5read('test.h5','/genotype')';
data(1:10,30:35)
labels = h5read('test.h5','/phenotype');
labels(1:10)
"""
import os
import tables
from numpy import genfromtxt
from numpy import array, flatnonzero
class GenotypeData(object):
    """Genotype data for genome wide association studies.

    Provides methods for reading, writing and slicing data.
    Container for genotype data in HDF5 format using PyTables.
    """

    def __init__(self, filename):
        """Remember file names and initialize empty handles (no I/O here).

        :param filename: base path of the data set; the HDF5 container is
            expected/created at ``<filename>.h5``.
        """
        self.file_name = filename
        self.data_name = os.path.basename(filename)
        self.h5_name = '%s.h5' % filename
        # The file pointer to the raw data
        self.h5_file = None
        # probes and individuals are similar to PLINK BIM and FAM files resp.
        self.probes = None
        self.individuals = None
        # phenotype and genotype point to HDF5 carrays once a file is open
        self.genotype = None
        self.phenotype = None
        # Faster when using pytables:
        # self.pytable_filters = tables.Filters(complevel=5, complib='blosc')
        # zlib is used instead for portability and matlab compatibility
        self.pytable_filters = tables.Filters(complevel=5, complib='zlib')

    @property
    def num_probes(self):
        """The number of loci tested, the number of genotypes per individual"""
        return len(self.probes)

    @property
    def num_individuals(self):
        """The number of examples, the number of individuals in study"""
        return len(self.individuals)

    def open_file(self):
        """Open the HDF5 file read-only and initialize variables.

        probes/individuals are materialized into memory; genotype/phenotype
        remain references to the on-disk arrays so the (potentially large)
        genotype matrix is not loaded wholesale.
        """
        self.h5_file = tables.openFile(self.h5_name, 'r')
        self.probes = self.h5_file.root.probes[:]
        self.individuals = self.h5_file.root.individuals[:]
        self.genotype = self.h5_file.root.genotype
        self.phenotype = self.h5_file.root.phenotype

    def init_file(self, overwrite=True):
        """Open a new HDF5 file for writing.

        :param overwrite: when False and the file already exists, print a
            warning and leave ``self.h5_file`` untouched.
        """
        if (not overwrite) and os.path.isfile(self.h5_name):
            print('File %s found. Please remove manually.' % self.h5_name)
            return
        self.h5_file = tables.openFile(self.h5_name, 'w', title=self.data_name)

    def init_individuals(self, individuals):
        """Create the array on file to store the individuals.
        Also store the corresponding phenotype.
        TODO: typecheck input
        """
        self.h5_file.createTable(self.h5_file.root, 'individuals', individuals,
                                 title='Individuals',
                                 filters=self.pytable_filters)
        self.h5_file.root.individuals[:] = individuals
        self.individuals = self.h5_file.root.individuals[:]

    def init_phenotypes(self):
        """Extract the phenotype from the individuals and store it on file.

        PLINK phenotype coding (1=control, 2=case) is remapped to -1/+1
        labels; 0 (undefined) entries are reported but kept as-is.
        """
        phenotypes = self.individuals['phenotype']
        # Order matters: map 1 -> -1 before mapping 2 -> +1, otherwise the
        # freshly written +1 values would be clobbered by the second pass.
        phenotypes[phenotypes == 1] = -1
        phenotypes[phenotypes == 2] = 1
        phenotypes.shape = (len(phenotypes), 1)
        # check for undefined phenotype
        undefined_phenotype = flatnonzero(phenotypes == 0)
        if len(undefined_phenotype) > 0:
            print('Some phenotypes were undefined')
            print(undefined_phenotype)
        atom = tables.Int8Atom()
        self.h5_file.createCArray(self.h5_file.root, 'phenotype',
                                  atom, (1, self.num_individuals),
                                  title='Phenotype',
                                  filters=self.pytable_filters)
        self.h5_file.root.phenotype[:] = phenotypes.flatten()
        # Bug fix: this previously assigned to ``self.phenotypes`` (plural),
        # an attribute that is never read, so ``self.phenotype`` remained
        # None during a write session and get_idx_case()/get_idx_control()
        # could not work until the file was reopened.
        self.phenotype = self.h5_file.root.phenotype
        # Keep the old attribute name as an alias for backward compatibility.
        self.phenotypes = self.phenotype

    def init_probes(self, probes):
        """Create the array on file to store the probe information.
        TODO: typecheck input
        """
        self.h5_file.createTable(self.h5_file.root, 'probes', probes,
                                 title='Probes', filters=self.pytable_filters)
        self.h5_file.root.probes[:] = probes
        self.probes = self.h5_file.root.probes[:]

    def init_genotypes(self):
        """Create the array on file (which is empty) to store genotypes."""
        atom = tables.Int8Atom()
        self.genotype = self.h5_file.createCArray(
            self.h5_file.root, 'genotype', atom,
            (self.num_probes, self.num_individuals),
            title='Genotype', filters=self.pytable_filters)

    def close_file(self):
        """Close the HDF5 file."""
        self.h5_file.close()

    def get_genotype(self, idx_snp):
        """Return the array of genotypes for snp idx_snp
        for all individuals"""
        return array(self.genotype[idx_snp, :])

    def get_idx_case(self):
        """Return the index of individuals who are cases (phenotype == +1)"""
        return flatnonzero(array(self.phenotype) == 1)

    def get_idx_control(self):
        """Return the index of individuals who are controls (phenotype == -1)"""
        return flatnonzero(array(self.phenotype) == -1)
class GenotypeDataPlink(GenotypeData):
    """
    Class for converting data in PLINK bed, bim, fam files
    into the HDF5 container managed by GenotypeData.
    """

    def __init__(self, filename):
        """Check that the three PLINK files exist, then store their names.

        :param filename: base path of the PLINK file set (no extension).

        NOTE: on a missing file this prints a message and returns early,
        leaving bed_name/bim_name/fam_name unset (behavior kept for
        backward compatibility with existing callers).
        """
        super(GenotypeDataPlink, self).__init__(filename)
        if not os.path.isfile('%s.bed' % filename):
            print('%s.bed not found' % filename)
            return
        if not os.path.isfile('%s.bim' % filename):
            print('%s.bim not found' % filename)
            return
        if not os.path.isfile('%s.fam' % filename):
            print('%s.fam not found' % filename)
            return
        self.bed_name = '%s.bed' % filename
        self.bim_name = '%s.bim' % filename
        self.fam_name = '%s.fam' % filename

    def plink2hdf5(self):
        """
        Load the genotypes from the plink bed file,
        and the phenotypes from the plink fam file.
        Load probe information from bim file,
        load individual information from fam file.
        """
        print('Creating %s' % self.h5_name)
        probes = self.load_probes()
        individuals = self.load_individuals()
        self.init_HDF5(individuals, probes)
        print('Number of Probes: %d' % self.num_probes)
        print('Number of Individuals: %d' % self.num_individuals)
        print('Reading features from %s' % self.bed_name)
        self.load_genotypes()
        self.h5_file.close()
        print('Compressed HDF5 file created at %s' % self.h5_name)

    def init_HDF5(self, individuals, probes):
        """
        Create an HDF5 file to contain the genotypes, phenotypes and
        probe information.
        Assumes that phenotype and probe information already loaded.
        """
        self.init_file()
        self.init_individuals(individuals)
        self.init_phenotypes()
        self.init_probes(probes)
        self.init_genotypes()

    def load_genotypes(self):
        """
        Load the plink BED format genotype data file.
        Assumes samples in columns and SNP loci in rows.
        Needs plinkio.
        https://github.com/fadern/libplinkio
        """
        from plinkio import plinkfile
        bed_file = plinkfile.open(self.file_name)
        for counter, row in enumerate(bed_file):
            self.genotype[counter, :] = list(row)
            # progress indicator every 100k SNPs
            if counter % 100000 == 99999:
                print(counter + 1)
        bed_file.close()

    def load_probes(self):
        """Read the BIM file to get the probe locations
        chromosome (1-22, X, Y or 0 if unplaced)
        rs# or snp identifier
        Genetic distance (morgans)
        Base-pair position (bp units)
        """
        # Bug fix: the handle passed to genfromtxt was never closed; use a
        # context manager so it is released deterministically.
        with open(self.bim_name, 'r') as bim_file:
            probes = genfromtxt(
                bim_file, delimiter='\t',
                dtype={'names': ['chrom', 'ID', 'distance', 'bp_position',
                                 'allele1', 'allele2'],
                       'formats': [int, 'S16', int, int, 'S1', 'S1']})
        return probes

    def load_individuals(self):
        """Read the FAM file to get information about individuals
        Family ID
        Individual ID
        Paternal ID
        Maternal ID
        Sex (1=male; 2=female; other=unknown)
        Phenotype
        """
        # Bug fix: close the FAM file handle (was leaked previously).
        with open(self.fam_name, 'r') as fam_file:
            individuals = genfromtxt(
                fam_file, delimiter=' ',
                dtype={'names': ['family', 'individual', 'paternal',
                                 'maternal', 'sex', 'phenotype'],
                       'formats': ['S10', 'S16', int, int, int, int]})
        return individuals
def create_subset_probe(target, source, probe_idx):
    """Read the pytables HDF5 file source
    create a pytables HDF5 file target
    with the features with indices given by probe_idx.
    """
    src = GenotypeData(source)
    src.open_file()
    dst = GenotypeData(target)
    dst.init_file()
    # Individuals (and hence phenotypes) are copied over verbatim; only the
    # probe dimension is subset.
    dst.h5_file.copyNode(src.h5_file.root.individuals,
                         newparent=dst.h5_file.root)
    dst.individuals = dst.h5_file.root.individuals[:]
    dst.init_phenotypes()
    dst.init_probes(src.probes[probe_idx])
    dst.init_genotypes()
    dst.h5_file.root.genotype[:] = src.genotype[probe_idx, :].copy()
    src.close_file()
    dst.close_file()
def summary(file_name, peek_ind=5, peek_prb=10):
    """Report some statistics about the file"""
    gdata = GenotypeData(file_name)
    gdata.open_file()
    # Overall dimensions
    for template, value in (('Number of Probes: %d', gdata.num_probes),
                            ('Number of Individuals: %d',
                             gdata.num_individuals)):
        print(template % value)
    print('The genotype matrix is of size %d by %d' % gdata.genotype.shape)
    # Peek at the first few rows of each table plus the matching genotypes
    print('First %d individuals' % peek_ind)
    print(gdata.individuals[:peek_ind])
    print('First %d probes' % peek_prb)
    print(gdata.probes[:peek_prb])
    print('The corresponding genotypes')
    print(gdata.genotype[:peek_prb, :peek_ind])
    gdata.close_file()
| 36.733813 | 98 | 0.63396 | 1,285 | 10,212 | 4.91751 | 0.204669 | 0.028486 | 0.036398 | 0.031018 | 0.208577 | 0.17392 | 0.148916 | 0.106504 | 0.029435 | 0.029435 | 0 | 0.017463 | 0.254211 | 10,212 | 277 | 99 | 36.866426 | 0.812237 | 0.284371 | 0 | 0.108108 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0.00722 | 0 | 1 | 0.141892 | false | 0 | 0.033784 | 0 | 0.263514 | 0.141892 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214ac35945cc9f1e0cb8c7e8a00c985e721c4342 | 837 | py | Python | lv_examples/src/lv_ex_widgets/lv_ex_gauge/lv_ex_gauge_2_png.py | Yinyifeng18/lv_sim_qt | 4609c359fe7019e4d3b3a743b4b64fa03bb7a437 | [
"MIT"
] | 77 | 2022-01-12T01:25:12.000Z | 2022-03-29T11:52:38.000Z | lv_examples/src/lv_ex_widgets/lv_ex_gauge/lv_ex_gauge_2_png.py | Yinyifeng18/lv_sim_qt | 4609c359fe7019e4d3b3a743b4b64fa03bb7a437 | [
"MIT"
] | 1 | 2022-01-13T03:21:42.000Z | 2022-01-13T06:58:46.000Z | lv_examples/src/lv_ex_widgets/lv_ex_gauge/lv_ex_gauge_2_png.py | Yinyifeng18/lv_sim_qt | 4609c359fe7019e4d3b3a743b4b64fa03bb7a437 | [
"MIT"
] | 9 | 2022-01-12T13:28:45.000Z | 2022-02-17T11:44:35.000Z | from lv_colors import lv_colors
from imagetools import get_png_info, open_png
# Register PNG image decoder
decoder = lv.img.decoder_create()
decoder.info_cb = get_png_info
decoder.open_cb = open_png
# Create an image
with open('img_hand.png','rb') as f:
png_data = f.read()
img_hand_dsc = lv.img_dsc_t({
'data_size': len(png_data),
'data': png_data
})
# needle colors
needle_colors=[lv_colors.BLUE,lv_colors.ORANGE,lv_colors.PURPLE]
# create the gauge
gauge1=lv.gauge(lv.scr_act(),None)
gauge1.set_needle_count(3, needle_colors)
gauge1.set_size(200,200)
gauge1.align(None,lv.ALIGN.CENTER,0,0)
gauge1.set_needle_img(img_hand_dsc, 4, 4)
gauge1.set_style_local_image_recolor_opa(lv.gauge.PART.NEEDLE, lv.STATE.DEFAULT, lv.OPA.COVER)
# Set the values
gauge1.set_value(0, 10)
gauge1.set_value(1, 20)
gauge1.set_value(2, 30)
| 24.617647 | 94 | 0.769415 | 151 | 837 | 3.993377 | 0.397351 | 0.104478 | 0.069652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03877 | 0.106332 | 837 | 33 | 95 | 25.363636 | 0.76738 | 0.105137 | 0 | 0 | 0 | 0 | 0.036388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214cd801428e590c762681af80688789b13c9823 | 23,108 | py | Python | VoiExtractionManager.py | shinaji/texture_analysis | 1beb4c887d20eb011f0e8b5c98c223fa33d22a9c | [
"MIT"
] | 24 | 2017-11-22T01:26:53.000Z | 2022-01-04T04:14:30.000Z | VoiExtractionManager.py | shinaji/texture_analysis | 1beb4c887d20eb011f0e8b5c98c223fa33d22a9c | [
"MIT"
] | null | null | null | VoiExtractionManager.py | shinaji/texture_analysis | 1beb4c887d20eb011f0e8b5c98c223fa33d22a9c | [
"MIT"
] | 11 | 2018-01-07T00:10:19.000Z | 2021-05-06T01:04:43.000Z | #-*- coding:utf-8 -*-
"""
VoiExtractionManager
Copyright (c) 2016 Tetsuya Shinaji
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
Date: 2016/02/10
"""
import pydicom as dicom
try:
from pydicom.contrib import pydicom_series
except ImportError:
from pydicom_ext import pydicom_series
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
import glob
import datetime
import traceback
import os
import copy
import json
from Utils import is_inside
import time
import argparse
import re
__version__ = "0.1.0"
class VoiExtractionManager:
    """Extract PET voxel values inside RT-Structure contours (VOIs).

    Given a PET series, a CT series and an RT-Structure (contour) dicom,
    this class builds masked PET sub-volumes for the contoured regions and
    can save them (raw and SUV-scaled) together with metadata and figures.
    """

    def __init__(self, pet, ct, ctr, target_ctr_idx=None):
        """
        init
        :param pet: pet dicom series
        :param ct: ct dicom series
        :param ctr: contour dicom
        :param target_ctr_idx: target contour index. if -1, use all contours
        """
        # Both a CT and a PET reference sequence are required; a single
        # sequence usually means contours were drawn on CT only.
        # NOTE(review): the concatenated message parts below lack separating
        # spaces, so the runtime message reads slightly garbled.
        if len(ctr.ReferencedFrameOfReferenceSequence) == 1:
            raise Exception(
                "The given contour data has only one reference sequence."
                "Two reference sequences (CT and PET) are required to "
                "perform the voi extraction of PET images."
                "Probably, the given contour data contains reference sequence "
                "for CT images."
            )
        # pet/ct are kept as references (deepcopy was deliberately disabled,
        # presumably for speed); only the contour data is copied.
        self.pet = pet  # copy.deepcopy(pet)
        self.ct = ct  # copy.deepcopy(ct)
        self.ctr = copy.deepcopy(ctr)
        self.ct_img = ct[0].get_pixel_array()
        self.pet_img = pet[0].get_pixel_array()
        # Geometry needed to map contour mm-coordinates to voxel indices
        self.ct_spacing = self.ct[0].info.PixelSpacing
        self.ct_slice_thickness = self.ct[0].info.SliceThickness
        self.ct_origin = self.ct[0].info.ImagePositionPatient
        self.pet_spacing = self.pet[0].info.PixelSpacing
        self.pet_slice_thickness = self.pet[0].info.SliceThickness
        self.pet_origin = self.pet[0].info.ImagePositionPatient
        # Multiplicative factor converting PET intensity to SUV
        self.suv_coeff = self.__get_SUV_conversion_coeff()
        self.target_ctr_sequence = target_ctr_idx
        self.rois = []
        self.rois_co_slice_idxs = None
        self.sop_instance_uids_of_rois = []
        self.masked_roi_imgs = None
        self.masked_roi_imgs_suv = None
        self.non_cropped_masked_roi_imgs = None
        self.co_pet_imgs = None
        # Performs the actual ROI reading and masking
        self.update(None, target_ctr_idx)

    def update(self, ctr=None, target_ctr_idx=None):
        """
        update extraction result
        :param ctr: contour dicom (optional; replaces the stored one)
        :param target_ctr_idx: target contour index (optional; defaults to 0)
        """
        if ctr is not None:
            self.ctr = copy.deepcopy(ctr)
        if target_ctr_idx is not None:
            self.target_ctr_sequence = target_ctr_idx
        else:
            self.target_ctr_sequence = 0
        if (len(self.ctr.ROIContourSequence) > 1) and (target_ctr_idx is None):
            print('---WARNING---')
            print(
                'This contour dicom contains {} contours, but target counter '
                'index was not given. Only first contour will be processed.'.format(
                    len(self.ctr.ROIContourSequence)
                )
            )
        self.rois = []
        self.__read_roi_data()
        (self.masked_roi_imgs, self.non_cropped_masked_roi_imgs,
         self.co_pet_imgs, self.rois_co_slice_idxs
         ) = self.__get_masked_roi_image()
        self.masked_roi_imgs_suv = self.masked_roi_imgs * self.suv_coeff

    def __get_masked_roi_image(self):
        """
        return masked pet image. outside of roi is filled with -1.
        The size of roi image is cropped to remove wasted space.
        :return: cropped images, non-cropped images, corresponding pet images,
                 corresponding pet slice indices of ROIs
        """
        pet_imgs = []
        roi_imgs = []
        roi_indices = []
        # Collect, per contour, the matching PET slice and its masked version
        for i in range(len(self.rois)):
            idx_p, img_p, org_p, img_c, org_c = self.__get_corresponding_image(i)
            pet_imgs.append(img_p)
            roi_img = self.__mask_outside(img_p, self.rois[i])
            roi_imgs.append(roi_img)
            roi_indices.append(idx_p)
        # For a single contour, the slice indices must form a contiguous run
        if self.target_ctr_sequence >= 0:
            if not np.unique(roi_indices).shape[0] == (
                    np.unique(roi_indices).max() -
                    np.unique(roi_indices).min() + 1):
                raise Exception('Some slices between start slice and '
                                'end slice are missing. \n{}\n{}'.format(
                    np.unique(roi_indices),
                    list(range(np.unique(roi_indices).min(),
                               np.unique(roi_indices).max() + 1))
                ))
        n_slices = np.unique(roi_indices).max() - \
                   np.unique(roi_indices).min() + 1
        # -1 marks "outside ROI" everywhere in the stacked volumes
        co_pet_imgs = np.ones(
            (n_slices,
             roi_imgs[0].shape[0],
             roi_imgs[0].shape[1])
        ) * -1
        rois_co_slice_idx = []
        masked_roi_imgs = np.ones(
            (n_slices,
             roi_imgs[0].shape[0],
             roi_imgs[0].shape[1])
        ) * -1
        # Merge per-contour masks into the volume, indexed relative to the
        # first contoured slice
        for i, idx in enumerate(roi_indices):
            masked_roi_imgs[idx - min(roi_indices)][roi_imgs[i] >= 0] = \
                roi_imgs[i][roi_imgs[i] >= 0]
            co_pet_imgs[idx - min(roi_indices)] = pet_imgs[i]
            rois_co_slice_idx.append(idx - min(roi_indices))
        # In all-contours mode, fill every slice in the covered range from
        # the PET series (some slices may have no contour of their own)
        if self.target_ctr_sequence == -1:
            for i, p_idx in enumerate(
                    range(min(roi_indices), max(roi_indices) + 1)):
                co_pet_imgs[i] = np.array(
                    self.pet[0]._datasets[p_idx].pixel_array *
                    self.pet[0]._datasets[p_idx].RescaleSlope +
                    self.pet[0]._datasets[p_idx].RescaleIntercept)
        # Compute a bounding box (with 2-pixel margin) around all contour
        # vertices, in PET voxel coordinates
        s = np.concatenate((self.pet_spacing, [self.pet_slice_thickness]))
        pos_min = np.array(self.pet_img.shape[1:3])
        pos_max = np.array([0, 0])
        for i in range(len(self.rois)):
            # NOTE(review): np.int was removed in NumPy 1.24; requires an
            # older NumPy or a change to int/np.int_ to run today.
            pos = ((self.rois[i] - self.pet_origin) / s)[:, 0:2].astype(np.int)
            pos_max[0] = np.max([np.max(pos[:, 0]), pos_max[0]])
            pos_max[1] = np.max([np.max(pos[:, 1]), pos_max[1]])
            pos_min[0] = np.min([np.min(pos[:, 0]), pos_min[0]])
            pos_min[1] = np.min([np.min(pos[:, 1]), pos_min[1]])
        pos_min[0] = max(pos_min[0] - 2, 0)
        pos_min[1] = max(pos_min[1] - 2, 0)
        pos_max[0] = min(pos_max[0] + 2, masked_roi_imgs.shape[1])
        pos_max[1] = min(pos_max[1] + 2, masked_roi_imgs.shape[2])
        # Crop rows (y) then columns (x)
        cropped_roi = np.array(masked_roi_imgs)[:, pos_min[1]:pos_max[1], :]
        cropped_roi = cropped_roi[:, :, pos_min[0]:pos_max[0]]
        return cropped_roi, masked_roi_imgs, co_pet_imgs, rois_co_slice_idx

    def __read_roi_data(self):
        """
        read roi data from contour dicom into self.rois /
        self.sop_instance_uids_of_rois
        """
        self.rois = []
        self.sop_instance_uids_of_rois = []
        if self.target_ctr_sequence >= 0:
            self.__read_one_ctr_sequence(self.target_ctr_sequence)
        elif self.target_ctr_sequence == -1:
            # -1 means: read every available contour sequence
            for target in range(len(self.ctr.ROIContourSequence)):
                if hasattr(self.ctr.ROIContourSequence[target],
                           'ContourSequence'):
                    self.__read_one_ctr_sequence(target)
                else:
                    print('Cannot find ContourSequence at '
                          'target ctr idx {}'.format(target))
        else:
            raise Exception('Unknown target contour sequence number.'
                            '{} was given.'.format(self.target_ctr_sequence))

    def __read_one_ctr_sequence(self, target_ctr_sequence):
        """
        read target contour sequence
        :param target_ctr_sequence: target contour sequence index
        """
        for i in range(
                len(self.ctr.ROIContourSequence[
                        target_ctr_sequence].ContourSequence)):
            # ContourData is a flat list of x,y,z triplets (mm)
            roi = np.array(
                self.ctr.ROIContourSequence[
                    target_ctr_sequence].ContourSequence[i].ContourData)
            roi = roi.reshape((roi.size // 3, 3))
            # Only planar closed polygons are supported by the masking code
            if not (self.ctr.ROIContourSequence[
                        target_ctr_sequence].ContourSequence[
                        i].ContourGeometricType == "CLOSED_PLANAR"):
                raise Exception(
                    "{} is unsupported contour geometric type...".format(
                        self.ctr.ROIContourSequence[
                            target_ctr_sequence].ContourSequence[
                            i].ContourGeometricType))
            self.rois.append(roi)
            # Remember which image slice this contour was drawn on
            self.sop_instance_uids_of_rois.append(
                self.ctr.ROIContourSequence[
                    target_ctr_sequence].ContourSequence[
                    i].ContourImageSequence[0].ReferencedSOPInstanceUID)

    def save_voi(self, dirname, output_voi_figs=False):
        """
        save extracted roi
        :param dirname: directory name for save
        :param output_voi_figs: if True, voi extraction result figures are saved
        """
        filename = '{}/{}_{}_ctr{:02d}'.format(
            dirname,
            self.ctr.PatientName,
            self.ctr.SeriesDescription,
            self.target_ctr_sequence).replace(" ", "_")
        # SUV-scaled volumes: cropped, full-frame masked, and unmasked PET
        np.save(filename + '.npy', self.masked_roi_imgs_suv)
        np.save(filename + '_non_cropped_.npy', self.non_cropped_masked_roi_imgs * self.suv_coeff)
        np.save(filename + '_non_cropped_non_masked_.npy', self.co_pet_imgs * self.suv_coeff)
        # Volume in ml: voxel count * voxel volume (mm^3) / 1000
        roi_volume = (self.masked_roi_imgs[self.masked_roi_imgs >= 0].size *
                      self.pet_slice_thickness * np.prod(self.pet_spacing) /
                      1000)
        radiopharmaceutical = \
            self.pet[0].info.RadiopharmaceuticalInformationSequence[
                0].Radiopharmaceutical
        json_data = {
            'radiopharmaceutical': radiopharmaceutical,
            'roi_volume': roi_volume,
            'patient_name': str(self.ctr.PatientName),
            'series_description': str(self.ctr.SeriesDescription),
            'target_ctr_idx': self.target_ctr_sequence,
            'n_voxels': self.masked_roi_imgs[self.masked_roi_imgs >= 0].size,
            'ref_roi_number': -1 if self.target_ctr_sequence == -1 else (
                self.ctr.ROIContourSequence[
                    self.target_ctr_sequence].ReferencedROINumber),
            'SUV_conversion_coeff': self.suv_coeff,
            'n_pet_slices': self.pet_img.shape[0],
            'pixel_spacing': list(self.pet[0].info.PixelSpacing),
            'slice_thickness': self.pet[0].info.SliceThickness,
        }
        with open(filename + '_meta_data_.json', 'w') as f:
            json.dump(json_data, f, sort_keys=True, indent=2)
        if output_voi_figs:
            self.__save_voi_fig(filename)

    def __save_voi_fig(self, filename):
        """
        save extraction result figures (one PNG per slice: grayscale PET,
        semi-transparent VOI overlay, and the contour polygon in green)
        """
        my_cmap = cm.viridis
        # values below vmin (the -1 "outside" fill) become transparent
        my_cmap.set_under(alpha=0)
        figs = [plt.figure(figsize=(10, 10)) for i in
                range(self.non_cropped_masked_roi_imgs.shape[0])]
        axes = [fig.add_subplot(111) for fig in figs]
        # Shared color scale over all in-ROI voxels
        vmin = max(0, self.non_cropped_masked_roi_imgs[
            self.non_cropped_masked_roi_imgs >= 0].min())
        vmax = self.non_cropped_masked_roi_imgs[
            self.non_cropped_masked_roi_imgs >= 0].max()
        for idx in np.unique(self.rois_co_slice_idxs):
            axes[idx].imshow(self.co_pet_imgs[idx],
                             vmin=self.co_pet_imgs[idx].min(),
                             vmax=self.co_pet_imgs[idx].max(),
                             cmap='gray')
            axes[idx].imshow(self.non_cropped_masked_roi_imgs[idx],
                             cmap=my_cmap,
                             vmin=vmin,
                             vmax=vmax,
                             alpha=0.5)
            axes[idx].axis('off')
        # Draw the contour polygons, closed by repeating the first vertex;
        # the 0.5 rounding snaps lines to pixel-edge coordinates
        s = np.concatenate((self.pet_spacing, [self.pet_slice_thickness]))
        for i, roi in enumerate(self.rois):
            pos = ((roi - self.pet_origin) / s)[:, 0:2]
            pos = np.vstack((pos, [pos[0]]))
            slice_idx = self.rois_co_slice_idxs[i]
            axes[slice_idx].plot(np.round(pos[:, 0] - 0.5) + 0.5,
                                 np.round(pos[:, 1] - 0.5) + 0.5,
                                 '-', color='#00FF00')
        for i, fig in enumerate(figs):
            fig.savefig(filename + '_{:02d}_.png'.format(i),
                        figsize=(20, 20))
            fig.clf()

    def __get_SUV_conversion_coeff(self):
        """
        return conversion coefficient to convert intensity to SUV
        (body-weight SUV: weight[g] / decay-corrected injected activity)
        :return conversion coefficient
        """
        p = self.pet[0].info
        # (0008,0021)/(0008,0031): Series Date/Time;
        # (0008,0023)/(0008,0033): Content Date/Time
        scan_time_str = str(p[0x0008, 0x0021].value) + str(
            p[0x0008, 0x0031].value)
        content_time_str = str(p[0x0008, 0x0023].value) + str(
            p[0x0008, 0x0033].value)
        scan_time = datetime.datetime.strptime(scan_time_str, '%Y%m%d%H%M%S')
        content_time = datetime.datetime.strptime(content_time_str[0:14],
                                                  '%Y%m%d%H%M%S')
        # Sanity check: content must have been produced after the scan start
        delta = content_time - scan_time
        if delta.total_seconds() <= 0:
            raise Exception("Error: Series Date/Time is not correct.")
        if len(p.RadiopharmaceuticalInformationSequence) > 1:
            raise Exception('More than two radiopharmaceutical'
                            ' information was detected')
        t_half = float(
            p.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
        # Prefer the full start DateTime; fall back to start Time combined
        # with the Series Date (may be wrong if injection crossed midnight)
        if hasattr(p.RadiopharmaceuticalInformationSequence[0],
                   "RadiopharmaceuticalStartDateTime"):
            measured_time_str = str(p.RadiopharmaceuticalInformationSequence[
                0].RadiopharmaceuticalStartDateTime)
        elif hasattr(p.RadiopharmaceuticalInformationSequence[0],
                     "RadiopharmaceuticalStartTime"):
            print("Warning!!!\nRadiopharmaceuticalInformationSequence only "
                  "has RadiopharmaceuticalStartTime tag. Assume the date is "
                  "same as SeriesDate.\n"
                  "You must carefully check if SUV value is correct.")
            measured_time_str = \
                str(p[0x0008, 0x0021].value) + \
                str(p.RadiopharmaceuticalInformationSequence[0
                    ].RadiopharmaceuticalStartTime)
        else:
            raise Exception("Error: This dicom file dose not include "
                            "RadiopharmaceuticalStartDateTime data")
        measured_time = datetime.datetime.strptime(measured_time_str,
                                                   '%Y%m%d%H%M%S.%f')
        activity = float(
            p.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
        # Decay-correct the injected activity from injection to scan start
        actual_activity = activity * (
            2 ** (-(scan_time - measured_time).total_seconds() / t_half))
        # PatientWeight is in kg; * 1000 converts to grams
        return p.PatientWeight * 1000. / actual_activity

    def __get_corresponding_image(self, target_roi_index):
        """
        return corresponding PET/CT iamges and those origin (mm)
        :param target_roi_index: target roi index
        :return: pet slice index, pet image, pet origin, ct image, ct origin
        """
        # Find the CT slice the contour was drawn on (matched by SOP UID)
        for i in range(len(self.ct[0]._datasets)):
            uid_c = self.sop_instance_uids_of_rois[target_roi_index]
            if self.ct[0]._datasets[i].SOPInstanceUID == uid_c:
                img_c = np.array(
                    self.ct[0]._datasets[i].pixel_array * self.ct[
                        0]._datasets[i].RescaleSlope + self.ct[
                        0]._datasets[i].RescaleIntercept)
                org_c = np.array(self.ct[0]._datasets[i].ImagePositionPatient)
        ct_ctr_img_seq = copy.deepcopy(
            self.ctr.ReferencedFrameOfReferenceSequence[
                0].RTReferencedStudySequence[0].RTReferencedSeriesSequence[
                0].ContourImageSequence)
        pet_ctr_img_seq = copy.deepcopy(
            self.ctr.ReferencedFrameOfReferenceSequence[
                1].RTReferencedStudySequence[0].RTReferencedSeriesSequence[
                0].ContourImageSequence)
        # The two referenced sequences may appear in either order; swap them
        # if the first one is actually the PET sequence
        if not ct_ctr_img_seq[0].ReferencedSOPClassUID == 'CT Image Storage':
            if pet_ctr_img_seq[0].ReferencedSOPClassUID == 'CT Image Storage':
                if (ct_ctr_img_seq[0].ReferencedSOPClassUID ==
                        'Positron Emission Tomography Image Storage'):
                    tmp = copy.deepcopy(ct_ctr_img_seq)
                    ct_ctr_img_seq = copy.deepcopy(pet_ctr_img_seq)
                    pet_ctr_img_seq = copy.deepcopy(tmp)
                else:
                    raise Exception('Cannot find contour data for the PET image')
            else:
                raise Exception('Cannot find contour data for the CT image')
        # Map the CT slice UID to the PET slice UID at the same position
        # (assumes the two ContourImageSequences are index-aligned)
        for i in range(len(ct_ctr_img_seq)):
            if uid_c == ct_ctr_img_seq[i].ReferencedSOPInstanceUID:
                uid_p = pet_ctr_img_seq[i].ReferencedSOPInstanceUID
        idx_p = []
        img_p = []
        org_p = []
        for i in range(len(self.pet[0]._datasets)):
            if self.pet[0]._datasets[i].SOPInstanceUID == uid_p:
                idx_p.append(i)
                img_p.append(np.array(
                    self.pet[0]._datasets[i].pixel_array *
                    self.pet[0]._datasets[i].RescaleSlope +
                    self.pet[0]._datasets[i].RescaleIntercept))
                org_p.append(np.array(
                    self.pet[0]._datasets[i].ImagePositionPatient))
        # Exactly one PET slice must match
        if not len(idx_p) == 1:
            if len(idx_p) == 0:
                raise Exception(
                    'Cannot find corresponding image for '
                    'the target roi index {}.'.format(target_roi_index))
            else:
                raise Exception(
                    'Multiple pet images was detected as the corresponding '
                    'frame of the target roi index {}'.format(target_roi_index)
                )
        return idx_p[0], img_p[0], org_p[0], img_c, org_c

    def __mask_outside(self, pet_img_2d, roi):
        """
        return masked pet image. outside of roi is filled with -1.
        :param pet_img_2d: a slice of pet images correspond to the roi
        :param roi: array which contains roi vertex positions (mm)
        :return: masked pet image
        """
        # Convert contour vertices from mm to (sub-)voxel coordinates
        s = np.concatenate((self.pet_spacing, [self.pet_slice_thickness]))
        points = []
        for i in range(len(roi)):
            points.append((roi[i] - self.pet_origin) / s)
        points = np.array(points)
        # Snap vertices to pixel-center coordinates (…x.5 grid)
        points = np.round(points[:, 0:2] - 0.5, decimals=0) + 0.5
        # Candidate pixels: bounding box of the polygon plus a 3-pixel margin
        xmin = max(0, points[:, 0].min() - 3)
        xmax = min(pet_img_2d.shape[1], points[:, 0].max() + 3)
        ymin = max(0, points[:, 1].min() - 3)
        ymax = min(pet_img_2d.shape[0], points[:, 1].max() + 3)
        x, y = np.meshgrid(np.arange(int(xmin), int(xmax)),
                           np.arange(int(ymin), int(ymax)))
        x = x.flatten()[:, np.newaxis]
        y = y.flatten()[:, np.newaxis]
        z0 = np.hstack((x, y)).astype(int)
        z1 = np.array(points)
        # Drop consecutive duplicate vertices introduced by the snapping
        not_duplication_mask = np.ones(z1.shape[0], dtype=bool)
        for i in range(z1.shape[0] - 1):
            if np.all(z1[i] == z1[i + 1]):
                not_duplication_mask[i + 1] = False
        z1 = z1[not_duplication_mask]
        # Close the polygon if the last vertex does not repeat the first
        if not np.all(z1[0] == z1[-1]):
            z1 = np.vstack((z1, [z1[0]]))
        # Delegate the point-in-polygon test to Utils.is_inside
        img = is_inside(z0, z1, pet_img_2d)
        return img
if __name__ == '__main__':
    # Batch-process patient directories matching ./data/PA<N>/ST0:
    # SE0 = PET series, SE1 = CT series, SE2.. = RT-Structure contour files.
    parser = argparse.ArgumentParser(
        description='Voi extraction tool')
    parser.add_argument('--data_dir_path', '-d', type=str,
                        default='./data/PA*',
                        help='Directory of target data files.')
    args = parser.parse_args()
    pa_dir_list = glob.glob(args.data_dir_path)
    # Determine the numeric range of patient directories (PA<min>..PA<max>)
    min_pa_num = min([int(re.findall("PA([0-9]*)", d)[0]) for d in pa_dir_list])
    max_pa_num = max([int(re.findall("PA([0-9]*)", d)[0]) for d in pa_dir_list])
    for pa_index in range(min_pa_num, max_pa_num + 1):
        try:
            target_dir = args.data_dir_path.replace('*', '%d' % pa_index)
            target_dir = os.path.join(target_dir, 'ST0')
            print(target_dir)
            pet = pydicom_series.read_files('{}/SE0/'.format(target_dir))
            ct = pydicom_series.read_files('{}/SE1/'.format(target_dir))
            # check if pet and ct are not flipped: only the PET series
            # carries RadiopharmaceuticalInformationSequence
            if not hasattr(pet[0].info,
                           'RadiopharmaceuticalInformationSequence'):
                begin = time.time()
                # tmp_pet = copy.deepcopy(pet)
                # pet = copy.deepcopy(ct)
                # ct = copy.deepcopy(tmp_pet)
                tmp_pet = pet
                pet = ct
                ct = tmp_pet
                print('FLIP! {:.3f}s'.format(time.time() - begin))
            # Collect every contour dicom from SE2 onwards
            ctrs = []
            se_dir_list = glob.glob(os.path.join(target_dir, 'SE*'))
            min_se_n = min([int(re.findall("SE([0-9]*)", d)[0]) for d in se_dir_list])
            max_se_n = max([int(re.findall("SE([0-9]*)", d)[0]) for d in se_dir_list])
            for i in range(max(2, min_se_n), max_se_n + 1):
                ctr_dcm_fname = '{}/SE{}/IM0'.format(target_dir, i)
                if os.path.exists(ctr_dcm_fname):
                    print('Load contour: {}'.format(ctr_dcm_fname))
                    tmp_ctr = dicom.read_file(ctr_dcm_fname)
                    # Keep only files that actually contain contours
                    if hasattr(tmp_ctr, 'ROIContourSequence'):
                        ctrs.append(tmp_ctr)
                else:
                    print('Not found: {}'.format(ctr_dcm_fname))
            # Extract each individual contour, then (if there are several)
            # the union of all contours (target index -1)
            for i, ctr in enumerate(ctrs):
                for idx in range(len(ctr.ROIContourSequence)):
                    try:
                        manager = VoiExtractionManager(pet, ct, ctr, idx)
                        manager.save_voi(target_dir, output_voi_figs=True)
                    except:
                        print(traceback.format_exc())
                        # NOTE(review): '{}' is never formatted here — this
                        # prints the template and i as two print arguments
                        print('Error: ctr_id {}', i)
                if len(ctr.ROIContourSequence) > 1:
                    try:
                        manager = VoiExtractionManager(pet, ct, ctr, -1)
                        manager.save_voi(target_dir, output_voi_figs=True)
                    except:
                        print(traceback.format_exc())
                        print('Error: ctr_id {}', i)
        except:
            # Keep going with the next patient on any per-patient failure
            print(traceback.format_exc())
            print('Error:', target_dir)
| 43.273408 | 99 | 0.548858 | 2,707 | 23,108 | 4.449575 | 0.142963 | 0.020922 | 0.026982 | 0.024408 | 0.364051 | 0.258697 | 0.196098 | 0.165297 | 0.126609 | 0.084599 | 0 | 0.020485 | 0.342998 | 23,108 | 533 | 100 | 43.354597 | 0.772889 | 0.065129 | 0 | 0.148058 | 0 | 0 | 0.092915 | 0.011421 | 0 | 0 | 0.002904 | 0 | 0 | 1 | 0.024272 | false | 0 | 0.041262 | 0 | 0.07767 | 0.033981 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214e52ac6288035be9504b22746e2464fc085760 | 416 | py | Python | config.py | EliCrystal2001/nds-string-editor | a166f2f69ac99b82968f47fffbd696e608ecd414 | [
"MIT"
] | null | null | null | config.py | EliCrystal2001/nds-string-editor | a166f2f69ac99b82968f47fffbd696e608ecd414 | [
"MIT"
] | null | null | null | config.py | EliCrystal2001/nds-string-editor | a166f2f69ac99b82968f47fffbd696e608ecd414 | [
"MIT"
] | null | null | null | #!/bin/python3
# Required: identifier of the target ROM image — presumably matched against
# the ROM's internal name by the loader; confirm against the caller.
ROM_NAME = "DSCDP_CRUJN6_00"
# When True, skip the MD5 integrity check of the ROM (use with caution).
IGNORE_MD5 = False
# Custom Rules Begin Here
# This example one just excludes control characters (except 0x0A)
# Required: this method must exist, and return True / False indicating
# whether a byte array is to be considered valid characters
def check_valid(obytes):
    """Return True if *obytes* contains no control characters other than
    0x0A (newline); False otherwise.

    :param obytes: iterable of byte values (e.g. a ``bytes`` object).
    """
    for b in obytes:
        # C0 control range is 0x00-0x1F; newline (0x0A) is explicitly allowed.
        if 0x00 <= b <= 0x1F and b != 0x0A:
            return False
    return True
| 18.909091 | 78 | 0.711538 | 66 | 416 | 4.409091 | 0.742424 | 0.068729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042813 | 0.213942 | 416 | 21 | 79 | 19.809524 | 0.847095 | 0.540865 | 0 | 0 | 0 | 0 | 0.082418 | 0 | 0 | 0 | 0.065934 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214e58f9acc686b9612aa19291f9f026256d168a | 3,660 | py | Python | application/model/mailhandler.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | application/model/mailhandler.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | application/model/mailhandler.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Dict, List
class MailHandler(object):
def __init__(self, mail_host:str, mail_port:int, sender_email:str=None, sender_pass:str=None):
self.mail_host = mail_host
self.mail_port = mail_port
self.sender_email = sender_email
self.sender_pass = sender_pass
def validate_data(self, required_fields:tuple, data:Dict):
required_fields = required_fields
missing_fields = []
for field in required_fields:
if not field in data:
missing_fields.append(field)
return missing_fields
def prep_msg_body(self, data:Dict):
msg = MIMEMultipart("alternative")
msg["Subject"] = data.get('subject')
msg["From"] = data.get('sender')
msg["To"] = ", ".join(data.get('recipients'))
msg_body = MIMEText(data.get('content'), "plain")
msg.attach(msg_body)
return msg
def prep_msg_attchment(self, msg:MIMEMultipart, data:Dict):
msg = msg
attachments = data.get('attachments')
for attachment in attachments:
with open(attachment, "rb") as attached:
part = MIMEBase("application", "octet-stream")
part.set_payload(attached.read())
encoders.encode_base64(part)
part.add_header(
"Content-Disposition",
f"attachment; filename={attachment[9:]}",
)
msg.attach(part)
return msg
def make_plain_msg(self, data:Dict):
required_fields = ('subject', 'sender', 'recipients', 'content')
data_valid_check = self.validate_data(required_fields, data)
if len(data_valid_check)>0:
return False, data_valid_check
return True, self.prep_msg_body(data)
def make_attachment_msg(self, data:Dict):
required_fields = ('subject', 'sender', 'recipients', 'content', 'attachments')
data_valid_check = self.validate_data(required_fields, data)
if len(data_valid_check)>0:
return False, data_valid_check
msg = self.prep_msg_body(data)
return True, self.prep_msg_attchment(msg, data)
def send_mail(self, data:Dict, credential:Dict=None):
if 'attachments' in data:
ready, msg = self.make_attachment_msg(data)
else:
ready, msg = self.make_plain_msg(data)
if not ready:
return False, "not able to prepare message!"
context = ssl.create_default_context()
with smtplib.SMTP(self.mail_host, self.mail_port) as server:
# server.login(sender_email, password)
print("Sending mail...", data.get('sender'), data.get('recipients'), msg)
server.sendmail(data.get('sender'), data.get('recipients'), msg.as_string())
return True, "message sent."
def send_mail_o365(self, data:Dict, credential:Dict=None):
if 'attachments' in data:
ready, msg = self.make_attachment_msg(data)
else:
ready, msg = self.make_plain_msg(data)
if not ready:
return False, "not able to prepare message!"
with smtplib.SMTP(self.mail_host, self.mail_port) as server:
server.ehlo()
server.starttls()
server.login(self.sender_email, self.sender_pass)
server.sendmail(data.get('sender'), data.get('recipients'), msg.as_string())
return True, "message sent." | 40.21978 | 98 | 0.624044 | 445 | 3,660 | 4.946067 | 0.240449 | 0.034984 | 0.038164 | 0.036347 | 0.477056 | 0.413903 | 0.413903 | 0.39891 | 0.39891 | 0.39891 | 0 | 0.002986 | 0.268033 | 3,660 | 91 | 99 | 40.21978 | 0.818589 | 0.009836 | 0 | 0.329114 | 0 | 0 | 0.107922 | 0.0069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101266 | false | 0.037975 | 0.075949 | 0 | 0.329114 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
214f1cdd4cb9bcfb4a7c08c88edef89e68f62e01 | 2,464 | py | Python | disco/layers.py | Hekstra-Lab/disco | 771f2ddb7214f231afae685d1a04161495795f70 | [
"MIT"
] | null | null | null | disco/layers.py | Hekstra-Lab/disco | 771f2ddb7214f231afae685d1a04161495795f70 | [
"MIT"
] | null | null | null | disco/layers.py | Hekstra-Lab/disco | 771f2ddb7214f231afae685d1a04161495795f70 | [
"MIT"
] | null | null | null | import numpy as np
import reciprocalspaceship as rs
import tensorflow as tf
from tensorflow import keras as tfk
from IPython import embed
class SelfAttentionBlock(tfk.layers.Layer):
    """Transformer-style self-attention block with residual connections.

    Applies multi-head self-attention followed by a two-layer feed-forward
    network; each sub-block is followed by an add-and-normalize step using
    a single shared LayerNormalization instance.

    NOTE(review): both normalizations share one LayerNormalization layer
    (so they share parameters); confirm this is intentional -- standard
    transformer blocks use two independent norms.
    """

    def __init__(self, attention_dims, num_heads, ff_dims=None):
        super().__init__()
        # Fixed: the original had `attention_dims = attention_dims`, a
        # no-op local self-assignment; store it on the instance instead.
        self.attention_dims = attention_dims
        if ff_dims is None:
            ff_dims = attention_dims
        self.ff_dims = ff_dims
        self.att = tfk.layers.MultiHeadAttention(
            num_heads=num_heads,
            key_dim=attention_dims,
            value_dim=attention_dims,
            output_shape=attention_dims,
        )
        self.ff = tfk.Sequential([
            tfk.layers.Dense(ff_dims, activation='ReLU', kernel_initializer='identity'),
            tfk.layers.Dense(attention_dims, kernel_initializer='identity'),
        ])
        self.layer_norm = tfk.layers.LayerNormalization()

    def build(self, shapes):
        # shapes is the (qkv_shape, mask_shape) pair passed to call();
        # record the last mask dimension.
        mask = shapes[-1]
        self.reflections_per_image = mask[-1]

    def call(self, inputs):
        """inputs: (qkv, mask); returns (output, mask) so blocks chain."""
        qkv, mask = inputs
        # Pairwise valid-position mask: (batch, n, 1) @ (batch, 1, n).
        attention_mask = mask @ tf.transpose(mask, [0, 2, 1])
        out = self.att(qkv, qkv, qkv, attention_mask)
        out = self.layer_norm(out + qkv)
        out = self.layer_norm(self.ff(out) + out)
        # Zero out padded positions.
        out = out * mask
        return out, mask
class Assigner(tfk.models.Model):
    """Stack of self-attention blocks followed by a per-reflection
    softmax decoder over 3 x (2*hmax + 1) Miller-index classes.
    """

    def __init__(self, attention_blocks, attention_dims, num_heads, ff_dims=None, hmax=50):
        super().__init__()
        if ff_dims is None:
            ff_dims = attention_dims
        # Linear embedding applied to the raw inputs before attention.
        self.embed = tfk.layers.Dense(attention_dims, kernel_initializer='identity')
        self.encoder_layers = [
            SelfAttentionBlock(attention_dims, num_heads, ff_dims=ff_dims)
            for _ in range(attention_blocks)
        ]
        self.decoder_layers = [
            tfk.layers.Dense(3 * (2 * hmax + 1), kernel_initializer='identity'),
            tfk.layers.Reshape((-1, 3, 2 * hmax + 1)),
            tfk.layers.Softmax(axis=-1),
        ]

    def call(self, inputs):
        """
        inputs : (xypos, mask)
        """
        qkv, mask = inputs
        # Embed the positions, then thread (features, mask) through the
        # encoder stack; each block returns the pair for chaining.
        state = (self.embed(qkv), mask)
        for block in self.encoder_layers:
            state = block(state)
        out = state[0]
        for layer in self.decoder_layers:
            out = layer(out)
        return out
| 31.189873 | 103 | 0.618506 | 307 | 2,464 | 4.742671 | 0.263844 | 0.107143 | 0.058379 | 0.043269 | 0.29739 | 0.254808 | 0.17033 | 0.127747 | 0.127747 | 0.050824 | 0 | 0.008994 | 0.278003 | 2,464 | 78 | 104 | 31.589744 | 0.809444 | 0.017451 | 0 | 0.210526 | 0 | 0 | 0.015025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.087719 | 0 | 0.245614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21502ce2af890eea0b19adfd58d4172a57935762 | 2,157 | py | Python | keras/models/topcoder_crnn_finetune.py | Abhay242/language-identification- | 4b05f6cba588bc4862a3034911407f5f503db0d0 | [
"MIT"
] | 3 | 2019-08-20T08:02:21.000Z | 2020-10-17T17:45:13.000Z | keras/models/topcoder_crnn_finetune.py | Abhay242/language-identification- | 4b05f6cba588bc4862a3034911407f5f503db0d0 | [
"MIT"
] | 13 | 2020-01-28T22:32:17.000Z | 2022-02-10T00:01:56.000Z | keras/models/topcoder_crnn_finetune.py | Abhay242/language-identification- | 4b05f6cba588bc4862a3034911407f5f503db0d0 | [
"MIT"
] | null | null | null | from keras.layers.core import Dense, Permute, Reshape
from keras.layers.convolutional import Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
from keras.layers.wrappers import Bidirectional
from keras.layers.recurrent import LSTM
from keras.models import Sequential, load_model
from keras.regularizers import l2
NAME = "Topcoder_CRNN_Finetune"
def create_model(input_shape, config):
    """Build the Topcoder CRNN: a frozen, pretrained conv stack followed
    by a trainable bidirectional LSTM classifier.

    input_shape: spectrogram input shape (y, x, channels).
    config: dict; must contain 'num_classes'.
    Returns the compiled-ready Sequential model.
    """
    weight_decay = 0.001
    model = Sequential()

    # (filters, kernel_size, pool_strides) for the five conv blocks;
    # was five copy-pasted Conv/BN/MaxPool stanzas.
    conv_blocks = [
        (16, 7, (2, 2)),
        (32, 5, (2, 2)),
        (64, 3, (2, 2)),
        (128, 3, (2, 1)),
        (256, 3, (2, 1)),
    ]
    for i, (filters, kernel, strides) in enumerate(conv_blocks):
        conv_kwargs = {"W_regularizer": l2(weight_decay), "activation": "relu"}
        if i == 0:
            # Only the first layer needs the input shape.
            conv_kwargs["input_shape"] = input_shape
        model.add(Convolution2D(filters, kernel, kernel, **conv_kwargs))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2), strides=strides))

    # TODO(review): hard-coded checkpoint path; consider moving to config.
    model.load_weights("logs/2017-04-08-13-03-44/weights.08.model", by_name=True)
    # Freeze the pretrained convolutional feature extractor.
    for layer in model.layers:
        layer.trainable = False

    # (bs, y, x, c) --> (bs, x, y, c): make the time axis (x) leading.
    model.add(Permute((2, 1, 3)))
    # (bs, x, y, c) --> (bs, x, y * c): flatten features per time step.
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y * c)))
    model.add(Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
| 37.189655 | 114 | 0.701901 | 305 | 2,157 | 4.845902 | 0.288525 | 0.102842 | 0.060893 | 0.067659 | 0.435724 | 0.426252 | 0.415426 | 0.389039 | 0.37889 | 0.37889 | 0 | 0.047437 | 0.149745 | 2,157 | 57 | 115 | 37.842105 | 0.758451 | 0.09643 | 0 | 0.277778 | 0 | 0 | 0.055098 | 0.032441 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.222222 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
215270aa6d52000e2c885ac3642c2a4eaa77fe73 | 7,679 | py | Python | scripts/buildkite/tasks/__main__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | scripts/buildkite/tasks/__main__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | scripts/buildkite/tasks/__main__.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import os
import sys
import logging
import subprocess as sp
import click
from . import buildkite
logger = logging.getLogger(__name__)
BURN_IN_DEFAULT = 50 # Iterations
@click.group()
def cli():
"""
CLI tool for running Buildkite jobs
"""
@click.command()
def calibrate():
"""Run a calibration job in Buildkite"""
logger.info("Starting calibration.")
# Pull in envars
build_number = os.environ["BUILDKITE_BUILD_NUMBER"]
commit = os.environ["BUILDKITE_COMMIT"]
branch = os.environ["BUILDKITE_BRANCH"]
# Pull in metadata
trigger_downstream = buildkite.get_metadata("trigger-downstream")
model_name = buildkite.get_metadata("model-name")
num_chains = buildkite.get_metadata("mcmc-num-chains")
run_time_hours = buildkite.get_metadata("mcmc-runtime")
# Run the calibration
run_time_seconds = int(float(run_time_hours) * 3600)
job_name = f"{model_name}-{build_number}"
msg = "Running calbration job %s for %s model with %s chains for %s hours (%s seconds)"
logger.info(msg, job_name, model_name, num_chains, run_time_hours, run_time_seconds)
try:
cli_args = {
"job": job_name,
"calibration": model_name,
"chains": num_chains,
"runtime": run_time_seconds,
}
stdout = run_aws_script("calibrate", cli_args)
# Get `run_id` from string with format "... Calibration completed for $RUN_ID"
run_id = None
lines = (l.strip() for l in stdout.split("\n"))
for l in lines:
if "Calibration completed for " in l:
run_id = l.split(" ")[-1]
if not run_id:
raise ValueError("Could not find `run_id` in stdout")
except Exception:
logger.exception("Calibration for job %s failed", job_name)
sys.exit(1)
logging.info("Calibration for job %s succeeded", job_name)
if trigger_downstream != "yes":
logger.info("Not triggering full model run.")
else:
logger.info("Triggering full model run.")
pipeline_data = {
"steps": [
{
"label": "Trigger full model run",
"trigger": "full-model-run",
"async": True,
"build": {
"message": f"Triggered by calibration {model_name} (build {build_number})",
"commit": commit,
"branch": branch,
"env": {"RUN_ID": run_id},
},
}
]
}
buildkite.trigger_pipeline(pipeline_data)
logger.info("Results available at %s", get_run_url(run_id))
@click.command()
def full():
"""Run a full model run job in Buildkite"""
logger.info("Starting a full model run.")
# Pull in envars
run_id = os.environ.get("RUN_ID")
build_number = os.environ["BUILDKITE_BUILD_NUMBER"]
commit = os.environ["BUILDKITE_COMMIT"]
branch = os.environ["BUILDKITE_BRANCH"]
if not run_id:
# Pull in user-supplied metadata
logger.info("Using user-supplied run name.")
burn_in_option = buildkite.get_metadata("full-burn-in")
run_id = buildkite.get_metadata("run-id")
trigger_downstream = buildkite.get_metadata("trigger-downstream")
use_latest_code = buildkite.get_metadata("use-latest-code")
burn_in = burn_in_option or BURN_IN_DEFAULT
if not run_id:
logger.error("No user-supplied `run_id` found.")
sys.exit(1)
else:
# This is a job triggered by an upstream job
logger.info("Found run id from envar: %s", run_id)
trigger_downstream = "yes"
use_latest_code = "no"
burn_in = BURN_IN_DEFAULT
# Run the full models
model_name, _, _ = read_run_id(run_id)
job_name = f"{model_name}-{build_number}"
msg = "Running full model for %s with burn in %s"
logger.info(msg, model_name, burn_in)
try:
cli_args = {
"job": job_name,
"run": run_id,
"burn-in": burn_in,
}
if use_latest_code == "yes":
cli_args["latest-code"] = ""
run_aws_script("full", cli_args)
except Exception:
logger.exception("Full model run for job %s, run id %s failed", job_name, run_id)
sys.exit(1)
logging.info("Full model run job %s succeeded", job_name)
if trigger_downstream != "yes":
logger.info("Not triggering PowerBI processing.")
else:
logger.info("Triggering PowerBI processing.")
pipeline_data = {
"steps": [
{
"label": "Trigger PowerBI processing",
"trigger": "powerbi-processing",
"async": True,
"build": {
"message": f"Triggered by full model run {model_name} (build {build_number})",
"commit": commit,
"branch": branch,
"env": {"RUN_ID": run_id},
},
}
]
}
buildkite.trigger_pipeline(pipeline_data)
logger.info("Results available at %s", get_run_url(run_id))
@click.command()
def powerbi():
"""Run a PowerBI job in Buildkite"""
logger.info("Starting PowerBI post processing.")
# Pull in envars
run_id = os.environ.get("RUN_ID")
build_number = os.environ["BUILDKITE_BUILD_NUMBER"]
if not run_id:
# Pull in user-supplied metadata
logger.info("Using user-supplied run name.")
run_id = buildkite.get_metadata("run-id")
if not run_id:
logger.error("No user-supplied `run_id` found.")
sys.exit(1)
else:
# This is a job triggered by an upstream job
logger.info("Found run id from envar: %s", run_id)
# Run the powerbi processing
model_name, _, _ = read_run_id(run_id)
job_name = f"{model_name}-{build_number}"
logger.info("Running PowerBI post processing for model %s", model_name)
try:
cli_args = {
"job": job_name,
"run": run_id,
}
run_aws_script("powerbi", cli_args)
except Exception:
logger.info("Failed to run PowerBI post processing for run: %s", run_id)
sys.exit(1)
logger.info("PowerBI post processing for model %s suceeded", model_name)
logger.info("Results available at %s", get_run_url(run_id))
def run_aws_script(cmd: str, args: dict) -> str:
"""
Run the AWS CLI script, streams stdout to the user and returns stdout as a string.
"""
args_str = " ".join([f"--{k} {v}" for k, v in args.items()])
cmd_str = f"../aws/run.sh run {cmd} {args_str}"
stdout_lines = []
proc = sp.Popen(
cmd_str, stdout=sp.PIPE, shell=True, stderr=sp.STDOUT, encoding="utf-8", bufsize=1
)
for line in iter(proc.stdout.readline, ""):
sys.stdout.write(line)
stdout_lines.append(line)
print(f"Waiting for process with PID {proc.pid} to finish.")
retcode = proc.wait()
if retcode > 0:
raise Exception(f"Command failed with return code {retcode}: {cmd_str}")
return "".join(stdout_lines)
def get_run_url(run_id: str):
return f"http://autumn-data.s3-website-ap-southeast-2.amazonaws.com/{run_id}"
def read_run_id(run_id: str):
"""Read data from run id"""
parts = run_id.split("-")
git_commit = parts[-1]
timestamp = parts[-2]
model_name = "-".join(parts[:-2])
return model_name, timestamp, git_commit
cli.add_command(calibrate)
cli.add_command(full)
cli.add_command(powerbi)
cli()
| 32.816239 | 102 | 0.594869 | 984 | 7,679 | 4.453252 | 0.178862 | 0.052487 | 0.041077 | 0.013692 | 0.465769 | 0.411 | 0.370835 | 0.317435 | 0.317435 | 0.293702 | 0 | 0.003652 | 0.286756 | 7,679 | 233 | 103 | 32.957082 | 0.796421 | 0.079437 | 0 | 0.426966 | 0 | 0.011236 | 0.265344 | 0.020982 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039326 | false | 0 | 0.033708 | 0.005618 | 0.089888 | 0.005618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21538763f9c252fe1d0c8fe8b65b09f0a22a7791 | 7,407 | py | Python | config/settings.py | uktrade/dnb-service | c8f22af82af70f33b8d6bf92e3ca6992fce1f220 | [
"MIT"
] | 4 | 2019-12-03T14:59:50.000Z | 2020-04-28T12:42:24.000Z | config/settings.py | uktrade/dnb-service | c8f22af82af70f33b8d6bf92e3ca6992fce1f220 | [
"MIT"
] | 17 | 2019-04-11T13:12:57.000Z | 2022-01-13T10:08:07.000Z | config/settings.py | uktrade/dnb-service | c8f22af82af70f33b8d6bf92e3ca6992fce1f220 | [
"MIT"
] | 3 | 2021-05-11T16:13:57.000Z | 2022-03-08T15:57:19.000Z | import os
import dj_database_url
import environ
import sentry_sdk
from celery.schedules import crontab
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env = environ.Env(
DEBUG=(bool, False),
RESTRICT_ADMIN=(bool, False),
)
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
VCAP_SERVICES = env.json('VCAP_SERVICES', {})
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_results',
'django_celery_beat',
'rest_framework',
'rest_framework.authtoken',
'django_prometheus',
'elasticapm.contrib.django',
'api',
'company',
'core',
'dnb_direct_plus',
'dnb_worldbase',
'health_check',
'user',
'workspace',
'drf_yasg',
]
if DEBUG:
INSTALLED_APPS.append('django_extensions')
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'core.middleware.IpRestrictionMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Staff-sso config
AUTHBROKER_URL = env('AUTHBROKER_URL')
AUTHBROKER_CLIENT_ID = env('AUTHBROKER_CLIENT_ID')
AUTHBROKER_CLIENT_SECRET = env('AUTHBROKER_CLIENT_SECRET')
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'authbroker_client.backends.AuthbrokerBackend',
]
LOGIN_REDIRECT_URL = 'admin:index'
AUTH_USER_MODEL = 'user.User'
ENABLE_STAFF_SSO = env.bool('ENABLE_STAFF_SSO', True)
# IP restriction
IP_RESTRICT = env.bool('IP_RESTRICT')
IP_RESTRICT_APPS = ['admin']
IP_RESTRICT_PATH_NAMES = ['prometheus-django-metrics']
ALLOWED_IPS = env.list('ALLOWED_IPS')
ALLOWED_IP_RANGES = env.list('ALLOWED_IP_RANGES', default=[])
# DNB API
DNB_API_USERNAME = env('DNB_API_USERNAME')
DNB_API_PASSWORD = env('DNB_API_PASSWORD')
DNB_API_RENEW_ACCESS_TOKEN_SECONDS_REMAINING = 300
DNB_MONITORING_REGISTRATION_REFERENCE = env('DNB_MONITORING_REGISTRATION_REFERENCE')
DNB_MONITORING_S3_BUCKET = env('DNB_S3_MONITORING_BUCKET')
DNB_ARCHIVE_PROCESSED_FILES = env.bool('DNB_ARCHIVE_PROCESSED_FILES')
DNB_ARCHIVE_PATH = env('DNB_ARCHIVE_PATH', default='archive/')
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
# Redis
# Presumably a Cloud Foundry-style deployment: when VCAP_SERVICES carries
# a bound Redis service, take its URI and append the TLS cert requirement
# Celery needs for rediss:// connections -- TODO confirm platform.
if 'redis' in VCAP_SERVICES:
    REDIS_URL = VCAP_SERVICES['redis'][0]['credentials']['uri']
    REDIS_CELERY_URL = f'{REDIS_URL}?ssl_cert_reqs=CERT_REQUIRED'
else:
    # Local/dev fallback: plain REDIS_URL envar (may be empty).
    REDIS_URL = env.str('REDIS_URL', '')
    REDIS_CELERY_URL = REDIS_URL
# Celery
CELERY_BROKER_URL = REDIS_CELERY_URL
CELERY_RESULT_BACKEND = 'django-db'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_ALWAYS_EAGER = env.bool('CELERY_TASK_ALWAYS_EAGER', default=False)
sentry_sdk.init(
env('SENTRY_DSN'),
environment=env('SENTRY_ENVIRONMENT'),
integrations=[
DjangoIntegration(),
CeleryIntegration()
]
)
# DRF config
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        # Fixed: TokenAuthentication was listed twice.
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'PAGE_SIZE': 100,
    'DEFAULT_PAGINATION_CLASS': 'api.pagination.CustomCursorPagination',
    'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# GOVUK notify
GOVUK_NOTIFICATIONS_API_KEY = env('GOVUK_NOTIFICATIONS_API_KEY')
# Change requests
CHANGE_REQUESTS_BATCH_SIZE = env.int('CHANGE_REQUESTS_BATCH_SIZE', 20)
CHANGE_REQUESTS_RECIPIENTS = env.list('CHANGE_REQUESTS_RECIPIENTS', default=[])
# Investigation requests
INVESTIGATION_REQUESTS_BATCH_SIZE = env.int('INVESTIGATION_REQUESTS_BATCH_SIZE', 20)
INVESTIGATION_REQUESTS_RECIPIENTS = env.list('INVESTIGATION_REQUESTS_RECIPIENTS', default=[])
# Celery beat
CELERY_BEAT_SCHEDULE = {}
if env.bool('ENABLE_CHANGE_REQUESTS_SUBMISSION', False):
CELERY_BEAT_SCHEDULE['change_requests_submission'] = {
'task': 'company.tasks.send_pending_change_requests',
'schedule': crontab(minute=0, hour=1,),
}
if env.bool('ENABLE_INVESTIGATION_REQUESTS_SUBMISSION', False):
CELERY_BEAT_SCHEDULE['investigation_requests_submission'] = {
'task': 'company.tasks.send_pending_investigation_requests',
'schedule': crontab(minute=0, hour=2,),
}
# Elastic APM settings
ELASTIC_APM_ENVIRONMENT = env('SENTRY_ENVIRONMENT')
ELASTIC_APM = {
'SERVICE_NAME': 'dnb-service',
'SECRET_TOKEN': env('ELASTIC_APM_SECRET_TOKEN'),
'SERVER_URL' : env('ELASTIC_APM_URL'),
'ENVIRONMENT': ELASTIC_APM_ENVIRONMENT,
}
| 27.63806 | 93 | 0.73444 | 831 | 7,407 | 6.233454 | 0.310469 | 0.040154 | 0.026255 | 0.019305 | 0.150965 | 0.126062 | 0.066988 | 0.015444 | 0.015444 | 0 | 0 | 0.00489 | 0.144053 | 7,407 | 267 | 94 | 27.741573 | 0.812145 | 0.09518 | 0 | 0.038462 | 0 | 0 | 0.463152 | 0.352756 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.032967 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2154b26da699e433e31a94748940bfef5493cd1a | 13,362 | py | Python | instaread/instapaper/instapaper.py | anhdat/instaread | c554c58ae32119d3d80c8db1a163a1712c3f4f90 | [
"MIT"
] | null | null | null | instaread/instapaper/instapaper.py | anhdat/instaread | c554c58ae32119d3d80c8db1a163a1712c3f4f90 | [
"MIT"
] | null | null | null | instaread/instapaper/instapaper.py | anhdat/instaread | c554c58ae32119d3d80c8db1a163a1712c3f4f90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------
# Copyright (c) 2013-2016, Ryan Galloway (ryan@rsgalloway.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the name of the software nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---------------------------------------------------------------------------------------------
# docs and latest version available for download at
# http://github.com/rsgalloway/instapaper
# ---------------------------------------------------------------------------------------------
import sys
if sys.version_info > (3, 0):
import urllib.parse as urlparse
from urllib.parse import urlencode
from html.parser import HTMLParser
else:
import urlparse
from urllib import urlencode
from HTMLParser import HTMLParser
import json
import oauth2 as oauth
from re import sub
from sys import stderr
from traceback import print_exc
__author__ = "Ryan Galloway <ryan@rsgalloway.com>"
__doc__ = """
An unofficial Python wrapper to the full Instapaper API.
http://www.instapaper.com/api/full
"""
__todo__ = """
- refactor http requests to standalone function
"""
_BASE_ = "https://www.instapaper.com"
_API_VERSION_ = "api/1"
_ACCESS_TOKEN_ = "oauth/access_token"
_ACCOUNT_ = "account/verify_credentials"
_BOOKMARKS_LIST_ = "bookmarks/list"
_BOOKMARKS_TEXT_ = "bookmarks/get_text"
_BOOKMARKS_STAR_ = "bookmarks/star"
_BOOKMARKS_UNSTAR_ = "bookmarks/unstar"
_BOOKMARKS_ARCHIVE_ = "bookmarks/archive"
_BOOKMARKS_UNARCHIVE_ = "bookmarks/unarchive"
_BOOKMARKS_ADD_ = "bookmarks/add"
_BOOKMARKS_DELETE_ = "bookmarks/delete"
_BOOKMARKS_MOVE_ = "bookmarks/move"
_FOLDERS_ADD_ = "folders/add"
_FOLDERS_LIST_ = "folders/list"
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
text = data.strip()
if len(text) > 0:
text = sub('[ \t\r\n]+', ' ', text)
self.__text.append(text + ' ')
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.__text.append('\n\n')
elif tag == 'br':
self.__text.append('\n')
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n\n')
def text(self):
return ''.join(self.__text).strip()
def dehtml(text):
    """Convert an HTML fragment to plain text.

    Accepts bytes (decoded as UTF-8) or str.  Returns None for empty
    input; on any error the traceback is printed to stderr and the
    original input is returned unchanged (best-effort behaviour kept
    from the original).
    """
    try:
        if not text:
            return None
        # Fixed: the original called text.decode('UTF-8') unconditionally,
        # which raised AttributeError for str input (the normal Python 3
        # case) and fell into the except clause, returning raw HTML.
        if isinstance(text, bytes):
            text = text.decode('UTF-8')
        parser = _DeHTMLParser()
        parser.feed(text)
        parser.close()
        return parser.text()
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        print_exc(file=stderr)
        return text
class Folder(object):
    """One Instapaper folder, built from a folders/list record such as::

        {'sync_to_mobile': 1, 'position': 1468247492,
         'folder_id': 2973166, 'slug': 'swift',
         'display_title': 'Swift', 'title': 'Swift',
         'type': 'folder'}
    """

    def __init__(self, id, title, slug):
        super().__init__()
        self.id = id
        self.title = title
        self.slug = slug

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'Folder: {id} - {title}'.format(id=self.id, title=self.title)
class Bookmark(object):
    """A single Instapaper bookmark.

    *params* keys come straight from the bookmarks/list API and are set
    as attributes, e.g.::

        {'hash': '21iTZfCr', 'description': '', 'title': '...',
         'url': 'https://...', 'time': 1422657611,
         'progress_timestamp': 1422662236, 'bookmark_id': 550386320,
         'progress': 0.0, 'starred': '0', 'type': 'bookmark',
         'content': '', 'private_source': '',
         # is_private_from_source is used when adding a bookmark
         'is_private_from_source': ''}

    *parent* is the authenticated Instapaper client whose ``http``
    attribute performs the OAuth-signed requests.
    """

    def __init__(self, parent, params):
        self.parent = parent
        self.__text = None
        self.__html = None
        self.__dict__.update(params)
        # The API reports 'starred' as '0'/'1'; normalize to bool.
        # Missing key (AttributeError) becomes False, as before.
        try:
            self.starred = (self.starred == '1')
        except AttributeError:
            self.starred = False

    def __str__(self):
        return '{}\n{}\n{}'.format(
            self.bookmark_id,
            self.title,
            self.url,
        )

    def __post(self, endpoint, **params):
        """POST *params* to an API *endpoint*; return (response, body).

        Consolidates the request boilerplate previously duplicated in
        every action method.
        """
        return self.parent.http.request(
            "/".join([_BASE_, _API_VERSION_, endpoint]),
            method='POST',
            body=urlencode(params))

    @property
    def html(self):
        """Full article HTML, fetched lazily and cached on success."""
        if self.__html is None:
            response, html = self.__post(
                _BOOKMARKS_TEXT_, bookmark_id=self.bookmark_id)
            if response.get("status") == "200":
                self.__html = html.decode('utf-8')
        return self.__html

    @property
    def text(self):
        """Plain-text rendering of html, computed lazily."""
        if self.__text is None:
            self.__text = dehtml(self.html)
        return self.__text

    def star(self):
        """Star this bookmark; returns True on success."""
        response, _ = self.__post(
            _BOOKMARKS_STAR_, bookmark_id=self.bookmark_id)
        if response.get("status") == "200":
            self.starred = True
            return True
        return False

    def unstar(self):
        """Unstar this bookmark; returns True on success."""
        response, _ = self.__post(
            _BOOKMARKS_UNSTAR_, bookmark_id=self.bookmark_id)
        if response.get("status") == "200":
            self.starred = False
            return True
        return False

    def archive(self):
        """Archive this bookmark; returns True on success.

        Fixed: the original set self.starred = True here (copy-paste
        from star()); archiving no longer touches the starred flag.
        """
        response, _ = self.__post(
            _BOOKMARKS_ARCHIVE_, bookmark_id=self.bookmark_id)
        return response.get("status") == "200"

    def unarchive(self):
        """Unarchive this bookmark; returns True on success.

        Fixed: the original set self.starred = False here (copy-paste
        from unstar()); unarchiving no longer touches the starred flag.
        """
        response, _ = self.__post(
            _BOOKMARKS_UNARCHIVE_, bookmark_id=self.bookmark_id)
        return response.get("status") == "200"

    def delete(self):
        """Delete this bookmark; returns True on success."""
        response, _ = self.__post(
            _BOOKMARKS_DELETE_, bookmark_id=self.bookmark_id)
        return response.get("status") == "200"

    def save(self, folder_id=None):
        """Create/update this bookmark via bookmarks/add.

        Sends whichever of content, is_private_from_source, url, title
        and description are set and truthy; optionally files it into
        *folder_id*.  Returns the raw response body on success, else None.
        """
        encoded_values = {}
        # Replaces five try/except blocks with one getattr loop.
        for attr in ('content', 'is_private_from_source', 'url',
                     'title', 'description'):
            value = getattr(self, attr, None)
            if value:
                encoded_values[attr] = value
        if folder_id:
            encoded_values['folder_id'] = folder_id
        response, html = self.__post(_BOOKMARKS_ADD_, **encoded_values)
        if response.get("status") == "200":
            # NOTE(review): stored undecoded, unlike the html property;
            # preserved from the original.
            self.__html = html
            return self.__html

    def move(self, folder_id):
        """Move this bookmark into *folder_id*; returns True on success."""
        response, _ = self.__post(
            _BOOKMARKS_MOVE_,
            bookmark_id=self.bookmark_id,
            folder_id=folder_id)
        return response.get("status") == "200"
class Instapaper(object):
    """Client for the Instapaper Full API.

    Authenticate with login()/login_with_token() before calling any of
    the data methods (user, bookmarks, folders, create_folder).
    """

    def __init__(self, oauthkey, oauthsec):
        """Store the OAuth consumer; no network traffic happens here."""
        self.consumer = oauth.Consumer(oauthkey, oauthsec)
        self.client = oauth.Client(self.consumer)
        self.token = None
        self.http = None

    def get_token_and_secret(self, username, password):
        """Exchange username/password for an xAuth (token, secret) pair."""
        response, content = self.client.request(
            "/".join([_BASE_, _API_VERSION_, _ACCESS_TOKEN_]),
            "POST", urlencode({
                'x_auth_mode': 'client_auth',
                'x_auth_username': username,
                'x_auth_password': password}))
        _oauth = dict(urlparse.parse_qsl(content.decode('utf-8')))
        return (_oauth['oauth_token'], _oauth['oauth_token_secret'])

    def login(self, username, password):
        """Authenticate with username/password (xAuth).

        Now delegates to get_token_and_secret() instead of duplicating
        the token-request code.
        """
        oauth_token, oauth_token_secret = self.get_token_and_secret(
            username, password)
        self.login_with_token(oauth_token, oauth_token_secret)

    def login_with_token(self, oauth_token, oauth_token_secret):
        """
        When you want to access a user's data using their existing token
        """
        self.token = oauth.Token(oauth_token, oauth_token_secret)
        self.http = oauth.Client(self.consumer, self.token)

    def user(self):
        """Return the authenticated user record; raise on API error."""
        response, data = self.http.request(
            "/".join([_BASE_, _API_VERSION_, _ACCOUNT_]),
            method='POST',
            body=None)
        user = json.loads(data.decode('utf-8'))[0]
        if user.get("type") == "error":
            # Fixed: previously raised Exception(data.get("message")),
            # but `data` is bytes and has no .get() -- that line itself
            # raised AttributeError and masked the API error.
            raise Exception(user.get("message"))
        return user

    def bookmarks(self, folder="unread", limit=10, have=""):
        """List bookmarks as Bookmark objects.

        folder: unread (default), starred, archive, or a folder_id value.
        limit: number between 1 and 500 (the API's own default is 25;
               this wrapper passes 10 unless told otherwise).
        have: 'have' string understood by the API for deduplication.
        Raises Exception if the API returns an error record.
        """
        response, data = self.http.request(
            "/".join([_BASE_, _API_VERSION_, _BOOKMARKS_LIST_]),
            method='POST',
            body=urlencode({
                'folder_id': folder,
                'limit': limit,
                'have': have}))
        marks = []
        for item in json.loads(data.decode('utf-8')):
            if item.get("type") == "error":
                raise Exception(item.get("message"))
            elif item.get("type") == "bookmark":
                marks.append(Bookmark(self, item))
        return marks

    def folders(self):
        """Return all of the user's folders as Folder objects."""
        response, data = self.http.request(
            "/".join([_BASE_, _API_VERSION_, _FOLDERS_LIST_]),
            method='POST',
            body=urlencode({}))
        items = json.loads(data.decode('utf-8'))
        return [Folder(item['folder_id'], item['title'], item['slug'])
                for item in items]

    def create_folder(self, title):
        """
        title: Required. Title of the folder.

        Returns True on success; raises Exception with the raw response
        otherwise.
        """
        response, data = self.http.request(
            "/".join([_BASE_, _API_VERSION_, _FOLDERS_ADD_]),
            method='POST',
            body=urlencode({
                'title': title}))
        if response.get("status") == "200":
            return True
        raise Exception(response)
| 32.275362 | 95 | 0.570648 | 1,447 | 13,362 | 5.022806 | 0.228749 | 0.022014 | 0.028894 | 0.034673 | 0.385663 | 0.316868 | 0.299395 | 0.293753 | 0.287975 | 0.263209 | 0 | 0.013488 | 0.295315 | 13,362 | 413 | 96 | 32.353511 | 0.75839 | 0.181185 | 0 | 0.445993 | 0 | 0 | 0.104914 | 0.006836 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.034843 | 0.041812 | 0.013937 | 0.247387 | 0.006969 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2156ad8ef70c868eff5a4e0ce1a0b2d9c87bd37f | 5,586 | py | Python | scraping/bigkinds_db_schedule.py | jaywoong/project_stock | 158b7ce209908b6dbe0f411a4d35b7690245a3ee | [
"Apache-2.0"
] | 2 | 2021-09-13T13:39:19.000Z | 2021-09-17T01:35:59.000Z | scraping/bigkinds_db_schedule.py | jaywoong/project_stock | 158b7ce209908b6dbe0f411a4d35b7690245a3ee | [
"Apache-2.0"
] | null | null | null | scraping/bigkinds_db_schedule.py | jaywoong/project_stock | 158b7ce209908b6dbe0f411a4d35b7690245a3ee | [
"Apache-2.0"
] | 1 | 2021-08-31T02:48:28.000Z | 2021-08-31T02:48:28.000Z | import sqlite3
import schedule
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
#import chromedriver_autoinstaller
from bs4 import BeautifulSoup
import pandas as pd
import time
class Bigkinds:
    """Selenium scraper for news articles on the BigKinds news archive.

    Collects per-article metadata (title, press, category, date) and the
    full article text for a search keyword, then stores the results in a
    SQLite table via the module-level connection ``conn``.
    """
    def __init__(self):
        self.options = Options()
        # headless mode runs without showing the browser window or page navigation
        self.options.add_argument('headless')
        self.driver = webdriver.Chrome('C:\chromedriver.exe', options=self.options)
    def getURL(self,url):
        """Open *url* on BigKinds and set the search period to one day."""
        # navigate to the BigKinds site
        self.driver.implicitly_wait(2)
        self.driver.set_window_size(1300, 700)
        self.driver.get(url)
        # open the search-period panel
        self.driver.find_element_by_xpath('//*[@id="collapse-step-1-body"]/div[3]/div/div[1]/div[1]/a').click()
        time.sleep(3)
        # 1 day
        self.driver.find_element_by_xpath('//*[@id="srch-tab1"]/div/div[1]/span[1]/label').click()
        # 1 week
        # self.driver.find_element_by_xpath('//*[@id="srch-tab1"]/div/div[1]/span[2]/label').click()
        time.sleep(3)
    def getPage(self, url, querytxt):
        """Search *querytxt*, filter to the Economy category and record the total page count."""
        self.getURL(url)
        # enter the search keyword
        searchbox = self.driver.find_element_by_id("total-search-key")
        searchbox.send_keys(querytxt)
        searchbox.send_keys("\n") # press Enter to search
        time.sleep(2)
        # set article category -> Economy
        self.driver.find_element_by_xpath('//*[@id="filterTab03"]/li[2]/span').click()
        time.sleep(3)
        # show 100 articles per page
        self.driver.find_element_by_xpath('//*[@id="select2"]/option[4]').click()
        time.sleep(3)
        # total number of result pages
        soup = BeautifulSoup(self.driver.page_source, 'html.parser')
        pageNum = soup.select('div.lastNum')[0].text
        self.totalPage = int(pageNum)
        del soup
    def crawling(self, url, stockname):
        """Walk every result page for *stockname*, filling self.contents and self.texts."""
        self.getPage(url, stockname)
        print('{} crawling start.'.format(stockname))
        print('Total Pages : ', self.totalPage)
        curPage = 1 # current page
        self.contents = []
        self.texts = []
        while curPage <= self.totalPage:
            # (re)initialize bs4 for the current page source
            soup = BeautifulSoup(self.driver.page_source, 'html.parser')
            # list of articles on this page
            articles = soup.select('div.news-inner')
            # print the page number
            print('Current Page : {}'.format(curPage))
            # per-article metadata
            for article in articles:
                title = article.select_one('span.title-elipsis').text.strip()
                try:
                    press = article.select_one('div.info > div > a.provider').text.strip()
                    category = article.select_one('div.info > div > span.bullet-keyword').text.strip()
                    date = article.select_one('div.info > p.name').text.strip()
                except:
                    press = None
                    pass
                if press != None:
                    self.contents.append([title, press, category, date])
            time.sleep(3)
            # full article text: open each article's modal, read the body, close it
            for i in range(len(articles)):
                self.driver.find_elements_by_css_selector('span.title-elipsis')[i].click()
                time.sleep(4)
                text = self.driver.find_elements_by_css_selector('div.news-view-body')[0].text
                self.texts.append(text.replace('\n', ''))
                self.driver.find_element_by_xpath("//div[@id='news-detail-modal']/div/div/button").click()
                time.sleep(4)
            # advance the page counter
            curPage += 1
            if curPage > self.totalPage:
                print('{} crawling succeed.'.format(stockname))
                break
            # click through to the next page (the results container xpath varies)
            self.driver.implicitly_wait(3)
            try:
                nextbtn = self.driver.find_element_by_xpath('//*[@id="news-results-tab"]/div[1]/div[2]/div/div/div/div/div[4]/a')
            except:
                nextbtn = self.driver.find_element_by_xpath('//*[@id="news-results-tab"]/div[6]/div[2]/div/div/div/div/div[4]/a')
            self.driver.execute_script("arguments[0].click();", nextbtn)
            # drop the bs4 instance
            del soup
            time.sleep(3)
    def saving(self, stockname):
        """Persist the crawled rows into the '<stockname>_news' table of the shared DB."""
        # close the browser
        self.driver.close()
        self.df_news = pd.DataFrame(data=self.contents, columns =['title', 'press', 'category', 'date'])
        self.df_news['text'] = self.texts
        # NOTE(review): relies on the module-global `conn` being open — confirm caller context
        self.df_news.to_sql('{}_news'.format(stockname), conn, if_exists='append', index=False) # table name
        conn.commit()
        print('{} inserted to DB.'.format(stockname))
if __name__ == "__main__":
    conn = sqlite3.connect('bigkinds.db')
    c = conn.cursor()

    def crawl():
        """Crawl every ticker's news, store it in the DB, then close the connection."""
        url = 'https://www.bigkinds.or.kr/v2/news/index.do'
        # BUG FIX: '신한지주,' '하나금융지주' had the comma INSIDE the quotes, so Python's
        # adjacent-literal concatenation silently fused both names into one list entry.
        stocknames = ['삼성전자', 'SK하이닉스', 'LG화학', 'LG전자', 'LG이노텍', '삼성에스디에스', '삼성전기', '삼성생명', '삼성화재',
                      'SK텔레콤', 'KT', '현대건설', '삼성엔지니어링', '대한항공', '현대차', '기아', '오리온', 'CJ제일제당', '오뚜기',
                      '미래에셋증권', '한국금융지주', 'NH투자증권', 'LG생활건강', '아모레퍼시픽', '아모레G', '강원랜드', '호텔신라',
                      'KB금융', '신한지주', '하나금융지주', '롯데쇼핑', '이마트', '신세계', 'GS리테일', 'NAVER', '카카오', 'CJ ENM',
                      '스튜디오드래곤', '삼성바이오로직스', '셀트리온', '한미약품', '엔씨소프트', '넷마블', '한화솔루션', 'LS', 'POSCO',
                      '고려아연', 'S-Oil', 'SK이노베이션', 'HMM']
        for stockname in stocknames:
            # BUG FIX: was `crawl = Bigkinds()`, shadowing this function so the
            # schedule jobs below could no longer reference `crawl` after one run.
            scraper = Bigkinds()
            scraper.crawling(url, stockname)
            scraper.saving(stockname)
        conn.close()

    # schedule.every().day.at("05:30").do(crawl)  # run daily at 05:30
    # schedule.every().monday.at("5:30").do(crawl)  # run every Monday at 05:30
    while True:
        schedule.run_pending()
        time.sleep(1)
| 41.686567 | 129 | 0.557286 | 685 | 5,586 | 4.443796 | 0.40292 | 0.065703 | 0.050591 | 0.062089 | 0.207622 | 0.19251 | 0.166229 | 0.133377 | 0.081472 | 0.067674 | 0 | 0.017259 | 0.284282 | 5,586 | 133 | 130 | 42 | 0.744122 | 0.083602 | 0 | 0.156863 | 0 | 0.029412 | 0.197761 | 0.071092 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.009804 | 0.068627 | 0 | 0.137255 | 0.04902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2157539dbdb8df956fc7c21309e4abc0c35771d6 | 3,157 | py | Python | src/main.py | f0lg0/geneticAlgorithm-1 | f9c0ee67a450db7915e81bc5cb492d7c49a1ae05 | [
"MIT"
] | 17 | 2020-02-03T20:46:46.000Z | 2020-06-28T05:51:39.000Z | src/main.py | f0lg0/geneticAlgorithm-PY | f9c0ee67a450db7915e81bc5cb492d7c49a1ae05 | [
"MIT"
] | 5 | 2020-02-04T20:28:57.000Z | 2020-05-02T08:17:19.000Z | src/main.py | f0lg0/geneticAlgorithm-1 | f9c0ee67a450db7915e81bc5cb492d7c49a1ae05 | [
"MIT"
] | 4 | 2020-02-04T20:09:57.000Z | 2020-06-28T05:51:40.000Z | import random
import time
from displayBanner import displayBanner, choice
# Phrase the evolutionary search tries to reproduce.
TARGET = "genetic algorithm"
# One gene per character of the target phrase.
INDIVIDUAL_SIZE = len(TARGET)
# Number of individuals kept alive each generation.
POPULATION_SIZE = 100
# Probability that an individual receives a random point mutation.
MUTATION_RATE = 0.1
# Number of individuals sampled for each tournament-selection round.
TOURNAMENT_SELECTION_SIZE = 40
# Gene alphabet: lowercase, space, uppercase.
KEYS = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class Individual:
    """One candidate solution: a character list (`_dna`) scored against TARGET."""
    def __init__(self):
        # Random DNA; fitness/score are computed lazily by the `fitness` property.
        self._dna = random.choices(KEYS, k=INDIVIDUAL_SIZE)
        self._fitness = -1
        self._score = -1
    def getDNA(self):
        # NOTE: resetting the cached score/fitness here deliberately invalidates the
        # cache, because callers mutate the returned list in place (crossover/mutation).
        self._fitness = -1
        self._score = -1
        return self._dna
    @property
    def fitness(self):
        """Fraction of positions where the DNA matches TARGET (cached until invalidated)."""
        if self._score == -1:
            self._score = 0
            for own_letter, target_letter in zip(self._dna, TARGET):
                if own_letter == target_letter:
                    self._score += 1
            self._fitness = self._score / len(TARGET)
        return self._fitness
    def __str__(self):
        return "".join(self._dna)
class Population:
    """A collection of Individual candidates of a given size."""

    def __init__(self, size):
        # Build `size` fresh random individuals.
        self._population = [Individual() for _ in range(size)]

    def getPopulation(self):
        """Return the (mutable, shared) underlying list of individuals."""
        return self._population
class GeneticAlgorithm:
    """Tournament selection + one-point crossover + point mutation over a Population."""

    def selectTournamentPopulation(self, pop):
        """Sample TOURNAMENT_SELECTION_SIZE members (with replacement), sorted best-first."""
        contenders = Population(0)
        chosen = contenders.getPopulation()
        for _ in range(TOURNAMENT_SELECTION_SIZE):
            chosen.append(pop.getPopulation()[random.randrange(0, POPULATION_SIZE)])
        chosen.sort(key=lambda cand: cand.fitness, reverse=True)
        return contenders

    def reproduction(self, pop):
        """Replace each slot with a child of two tournament winners, then mutate everyone."""
        members = pop.getPopulation()
        for slot in range(len(members)):
            mother = self.selectTournamentPopulation(pop).getPopulation()[0]
            father = self.selectTournamentPopulation(pop).getPopulation()[1]
            members[slot] = self.crossover(mother, father)
        self.mutate(pop)

    def crossover(self, parentA, parentB):
        """One-point crossover: prefix from parentA, suffix from parentB."""
        child = Individual()
        cut = random.randrange(0, INDIVIDUAL_SIZE)
        child.getDNA()[:cut] = parentA.getDNA()[:cut]
        child.getDNA()[cut:] = parentB.getDNA()[cut:]
        return child

    def mutate(self, pop):
        """With probability MUTATION_RATE, overwrite one random gene of each member."""
        for member in pop.getPopulation():
            if random.random() <= MUTATION_RATE:
                member.getDNA()[random.randrange(0, INDIVIDUAL_SIZE)] = random.choice(KEYS)

    def evolve(self, pop):
        """Advance the population by one generation."""
        self.selectTournamentPopulation(pop)  # result unused; kept for parity with the original flow
        self.reproduction(pop)
# =============================================
def printPopulation(pop, genNumber):
    """Dump a generation header plus every individual's DNA and fitness to stdout."""
    members = pop.getPopulation()
    print("==========================================================")
    print("Generation #", genNumber, "| Fittest individual fitness: ", members[0].fitness)
    print("Target phrase:", TARGET)
    print("==========================================================")
    for rank, candidate in enumerate(members):
        print("Individual #", rank, ":", ''.join(candidate.getDNA()), "| Fitness: ", candidate.fitness)
    print()
def main():
    """Show the banner, then evolve generations until the target phrase is matched."""
    displayBanner()
    time.sleep(2)
    choice()
    popu = Population(POPULATION_SIZE)
    engine = GeneticAlgorithm()
    generation = 0
    while True:
        popu.getPopulation().sort(key=lambda cand: cand.fitness, reverse=True)
        printPopulation(popu, generation)
        # Perfect fitness (1.0) means every character matches TARGET.
        if popu.getPopulation()[0].fitness >= 1:
            break
        engine.evolve(popu)
        generation += 1
    print("Simulation terminated, target reached")


if __name__ == "__main__":
    main()
215757ab7cfa429f1c411f24ad14cf50ab7992a4 | 1,517 | py | Python | setup.py | Cyb3r-Jak3/ExifReader | 483df30b73ee6be81be5afb27e6cffc7fe12a27c | [
"BSD-3-Clause"
] | 1 | 2020-07-12T20:45:29.000Z | 2020-07-12T20:45:29.000Z | setup.py | Cyb3r-Jak3/ExifReader | 483df30b73ee6be81be5afb27e6cffc7fe12a27c | [
"BSD-3-Clause"
] | null | null | null | setup.py | Cyb3r-Jak3/ExifReader | 483df30b73ee6be81be5afb27e6cffc7fe12a27c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from exifreader import __version__, __doc__
# Read the long description once; the context manager closes the handle promptly
# (the original open(...).read() leaked it) and the explicit encoding avoids
# platform-dependent decoding of the Markdown file.
with open("README.md", "rt", encoding="utf-8") as fh:
    readme_file = fh.read()

setup(
    name="ExifReader",
    version=__version__,
    author="Cyb3r Jak3",
    author_email="jake@jwhite.network",
    license="BSD",
    python_requires=">=3.4",
    packages=find_packages(),
    install_requires=[
        "defusedxml >= 0.6.0"
    ],
    scripts=["EXIF.py"],
    url="https://gitlab.com/Cyb3r-Jak3/exifreader",
    project_urls={
        "Issues": "https://gitlab.com/Cyb3r-Jak3/ExifReader/issues",
        "Source Code": "https://gitlab.com/Cyb3r-Jak3/ExifReader/-/tree/publish",
        "CI": "https://gitlab.com/Cyb3r-Jak3/ExifReader/pipelines",
        "Releases": "https://github.com/Cyb3r-Jak3/ExifReader"
    },
    keywords="exif image metadata photo",
    # Collapse the module docstring onto one line for the short description.
    description=" ".join(__doc__.splitlines()).strip(),
    long_description=readme_file,
    long_description_content_type='text/markdown',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Utilities",
    ],
)
| 33.711111 | 81 | 0.626895 | 161 | 1,517 | 5.732919 | 0.571429 | 0.058505 | 0.065005 | 0.119177 | 0.143012 | 0.143012 | 0 | 0 | 0 | 0 | 0 | 0.022575 | 0.211602 | 1,517 | 44 | 82 | 34.477273 | 0.749164 | 0.013843 | 0 | 0.05 | 0 | 0 | 0.504685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21576cb938009e2cf19a938a7d1507717937f740 | 696 | py | Python | BreadTools/gui/window.py | TurtleP/BreadTools | d849768f3a3455a0ddb40f4212003022afdcea49 | [
"MIT"
] | null | null | null | BreadTools/gui/window.py | TurtleP/BreadTools | d849768f3a3455a0ddb40f4212003022afdcea49 | [
"MIT"
] | null | null | null | BreadTools/gui/window.py | TurtleP/BreadTools | d849768f3a3455a0ddb40f4212003022afdcea49 | [
"MIT"
] | null | null | null | from PyQt5.QtGui import QColor, QIcon
from PyQt5.QtWidgets import QMainWindow
from ..storage import Storage
from .panel import SideBar
from .colors import get_color
class Window(QMainWindow):
    """Main application window: fixed-size frame hosting the sidebar panel."""
    def __init__(self):
        super().__init__()
        # Load persisted settings/resources before building the UI.
        Storage.load()
        self.setFixedSize(600, 478)
        self.setWindowTitle("Bread Tools")
        self.setWindowIcon(QIcon(Storage.resolve_data("icon.ico")))
        # Required so the palette applied in refresh() actually paints the background.
        self.setAutoFillBackground(True)
        self.refresh()
        SideBar(self)
        self.show()

    def refresh(self):
        """Re-apply the configured 'main' color to the window background."""
        palette = self.palette()
        palette.setColor(self.backgroundRole(), QColor(get_color("main")))
        self.setPalette(palette)
215a383798053684214a8aef3bf4c5b4656e227c | 9,458 | py | Python | Book_1/Alien invasion/main.py | Master-sniffer/Learning | fb192ba3972f9238ebee98a8a539087772ad83b2 | [
"MIT"
] | 1 | 2020-07-15T22:44:43.000Z | 2020-07-15T22:44:43.000Z | Book_1/Alien invasion/main.py | Master-sniffer/Learning | fb192ba3972f9238ebee98a8a539087772ad83b2 | [
"MIT"
] | 3 | 2021-06-04T23:51:09.000Z | 2021-06-10T20:07:33.000Z | Book_1/Alien invasion/main.py | Master-sniffer/Learning-PYTHON- | 526f31fb189e6422693c32d38618831b22706f87 | [
"MIT"
] | null | null | null | #import this # delete the first hashtag to see the truth
import pygame, sys, json
from time import sleep
#my folders
from settings import Settings
from ship import Ship
from bullet import Bullet
from game_stats import GameStats
from alien import Alien
from button import Button
from scoreboard import Scoreboard
#from character import Character
class AlienInvasion:
    def __init__(self):
        """Initialize the game: display, settings, ship, bullets, aliens and UI."""
        pygame.init() #HERE IS EVERYTHING ALRIGHT
        self.settings=Settings()
        self.screen=pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height)) # 4
        self.screen_rect=self.screen.get_rect()
        # TO RUN FULLSCREEN: remove the "#" in items 1, 2, 3 and put a "#" on item 4
        # self.screen=pygame.display.set_mode((0,0), pygame.FULLSCREEN) # 1
        # self.settings.screen_width=self.screen.get_rect().width # 2
        # self.settings.screen_height=self.screen.get_rect().height # 3
        pygame.display.set_caption("ALIEN INVASION")
        self.ship=Ship(self)
        self.bullets=pygame.sprite.Group()
        self.stats=GameStats(self)
        self.sb=Scoreboard(self)
        self.aliens=pygame.sprite.Group() # sprite group holding the alien fleet
        #self.charecter=Character(self) # MB I WILL DO IT
        self._create_fleet() # build the initial fleet
        self.play_button=Button(self,"LETS START")
        self.filename="saved_data.json"
    def run_game(self):
        """Main loop: poll input, update game state while active, redraw."""
        while True: # track keyboard/mouse events
            self._check_events()
            if self.stats.game_active:
                self.ship.update()
                self._update_bullet()
                self._update_aliens()
            self._update_screen()
    def _update_screen(self): # draws everything that appears on screen
        self.screen.fill(self.settings.bg_colour) # fill the screen with the background colour
        self.ship.blitme() # draw the ship
        #self.charecter.blitme() #MB I WILL DO IT
        for bullet in self.bullets.sprites():
            bullet.draw_bullet()
        self.aliens.draw(self.screen) # draw the aliens onto the shared screen
        if not self.stats.game_active:
            self.play_button.draw_button()
        self.sb.show_score() # draw the scoreboard
        pygame.display.flip() # refresh the display
    def _check_events(self): # dispatch all pending pygame events
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                sys.exit()
            elif event.type==pygame.KEYDOWN:
                self._check_keyDOWN(event)
            elif event.type==pygame.KEYUP:
                self._check_keyUP(event)
            elif event.type==pygame.MOUSEBUTTONDOWN:
                mouse_pos=pygame.mouse.get_pos()
                self._check_play_button(mouse_pos)
    def _check_keyDOWN(self, event): # handle a key being pressed
        if event.key==pygame.K_RIGHT:
            self.ship.moving_right=True
        elif event.key==pygame.K_LEFT:
            self.ship.moving_left=True
        if event.key==pygame.K_UP:
            self.ship.moving_up=True
        elif event.key==pygame.K_DOWN:
            self.ship.moving_down=True
        elif event.key==pygame.K_ESCAPE: # quit key
            some_score=self.stats.high_score
            with open (self.filename, "w") as f: # open the save file
                print ("d") # log that the save is happening
                json.dump(some_score, f) # persist the high score
            sys.exit()
        elif event.key==pygame.K_SPACE: # fire a bullet
            self._fire_bullet()
    def _check_keyUP(self, event): # handle a key being released
        if event.key==pygame.K_RIGHT:
            self.ship.moving_right=False
        elif event.key==pygame.K_LEFT:
            self.ship.moving_left=False
        elif event.key==pygame.K_UP:
            self.ship.moving_up=False
        elif event.key==pygame.K_DOWN:
            self.ship.moving_down=False
    def _check_play_button(self, mouse_pos):
        button_clicked=self.play_button.rect.collidepoint(mouse_pos) # was the button clicked?
        if button_clicked and not self.stats.game_active: # start only when the game is not already running
            self.settings.initialize_dynamic_settings()
            self.stats.reset_stats()
            self.stats.game_active=True
            self.sb.prep_score()
            self.sb.prep_level()
            self.sb.prep_ships()
            self.aliens.empty()
            self.bullets.empty()
            self._create_fleet()
            self.ship.center_ship()
            pygame.mouse.set_visible(False) # hide the mouse cursor during play
    def _fire_bullet(self): # spawn a bullet
        if len(self.bullets) < self.settings.bullet_allowed: # respect the bullet limit
            new_bullet=Bullet(self)
            self.bullets.add(new_bullet)
    def _update_bullet(self): # move bullets and drop the ones that leave the screen
        self.bullets.update()
        for bullet in self.bullets.copy():
            if bullet.rect.bottom<=0:
                self.bullets.remove(bullet)
                #print (len(self.bullets))
        self._check_bullet()
    def _check_bullet(self):
        """Handle bullet-alien collisions and start a new wave when the fleet is cleared."""
        collisions=pygame.sprite.groupcollide(self.bullets, self.aliens, False, True) # bullets survive, aliens are removed
        if collisions: # score every alien that was hit
            for aliens in collisions.values():
                self.stats.score+=self.settings.alien_score*len(aliens)
            self.sb.prep_score()
            self.sb.check_high_score()
        if not self.aliens: # fleet destroyed -> next level
            self.bullets.empty()
            self._create_fleet()
            self.settings.increase_speed()
            self.stats.level+=1
            self.sb.prep_level()
    def _ship_hit(self): # respond to the ship being hit
        # NOTE(review): `>= 0` grants one more respawn than the conventional `> 0`
        # check — confirm this extra life is intended.
        if self.stats.ships_left>=0:
            self.stats.ships_left-=1
            self.sb.prep_ships()
            self.aliens.empty()
            self.bullets.empty()
            self._create_fleet()
            self.ship.center_ship()
            sleep(0.5) # pause the game for half a second
        else:
            self.stats.game_active=False
            pygame.mouse.set_visible(True)
    def _create_fleet(self): # build the alien fleet
        alien=Alien(self)
        alien_width, alien_height=alien.rect.size # alien.rect.size returns (width, height)
        ship_height=self.ship.rect.height # ship height
        available_space_x=self.settings.screen_width-(2*alien_width) # one alien-width margin on each side
        available_space_y=self.settings.screen_height-(3* alien_height) - ship_height # vertical space left for rows
        number_rows=available_space_y//(2*alien_height) # row count that leaves the player room to manoeuvre
        number_aliends=available_space_x//(2*alien_width) # aliens per row
        for row in range ((number_rows-2)//2): # TODO: vary the row layout
            for number in range(number_aliends): # one iteration per alien in the row
                self._create_alien(number,row) # place an alien at (column, row)
                self._create_alien_1(number,row+1) # place the offset companion row
    def _create_alien(self, alien_number,row): # place a single alien
        alien=Alien(self)
        alien_width, alien_height=alien.rect.size # width first, then height
        alien.x=alien_width+2* alien_width * alien_number # horizontal spacing between aliens
        alien.rect.x=alien.x # final horizontal position
        alien.rect.y=alien_height +2*alien_height*row
        self.aliens.add(alien) # register the alien in the fleet group
    def _create_alien_1(self, alien_number, row): # place an offset alien for the staggered row
        alien=Alien(self)
        alien_width, alien_height=alien.rect.size
        alien.x=alien_width+alien_width+2* alien_width * alien_number
        alien.rect.x=alien.x
        alien.rect.y=2*alien_height*row
        self.aliens.add(alien)
    def _update_aliens(self):
        """Move the fleet and react to ship collisions, bottom hits and screen edges."""
        self.aliens.update()
        if pygame.sprite.spritecollideany(self.ship, self.aliens):
            self._ship_hit()
        self._aliens_check_bottom()
        self._check_fleet()
    def _aliens_check_bottom(self): # did any alien reach the bottom of the screen?
        for alien in self.aliens.sprites():
            if alien.rect.bottom>=self.screen_rect.bottom:
                self._ship_hit()
                break
    def _check_fleet(self): # did any alien reach a screen edge?
        for alien in self.aliens.sprites():
            if alien.check_edges():
                self._change_direction()
                break
    def _change_direction(self): # drop the fleet and reverse its direction
        for alien in self.aliens.sprites(): # drop every alien via the sprite group
            alien.rect.y += self.settings.fleet_drop_speed
        self.settings.fleet_direction *=-1
if __name__=='__main__':
    # Entry point: build the game object and hand control to its main loop.
    game = AlienInvasion()
    game.run_game()
| 40.767241 | 155 | 0.645908 | 1,209 | 9,458 | 4.879239 | 0.276261 | 0.023055 | 0.023733 | 0.025428 | 0.223428 | 0.173928 | 0.143584 | 0.126801 | 0.104085 | 0.092897 | 0 | 0.004741 | 0.264009 | 9,458 | 231 | 156 | 40.943723 | 0.842695 | 0.241489 | 0 | 0.237288 | 0 | 0 | 0.006906 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.050847 | 0 | 0.158192 | 0.00565 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
215b795f7d3f42889803e45d66f7f3ec772868ce | 21,319 | py | Python | src/optimModels/comm_optim/ea_setup.py | sousamd/optimModels | b11d26eb72f19da0b97abbd4fb6513de06a348ef | [
"Apache-2.0"
] | null | null | null | src/optimModels/comm_optim/ea_setup.py | sousamd/optimModels | b11d26eb72f19da0b97abbd4fb6513de06a348ef | [
"Apache-2.0"
] | null | null | null | src/optimModels/comm_optim/ea_setup.py | sousamd/optimModels | b11d26eb72f19da0b97abbd4fb6513de06a348ef | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from random import randint, uniform, choice
class EAConfig:
    """Holds every tunable EA parameter plus the caches shared by all candidates."""

    def __init__(self,
                 mut = 0.30,
                 cross = 0.30,
                 cand_size = 10,
                 max_cand_value = 8,  # for real and int representation
                 rep = 0,  # 0 = binary, 1 = integer, else = real
                 pop_size = 10,  # min = 2
                 max_gen = 3
                 ):
        """
        :param mut: float, rate at which a mutation occurs
        :param cross: float, rate at which crossover occurs
        :param cand_size: int, size of the candidate representation
        :param max_cand_value: int or float, max value an entry of the
            representation can take (int and real representations only)
        :param rep: int, representation type: 0 binary, 1 integer, else real
        :param pop_size: int, size of a population in the EA
        :param max_gen: int, max number of generations in the EA
        """
        self.mutation_rate = mut
        self.crossover_rate = cross
        self.cand_size = cand_size
        self.max_cand_value = max_cand_value
        self.rep_type = rep
        self.pop_size = pop_size
        # Tournament size grows slowly with the population (5% on top of a base of 2).
        self.tourn_size = 2 + round(self.pop_size * 0.05)
        self.max_gen = max_gen
        # Evaluation caches keyed by str(representation), shared across candidates.
        self.scoredic = {}
        self.val_dic = {}
        self.fit_dic = {}

    def __str__(self):
        summary = {
            "mut": self.mutation_rate,
            "cross": self.crossover_rate,
            "cand size": self.cand_size,
            "max_cand": self.max_cand_value,
            "rep type": self.rep_type,
            "pop size": self.pop_size,
            "tourn size": self.tourn_size,
            "max_gen": self.max_gen,
        }
        return str(summary)
class Candidate:
    """One population member: a representation plus cached evaluation results."""

    def __init__(self, rep):
        """
        :param rep: list of ints (binary/int encoding) or floats (real encoding)
        """
        self.rep = rep
        # All three are populated when the candidate is evaluated.
        self.score = None
        self.values = None
        self.fit_list = None

    def __str__(self):
        return "{}: {}".format(self.rep, self.score)

    def update(self):
        """Pull this representation's cached evaluation out of the shared config dicts."""
        key = str(self.rep)
        self.score = config.scoredic[key]
        self.fit_list = config.fit_dic[key]
        self.values = config.val_dic[key]

    def set_cand_values(self, fit_list, val, score):
        """
        Store an evaluation in the shared caches and mirror it on this candidate.

        :param fit_list: values relative to the fitness reaction
        :param val: list of all values
        :param score: evaluation score for fitness
        """
        key = str(self.rep)
        config.fit_dic[key] = fit_list
        self.fit_list = config.fit_dic[key]
        config.val_dic[key] = val
        self.values = config.val_dic[key]
        config.scoredic[key] = score
        self.score = config.scoredic[key]
# Module-wide configuration singleton shared by every EA helper below.
config = EAConfig()


def change_config(
        pop_size = None,
        max_gen = None,
        cand = None,
        rep = None,
        max_val = None,
        mut = None,
        cross = None
        ):
    """
    Override selected EA parameters on the shared `config` singleton.

    Arguments left as None keep their current value.  All parameters are now
    tested with `is not None`, so legitimate zero values are accepted
    uniformly; previously only mut/cross/rep special-cased zero (via
    `or x == 0`) while the remaining parameters silently ignored it.

    :param pop_size: int, size of a population in the EA
    :param max_gen: int, max number of generations in the EA
    :param cand: int, size of the candidate representation
    :param rep: int, representation type: 0 binary, 1 integer, else real
    :param max_val: int or float, max value a representation entry can take
    :param mut: float, rate at which a mutation occurs
    :param cross: float, rate at which crossover occurs
    :return: nothing
    """
    if pop_size is not None:
        config.pop_size = pop_size
        # NOTE(review): tourn_size is derived from pop_size only in
        # EAConfig.__init__ and is NOT recomputed here — confirm intended.
    if max_gen is not None:
        config.max_gen = max_gen
    if cand is not None:
        config.cand_size = cand
    if rep is not None:
        config.rep_type = rep
    if max_val is not None:
        config.max_cand_value = max_val
    if mut is not None:
        config.mutation_rate = mut
    if cross is not None:
        config.crossover_rate = cross
def reset_config():
    """Drop every cached evaluation (scores, values, fitness lists) from the shared config."""
    for cache_name in ("scoredic", "val_dic", "fit_dic"):
        setattr(config, cache_name, {})
def binary_representation():
    """Random bit list of length config.cand_size containing at least one 1."""
    # Rejection sampling: redraw until the representation is not all zeros.
    while True:
        bits = [randint(0, 1) for _ in range(config.cand_size)]
        if any(bits):
            return bits
def int_representation():
    """
    Random integer representation: config.cand_size distinct values drawn from
    0..config.max_cand_value, returned in ascending order.

    The original retried by recursion until all drawn values happened to be
    distinct, which recursed forever (RecursionError) whenever
    config.cand_size > config.max_cand_value + 1 — true under the defaults
    (cand_size=10, max_cand_value=8).  random.sample draws without replacement
    directly — the same uniform distribution over sorted distinct tuples —
    and raises ValueError immediately in the impossible case.

    :return: sorted list of distinct ints
    :raises ValueError: if config.cand_size exceeds config.max_cand_value + 1
    """
    from random import sample  # local import: the module header only imports randint/uniform/choice
    return sorted(sample(range(config.max_cand_value + 1), config.cand_size))
def real_representation():
    """Sorted list of config.cand_size random floats in [0, config.max_cand_value]."""
    values = [uniform(0, config.max_cand_value) for _ in range(config.cand_size)]
    values.sort()
    return values
def binary_to_int_rep(rep):
    """Convert a binary representation to integer form: the indexes of its zeros.

    :param rep: list of 0/1 values
    :return: list of ints, positions where rep holds 0
    """
    return [index for index, bit in enumerate(rep) if bit == 0]
def inverse_int_rep(int_rep):
    """Complement of an integer representation: every valid index NOT in int_rep.

    The index space depends on the representation type: cand_size for binary,
    max_cand_value + 1 for integer, empty otherwise.

    :param int_rep: list of integers
    :return: list of integers, the inverse of the one introduced
    """
    if config.rep_type == 0:
        upper = config.cand_size
    elif config.rep_type == 1:
        upper = config.max_cand_value + 1
    else:
        upper = 0  # the real representation has no discrete index space
    return [idx for idx in range(upper) if idx not in int_rep]
def bit_flip_mutation_binary(candidate, pos = None):
    """Flip one bit of a binary candidate (random index when pos is omitted).

    Returns the original candidate unchanged if the flip would zero out the
    whole representation.

    :param candidate: candidate object
    :param pos: mutation index, autogenerated if not present
    :return: candidate object
    """
    genes = candidate.rep.copy()
    if (not pos) and (pos != 0):
        pos = randint(0, len(genes) - 1)
    if genes[pos] == 0:
        genes[pos] = 1
    elif genes[pos] == 1:
        genes[pos] = 0
    return candidate if sum(genes) == 0 else Candidate(genes)
def bit_flip_mutation_int(candidate, pos = None):
    """Replace one gene of an integer candidate with a random value.

    If the mutated representation contains duplicates, the original candidate
    is returned untouched.

    :param candidate: candidate object
    :param pos: mutation index, autogenerated if not present
    :return: candidate object
    """
    genes = candidate.rep.copy()
    if (not pos) and (pos != 0):
        pos = randint(0, len(genes) - 1)
    genes[pos] = randint(0, config.max_cand_value)
    if len(set(genes)) < len(genes):
        return candidate
    return Candidate(sorted(genes))
def bit_flip_mutation_real(candidate, pos = None):
    """Replace one gene of a real-valued candidate with a random float.

    :param candidate: candidate object
    :param pos: mutation index, autogenerated if not present
    :return: candidate object
    """
    genes = candidate.rep.copy()
    if (not pos) and (pos != 0):
        pos = randint(0, len(genes) - 1)
    genes[pos] = uniform(0, config.max_cand_value)
    return Candidate(sorted(genes))
def one_point_crossover(par1, par2):
    """Split both parents at one random point and swap the tails.

    Binary children are returned as-is; int/real children are sorted, and the
    parents are returned unchanged when either child would contain duplicates.

    :param par1: candidate object
    :param par2: candidate object
    :return: two candidate objects
    """
    cut = randint(0, len(par1.rep) - 1)
    head_tail = par1.rep[:cut] + par2.rep[cut:]
    tail_head = par2.rep[:cut] + par1.rep[cut:]
    if config.rep_type == 0:
        return Candidate(head_tail), Candidate(tail_head)
    has_duplicates = (len(head_tail) != len(set(head_tail))
                      or len(tail_head) != len(set(tail_head)))
    if has_duplicates:
        return par1, par2
    return Candidate(sorted(head_tail)), Candidate(sorted(tail_head))
def uniform_crossover(par1, par2):
    """Build two children by assigning each gene position to a child at random.

    Same duplicate-handling rules as one_point_crossover: binary children are
    returned as-is; int/real children are sorted, and the parents come back
    unchanged if either child would contain duplicates.

    :param par1: candidate object
    :param par2: candidate object
    :return: two candidate objects
    """
    first, second = [], []
    for idx in range(len(par1.rep)):
        if randint(0, 1) == 0:
            first.append(par1.rep[idx])
            second.append(par2.rep[idx])
        else:
            first.append(par2.rep[idx])
            second.append(par1.rep[idx])
    if config.rep_type == 0:
        return Candidate(first), Candidate(second)
    if len(set(first)) == len(first) and len(set(second)) == len(second):
        return Candidate(sorted(first)), Candidate(sorted(second))
    return par1, par2
def generate_random_popu():
    """Create config.pop_size candidates with random representations of the configured type.

    :return: list of candidate objects
    """
    # Dispatch on representation type; anything other than 0/1 means "real".
    builders = {0: binary_representation, 1: int_representation}
    make_rep = builders.get(config.rep_type, real_representation)
    return [Candidate(make_rep()) for _ in range(config.pop_size)]
def generate_headstart_popu(sample):
    """
    Generate a semi-random population seeded from *sample*.

    Every generated candidate contains at least the members present in
    *sample*; the remaining positions are filled at random according to the
    configured representation type.

    Bug fix: the real-valued branch previously looped
    `while len(set(rep)) > config.cand_size`, which never runs when the seed
    is shorter than cand_size, so the representation was never padded.  It
    now mirrors the integer branch and pads `while ... < config.cand_size`.

    :param sample: a candidate representation smaller than config.cand_size
    :return: list of candidate objects
    """
    populist = []
    for _ in range(config.pop_size):
        rep = deepcopy(sample)
        if config.rep_type == 0:
            # Binary: keep the seed's 1s, randomise the remaining bits.
            for i in range(len(rep)):
                if rep[i] == 0:
                    rep[i] = randint(0, 1)
        elif config.rep_type == 1:
            # Integer: pad with random distinct values up to cand_size.
            while len(set(rep)) < config.cand_size:
                rep.append(randint(0, config.max_cand_value))
            rep = sorted(list(set(rep)))
        else:
            # Real: pad with random distinct floats up to cand_size (was '>', never looped).
            while len(set(rep)) < config.cand_size:
                rep.append(uniform(0, config.max_cand_value))
            rep = sorted(list(set(rep)))
        populist.append(Candidate(rep))
    return populist
def new_popu_tourn(old_popu):
    """Create the next generation through tournament selection.

    The best-scoring candidate of ``old_popu`` is carried over unchanged
    (elitism); the rest of the population is filled pairwise by selecting
    two parents through tournaments, crossing them over and possibly
    mutating the offspring.

    :param old_popu: list of candidate objects
    :return: list of candidate objects of size ``config.pop_size``
    """
    next_gen = []
    # Elitism: keep the single best candidate (only if its score beats 0).
    elite = None
    top_score = 0
    for candidate in old_popu:
        if candidate.score > top_score:
            top_score = candidate.score
            elite = candidate
    if elite:
        next_gen.append(elite)
    while len(next_gen) < config.pop_size:
        parent_a = select_candidate(old_popu)
        parent_b = select_candidate(old_popu)
        child_a, child_b = maybe_crossover(parent_a, parent_b)
        next_gen.append(maybe_mutate(child_a))
        next_gen.append(maybe_mutate(child_b))
    # Appending in pairs may overshoot by one; trim to the exact size.
    return next_gen[:config.pop_size]
def new_popu_changeworst(old_popu, quantity):
    """Generate a new population by changing each candidate's worst members.

    The candidate holding the best overall score is copied unchanged; every
    other candidate has its ``quantity`` worst-contributing members flipped
    and, for binary representations, the freed positions re-mutated with a
    temporarily raised mutation rate.

    :param old_popu: a list of candidate objects
    :param quantity: the number of members to be changed in each candidate
    :return: a list of candidate objects
    """
    new_popu = []
    keep_best = 0
    for cand in old_popu:
        if cand.score > keep_best:
            keep_best = cand.score
    for cand in old_popu:
        worst_quantity = quantity
        new_cand = deepcopy(cand)
        if new_cand.score != keep_best:
            if config.rep_type == 0:
                # When the change-worst quantity is bigger than the number of
                # organisms in the candidate, cap it to one less than the
                # candidate size.
                if sum(cand.rep) - 1 <= quantity:
                    worst_quantity = sum(cand.rep) - 1
            worst = find_worst(cand.fit_list, worst_quantity)
            for i in worst:
                if config.rep_type == 0:
                    new_cand = bit_flip_mutation_binary(new_cand, i)
                elif config.rep_type == 1:
                    new_cand = bit_flip_mutation_int(new_cand, i)
                else:
                    new_cand = bit_flip_mutation_real(new_cand, i)
            reverse_worst = inverse_int_rep(worst)
            # Temporarily raise the mutation rate while re-mutating the
            # positions freed above.
            keep_mut_rate = deepcopy(config.mutation_rate)
            change_config(mut=0.15)
            if config.rep_type == 0:
                for i in reverse_worst:
                    # BUG FIX: mutate the accumulating ``new_cand`` instead of
                    # the untouched ``cand``, which silently discarded all the
                    # bit-flips applied above.
                    new_cand = maybe_mutate(new_cand, i)
            # Restore the configured mutation rate unconditionally so other
            # representation types are not left running at the temporary rate.
            change_config(mut=keep_mut_rate)
        new_popu.append(new_cand)
    return new_popu
def find_worst(list_of_values, quantity):
    """Locate the indexes of the worst performing members.

    Zero/falsy fitness values are ignored when ranking.  If several members
    share one of the lowest values, all of their indexes are collected, so
    the result may contain more than ``quantity`` entries.

    :param list_of_values: per-member fitness values of a candidate,
        used to determine which are the worst performing ones
    :param quantity: how many of the lowest values to target
    :return: a list with indexes of the worst members, to be eliminated
    :raises Exception: if ``quantity`` exceeds the number of members
    """
    if len(list_of_values) < quantity:
        raise Exception("Quantity should be lower than the number of models present.")
    lowest = sorted(v for v in list_of_values if v)[:quantity]
    indexes = set()
    for value in lowest:
        for idx, current in enumerate(list_of_values):
            if current == value:
                indexes.add(idx)
    return list(indexes)
def new_popu_keep_headstart_tourn(old_popu, sample):
    """Run a tournament generation, then force the sample members back in.

    :param old_popu: a list of candidate objects
    :param sample: a candidate representation, smaller than cand_size
    :return: a list of candidate objects
    """
    new_popu = new_popu_tourn(old_popu)
    for cand in new_popu:
        if config.rep_type == 0:
            # Binary: switch on every position that is set in the sample.
            for i, bit in enumerate(sample):
                if bit == 1:
                    cand.rep[i] = 1
        else:
            # Index-based: start from the sample and top up with members the
            # candidate already had (excluding sample duplicates).
            pool = [member for member in cand.rep if member not in sample]
            new_rep = deepcopy(sample)
            while len(set(new_rep)) < config.cand_size:
                new_rep.append(choice(pool))
            cand.rep = sorted(set(new_rep))
    return new_popu
def select_candidate(popu):
    """Tournament selection: pick the best of a few random candidates.

    ``config.tourn_size`` indexes are drawn (with replacement) from the
    population and the highest-scoring candidate among them is returned.

    :param popu: a list of candidate objects
    :return: candidate object, the candidate with the best score
    """
    drawn = [randint(0, config.pop_size - 1) for _ in range(config.tourn_size)]
    winner = []
    winning_score = -99999
    for idx in drawn:
        if popu[idx].score > winning_score:
            winning_score = popu[idx].score
            winner = popu[idx]
    return winner
def maybe_crossover(par1, par2):
    """Randomly determine whether (and which) crossover occurs.

    :param par1: candidate object
    :param par2: candidate object
    :return: two candidate objects
    """
    randval = uniform(0, 1)
    if randval > config.crossover_rate:
        # No crossover: parents pass through unchanged.
        return par1, par2
    if randval < config.crossover_rate:
        return uniform_crossover(par1, par2)
    # NOTE(review): this branch only runs when randval equals the crossover
    # rate exactly, so one_point_crossover is effectively never used --
    # confirm whether that is intentional.
    return one_point_crossover(par1, par2)
def maybe_mutate(cand, pos=None):
    """Randomly determine whether mutation occurs.

    NOTE(review): the candidate is returned unchanged when the random draw
    is *below* ``config.mutation_rate`` and mutated otherwise, i.e. the
    effective mutation probability is ``1 - mutation_rate`` -- confirm this
    is the intended convention.

    :param cand: candidate object
    :param pos: index position if necessary
    :return: candidate object
    """
    if uniform(0, 1) < config.mutation_rate:
        return cand
    mutators = {0: bit_flip_mutation_binary, 1: bit_flip_mutation_int}
    mutate = mutators.get(config.rep_type, bit_flip_mutation_real)
    return mutate(cand, pos)
def sample_size_check(option_list, quantity):
    """Raise errors if the sample size is incoherent with the chosen options.

    :param option_list: option list parameter used in ea_run
    :param quantity: quantity parameter used in ea_run
    :return: nothing, raises errors when a problem is detected
    """
    # (slot index, keyword, numeric alias) pairs whose options carry a
    # sample that needs validating.
    checks = ((0, "headstart", 1), (1, "keep", 2))
    for slot, keyword, alias in checks:
        option = option_list[slot]
        if option[0] == keyword or option[0] == alias:
            sample = option[1]
            if quantity == 0 and len(sample) != config.cand_size:
                raise Exception("Sample must have same length as candidate size.")
            if quantity > 0 and len(sample) > quantity:
                raise Exception("Sample length must not be lower than quantity.")
def create_constraints(reac_list, lb=0, up=0):
    """Create a dictionary of constraints ready to be used by FBA helpers.

    :param reac_list: list of str, list of reaction ids to be constrained
    :param lb: int or float, value of the lower bound
    :param up: int or float, value of the upper bound
    :return: dict with reaction ids as keys and ``(lb, up)`` tuples as values
    :raises Exception: if the lower bound exceeds the upper bound
    """
    if lb > up:
        raise Exception("Lower bound must be lower than upper bound")
    return {reac: (lb, up) for reac in reac_list}
def get_predecessor_reacs(model, reac_id):
    """Recover the reactions producing the first substrate of a reaction.

    Only the first substrate of the target reaction is considered.

    :param model: framed model object
    :param reac_id: str, reaction id
    :return: list of str reaction ids whose products include that substrate
    """
    first_substrate = model.reactions[reac_id].get_substrates()[0]
    return [
        other
        for other in model.reactions
        if first_substrate in model.reactions[other].get_products()
    ]
def get_fit_reac_values(cmodel, val, fit_reacs, indexes):
    """Collect the flux values of the fitness reactions for given members.

    For each selected individual, the fitness reaction whose id ends with
    the model id is looked up (the last match wins); individuals with no
    matching reaction contribute a value of 0.

    :param cmodel: cmodel object
    :param val: values parameter from solution object
    :param fit_reacs: reactions related with the fitness evaluation
    :param indexes: indexes of the individuals present
    :return: a list of the values related to the reactions in fit_reacs
    """
    values = []
    for model_id in (cmodel.models[i].id for i in indexes):
        matched = 0
        for fit_reac in fit_reacs:
            if fit_reac.endswith(model_id):
                matched = fit_reac
        values.append(val[matched] if matched else 0)
    return values
if __name__ == '__main__':
    # Quick manual smoke check: emit one random binary representation.
    print(binary_representation())
| 34.441034 | 111 | 0.630517 | 2,897 | 21,319 | 4.496721 | 0.110114 | 0.01566 | 0.017963 | 0.012896 | 0.382283 | 0.330467 | 0.283181 | 0.256007 | 0.238735 | 0.215399 | 0 | 0.012949 | 0.286411 | 21,319 | 618 | 112 | 34.496764 | 0.843358 | 0.348609 | 0 | 0.302594 | 0 | 0 | 0.028844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089337 | false | 0.002882 | 0.005764 | 0.002882 | 0.204611 | 0.002882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
215f062e64a317cf63a45c48fe295e5daa72421e | 9,867 | py | Python | isaacgymenvs/tasks/amp/utils_amp/gym_util.py | ZaneZh/IsaacGymEnvs | adc20a5fbd70d77a716fefe86eb947f83d3efbd2 | [
"BSD-3-Clause"
] | 1 | 2022-03-26T12:55:04.000Z | 2022-03-26T12:55:04.000Z | isaacgymenvs/tasks/amp/utils_amp/gym_util.py | ZaneZh/IsaacGymEnvs | adc20a5fbd70d77a716fefe86eb947f83d3efbd2 | [
"BSD-3-Clause"
] | null | null | null | isaacgymenvs/tasks/amp/utils_amp/gym_util.py | ZaneZh/IsaacGymEnvs | adc20a5fbd70d77a716fefe86eb947f83d3efbd2 | [
"BSD-3-Clause"
] | 2 | 2022-03-19T13:45:54.000Z | 2022-03-23T07:23:19.000Z | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import logger
from isaacgym import gymapi
import numpy as np
import torch
from isaacgym.torch_utils import *
from isaacgym import gymtorch
def setup_gym_viewer(config):
    """Initialize gym, then create a simulation and (optional) viewer.

    :param config: run configuration passed through to the helpers
    :return: tuple of (gym, sim, viewer)
    """
    gym_handle = initialize_gym(config)
    sim_handle, viewer_handle = configure_gym(gym_handle, config)
    return gym_handle, sim_handle, viewer_handle
def initialize_gym(config):
    """Acquire the gym API singleton, aborting the process on failure."""
    gym = gymapi.acquire_gym()
    if gym.initialize():
        return gym
    logger.warn("*** Failed to initialize gym")
    quit()
def configure_gym(gym, config):
    """Create a simulation (and a viewer when rendering) from *config*.

    :param gym: gym API object from :func:`initialize_gym`
    :param config: dict with ``engine``, ``render``, ``add_projectiles``
        keys and an ``env.scene`` section of solver parameters
    :return: tuple (sim, viewer); ``viewer`` is ``None`` when not rendering
    """
    engine, render = config['engine'], config['render']
    # physics engine settings
    if(engine == 'FLEX'):
        sim_engine = gymapi.SIM_FLEX
    elif(engine == 'PHYSX'):
        sim_engine = gymapi.SIM_PHYSX
    else:
        logger.warn("Uknown physics engine. defaulting to FLEX")
        sim_engine = gymapi.SIM_FLEX
    # gym viewer
    if render:
        # create viewer
        sim = gym.create_sim(0, 0, sim_type=sim_engine)
        viewer = gym.create_viewer(
            sim, int(gymapi.DEFAULT_VIEWER_WIDTH / 1.25),
            int(gymapi.DEFAULT_VIEWER_HEIGHT / 1.25)
        )
        if viewer is None:
            logger.warn("*** Failed to create viewer")
            quit()
        # enable left mouse click or space bar for throwing projectiles
        if config['add_projectiles']:
            gym.subscribe_viewer_mouse_event(viewer, gymapi.MOUSE_LEFT_BUTTON, "shoot")
            gym.subscribe_viewer_keyboard_event(viewer, gymapi.KEY_SPACE, "shoot")
    else:
        # headless mode: no graphics device and no viewer
        sim = gym.create_sim(0, -1)
        viewer = None
    # simulation params (solver settings come from the scene config,
    # with defaults for the optional ones)
    scene_config = config['env']['scene']
    sim_params = gymapi.SimParams()
    sim_params.solver_type = scene_config['SolverType']
    sim_params.num_outer_iterations = scene_config['NumIterations']
    sim_params.num_inner_iterations = scene_config['NumInnerIterations']
    sim_params.relaxation = scene_config.get('Relaxation', 0.75)
    sim_params.warm_start = scene_config.get('WarmStart', 0.25)
    sim_params.geometric_stiffness = scene_config.get('GeometricStiffness', 1.0)
    sim_params.shape_collision_margin = 0.01
    # Y-up world with standard gravity
    sim_params.gravity = gymapi.Vec3(0.0, -9.8, 0.0)
    gym.set_sim_params(sim, sim_params)
    return sim, viewer
def parse_states_from_reference_states(reference_states, progress):
    """Extract per-frame reference tensors from a DeepMimicState.

    Each field is indexed by *progress*, converted to a float64 tensor and
    moved to the GPU.

    :return: tuple of (global rotations, translations, velocities,
        angular velocities)
    """
    def _to_cuda_double(field):
        # index the requested frames, then upload as double precision
        return torch.tensor(field[(progress,)].numpy(),
                            dtype=torch.double).cuda()

    # parse reference states from DeepMimicState
    global_quats_ref = _to_cuda_double(reference_states._global_rotation)
    ts_ref = _to_cuda_double(reference_states._translation)
    vels_ref = _to_cuda_double(reference_states._velocity)
    avels_ref = _to_cuda_double(reference_states._angular_velocity)
    return global_quats_ref, ts_ref, vels_ref, avels_ref
def parse_states_from_reference_states_with_motion_id(precomputed_state,
                                                      progress, motion_id):
    """Look up reference states for (motion, frame) pairs.

    The per-motion frame index *progress* is turned into an index into the
    flat reference arrays using the per-motion offsets, clamped to the last
    stored frame.

    :param precomputed_state: dict with 'motion_offset' and the four
        flattened reference arrays
    :param progress: per-motion frame indexes (array-like)
    :param motion_id: motion index for every entry of *progress*
    :return: tuple (global_quats_ref, ts_ref, vels_ref, avels_ref)
    """
    assert len(progress) == len(motion_id)
    # translate (motion, frame) into a flat index, clamped to the end
    flat_id = precomputed_state['motion_offset'][motion_id] + progress
    flat_id = np.minimum(flat_id,
                         precomputed_state['global_quats_ref'].shape[0] - 1)
    return (precomputed_state['global_quats_ref'][flat_id],
            precomputed_state['ts_ref'][flat_id],
            precomputed_state['vels_ref'][flat_id],
            precomputed_state['avels_ref'][flat_id])
def parse_dof_state_with_motion_id(precomputed_state, dof_state,
                                   progress, motion_id):
    """Look up DOF states for (motion, frame) pairs.

    NOTE: the flat index should never reach ``dof_state.shape[0]`` because
    episodes are terminated two steps earlier; it is clamped anyway as a
    safety net.

    :return: rows of *dof_state* selected for each (motion, frame) pair
    """
    assert len(progress) == len(motion_id)
    flat_id = precomputed_state['motion_offset'][motion_id] + progress
    flat_id = np.minimum(flat_id, dof_state.shape[0] - 1)
    return dof_state[flat_id]
def get_flatten_ids(precomputed_state):
    """Build flat (state, motion, global) index arrays over all motions.

    :param precomputed_state: dict whose 'motion_offset' entry holds the
        start offset of every motion plus one trailing end offset
    :return: three np.ndarrays: the frame index within its motion, the
        motion index of each frame, and the running global frame index
    """
    offsets = precomputed_state['motion_offset']
    init_state_id, init_motion_id, global_id = [], [], []
    for motion in range(len(offsets) - 1):
        length = offsets[motion + 1] - offsets[motion]
        init_state_id.extend(range(length))
        init_motion_id.extend([motion] * length)
        # continue the running global counter from where it stopped
        start = 0 if not global_id else global_id[-1] + 1
        global_id.extend(range(start, start + length))
    return np.array(init_state_id), np.array(init_motion_id), \
        np.array(global_id)
def parse_states_from_reference_states_with_global_id(precomputed_state,
                                                      global_id):
    """Look up reference states by global frame id (wrapping around).

    :param precomputed_state: dict holding the flattened reference arrays
    :param global_id: indexes into the flat arrays; values are taken modulo
        the number of stored frames
    :return: tuple (global_quats_ref, ts_ref, vels_ref, avels_ref)
    """
    wrapped = global_id % precomputed_state['global_quats_ref'].shape[0]
    keys = ('global_quats_ref', 'ts_ref', 'vels_ref', 'avels_ref')
    return tuple(precomputed_state[key][wrapped] for key in keys)
def get_robot_states_from_torch_tensor(config, ts, global_quats, vels, avels,
                                       init_rot, progress, motion_length=-1,
                                       actions=None, relative_rot=None,
                                       motion_id=None, num_motion=None,
                                       motion_onehot_matrix=None):
    """Assemble the flat observation vector for every environment.

    The observation always contains the torso height plus the
    quaternion-based joint observation; depending on *config* it is
    extended with the previous action, the orientation drift relative to
    the initial rotation, the normalized frame progress, and the motion id
    (scalar or one-hot).

    NOTE(review): ``quaternion_math`` is not imported in this module --
    confirm where it is expected to come from.

    :return: tuple (obs, info) where ``info`` carries 'root_yaw_inv'
    """
    info = {}
    # the observation with quaternion-based representation
    torso_height = ts[..., 0, 1].cpu().numpy()
    gttrny, gqny, vny, avny, info['root_yaw_inv'] = \
        quaternion_math.compute_observation_return_info(global_quats, ts,
                                                        vels, avels)
    joint_obs = np.concatenate([gttrny.cpu().numpy(), gqny.cpu().numpy(),
                                vny.cpu().numpy(), avny.cpu().numpy()],
                               axis=-1)
    joint_obs = joint_obs.reshape(joint_obs.shape[0], -1)
    num_envs = joint_obs.shape[0]
    obs = np.concatenate([torso_height[:, np.newaxis], joint_obs], -1)
    # the previous action
    if config['env_action_ob']:
        obs = np.concatenate([obs, actions], axis=-1)
    # the orientation
    if config['env_orientation_ob']:
        if relative_rot is not None:
            obs = np.concatenate([obs, relative_rot], axis=-1)
        else:
            curr_rot = global_quats[np.arange(num_envs)][:, 0]
            curr_rot = curr_rot.reshape(num_envs, -1, 4)
            relative_rot = quaternion_math.compute_orientation_drift(
                init_rot, curr_rot
            ).cpu().numpy()
            obs = np.concatenate([obs, relative_rot], axis=-1)
    if config['env_frame_ob']:
        # BUG FIX: ``np.float`` was deprecated and removed in NumPy 1.24;
        # it was a plain alias of the builtin ``float``.
        if type(motion_length) == np.ndarray:
            motion_length = motion_length.astype(float)
            progress_ob = np.expand_dims(progress.astype(float) /
                                         motion_length, axis=-1)
        else:
            progress_ob = np.expand_dims(progress.astype(float) /
                                         float(motion_length), axis=-1)
        obs = np.concatenate([obs, progress_ob], axis=-1)
    if config['env_motion_ob'] and not config['env_motion_ob_onehot']:
        motion_id_ob = np.expand_dims(motion_id.astype(float) /
                                      float(num_motion), axis=-1)
        obs = np.concatenate([obs, motion_id_ob], axis=-1)
    elif config['env_motion_ob'] and config['env_motion_ob_onehot']:
        motion_id_ob = motion_onehot_matrix[motion_id]
        obs = np.concatenate([obs, motion_id_ob], axis=-1)
    return obs, info
def get_xyzoffset(start_ts, end_ts, root_yaw_inv):
    """Root translation offset, rotated into the yaw-calibrated frame.

    NOTE(review): relies on the module-level ``quaternion_math`` helper,
    which is not imported in this file -- confirm its origin.
    """
    delta = (end_ts - start_ts)[:, [0], :].reshape(1, -1, 1, 3)
    yaw_inv = root_yaw_inv.reshape(1, -1, 1, 4)
    return quaternion_math.quat_apply(yaw_inv, delta)[0, :, 0, :]
| 40.604938 | 87 | 0.663424 | 1,287 | 9,867 | 4.837607 | 0.241647 | 0.037263 | 0.022486 | 0.01831 | 0.343881 | 0.285737 | 0.25008 | 0.222454 | 0.18487 | 0.146001 | 0 | 0.011199 | 0.239789 | 9,867 | 242 | 88 | 40.772727 | 0.818824 | 0.207054 | 0 | 0.253165 | 0 | 0 | 0.064019 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 1 | 0.063291 | false | 0 | 0.037975 | 0 | 0.164557 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2161ceb1a3ec23817185ca046a16ca44a96eb732 | 1,159 | py | Python | ncbi-track.py | Ma-Fi-94/ncbi-track | d0ddee20c13fc75108b9343d92fb2981624bf09d | [
"Apache-2.0"
] | null | null | null | ncbi-track.py | Ma-Fi-94/ncbi-track | d0ddee20c13fc75108b9343d92fb2981624bf09d | [
"Apache-2.0"
] | 5 | 2022-01-08T00:27:55.000Z | 2022-01-25T15:03:39.000Z | ncbi-track.py | Ma-Fi-94/ncbi-track | d0ddee20c13fc75108b9343d92fb2981624bf09d | [
"Apache-2.0"
] | 1 | 2019-02-20T19:02:24.000Z | 2019-02-20T19:02:24.000Z | # User params
EMAIL = 'A.N.Other@example.com'
DEFAULT_YEAR_MIN = 1970
DEFAULT_YEAR_MAX = 2021
# Basic setup
from Bio import Entrez
from tqdm import tqdm
from typing import Tuple
from typing import List
Entrez.email = EMAIL
def _number_by_year(query: str, year: int) -> int:
    '''Internal helper function.
    Returns the number of published papers matching the query in a given year.'''
    term = query + " " + str(year) + "[pdat]"
    handle = Entrez.esearch(db='pubmed', retmax='200000000', retmode='xml',
                            term=term)
    return len(Entrez.read(handle)["IdList"])
def track_absolute(query: str, year_min: int = DEFAULT_YEAR_MIN, year_max: int = DEFAULT_YEAR_MAX) -> Tuple[List[int], List[int]]:
    '''Track absolute number of occurences of a keyword over the years.
    If query is None, then " " will be searched for, which returns
    the number of all entries in pubmed.'''
    if query is None:
        query = " "
    years = list(range(year_min, year_max + 1))
    # tqdm shows a progress bar while one Entrez query runs per year
    counts = [_number_by_year(query, year) for year in tqdm(years)]
    return (years, counts)
| 32.194444 | 130 | 0.69025 | 172 | 1,159 | 4.517442 | 0.453488 | 0.056628 | 0.046332 | 0.043758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019459 | 0.201898 | 1,159 | 35 | 131 | 33.114286 | 0.820541 | 0.247627 | 0 | 0 | 0 | 0 | 0.0625 | 0.024764 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.190476 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2167dbfa987400283e4b93301c42383dc353687a | 1,127 | py | Python | project/people/views.py | dhmncivichacks/timewebsite | b0e6355ea9249143a217b213df14738eaf067f97 | [
"0BSD"
] | 5 | 2016-02-09T20:54:12.000Z | 2016-10-03T15:21:35.000Z | project/people/views.py | dhmncivichacks/timewebsite | b0e6355ea9249143a217b213df14738eaf067f97 | [
"0BSD"
] | 33 | 2016-02-09T21:18:52.000Z | 2017-02-06T21:49:04.000Z | project/people/views.py | dhmncivichacks/timewebsite | b0e6355ea9249143a217b213df14738eaf067f97 | [
"0BSD"
] | 1 | 2016-02-09T20:54:19.000Z | 2016-02-09T20:54:19.000Z | # imports
from flask import request
from flask import render_template
from flask import Blueprint
from flask import redirect
from flask import url_for
from flask.ext.login import current_user
from project import db
from project.models import Person
from .forms import PersonForm
# config
# Blueprint gathering the people-related routes; templates are resolved
# from this package's ``templates`` folder.
people_blueprint = Blueprint('people', __name__, template_folder='templates')
# routes
@people_blueprint.route('/people', methods=['GET', 'POST'])
def people():
    """List people and handle creation of a new person.

    GET renders the list; POST validates the submitted form and, on
    success, stores a new ``Person`` and redirects back to this page.
    """
    error = None
    form = PersonForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            new_person = Person(
                name=form.name.data,
                url_handle=form.url_handle.data,
                biography=form.biography.data,
                added_by_user_id=current_user.id
            )
            db.session.add(new_person)
            db.session.commit()
            return redirect(url_for('people.people'))
        else:
            # BUG FIX: replaced the 'FIXME ERROR' placeholder with a
            # meaningful message for the template.
            error = 'Invalid form submission. Please correct the errors and try again.'
    people = db.session.query(Person).all()
    return render_template(
        'people.html', form=form, people=people, error=error)
216a2f5727bd9e3ded4fd549e1965f8a1c6304e1 | 6,415 | py | Python | salt/_states/metalk8s.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 255 | 2018-08-03T17:32:53.000Z | 2022-03-25T21:51:00.000Z | salt/_states/metalk8s.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 3,259 | 2018-08-03T00:25:56.000Z | 2022-03-31T15:23:11.000Z | salt/_states/metalk8s.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 43 | 2018-08-08T01:47:22.000Z | 2022-03-12T17:49:41.000Z | """Custom states for MetalK8s."""
import logging
import time
import traceback
import re
from salt.exceptions import CommandExecutionError
from salt.ext import six
__virtualname__ = "metalk8s"
log = logging.getLogger(__name__)
def __virtual__():
    """Expose this state module under the ``metalk8s`` virtual name."""
    return __virtualname__
def _error(ret, err_msg):
ret["result"] = False
ret["comment"] = err_msg
return ret
def static_pod_managed(
    name, source, config_files=None, config_files_opt=None, context=None
):
    """Simple helper to edit a static Pod manifest if configuration changes.

    Expects the template to use:
    - `config_digest` variable and store it in the `metadata.annotations`
      section, with the key `metalk8s.scality.com/config-digest`.
    - `metalk8s_version` variable and store it in the `metadata.labels`
      section, with the key `metalk8s.scality.com/version`.

    name:
        Path to the static pod manifest.
    source:
        Source file used to render the manifest.
    config_files:
        List of file to use to generate a digest store in `config_digest` in
        the source template.
    config_files_opt:
        Same as config_files but these files are optional (ignored if the file
        does not exists).
    context:
        Context to use to render the source template.
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    if not name:
        return _error(ret, "Manifest file name is required")
    if not isinstance(source, six.text_type):
        return _error(ret, "Source must be a single string")
    if not config_files:
        config_files = []
    # Optional config files only contribute to the digest when they exist.
    for config_file in config_files_opt or []:
        if __salt__["file.file_exists"](config_file):
            config_files.append(config_file)
        else:
            log.debug(
                "Ignoring optional config file %s: file does not exist", config_file
            )
    # Hash every config file, then fold the per-file digests into a single
    # digest that the template embeds as an annotation (so the manifest
    # changes whenever any config file changes).
    config_file_digests = []
    for config_file in config_files:
        try:
            digest = __salt__["hashutil.digest_file"](config_file, checksum="sha256")
        except CommandExecutionError as exc:
            return _error(
                ret,
                "Unable to compute digest of config file {}: {}".format(
                    config_file, exc
                ),
            )
        config_file_digests.append(digest)
    config_digest = __salt__["hashutil.md5_digest"]("-".join(config_file_digests))
    # The MetalK8s version is derived from the saltenv name (metalk8s-X.Y.Z).
    match = re.search(r"metalk8s-(?P<version>.+)$", __env__)
    metalk8s_version = match.group("version") if match else "unknown"
    context_ = dict(
        context or {}, config_digest=config_digest, metalk8s_version=metalk8s_version
    )
    if __opts__["test"]:
        log.warning("Test functionality is not yet implemented.")
        ret["comment"] = (
            "The manifest {} is in the correct state (supposedly)."
        ).format(name)
        return ret
    # Gather the source file from the server
    try:
        source_filename, source_sum, comment_ = __salt__["file.get_managed"](
            name,
            template="jinja",
            source=source,
            source_hash="",
            source_hash_name=None,
            user="root",
            group="root",
            mode="0600",
            attrs=None,
            saltenv=__env__,
            context=context_,
            defaults=None,
        )
    except Exception as exc:  # pylint: disable=broad-except
        log.debug(traceback.format_exc())
        return _error(ret, "Unable to get managed file: {}".format(exc))
    if comment_:
        # A non-empty comment from file.get_managed means rendering failed.
        return _error(ret, comment_)
    else:
        try:
            return __salt__["metalk8s.manage_static_pod_manifest"](
                name,
                source_filename,
                source,
                source_sum,
                saltenv=__env__,
            )
        except Exception as exc:  # pylint: disable=broad-except
            log.debug(traceback.format_exc())
            return _error(ret, "Unable to manage file: {}".format(exc))
def module_run(name, attempts=1, sleep_time=10, **kwargs):
    """Classic module.run with a retry logic as it's buggy in salt version
    https://github.com/saltstack/salt/issues/44639
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    remaining = attempts
    while remaining > 0 and not ret["result"]:
        try:
            ret = __states__["module.run"](name, **kwargs)
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = str(exc)
        remaining -= 1
        # Back off before the next attempt, unless we are done.
        if remaining and not ret["result"]:
            time.sleep(sleep_time)
    return ret
def saltutil_cmd(name, **kwargs):
    """Simple `saltutil.cmd` state as `salt.function` do not support roster and
    raw ssh, https://github.com/saltstack/salt/issues/58662"""
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        cmd_ret = __salt__["saltutil.cmd"](fun=name, **kwargs)
    except Exception as exc:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(exc)
        return ret
    # Surface the job id from any minion's return (they all share one jid).
    try:
        ret["__jid__"] = cmd_ret[next(iter(cmd_ret))]["jid"]
    except (StopIteration, KeyError):
        pass
    fail = set()
    for minion, mdata in cmd_ret.items():
        m_ret = False
        if mdata.get("retcode"):
            ret["result"] = False
            fail.add(minion)
        if mdata.get("failed", False):
            fail.add(minion)
        else:
            # Normalize the per-minion payload: prefer 'ret', falling back
            # to 'return', and expose stdout/stderr when present.
            if "return" in mdata and "ret" not in mdata:
                mdata["ret"] = mdata.pop("return")
            if "ret" in mdata:
                m_ret = mdata["ret"]
            if "stderr" in mdata or "stdout" in mdata:
                m_ret = {
                    "retcode": mdata.get("retcode"),
                    "stderr": mdata.get("stderr"),
                    "stdout": mdata.get("stdout"),
                }
            if m_ret is False:
                fail.add(minion)
        ret["changes"][minion] = m_ret
    if not cmd_ret:
        ret["result"] = False
        ret["comment"] = "No minions responded"
    else:
        if fail:
            ret["result"] = False
            ret["comment"] = "Running function {} failed on minions: {}".format(
                name, ", ".join(fail)
            )
        else:
            ret["comment"] = "Function ran successfully"
    return ret
| 30.259434 | 85 | 0.5788 | 745 | 6,415 | 4.787919 | 0.271141 | 0.033642 | 0.023549 | 0.019064 | 0.200168 | 0.160639 | 0.120549 | 0.083544 | 0.083544 | 0.083544 | 0 | 0.007449 | 0.309431 | 6,415 | 211 | 86 | 30.402844 | 0.797743 | 0.182385 | 0 | 0.248276 | 0 | 0 | 0.168874 | 0.011687 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.006897 | 0.041379 | 0.006897 | 0.165517 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
216b39fe89c4a27a4e4a39eb85d69f2512812ada | 2,419 | py | Python | programs/models/LinearRegression.py | hayato-n/AsakuraBook | 60c794961cb5d23ba257e33d350ae719ee72dd21 | [
"MIT"
] | null | null | null | programs/models/LinearRegression.py | hayato-n/AsakuraBook | 60c794961cb5d23ba257e33d350ae719ee72dd21 | [
"MIT"
] | null | null | null | programs/models/LinearRegression.py | hayato-n/AsakuraBook | 60c794961cb5d23ba257e33d350ae719ee72dd21 | [
"MIT"
] | null | null | null | import numpy as np
# from scipy import stats, optimize
def PolyBaseFunction(x, dims):
    """Polynomial design matrix: columns are x**0 through x**dims.

    :param x: 1-D array of inputs
    :param dims: highest polynomial degree
    :return: array of shape (len(x), dims + 1)
    """
    design = [x ** degree for degree in range(dims + 1)]
    return np.array(design).T
class LinearRegression:
    """Linear regression on a precomputed design matrix.

    Offers ordinary least squares, maximum-likelihood, ridge and Bayesian
    fits.  The design matrix ``Phi`` has one row per observation and one
    column per basis function.
    """

    def __init__(self, y, Phi):
        # Targets as a column vector, design matrix as (N, D).
        self.y = np.array(y).reshape((-1, 1))
        self.Phi = np.array(Phi)
        self.N, self.D = self.Phi.shape

    def fit_OLS(self):
        """Ordinary least squares fit; stores ``beta_OLS`` and the RSS."""
        self.beta_OLS \
            = np.linalg.pinv(self.Phi.T @ self.Phi) @ self.Phi.T @ self.y
        diff = self.y - self.predict_OLS(self.Phi)
        self._loss_OLS = float(diff.T @ diff)

    def predict_OLS(self, Phi):
        """Predict with the OLS coefficients."""
        return Phi @ self.beta_OLS

    def fit_ML(self):
        """Maximum-likelihood fit under Gaussian noise.

        The ML coefficients coincide with OLS; the noise variance is the
        mean squared residual.  Also stores the attained log-likelihood.
        """
        self.fit_OLS()
        self.beta_ML = self.beta_OLS
        self.sigma2_ML = self._loss_OLS / self.N
        self._loglik_ML = -0.5*(self._loss_OLS / self.sigma2_ML
                                + self.N * np.log(2*np.pi)
                                + self.N * np.log(self.sigma2_ML))

    def predict_ML(self, Phi):
        """Predict with the ML coefficients (identical to OLS)."""
        return self.predict_OLS(Phi)

    def fit_Ridge(self, alpha):
        """Ridge (L2-penalized) fit with penalty weight ``alpha``."""
        self.alpha_Ridge = float(alpha)
        inversed = np.linalg.pinv(self.Phi.T @ self.Phi
                                  + self.alpha_Ridge*np.eye(self.D))
        self.beta_Ridge \
            = inversed @ self.Phi.T @ self.y
        diff = self.y - self.predict_Ridge(self.Phi)
        self._loss_Ridge = float(diff.T @ diff) \
            + self.alpha_Ridge * self._penalty_Ridge(self.beta_Ridge)

    def predict_Ridge(self, Phi):
        """Predict with the ridge coefficients."""
        return Phi @ self.beta_Ridge

    def _penalty_Ridge(self, beta):
        """Squared L2 norm of the coefficient vector."""
        return float(beta.T @ beta)

    def fit_Bayes(self, sigma2, prior_cov):
        """Bayesian fit with known noise variance and a Gaussian prior.

        :param sigma2: noise variance of the observations
        :param prior_cov: (D, D) prior covariance of the coefficients
        """
        self.sigma2_Bayes = float(sigma2)
        self.prior_cov_Bayes = np.array(prior_cov).reshape((self.D, self.D))
        self._prior_prec_Bayes = np.linalg.pinv(self.prior_cov_Bayes)
        # Posterior precision and moments (Bishop, PRML, eqs. 3.50-3.51).
        self._beta_prec_Bayes \
            = self._prior_prec_Bayes \
            + self.Phi.T @ self.Phi / self.sigma2_Bayes
        self.beta_cov_Bayes = np.linalg.pinv(self._beta_prec_Bayes)
        self.beta_mean_Bayes = \
            self.beta_cov_Bayes @ self.Phi.T @ self.y / self.sigma2_Bayes

    def predict_Bayes(self, Phi):
        """Posterior predictive mean and variance for each row of ``Phi``.

        :return: tuple (mu, sigma2) of 1-D arrays of length ``len(Phi)``
        """
        N = len(Phi)
        mu = (Phi @ self.beta_mean_Bayes).flatten()
        sigma2 = np.empty(N)
        for n in range(N):
            # BUG FIX: the predictive variance is the noise variance plus
            # the model uncertainty (Bishop, PRML, eq. 3.59); the original
            # used 1 / sigma2 for the noise term.
            sigma2[n] = self.sigma2_Bayes \
                + Phi[n].T @ self.beta_cov_Bayes @ Phi[n]
        return mu, sigma2
216bc585900177bb2561f98523e8970a569462b1 | 1,745 | py | Python | meerkat_analysis/test/test_univariate.py | fjelltopp/meerkat_analysis | ad68b02636ee5543e4aa78ac7f46126d040d67ed | [
"MIT"
] | null | null | null | meerkat_analysis/test/test_univariate.py | fjelltopp/meerkat_analysis | ad68b02636ee5543e4aa78ac7f46126d040d67ed | [
"MIT"
] | null | null | null | meerkat_analysis/test/test_univariate.py | fjelltopp/meerkat_analysis | ad68b02636ee5543e4aa78ac7f46126d040d67ed | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from statsmodels.stats import proportion
from meerkat_analysis import univariate, util
class UnivariateTest(unittest.TestCase):
    """ Testing univariate"""

    @staticmethod
    def _load_fixtures():
        # Shared fixtures: raw records plus variable definitions.
        data = pd.read_csv("meerkat_analysis/test/test_data/univariate.csv")
        variables = util.Variables.from_json_file(
            "meerkat_analysis/test/test_data/variables.json")
        return data, variables

    # NOTE(review): the method name keeps the historical 'cateogry' typo to
    # avoid changing the discovered test name.
    def test_breakdown_by_cateogry(self):
        data, variables = self._load_fixtures()
        by_gender = univariate.breakdown_by_category(variables, "gender", data)
        self.assertEqual(by_gender.loc["Male"]["value"], 4)
        self.assertEqual(by_gender.loc["Female"]["value"], 6)
        by_age = univariate.breakdown_by_category(variables, "age",
                                                  data, use_names=False)
        self.assertEqual(by_age.loc["age_1"]["value"], 1)
        self.assertEqual(by_age.loc["age_6"]["value"], 3)

    def test_incidence_rate(self):
        data, variables = self._load_fixtures()
        incidence, ci = univariate.incidence_rate(data, var_id="gen_1")
        self.assertEqual(incidence, 0.4)
        self.assertEqual(ci, proportion.proportion_confint(4, 10, method="wilson"))
        incidence, ci = univariate.incidence_rate(data, population=20,
                                                  var_id="gen_1")
        self.assertEqual(incidence, 0.2)
        self.assertEqual(ci, proportion.proportion_confint(4, 20, method="wilson"))
        incidence, ci = univariate.incidence_rate(data, name="Female",
                                                  variables=variables)
        self.assertEqual(incidence, 0.6)
        self.assertEqual(ci, proportion.proportion_confint(6, 10, method="wilson"))
| 42.560976 | 100 | 0.689398 | 208 | 1,745 | 5.605769 | 0.283654 | 0.128645 | 0.06518 | 0.078902 | 0.648371 | 0.596913 | 0.445969 | 0.368782 | 0.2247 | 0.2247 | 0 | 0.017668 | 0.189112 | 1,745 | 40 | 101 | 43.625 | 0.80636 | 0.010315 | 0 | 0.148148 | 0 | 0 | 0.155233 | 0.106977 | 0 | 0 | 0 | 0 | 0.37037 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcbeed138887023ae7c0c9b4965a14a732578a69 | 214 | py | Python | _src/Section2/smoothing.py | paullewallencom/opencv-978-1-7893-4536-0 | 4a6686f44d8ed82280c94df04813cec5cbd30447 | [
"Apache-2.0"
] | 8 | 2019-06-11T16:27:36.000Z | 2021-11-11T14:42:19.000Z | _src/Section2/smoothing.py | paullewallencom/opencv-978-1-7893-4536-0 | 4a6686f44d8ed82280c94df04813cec5cbd30447 | [
"Apache-2.0"
] | null | null | null | _src/Section2/smoothing.py | paullewallencom/opencv-978-1-7893-4536-0 | 4a6686f44d8ed82280c94df04813cec5cbd30447 | [
"Apache-2.0"
] | 4 | 2019-04-28T14:13:59.000Z | 2021-04-22T22:21:11.000Z | import cv2
# Demo: Gaussian de-noising of an image with OpenCV.
image = cv2.imread("noisy.png")
if image is None:
    # cv2.imread does not raise on failure; it silently returns None when the
    # file is missing or unreadable, which would crash later with a cryptic error.
    raise FileNotFoundError("Could not read input image: noisy.png")
cv2.imshow("Original", image)
# 11x11 Gaussian kernel, sigma=1: cv2.GaussianBlur(input, kernel size, sigma)
smooth = cv2.GaussianBlur(image, (11, 11), 1)
cv2.imshow("De-noised", smooth)
cv2.waitKey(0)
dcc0d328db6ea81b64e1fd1fd7af946fd70cf4d5 | 15,165 | py | Python | src/solutions/common/integrations/cirklo/cirklo.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/solutions/common/integrations/cirklo/cirklo.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/solutions/common/integrations/cirklo/cirklo.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import urllib
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.ext import ndb, db
from typing import List, Optional
from mcfw.cache import cached
from mcfw.rpc import arguments, returns
from mcfw.utils import Enum
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.maps.services import search_services_by_tags, SearchTag
from rogerthat.bizz.opening_hours import get_opening_hours_info
from rogerthat.consts import DEBUG
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import OpeningHours, ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.to import convert_to_unicode, TO
from rogerthat.to.service import SendApiCallCallbackResultTO, UserDetailsTO
from rogerthat.utils.service import get_service_user_from_service_identity_user
from solution_server_settings import get_solution_server_settings, SolutionServerSettings
from solutions import translate
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.models import CirkloUserVouchers, VoucherProviderId, \
CirkloCity
from solutions.common.integrations.cirklo.to import AppVoucher, AppVoucherList
from solutions.common.models import SolutionBrandingSettings
class CirkloApiMethod(Enum):
    """API-call method names the mobile app can invoke for the cirklo integration.

    These string values arrive as the `method` argument of `handle_method` below.
    """
    GET_VOUCHERS = 'integrations.cirklo.getvouchers'
    ADD_VOUCHER = 'integrations.cirklo.addvoucher'
    DELETE_VOUCHER = 'integrations.cirklo.deletevoucher'
    GET_TRANSACTIONS = 'integrations.cirklo.gettransactions'
    GET_MERCHANTS = 'integrations.cirklo.getmerchants'
class UnknownMethodException(Exception):
    """Raised when an unsupported cirklo API method name is dispatched."""

    def __init__(self, method):
        super(UnknownMethodException, self).__init__('Unknown cirklo method: %s' % method)
class TranslatedException(Exception):
    """An error whose message is already translated and safe to show to the end user."""

    def __init__(self, msg):
        super(TranslatedException, self).__init__(msg)
def _cirklo_api_call(settings, url, method, payload=None, staging=False):
    # type: (SolutionServerSettings, str, str, dict) -> UserRPC
    """Start an asynchronous urlfetch call to the cirklo REST API.

    For GET requests the payload is encoded as query-string parameters; for
    PUT/POST it is sent as a JSON body. When `staging` is set, the staging host
    and the staging API key are used. Returns the pending RPC; callers invoke
    rpc.get_result() to block for the response.
    """
    url_params = ('?' + urllib.urlencode(payload)) if payload and method == urlfetch.GET else ''
    url = settings.cirklo_server_url + url + url_params
    if staging and 'staging-app' not in url:
        # Rewrite the host to the staging deployment:
        # https://example... -> https://staging-app-example...
        url = url.replace('https://', 'https://staging-app-')
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'x-api-key': settings.cirklo_api_key_staging if staging else settings.cirklo_api_key
    }
    if method in (urlfetch.PUT, urlfetch.POST) and payload:
        payload = json.dumps(payload)
    else:
        # GET parameters are already in the url; send no request body
        payload = None
    if DEBUG:
        logging.debug('%s %s', method, url)
    rpc = urlfetch.create_rpc(30)  # 30-second deadline
    return urlfetch.make_fetch_call(rpc, url, payload, method, headers, follow_redirects=False)
def list_cirklo_cities(staging):
    # type: (bool) -> List[dict]
    """Fetch all cities known to the cirklo API.

    Staging city ids are prefixed with 'staging-' so the rest of the code can
    tell the two environments apart. Raises on any non-200 response.
    """
    rpc = _cirklo_api_call(get_solution_server_settings(), '/cities', urlfetch.GET, staging=staging)
    result = rpc.get_result()  # type: urlfetch._URLFetchResult
    if result.status_code == 200:
        parsed = json.loads(result.content)
        if staging:
            for city in parsed:
                city['id'] = 'staging-' + city['id']
        return parsed
    else:
        logging.debug('%s\n%s', result.status_code, result.content)
        raise Exception('Unexpected result from cirklo api')
def list_whitelisted_merchants(city_id):
    """Fetch all whitelisted merchants (including their shops) of a cirklo city.

    A 'staging-' prefix on city_id selects the staging environment; the prefix
    is stripped before it is sent to the API. Raises on any non-200 response.
    """
    staging = city_id.startswith('staging-')
    payload = {'cityId': city_id.replace('staging-', ''),
               'includeShops': True}
    rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.GET, payload, staging)
    result = rpc.get_result()  # type: urlfetch._URLFetchResult
    if result.status_code == 200:
        return json.loads(result.content)
    else:
        logging.debug('%s\n%s', result.status_code, result.content)
        raise Exception('Unexpected result from cirklo api')
def check_merchant_whitelisted(city_id, email):
    """Return True when `email` is on the cirklo whitelist of the given city.

    Best-effort: returns False on any API error instead of raising, so callers
    treat 'unknown' the same as 'not whitelisted'.
    """
    staging = city_id.startswith('staging-')
    payload = {'cityId': city_id.replace('staging-', ''),
               'emails': email}
    rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.GET, payload, staging)
    result = rpc.get_result()  # type: urlfetch._URLFetchResult
    if result.status_code == 200:
        merchant_list = json.loads(result.content)
        return len(merchant_list) > 0
    else:
        logging.debug('%s\n%s', result.status_code, result.content)
        return False
def whitelist_merchant(city_id, email):
    """Add `email` to the cirklo whitelist of the given city.

    Raises unless the API answers 201 Created.
    """
    staging = city_id.startswith('staging-')
    payload = {'cityId': city_id.replace('staging-', ''),
               'whitelistEntries': [{'email': email}]}
    rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.POST, payload, staging)
    result = rpc.get_result()  # type: urlfetch._URLFetchResult
    if result.status_code != 201:
        logging.debug('%s\n%s', result.status_code, result.content)
        raise Exception('Unexpected result from cirklo api')
def add_voucher(service_user, app_user, qr_content):
    # type: (users.User, users.User, str) -> dict
    """Validate a scanned cirklo voucher QR code and store the voucher for the user.

    Returns a dict with the voucher details and the logo url of its city.
    Raises TranslatedException for invalid QR codes and duplicate vouchers;
    raises Exception on unexpected cirklo API errors.
    """
    try:
        parsed = json.loads(qr_content)
        voucher_id = parsed.get('voucher')
    except ValueError:
        if len(qr_content) == 36:
            # Some qrs for Dilbeek made in december 2020 contained just the qr id and no json
            voucher_id = qr_content
        else:
            voucher_id = None
    voucher_details = None
    if voucher_id:
        # Validate the id against the cirklo API before storing it
        rpc = _cirklo_api_call(get_solution_server_settings(), '/vouchers/' + voucher_id, urlfetch.GET)
        result = rpc.get_result()  # type: urlfetch._URLFetchResult
        if result.status_code == 200:
            voucher_details = json.loads(result.content)
            voucher_details['id'] = voucher_id
        elif result.status_code in (400, 404):
            # Unknown or malformed voucher id -> treated as an invalid QR below
            logging.debug('%s\n%s', result.status_code, result.content)
            voucher_id = None
        else:
            logging.debug('%s\n%s', result.status_code, result.content)
            raise Exception('Unexpected result from cirklo api')
    if not voucher_id:
        sln_settings = get_solution_settings(service_user)
        msg = translate(sln_settings.main_language, 'not_a_valid_cirklo_qr_code')
        raise TranslatedException(msg)
    key = CirkloUserVouchers.create_key(app_user)
    vouchers = key.get() or CirkloUserVouchers(key=key)  # type: CirkloUserVouchers
    if voucher_id not in vouchers.voucher_ids:
        vouchers.voucher_ids.append(voucher_id)
        vouchers.put()
    else:
        # Already saved by this user -> translated, user-facing error
        sln_settings = get_solution_settings(service_user)
        msg = translate(sln_settings.main_language, 'duplicate_cirklo_voucher')
        raise TranslatedException(msg)
    voucher = AppVoucher.from_cirklo(voucher_id, voucher_details, datetime.utcnow())
    return {
        'voucher': voucher.to_dict(),
        'city': {
            'city_id': voucher.cityId,
            'logo_url': get_logo_url_for_city_id(voucher.cityId),
        }
    }
def delete_voucher(app_user, voucher_id):
    """Remove a voucher id from the user's saved cirklo vouchers (no-op when absent)."""
    vouchers = CirkloUserVouchers.create_key(app_user).get()  # type: CirkloUserVouchers
    if vouchers and voucher_id in vouchers.voucher_ids:
        vouchers.voucher_ids.remove(voucher_id)
        vouchers.put()
def get_user_vouchers_ids(app_user):
    """Return the list of cirklo voucher ids saved for this app user (empty when none)."""
    vouchers = CirkloUserVouchers.create_key(app_user).get()  # type: CirkloUserVouchers
    return vouchers.voucher_ids if vouchers else []
def get_logo_url_for_city_id(city_id):
    """Resolve the voucher logo url for a single cirklo city."""
    logos = get_logo_url_for_city_ids([city_id])
    return logos[city_id]
def get_logo_url_for_city_ids(city_ids):
    """Map every cirklo city id to a logo url.

    Resolution order: the city's configured logo_url, then the city service's
    avatar, and finally a generic default voucher image for unknown cities.
    """
    city_keys = [CirkloCity.create_key(city_id) for city_id in city_ids]
    cities = ndb.get_multi(city_keys)  # type: List[CirkloCity]
    logos = {}
    for city_id, city in zip(city_ids, cities):
        if city:
            if city.logo_url:
                logos[city_id] = city.logo_url
            else:
                # No explicit logo configured: fall back to the service avatar
                branding_settings = db.get(SolutionBrandingSettings.create_key(users.User(city.service_user_email)))
                logos[city_id] = branding_settings.avatar_url
        else:
            logos[city_id] = 'https://storage.googleapis.com/oca-files/misc/vouchers_default_city.png'
    return logos
@cached(0)
@returns(unicode)
@arguments(service_email=unicode)
def get_city_id_by_service_email(service_email):
    """Return the cirklo city id configured for a city service, or None.

    Result is memoized via the @cached decorator.
    """
    cirklo_city = CirkloCity.get_by_service_email(service_email)
    return cirklo_city.city_id if cirklo_city else None
def get_vouchers(service_user, app_user):
    # type: (users.User, users.User) -> AppVoucherList
    """Fetch the user's saved cirklo vouchers plus the city logos to display them.

    Vouchers the API can no longer resolve are skipped (logged) instead of
    failing the whole call. Raises TranslatedException when the service has no
    cirklo city configured yet.
    """
    ids = get_user_vouchers_ids(app_user)
    settings = get_solution_server_settings()
    # Fire all voucher lookups concurrently, then collect the results
    rpcs = [(voucher_id, _cirklo_api_call(settings, '/vouchers/' + voucher_id, urlfetch.GET)) for voucher_id in ids]
    vouchers = []  # type: List[AppVoucher]
    current_date = datetime.utcnow()
    for voucher_id, rpc in rpcs:
        result = rpc.get_result()  # type: urlfetch._URLFetchResult
        logging.debug('%s: %s', result.status_code, result.content)
        if result.status_code == 200:
            vouchers.append(AppVoucher.from_cirklo(voucher_id, json.loads(result.content), current_date))
        else:
            logging.error('Invalid cirklo api response: %s', result.status_code)
    try:
        main_city_id = get_city_id_by_service_email(service_user.email())
    except Exception:  # FIX: narrowed from a bare except (which also swallowed SystemExit etc.)
        main_city_id = None
    if not main_city_id:
        logging.error('No cityId found for service %s' % service_user.email())
        sln_settings = get_solution_settings(service_user)
        msg = translate(sln_settings.main_language, 'cirklo_vouchers_not_live_yet')
        raise TranslatedException(msg)
    city_ids = {voucher.cityId for voucher in vouchers}
    city_ids.add(main_city_id)
    logos = get_logo_url_for_city_ids(list(city_ids))
    voucher_list = AppVoucherList()
    voucher_list.results = vouchers
    voucher_list.main_city_id = main_city_id
    voucher_list.cities = {}
    # FIX: .items() works on both Python 2 and 3; .iteritems() is py2-only
    for city_id, logo_url in logos.items():
        voucher_list.cities[city_id] = {'logo_url': logo_url}
    return voucher_list
def get_merchants_by_community(community_id, language, cursor, page_size, query):
    # type: (int, str, Optional[str], int, str) -> dict
    """Search cirklo-enabled merchant services in a community.

    Returns one page of results as {'results': [...], 'cursor': ..., 'more': bool}.
    """
    community = get_community(community_id)
    # Always filter by community id
    tags = [
        SearchTag.community(community_id),
        SearchTag.environment(community.demo),
        SearchTag.vouchers(VoucherProviderId.CIRKLO)
    ]
    service_identity_users, new_cursor = search_services_by_tags(tags, cursor, page_size, query)
    service_users = [get_service_user_from_service_identity_user(service_user)
                     for service_user in service_identity_users]
    info_keys = [ServiceInfo.create_key(service_user, ServiceIdentity.DEFAULT) for service_user in service_users]
    hours_keys = [OpeningHours.create_key(service_user, ServiceIdentity.DEFAULT) for service_user in service_users]
    # Fetch service info + opening hours in one batched ndb call, then split
    models = ndb.get_multi(info_keys + hours_keys)
    infos = models[0: len(info_keys)]
    hours = models[len(info_keys):]
    results = []
    for service_info, opening_hours in zip(infos, hours):  # type: ServiceInfo, Optional[OpeningHours]
        opening_hours_dict = None
        if opening_hours:
            now_open, title, subtitle, weekday_text = get_opening_hours_info(opening_hours, service_info.timezone,
                                                                             language)
            opening_hours_dict = {
                'open_now': now_open,
                'title': title,
                'subtitle': subtitle,
                'weekday_text': [t.to_dict() for t in weekday_text]
            }
        results.append({
            'id': service_info.service_user.email(),
            'name': service_info.name,
            'address': service_info.addresses[0].to_dict() if service_info.addresses else None,
            'email_addresses': [{'name': email.name, 'value': email.value} for email in service_info.email_addresses],
            'websites': [{'name': website.name, 'value': website.value} for website in service_info.websites],
            'phone_numbers': [{'name': phone.name, 'value': phone.value} for phone in service_info.phone_numbers],
            'opening_hours': opening_hours_dict,
        })
    return {
        'results': results,
        'cursor': new_cursor,
        'more': new_cursor is not None,
    }
def handle_method(service_user, email, method, params, tag, service_identity, user_details):
    # type: (users.User, str, str, str, str, str, List[UserDetailsTO]) -> SendApiCallCallbackResultTO
    """Dispatch an app API call for the cirklo integration.

    `method` is one of the CirkloApiMethod values, `params` a JSON string.
    TranslatedException messages are returned to the user as-is; any other
    failure is logged and mapped to a generic translated error.
    """
    response = SendApiCallCallbackResultTO()
    try:
        json_data = json.loads(params) if params else {}
        user = user_details[0]
        app_user = user.toAppUser()
        if method == CirkloApiMethod.GET_VOUCHERS:
            result = get_vouchers(service_user, app_user)
        elif method == CirkloApiMethod.ADD_VOUCHER:
            qr_content = json_data['qrContent']
            result = add_voucher(service_user, app_user, qr_content)
        elif method == CirkloApiMethod.DELETE_VOUCHER:
            delete_voucher(app_user, json_data['id'])
            result = {}
        elif method == CirkloApiMethod.GET_TRANSACTIONS:
            # Not implemented yet
            result = {'results': []}
        elif method == CirkloApiMethod.GET_MERCHANTS:
            # FIX: fetch the profile once; it provides both language and community
            # (previously get_user_profile(app_user) was called twice)
            user_profile = get_user_profile(app_user)
            cursor = json_data.get('cursor')
            page_size = json_data.get('page_size', 20)
            query = (json_data.get('query') or '').strip()
            result = get_merchants_by_community(user_profile.community_id, user_profile.language, cursor,
                                                page_size, query)
        else:
            raise UnknownMethodException(method)
        response.result = convert_to_unicode(json.dumps(result.to_dict() if isinstance(result, TO) else result))
    except TranslatedException as e:
        # Expected user-facing error; its message is already translated
        logging.debug('User error while handling cirklo callback: %s', e.message)
        response.error = e.message
    except Exception:
        logging.error('Error while handling cirklo call %s' % method, exc_info=True)
        sln_settings = get_solution_settings(service_user)
        response.error = translate(sln_settings.main_language, 'error-occured-unknown')
    return response
| 43.82948 | 118 | 0.696406 | 1,880 | 15,165 | 5.370213 | 0.170213 | 0.020206 | 0.023772 | 0.013471 | 0.293483 | 0.255448 | 0.229101 | 0.187599 | 0.172147 | 0.158875 | 0 | 0.004152 | 0.205935 | 15,165 | 345 | 119 | 43.956522 | 0.834247 | 0.092911 | 0 | 0.204947 | 0 | 0 | 0.085289 | 0.018953 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056537 | false | 0 | 0.09894 | 0.003534 | 0.229682 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc2626101676e7724b219889678791a751b8ea3 | 361 | py | Python | 198. House Robber.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | 198. House Robber.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | 198. House Robber.py | patrick-luo/Leet-Code | 989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a | [
"MIT"
] | null | null | null | class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
prevMax = 0 # the max not robbing the previous house
currMax = 0 # the max
for n in nums:
temp = currMax
currMax = max(currMax, prevMax+n)
prevMax = temp
return currMax
| 25.785714 | 60 | 0.498615 | 41 | 361 | 4.390244 | 0.609756 | 0.044444 | 0.077778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009479 | 0.415512 | 361 | 13 | 61 | 27.769231 | 0.843602 | 0.224377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc368841481c483c9cecbe85cd5e6472b5ab80c | 1,907 | py | Python | hillSketch/code/data/bulk.py | ViskaWei/hill-sketch | cc3ebf19ec83e67e4178004107f3b1dcc9a5379b | [
"MIT"
] | 3 | 2020-12-02T04:00:48.000Z | 2020-12-04T06:42:06.000Z | hillSketch/code/data/bulk.py | ViskaWei/hill-sketch | cc3ebf19ec83e67e4178004107f3b1dcc9a5379b | [
"MIT"
] | null | null | null | hillSketch/code/data/bulk.py | ViskaWei/hill-sketch | cc3ebf19ec83e67e4178004107f3b1dcc9a5379b | [
"MIT"
] | null | null | null | # from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def prepro_data(data, isCenter=True, dimPCA=6, isPlot=True, method='minmax'):
    """Preprocess a data matrix: optionally center, project with PCA, then normalise.

    :param data: input matrix / DataFrame
    :param isCenter: subtract the global mean before projection
    :param dimPCA: number of principal components to keep
    :param isPlot: forwarded to the normalisation step's plotting
    :param method: normalisation method (currently only 'minmax')
    :returns: the normalised PCA projection
    """
    if isCenter:
        # subtract the global mean so the projection sees centered data
        dataPREPRO = data - data.mean().mean()
    else:
        dataPREPRO = data
    # FIX: previously `data` (uncentered) was passed here, silently ignoring
    # isCenter. sklearn's PCA re-centers per feature, so the projection is
    # numerically equivalent, but the intent (use dataPREPRO, as the commented
    # get_SVD call shows) is now honoured.
    matPCA = get_pca(dataPREPRO, dim=dimPCA)
    # matPCA = get_SVD(dataPREPRO, dimPCA = dimPCA, isPlot=isPlot)
    matNorm = get_norm(matPCA, method=method, isPlot=isPlot)
    # dfRebin = get_rebin(matNorm,base)
    return matNorm
def get_pca(mat, dim=6):
    """Project `mat` onto its first `dim` principal components.

    Uses a fixed random_state so repeated runs are reproducible.
    """
    reducer = PCA(n_components=dim, random_state=907)
    projected = reducer.fit_transform(mat)
    print(projected.shape)
    return projected
######################## PCA ###########################
def get_SVD(data, dimPCA=6, isPlot=False):
    """PCA via an explicit SVD of the Gram matrix data^T data.

    Optionally visualises the Gram matrix and the projected data.
    """
    cov = data.T.dot(data)
    if isPlot:
        plt.matshow(cov)
    principal_dirs = get_pc(cov, dimPCA)
    matPCA = data.dot(principal_dirs)
    if isPlot:
        plt.matshow(matPCA.T, aspect='auto')
    return matPCA
def get_pc(cov, pca_comp):
    """Return the top `pca_comp` left singular vectors of `cov` as columns."""
    print(f"=============== PCA N_component: {pca_comp} ===============")
    u, s, _v = np.linalg.svd(cov)
    # report how much variance each component explains
    print('Explained Variance Ratio', np.round(s / sum(s), 3))
    return u[:, :pca_comp]
######################## NORM ###########################
def get_norm(matPCA, method='minmax', isPlot=False):
    """Rescale `matPCA` to the [0, 1] range.

    :param matPCA: array-like with .min()/.max(), or any object np.min accepts
    :param method: normalisation method; only 'minmax' is implemented
    :param isPlot: show the normalised matrix with matplotlib
    :raises ValueError: for an unknown `method`
    """
    if method == 'minmax':
        try:
            vmin, vmax = matPCA.min(), matPCA.max()
        except AttributeError:
            # fall back for inputs without min/max methods
            vmin, vmax = np.min(matPCA), np.max(matPCA)
        matNorm = (matPCA - vmin) / (vmax - vmin)
        if isPlot:
            plt.matshow(matNorm.T, aspect='auto')
    else:
        # FIX: `raise '...'` (a string) is a TypeError on Python 3;
        # raise a proper exception instead
        raise ValueError('select or implement norm method')
    return matNorm
# def get_rebin(dfNorm, base):
# dfRebin=(dfNorm*(base-1)).round()
# assert (dfRebin.min().min()>=0) & (dfRebin.max().max()<=base-1)
# return dfRebin
| 32.322034 | 75 | 0.588883 | 250 | 1,907 | 4.42 | 0.332 | 0.027149 | 0.029864 | 0.048869 | 0.059729 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006689 | 0.216046 | 1,907 | 58 | 76 | 32.87931 | 0.732441 | 0.173571 | 0 | 0.146341 | 0 | 0 | 0.095563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.097561 | 0 | 0.341463 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc530d338845900a1da1ea13b2ce974b70aa2fe | 7,535 | py | Python | example/mnist/train_mnist_keras.py | leonskim/webdnn | f97c798c9a659fe953f9dc8c8537b8917e4be7a2 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | example/mnist/train_mnist_keras.py | leonskim/webdnn | f97c798c9a659fe953f9dc8c8537b8917e4be7a2 | [
"MIT"
] | null | null | null | example/mnist/train_mnist_keras.py | leonskim/webdnn | f97c798c9a659fe953f9dc8c8537b8917e4be7a2 | [
"MIT"
] | null | null | null | """
This example is based on keras's mnist_mlp.py and mnist_cnn.py
Trains a simple deep NN on the MNIST dataset.
"""
import argparse
import json
import os
import keras
from webdnn.backend import generate_descriptor, backend_names
from webdnn.frontend.keras import KerasConverter
batch_size = 128
num_classes = 10
epochs = 2
img_rows, img_cols = 28, 28
def get_input_shape(model_type):
    """Return the keras input shape for the given model architecture name."""
    if model_type == "fc":
        # fully-connected net consumes flattened pixels
        return img_rows * img_cols,
    if model_type in ("conv", "dilated_conv", "residual", "complex"):
        # convolutional nets consume single-channel images (channels_last)
        return img_rows, img_cols, 1
    raise NotImplementedError("Unknown model type")
def _setup_model(model_type):
    """Build and return an (untrained) keras model for the requested architecture.

    Supported model_type values: "conv", "dilated_conv", "fc", "residual",
    "complex". Raises NotImplementedError for anything else.
    """
    from keras import backend as K
    from keras.layers import Dense, Dropout, Flatten, Conv2D, AtrousConv2D, MaxPooling2D, Input, add, GlobalAveragePooling2D, Activation
    from keras.models import Sequential, Model

    input_shape = get_input_shape(model_type)
    if model_type == "conv":
        # plain two-layer CNN followed by a small dense classifier
        model = Sequential()
        model.add(Conv2D(8, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
        model.add(Conv2D(16, (3, 3), activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(32, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation="softmax"))
    elif model_type == "dilated_conv":
        # same topology but with dilated (atrous) convolutions
        model = Sequential()
        model.add(AtrousConv2D(8, kernel_size=(3, 3), atrous_rate=(2, 2), activation="relu", input_shape=input_shape))  # shape is 5x5
        model.add(AtrousConv2D(16, kernel_size=(3, 3), atrous_rate=(3, 3), activation="relu"))  # shape is 7x7
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(32, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation="softmax"))
    elif model_type == "fc":
        # simple multilayer perceptron on flattened pixels
        model = Sequential()
        model.add(Dense(512, activation="hard_sigmoid", input_shape=(784,)))
        model.add(Dropout(0.2))
        model.add(Dense(512, activation="relu"))
        model.add(Dropout(0.2))
        model.add(Dense(10, activation="softmax"))
    elif model_type == "residual":
        # functional-API model with two additive skip connections
        nn_input = Input(shape=(28, 28, 1))
        hidden = Conv2D(8, kernel_size=(3, 3), activation="relu")(nn_input)
        hidden = MaxPooling2D(pool_size=(2, 2))(hidden)
        hidden_1 = Conv2D(16, kernel_size=(1, 1), activation="relu", padding="same")(hidden)
        hidden_2 = Conv2D(16, kernel_size=(3, 3), activation="relu", padding="same")(hidden)
        hidden = add([hidden_1, hidden_2])
        hidden_1 = hidden
        hidden_2 = Conv2D(16, kernel_size=(3, 3), activation="relu", padding="same")(hidden)
        hidden = add([hidden_1, hidden_2])
        hidden = GlobalAveragePooling2D()(hidden)
        nn_output = Dense(num_classes, activation="softmax")(hidden)
        model = Model(inputs=[nn_input], outputs=[nn_output])
    elif model_type == "complex":
        # graph which has graph and sequential
        # this is for testing converting complex model
        nn_input = Input(shape=(28, 28, 1))
        hidden_1 = Conv2D(8, kernel_size=(3, 3), activation="relu")(nn_input)
        submodel_input = Input(shape=(26, 26, 8))
        submodel_conv = Conv2D(8, kernel_size=(3, 3), activation="relu")
        submodel_1 = submodel_conv(submodel_input)
        submodel_2 = submodel_conv(submodel_1)  # use same layer multiple times
        submodel_3 = Conv2D(16, kernel_size=(3, 3), activation="relu")(submodel_1)
        submodel = Model(inputs=[submodel_input], outputs=[submodel_3, submodel_2])
        subseq = Sequential()
        subseq.add(Conv2D(16, kernel_size=(3, 3), activation="relu", input_shape=(22, 22, 16)))
        subseq.add(Flatten())
        subseq.add(Dense(10))
        hidden_2, hidden_3 = submodel(hidden_1)
        hidden_4 = subseq(hidden_2)
        hidden_5 = Flatten()(hidden_3)
        hidden_6 = Dense(10)(hidden_5)
        hidden_sum = add([hidden_4, hidden_6])
        nn_output = Activation(activation="softmax")(hidden_sum)
        model = Model(inputs=[nn_input], outputs=[nn_output])
    else:
        raise NotImplementedError("Unknown model type")
    print(f"input shape: {input_shape}, data_format: {K.image_data_format()}")
    return model
def _train_and_save(model_type, model_path, sample_path):
    """Train the chosen model on MNIST, save it as .h5 and export demo samples.

    Writes the trained keras model to `model_path` and the first ten test
    images (flattened, with labels) as JSON to `sample_path`.
    """
    import keras
    from keras import backend as K
    from keras.datasets import mnist
    from keras.optimizers import RMSprop

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    if model_type in ["conv", "dilated_conv", "residual", "complex"]:
        if K.image_data_format() == "channels_first":
            raise NotImplementedError("Currently, WebDNN converter does not data_format==channels_first")
        else:
            # add a trailing channel dimension for the conv nets
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    elif model_type == "fc":
        # flatten each image to one feature vector
        x_train = x_train.reshape(x_train.shape[0], img_rows * img_cols)
        x_test = x_test.reshape(x_test.shape[0], img_rows * img_cols)
    else:
        raise NotImplementedError("Unknown model type")
    # scale pixel values to [0, 1]
    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], "train_and_save samples")
    print(x_test.shape[0], "test samples")

    # convert class vectors to binary class matrices
    y_test_orig = y_test  # for exporting test sample
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = _setup_model(model_type)
    model.summary()
    model.compile(loss="categorical_crossentropy", optimizer=RMSprop(), metrics=["accuracy"])
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])

    print("Saving trained model")
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    model.save(model_path)

    print("Exporting test samples (for demo purpose)")
    test_samples_json = []
    for i in range(10):
        test_samples_json.append({"x": x_test[i].flatten().tolist(), "y": int(y_test_orig[i])})
    with open(sample_path, "w") as f:
        json.dump(test_samples_json, f)
def generate_graph(model_type, output_dir):
    """Load (training first if necessary) the keras model and convert it to a WebDNN graph.

    :returns: (keras model, WebDNN graph)
    """
    model_path = os.path.join(output_dir, f"./keras_model/{model_type}.h5")
    sample_path = os.path.join(output_dir, "test_samples.json")

    if not os.path.exists(model_path):
        # first run: train and persist the model so later runs just reload it
        _train_and_save(model_type, model_path, sample_path)

    model = keras.models.load_model(model_path, compile=False)
    graph = KerasConverter(batch_size=1).convert(model)
    return model, graph
def main():
    """CLI entry point: train/convert the selected model and emit a descriptor per backend."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="fc", choices=["fc", "conv", "dilated_conv", "residual", "complex"])
    parser.add_argument("--out", default="output_keras")
    parser.add_argument("--backend", default=",".join(backend_names))
    args = parser.parse_args()

    model, graph = generate_graph(args.model, args.out)

    # generate one execution descriptor for each requested WebDNN backend
    for backend in args.backend.split(","):
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)
# Allow running this module directly as a training / conversion script.
if __name__ == "__main__":
    main()
| 36.756098 | 136 | 0.667153 | 1,042 | 7,535 | 4.602687 | 0.196737 | 0.035029 | 0.022936 | 0.025021 | 0.429316 | 0.375104 | 0.329858 | 0.32965 | 0.242702 | 0.180567 | 0 | 0.032306 | 0.198938 | 7,535 | 204 | 137 | 36.936275 | 0.76226 | 0.042601 | 0 | 0.277027 | 0 | 0 | 0.098056 | 0.014306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033784 | false | 0 | 0.087838 | 0 | 0.148649 | 0.047297 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc64800430131d283de4b85bb796a7a3b2b1f39 | 1,793 | py | Python | vega/metrics/tensorflow/classifier_metric.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/metrics/tensorflow/classifier_metric.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/metrics/tensorflow/classifier_metric.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric of classifier task."""
import tensorflow as tf
from vega.metrics.tensorflow.metrics import MetricBase
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.METRIC)
class accuracy(MetricBase):
    """Calculate classification accuracy between output and target.

    Reports top-k accuracy for every k in `topk`; the first k's metric is
    also exposed under the plain "accuracy" key.
    """
    __metric_name__ = 'accuracy'

    def __init__(self, topk=(1, 5)):
        """Init accuracy metric.

        :param topk: tuple of k values to compute top-k accuracy for
        """
        self.topk = topk

    def __call__(self, output, target, *args, **kwargs):
        """Forward and calculate accuracy."""
        top_accuracy = {}
        is_one = len(self.topk) == 1  # single k -> use the plain metric name
        if len(output.shape) == 3:
            # assumes the leading dim wraps the (batch, classes) logits -- TODO confirm
            output = output[0]
        # FIX: use a None sentinel with `is` instead of `top_1 = -1` compared
        # with `==`; after the first iteration top_1 holds a TF metrics result,
        # and equality against -1 on such objects is fragile.
        top_1 = None
        for k in self.topk:
            key = self.__metric_name__ if is_one else 'accuracy_top{}'.format(k)
            in_top_k = tf.cast(tf.nn.in_top_k(output, target, k), tf.float32)
            top_k_accuracy = tf.compat.v1.metrics.mean(in_top_k)
            if top_1 is None:
                top_1 = top_k_accuracy
            top_accuracy["accuracy"] = top_1
            top_accuracy[key] = top_k_accuracy
        return top_accuracy
| 36.591837 | 80 | 0.668154 | 249 | 1,793 | 4.646586 | 0.493976 | 0.051858 | 0.015557 | 0.027658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016691 | 0.231456 | 1,793 | 48 | 81 | 37.354167 | 0.822932 | 0.42164 | 0 | 0 | 0 | 0 | 0.02991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc6929ebc43874a263f2fdc7e4382a4c2c8e1f4 | 11,290 | py | Python | python/pmercury/protocols/tls_certificate.py | raj-apoorv/mercury | 9b372e9ba8e7e0bcb87787038e7d058f8f769b44 | [
"BSD-2-Clause"
] | 299 | 2019-09-11T18:48:17.000Z | 2022-03-28T11:12:03.000Z | python/pmercury/protocols/tls_certificate.py | raj-apoorv/mercury | 9b372e9ba8e7e0bcb87787038e7d058f8f769b44 | [
"BSD-2-Clause"
] | 21 | 2019-09-30T17:59:33.000Z | 2022-03-24T19:15:41.000Z | python/pmercury/protocols/tls_certificate.py | raj-apoorv/mercury | 9b372e9ba8e7e0bcb87787038e7d058f8f769b44 | [
"BSD-2-Clause"
] | 58 | 2019-09-30T17:59:45.000Z | 2022-03-27T13:40:47.000Z | """
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""
import os
import sys
import base64
# TLS helper classes
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
from pmercury.utils.tls_utils import *
from pmercury.utils.tls_constants import *
from pmercury.utils.cert_constants import *
class TLS_Certificate(Protocol):
    """TLS Certificate handshake parser.

    Extracts the DER certificates carried in a TLS ``Certificate``
    handshake message (returned base64-encoded) and can parse them into a
    human readable dict using a minimal ASN.1 BER/DER TLV walker.
    """

    def __init__(self):
        # Fingerprint database placeholder; never populated by this parser.
        self.fp_db = None

    @staticmethod
    def proto_identify(data, offset):
        """Return True if data[offset:] starts a TLS record (content type 22,
        protocol version 3.x) whose handshake message is a Certificate (11)."""
        if (data[offset] == 22 and
                data[offset+1] == 3 and
                data[offset+2] <= 3 and
                data[offset+5] == 11):
            return True
        return False

    @staticmethod
    def proto_identify_hs(data, offset):
        """Return True if data[offset:] starts a bare Certificate handshake
        message (type 11), i.e. without the 5-byte TLS record header."""
        if (data[offset] == 11 and
                data[offset+1] == 0 and
                data[offset+4] == 0 and
                data[offset+7] == 0):
            return True
        return False

    @staticmethod
    def proto_identify_sh(data, offset):
        """Return True if data[offset:] starts a TLS record carrying a
        ServerHello (handshake type 2) with a 3.x version."""
        if (data[offset] == 22 and
                data[offset+1] == 3 and
                data[offset+2] <= 3 and
                data[offset+5] == 2 and
                data[offset+9] == 3 and
                data[offset+10] <= 3):
            return True
        return False

    @staticmethod
    def fingerprint(data, app_offset, data_len):
        """Extract the certificate chain starting at ``app_offset``.

        Returns ``(certs, None)`` where ``certs`` is a list of base64-encoded
        DER certificates (or ``None`` when no Certificate message is found).
        NOTE: the ``data_len`` argument is immediately overwritten with
        ``len(data)``.
        """
        data_len = len(data)
        offset = app_offset
        # If the data starts with a ServerHello record, skip past the whole
        # record (5-byte record header + 4-byte handshake header + body).
        if (data[offset] == 22 and
                data[offset+1] == 3 and
                data[offset+2] <= 3 and
                data[offset+5] == 2 and
                data[offset+9] == 3 and
                data[offset+10] <= 3):
            offset += 9+int(data[offset+6:offset+9].hex(),16)
        if offset >= data_len:
            return None, None
        # Certificate message inside a TLS record: skip the record header.
        if (data[offset] == 22 and
                data[offset+1] == 3 and
                data[offset+2] <= 3 and
                data[offset+5] == 11 and
                data[offset+6] == 0):
            offset += 5
        # Bare Certificate handshake message: already positioned on it.
        elif (data[offset] == 11 and
                data[offset+1] == 0 and
                data[offset+4] == 0 and
                data[offset+7] == 0):
            pass
        else:
            return None, None
        # 3-byte certificate_list length sits after the 4-byte handshake header.
        certificates_length = int(data[offset+4:offset+7].hex(),16)
        offset += 7
        if offset >= data_len:
            return None, None
        certs = None
        # NOTE(review): offset is absolute while certificates_length is a
        # relative length — presumably fine because parsing starts near the
        # beginning of the buffer; confirm against callers.
        while offset < certificates_length:
            # Each entry: 3-byte certificate length followed by the DER bytes.
            cert_len = int(data[offset:offset+3].hex(),16)
            offset += 3
            if offset >= data_len:
                return certs, None
            if certs == None:
                certs = []
            certs.append(base64.b64encode(data[offset:offset+cert_len]).decode())
            offset += cert_len
            if offset >= data_len:
                return certs, None
        return certs, None

    def get_human_readable(self, fp_str_):
        """Parse each base64-encoded certificate in ``fp_str_`` into a dict."""
        fp_h = []
        for cert_ in fp_str_:
            cert = base64.b64decode(cert_)
            cert_json = self.cert_parser(cert)
            fp_h.append(cert_json)
        return fp_h

    def cert_parser(self, cert):
        """Parse a DER-encoded X.509 certificate into a dict.

        Walks the TBSCertificate fields in order (version, serial, signature
        algorithm, issuer, validity, subject, extensions) and returns what was
        parsed so far as soon as any TLV fails to parse.
        """
        out_ = {}
        # parse to cert data
        _, _, cert, _ = self.parse_tlv(cert, 0)
        _, _, cert, _ = self.parse_tlv(cert, 0)
        # Skip 2 bytes — presumably the context-specific [0] version wrapper
        # header; TODO confirm against the X.690 layout.
        offset = 2
        # parse version
        _, _, value, offset = self.parse_tlv(cert, offset)
        if value != None:
            if value.hex() in cert_versions:
                value = cert_versions[value.hex()]
            else:
                # No recognisable explicit version: rewind and default to '02'.
                offset = 0
                value = cert_versions['02']
        else:
            offset = 0
            value = cert_versions['02']
        out_['version'] = value
        # parse serial number
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        out_['serial_number'] = value.hex()
        # parse signature
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        out_['algorithm_identifier'] = self.parse_signature_algorithm(value)
        # parse issuer
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        out_['issuer'] = self.parse_rdn_sequence(value)
        # parse validity
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        out_['validity'] = self.parse_validity(value)
        # parse subject
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        out_['subject'] = self.parse_rdn_sequence(value)
        # skip subject_public_key_info
        _, _, value, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        # parse extensions
        _, _, cert_ext_outer, offset = self.parse_tlv(cert, offset)
        if offset == None:
            return out_
        _, _, cert_ext, _ = self.parse_tlv(cert_ext_outer, 0)
        if cert_ext == None:
            return out_
        # parse individual extensions
        cert_exts = []
        ext_offset = 0
        _, _, ext_value, ext_offset = self.parse_tlv(cert_ext, ext_offset)
        if ext_offset == None:
            return out_
        while ext_offset != None:
            ext_ = self.parse_ext(ext_value)
            if ext_ != None:
                cert_exts.append(ext_)
            _, _, ext_value, ext_offset = self.parse_tlv(cert_ext, ext_offset)
        out_['extensions'] = cert_exts
        return out_

    def parse_signature_algorithm(self, val_):
        """Parse an AlgorithmIdentifier: the OID (mapped to a readable name
        when known) plus the optional raw parameters as hex."""
        _, _, alg_, offset = self.parse_tlv(val_, 0)
        if offset == None:
            return val_
        alg_ = alg_.hex()
        if alg_ in oid_mapping:
            alg_ = oid_mapping[alg_]
        out_ = {}
        out_['algorithm'] = alg_
        _, _, params_, offset = self.parse_tlv(val_, offset)
        if offset == None:
            return out_
        out_['parameters'] = params_.hex()
        return out_

    def parse_subject_alt_name(self, val_):
        """Parse a subjectAltName extension into a list of
        ``'<general-name-type>: <value>'`` strings."""
        _, _, sans_, offset = self.parse_tlv(val_, 0)
        if offset == None:
            return ''
        san_arr = []
        tag_, _, san_, offset = self.parse_tlv(sans_, 0)
        while offset != None:
            # GeneralName tags are context-specific; subtracting 0x80 leaves
            # the name type number.
            general_name_type = tag_ - 128
            general_name = 'Unknown'
            if general_name_type in cert_general_name_types:
                general_name = cert_general_name_types[general_name_type]
            try:
                san_arr.append(f'{general_name}: {san_.decode()}')
            except:
                # Values that do not decode as text fall back to hex.
                san_arr.append(f'{general_name}: {san_.hex()}')
            tag_, _, san_, offset = self.parse_tlv(sans_, offset)
        return san_arr

    def parse_key_usage(self, val_):
        """Decode the keyUsage BIT STRING into its named flags."""
        _, _, val_, offset = self.parse_tlv(val_, 0)
        if offset == None or len(val_) < 2:
            return ''
        padding = val_[0]  # first octet of a BIT STRING: number of unused bits
        value = []
        if val_[1] & 0x80:
            value.append('digitalSignature')
        if val_[1] & 0x40:
            value.append('contentCommitment')
        if val_[1] & 0x20:
            value.append('keyEncipherment')
        if val_[1] & 0x10:
            value.append('dataEncipherment')
        if val_[1] & 0x08:
            value.append('keyAgreement')
        if val_[1] & 0x04:
            value.append('keyCertSign')
        if val_[1] & 0x02:
            value.append('cRLSign')
        if val_[1] & 0x01:
            value.append('encipherOnly')
        if len(val_) > 2 and val_[2] & 0x80:
            value.append('decipherOnly')
        return {'padding': padding, 'value': value}

    def parse_ext(self, data):
        """Parse a single Extension: OID, optional critical flag, and value.

        subjectAltName and keyUsage get structured output; everything else
        is returned as the raw hex of the extension value.
        """
        offset = 0
        _, _, id_, offset = self.parse_tlv(data, offset)
        if offset == None:
            return None
        _, _, val_, offset = self.parse_tlv(data, offset)
        if offset == None:
            return None
        critical = False
        if val_.hex() == 'ff':
            # BOOLEAN TRUE: that TLV was the critical flag; the actual
            # extension value is the next TLV.
            critical = True
            _, _, val_, offset = self.parse_tlv(data, offset)
        id_ = id_.hex()
        # 551d… = id-ce arc; 2b060105050701… = private extension arc.
        if id_.startswith('551d') and len(id_) == 6 and id_[4:6] in cert_extensions:
            id_ = cert_extensions[id_[4:6]]
        elif id_.startswith('2b060105050701') and len(id_) == 16 and id_[14:16] in cert_extensions_private:
            id_ = cert_extensions_private[id_[14:16]]
        elif id_ in oid_mapping:
            id_ = oid_mapping[id_]
        out_val_ = ''
        if id_ == 'id-ce-subjectAltName':
            out_val_ = self.parse_subject_alt_name(val_)
        elif id_ == 'id-ce-keyUsage':
            out_val_ = self.parse_key_usage(val_)
        if out_val_ == '':
            out_val_ = val_.hex()
        return {id_: out_val_, 'critical': critical}

    def parse_validity(self, data):
        """Parse the Validity sequence into not_before/not_after strings
        (hex fallback when the time value does not decode as text)."""
        offset = 0
        _, _, not_before, offset = self.parse_tlv(data, offset)
        if offset == None:
            return None
        try:
            out_ = {'not_before': not_before.decode()}
        except:
            out_ = {'not_before': not_before.hex()}
        _, _, not_after, offset = self.parse_tlv(data, offset)
        if offset == None:
            return out_
        try:
            out_['not_after'] = not_after.decode()
        except:
            out_['not_after'] = not_after.hex()
        return out_

    def parse_rdn_sequence_item(self, data):
        """Parse one RelativeDistinguishedName into an {attribute: value}
        dict, mapping known 5504… attribute-type OIDs to readable names."""
        _, _, value, _ = self.parse_tlv(data, 0)
        if value == None:
            return None
        offset = 0
        _, _, id_, offset = self.parse_tlv(value, offset)
        if offset == None:
            return None
        tag_, _, val_, offset = self.parse_tlv(value, offset)
        if offset == None:
            return None
        id_ = id_.hex()
        if id_.startswith('5504') and len(id_) == 6 and id_[4:6] in cert_attribute_types:
            id_ = cert_attribute_types[id_[4:6]]
        # Tag 19 = PrintableString, 12 = UTF8String — decode; otherwise hex.
        if tag_ == 19 or tag_ == 12: # printable string
            val_ = val_.decode()
        else:
            val_ = val_.hex()
        return {id_: val_}

    def parse_rdn_sequence(self, data):
        """Parse an RDNSequence (issuer/subject) into a list of dicts."""
        offset = 0
        len_ = len(data)  # NOTE(review): unused
        items = []
        tag_, _, value, offset = self.parse_tlv(data, offset)
        while offset != None:
            item_ = self.parse_rdn_sequence_item(value)
            if item_ != None:
                items.append(item_)
            _, _, value, offset = self.parse_tlv(data, offset)
        return items

    def parse_tlv(self, data, offset):
        """Parse one ASN.1 BER/DER TLV at ``offset``.

        Returns (tag, length, value, offset_after_value); on a truncated or
        unsupported encoding returns (None, None, None, None).
        """
        if len(data) < offset+2:
            return None, None, None, None
        tag_ = data[offset]
        len_ = data[offset+1]
        if len_ == 0:
            return tag_, len_, b'', offset+2
        if len_ >= 128:
            # Long form: low 7 bits give the number of length octets.
            num_octets = len_ - 128
            if num_octets <= 0:
                # 0x80 would be the indefinite form — unsupported here.
                return None, None, None, None
            len_ = int(data[offset+2:offset+2+num_octets].hex(),16)
            offset += num_octets
        val_ = data[offset+2:offset+2+len_]
        return tag_, len_, val_, offset+2+len_

    def proc_identify(self, fp_str_, context_, dst_ip, dst_port, list_procs=5):
        """Process identification is not implemented for certificates."""
        return None
| 28.87468 | 107 | 0.533924 | 1,336 | 11,290 | 4.213323 | 0.143713 | 0.094155 | 0.061823 | 0.079943 | 0.454077 | 0.39332 | 0.360455 | 0.282999 | 0.259371 | 0.253331 | 0 | 0.029728 | 0.356422 | 11,290 | 390 | 108 | 28.948718 | 0.744977 | 0.030558 | 0 | 0.417808 | 0 | 0 | 0.036437 | 0 | 0 | 0 | 0.003296 | 0 | 0 | 1 | 0.054795 | false | 0.003425 | 0.023973 | 0.003425 | 0.239726 | 0.003425 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc6c7dc55590c974bdea386d0db32e6d88e9c06 | 3,182 | py | Python | plugins/aea-cli-benchmark/aea_cli_benchmark/case_agent_construction_time/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-benchmark/aea_cli_benchmark/case_agent_construction_time/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-benchmark/aea_cli_benchmark/case_agent_construction_time/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Check amount of time and mem for agent setup."""
import os
import time
from pathlib import Path
from statistics import mean
from tempfile import TemporaryDirectory
from typing import List, Tuple, Union
from aea_cli_benchmark.utils import get_mem_usage_in_mb
from click.testing import CliRunner
from aea import AEA_DIR as _AEA_DIR
from aea.aea_builder import AEABuilder
AEA_DIR = Path(_AEA_DIR)
PACKAGES_DIR = AEA_DIR.parent / "packages"
def run(agents: int) -> List[Tuple[str, Union[int, float]]]:
    """Check construction time and memory usage.

    Fetches the ``fetchai/my_first_aea`` project into a temporary directory,
    builds it ``agents`` times, and reports average load/build timings and
    the memory footprint of the built agents.
    """
    from aea.cli.core import cli

    def _invoke(cli_args, error_message):
        # Run one aea CLI command; fail loudly on a non-zero exit code.
        outcome = CliRunner().invoke(cli, cli_args, catch_exceptions=False)
        if outcome.exit_code != 0:
            raise Exception(error_message)

    load_times: List[float] = []
    full_times: List[float] = []

    with TemporaryDirectory() as tmp_dir:
        os.chdir(tmp_dir)
        _invoke(
            [
                f"--registry-path={PACKAGES_DIR}",
                "fetch",
                "--local",
                "fetchai/my_first_aea",
                "--alias",
                "agent",
            ],
            "fetch failed",
        )
        agent_dir = Path(tmp_dir) / "agent"
        os.chdir(agent_dir)
        _invoke(["generate-key", "fetchai"], "generate-key failed")
        _invoke(["add-key", "fetchai"], "add-key failed")

        built_agents = []
        env_mem_usage = get_mem_usage_in_mb()
        for _ in range(agents):
            started_at = time.time()
            builder = AEABuilder.from_aea_project(agent_dir)
            load_times.append(time.time() - started_at)
            built_agents.append(builder.build())
            full_times.append(time.time() - started_at)
        mem_usage = get_mem_usage_in_mb()

    return [
        ("avg config load time", mean(load_times)),
        ("avg full construction", mean(full_times)),
        ("avg build time", mean(full_times) - mean(load_times)),
        ("agent mem usage (Mb)", mem_usage - env_mem_usage),
    ]
| 32.469388 | 80 | 0.560339 | 366 | 3,182 | 4.70765 | 0.412568 | 0.037145 | 0.019153 | 0.022635 | 0.18108 | 0.145676 | 0.113175 | 0.086477 | 0.061521 | 0.061521 | 0 | 0.009362 | 0.295097 | 3,182 | 97 | 81 | 32.804124 | 0.758805 | 0.28127 | 0 | 0.223881 | 0 | 0 | 0.106336 | 0.013292 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.164179 | 0 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc7a77f4169521fe89b86a05dfe67f63795adcd | 4,077 | py | Python | whizzlibrary/plotting.py | rodrigolece/whizz-library | beb3b6a31000239843bdfae8f4edd2a700749ce7 | [
"MIT"
] | null | null | null | whizzlibrary/plotting.py | rodrigolece/whizz-library | beb3b6a31000239843bdfae8f4edd2a700749ce7 | [
"MIT"
] | null | null | null | whizzlibrary/plotting.py | rodrigolece/whizz-library | beb3b6a31000239843bdfae8f4edd2a700749ce7 | [
"MIT"
] | null | null | null |
import numpy as np
import scipy.stats as st # for pearsonr, has to be imported explicitly
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes # to use the inset in subplot
from mpl_toolkits.axes_grid1 import make_axes_locatable # to scale the colorbar
from .quarters import histogramQuarters
def plotTopicHistograms(mat, topic_names, ignore_empty=False):
    """Plot one histogram of quarter scores per topic (row of ``mat``).

    Counts are normalised by the number of pupils (columns of ``mat``).
    When ``ignore_empty`` is set and the first bin edge is -12.5, the
    leading bins are trimmed before plotting.
    """
    nb_topics, nb_pupils = mat.shape
    plt.figure(figsize=(8,4))

    for i in range(nb_topics):
        counts, bins = histogramQuarters(mat[i,:])

        if ignore_empty and bins[0] == -12.5:
            idx = np.nonzero(counts)[0] # returns (i,j) indices as for a matrix
            # NOTE(review): takes idx[1] (second nonzero index), not idx[0] —
            # verify this is intentional.
            first_nonzero = idx[1]
            counts, bins = counts[first_nonzero:], bins[first_nonzero:]

        counts /= nb_pupils
        # Bars are 25 wide, so bin centers sit 12.5 past the left edges.
        centers = bins[:-1] + 12.5
        plt.bar(centers, counts, 25, alpha=0.6, label=topic_names[i])
        plt.plot(centers, counts, 'o', ms=4)

    plt.legend()
def plotSingInfo(mat, topic_names, nb_vecs=4):
    """Visualise ``mat`` together with its SVD.

    2x2 figure: the raw matrix, the singular-value spectrum (log scale with
    a linear inset), and the top and bottom ``nb_vecs`` left singular
    vectors.
    """
    nb_topics = len(topic_names)
    u, s, vt = np.linalg.svd(mat, full_matrices=False) # full_matrices False means that v is m by n

    fig, axes = plt.subplots(figsize=(15,8), nrows=2, ncols=2)
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # plt.setp(axes, xticks=range(nb_topics), xticklabels=topic_names) not that useful anymore after rearangement

    # Raw matrix.
    plt.sca(axes[0,0])
    plt.imshow(mat, aspect='auto', vmin=0, vmax=1400)
    plt.yticks(range(nb_topics), topic_names)

    # Singular values, log scale plus a linear-scale inset.
    plt.sca(axes[0,1])
    plt.semilogy(s)
    plt.title('Singular values')
    plt.grid()
    width, height = 2.4, 1.2 # inches, could also use "30%"
    inset_ax = inset_axes(axes[0,1], width, height)
    inset_ax.plot(s)
    inset_ax.grid()

    # Top left singular vectors.
    plt.sca(axes[1,0])
    plt.plot(u[:,:nb_vecs])
    plt.title('Top left singular vectors')
    plt.legend(range(nb_vecs))
    plt.axhline(0, color='k', linestyle='--', alpha=0.6)
    plt.xticks(range(nb_topics), topic_names)
    plt.grid()

    # Bottom left singular vectors; colour cycle offset so the colours
    # differ from the top-vector plot.
    plt.sca(axes[1,1])
    axes[1,1].set_prop_cycle('color', colors[nb_vecs:])
    plt.plot(u[:,-nb_vecs:])
    plt.title('Bottom left singular vectors')
    plt.legend(range(nb_topics-nb_vecs, nb_topics), loc=1)
    plt.axhline(0, color='k', linestyle='--', alpha=0.6)
    plt.xticks(range(nb_topics), topic_names)
    plt.grid()
    # plt.tight_layout() # incompatible with inset_ax
def correlationMat(mat):
    """Pairwise Pearson correlations between the rows of ``mat``.

    Only the upper triangle (diagonal included) is computed.  Every entry
    that is exactly 0.0 — the whole lower triangle, plus any pair whose
    correlation happens to be exactly zero — ends up as NaN.
    """
    n_rows = mat.shape[0]
    out = np.full((n_rows, n_rows), np.nan)
    for row in range(n_rows):
        for col in range(row, n_rows):
            r, _p = st.pearsonr(mat[row, :], mat[col, :])
            out[row, col] = np.nan if r == 0.0 else r
    return out
def plotCorrelations(mat, topic_names):
    """Plot the row-correlation matrix of ``mat`` plus scatter plots of the
    least and most correlated pair of topics."""
    correl_mat = correlationMat(mat)
    nb_topics = len(topic_names)

    # Min/max over the strict upper triangle (diagonal excluded).
    triu = np.triu(correl_mat, k=1)
    triu = triu[np.nonzero(triu)]
    m, M = np.min(triu), np.max(triu)
    topics_min = np.concatenate(np.where(correl_mat == m))
    topics_max = np.concatenate(np.where(correl_mat == M))

    _, axes = plt.subplots(figsize=(15, 4), ncols=3) #figsize=(nb_topics+1,nb_topics+1)

    # Correlation matrix with its own colorbar.
    plt.sca(axes[0])
    plt.imshow(correl_mat, vmin=0)
    axes[0].xaxis.tick_top()
    axes[0].yaxis.tick_right()
    plt.xticks(range(nb_topics), topic_names)
    plt.yticks(range(nb_topics), topic_names)
    divider = make_axes_locatable(axes[0])
    cax = divider.append_axes('right', size='5%', pad=0.5)
    plt.colorbar(cax=cax)

    # Least correlated pair of topics.
    plt.sca(axes[1])
    plt.plot(mat[topics_min[0],:], mat[topics_min[1],:], 'o')
    plt.axis('equal')
    plt.xlabel(topic_names[topics_min[0]])
    plt.ylabel(topic_names[topics_min[1]])
    plt.title('%s (rho = %.3f)' % (' - '.join(topic_names[topics_min]), m ))

    # Most correlated pair of topics.
    plt.sca(axes[2])
    plt.plot(mat[topics_max[0],:], mat[topics_max[1],:], 'o')
    plt.axis('equal')
    plt.xlabel(topic_names[topics_max[0]])
    plt.ylabel(topic_names[topics_max[1]])
    plt.title('%s: rho = %.3f' % ('-'.join(topic_names[topics_max]), M ))

    plt.tight_layout()
| 31.361538 | 113 | 0.648516 | 637 | 4,077 | 3.992151 | 0.276295 | 0.059772 | 0.046009 | 0.035391 | 0.32324 | 0.259929 | 0.223358 | 0.130948 | 0.117184 | 0.117184 | 0 | 0.025571 | 0.19426 | 4,077 | 129 | 114 | 31.604651 | 0.748554 | 0.095904 | 0 | 0.177778 | 0 | 0 | 0.043277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.066667 | 0 | 0.122222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcc94d1efbebd6519f53641f480b76cc940d4d0c | 774 | py | Python | posts/urls.py | mmanchev23/network | 248a23089640096fe866abc4557e82383768b5bc | [
"MIT"
] | null | null | null | posts/urls.py | mmanchev23/network | 248a23089640096fe866abc4557e82383768b5bc | [
"MIT"
] | null | null | null | posts/urls.py | mmanchev23/network | 248a23089640096fe866abc4557e82383768b5bc | [
"MIT"
] | null | null | null | from django.urls import path
from django.conf.urls import url
from . import views
# URL routes for the posts app; route names ("index", "login", ...) are used
# by templates and views through Django's URL reversing.
urlpatterns = [
    path("", views.index, name="index"),
    # Authentication.
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    # Profiles, posts and follows.
    path("profile/<str:username>", views.profile, name="profile"),
    path("profile/config/<str:username>", views.config, name="config"),
    path("profile/<str:username>/newpost", views.newpost, name="newpost"),
    path("<str:post_id>/delete", views.delete, name="delete"),
    path("following/<str:username>", views.following, name='following'),
    path("posts/<int:post_id>/edit", views.edit, name="edit"),
    # Regex route kept on the legacy url() helper.
    url(r'^likepost/$', views.like_post, name='like-post')
]
dcc9ddc5d8f9f9a656c88d8718ebff1b675da47f | 2,476 | py | Python | nnaps/tests/test_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 4 | 2020-09-24T12:55:58.000Z | 2021-05-19T14:46:10.000Z | nnaps/tests/test_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 4 | 2021-06-02T09:28:35.000Z | 2021-06-04T08:32:24.000Z | nnaps/tests/test_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 3 | 2020-10-05T13:18:27.000Z | 2021-06-02T09:29:11.000Z | import os
import pytest
import pandas as pd
from nnaps.mesa import compress_mesa, fileio
from pathlib import Path
base_path = Path(__file__).parent
class Test2H5:
    """Tests for converting MESA output into the NNaPS hdf5 format."""

    def test_read_mesa_output(self):
        """The bundled history1.data loads with the expected columns/shape."""
        filename = base_path / 'test_data/M1.013_M0.331_P32.85_Z0.00155/LOGS/history1.data'
        _, data = compress_mesa.read_mesa_output(filename=filename, only_first=False)

        assert 'model_number' in data.dtype.names
        assert min(data['model_number']) == 1
        assert max(data['model_number']) == 30000
        assert len(data.dtype.names) == 53
        assert data.shape[0] == 10263

    def test_convert2hdf5(self):
        """convert2hdf5 writes an hdf5 file with history, extra_info and
        profiles; the file and its directory are removed afterwards."""
        data = [[1.013, 0.331, 32.85, 0.12, -0.8, 0.00155, 749, 986, 0, 2000, 'M1.013_M0.331_P32.85_Z0.00155']]
        columns = ['M1MSun', 'M2MSun', 'PRLODays', 'PoverPMax', 'FeH', 'ZMIST', 'TtipMyr',
                   'GalAgeMyr', 'AgeBinNum', 'DeltaTBin', 'path']
        modellist = pd.DataFrame(data=data, columns=columns)

        try:
            compress_mesa.convert2hdf5(modellist, star_columns=None, binary_columns=None, add_stopping_condition=True,
                                       skip_existing=False,
                                       input_path_kw='path', input_path_prefix=base_path / 'test_data',
                                       star1_history_file='LOGS/history1.data', star2_history_file='LOGS/history2.data',
                                       binary_history_file='LOGS/binary_history.data', log_file='log.txt',
                                       profile_files='all', profiles_path='LOGS', profile_pattern='profile_*.data',
                                       output_path=base_path / 'test_data/hdf5')

            assert os.path.isfile(base_path / 'test_data/hdf5/M1.013_M0.331_P32.85_Z0.00155.h5')

            data = fileio.read_hdf5(base_path / 'test_data/hdf5/M1.013_M0.331_P32.85_Z0.00155.h5')

            assert 'history' in data
            assert 'star1' in data['history']
            assert 'star2' in data['history']
            assert 'binary' in data['history']
            assert 'extra_info' in data
            assert 'nnaps-version' in data['extra_info']
            assert 'termination_code' in data['extra_info']
            assert 'profile_legend' in data
            assert 'profiles' in data
        finally:
            # Clean up the generated file even when an assertion fails.
            os.remove(base_path / 'test_data/hdf5/M1.013_M0.331_P32.85_Z0.00155.h5')
            os.rmdir(base_path / 'test_data/hdf5/')
| 38.092308 | 120 | 0.599354 | 313 | 2,476 | 4.498403 | 0.348243 | 0.042614 | 0.059659 | 0.079545 | 0.183239 | 0.125 | 0.125 | 0.125 | 0.09375 | 0.09375 | 0 | 0.091474 | 0.284733 | 2,476 | 64 | 121 | 38.6875 | 0.703557 | 0 | 0 | 0 | 0 | 0 | 0.240291 | 0.101942 | 0 | 0 | 0 | 0 | 0.357143 | 1 | 0.047619 | false | 0 | 0.119048 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcca02e10815f058cde445c4c498cd6121ab7987 | 10,588 | py | Python | riot_transmute/v5/match_to_game.py | mrtolkien/riotwatcher_dto | 777455c45159f177d3a7ba3956043ff26f625c30 | [
"MIT"
] | 8 | 2020-10-01T18:01:15.000Z | 2022-02-22T22:51:30.000Z | riot_transmute/v5/match_to_game.py | mrtolkien/riotwatcher_dto | 777455c45159f177d3a7ba3956043ff26f625c30 | [
"MIT"
] | 2 | 2021-12-15T23:02:02.000Z | 2022-01-25T06:07:53.000Z | riot_transmute/v5/match_to_game.py | mrtolkien/riotwatcher_dto | 777455c45159f177d3a7ba3956043ff26f625c30 | [
"MIT"
] | 4 | 2020-07-30T23:24:06.000Z | 2022-02-21T19:26:01.000Z | import lol_dto.classes.game as dto
from lol_dto.classes.sources.riot_lol_api import RiotGameSource, RiotPlayerSource
from riot_transmute.common.iso_date_from_ms import get_iso_date_from_ms_timestamp
# Maps Riot match-v5 "individualPosition" values to the three-letter role
# names used when building the LolGame player objects.
role_trigrams = {
    "TOP": "TOP",
    "JUNGLE": "JGL",
    "MIDDLE": "MID",
    "BOTTOM": "BOT",
    "UTILITY": "SUP",
}
from dataclasses import dataclass
@dataclass
class RiotGameRankedSource(RiotGameSource):
    """RiotGameSource extended with the (possibly absent) tournament code."""

    # Only set for tournament/esports games; None otherwise.
    tournamentCode: str = None
def match_to_game(match_dto: dict) -> dto.LolGame:
    """
    Returns a LolGame from a MatchDto from match-v5 endpoints

    Args:
        match_dto: A MatchDto from Riot's API,

    Returns:
        The LolGame representation of the game
    """
    # Creating some data fields in a friendlier format
    # ms timestamp -> ISO format
    iso_creation_date = get_iso_date_from_ms_timestamp(match_dto["gameCreation"])
    # v5 has game start as well
    iso_start_date = get_iso_date_from_ms_timestamp(match_dto["gameStartTimestamp"])
    # only 2 values for the patch key (gameVersion is also saved)
    patch = ".".join(match_dto["gameVersion"].split(".")[:2])
    # Saving winner as BLUE or RED; bail out when neither team is marked as
    # the winner in either of the two known formats.
    if not any(match_dto["teams"][i]["win"] in ["Win", True] for i in range(0, 2)):
        raise ValueError
    winner = (
        "BLUE"
        if (match_dto["teams"][0]["teamId"] == 100)
        == (
            (
                # I saw both between esports games and live games
                match_dto["teams"][0]["win"] == "Win"
                or match_dto["teams"][0]["win"] == True
            )
        )
        else "RED"
    )
    # Riot made changes to duration on 11.20
    # Prior to patch 11.20, this field returns the game length in milliseconds calculated from gameEndTimestamp - gameStartTimestamp.
    # Post patch 11.20, this field returns the max timePlayed of any participant in the game in seconds, which makes the behavior of this field consistent with that of match-v4.
    # The best way to handling the change in this field is to treat the value as milliseconds if the gameEndTimestamp field isn't in the response and
    # to treat the value as seconds if gameEndTimestamp is in the response.
    if not match_dto.get("gameEndTimestamp"):
        duration = int(match_dto["gameDuration"] / 1000)
    else:
        duration = int(match_dto["gameDuration"])
    # Creating our object's structure
    game = dto.LolGame(
        duration=duration,
        creation=iso_creation_date,
        start=iso_start_date,
        patch=patch,
        gameVersion=match_dto["gameVersion"],
        winner=winner,
        lobbyName=match_dto["gameName"],
        type=match_dto["gameType"],
        queue_id=match_dto["queueId"],
    )
    # Attach the Riot API identifiers (and tournament code, when any) as the
    # game's source information.
    setattr(
        game.sources,
        "riotLolApi",
        RiotGameRankedSource(
            gameId=match_dto["gameId"],
            platformId=match_dto["platformId"],
            tournamentCode=match_dto.get("tournamentCode"),
        ),
    )

    # Team-level data: bans and end-of-game objective statistics.
    for dto_team in match_dto["teams"]:
        if dto_team["teamId"] == 100:
            game_team = game.teams.BLUE
        elif dto_team["teamId"] == 200:
            game_team = game.teams.RED
        else:
            raise ValueError(f"{dto_team['teamId']=} value not supported")

        game_team.bans = [b["championId"] for b in dto_team["bans"]]

        game_team.endOfGameStats = dto.LolGameTeamEndOfGameStats(
            firstTurret=dto_team["objectives"]["tower"]["first"],
            turretKills=dto_team["objectives"]["tower"]["kills"],
            firstRiftHerald=dto_team["objectives"]["riftHerald"]["first"],
            riftHeraldKills=dto_team["objectives"]["riftHerald"]["kills"],
            firstDragon=dto_team["objectives"]["dragon"]["first"],
            dragonKills=dto_team["objectives"]["dragon"]["kills"],
            firstBaron=dto_team["objectives"]["baron"]["first"],
            baronKills=dto_team["objectives"]["baron"]["kills"],
            firstInhibitor=dto_team["objectives"]["inhibitor"]["first"],
            inhibitorKills=dto_team["objectives"]["inhibitor"]["kills"],
        )

    # Player-level data.
    for dto_player in match_dto["participants"]:
        if dto_player["teamId"] == 100:
            game_team = game.teams.BLUE
        elif dto_player["teamId"] == 200:
            game_team = game.teams.RED
        else:
            raise ValueError(f"{dto_player['teamId']=} value not supported")

        game_player = dto.LolGamePlayer(
            id=dto_player["participantId"],
            inGameName=dto_player["summonerName"],
            role=role_trigrams.get(dto_player["individualPosition"]),
            championId=dto_player["championId"],
            primaryRuneTreeId=dto_player["perks"]["styles"][0]["style"],
            secondaryRuneTreeId=dto_player["perks"]["styles"][1]["style"],
        )

        setattr(
            game_player.sources,
            "riotLolApi",
            RiotPlayerSource(
                # We have to use get to also be compatible with esports games
                puuid=dto_player.get("puuid"),
                summonerId=dto_player.get("summonerId"),
            ),
        )

        # We extend the runes with the primary and secondary trees
        game_player.runes.extend(
            dto.LolGamePlayerRune(
                slot=len(game_player.runes),
                id=r["perk"],
                stats=[r["var1"], r["var2"], r["var3"]],
            )
            for style in dto_player["perks"]["styles"]
            for r in style["selections"]
        )

        # We then add stats perks
        game_player.runes.extend(
            [
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes),
                    id=dto_player["perks"]["statPerks"]["offense"],
                ),
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes) + 1,
                    id=dto_player["perks"]["statPerks"]["flex"],
                ),
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes) + 2,
                    id=dto_player["perks"]["statPerks"]["defense"],
                ),
            ]
        )

        game_player.summonerSpells.extend(
            dto.LolGamePlayerSummonerSpell(
                # Bayes' GAMH data uses spell1Id instead of summoner1Id
                id=dto_player.get(f"summoner{spell_id}Id")
                or dto_player.get(f"spell{spell_id}Id"),
                slot=spell_id - 1,
                casts=dto_player[f"summoner{spell_id}Casts"],
            )
            for spell_id in (1, 2)
        )

        # NOTE(review): set once per player, so the team flag ends up being
        # the last player's value — confirm all teammates share it.
        game_team.earlySurrendered = dto_player["teamEarlySurrendered"]

        # Item slots 0-6 (6 is the trinket slot in match-v5).
        items = [
            dto.LolGamePlayerItem(id=dto_player.get(f"item{i}"), slot=i)
            for i in range(0, 7)
        ]

        end_of_game_stats = dto.LolGamePlayerEndOfGameStats(
            items=items,
            firstBlood=dto_player["firstBloodKill"],
            firstBloodAssist=dto_player["firstBloodAssist"],
            kills=dto_player["kills"],
            deaths=dto_player["deaths"],
            assists=dto_player["assists"],
            gold=dto_player["goldEarned"],
            # cs combines lane minions and jungle monsters; the `or 0`
            # guards against missing/None fields.
            cs=int(dto_player["totalMinionsKilled"] or 0)
            + int(dto_player["neutralMinionsKilled"] or 0),
            level=dto_player["champLevel"],
            wardsPlaced=dto_player["wardsPlaced"],
            wardsKilled=dto_player["wardsKilled"],
            visionWardsBought=dto_player["visionWardsBoughtInGame"],
            visionScore=dto_player["visionScore"],
            killingSprees=dto_player["killingSprees"],
            largestKillingSpree=dto_player["largestKillingSpree"],
            doubleKills=dto_player["doubleKills"],
            tripleKills=dto_player["tripleKills"],
            quadraKills=dto_player["quadraKills"],
            pentaKills=dto_player["pentaKills"],
            monsterKills=dto_player["neutralMinionsKilled"],
            totalDamageDealt=dto_player["totalDamageDealt"],
            physicalDamageDealt=dto_player["physicalDamageDealt"],
            magicDamageDealt=dto_player["magicDamageDealt"],
            totalDamageDealtToChampions=dto_player["totalDamageDealtToChampions"],
            physicalDamageDealtToChampions=dto_player["physicalDamageDealtToChampions"],
            magicDamageDealtToChampions=dto_player["magicDamageDealtToChampions"],
            damageDealtToObjectives=dto_player["damageDealtToObjectives"],
            damageDealtToTurrets=dto_player["damageDealtToTurrets"],
            damageDealtToBuildings=dto_player["damageDealtToBuildings"],
            totalDamageTaken=dto_player["totalDamageTaken"],
            physicalDamageTaken=dto_player["physicalDamageTaken"],
            magicDamageTaken=dto_player["magicDamageTaken"],
            longestTimeSpentLiving=dto_player["longestTimeSpentLiving"],
            largestCriticalStrike=dto_player["largestCriticalStrike"],
            goldSpent=dto_player["goldSpent"],
            totalHeal=dto_player["totalHeal"],
            totalUnitsHealed=dto_player["totalUnitsHealed"],
            damageSelfMitigated=dto_player["damageSelfMitigated"],
            totalTimeCCDealt=dto_player["totalTimeCCDealt"],
            # New match-v5 fields
            xp=dto_player["champExperience"],
            bountyLevel=dto_player["bountyLevel"],
            baronKills=dto_player["baronKills"],
            dragonKills=dto_player["dragonKills"],
            inhibitorKills=dto_player["inhibitorKills"],
            inhibitorTakedowns=dto_player["inhibitorTakedowns"],
            championTransform=dto_player["championTransform"],
            consumablesPurchased=dto_player["consumablesPurchased"],
            detectorWardsPlaced=dto_player["detectorWardsPlaced"],
            itemsPurchased=dto_player["itemsPurchased"],
            nexusKills=dto_player["nexusKills"],
            nexusTakedowns=dto_player["nexusTakedowns"],
            objectivesStolen=dto_player["objectivesStolen"],
            objectivesStolenAssists=dto_player["objectivesStolenAssists"],
            sightWardsBoughtInGame=dto_player["sightWardsBoughtInGame"],
            totalDamageShieldedOnTeammates=dto_player["totalDamageShieldedOnTeammates"],
            totalHealsOnTeammates=dto_player["totalHealsOnTeammates"],
            totalTimeSpentDead=dto_player["totalTimeSpentDead"],
            turretTakedowns=dto_player["turretTakedowns"],
            turretKills=dto_player["turretKills"],
        )

        game_player.endOfGameStats = end_of_game_stats
        game_team.players.append(game_player)

    return game
dccbb1ed1d93a36b9b6ce4c8495a089a6511c9fc | 1,946 | py | Python | chart/tests/helm_template_generator.py | jrderuiter/airflow | 3122b351a515d455b2c69412b9cc72af888abc2b | [
"Apache-2.0"
] | 1 | 2020-02-29T18:15:04.000Z | 2020-02-29T18:15:04.000Z | chart/tests/helm_template_generator.py | jrderuiter/airflow | 3122b351a515d455b2c69412b9cc72af888abc2b | [
"Apache-2.0"
] | null | null | null | chart/tests/helm_template_generator.py | jrderuiter/airflow | 3122b351a515d455b2c69412b9cc72af888abc2b | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import sys
from tempfile import NamedTemporaryFile
import yaml
from kubernetes.client.api_client import ApiClient
# Shared kubernetes ApiClient; used by render_k8s_object purely for its
# (private) deserialization helper, never for real API calls.
api_client = ApiClient()
def render_chart(name="RELEASE-NAME", values=None, show_only=None):
    """
    Function that renders a helm chart into dictionaries. For helm chart testing only

    :param name: release name passed to ``helm template``
    :param values: dict dumped into a temporary values file for the render
    :param show_only: optional list of template paths to restrict the output to
    :return: list of the non-empty YAML documents helm produced, as dicts
    """
    values = values or {}
    with NamedTemporaryFile() as tmp_file:
        content = yaml.dump(values)
        tmp_file.write(content.encode())
        tmp_file.flush()
        # sys.path[0] is the directory of the running script -- presumably the
        # chart directory here; TODO confirm against how the tests are invoked.
        command = ["helm", "template", name, sys.path[0], '--values', tmp_file.name]
        if show_only:
            for i in show_only:
                command.extend(["--show-only", i])
        templates = subprocess.check_output(command)
        # safe_load_all: rendered manifests are plain data, so avoid the
        # arbitrary-object construction (and YAMLLoadWarning) of yaml.load_all.
        k8s_objects = yaml.safe_load_all(templates)
        k8s_objects = [k8s_object for k8s_object in k8s_objects if k8s_object]  # type: ignore
        return k8s_objects
def render_k8s_object(obj, type_to_render):
    """
    Deserialize a rendered-template dict into the given k8s model class.
    For helm chart testing only.
    """
    # The deserializer is private on ApiClient, hence the name-mangled lookup.
    deserialize = getattr(api_client, "_ApiClient__deserialize_model")
    return deserialize(obj, type_to_render)
| 37.423077 | 97 | 0.723022 | 270 | 1,946 | 5.1 | 0.474074 | 0.043573 | 0.018882 | 0.023239 | 0.033406 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011553 | 0.199383 | 1,946 | 51 | 98 | 38.156863 | 0.872272 | 0.489209 | 0 | 0 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.227273 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dccd47a41be8ee6cd564590ef78258b59157e457 | 999 | py | Python | src/states/minnesota/main.py | mail-my-ballot/elections-officials | f52862b62121a6e30f8f1c865bfe62705d3a0748 | [
"Apache-2.0"
] | 3 | 2020-04-26T02:05:33.000Z | 2020-07-06T22:11:04.000Z | src/states/minnesota/main.py | mail-my-ballot/elections-officials | f52862b62121a6e30f8f1c865bfe62705d3a0748 | [
"Apache-2.0"
] | 25 | 2020-05-22T18:51:01.000Z | 2020-06-28T00:47:19.000Z | src/states/minnesota/main.py | vote-by-mail/elections-officials | ac3cfb896db11f553f9c22ea2303fa54e6bb1a39 | [
"Apache-2.0"
] | 5 | 2020-05-21T01:28:57.000Z | 2020-07-01T23:02:31.000Z | import re
from bs4 import BeautifulSoup
from common import cache_request
BASE_URL = 'https://www.sos.state.mn.us/elections-voting/find-county-election-office/'
re_lines = re.compile(r'(?<=Absentee voting contact)\s*(.*?)\s*'
+ r'Phone:\s*([0-9\-]+)(?:\s*ext\s*\d+)?\s*Fax:\s*([0-9\-]+)\s*Email:\s*(\S+)',
flags=re.DOTALL)
def parse_county(county, datum):
    """Extract one county's absentee-voting contact info from its detail node."""
    official, phone, fax, email = re_lines.findall(datum.get_text('\n'))[0]
    county_name = county.text
    return {
        'locale': county_name,
        'county': county_name,
        'official': official,
        'phones': [phone],
        'faxes': [fax],
        'emails': [email],
    }
def fetch_data():
    """Scrape absentee-voting contact info for every Minnesota county."""
    soup = BeautifulSoup(cache_request(BASE_URL), 'lxml')
    results = []
    for county_link in soup.select('h2.contentpage-h2 a'):
        # Each county heading anchors (via data-target) a detail panel by id.
        detail_id = county_link['data-target'].split('#')[1]
        results.append(parse_county(county_link, soup.find(id=detail_id)))
    return results
if __name__ == '__main__':
    # Allow running the scraper directly for a quick manual check.
    print(fetch_data())
| 24.975 | 101 | 0.60961 | 140 | 999 | 4.2 | 0.514286 | 0.061224 | 0.054422 | 0.064626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01601 | 0.187187 | 999 | 39 | 102 | 25.615385 | 0.708128 | 0 | 0 | 0 | 0 | 0.068966 | 0.267267 | 0.073073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.241379 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dccda10f7d1ebffea799e8be451190ac7dc23768 | 3,788 | py | Python | tests/test_conv.py | eLeVeNnN/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 290 | 2020-07-06T02:13:12.000Z | 2021-01-04T14:23:39.000Z | tests/test_conv.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 1 | 2020-12-03T11:11:48.000Z | 2020-12-03T11:11:48.000Z | tests/test_conv.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 49 | 2020-07-16T00:27:47.000Z | 2020-11-26T03:03:14.000Z | import xs.nn
import xs.nn.functional
import xs.optim
from xs.layers import Conv2D, ReLU, BatchNormalization
import numpy as np
import torch.nn
import torch.nn.functional
import torch.optim
import os
import matplotlib.pyplot as plt
# Allow duplicate OpenMP runtimes to coexist -- commonly needed to avoid the
# libiomp5 "already initialized" abort when torch and matplotlib/MKL both load it.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
def xs_network(weight1, bias1, bn_weight1, bn_bias1, weight2, bias2):
    """Build the xshinnosuke reference net (Conv-BN-ReLU-Conv) with preset weights."""
    w1, b1, gamma, beta, w2, b2 = (
        xs.nn.Parameter(arr)
        for arr in (weight1, bias1, bn_weight1, bn_bias1, weight2, bias2)
    )
    conv_a = Conv2D(out_channels=3, kernel_size=3)
    norm = BatchNormalization(epsilon=1e-5, momentum=0.9)
    act = ReLU()
    conv_b = Conv2D(out_channels=5, kernel_size=3)
    # Install the shared parameters so xs and torch start identically.
    conv_a.parameters([w1, b1])
    conv_b.parameters([w2, b2])
    norm.parameters([gamma, beta])
    norm.moving_mean = xs.zeros(3)
    norm.moving_variance = xs.ones(3)
    return xs.nn.Sequential(conv_a, norm, act, conv_b)
def torch_network(weight1, bias1, bn_weight1, bn_bias1, weight2, bias2):
    """Build the PyTorch reference net mirroring xs_network, with preset weights."""
    def as_param(array):
        # Wrap a numpy array as a trainable torch Parameter.
        return torch.nn.Parameter(torch.tensor(array), requires_grad=True)

    conv_a = torch.nn.Conv2d(in_channels=1, out_channels=3, kernel_size=3)
    norm = torch.nn.BatchNorm2d(3, eps=1e-5, momentum=0.1)
    act = torch.nn.ReLU()
    conv_b = torch.nn.Conv2d(in_channels=3, out_channels=5, kernel_size=3)
    # Overwrite the default initialisation with the shared parameters.
    conv_a.weight = as_param(weight1)
    conv_a.bias = as_param(bias1)
    norm.weight = as_param(bn_weight1)
    norm.bias = as_param(bn_bias1)
    conv_b.weight = as_param(weight2)
    conv_b.bias = as_param(bias2)
    return torch.nn.Sequential(conv_a, norm, act, conv_b)
######################### Hyper Parameters
EPOCH = 50
LR = 0.1
######################### Read Data
# Fixed seed so both frameworks see identical inputs and initial parameters.
np.random.seed(0)
train_datas = np.random.rand(2, 1, 6, 6).astype(np.float32)
train_labels = np.random.randint(0, 2, (2, ), dtype=np.int64)
# Shared initial weights: conv1 (3 out, 1 in, 3x3) + bias, BN gamma/beta,
# conv2 (5 out, 3 in, 3x3) + bias.
weight1 = np.random.rand(3, 1, 3, 3).astype(np.float32)
bias1 = np.random.rand(3).astype(np.float32)
bn_weight1 = np.random.rand(3).astype(np.float32)
bn_bias1 = np.random.rand(3).astype(np.float32)
weight2 = np.random.rand(5, 3, 3, 3).astype(np.float32)
bias2 = np.random.rand(5).astype(np.float32)
######################### Network
# Both nets start from the same parameters, so the two loss curves should
# track each other if xs's layers/optimizer match torch's.
xs_net = xs_network(weight1, bias1, bn_weight1, bn_bias1, weight2, bias2)
torch_net = torch_network(weight1, bias1, bn_weight1, bn_bias1, weight2, bias2)
######################### Optimizer
optim1 = xs.optim.Adam(xs_net.parameters(), lr=LR)
optim2 = torch.optim.Adam(torch_net.parameters(), lr=LR)
loss1_list = []
loss2_list = []
for i in range(EPOCH):
    print('#### Epoch ', i)
    xs_x, xs_y = xs.tensor(train_datas), xs.tensor(train_labels)
    torch_x, torch_y = torch.tensor(train_datas), torch.tensor(train_labels)
    optim1.zero_grad()
    optim2.zero_grad()
    # Flatten the conv output to (batch, -1, 2) and average -> (batch, 2) logits.
    pred1 = xs_net(xs_x).view(xs_x.size(0), -1, 2).mean(axis=1)
    pred2 = torch_net(torch_x).view(torch_x.size(0), -1, 2).mean(dim=1)
    # print('Prediction -->')
    # print('XS:\n', pred1)
    # print('Torch:\n', pred2)
    loss1 = xs.nn.functional.cross_entropy(pred1, xs_y)
    loss2 = torch.nn.functional.cross_entropy(pred2, torch_y)
    loss1_list.append(loss1.item())
    loss2_list.append(loss2.item())
    print('Loss -->')
    print('XS:\n', loss1)
    print('Torch:\n', loss2)
    loss1.backward()
    loss2.backward()
    optim1.step()
    optim2.step()
# Overlay the two loss curves for a visual equivalence check.
plt.figure()
plt.plot(loss1_list, label='xs_loss')
plt.plot(loss2_list, label='torch_loss')
plt.legend()
plt.show()
| 35.735849 | 81 | 0.682682 | 582 | 3,788 | 4.304124 | 0.201031 | 0.039122 | 0.033533 | 0.038323 | 0.315369 | 0.251098 | 0.19002 | 0.149301 | 0.086228 | 0.086228 | 0 | 0.059969 | 0.145987 | 3,788 | 105 | 82 | 36.07619 | 0.714374 | 0.031151 | 0 | 0.022472 | 0 | 0 | 0.020477 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0 | 0.11236 | 0 | 0.157303 | 0.044944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcd28b594bfbebe11f19c266f448d60744584605 | 290 | py | Python | tests/web/views/application/test_security.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 4 | 2018-03-01T10:22:30.000Z | 2020-04-04T16:31:11.000Z | tests/web/views/application/test_security.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 60 | 2018-05-20T04:42:32.000Z | 2022-02-10T17:03:37.000Z | tests/web/views/application/test_security.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 1 | 2018-08-04T05:06:45.000Z | 2018-08-04T05:06:45.000Z | import pytest
from bitcaster.security import APP_ROLES
@pytest.mark.django_db
def test_application_admin(django_app, application1, user1):
    """An application ADMIN can reach the application's edit page."""
    application1.add_member(user1, APP_ROLES.ADMIN)
    response = django_app.get(application1.urls.edit, user=user1.email)
    assert response
| 24.166667 | 60 | 0.775862 | 41 | 290 | 5.292683 | 0.634146 | 0.073733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024096 | 0.141379 | 290 | 11 | 61 | 26.363636 | 0.84739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcd2a7a049ecfd7fc7c9464d81aae3b346e5d160 | 3,421 | py | Python | Square Run/Prototype/genetic/population.py | Soare-Robert-Daniel/Project-Sarus | 38f8b5c579eda0cf941f05e35bd5063d31d06ce4 | [
"MIT"
] | null | null | null | Square Run/Prototype/genetic/population.py | Soare-Robert-Daniel/Project-Sarus | 38f8b5c579eda0cf941f05e35bd5063d31d06ce4 | [
"MIT"
] | null | null | null | Square Run/Prototype/genetic/population.py | Soare-Robert-Daniel/Project-Sarus | 38f8b5c579eda0cf941f05e35bd5063d31d06ce4 | [
"MIT"
] | null | null | null | import random
from individual import Individual
import settings
class Population:
    """A fixed-size population of Individuals evolved with a simple GA.

    Each Individual's genome is a single ``threshold`` value; its fitness is
    ``score``.  Offspring are bred from the two fittest individuals and
    mutated; the whole pool is reset ("extinction") when no progress is made
    for ``settings.EXTINCTION_COUNTER`` rounds.
    """

    def __init__(self, size, mutation):
        self.pool = []
        self.mutation = mutation  # probability that an offspring mutates
        self.size = size
        self.reset_pool()
        self.max_score = 0   # best score seen so far
        self.count_max = 0   # consecutive rounds without improvement
        self.all_fit = False

    def get_fittest(self):
        """Remove and return ``(threshold, score)`` of the fittest individual.

        Returns ``(0, 0)`` when the pool is empty.  Ties go to the individual
        that appears first in the pool.
        """
        if not self.pool:
            return 0, 0
        fittest = max(self.pool, key=lambda indv: indv.score)
        self.pool.remove(fittest)
        return fittest.threshold, fittest.score

    def create_offsprings(self):
        """Run one selection/crossover/mutation round, replacing the pool."""
        if not self.pool:
            self.reset_pool()
        # If even the weakest individual reached the max score, nothing to do.
        if min([indv.score for indv in self.pool]) == settings.MAX_SCORE_SESSION:
            print("They smart!")
            return
        print("> Apply the law of jungle\n# Begin")
        # ----------------------------------- Selection ------------------------
        first_trait, first_score = self.get_fittest()
        second_trait, second_score = self.get_fittest()
        # BUGFIX: pass the fitness *score* (previously the threshold trait was
        # passed) so the stagnation counter compares like with like.
        self.check_evolution(first_score)
        # Extinction: stuck for too many rounds without reaching the max score.
        if self.count_max > settings.EXTINCTION_COUNTER and self.max_score != settings.MAX_SCORE_SESSION:
            print("BEGIN the extinction")
            self.reset_pool()
        else:
            # ----------------------------------- Crossover ---------------------
            # Weighted blend: 75% from the fittest, 25% from the runner-up.
            offspring_threshold = (first_trait * 3 + second_trait) / 4
            print("Offspring: %d" % offspring_threshold)
            # Kill the old population.
            self.pool.clear()
            # ------------------------------------ Mutation ---------------------
            # Create the offsprings, each with its own chance of mutation.
            for i in range(self.size):
                self.pool.append(Individual(offspring_threshold + self.get_mutation(i)))
        print("# End")

    def get_mutation(self, index):
        """Return a random mutation delta, or 0 when no mutation occurs."""
        if random.uniform(0, 1) <= self.mutation:
            value = random.randint(-15, 15)
            print("Mutation with %d on the %d-th AI" % (value, index))
            return value
        return 0

    def check_evolution(self, max_score_session):
        """Track whether the population improved this round."""
        if self.max_score < max_score_session:
            # They have evolved.
            self.max_score = max_score_session
            self.count_max = 1
        elif self.max_score == max_score_session:
            # No sign of evolution.
            self.count_max += 1

    def reset_pool(self):
        """Replace the pool with fresh, randomly initialised individuals."""
        print("Reset Pool")
        self.pool = [Individual(random.randrange(40, 150, 5)) for _ in range(self.size)]

    def add_individuals(self, indv):
        self.pool.append(indv)

    def get_individual(self, index):
        return self.pool[index]

    def get_pool(self):
        return self.pool
| 34.555556 | 110 | 0.565039 | 403 | 3,421 | 4.627792 | 0.26799 | 0.072922 | 0.038606 | 0.020912 | 0.196247 | 0.117426 | 0.073995 | 0.046113 | 0.046113 | 0.046113 | 0 | 0.012726 | 0.287928 | 3,421 | 98 | 111 | 34.908163 | 0.752874 | 0.20228 | 0 | 0.138462 | 0 | 0 | 0.046057 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138462 | false | 0 | 0.046154 | 0.030769 | 0.307692 | 0.107692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcd55843a9662ccceae7d69532befbd320afdb43 | 1,273 | py | Python | python/PyQt5/16_pixels_test.py | nnaabbcc/exercise | 255fd32b39473b3d0e7702d4b1a8a97bed2a68f8 | [
"MIT"
] | 1 | 2016-11-23T08:18:08.000Z | 2016-11-23T08:18:08.000Z | python/PyQt5/16_pixels_test.py | nnaabbcc/exercise | 255fd32b39473b3d0e7702d4b1a8a97bed2a68f8 | [
"MIT"
] | null | null | null | python/PyQt5/16_pixels_test.py | nnaabbcc/exercise | 255fd32b39473b3d0e7702d4b1a8a97bed2a68f8 | [
"MIT"
] | 1 | 2016-11-23T08:17:34.000Z | 2016-11-23T08:17:34.000Z |
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout
from PyQt5.QtGui import QPainter
from PyQt5.QtCore import Qt
class MyWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.title = 'PyQt5 pixels example'
self.left = 100
self.top = 100
self.width = 640
self.height = 480
self.__init_ui()
def __init_ui(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), Qt.white)
self.setPalette(p)
layout = QVBoxLayout()
canvas = MyPaintWidget(self)
layout.addWidget(canvas)
self.setLayout(layout)
class MyPaintWidget(QWidget):
def paintEvent(self, event):
import random
p = QPainter(self)
p.setPen(Qt.black)
size = self.size()
for i in range(1024):
x = random.randint(1, size.width() - 1)
y = random.randint(1, size.height() - 1)
p.drawPoint(x, y)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
w = MyWidget()
w.show()
sys.exit(app.exec_())
| 23.574074 | 70 | 0.601728 | 150 | 1,273 | 4.953333 | 0.46 | 0.036339 | 0.037685 | 0.048452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026403 | 0.285939 | 1,273 | 53 | 71 | 24.018868 | 0.790979 | 0 | 0 | 0 | 0 | 0 | 0.022013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.128205 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcd6bdeb888785f2ab42d4a43ffa59943e64ac31 | 3,423 | py | Python | scripts/outliers.py | gmackall/deep-quant | 1c0081795c8be9b7513697b367e9a1381f7f742a | [
"MIT"
] | 122 | 2017-10-04T04:34:24.000Z | 2022-03-10T23:59:22.000Z | scripts/outliers.py | emrekesici/deep-quant | 19ae66d25924c4bc4b09879d82794013140d2a8b | [
"MIT"
] | 20 | 2018-02-07T16:34:10.000Z | 2020-07-21T08:45:59.000Z | scripts/outliers.py | Henrywcj/MLII | bf639e82f64b11b4af973570bccdfe5e2ed25533 | [
"MIT"
] | 51 | 2017-11-16T15:42:13.000Z | 2022-03-19T00:46:57.000Z | #!/bin/sh
''''exec python3 -u -- "$0" ${1+"$@"} # '''
# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import copy
import numpy as np
import regex as re
import pandas as pd
from utils import model_utils
import deep_quant
import configs as configs
from batch_generator import BatchGenerator
def main():
config = deep_quant.get_configs()
train_path = utils.data_utils.get_data_path(config.data_dir,config.datafile)
print("Loading training data ...")
config.batch_size = 1
batches = BatchGenerator(train_path,config)
# batches.cache(verbose=True)
# batches.shuffle()
params = batches.get_scaling_params('StandardScaler')
print(params['scale'])
print(params['center'])
col_names = batches.feature_names
df = pd.DataFrame(columns=col_names)
gvkeys = list()
dates = list()
steps = list()
print("Num batches sampled: %d"%batches.num_batches)
for j in range(batches.num_batches):
# for j in range(5000):
b = batches.next_batch()
seq_len = b.seq_lengths[0]
idx = seq_len-1
for i in range(seq_len):
gvkeys.append( b.attribs[idx][0][0] )
dates.append( b.attribs[idx][0][1] )
steps.append( i )
x = (b.inputs[i][0] - params['center']) / params['scale']
# x = b.inputs[i][0]
n = len(df.index)
df.loc[n] = x
if (j % 1000)==0:
print(".",end='')
sys.stdout.flush()
print()
df = pd.concat( [pd.DataFrame( {'gvkey' : gvkeys, 'date': dates, 'step' : steps } ), df], axis=1 )
# write to outfile
df.to_csv(config.mse_outfile,sep=' ',float_format="%.4f")
# print feature charateristics
for feature in col_names:
mean = np.mean( df[feature] )
std = np.std( df[feature] )
print("%s %.4f %.4f"%(feature,mean,std))
print('--------------------------------')
# print min and max values
for feature in col_names:
print("%s:"%feature)
st = df.sort_values(feature)
rt = df.sort_values(feature, ascending=False)
for i in range(5):
min_el = st.iloc[i,:]
max_el = rt.iloc[i,:]
#print(min_el)
#print(max_el)
print("%s %s %s %s"%
(min_el['gvkey'],min_el['date'],min_el['step'],min_el[feature]),end=' ')
print("%s %s %s %s"%
(max_el['gvkey'],max_el['date'],max_el['step'],max_el[feature]))
print('--------------------------------')
if __name__ == "__main__":
main()
| 29.765217 | 102 | 0.585159 | 449 | 3,423 | 4.316258 | 0.4098 | 0.03096 | 0.006192 | 0.016512 | 0.087719 | 0.028896 | 0.028896 | 0 | 0 | 0 | 0 | 0.01323 | 0.249197 | 3,423 | 114 | 103 | 30.026316 | 0.740856 | 0.26468 | 0 | 0.092308 | 0 | 0 | 0.096061 | 0.025723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.215385 | 0 | 0.230769 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcdb7d35dacad0a6039ec92643d3ff7d8da2b159 | 3,912 | py | Python | foss_library/import_data/frappe_api.py | nikochiko/foss-library | 8d2eccf3658b8ada0176f3dc24b9d87e465815de | [
"MIT"
] | 1 | 2021-09-16T09:35:36.000Z | 2021-09-16T09:35:36.000Z | foss_library/import_data/frappe_api.py | nikochiko/foss-library | 8d2eccf3658b8ada0176f3dc24b9d87e465815de | [
"MIT"
] | 1 | 2021-09-10T13:49:11.000Z | 2021-09-10T13:49:11.000Z | foss_library/import_data/frappe_api.py | nikochiko/foss-library | 8d2eccf3658b8ada0176f3dc24b9d87e465815de | [
"MIT"
] | null | null | null | import asyncio
import os
from math import ceil
from pprint import pprint
from typing import Any, AsyncIterator, Optional
import aiohttp
from foss_library.books.models import Book
FRAPPE_API_URL = os.getenv(
"FRAPPE_API_URL", "https://frappe.io/api/method/frappe-library"
)
async def get_n_books_from_api(
count: int,
title: Optional[str] = None,
authors: Optional[str] = None,
isbn: Optional[str] = None,
publisher: Optional[str] = None,
):
n_pages = ceil(count / 20)
params = {
"title": title,
"authors": authors,
"isbn": isbn,
"publisher": publisher,
}
sent_count = 0
async with aiohttp.ClientSession() as session:
async for json_book in fetch_n_pages_concurrently(session, n_pages, params):
if sent_count >= count:
break
book_obj = convert_json_to_book(json_book)
sent_count += 1
yield book_obj
async def fetch_n_pages_concurrently(
session: aiohttp.ClientSession,
pages: int,
params: Optional[dict[str, Any]],
offset: Optional[int] = 0,
) -> AsyncIterator[dict[str, Any]]:
"""
Fetches data from API in batches. Downloading is done
concurrently for the requests within each batch.
"""
start_page = offset + 1
end_page = start_page + pages
print(f"Fetching pages {start_page} to {end_page}")
awaitables = [
get_data_by_page(session, page, params) for page in range(start_page, end_page)
]
# gets a coroutines as they complete
for coroutine in asyncio.as_completed(awaitables):
json_books = await coroutine
for json_book in json_books:
yield json_book
async def get_data_by_page(
session: aiohttp.ClientSession,
page: int,
params: Optional[dict[str, Any]] = {},
) -> list[dict[str, Any]]:
"""
Get data from the API for a given page
session is expected as the first argument to make use of connection pooling
"""
params = params.copy()
params.update(page=page)
async with session.get(
FRAPPE_API_URL, params=params, raise_for_status=True
) as response:
response_data = await response.json()
return response_data["message"]
def convert_json_to_book(json_book: dict[str, Any]) -> Book:
"""Converts a JSON dict (as per return format of API) to a Book object"""
# a mapping of keys from API response to the keys that should go in as kwargs to Book
cleanup_str_func = lambda x: str(x.strip())
converted_keys_and_cleanup_funcs = {
"bookID": ("id", int),
"title": ("title", cleanup_str_func),
"authors": ("authors", cleanup_str_func),
"average_rating": ("average_rating", float),
"isbn": ("isbn", cleanup_str_func),
"isbn13": ("isbn13", cleanup_str_func),
"language_code": ("language_code", cleanup_str_func),
"num_pages": ("num_pages", int),
"ratings_count": ("ratings_count", int),
"text_reviews_count": ("text_reviews_count", int),
"publication_date": ("publication_date", cleanup_str_func),
"publisher": ("publisher_name", cleanup_str_func),
}
# new dict for kwargs to be passed to Book()
kwargs = {}
for key in json_book:
# some keys have extra spaces
cleaned_key = key.strip()
if cleaned_key not in converted_keys_and_cleanup_funcs:
# this should alert us
raise Exception(f"Unexpected key found: '{key}'")
# get new converted key and assign it in the kwargs dict
kwargs_key, cleanup_func = converted_keys_and_cleanup_funcs[cleaned_key]
try:
kwargs[kwargs_key] = cleanup_func(json_book[key])
except ValueError as e:
print(f"Got ValueError: {e} for following JSON. Skipping")
pprint(json_book)
return
return Book(**kwargs)
| 30.325581 | 89 | 0.645961 | 512 | 3,912 | 4.724609 | 0.318359 | 0.026457 | 0.0463 | 0.028524 | 0.119057 | 0.042993 | 0 | 0 | 0 | 0 | 0 | 0.003425 | 0.253579 | 3,912 | 128 | 90 | 30.5625 | 0.825 | 0.085378 | 0 | 0.022472 | 0 | 0 | 0.135347 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.078652 | 0 | 0.11236 | 0.044944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcdba7dd7903e0df919b227a20e23652c6d7a728 | 1,712 | py | Python | dcp_diag/component_agents/azul_agent.py | HumanCellAtlas/dcp-diag | 85c324ed86785d8b224fca2f3cbc1097f8606dde | [
"MIT"
] | 2 | 2018-10-03T17:56:54.000Z | 2018-10-08T18:10:38.000Z | dcp_diag/component_agents/azul_agent.py | HumanCellAtlas/dcp-diag | 85c324ed86785d8b224fca2f3cbc1097f8606dde | [
"MIT"
] | 15 | 2018-11-20T11:06:32.000Z | 2019-09-17T20:06:20.000Z | dcp_diag/component_agents/azul_agent.py | HumanCellAtlas/dcp-diag | 85c324ed86785d8b224fca2f3cbc1097f8606dde | [
"MIT"
] | null | null | null | import requests
import json
class AzulAgent:
    """Thin HTTP client for the Azul service's bundle index."""

    def __init__(self, deployment):
        self.deployment = deployment
        if self.deployment == 'prod':
            self.azul_service_url = 'https://service.explore.data.humancellatlas.org'
        else:
            self.azul_service_url = f'https://service.{deployment}.explore.data.humancellatlas.org'

    def get_project_bundle_fqids(self, document_id, page_size=1000):
        """Return the set of bundle FQIDs ("uuid.version") for a project.

        Pages through /repository/bundles filtered by project id until the
        service stops returning a search_after cursor.
        """
        bundle_fqids = set()
        filters = {'projectId': {'is': [document_id]}}
        params = {
            'filters': json.dumps(filters),
            'size': page_size,
        }
        url = self.azul_service_url + '/repository/bundles'
        while True:
            response = requests.get(url, params=params)
            # Fail loudly on HTTP errors instead of trying to parse an error body.
            response.raise_for_status()
            response_json = response.json()
            for content in response_json.get('hits', []):
                bundle_fqids.update(f"{bundle['bundleUuid']}.{bundle['bundleVersion']}"
                                    for bundle in content['bundles'])
            pagination = response_json.get('pagination')
            if pagination is None:
                break
            search_after = pagination.get('search_after')
            search_after_uid = pagination.get('search_after_uid')
            if search_after is None and search_after_uid is None:
                # Last page reached.
                break
            params.update({
                'size': page_size,
                'search_after': search_after,
                'search_after_uid': search_after_uid,
            })
        return bundle_fqids
| 30.035088 | 99 | 0.536799 | 169 | 1,712 | 5.195266 | 0.35503 | 0.125285 | 0.079727 | 0.061503 | 0.100228 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005545 | 0.367991 | 1,712 | 56 | 100 | 30.571429 | 0.805915 | 0 | 0 | 0.044444 | 0 | 0 | 0.164136 | 0.028037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.044444 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcdc4a0b43fc30222496ff000d7b32f612ff3e55 | 3,764 | py | Python | BabyForm1.py | UofU-BMI-BirthRecord/BirthRecord-Application | 1ebb6674f59623ea3ce424b5feef5071fa63df05 | [
"Apache-2.0"
] | null | null | null | BabyForm1.py | UofU-BMI-BirthRecord/BirthRecord-Application | 1ebb6674f59623ea3ce424b5feef5071fa63df05 | [
"Apache-2.0"
] | null | null | null | BabyForm1.py | UofU-BMI-BirthRecord/BirthRecord-Application | 1ebb6674f59623ea3ce424b5feef5071fa63df05 | [
"Apache-2.0"
] | null | null | null | #from flask_wtf import Form
from flask_wtf import FlaskForm
from wtforms import IntegerField, SubmitField, TextAreaField, SelectField, StringField, TextField, DecimalField, HiddenField, BooleanField
from wtforms import validators, ValidationError
import GetPatientInfo
from WeightUtil import *
class BabyForm(FlaskForm):
    """WTForms form for the newborn page of a birth record.

    Most fields are generated dynamically at class-definition time from the
    FHIR resource code table (see the ``for`` loop below), so the field set
    tracks ``FHIR_resource_codes_1.txt`` rather than being hard-coded.
    """
    # Code table describing each baby-page field: name, datatype, description.
    CODEFILE = "FHIR_resource_codes_1.txt"
    codes = GetPatientInfo.getCODETABLE(file=CODEFILE, page='baby')
    submit = SubmitField("Save and Next")
    fields = ['newborn_family_name',
              'newborn_first_name',
              'birth_weight',
              'birth_weight_grams',
              'birth_weight_lbs',
              'birth_weight_ozs',
              'apgar10m_score',
              'apgar5m_score',
              'apgar1m_score']
    # Generate one WTForms field per entry, picking the field class from the
    # code table's datatype.  NOTE(review): exec runs strings built from the
    # code file -- safe only while that file stays trusted and well-formed.
    for f in fields:
        field = codes[f]
        comm = f + " = "
        if field['datatype'].lower() == 'int':
            comm += "IntegerField('%s')" % field['desc']
        elif field['datatype'].lower() == 'string':
            comm += "StringField('%s')" % field['desc']
        elif field['datatype'].lower() == 'float':
            comm += "DecimalField('%s')" % field['desc']
        elif field['datatype'].lower() == 'boolean':
            comm += "BooleanField('%s')" % field['desc']
        print("Executing command: " + comm)
        exec (comm)
    mother_id = StringField("Mother ID", [validators.DataRequired()])
    def preload(self, pid):
        """Populate the form from stored FHIR data for newborn *pid*.

        Fills in the mother ID, the baby's names, the 1/5/10-minute Apgar
        scores, and the birth weight (converted into grams, pounds and
        ounces via ``convertWeight``).  Missing values are skipped.
        """
        self.mother_id.data = GetPatientInfo.getMotherID(pid)
        baby, medInfo = GetPatientInfo.getPatientMedical(pid=pid, codes=self.codes)
        if baby == None:
            return
        if baby.name[0].family != None:
            self.newborn_family_name.data = baby.name[0].family
        if baby.name[0].given[0] != None:
            self.newborn_first_name.data = baby.name[0].given[0]
        value, unit = GetPatientInfo.getMedInfoValue(medInfo, "apgar1m_score")
        if value != None:
            self.apgar1m_score.data = value
        value, unit = GetPatientInfo.getMedInfoValue(medInfo, "apgar5m_score")
        if value != None:
            self.apgar5m_score.data = value
        value, unit = GetPatientInfo.getMedInfoValue(medInfo, "apgar10m_score")
        if value != None:
            self.apgar10m_score.data = value
        value, unit = GetPatientInfo.getMedInfoValue(medInfo, "birth_weight")
        if value != None:
            vGrams, vLbs, vOzs = convertWeight(float(value), unit)
            self.birth_weight_grams.data = vGrams
            self.birth_weight_lbs.data = vLbs
            self.birth_weight_ozs.data = vOzs
        # NOTE(review): mother_id.data was already set at the top of this
        # method; this second assignment is redundant but kept as-is.
        self.mother_id.data = GetPatientInfo.getMotherID(pid)
| 38.408163 | 138 | 0.613177 | 393 | 3,764 | 5.699746 | 0.254453 | 0.054018 | 0.040179 | 0.067857 | 0.316964 | 0.161161 | 0.161161 | 0.079018 | 0 | 0 | 0 | 0.008677 | 0.265143 | 3,764 | 97 | 139 | 38.804124 | 0.801157 | 0.240967 | 0 | 0.105263 | 0 | 0 | 0.14963 | 0.009259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.087719 | 0 | 0.22807 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcde5efdccefb089a037137083b3846183985189 | 2,984 | py | Python | assets/utils/piaf/af_structure_browser.py | 47lining/quickstart-osisoft-pisystem2aws-connector | f6bdcb84b3cb271d3498d057474be6833f67b5be | [
"Apache-2.0"
] | null | null | null | assets/utils/piaf/af_structure_browser.py | 47lining/quickstart-osisoft-pisystem2aws-connector | f6bdcb84b3cb271d3498d057474be6833f67b5be | [
"Apache-2.0"
] | null | null | null | assets/utils/piaf/af_structure_browser.py | 47lining/quickstart-osisoft-pisystem2aws-connector | f6bdcb84b3cb271d3498d057474be6833f67b5be | [
"Apache-2.0"
] | null | null | null | import re
class AfStructureBrowser(object):
    """
    Filter an OSIsoft PI AF asset tree (nested dicts) with regular expressions.

    Assets whose ``assets_field`` fully matches ``assets_query`` are collected,
    keyed by their ``path``, together with the subset of their attributes whose
    ``attributes_field`` fully matches ``attributes_query``.  Assets with no
    matching attribute are skipped; children of visited assets are always
    searched recursively.
    """

    def __init__(self, assets_query, assets_field="name", attributes_query=".*", attributes_field="name"):
        # AF paths contain backslashes; escape them so the query survives regex compilation.
        self.assets_query = assets_query.replace("\\", "\\\\") if assets_field == 'path' else assets_query
        self.assets_field = assets_field
        self.attributes_query = attributes_query
        self.attributes_field = attributes_field

    def search_assets(self, structure):
        """Walk the tree and return {asset_path: asset_copy} for every match."""
        results = {}
        self._search_assets_tree(structure, results)
        return results

    def _search_assets_tree(self, structure, results):
        for asset in structure:
            if self._match_asset_field_with_query(asset, self.assets_query, self.assets_field):
                copy = self._copy_node_and_remove_children_assets(asset)
                filtered_attributes = self._filter_attributes(copy['attributes'])
                # Only report assets that retain at least one matching attribute.
                if len(filtered_attributes) > 0:
                    copy['attributes'] = filtered_attributes
                    results[copy['path']] = copy
            if 'assets' in asset:
                self._search_assets_tree(asset['assets'], results)

    def _copy_node_and_remove_children_assets(self, asset):
        # Shallow copy without the child list so results stay flat.
        copy = asset.copy()
        if 'assets' in asset:
            copy.pop('assets')
        return copy

    def _filter_attributes(self, attributes_list):
        result = []
        for attribute in attributes_list:
            if self._match_attribute_field_with_query(attribute, self.attributes_query, self.attributes_field):
                result.append(attribute)
        return result

    @staticmethod
    def _match_asset_field_with_query(asset, query, field):
        field_not_present = field not in asset \
            or asset[field] is None \
            or (isinstance(asset[field], list) and len(asset[field]) == 0)
        if field_not_present:
            # Only the match-everything query accepts assets lacking the field.
            return query == ".*"
        if field != 'categories':
            string_to_match = asset[field]
            # BUG FIX: the previous '"^" + query + "$"' anchoring broke
            # alternation queries (e.g. "a|b" became "^a|b$", which matches any
            # string starting with "a" or ending with "b").  re.fullmatch
            # anchors the whole pattern correctly.
            return re.fullmatch(query, string_to_match)
        else:
            # Asset categories are plain strings; match any of them.
            for category in asset['categories']:
                if re.fullmatch(query, category):
                    return True
            return False

    @staticmethod
    def _match_attribute_field_with_query(attribute, query, field):
        field_not_present = field not in attribute \
            or attribute[field] is None \
            or (isinstance(attribute[field], list) and len(attribute[field]) == 0)
        if field_not_present:
            return query == ".*"
        if field != 'categories':
            string_to_match = attribute[field]
            return re.fullmatch(query, string_to_match)
        else:
            # Attribute categories are dicts; match against their values.
            for category in attribute['categories']:
                for k, v in category.items():
                    if re.fullmatch(query, v):
                        return True
            return False
| 39.263158 | 111 | 0.586126 | 319 | 2,984 | 5.203762 | 0.178683 | 0.036145 | 0.033735 | 0.025301 | 0.422892 | 0.3 | 0.183133 | 0.183133 | 0.140964 | 0.140964 | 0 | 0.001471 | 0.316354 | 2,984 | 75 | 112 | 39.786667 | 0.812255 | 0 | 0 | 0.285714 | 0 | 0 | 0.040214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.015873 | 0 | 0.31746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcdede0c23afb24fce856491c71696e393155ec4 | 3,492 | py | Python | scripts/noaa_spider.py | guilload/thrashtide | b7cfc2e092c032251a862a46102420c169f92a68 | [
"MIT"
] | null | null | null | scripts/noaa_spider.py | guilload/thrashtide | b7cfc2e092c032251a862a46102420c169f92a68 | [
"MIT"
] | null | null | null | scripts/noaa_spider.py | guilload/thrashtide | b7cfc2e092c032251a862a46102420c169f92a68 | [
"MIT"
] | null | null | null | import json
import scrapy
HARCON_HEADER = ("index", "name", "amplitude", "phase", "speed")
class Station(scrapy.Item):
    """One NOAA tide station, filled in incrementally across spider callbacks."""
    constituents = scrapy.Field()  # list of harmonic-constituent dicts (index/name/amplitude/phase/speed)
    latitude = scrapy.Field()      # decimal degrees (degrees + minutes/60, see NOAASpider.ugly_convert_coordinate)
    longitude = scrapy.Field()     # decimal degrees
    MLLW = scrapy.Field()          # Mean Lower Low Water datum value (float)
    MTL = scrapy.Field()           # Mean Tide Level datum value (float)
    noaa_id = scrapy.Field()       # NOAA station id string
    noaa_name = scrapy.Field()     # station name (state stripped off)
    state = scrapy.Field()         # state portion of the station location
class NOAASpider(scrapy.Spider):
    """Scrape NOAA tide stations that publish harmonic constituents.

    Crawl chain: station listing -> datums page (MLLW/MTL) -> harmonic
    constituents page -> station home page (name/state/lat/lon).  The
    partially filled Station item travels between callbacks via Request.meta
    and is yielded once complete.
    """
    name = "NOAA"
    start_urls = ["http://tidesandcurrents.noaa.gov/stations.html?type=Harmonic+Constituents"]

    def parse(self, response):
        # Station divs carry an id whose first character is stripped to get
        # the NOAA station id; request each station's datums page.
        for noaa_id in [noaa_id[1:] for noaa_id in response.xpath("//div[contains(@class, 'station')]/@id").extract()]:
            station = Station(noaa_id=noaa_id)
            datums_path = "datums.html?units=1&epoch=0&id={}".format(noaa_id)
            datums_url = response.urljoin(datums_path)
            yield scrapy.Request(datums_url, callback=self.parse_datums, meta={'station': station})

    def parse_datums(self, response):
        # Pull the MLLW and MTL datum values, then chain to the harmonic
        # constituents page.
        station = response.meta['station']
        xpath = "//tbody/tr/td/a[text() = '{}']/../../td[2]/text()"
        noaa_id = station["noaa_id"]
        for datum in ("MLLW", "MTL"):
            value = response.xpath(xpath.format(datum))[0].extract()
            station[datum] = float(value)
        harcon_path = "harcon.html?unit=0&timezone=0&id={}".format(noaa_id)
        harcon_url = response.urljoin(harcon_path)
        yield scrapy.Request(harcon_url, callback=self.parse_harcons, meta={'station': station})

    def parse_harcons(self, response):
        # Build one dict per harmonic-constituent table row, converting the
        # numeric columns, then chain to the station home page.
        station = response.meta['station']
        noaa_id = station["noaa_id"]
        harcons = []
        for tr in response.xpath("//tbody/tr"):
            harcon = {}
            for i, td in enumerate(tr.xpath("td/text()")[:5]):
                harcon[HARCON_HEADER[i]] = td.extract()
            for key, func in (("index", int), ("amplitude", float), ("phase", float), ("speed", float)):
                harcon[key] = func(harcon[key])
            harcons.append(harcon)
        station["constituents"] = harcons
        stationhome_path = "stationhome.html?id={}".format(noaa_id)
        stationhome_url = response.urljoin(stationhome_path)
        yield scrapy.Request(stationhome_url, callback=self.parse_station, meta={'station': station})

    def parse_station(self, response):
        # Final step: extract name, state and coordinates, then emit the item.
        station = response.meta['station']
        location, _ = response.xpath("//h3/text()")[0].extract().strip().rsplit('-', 1)
        name, state = location.rsplit(',', 1)
        station["noaa_name"] = name.strip()
        station["state"] = state.strip()
        texts = response.xpath("//td/text()").extract()
        latitude = self.ugly_search(texts, "Latitude")
        longitude = self.ugly_search(texts, "Longitude")
        station["latitude"] = self.ugly_convert_coordinate(latitude)
        station["longitude"] = self.ugly_convert_coordinate(longitude)
        yield station

    @staticmethod
    def ugly_convert_coordinate(coordinate):
        # Expects three whitespace-separated tokens: degrees, minutes, and a
        # trailing token (presumably the hemisphere letter — no sign is
        # applied).  Non-numeric characters are stripped before conversion.
        degrees, minutes, _ = coordinate.split()
        degrees = "".join([c for c in degrees if c.isdigit() or c == '.'])
        minutes = "".join([c for c in minutes if c.isdigit() or c == '.'])
        return round(float(degrees) + float(minutes) / 60, 2)

    @staticmethod
    def ugly_search(elements, key):
        # Return the element immediately following `key` in a flat text list.
        for i, element in enumerate(elements):
            if element == key:
                return elements[i + 1]
        # BUG FIX: error message previously read "no found".
        raise KeyError("key '{}' not found".format(key))
| 34.574257 | 119 | 0.609966 | 410 | 3,492 | 5.080488 | 0.25122 | 0.037446 | 0.024964 | 0.020163 | 0.147864 | 0.054729 | 0 | 0 | 0 | 0 | 0 | 0.005975 | 0.233104 | 3,492 | 100 | 120 | 34.92 | 0.771845 | 0 | 0 | 0.1 | 0 | 0 | 0.140607 | 0.045246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.028571 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcdf88e243084249bdc7e74dd8256b969863ffa7 | 3,079 | py | Python | torchvision_3d/models/vgg.py | rubythalib33/3D-Torchvision | 7dab0a3d1d83e6046320f879af2bff28b31310ab | [
"MIT"
] | 4 | 2022-03-09T02:53:12.000Z | 2022-03-10T14:35:06.000Z | torchvision_3d/models/vgg.py | rubythalib33/3D-Torchvision | 7dab0a3d1d83e6046320f879af2bff28b31310ab | [
"MIT"
] | null | null | null | torchvision_3d/models/vgg.py | rubythalib33/3D-Torchvision | 7dab0a3d1d83e6046320f879af2bff28b31310ab | [
"MIT"
] | 1 | 2022-03-10T14:35:08.000Z | 2022-03-10T14:35:08.000Z | from turtle import forward
import torch.nn as nn
import torch
class VGG3D(nn.Module):
    """Inflate an (optionally pretrained) 2-D torchvision VGG feature
    extractor into an equivalent 3-D network.

    Each Conv2d becomes a Conv3d with a depth kernel of size 1 (the 2-D
    weights stacked on a new depth axis), each MaxPool2d a MaxPool3d that
    leaves the depth dimension untouched, and each BatchNorm2d a freshly
    initialised BatchNorm3d; ReLUs are reused as-is.
    Input shape: (N, C, D, H, W).
    """

    SUPPORTED_TYPES = ('vgg11', 'vgg13', 'vgg16', 'vgg19',
                       'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn')

    def __init__(self, type, pretrained=False):
        # `type` shadows the builtin but is kept for caller compatibility.
        super(VGG3D, self).__init__()
        assert type in self.SUPPORTED_TYPES, 'type only support for vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn'
        self.type = type
        # Every supported name is a factory in torchvision.models.vgg, so the
        # original 8-way if/elif import chain collapses to a name lookup.
        import importlib
        factory = getattr(importlib.import_module('torchvision.models.vgg'), type)
        self.features = factory(pretrained=pretrained).features
        self.features = self.init_features()

    def forward(self, x):
        # Apply the inflated feature layers in order.
        for m in self.features:
            x = m(x)
        return x

    def init_features(self):
        """Translate the stored 2-D feature layers into 3-D counterparts."""
        features = []
        for model in self.features.children():
            if isinstance(model, nn.Conv2d):
                model_temp = nn.Conv3d(in_channels=model.in_channels, out_channels=model.out_channels,
                                       kernel_size=(1, *model.kernel_size), stride=(1, *model.stride),
                                       padding=(0, *model.padding))
                # Reuse the 2-D weights by adding a depth axis of length 1.
                model_temp.weight.data = torch.stack([model.weight.data], dim=2)
                model_temp.bias.data = model.bias.data
                features.append(model_temp)
            elif isinstance(model, nn.MaxPool2d):
                model_temp = nn.MaxPool3d(kernel_size=[1, model.kernel_size, model.kernel_size],
                                          stride=[1, model.stride, model.stride],
                                          padding=[0, model.padding, model.padding])
                features.append(model_temp)
            elif isinstance(model, nn.ReLU):
                features.append(model)
            elif isinstance(model, nn.BatchNorm2d):
                # NOTE(review): the 3-D batch norm is freshly initialised; the
                # pretrained 2-D BN statistics/affine parameters are NOT copied.
                model_temp = nn.BatchNorm3d(num_features=model.num_features)
                features.append(model_temp)
        return nn.Sequential(*features)
if __name__ == '__main__':
    # Smoke test: push a random clip through an inflated VGG11-BN.
    # Use the GPU only when one is actually available instead of the
    # original unconditional .cuda(), which crashed on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    example = torch.randn(1, 3, 3, 224, 224).to(device)
    model = VGG3D(type="vgg11_bn").to(device)
    print(model(example).shape)
dce1be765fc810f1f2efc2a19e81f0226d3354d5 | 25,193 | py | Python | codenerix/models.py | codenerix/django-codenerix | 1fc2edb451c5cf2359e243d860940876af0cf575 | [
"Apache-2.0"
] | 15 | 2018-03-21T10:47:57.000Z | 2022-02-01T08:17:56.000Z | codenerix/models.py | codenerix/django-codenerix | 1fc2edb451c5cf2359e243d860940876af0cf575 | [
"Apache-2.0"
] | 4 | 2018-04-11T10:19:08.000Z | 2020-02-29T18:54:36.000Z | codenerix/models.py | codenerix/django-codenerix | 1fc2edb451c5cf2359e243d860940876af0cf575 | [
"Apache-2.0"
] | 5 | 2018-03-12T16:57:02.000Z | 2021-04-15T16:51:08.000Z | # -*- coding: utf-8 -*-
#
# django-codenerix
#
# Copyright 2017 Centrologic Computational Logistic Center S.L.
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson import json_util
import json
from django.db import models
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text, smart_text
from django.conf import settings
from django.db.models.signals import post_delete, pre_delete
from django.dispatch.dispatcher import receiver
from django.core.exceptions import PermissionDenied
from codenerix.middleware import get_current_user
from codenerix.helpers import daterange_filter
# Separator to log
# NOTE(review): u'\u8594' is the CJK character U+8594, not the arrow U+2192.
# It looks like the decimal HTML entity &#8594; (= U+2192, '→') was pasted as
# a hex escape.  Stored Log.change_txt rows already contain this character,
# so changing the value would break rendering of existing entries — left as-is.
SEPARATOR = u'\u8594'
SEPARATOR_HTML = ' → '
class CodenerixMetaType(dict):
    """
    Dict subclass whose items are also reachable as attributes; used for the
    per-instance CodenerixMeta container of an instance, NOT the class.
    Missing attributes resolve to None (like dict.get) instead of raising.

    Example:
        m = CodenerixMetaType({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
    """

    def __init__(self, *args, **kwargs):
        super(CodenerixMetaType, self).__init__(*args, **kwargs)
        # Re-assign every entry through __setitem__ so the attribute mirror
        # (instance __dict__) stays in sync with the dict contents.
        for arg in args:
            if isinstance(arg, dict):
                for key in arg:
                    self[key] = arg[key]
        for key in kwargs:
            self[key] = kwargs[key]

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails: behave like get().
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super(CodenerixMetaType, self).__setitem__(key, value)
        self.__dict__[key] = value

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(CodenerixMetaType, self).__delitem__(key)
        self.__dict__.pop(key)

    def __getnewargs__(self):
        # Support pickling/copying of this dict subclass.
        return tuple()

    def __getstate__(self):
        return self.__dict__
class CodenerixModelBase(models.Model):
    # Abstract base that merges every ancestor's class-level CodenerixMeta
    # declaration into a single per-instance CodenerixMeta container.
    class Meta:
        abstract = True

    # Return the class MRO (method resolution order) tuple.
    def __getmro__(self):
        return self.__class__.__mro__

    # Collects information from all classes that intervene in the instance:
    # walks the MRO base-first so subclasses extend inherited CodenerixMeta
    # entries — dicts are merged, lists are concatenated, and for plain
    # scalars the value set by the most basic class is kept.
    def __init__(self, *args, **kwards):
        self.CodenerixMeta = CodenerixMetaType()
        mro = self.__getmro__()
        for cl in reversed(mro):
            if 'CodenerixMeta' in cl.__dict__.keys():
                for key in cl.CodenerixMeta.__dict__.keys():
                    # Skip dunder entries; only author-declared keys are merged.
                    if '__' != key[0:2]:
                        value = getattr(cl.CodenerixMeta, key)
                        if value:
                            if key not in self.CodenerixMeta:
                                self.CodenerixMeta[key] = value
                            else:
                                if type(value) == dict:
                                    self.CodenerixMeta[key].update(value)
                                elif type(value) == list:
                                    # Coerce the accumulated value to a list before extending.
                                    if type(self.CodenerixMeta[key]) != list:
                                        self.CodenerixMeta[key] = list(self.CodenerixMeta[key])
                                    self.CodenerixMeta[key] += value
                                elif type(self.CodenerixMeta[key]) == list:
                                    self.CodenerixMeta[key] += list(value)
        return super(CodenerixModelBase, self).__init__(*args, **kwards)
class CodenerixModel(CodenerixModelBase):
    '''
    Abstract base model for CODENERIX-managed tables: adds created/updated
    timestamps, the extended default permissions, and the hooks the generic
    list views expect.

    Special methods are
    __fields__: it is a list of fields
        Usage: fields.append(('key','Name',size:int_in_pixels,'alignment:left|right|center'))
        Example 1: fields.append(('title',_('Title')))
        Example 2: fields.append(('title',_('Title'),100,'center'))
        Example 3: fields.append(('title',_('Title'),None,'center')) # We don't want to define the size but we want to define the alignment
        Example 4: fields.append((None,_('Title'))) # We don't want any ordering in the field Title
        Example 5: fields.append(('user__username',_('Username'))) # You can define here relationships as well
    __limitQ__:
    __searchQ__:
    __searchF__:
    '''
    created = models.DateTimeField(_("Created"), editable=False, auto_now_add=True)
    updated = models.DateTimeField(_("Updated"), editable=False, auto_now=True)

    class Meta:
        abstract = True
        default_permissions = ('add', 'change', 'delete', 'view', 'list')

    class CodenerixMeta(object):
        abstract = None

    def __init__(self, *args, **kwargs):
        self.CodenerixMeta = CodenerixMetaType()
        return super(CodenerixModel, self).__init__(*args, **kwargs)

    def __strlog_add__(self):
        # Snapshot text stored by GenLog when the instance is created.
        return ''

    def __strlog_update__(self, newobj):
        # Snapshot text stored by GenLog when the instance is updated.
        return ''

    def __strlog_delete__(self):
        # Snapshot text stored by GenLog when the instance is deleted.
        return ''

    def __limitQ__(self, info):
        # Mapping of Q-filters applied unconditionally to list views.
        return {}

    def __searchQ__(self, info, text):
        # Mapping of Q-filters used by the free-text search box.
        return {}

    def __searchF__(self, info):
        # Mapping of named filters for the list-view filter widgets.
        return {}

    def lock_update(self, request=None):
        # Return an error message to forbid editing this instance, or None to allow it.
        return None

    def internal_lock_delete(self):
        """Return a lock message if deletion must be refused, else None.

        Besides the model-specific lock_delete(), deletion is refused when any
        reverse relation declared with on_delete=PROTECT still has rows.
        """
        answer = self.lock_delete()
        if answer is None:
            for related in self._meta.get_fields():
                if 'on_delete' in related.__dict__ and related.on_delete == models.PROTECT:
                    field = getattr(self, related.related_name, None)
                    if field:
                        f_exists = getattr(field, 'exists', None)
                        # Locked if we cannot cheaply test emptiness or the related queryset is non-empty.
                        if f_exists is None or f_exists():
                            answer = _('Cannot delete item, relationship with %(model_name)s') % {'model_name': related.related_model._meta.verbose_name}
                            break
        return answer

    def lock_delete(self, request=None):
        # Return an error message to forbid deleting this instance, or None to allow it.
        return None

    def clean(self):
        # Evaluate the (possibly costly or stateful) lock only once instead of
        # calling lock_update() twice as the previous implementation did.
        lock = self.lock_update()
        if lock is not None:
            raise ValidationError(lock)
        else:
            return super(CodenerixModel, self).clean()
class GenInterface(CodenerixModelBase):
    """
    Check force_methods options in CodenerixMeta class and make sure that the
    specified methods exist on the concrete class, raising IOError otherwise.
    """
    class Meta:
        abstract = True

    class CodenerixMeta(object):
        """
        force_methods = {'alias': ('method_name', 'Description'), }
        """
        pass

    def __init__(self, *args, **kwargs):
        self.CodenerixMeta = CodenerixMetaType()
        # BUG FIX: the base __init__ used to run twice (once here and once
        # more via a trailing "return super().__init__(...)"), repeating model
        # initialisation; it is now invoked exactly once.
        super(GenInterface, self).__init__(*args, **kwargs)
        # Verify that every method declared in force_methods is implemented.
        force_methods = getattr(self.CodenerixMeta, "force_methods", None)
        if force_methods:
            for alias in force_methods.keys():
                method = force_methods[alias]
                if not hasattr(self, method[0]) or not callable(getattr(self, method[0])):
                    raise IOError("Method {}() not found in class {}: {}".format(method[0], self._meta.object_name, method[1]))
@receiver(pre_delete)
def codenerixmodel_delete_pre(sender, instance, **kwargs):
    # Abort deletion of any model exposing internal_lock_delete() when that
    # hook reports a lock message (e.g. protected reverse relations).
    if not hasattr(instance, "name_models_list") and hasattr(instance, 'internal_lock_delete') and callable(instance.internal_lock_delete):
        lock_delete = instance.internal_lock_delete()
        if lock_delete is not None:
            raise PermissionDenied(lock_delete)
# We don't use log system when PQPRO_CASSANDRA == TRUE
if not (hasattr(settings, "PQPRO_CASSANDRA") and settings.PQPRO_CASSANDRA):
    from django.contrib.admin.models import ADDITION, CHANGE, DELETION
    # (flag value, translated label) choices for Log.action_flag, reusing
    # the django.contrib.admin action-flag constants.
    TYPE_ACTION = (
        (ADDITION, _("Add")),
        (CHANGE, _("Change")),
        (DELETION, _("Delete")),
    )
    class Log(models.Model):
        '''
        Audit-log entry recorded by GenLog: who changed what, when, and how.
        change_json holds the machine-readable diff, change_txt the
        human-readable one, snapshot_txt a model-provided free-form snapshot.
        '''
        action_time = models.DateTimeField('Date', auto_now=True)
        user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING, blank=True, null=True)
        # Username is denormalised so the entry stays meaningful after user deletion.
        username = models.CharField('Username', max_length=200, blank=True, null=False, default="")
        content_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING, blank=True, null=True)
        object_id = models.TextField('Object id', blank=True, null=True)
        object_repr = models.CharField('Object repr', max_length=200)
        action_flag = models.PositiveSmallIntegerField(_("Action"), choices=TYPE_ACTION)
        change_json = models.TextField('Json', blank=True, null=False)
        change_txt = models.TextField('Txt', blank=True, null=False)
        snapshot_txt = models.TextField('Snapshot Txt', blank=True, null=False)

        class Meta:
            permissions = (
                ("list_log", "Can list log"),
                ("detail_log", "Can view log"),
            )

        def show(self, view='html'):
            # Render change_txt as an HTML <ul> (view='html') or plain text lines.
            text = []
            if self.change_txt:
                cambios = json.loads(self.change_txt)
            else:
                cambios = ''
            for c in cambios:
                # Full-log entries store (verbose_name, "old SEPARATOR new")
                # pairs (see GenLog.save); plain entries store scalars.
                if type(cambios[c]) is list:
                    text.append(u"{}: {}".format(cambios[c][0], cambios[c][1]))
                else:
                    text.append(u"{}: {}".format(_(c), cambios[c]))
            if view == 'html':
                result = mark_safe(u'<ul><li>{}</li></ul>'.format(u'</li><li>'.join(text)).replace(SEPARATOR, SEPARATOR_HTML))
            else:
                result = '\n'.join(text)
            return result

        def __unicode__(self):
            return self.show(view='txt')

        def __str__(self):
            return self.__unicode__()

        def action(self):
            # Human-readable label for action_flag.
            # Find the action
            if self.action_flag == ADDITION:
                answer = _("Add")
            elif self.action_flag == CHANGE:
                answer = _("Change")
            elif self.action_flag == DELETION:
                answer = _("Delete")
            else:
                answer = "?"
            # Return answer
            return answer

        def __fields__(self, info):
            # Columns shown by the generic CODENERIX list view.
            fields = []
            fields.append(('action_time', _('Date')))
            fields.append(('user__username', _('Actual user')))
            fields.append(('username', _('Original user')))
            fields.append(('get_action_flag_display', _('Action')))
            # fields.append(('content_type__name', _('APP Name')))
            fields.append(('content_type', _('APP Name')))
            fields.append(('content_type__app_label', _('APP Label')))
            fields.append(('content_type__model', _('APP Model')))
            fields.append(('object_id', _('ID')))
            # fields.append(('object_repr', _('Representation')))
            fields.append(('show', _('Txt')))
            return fields

        def __searchQ__(self, info, text):
            # Free-text search: match users, ids, repr, and action names in
            # both English and the active translation.
            tf = {}
            tf['user'] = Q(user__username__icontains=text)
            tf['username'] = Q(username__icontains=text)
            # Flag
            if (text.lower() == 'add') or (text.lower() == 'addition') or (text.lower() == _('add')) or (text.lower() == _('addition')):
                tf['action_flag'] = Q(action_flag=ADDITION)
            elif (text.lower() == 'change') or (text.lower() == 'changed') or (text.lower() == _('change')) or (text.lower() == _('changed')):
                tf['action_flag'] = Q(action_flag=CHANGE)
            elif (text.lower() == 'edit') or (text.lower() == 'edition') or (text.lower() == _('edit')) or (text.lower() == _('edition')):
                tf['action_flag'] = Q(action_flag=CHANGE)
            elif (text.lower() == 'delete') or (text.lower() == 'deleted') or (text.lower() == _('delete')) or (text.lower() == _('deleted')):
                tf['action_flag'] = Q(action_flag=DELETION)
            # tf['content_type'] = Q(content_type__name__icontains=text)
            tf['object_id'] = Q(object_id__icontains=text)
            tf['object_repr'] = Q(object_repr__icontains=text)
            tf['action_time'] = 'datetime'
            return tf

        def __searchF__(self, info):
            # Named filters for the list-view filter widgets:
            # (label, Q-builder, widget spec) tuples keyed by field.
            tf = {}
            tf['action_time'] = (_('Date'), lambda x: Q(**daterange_filter(x, 'action_time')), 'daterange')
            tf['get_action_flag_display'] = (_('Action'), lambda x: Q(action_flag=x), list(TYPE_ACTION))
            tf['object_id'] = (_('ID'), lambda x: Q(object_id=x), 'input')
            tf['user__username'] = (_('Actual user'), lambda x: Q(user__username__icontains=x), 'input')
            tf['username'] = (_('Original user'), lambda x: Q(username__icontains=x), 'input')
            tf['content_type__app_label'] = (_('APP Label'), lambda x: Q(content_type__app_label__icontains=x), 'input')
            # tf['users']=(_('User'),lambda x: Q(user__username=x),[('M','M*'),('S','S*')])
            return tf
    class GenLog(object):
        """Mixin that writes a Log row on every save of the host model.

        Combine with a CodenerixModel subclass; set CodenerixMeta.log_full
        to also record the previous values (see GenLogFull).
        """
        class CodenerixMeta(CodenerixModel.CodenerixMeta):
            log_full = False

        def post_save(self, log):
            # custom post save from application
            pass

        def save(self, **kwargs):
            """Save the instance and record a Log entry describing the change.

            Builds two diffs of the fields that changed: attrs (JSON-safe
            values) and attrs_txt (human-readable, keyed by verbose_name).
            When CodenerixMeta.log_full is set, both carry (old, new) pairs
            instead of only the new value.
            """
            user = get_current_user()
            if user:
                user_id = user.pk
                username = user.username
            else:
                user_id = None
                username = ""
            model = apps.get_model(self._meta.app_label, self.__class__.__name__)
            # A pk that does not exist in the database still counts as new.
            isnew = True
            if self.pk is not None:
                list_obj = model.objects.filter(pk=self.pk)
                isnew = list_obj.count() == 0
            # raise IOError,self.__dict__
            # only attributes changes
            attrs = {}
            attrs_txt = {}
            # attributes from database
            attrs_bd = {}
            if isnew:
                action = ADDITION
                pk = None
            else:
                action = CHANGE
                pk = self.pk
                # Instance object
                # obj = model.objects.get(pk=self.pk)
                obj = list_obj.get()
                # Snapshot the persisted values so they can be compared below.
                for key in obj._meta.get_fields():
                    key = key.name
                    # exclude manytomany
                    if obj._meta.model._meta.local_many_to_many and key in [x.name for x in obj._meta.model._meta.local_many_to_many]:
                        value = None
                    elif obj._meta.get_fields(include_hidden=True) and key in [x.name for x in obj._meta.get_fields(include_hidden=True) if x.many_to_many and x.auto_created]:
                        value = None
                    else:
                        value = getattr(obj, key, None)
                    attrs_bd[key] = value
            # comparison attributes
            # for key in self._meta.get_fields():
            aux = None
            list_fields = [x.name for x in self._meta.get_fields()]
            for ffield in self._meta.get_fields():
                key = ffield.name
                # exclude manytomany
                if self._meta.model._meta.local_many_to_many and key in [x.name for x in self._meta.model._meta.local_many_to_many]:
                    field = None
                # elif self._meta.get_all_related_many_to_many_objects() and key in [x.name for x in self._meta.get_all_related_many_to_many_objects()]:
                elif self._meta.get_fields(include_hidden=True) and key in [x.name for x in self._meta.get_fields(include_hidden=True) if x.many_to_many and x.auto_created]:
                    field = None
                else:
                    field = getattr(self, key, None)
                if key in list_fields:
                    # if (not attrs_bd.has_key(key)) or (field != attrs_bd[key]):
                    if (key not in attrs_bd) or (field != attrs_bd[key]):
                        if field is not None or action == CHANGE:
                            aux = ffield
                            field_txt = field
                            if field_txt is None:
                                field_txt = '---'
                            if isinstance(field, CodenerixModel):
                                field = field.pk
                            try:
                                # Probe JSON-serialisability; fall back to str() in the except branch.
                                json.dumps(field, default=json_util.default)
                                if key not in attrs_bd or not self.CodenerixMeta.log_full:
                                    attrs[key] = field
                                else:
                                    if isinstance(attrs_bd[key], CodenerixModel):
                                        attrs[key] = (attrs_bd[key].pk, field, )
                                    else:
                                        attrs[key] = (attrs_bd[key], field, )
                            except Exception:
                                # If related, we don't do anything
                                if getattr(field, 'all', None) is None:
                                    field = str(field)
                                    if key not in attrs_bd or not self.CodenerixMeta.log_full:
                                        attrs[key] = field
                                    else:
                                        attrs[key] = (attrs_bd[key], field, )
                            if hasattr(ffield, "verbose_name"):
                                try:
                                    # Python 2/3 compatibility: unicode is undefined on Python 3.
                                    string_checks = [unicode, str]
                                except NameError:
                                    string_checks = [str]
                                if type(ffield.verbose_name) in string_checks:
                                    ffield_verbose_name = ffield.verbose_name
                                else:
                                    ffield_verbose_name = str(ffield.verbose_name)
                                if key not in attrs_bd or not self.CodenerixMeta.log_full:
                                    attrs_txt[ffield_verbose_name] = force_text(field_txt, errors='replace')
                                else:
                                    if attrs_bd[key] is None:
                                        attrs_bd[key] = '---'
                                    # Full log: store ("label", "old SEPARATOR new") pairs.
                                    attrs_txt[key] = (
                                        ffield_verbose_name,
                                        u"{}{}{}".format(
                                            force_text(attrs_bd[key], errors='replace'),
                                            SEPARATOR,
                                            force_text(field_txt, errors='replace')
                                        )
                                    )
            log = Log()
            log.user_id = user_id
            log.username = username
            log.content_type_id = ContentType.objects.get_for_model(self).pk
            log.object_id = pk
            log.object_repr = force_text(self, errors="replace")
            try:
                log.change_json = json.dumps(attrs, default=json_util.default)
            except UnicodeDecodeError:
                log.change_json = json.dumps({'error': '*JSON_ENCODE_ERROR*'}, default=json_util.default)
            try:
                log.change_txt = json.dumps(attrs_txt, default=json_util.default)
            except UnicodeDecodeError:
                log.change_txt = json.dumps({'error': '*JSON_ENCODE_ERROR*'}, default=json_util.default)
            log.action_flag = action
            if pk is None:
                log.snapshot_txt = self.__strlog_add__()
            else:
                log.snapshot_txt = obj.__strlog_update__(self)
            # Persist the instance first so a freshly created row has a pk.
            aux = super(GenLog, self).save(**kwargs)
            if pk is None:
                # if new element, get pk
                log.object_id = self.pk
            log.save()
            # custom post save from application
            self.post_save(log)
            return aux
    class GenLogFull(GenLog):
        # Like GenLog, but the Log diffs store (old, new) value pairs
        # instead of only the new values (CodenerixMeta.log_full flag).
        class CodenerixMeta(CodenerixModel.CodenerixMeta):
            log_full = True
@receiver(post_delete)
def codenerixmodel_delete_post(sender, instance, **kwargs):
if not hasattr(instance, "name_models_list") and issubclass(sender, GenLog):
user = get_current_user()
if user:
user_id = user.pk
username = user.username
else:
user_id = None
username = "*Unknown*"
action = DELETION
attrs = {}
attrs_txt = {}
# ._meta.get_fields() return all fields include related name
# ._meta.fields return all fields of models
# list_fields = [x.name for x in instance._meta.get_fields()]
for ffield in instance._meta.get_fields():
key = ffield.name
# exclude manytomany
if instance._meta.model._meta.local_many_to_many and key in [x.name for x in instance._meta.model._meta.local_many_to_many]:
field = None
# elif self._meta.get_all_related_many_to_many_objects() and key in [x.name for x in self._meta.get_all_related_many_to_many_objects()]:
elif instance._meta.get_fields(include_hidden=True) and key in [x.name for x in instance._meta.get_fields(include_hidden=True) if x.many_to_many and x.auto_created]:
field = None
else:
field = getattr(instance, key, None)
value = getattr(instance, ffield.name)
if isinstance(value, CodenerixModel):
attrs[ffield.name] = value.pk
else:
try:
json.dump(value, default=json_util.default)
attrs[ffield.name] = value
except TypeError:
# If related, we don't do anything
if getattr(value, 'all', None) is None:
# value = str(value)
value = smart_text(value)
attrs[ffield.name] = value
if hasattr(ffield, "verbose_name"):
try:
string_checks = [unicode, str]
except NameError:
string_checks = [str]
if type(ffield.verbose_name) in string_checks:
ffield_verbose_name = ffield.verbose_name
else:
ffield_verbose_name = str(ffield.verbose_name)
attrs_txt[ffield_verbose_name] = force_text(field, errors='replace')
log = Log()
log.user_id = user_id
log.username = username
log.content_type_id = ContentType.objects.get_for_model(instance).pk
log.object_id = instance.pk
log.object_repr = force_text(instance)
log.change_json = json.dumps(attrs, default=json_util.default)
log.change_txt = json.dumps(attrs_txt, default=json_util.default)
log.snapshot_txt = instance.__strlog_delete__()
log.action_flag = action
log.save()
class RemoteLog(CodenerixModel):
    '''
    RemoteLog system: persists a free-form ``data`` payload together with the
    submitting user (the username is denormalised so the record survives
    deletion of the user).
    '''
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING, blank=True, null=True)
    username = models.CharField('Username', max_length=200, blank=True, null=False, default="")
    data = models.TextField('Data', blank=False, null=False)

    def __fields__(self, info):
        # Columns shown by the generic CODENERIX list view.
        fields = []
        fields.append(('pk', _('ID')))
        fields.append(('created', _('Created')))
        fields.append(('user', _('Actual user')))
        fields.append(('username', _('Original user')))
        return fields

    def save(self, **kwargs):
        # Denormalise the username before persisting.
        if self.user:
            self.username = self.user.username
        # BUG FIX: this previously called super().__init__(**kwargs) instead
        # of save(), so the instance was never written to the database.
        return super(RemoteLog, self).save(**kwargs)
| 42.48398 | 181 | 0.543167 | 2,718 | 25,193 | 4.783297 | 0.149007 | 0.019383 | 0.019614 | 0.006923 | 0.36528 | 0.328667 | 0.294439 | 0.25375 | 0.223137 | 0.195293 | 0 | 0.002515 | 0.352915 | 25,193 | 592 | 182 | 42.555743 | 0.794994 | 0.139563 | 0 | 0.342169 | 0 | 0 | 0.055752 | 0.004289 | 0 | 0 | 0 | 0.001689 | 0 | 1 | 0.084337 | false | 0.004819 | 0.040964 | 0.033735 | 0.262651 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dce29bb40a99d94f488b8f470037b28cd28ee97f | 858 | py | Python | migrations/versions/a75e5cbedaad_.py | deanarchy/FLChat | 49cd437d7ca30102b519f6d4b96ac0b9602c689e | [
"MIT"
] | null | null | null | migrations/versions/a75e5cbedaad_.py | deanarchy/FLChat | 49cd437d7ca30102b519f6d4b96ac0b9602c689e | [
"MIT"
] | null | null | null | migrations/versions/a75e5cbedaad_.py | deanarchy/FLChat | 49cd437d7ca30102b519f6d4b96ac0b9602c689e | [
"MIT"
] | null | null | null | """empty message
Revision ID: a75e5cbedaad
Revises: 520f9f05e787
Create Date: 2021-05-21 10:36:28.752323
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a75e5cbedaad'
down_revision = '520f9f05e787'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``participants`` many-to-many table linking conversations to users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('participants',
    sa.Column('conversation_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['conversation_id'], ['conversation.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``participants`` table (reverses upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('participants')
    # ### end Alembic commands ###
| 25.235294 | 72 | 0.685315 | 100 | 858 | 5.79 | 0.51 | 0.046632 | 0.072539 | 0.079447 | 0.238342 | 0.238342 | 0.151986 | 0.151986 | 0 | 0 | 0 | 0.061538 | 0.166667 | 858 | 33 | 73 | 26 | 0.748252 | 0.343823 | 0 | 0 | 0 | 0 | 0.21673 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dce5dec856ec133da037182f8f14b17c7fb3f435 | 2,542 | py | Python | DailyProgrammer/DP20120704B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/DP20120704B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/DP20120704B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
An X-ray illuminator is the bright plate that doctors put filters over in order to view x-ray images.
In our problem, we are going to place various sizes of red and blue tinted cellophane randomly on top of a finite x-ray
illuminator.
If a given part of the illuminator is covered by only red filters, then the light is red. If it is covered by only blue
filters, then the light is blue. If it is covered by a mixture of red and blue filters, the light will be a shade of
purple.
Given some set of red and blue sheets, what is the total area of all the purple regions?
Specification: Each piece of cellophane is guaranteed to be an positive integer number of centimeters wide and tall, and
will be placed at an integer coordinate on the illuminator.
The input file will contain the following:
First, an integer n <= 1024 specifying how many pieces of cellophane there are
Then n lines for each piece of cellophane, where each line contains a character 'R' or 'B' for the color of the
cellophane sheet, then two positive integers x,y for the position of the upper-left corner of the sheet, then two
positive integers w,h for the width and height of the sheet.
IMPORTANT: Here are the constraints on the dimensions: 1 <= x+w <= 4096,1<=y+h<=4096,1<=w<=4095,1<=h<=4095...in other
words, a sheet should always lie within the boundry of the 4k by 4k board.
Here is an example input and output
input file:
3
R 0 0 5 5
R 10 0 5 5
B 3 2 9 2
Here is an ascii art example visualizing that input:
RRRRR RRRRR
RRRRR RRRRR
RRRPPBBBBBPPRRR
RRRPPBBBBBPPRRR
RRRRR RRRRR
expected program output: 8
Write a program to count the number of purple blocks given an input file.
For testing, here are some test files I generated:
http://codepad.org/5HtVUwCT
http://codepad.org/2KXIrWlj
http://codepad.org/Weyka1Pp
I am a fallible mod, but I believe the correct answer for those files should be 13064038,15822641,15666634 respectively.
"""
import numpy as np
def main():
    """Count illuminator cells covered by both a red and a blue sheet.

    Reads 'DP20120704B.txt' (first line: sheet count; then one sheet per
    line as "<R|B> x y w h") and prints the purple area in cm^2.
    """
    with open('DP20120704B.txt', 'r') as f:
        data = f.read()
    size = 4096
    # Boolean coverage masks for the 4k-by-4k board, one per color.
    red = np.zeros((size, size), dtype=bool)
    blue = np.zeros((size, size), dtype=bool)
    for r in data.split('\n')[1:]:
        work = r.split()
        # BUGFIX: a trailing newline yields an empty line; the original
        # raised IndexError on work[0] here.
        if not work:
            continue
        x, y = int(work[1]), int(work[2])
        w, h = int(work[3]), int(work[4])
        if work[0] == 'R':
            red[x:x+w, y:y+h] = True
        elif work[0] == 'B':
            blue[x:x+w, y:y+h] = True
    # Purple = covered by both colors. Vectorized count instead of the
    # original Python-level loop over all 16M cells.
    print(int(np.count_nonzero(red & blue)))
| 31.775 | 120 | 0.697089 | 460 | 2,542 | 3.834783 | 0.41087 | 0.017007 | 0.013605 | 0.020408 | 0.080499 | 0.007937 | 0.007937 | 0 | 0 | 0 | 0 | 0.046394 | 0.219906 | 2,542 | 79 | 121 | 32.177215 | 0.843167 | 0.767506 | 0 | 0 | 0 | 0 | 0.048027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.111111 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dce730e56dda1e585a4258679358f534f02fcfd2 | 2,410 | py | Python | gym_modular/rewards/robot/end_effector_acceleration_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/rewards/robot/end_effector_acceleration_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/rewards/robot/end_effector_acceleration_reward.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np
from ... import BaseTask
from assembly_gym.environment.generic import RobotComponent
from .. import Reward
class EndEffectorAccelerationReward(Reward[BaseTask]):
    """
    A reward for punishing high (linear and angular) end-effector accelerations.
    """

    def __init__(self, robot_name: str = "ur10", intermediate_timestep_reward_scale: float = 0.8,
                 final_timestep_reward_scale: Optional[float] = None, max_acceleration: float = 100.0):
        """
        :param robot_name: name of the robot whose gripper is monitored
        :param intermediate_timestep_reward_scale: scaling factor (applied to the reward at every step in which the gym
                                                   environment does not terminate)
        :param final_timestep_reward_scale: scaling factor (applied to the reward at the step in which the gym
                                            environment terminates)
        :param max_acceleration: the maximum acceleration to use for normalizing the (unscaled)
                                 reward to lie in [-1, 0]
        """
        name = "endeffector_acceleration_reward"
        super().__init__(name, intermediate_timestep_reward_scale, final_timestep_reward_scale, clip=False,
                         abbreviated_name="ee_acc")
        self.__robot_name = robot_name
        self.__gripper: Optional[RobotComponent] = None
        self.__max_acceleration: float = max_acceleration
        # Velocities of the previous step, used for finite-difference acceleration.
        self.__previous_linear_velocity = np.zeros(3)
        self.__previous_angular_velocity = np.zeros(3)

    def _reset(self) -> None:
        self.__gripper = self.task.environment.robots[self.__robot_name].gripper
        self.__previous_linear_velocity = np.zeros(3)
        self.__previous_angular_velocity = np.zeros(3)

    def _calculate_reward_unnormalized(self) -> float:
        linear_velocity, angular_velocity = self.__gripper.velocity
        time_step = self.task.environment.time_step
        linear_acceleration = (linear_velocity - self.__previous_linear_velocity) / time_step
        angular_acceleration = (angular_velocity - self.__previous_angular_velocity) / time_step
        # BUGFIX: remember the current velocities so the next call differences
        # over one step; the original never updated them, so "acceleration"
        # was always measured relative to the velocities at reset time.
        self.__previous_linear_velocity = linear_velocity
        self.__previous_angular_velocity = angular_velocity
        # Both norms are non-negative scalars, so their sum is already the cost
        # (the original wrapped the scalar sum in another np.linalg.norm, a no-op).
        cost = float(np.linalg.norm(linear_acceleration) + np.linalg.norm(angular_acceleration))
        return -cost

    def _get_min_reward_unnormalized(self) -> float:
        return -self.__max_acceleration
| 50.208333 | 120 | 0.678008 | 265 | 2,410 | 5.803774 | 0.339623 | 0.054616 | 0.074122 | 0.060468 | 0.215865 | 0.149545 | 0.06762 | 0.06762 | 0.06762 | 0.06762 | 0 | 0.006667 | 0.253112 | 2,410 | 47 | 121 | 51.276596 | 0.847778 | 0.257261 | 0 | 0 | 0 | 0 | 0.023963 | 0.018118 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.178571 | 0.035714 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dce79509cbf2a7826a942f2c2ddbb3230955066c | 6,525 | py | Python | nodedge/graphics_cut_line.py | Nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | 7 | 2020-03-25T19:54:56.000Z | 2021-06-09T04:43:58.000Z | nodedge/graphics_cut_line.py | Nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | 9 | 2020-01-17T10:47:54.000Z | 2021-05-30T12:40:28.000Z | nodedge/graphics_cut_line.py | nodedge/nodedge | 5658269a1841f33b3c42d6f79b8b50411e105787 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Graphics cut line module containing
:class:`~nodedge.graphics_cut_line.GraphicsCutLine` class. """
import logging
from enum import IntEnum
from typing import List, Optional
from PySide2.QtCore import QEvent, QPointF, QRectF, Qt
from PySide2.QtGui import QMouseEvent, QPainter, QPainterPath, QPen, QPolygonF
from PySide2.QtWidgets import (
QApplication,
QGraphicsItem,
QStyleOptionGraphicsItem,
QWidget,
)
from nodedge.utils import dumpException
class CutLineMode(IntEnum):
    """States of the cut-line interaction state machine."""

    #: Idle — no cut stroke is being drawn.
    NOOP = 1
    #: A cutting stroke is currently being drawn.
    CUTTING = 2
class CutLine:
    """
    :class:`~nodedge.graphics_cut_line.CutLine` class.

    State machine for the interactive cut stroke: Ctrl+LMB starts a stroke,
    mouse moves extend it, and releasing LMB deletes every edge it crosses.
    """

    def __init__(self, graphicsView: "GraphicsView") -> None:  # type: ignore
        self.__logger = logging.getLogger(__file__)
        self.__logger.setLevel(logging.INFO)

        self.mode: CutLineMode = CutLineMode.NOOP
        self.graphicsCutLine: GraphicsCutLine = GraphicsCutLine()
        self.graphicsView = graphicsView
        self.graphicsView.graphicsScene.addItem(self.graphicsCutLine)

    def update(self, event: QMouseEvent) -> Optional[QMouseEvent]:
        """
        Update the state machine of the cut line as well as the graphics cut line.

        :param event: Event triggering the update
        :type event: ``QMouseEvent``
        :return: Optional modified event needed by
            :class:`~nodedge.graphics_view.GraphicsView`
        :rtype: Optional[QMouseEvent]
        """
        eventButton: Qt.MouseButton = event.button()
        eventType: QEvent.Type = event.type()
        eventScenePos = self.graphicsView.mapToScene(event.pos())
        eventModifiers: Qt.KeyboardModifiers = event.modifiers()

        if self.mode == CutLineMode.NOOP:
            if (
                eventType == QEvent.MouseButtonPress
                and eventButton == Qt.LeftButton
                and eventModifiers & Qt.ControlModifier
            ):
                self.mode = CutLineMode.CUTTING
                QApplication.setOverrideCursor(Qt.CrossCursor)
                # Hand a synthetic release back so the view does not treat the
                # press as the start of a selection/drag.
                return QMouseEvent(
                    QEvent.MouseButtonRelease,
                    event.localPos(),
                    event.screenPos(),
                    Qt.LeftButton,
                    Qt.NoButton,
                    event.modifiers(),
                )

        if self.mode == CutLineMode.CUTTING:
            # CONSISTENCY: use the cached eventType instead of calling
            # event.type() a second time (as done above).
            if eventType == QEvent.MouseMove:
                self.graphicsCutLine.linePoints.append(eventScenePos)
                self.graphicsCutLine.update()
            elif (
                eventType == QEvent.MouseButtonRelease and eventButton == Qt.LeftButton
            ):
                self.cutIntersectingEdges()
                self.graphicsCutLine.linePoints = []
                self.graphicsCutLine.update()
                QApplication.setOverrideCursor(Qt.ArrowCursor)
                self.mode = CutLineMode.NOOP
        return None

    def cutIntersectingEdges(self) -> None:
        """
        Compare which :class:`~nodedge.edge.Edge`s intersect with current
        :class:`~nodedge.graphics_cut_line.GraphicsCutLine` and delete them safely.
        """
        try:
            scene: "Scene" = self.graphicsView.graphicsScene.scene  # type: ignore
            self.__logger.debug(f"Cutting points: {self.graphicsCutLine.linePoints}")

            # Walk every segment of the stroke ...
            for ix in range(len(self.graphicsCutLine.linePoints) - 1):
                p1 = self.graphicsCutLine.linePoints[ix]
                p2 = self.graphicsCutLine.linePoints[ix + 1]

                # @TODO: Notify intersecting edges once.
                # we could collect all touched nodes, and notify them once after
                # all edges removed we could cut 3 edges leading to a single editor
                # this will notify it 3x maybe we could use some Notifier class with
                # methods collect() and dispatch()
                for edge in reversed(scene.edges):
                    if edge.graphicsEdge.intersectsWith(p1, p2):
                        self.__logger.debug(
                            f"[{p1.__pos__()}, {p2.__pos__()}] intersects with: {edge}"
                        )
                        edge.remove()
                    else:
                        self.__logger.debug(
                            f"[{p1.__pos__()}, {p2.__pos__()}] does not intersect with: "
                            f"{edge.graphicsEdge.path()}"
                        )
            scene.history.store("Delete cut edges.")
            self.__logger.debug("Cutting has been done.")
        except Exception as e:
            # BUGFIX: log the exception; the original logged the literal
            # string "e".
            self.__logger.debug("%s", e)
            dumpException(e)
class GraphicsCutLine(QGraphicsItem):
    """Dashed polyline item visualizing the stroke used for cutting multiple
    `Edges` in one gesture."""

    def __init__(self, parent: Optional[QGraphicsItem] = None) -> None:
        """
        :param parent: parent widget
        :type parent: ``Optional[QGraphicsItem]``
        """
        super().__init__(parent)

        self.linePoints: List[QPointF] = []
        palette = QApplication.palette()
        self._pen: QPen = QPen(palette.link().color())
        self._pen.setWidth(2)
        self._pen.setDashPattern([2, 4])
        self.setZValue(2)

    def boundingRect(self) -> QRectF:
        """Qt bounding rectangle, derived from :meth:`shape`."""
        return self.shape().boundingRect()

    def shape(self) -> QPainterPath:
        """
        Build the ``QPainterPath`` that runs through all recorded stroke points.

        :return: ``QPainterPath`` representation of the cut line
        :rtype: ``QPainterPath``
        """
        if len(self.linePoints) <= 1:
            # Degenerate stroke: return a tiny placeholder path.
            fallback = QPainterPath(QPointF(0, 0))
            fallback.lineTo(QPointF(1, 1))
            return fallback
        path = QPainterPath(self.linePoints[0])
        for point in self.linePoints[1:]:
            path.lineTo(point)
        return path

    def paint(
        self,
        painter: QPainter,
        option: QStyleOptionGraphicsItem,
        widget: Optional[QWidget] = None,
    ) -> None:
        """Draw the stroke as an anti-aliased dashed polyline."""
        polyline = QPolygonF(self.linePoints)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.NoBrush)
        painter.setPen(self._pen)
        painter.drawPolyline(polyline)
| 34.52381 | 89 | 0.585747 | 617 | 6,525 | 6.095624 | 0.345219 | 0.018612 | 0.027918 | 0.030577 | 0.082957 | 0.068599 | 0.038819 | 0.013826 | 0 | 0 | 0 | 0.006498 | 0.316015 | 6,525 | 188 | 90 | 34.707447 | 0.836209 | 0.217778 | 0 | 0.072072 | 0 | 0 | 0.050826 | 0.01219 | 0 | 0 | 0 | 0.005319 | 0 | 1 | 0.063063 | false | 0 | 0.063063 | 0 | 0.207207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcea88cb35a02f7e9b11a8fbb9f570211bddc11f | 922 | py | Python | grid_point.py | drexelai/protein-nets | 06d705ac6be36ef5383b715b0a4b910538efee1d | [
"MIT"
] | 3 | 2021-03-31T01:32:31.000Z | 2022-03-24T07:51:59.000Z | grid_point.py | drexelai/protein-nets | 06d705ac6be36ef5383b715b0a4b910538efee1d | [
"MIT"
] | 3 | 2021-04-14T23:24:05.000Z | 2021-04-15T03:06:02.000Z | grid_point.py | drexelai/protein-nets | 06d705ac6be36ef5383b715b0a4b910538efee1d | [
"MIT"
] | null | null | null | # Distance is measured in Angstroms
# Class represents a single grid point in a 3D window
class grid_point:
    """One cell of a 3D window around a protein; distances are in Angstroms."""

    def __init__(self, occupancy=0, atom=None, coords=None, aa=None, diangle=None,
                 distance_to_nearest_atom=None, nearest_atom=None, threshold=3,
                 atoms_within_threshold=None, dm=None):
        # Flag: whether an atom occupies this grid point.
        self.occupancy = occupancy
        # Name of the atom, if one is present.
        self.atom = atom
        # Atomic position when occupied, otherwise the centre of the cell.
        self.coords = coords
        # Residue (amino acid) name, if present.
        self.aa = aa
        # Dihedral angle, if present.
        self.diangle = diangle
        # Distance to, and identity of, the closest atom (if an atom is present).
        self.distance_to_nearest_atom = distance_to_nearest_atom
        self.nearest_atom = nearest_atom
        # Cut-off distance and the atoms found within it.
        self.threshold = threshold
        self.atoms_within_threshold = atoms_within_threshold
dceb66b083eaedc5043fa09fbfd61c6eb3cc9dd0 | 426 | py | Python | swgpu/urls.py | JCristobal/SWGPU | 50039ae7ef6163e1bb3cccb0b6e4e4f5a62b620b | [
"Apache-2.0"
] | null | null | null | swgpu/urls.py | JCristobal/SWGPU | 50039ae7ef6163e1bb3cccb0b6e4e4f5a62b620b | [
"Apache-2.0"
] | null | null | null | swgpu/urls.py | JCristobal/SWGPU | 50039ae7ef6163e1bb3cccb0b6e4e4f5a62b620b | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^index$', views.index, name='index'),
    url(r'^peticion_ackley/$', views.peticion_ackley, name='peticion_ackley'),  # request served via the Ackley algorithm
    url(r'^peticion_rastrigin/$', views.peticion_rastrigin, name='peticion_rastrigin'),  # request served via the Rastrigin algorithm
]
| 38.727273 | 139 | 0.741784 | 57 | 426 | 5.438596 | 0.333333 | 0.051613 | 0.090323 | 0.122581 | 0.380645 | 0.380645 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112676 | 426 | 10 | 140 | 42.6 | 0.820106 | 0.234742 | 0 | 0 | 0 | 0 | 0.281734 | 0.065015 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcecd6b18e1e75df9c0e0d80b2d7ec546970b7a2 | 2,898 | py | Python | py_algo/sorting/competition/monk_monitor.py | Sk0uF/Algorithms | 236cc5b056ce2637d5d947c5fc1e3367cde886bf | [
"MIT"
] | 1 | 2021-07-05T15:39:04.000Z | 2021-07-05T15:39:04.000Z | py_algo/sorting/competition/monk_monitor.py | Sk0uF/Algorithms | 236cc5b056ce2637d5d947c5fc1e3367cde886bf | [
"MIT"
] | null | null | null | py_algo/sorting/competition/monk_monitor.py | Sk0uF/Algorithms | 236cc5b056ce2637d5d947c5fc1e3367cde886bf | [
"MIT"
] | 1 | 2021-09-02T21:31:34.000Z | 2021-09-02T21:31:34.000Z | """
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-being-monitor-709e0fd3/
Monk being the monitor of the class needs to have all the information about the class students. He is very busy with
many tasks related to the same, so he asked his friend Mishki for her help in one task. She will be given heights of all
the students present in the class and she needs to choose 2 students having heights h1 and h2 respectively, such that
h1 > h2 and difference between the number of students having height h1 and number of students having height h2 is
maximum. Note: The difference should be greater than 0. As Mishki has never been a monitor of the class, help her in the
same. If there exists such students then print the required difference else print "1" (without quotes).
Input - Output:
The first line will consists of one integer T, which denotes the number of test cases.
For each test case: One line consists of a integer N, denotes the number of students in the class.
Second line contains N space separated integers, where th integer denotes the height of the ith
student in the class.
For each test case, if the required difference exists then print its value, otherwise print -1.
Sample input:
1
6
3 1 3 2 3 2
Sample Output:
2
"""
"""
Sort the heights. After that, we can find the the desired value in linear time. Start iterating from the smaller to the
biggest height. Increase a counter when the height is the same. If it's not the same, the maximum difference of heights
is the maximum between our current maximum and counter - minimum, where the minimum is the minimum between our current
minimum number of heights and counter. The problem can be of course solved if we find the frequency of occurrences for
all numbers and then find the max and min values and subtract them. That would be a bit slower though.
Final complexity: O(NlogN + N)
"""
# For each test case: sort the heights, then sweep groups of equal heights
# in ascending order, tracking the smallest group size seen so far and the
# best (current group - smallest previous group) difference.
test_cases = int(input())
for _ in range(test_cases):
    _num_students = int(input())  # count line; the list length is implied
    heights = sorted(map(int, input().rstrip().split()))

    run_length = 1           # size of the current group of equal heights
    smallest_group = 0       # smallest group size among strictly shorter heights
    best_diff = -1
    seen_first_boundary = False

    for idx in range(1, len(heights)):
        if heights[idx] == heights[idx - 1]:
            run_length += 1
            continue
        if not seen_first_boundary:
            # First group boundary: the finished group initializes the minimum.
            seen_first_boundary = True
            smallest_group = run_length
        else:
            best_diff = max(best_diff, run_length - smallest_group)
            smallest_group = min(smallest_group, run_length)
        run_length = 1

    # Fold in the final group (only meaningful once at least two distinct
    # heights were seen).
    if seen_first_boundary:
        best_diff = max(best_diff, run_length - smallest_group)

    # All heights equal (or no positive difference): report -1.
    if best_diff <= 0:
        print(-1)
    else:
        print(best_diff)
| 39.69863 | 121 | 0.677364 | 458 | 2,898 | 4.251092 | 0.373362 | 0.021572 | 0.015408 | 0.017463 | 0.079096 | 0.028762 | 0.028762 | 0.028762 | 0 | 0 | 0 | 0.016023 | 0.267771 | 2,898 | 72 | 122 | 40.25 | 0.901508 | 0.486197 | 0 | 0.259259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dced4a3b2f06f94d7a92f57915a6444ff5071ff9 | 1,983 | py | Python | homedisplay/info_birthdays/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2016-11-28T04:35:06.000Z | 2016-11-28T04:35:06.000Z | homedisplay/info_birthdays/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 160 | 2015-01-01T20:59:29.000Z | 2016-04-25T13:36:52.000Z | homedisplay/info_birthdays/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2015-02-25T21:24:01.000Z | 2015-02-25T21:24:01.000Z | # -*- coding: utf-8 -*-
from django.core import serializers
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from homedisplay.utils import publish_ws
import datetime
import json
def get_birthdays(selected_date):
    """Return serialized Birthday entries for "today", "tomorrow" or "all"."""
    if selected_date == "all":
        queryset = Birthday.objects.all()
    else:
        target = datetime.date.today()
        if selected_date == "tomorrow":
            target = target + datetime.timedelta(days=1)
        queryset = Birthday.objects.filter(
            birthday__month=target.month, birthday__day=target.day)
    return json.loads(serializers.serialize("json", queryset))
class Birthday(models.Model):
    """A person's birthday/anniversary shown on the home info display."""

    name = models.CharField(max_length=100, verbose_name="Nimi")
    nickname = models.CharField(max_length=100, null=True, blank=True,
                                verbose_name="Lempinimi", help_text="Jos täytetty, näytetään nimen sijaan")
    birthday = models.DateField(verbose_name="Merkkipäivä")
    valid_year = models.NullBooleanField(
        default=True, verbose_name="Vuosi oikein", help_text="Onko vuosi oikein? Jos ei, ikää ei näytetä.")

    @property
    def age(self):
        """Age in whole years.

        BUGFIX: the original called the undefined name ``now()`` (NameError at
        runtime); use today's date, matching the DateField type of birthday.
        """
        diff = datetime.date.today() - self.birthday
        return int(diff.days / 365.2425)

    def __unicode__(self):
        return u"%s (%s) %s (valid_year=%s)" % (self.name, self.nickname, self.birthday, self.valid_year)

    class Meta:
        ordering = ["name"]
        verbose_name = "Merkkipäivä"
        verbose_name_plural = "Merkkipäivät"
def publish_changes():
    """Push refreshed birthday lists to every websocket channel."""
    for channel in ("today", "tomorrow", "all"):
        publish_ws("birthdays_%s" % channel, get_birthdays(channel))
@receiver(post_delete, sender=Birthday, dispatch_uid='birthday_delete_signal')
def publish_birthday_deleted(sender, instance, using, **kwargs):
    """Signal handler: re-publish all birthday lists after a Birthday is deleted."""
    publish_changes()
@receiver(post_save, sender=Birthday, dispatch_uid="birthday_saved_signal")
def publish_birthday_saved(sender, instance, *args, **kwargs):
    """Signal handler: re-publish all birthday lists after a Birthday is saved."""
    publish_changes()
| 33.05 | 107 | 0.690368 | 246 | 1,983 | 5.373984 | 0.426829 | 0.049924 | 0.018154 | 0.036309 | 0.090772 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009369 | 0.192637 | 1,983 | 59 | 108 | 33.610169 | 0.816365 | 0.01059 | 0 | 0.046512 | 0 | 0 | 0.129592 | 0.021939 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.162791 | 0.023256 | 0.511628 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcedd2fe6ce8860d2ebfc3feb64b8b8afde89134 | 1,183 | py | Python | TransformerNet/Transformer.py | TeaKatz/Models_Corpus | 6d9e91eb97829e73d88ecfc4754492f6324ef383 | [
"MIT"
] | null | null | null | TransformerNet/Transformer.py | TeaKatz/Models_Corpus | 6d9e91eb97829e73d88ecfc4754492f6324ef383 | [
"MIT"
] | null | null | null | TransformerNet/Transformer.py | TeaKatz/Models_Corpus | 6d9e91eb97829e73d88ecfc4754492f6324ef383 | [
"MIT"
] | null | null | null | import tensorflow as tf
from TransformerNet.layers import Encoder, Decoder
class Transformer(tf.keras.Model):
    """Encoder-decoder Transformer with a final linear projection to the
    target vocabulary."""

    def __init__(self, num_layers, d_model, num_heads, d_ff, input_vocab_size, target_vocab_size, pe_input, pe_target, drop_rate=0.1):
        super().__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, d_ff, input_vocab_size, pe_input, drop_rate)
        self.decoder = Decoder(num_layers, d_model, num_heads, d_ff, target_vocab_size, pe_target, drop_rate)
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):
        """One forward pass; returns (logits, attention_weights)."""
        # encoded: (batch_size, input_seq_len, d_model)
        encoded = self.encoder(inp, training, enc_padding_mask)
        # decoded: (batch_size, target_seq_len, d_model)
        # attention_weights: (batch, target_seq_len, input_seq_len)
        decoded, attention_weights = self.decoder(tar, encoded, training, look_ahead_mask, dec_padding_mask)
        # logits: (batch_size, target_seq_len, target_vocab_size)
        logits = self.final_layer(decoded)
        return logits, attention_weights
| 62.263158 | 135 | 0.741336 | 175 | 1,183 | 4.565714 | 0.297143 | 0.067584 | 0.075094 | 0.05632 | 0.252816 | 0.20025 | 0.132666 | 0.132666 | 0.100125 | 0.100125 | 0 | 0.002035 | 0.169062 | 1,183 | 18 | 136 | 65.722222 | 0.810783 | 0.171598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcf1077dc281e4de424e7b5cbf25db2a3e03b45d | 470 | py | Python | leetcode/python/hard/p023_mergeKLists.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/hard/p023_mergeKLists.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/hard/p023_mergeKLists.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node."""

    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node; None at the tail
class Solution:
    """LeetCode 23: merge k sorted linked lists."""

    def mergeKLists(self, lists):
        """Collect every value from the input lists, sort, then rebuild a
        single sorted linked list."""
        values = []
        for node in lists:
            while node:
                values.append(node.val)
                node = node.next
        values.sort()

        dummy = ListNode(None)
        tail = dummy
        for value in values:
            tail.next = ListNode(value)
            tail = tail.next
        return dummy.next
| 22.380952 | 38 | 0.514894 | 56 | 470 | 4.25 | 0.482143 | 0.042017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.395745 | 470 | 20 | 39 | 23.5 | 0.838028 | 0.07234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcf154140d779be50df39ec625bcaf518e1e2022 | 2,234 | py | Python | tests/V2/test_product_view.py | kmwangemi/Store-Manager-Challenge-3 | baf37c9316acadfd630009424757ed15091aaf92 | [
"MIT"
] | null | null | null | tests/V2/test_product_view.py | kmwangemi/Store-Manager-Challenge-3 | baf37c9316acadfd630009424757ed15091aaf92 | [
"MIT"
] | null | null | null | tests/V2/test_product_view.py | kmwangemi/Store-Manager-Challenge-3 | baf37c9316acadfd630009424757ed15091aaf92 | [
"MIT"
] | null | null | null | import unittest
import json
from run import app
from app.api.V2.views.product_view import Product
class ProductstestCase(unittest.TestCase):
    # Endpoint tests for /api/v2/products.

    def setUp(self):
        """will be called before every test"""
        self.client = app.test_client
        # Placeholder payload with every field populated.
        self.product = {
            "product_name" : "product_name",
            "category" : "category",
            "quantity" : "quantity",
            "price" : "price",
            "description" : "description"
        }
        # Payload with every field blank, used for validation tests.
        self.empty_product = {
            "product_name" : "",
            "category" : "",
            "quantity" : "",
            "price" : "",
            "description" : ""
        }

    '''Tests for product creation'''
    def test_product_created_successfully(self):
        """Tests that a product is created successfully"""
        res = self.client().post('/api/v2/products', data=json.dumps(self.product), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 201)

    def test_product_cannot_create_with_invalid_details(self):
        """Tests that a product cannot be created with empty fields"""
        # NOTE(review): asserts 201 (created) even though the docstring says
        # creation should fail for empty fields — looks like it should expect
        # a 4xx; confirm against the API's validation behavior.
        res = self.client().post('/api/v2/products', data=json.dumps(self.empty_product), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 201)

    '''Tests for getting successfully created products'''
    def test_gets_successfully_created_products(self):
        """Tests that api gets all created products"""
        res = self.client().get('/api/v2/products', data=json.dumps(self.product), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 200)

    '''Tests for getting one product'''
    def test_gets_one_successfully_created_product(self):
        """Tests that api gets one successfully created product"""
        # NOTE(review): the literal '<productId>' segment is sent as-is;
        # presumably a real product id should be interpolated — confirm.
        res = self.client().get('/api/v2/products/<productId>', data=json.dumps(self.product), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 200)
| 39.892857 | 142 | 0.570278 | 229 | 2,234 | 5.436681 | 0.279476 | 0.02008 | 0.041767 | 0.054618 | 0.493173 | 0.37751 | 0.37751 | 0.341365 | 0.341365 | 0.341365 | 0 | 0.010947 | 0.304834 | 2,234 | 56 | 143 | 39.892857 | 0.790728 | 0.102059 | 0 | 0.121212 | 0 | 0 | 0.171951 | 0.015046 | 0 | 0 | 0 | 0 | 0.121212 | 1 | 0.151515 | false | 0 | 0.121212 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcf1dfdabf48f9691feb1a7313770d2b7803e043 | 12,008 | py | Python | examples/ServiceSchema.py | meGregV/blpapi-python | 30b11686255665956538826cf035a76bba9d301c | [
"Unlicense"
] | 1 | 2019-08-17T07:05:41.000Z | 2019-08-17T07:05:41.000Z | examples/ServiceSchema.py | doomoolmori/blpapi-python | 55cb3e20d7ab3e22cf4718660d2789b3608142fd | [
"Unlicense"
] | null | null | null | examples/ServiceSchema.py | doomoolmori/blpapi-python | 55cb3e20d7ab3e22cf4718660d2789b3608142fd | [
"Unlicense"
] | null | null | null | # ServiceSchema.py
from __future__ import print_function
from __future__ import absolute_import
import blpapi
import time
from optparse import OptionParser
# Message-type and element names matched against incoming blpapi events.
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
REFERENCE_DATA_RESPONSE = blpapi.Name("ReferenceDataResponse")
TOKEN = blpapi.Name("token")
# Bloomberg authorization service address.
AUTH_SERVICE = "//blp/apiauth"
# Human-readable names for blpapi element data types, used when printing schemas.
ELEMENT_DATATYPE_NAMES = {
    blpapi.DataType.BOOL: "BOOL",
    blpapi.DataType.CHAR: "CHAR",
    blpapi.DataType.BYTE: "BYTE",
    blpapi.DataType.INT32: "INT32",
    blpapi.DataType.INT64: "INT64",
    blpapi.DataType.FLOAT32: "FLOAT32",
    blpapi.DataType.FLOAT64: "FLOAT64",
    blpapi.DataType.STRING: "STRING",
    blpapi.DataType.BYTEARRAY: "BYTEARRAY",
    blpapi.DataType.DATE: "DATE",
    blpapi.DataType.TIME: "TIME",
    blpapi.DataType.DECIMAL: "DECIMAL",
    blpapi.DataType.DATETIME: "DATETIME",
    blpapi.DataType.ENUMERATION: "ENUMERATION",
    blpapi.DataType.SEQUENCE: "SEQUENCE",
    blpapi.DataType.CHOICE: "CHOICE",
    blpapi.DataType.CORRELATION_ID: "CORRELATION_ID"
}
# Human-readable names for blpapi schema element statuses.
SCHEMA_STATUS_NAMES = {
    blpapi.SchemaStatus.ACTIVE: "ACTIVE",
    blpapi.SchemaStatus.DEPRECATED: "DEPRECATED",
    blpapi.SchemaStatus.INACTIVE: "INACTIVE",
    blpapi.SchemaStatus.PENDING_DEPRECATION: "PENDING"
}
def parseCmdLine():
    """Parse command-line options for the schema-printing example.

    Builds the authentication options string from --auth-type/--auth-name and
    attaches it as ``options.auth``.

    :return: optparse options object (host, port, service, authType,
             authName, auth)
    """
    parser = OptionParser()
    parser.add_option("-a",
                      "--host",
                      dest="host",
                      help="HOST address to connect to",
                      metavar="HOST",
                      default="localhost")
    parser.add_option("-p",
                      "--port",
                      dest="port",
                      type="int",
                      help="PORT to connect to (%default)",
                      metavar="PORT",
                      default=8194)
    parser.add_option("-s",
                      "--service",
                      default="//blp/apiflds",
                      help="SERVICE to print the schema of "
                      "('//blp/apiflds' by default)")
    parser.add_option("",
                      "--auth-type",
                      type="choice",
                      choices=["LOGON", "NONE", "APPLICATION", "DIRSVC",
                               "USER_APP"],
                      dest="authType",
                      help="Authentification type: LOGON (default), NONE, "
                      "APPLICATION, DIRSVC or USER_APP",
                      default="LOGON")
    parser.add_option("",
                      "--auth-name",
                      dest="authName",
                      help="The name of application or directory service",
                      default="")

    # Positional arguments are not used by this example.
    options, _ = parser.parse_args()

    options.auth = getAuthentificationOptions(options.authType,
                                              options.authName)
    return options
def getAuthentificationOptions(type, name):
    """Build the blpapi authentication options string for the given auth type.

    :param type: one of NONE, USER_APP, APPLICATION, DIRSVC; anything else
        falls back to OS logon authentication
    :param name: application or directory-service name (where applicable)
    :return: options string for SessionOptions, or None for type "NONE"
    """
    if type == "NONE":
        return None
    if type == "USER_APP":
        return ("AuthenticationMode=USER_AND_APPLICATION;"
                "AuthenticationType=OS_LOGON;"
                "ApplicationAuthenticationType=APPNAME_AND_KEY;"
                "ApplicationName=" + name)
    if type == "APPLICATION":
        return ("AuthenticationMode=APPLICATION_ONLY;"
                "ApplicationAuthenticationType=APPNAME_AND_KEY;"
                "ApplicationName=" + name)
    if type == "DIRSVC":
        return ("AuthenticationType=DIRECTORY_SERVICE;"
                "DirSvcPropertyName=" + name)
    return "AuthenticationType=OS_LOGON"
def printMessage(msg):
    """Print *msg*; ReferenceDataResponse messages get per-security field
    output instead of the raw message dump."""
    if msg.messageType() != REFERENCE_DATA_RESPONSE:
        print("[{0}]: {1}".format(", ".join(map(str, msg.correlationIds())),
                                  msg))
    else:
        # This case demonstrates how to get values of individual elements
        securityDataArray = msg.getElement("securityData")
        for securityData in securityDataArray.values():
            securityName = securityData.getElementValue("security")
            print(securityName)
            fieldData = securityData.getElement("fieldData")
            # NOTE(review): relies on a module-level ``options`` object with a
            # ``field`` attribute, but parseCmdLine defines no --field option
            # in this file — this branch would raise AttributeError.
            # Presumably carried over from another example; confirm.
            for fieldName in options.field:
                try:
                    fieldValue = fieldData.getElementValue(fieldName)
                    print(("%s %s" % (fieldName, fieldValue)))
                except:
                    # NOTE(review): bare except maps any failure to "n/a";
                    # narrowing to the blpapi exception types would be safer.
                    print(("%s n/a" % fieldName))
def auth(session):
    """Authorize against //blp/apiauth and return a "filled" Identity.

    Generates a token on *session*, sends an authorization request carrying
    it, and returns the authorized Identity.
    Raises Exception if token generation, service opening, or authorization
    fails.
    """
    eq = blpapi.EventQueue()

    # Generate token
    session.generateToken(eventQueue=eq)

    # Process related response
    ev = eq.nextEvent()
    token = None
    if ev.eventType() == blpapi.Event.TOKEN_STATUS:
        for msg in ev:
            printMessage(msg)
            if msg.messageType() == TOKEN_SUCCESS:
                token = msg.getElementAsString(TOKEN)
            elif msg.messageType() == TOKEN_FAILURE:
                break
    if not token:
        raise Exception("Failed to get token")

    # Purge EventQueue to reuse one for the next request
    eq.purge()

    # Open authentification service
    if not session.openService(AUTH_SERVICE):
        raise Exception("Failed to open auth service")

    # Obtain opened service
    authService = session.getService(AUTH_SERVICE)

    # Create and fill the authorithation request
    authRequest = authService.createAuthorizationRequest()
    authRequest.set(TOKEN, token)

    # Create Identity
    identity = session.createIdentity()

    # Send authorithation request to "fill" the Identity
    session.sendAuthorizationRequest(authRequest, identity, eventQueue=eq)

    # Process related responses
    while True:
        ev = eq.nextEvent()
        if ev.eventType() in set([
                blpapi.Event.RESPONSE,
                blpapi.Event.PARTIAL_RESPONSE,
                blpapi.Event.REQUEST_STATUS]):
            for msg in ev:
                printMessage(msg)
                if msg.messageType() == AUTHORIZATION_SUCCESS:
                    # auth passed, identity "filled"
                    return identity
                else:
                    raise Exception("Authorization failed")
def getIndent(level):
    """Return a two-spaces-per-level indentation prefix for *level*."""
    if level == 0:
        return ""
    return " ".ljust(2 * level)
# Print enumeration (constant list)
def printEnumeration(cl, level):
    """Print a constant list (enumeration) header and all of its constants,
    indented one level deeper than *level*."""
    indent = getIndent(level + 1)
    print(indent + " {0} {1} {2} \"{3}\" possible values:".format(
        cl.name(),
        SCHEMA_STATUS_NAMES[cl.status()],
        ELEMENT_DATATYPE_NAMES[cl.datatype()],
        cl.description()))

    # Enumerate and print all constant list's values (constants)
    for i in cl:
        print(indent + " {0} {1} {2} \"{3}\" = {4!s}".format(
            i.name(),
            SCHEMA_STATUS_NAMES[i.status()],
            ELEMENT_DATATYPE_NAMES[i.datatype()],
            i.description(),
            i.getValue()))
# Recursively print element definition
def printElementDefinition(ed, level=0):
    """Recursively print an element definition: its cardinality, status,
    alternate names, type definition, enumeration values, and sub-elements.

    :param ed: blpapi SchemaElementDefinition to print
    :param level: current indentation level (two spaces per level)
    """
    indent = getIndent(level)

    maxValues = ed.maxValues()
    if maxValues == blpapi.SchemaElementDefinition.UNBOUNDED:
        valuesRange = "[{0}, INF)".format(ed.minValues())
    else:
        valuesRange = "[{0}, {1}]".format(ed.minValues(), maxValues)

    # Get and print alternate element names
    alternateNames = ed.alternateNames()
    if alternateNames:
        alternateNames = "[{0}]".format(",".join(map(str, alternateNames)))
    else:
        alternateNames = ""

    print(indent + "* {0} {1} {2} {3} \"{4}\"".format(
        ed.name(),
        SCHEMA_STATUS_NAMES[ed.status()],
        valuesRange,
        alternateNames,
        ed.description()))

    # Get and print related type definition
    td = ed.typeDefinition()
    print(indent + " {0} {1} {2} {3}{4}{5}\"{6}\"".format(
        td.name(),
        SCHEMA_STATUS_NAMES[td.status()],
        ELEMENT_DATATYPE_NAMES[td.datatype()],
        "complex " if td.isComplexType() else "",
        "simple " if td.isSimpleType() else "",
        "enum " if td.isEnumerationType() else "",
        td.description()))

    # Get and print all possible values for enumeration type
    enumeration = td.enumeration()
    # IDIOM: "is not None" instead of the original "not ... is None".
    if enumeration is not None:
        printEnumeration(enumeration, level)

    if td.numElementDefinitions():
        print(indent + " Elements[{0}]:".format(
            td.numElementDefinitions()))
        # Enumerate and print all sub-element definitions
        for i in td.elementDefinitions():
            printElementDefinition(i, level + 1)
def printOperation(operation, service):
print("{0} \"{1}\" Request:".format(
operation.name(),
operation.description()))
# Print operation's request definition
printElementDefinition(operation.requestDefinition(), 1)
print("Responses[{0}]:".format(operation.numResponseDefinitions()))
# Enumerate and print all operation's response definitions
for r in operation.responseDefinitions():
printElementDefinition(r, 1)
print()
def main():
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(options.host)
sessionOptions.setServerPort(options.port)
if options.auth:
sessionOptions.setAuthenticationOptions(options.auth)
# Create a Session
session = blpapi.Session(sessionOptions)
# Start a Session
if not session.start():
raise Exception("Can't start session.")
try:
print("Session started.")
# Perform authentification
if options.auth:
identity = auth(session)
print("Authentification passed ({0})".format(
{-1: "Unknown seat type",
0: "BPS",
1: "No BPS"}[identity.getSeatType()]))
else:
identity = None
print("No authentification specified")
# Open service to get reference data from
if not session.openService(options.service):
raise Exception("Can't open '{0}' service.".format(
options.service))
# Obtain previously opened service
service = session.getService(options.service)
print("Service {0}:".format(options.service))
print("Service event definitions[{0}]:".format(
service.numEventDefinitions()))
# Enumerate and print all service's event definitions
for ed in service.eventDefinitions():
printElementDefinition(ed)
print()
print("Operations[{0}]:".format(service.numOperations()))
# Enumerate and print all service's operations
for operation in service.operations():
printOperation(operation, service)
finally:
# Stop the session
session.stop()
if __name__ == "__main__":
print("ServiceSchema")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| 34.210826 | 76 | 0.615756 | 1,201 | 12,008 | 6.09159 | 0.288926 | 0.032531 | 0.009021 | 0.013669 | 0.052214 | 0.047977 | 0.038272 | 0.031711 | 0.012849 | 0.012849 | 0 | 0.008053 | 0.276149 | 12,008 | 350 | 77 | 34.308571 | 0.83364 | 0.088108 | 0 | 0.095785 | 0 | 0 | 0.238025 | 0.029765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.003831 | 0.019157 | 0.003831 | 0.084291 | 0.1341 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcf5d0348f75187b044a2672de3945039ccc0b71 | 16,033 | py | Python | adaptiveStreamProducer.py | kurthorvath/adaptivestreamer | 7c85a67017ccba41b81968caf34f3ea49f0330a5 | [
"MIT"
] | null | null | null | adaptiveStreamProducer.py | kurthorvath/adaptivestreamer | 7c85a67017ccba41b81968caf34f3ea49f0330a5 | [
"MIT"
] | null | null | null | adaptiveStreamProducer.py | kurthorvath/adaptivestreamer | 7c85a67017ccba41b81968caf34f3ea49f0330a5 | [
"MIT"
] | null | null | null | import cv2,imutils, socket
import time
import threading
import time
import uuid
import logging, os
from functools import partial
from numpy import double
import requests
import json
import sys
import configparser
####### CONFIG PARAMS
#CONFIGSERVERIP = '127.0.0.1'
#CONFIGSERVERPORT = 9997
#logging.basicConfig(filename='adaptiveStreamserver.log', format='%(asctime)s %(levelname)-8s %(message)s',encoding='utf-8', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logging.basicConfig(handlers=[logging.FileHandler(filename='adaptiveStreamserver.log', encoding='utf-8', mode='a+')], format='%(asctime)s %(levelname)-8s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
######################
p1y = 5
p1x = 5
p2y = 405
p2x = 405
desiredfps = 10
originalfps = 0
imagebuffer = bytes()
configDict = {}
firstStart = True
######################
##### REMOVE ######
def readCLI(s):
while s != True:
cmd = input("Please enter Command: ")
print("You entered: " + cmd)
if cmd == 'add':
host = input("enter Client HOST (e.g.: 192.168.100.1:1234): ")
client = host.split(':')
if (len(client) != 2):
print("invalid client syntax")
continue
global clients
clients.append(host)
CID = str(client[0])+":"+str(client[1])
print("CID: ", CID)
global configDict
#create default config
configDict[CID+"fps"] = 30
configDict[CID+"p1x"] = 0
configDict[CID+"p1y"] = 0
configDict[CID+"p2x"] = 0
configDict[CID+"p2y"] = 0
#CID = uuid.uuid4()
#logging.debug(CID)
thread = threading.Thread(target=worker, args=(stop,client[0], client[1],CID,))
threads.append(thread)
thread.start()
else:
print("#######################################\n HELP\n Supported Commands: \n add: add new client")
def capture(stopthread, name, camid, headless):
if camid < 0:
vid = cv2.VideoCapture("test2.mp4")
else:
vid = cv2.VideoCapture(camid) # replace 'rocket.mp4' with 0 for webcam
#vid.set(cv2.CAP_PROP_FRAME_WIDTH, 4000)
#vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 3000)
fps,st,frames_to_count,cnt = (0,0,20,0)
windowname = 'BUFFERED VIDEO'+name
while True:
while(vid.isOpened()):
#continue
_,frame = vid.read()
#frame = frame[p1y:p2y,p1x:p2x]
#frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#print("dtype: ",frame.dtype)
logging.debug(frame.shape)
#cv2.imshow('INPUT_ORIG',frame)
logging.debug(' buffer: '+ str(len(bytes(frame))))
global originalfps
global imagebuffer
bufferlock.acquire()
imagebuffer = frame
bufferlock.release()
#frame = cv2.putText(frame,'FPS: '+str(fps),(10,40),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)
if headless == False:
cv2.imshow(windowname,frame)
key = cv2.waitKey(1) & 0xFF
#if key == ord('q'):
# server_socket.close()
# break
if cnt == frames_to_count:
try:
fps = round(frames_to_count/(time.time()-st))
if originalfps == 0:
originalfps = fps
st=time.time()
cnt=0
except:
pass
cnt+=1
def worker(s,host_ip,port, cid):
BUFF_SIZE = 65536
port = int(port)
CID = cid #host_ip+":"+str(port)
print("CID in worker: ", CID)
s = socket.socket()
s.connect((host_ip, port))
class writeproc(threading.Thread):
def __init__(self, server, CID):
threading.Thread.__init__(self)
self.server = server
self.CID = CID
def run(self):
recieved = 0
while True:
global configDict
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
if configDict[self.CID+"mode"] == "single" and configDict[self.CID+"stop"] == False:
configDict[self.CID+"stop"] = True
global imagebuffer
global distabcebuffer
if len(imagebuffer) == 0:
time.sleep(0.1)
continue
b = time.time()
x = recieved
message = imagebuffer
if 0 != configDict[self.CID+"p1x"] + configDict[self.CID+"p1y"] + configDict[self.CID+"p2x"] + configDict[self.CID+"p2y"]:
message = message[configDict[self.CID+"p1y"]:configDict[self.CID+"p2y"],configDict[self.CID+"p1x"]:configDict[self.CID+"p2x"]]
messagelen = str("%020d"%len(message))
if message.size > 0:
encoded,buffer = cv2.imencode('.jpg',message,[cv2.IMWRITE_JPEG_QUALITY,80])
lenlen = int(len(buffer))
s = "00000000"
blen = str(len(buffer))
#buf = s[:len(blen)-1]+blen
preamble = bytes(str("AB01")+blen.zfill(10), 'utf-8')
#print("buf: ", int(len(buffer)))
#print(type(buffer))
#print(type(buffer.shape))
#print(type(buffer.ndim))
#print(type(buffer.dtype))
#print(type(preamble))
#print(len(preamble))
self.server.send(preamble+buffer.tobytes())
#time.sleep(200)
key = cv2.waitKey(1) & 0xFF
#time.sleep(0.1)
#self.server.send(bytes((messagelen+message), "utf_8"))
lock.acquire()
recieved += 1
lock.release()
#if message == "BYE!":
# final.set()
# break
a = time.time()
sleep_dur = 1/configDict[self.CID+"fps"]-(a-b)
#print("sleeping ",sleep_dur)
while configDict[self.CID+"stop"] == True:
time.sleep(0.05)
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
sleep_dur = 0 # no need to wait anymore
if sleep_dur < 0:
#print("have to speed up")
continue
#print("duration:",a-b, " add sleeping ",sleep_dur)
time.sleep(sleep_dur)
class readproc(threading.Thread):
def __init__(self, server, CID):
threading.Thread.__init__(self)
self.server = server
self.CID = CID
def run(self):
global recieved
while True:
global configDict
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
recieved = 0
x = recieved
if final.is_set():
break
mlen = int(str(self.server.recv(20), 'utf_8'))
response = str(self.server.recv(mlen), 'utf_8')
lock.acquire()
recieved += 1
lock.release()
logging.info(response)
lElem = response.split(",")
command = lElem[0]
#logging.info("Command: "+ command)
if command == 'init':
# make a random UUID
logging.info("do reinit")
#CID = uuid.uuid4()
if command == 'update':
logging.debug("execute update")
configDict[self.CID+"fps"] = int(lElem[1])
configDict[self.CID+"p1x"] = int(lElem[2])
configDict[self.CID+"p1y"] = int(lElem[3])
configDict[self.CID+"p2x"] = int(lElem[4])
configDict[self.CID+"p2y"] = int(lElem[5])
continue
if command == 'stop':
continue
# print("---------------", recieved)
recieved = 0
for i in range(5):
time.sleep(0.1)
#try:
final = threading.Event()
lock = threading.Lock()
cwrite = writeproc(s,CID)
cread = readproc(s,CID)
cwrite.start()
cread.setDaemon(True)
cread.start()
cwrite.join()
s.close()
#except:
# print("worker terminated!")
def callRegistry(name):
#register ourself
global firstStart
if firstStart == True:
x = requests.post('http://'+CONFIGSERVERIP+':'+str(CONFIGSERVERRESTPORT)+'/addProducer', json={'prodname': name})
print(x.status_code)
firstStart = False
if x.ok != True:
return x.ok
#get consumers
x = requests.post('http://'+CONFIGSERVERIP+':'+str(CONFIGSERVERRESTPORT)+'/getConsumers', json={'prodname': name})
json_data = json.loads(x.text)
print(len(json_data))
type(json_data)
for val in json_data:
print(val)
print((json_data[val]["host"]))
global clients
host = json_data[val]["consname"]
CID = host
if CID not in clients:
print("new consumer ... add")
clients.append(host)
print("CID: ", CID)
global configDict
configDict[CID+"fps"] = json_data[val]["fps"]
configDict[CID+"p1x"] = json_data[val]["p1x"]
configDict[CID+"p1y"] = json_data[val]["p1y"]
configDict[CID+"p2x"] = json_data[val]["p2x"]
configDict[CID+"p2y"] = json_data[val]["p2y"]
configDict[CID+"mode"] = json_data[val]["mode"]
configDict[CID+"stop"] = json_data[val]["stop"]
configDict[CID+"terminate"] = False
if configDict[CID+"mode"] == "single" and configDict[CID+"stop"] == False:
configDict[CID+"stop"] = True
elif configDict[CID+"mode"] != "single":
configDict[CID+"stop"] = False
thread = threading.Thread(target=worker, args=(stop,json_data[val]["host"], json_data[val]["port"],CID,))
#configDict[CID+"tid"] = json_data[val]["stop"]
threads.append(thread)
thread.start()
else:
print("known consumer ... skip")
def configThread(stopMainApp, name):
while True:
try:
logging.debug("start config thread ")
INCONFsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
INCONFsock.connect((CONFIGSERVERIP, CONFIGSERVERPORT))
INCONFsock.send(bytes("Hallo Registry ,"+name+", ", 'utf-8'))
while True:
global clients
command = ''
# TODO check if we still need to do this
if len(clients) > 0:
print("New clients: ",str(len(clients)))
#newClient = clients.pop(0)
command = 'init'
buf = INCONFsock.recv(1024)
logging.debug(">>> Config REQUEST: "+str(buf,'utf-8'))
print(">>> Config REQUEST: "+str(buf,'utf-8'))
s = buf.decode("utf-8")
lElem = s.split(",")
command = lElem[0]
logging.debug("Command: "+ command)
print(lElem)
if command == 'init':
# make a random UUID
print("do init")
if command == 'update':
print("do update")
CID = (lElem[7])
if CID in clients:
if configDict[CID+"terminate"] == True: #remove client from previous iteration
clients.remove(CID)
if CID not in clients:
print("potential NEW consumer!")
callRegistry(name)
else:
print("update existing consumer")
configDict[CID+"fps"] = double(lElem[1])
configDict[CID+"p1x"] = int(lElem[2])
configDict[CID+"p1y"] = int(lElem[3])
configDict[CID+"p2x"] = int(lElem[4])
configDict[CID+"p2y"] = int(lElem[5])
configDict[CID+"mode"] = str(lElem[6])
if configDict[CID+"mode"] == "single":
configDict[CID+"stop"] = True
else:
configDict[CID+"stop"] = False
continue
if command == 'start':
print("do start")
CID = (lElem[1])
configDict[CID+"stop"] = False
continue
if command == 'stop':
print("do stop")
CID = (lElem[1])
configDict[CID+"stop"] = True
continue
if command == 'terminate':
print("do terminate")
CID = (lElem[1])
configDict[CID+"terminate"] = True
continue
if command == 'single':
print("do single picture")
CID = (lElem[7])
if CID not in clients:
print("potential NEW consumer!")
callRegistry(name)
else:
print("update existing consumer")
configDict[CID+"fps"] = double(lElem[1])
configDict[CID+"p1x"] = int(lElem[2])
configDict[CID+"p1y"] = int(lElem[3])
configDict[CID+"p2x"] = int(lElem[4])
configDict[CID+"p2y"] = int(lElem[5])
configDict[CID+"mode"] = int(lElem[6])
if configDict[CID+"mode"] == "single":
configDict[CID+"stop"] = True
else:
configDict[CID+"stop"] = False
continue
except:
print("exception in conf thread")
bufferlock = threading.Lock()
stop = False
stopMainApp = False
threads = []
clients = []
###### Load Config File ######
name = sys.argv[1]
config = configparser.ConfigParser()
config.sections()
config.read(name + '.ini')
print(">>: ",config['PRODUCER']['Camid'])
camid = int(config['PRODUCER']['Camid']) # id to identify camera (-1 ... file stream)
reghost = str(config['PRODUCER']['RHost']) # registry host
regport = int(config['PRODUCER']['RPort']) # registry port
regportrest = int(config['PRODUCER']['RPortRest']) # registry port rest-api
headless = bool(config['PRODUCER'].getboolean('Headless')) #
print(headless)
if 'reghost' not in locals():
print("set default ports for registry")
CONFIGSERVERIP = "127.0.0.1"
CONFIGSERVERPORT = 9997
CONFIGSERVERRESTPORT = 10000
else:
CONFIGSERVERIP = reghost
CONFIGSERVERPORT = regport
CONFIGSERVERRESTPORT = regportrest
callRegistry(name)
# config Thread (client config)
cthread = threading.Thread(target=configThread, args=(stopMainApp,name))
cthread.start()
# CLI Thread (reads args from commandline during runtime)
cthread = threading.Thread(target=readCLI, args=(stopMainApp,))
#cthread.start()
# capture Thread (reads buffer from camera)
cthread = threading.Thread(target=capture, args=(stopMainApp,name, camid, headless))
cthread.start()
| 34.553879 | 217 | 0.493357 | 1,597 | 16,033 | 4.909831 | 0.214152 | 0.067976 | 0.04553 | 0.014029 | 0.310165 | 0.270119 | 0.242061 | 0.15419 | 0.145262 | 0.145262 | 0 | 0.024722 | 0.371796 | 16,033 | 463 | 218 | 34.62851 | 0.753773 | 0.111769 | 0 | 0.371951 | 0 | 0 | 0.097696 | 0.004608 | 0 | 0 | 0.000567 | 0.00216 | 0 | 1 | 0.027439 | false | 0.003049 | 0.036585 | 0 | 0.073171 | 0.088415 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcfd579f248c6ae550fd05ab1865da2443b5ad26 | 2,303 | py | Python | BioClients/chebi/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 10 | 2020-05-26T07:29:14.000Z | 2021-12-06T21:33:40.000Z | BioClients/chebi/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 1 | 2021-10-05T12:25:30.000Z | 2021-10-05T17:05:56.000Z | BioClients/chebi/Client.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 2 | 2021-03-16T03:20:24.000Z | 2021-08-08T20:17:10.000Z | #!/usr/bin/env python3
"""
Utility for ChEBI SOAP API.
* https://www.ebi.ac.uk/chebi/webServices.do
"""
###
import sys,os,re,json,argparse,time,logging
import pandas as pd
#
from .. import chebi
#
##############################################################################
if __name__=='__main__':
epilog="Example entity IDs: 30273,33246,24433"
parser = argparse.ArgumentParser(description='ChEBI SOAP API client', epilog=epilog)
ops = [
"get_entity",
"get_entity_children",
"get_entity_parents",
"search"
]
parser.add_argument("op", choices=ops, help='OPERATION (select one)')
parser.add_argument("--ids", help="input IDs")
parser.add_argument("--i", dest="ifile", help="input file, IDs")
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--query", help="search query (SMILES)")
parser.add_argument("--skip", type=int, default=0)
parser.add_argument("--nmax", type=int, default=None)
parser.add_argument("--api_host", default=chebi.Utils.API_HOST)
parser.add_argument("--api_base_path", default=chebi.Utils.API_BASE_PATH)
parser.add_argument("-v","--verbose", action="count", default=0)
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
base_url='https://'+args.api_host+args.api_base_path
fout = open(args.ofile, 'w') if args.ofile else sys.stdout
ids=[]
if args.ifile:
fin = open(args.ifile)
while True:
line = fin.readline()
if not line: break
ids.append(line.rstrip())
fin.close()
elif args.ids:
ids = re.split('[, ]+', args.ids.strip())
if len(ids)>0: logging.info('Input IDs: %d'%(len(ids)))
if args.op[:3]=="get" and not (args.ifile or args.ids):
parser.error(f"--i or --ids required for operation {args.op}.")
if args.op == "get_entity":
chebi.Utils.GetEntity(ids, base_url, fout)
elif args.op == "get_entity_children":
chebi.Utils.GetEntityChildren(ids, base_url, fout)
elif args.op == "get_entity_parents":
chebi.Utils.GetEntityParents(ids, base_url, fout)
elif args.op == "search":
parser.error(f'Not yet implemented: {args.op}')
#chebi.Utils.Search(args.query, base_url, fout)
else:
parser.error(f'Invalid operation: {args.op}')
| 32.43662 | 116 | 0.659574 | 327 | 2,303 | 4.513761 | 0.382263 | 0.060976 | 0.115176 | 0.030488 | 0.060976 | 0.060976 | 0.060976 | 0.044715 | 0.044715 | 0 | 0 | 0.010537 | 0.134607 | 2,303 | 70 | 117 | 32.9 | 0.730055 | 0.061224 | 0 | 0 | 0 | 0 | 0.238164 | 0.012077 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061224 | 0 | 0.061224 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcfdd812b058711aa5ae957f61479006e6bfa153 | 9,793 | py | Python | Coach.py | NetasDev/NetworkTraining | 1683309f4638d69f2c4be60dfd12ba821be6d961 | [
"MIT"
] | null | null | null | Coach.py | NetasDev/NetworkTraining | 1683309f4638d69f2c4be60dfd12ba821be6d961 | [
"MIT"
] | null | null | null | Coach.py | NetasDev/NetworkTraining | 1683309f4638d69f2c4be60dfd12ba821be6d961 | [
"MIT"
] | null | null | null | import logging
import os
import sys
from collections import deque
from pickle import Pickler, Unpickler
from random import shuffle
from time import perf_counter
from othello.OthelloPlayers import *
import wandb
import numpy as np
from tqdm import tqdm
from Arena import Arena
from MCTS import MCTS
log = logging.getLogger(__name__)
class Coach():
"""
This class executes the self-play + learning. It uses the functions defined
in Game and NeuralNet. args are specified in main.py.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.pnet = self.nnet.__class__(self.game) # the competitor network
self.args = args
self.mcts = MCTS(self.game, self.nnet, self.args)
self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations
self.skipFirstSelfPlay = False # can be overriden in loadTrainExamples()
def executeEpisode(self):
"""
This function executes one episode of self-play, starting with player 1.
As the game is played, each turn is added as a training example to
trainExamples. The game is played till the game ends. After the game
ends, the outcome of the game is used to assign values to each example
in trainExamples.
It uses a temp=1 if episodeStep < tempThreshold, and thereafter
uses temp=0.
Returns:
trainExamples: a list of examples of the form (canonicalBoard, currPlayer, pi,v)
pi is the MCTS informed policy vector, v is +1 if
the player eventually won the game, else -1.
"""
trainExamples = []
board = self.game.getInitBoard()
self.curPlayer = 1
episodeStep = 0
while True:
#self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree
episodeStep += 1
canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)
temp = int(episodeStep < self.args.tempThreshold)
pi = self.mcts.getActionProb(canonicalBoard, temp=temp)
sym = self.game.getSymmetries(canonicalBoard, pi)
for b, p in sym:
trainExamples.append([b, self.curPlayer, p, None])
action = np.random.choice(len(pi), p=pi)
board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)
r = self.game.getGameEnded(board, self.curPlayer)
if r != 0:
if r!=1 and r!= -1:
return [(x[0],x[2],r) for x in trainExamples]
else:
return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]
def learn(self):
"""
Performs numIters iterations with numEps episodes of self-play in each
iteration. After every iteration, it retrains neural network with
examples in trainExamples (which has a maximum length of maxlenofQueue).
It then pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
start_time = perf_counter()
time_this_iteration = start_time
all_time_selfplay,all_time_training,all_time_validation,all_wins,all_looses,all_draws,all_games,generation = 0,0,0,0,0,0,0,0
for i in range(1, self.args.numIters + 1):
start_time_selfplay = perf_counter()
# start of all/selfplay
# bookkeeping
log.info(f'Starting Iter #{i} ...')
# examples of the iteration
if not self.skipFirstSelfPlay or i > 1:
iterationTrainExamples = []
for _ in tqdm(range(self.args.numEps), desc="Self Play"):
self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree
iterationTrainExamples += self.executeEpisode()
# save the iteration examples to the history
self.trainExamplesHistory.append(iterationTrainExamples)
if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:
log.warning(
f"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}")
self.trainExamplesHistory.pop(0)
# backup history to a file
# NB! the examples were collected using the model from the previous iteration, so (i-1)
self.saveTrainExamples(i - 1)
# shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
#end of selfplay/start of training network
start_time_training = perf_counter()
# training new network, keeping a copy of the old one
if perf_counter() - start_time >self.args.maxtime:
print(perf_counter()-start_time)
break
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp')
self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp')
pmctsplayer = NeuralNetworkPlayer(self.game,self.pnet,self.args)
loss,pi_loss,v_loss = self.nnet.train(trainExamples)
nmctsplayer = NeuralNetworkPlayer(self.game,self.nnet,self.args)
# end of selfplay/start of validation
start_time_validation = perf_counter()
log.info('PITTING AGAINST PREVIOUS VERSION')
arena = Arena(pmctsplayer,nmctsplayer,self.game,self.args.tempThreshold)
pwins, nwins, draws = arena.playGames(self.args.arenaCompare)
log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))
if pwins + nwins == 0 or float(nwins+0.45*draws) / (pwins + nwins + draws) < self.args.updateThreshold:
log.info('REJECTING NEW MODEL')
self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp')
else:
log.info('ACCEPTING NEW MODEL')
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best')
generation = generation + 1
# end of Validation
# wandb speicherung anfang
end_time_validation = perf_counter()
time_this_iteration = perf_counter() - start_time_selfplay
all_time = perf_counter()-start_time
selfplay_time_iteration = start_time_training - start_time_selfplay
training_time_iteration = start_time_validation - start_time_training
validation_time_iteration = end_time_validation- start_time_validation
all_time_selfplay = all_time_selfplay + start_time_training - start_time_selfplay
all_time_training = all_time_training + start_time_validation - start_time_training
all_time_validation = all_time_validation + end_time_validation- start_time_validation
games = pwins+nwins+draws
all_wins = all_wins + nwins
all_looses = all_looses + pwins
all_draws = all_draws + draws
all_games = all_games + pwins + nwins + draws
wandb.log({"Wins":nwins,"Losses":pwins,"Draws":draws,"Win-Rate":nwins/games,"Overall Win-Rate":all_wins/all_games,
"Overall Draw-Rate":all_draws/all_games,"Time":all_time,"Selfplay-Time Iteration":selfplay_time_iteration,
"Training-Time Iteration":training_time_iteration,"Validation-Time Iteration":validation_time_iteration,
"Selfplay-Time Iteration %":selfplay_time_iteration*100/time_this_iteration,
"Traing-Time Iteration %":training_time_iteration*100/time_this_iteration,
"Validation-Time Iteration %":validation_time_iteration*100/time_this_iteration,
"Selfplay-Time":all_time_selfplay,"Selfplay-Time %":all_time_selfplay*100/all_time,
"Training-Time":all_time_training,"Training-Time %":all_time_training*100/all_time,
"Validation-Time":all_time_validation,"Validation-Time %":all_time_validation*100/all_time,
"Generation":generation,"loss":loss,"pi_loss":pi_loss,"v_loss":v_loss
})
#wandb speicherung ende
def getCheckpointFile(self, iteration):
return 'checkpoint_' + str(iteration)
def saveTrainExamples(self, iteration):
folder = self.args.checkpoint
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, self.getCheckpointFile(iteration) + ".examples")
with open(filename, "wb+") as f:
Pickler(f).dump(self.trainExamplesHistory)
f.closed
def loadTrainExamples(self):
modelFile = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])
examplesFile = modelFile + ".examples"
if not os.path.isfile(examplesFile):
log.warning(f'File "{examplesFile}" with trainExamples not found!')
r = input("Continue? [y|n]")
if r != "y":
sys.exit()
else:
log.info("File with trainExamples found. Loading it...")
with open(examplesFile, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
log.info('Loading done!')
# examples based on the model were already collected (loaded)
self.skipFirstSelfPlay = True
| 44.716895 | 133 | 0.632799 | 1,143 | 9,793 | 5.284339 | 0.230096 | 0.029139 | 0.01457 | 0.023841 | 0.248179 | 0.168377 | 0.097682 | 0.066225 | 0.049669 | 0.016887 | 0 | 0.007934 | 0.279281 | 9,793 | 219 | 134 | 44.716895 | 0.847832 | 0.184111 | 0 | 0.051095 | 0 | 0 | 0.094432 | 0.007313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043796 | false | 0 | 0.094891 | 0.007299 | 0.167883 | 0.007299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcfec011a1b81ce9c217c8eefeb848ef377b3c27 | 1,351 | py | Python | tests/gold_tests/headers/via-observer.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/headers/via-observer.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/headers/via-observer.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | 1 | 2020-06-17T11:31:22.000Z | 2020-06-17T11:31:22.000Z | '''
Extract the protocol information from the VIA headers and store it in a log file for later verification.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
log = open('via.log', 'w')
rxp = re.compile('(\S+)\s+(\S+)\s\((\S+)\s+\[([^]]+)\]\s\[([^]]+)\]\s*\)')
def observe(headers):
if 'via' in headers:
via = headers['via']
if via:
via = rxp.sub(r'\1 = \5', via)
else:
via = '---empty---'
else:
via = '---missing---'
log.write("Via: {}\n".format(via))
log.flush()
Hooks.register(Hooks.ReadRequestHook, observe)
| 33.775 | 104 | 0.661732 | 194 | 1,351 | 4.608247 | 0.546392 | 0.01566 | 0.020134 | 0.022371 | 0.008949 | 0.008949 | 0.008949 | 0 | 0 | 0 | 0 | 0.005623 | 0.210215 | 1,351 | 39 | 105 | 34.641026 | 0.83224 | 0.645448 | 0 | 0.133333 | 0 | 0 | 0.236324 | 0.118162 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcff7d1a940e3714715c9d14f1f889fc1690f9b3 | 978 | py | Python | cpa/profiling/accuracy.py | oba14/CellProfiler-Analyst | 7e194e72e8ce8440ce7c16196cb91c50052c2df1 | [
"MIT"
] | 1 | 2020-01-16T14:24:08.000Z | 2020-01-16T14:24:08.000Z | cpa/profiling/accuracy.py | oba14/CellProfiler-Analyst | 7e194e72e8ce8440ce7c16196cb91c50052c2df1 | [
"MIT"
] | null | null | null | cpa/profiling/accuracy.py | oba14/CellProfiler-Analyst | 7e194e72e8ce8440ce7c16196cb91c50052c2df1 | [
"MIT"
] | null | null | null | """
Compute the overall accuracy of a confusion matrix
"""
from __future__ import print_function
import sys
from optparse import OptionParser
import numpy as np
import cpa.util
from cpa.profiling.confusion import confusion_matrix, load_confusion
# Command-line interface: one positional argument (the confusion-matrix
# file) plus optional flags.
parser = OptionParser("usage: %prog [options] CONFUSION")
parser.add_option('-f', dest='float', action='store_true',
                  help='use floating-point accuracies')
parser.add_option('-o', dest='output_filename',
                  help='file to store the profiles in')
options, args = parser.parse_args()

if len(args) != 1:
    parser.error('Incorrect number of arguments')
(input_filename,) = args

confusion = load_confusion(input_filename)
# Dtype character for the matrix: float when -f was given, int otherwise.
# (Replaces the opaque indexing trick "'if'[options.float or 0]".)
cm = confusion_matrix(confusion, 'f' if options.float else 'i')

# Overall accuracy: fraction of the matrix mass on the diagonal, in percent.
acc = 100.0 * np.diag(cm).sum() / cm.sum()


def write_output(f):
    """Write the accuracy, rounded to a whole percent, to stream *f*."""
    print('%.0f%%' % acc, file=f)


if options.output_filename:
    # Atomic replacement: a partially written file never clobbers an
    # existing complete one.
    with cpa.util.replace_atomically(options.output_filename) as f:
        write_output(f)
else:
    write_output(sys.stdout)
| 29.636364 | 96 | 0.743354 | 141 | 978 | 5 | 0.510638 | 0.06383 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008226 | 0.129857 | 978 | 32 | 97 | 30.5625 | 0.820212 | 0.051125 | 0 | 0 | 0 | 0 | 0.17519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.26087 | 0 | 0.304348 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d03e7daf00c1563b1b6fe2d0009cfcb5c0c684e | 4,813 | py | Python | vigir_flexbe_states/src/vigir_flexbe_states/goto_single_arm_joint_config_state.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-08-25T18:47:52.000Z | 2019-12-04T21:40:28.000Z | vigir_flexbe_states/src/vigir_flexbe_states/goto_single_arm_joint_config_state.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 2 | 2017-08-16T16:09:47.000Z | 2020-08-18T17:25:22.000Z | vigir_flexbe_states/src/vigir_flexbe_states/goto_single_arm_joint_config_state.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-11-06T21:57:37.000Z | 2022-03-30T10:15:57.000Z | #!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
from control_msgs.msg import *
from trajectory_msgs.msg import *
"""
Created on 05/18/2015
@author: Spyros Maniatopoulos
"""
class GotoSingleArmJointConfigState(EventState):
    """
    Directly commands the trajectory/joint controllers to move a
    single joint to the desired configuration.

    -- target_config int Identifier of the pre-defined pose to be used.
    -- arm_side string Arm side {left, right}

    ># current_config dict The current arm joint positions
    joint_names string[] : joint_values[]

    <= done Successfully executed the motion.
    <= failed Failed to execute the motion.
    """

    # Pre-defined target identifiers (keys of the per-robot config tables
    # built in __init__).
    # Wrists
    WRIST_CCW = 11
    WRIST_CW = 12

    # Forearms
    # ...

    def __init__(self, arm_side, target_config, time = 2.0):
        """Constructor

        arm_side: 'left' or 'right' — selects both the config table entry
                  and the trajectory controller action topic.
        target_config: one of the identifiers above (e.g. WRIST_CCW).
        time: seconds allotted for the motion (trajectory point duration).
        """
        super(GotoSingleArmJointConfigState, self).__init__(outcomes = ['done', 'failed'],
                                                            input_keys = ['current_config'])

        # NOTE(review): returning early from __init__ on a missing ROS
        # parameter leaves the state partially constructed (no _client,
        # _joint_name, ...); later lifecycle calls would then fail.
        if not rospy.has_param("behavior/robot_namespace"):
            Logger.logerr("Need to specify parameter behavior/robot_namespace at the parameter server")
            return
        self._robot = rospy.get_param("behavior/robot_namespace")

        if not rospy.has_param("behavior/joint_controllers_name"):
            Logger.logerr("Need to specify parameter behavior/joint_controllers_name at the parameter server")
            return
        controller_namespace = rospy.get_param("behavior/joint_controllers_name")

        # Per-robot lookup tables: robot namespace -> arm side ->
        # target_config id -> {joint_name, joint_value (radians)}.
        ################################ ATLAS ################################
        self._configs = dict()
        self._configs['flor'] = dict()
        self._configs['flor']['left'] = {
            11: {'joint_name': 'l_arm_wry2', 'joint_value': -2.5},
            12: {'joint_name': 'l_arm_wry2', 'joint_value': +2.5}
        }
        self._configs['flor']['right'] = {
            11: {'joint_name': 'r_arm_wry2', 'joint_value': +2.5},
            12: {'joint_name': 'r_arm_wry2', 'joint_value': -2.5}
        }
        ################################ THOR #################################
        self._configs['thor_mang'] = dict()
        self._configs['thor_mang']['left'] = {
            11: {'joint_name': 'l_wrist_yaw2', 'joint_value': 3.84},
            12: {'joint_name': 'l_wrist_yaw2', 'joint_value': -3.84}
        }
        self._configs['thor_mang']['right'] = {
            11: {'joint_name': 'r_wrist_yaw2', 'joint_value': -3.84},
            12: {'joint_name': 'r_wrist_yaw2', 'joint_value': 3.84}
        }
        #######################################################################

        # Raises KeyError if the robot/side/config combination is unknown.
        self._joint_name = self._configs[self._robot][arm_side][target_config]['joint_name']
        self._joint_value = self._configs[self._robot][arm_side][target_config]['joint_value']

        self._time = time

        self._action_topic = "/" + controller_namespace + "/" + arm_side + \
                             "_arm_traj_controller" + "/follow_joint_trajectory"

        self._client = ProxyActionClient({self._action_topic: FollowJointTrajectoryAction})

        # Latched failure flag; once set, execute() keeps returning 'failed'.
        self._failed = False

    def execute(self, userdata):
        """Execute this state

        Polls the action client; returns 'done'/'failed' once a result is
        available, or None (implicitly) to stay active while waiting.
        """
        if self._failed:
            return 'failed'

        if self._client.has_result(self._action_topic):
            result = self._client.get_result(self._action_topic)
            if result:
                if result.error_code == FollowJointTrajectoryResult.SUCCESSFUL:
                    return 'done'
                else:
                    Logger.logwarn('Joint trajectory failed to execute (%d). Reason: %s' % (result.error_code, result.error_string))
                    self._failed = True
                    return 'failed'
            else:
                Logger.logwarn('Wait for result returned True even though the result is %s' % str(result))
                self._failed = True
                return 'failed'

    def on_enter(self, userdata):
        '''On enter, create and send the follow joint trajectory action goal.'''
        self._failed = False

        current_config = userdata.current_config

        # Get the index of the joint whose value will be replaced
        index_of_joint = current_config['joint_names'].index(self._joint_name)

        # Replace the old joint value with the target_config's one
        # NOTE(review): this mutates the userdata list in place — presumably
        # intentional so downstream states see the commanded value; confirm.
        new_values = current_config['joint_values']
        new_values[index_of_joint] = self._joint_value

        # Create trajectory point out of the revised joint values
        point = JointTrajectoryPoint(
            positions = new_values,
            time_from_start = rospy.Duration.from_sec(self._time))

        # Create trajectory message
        trajectory = JointTrajectory(
            joint_names = current_config['joint_names'],
            points = [point])

        action_goal = FollowJointTrajectoryGoal(trajectory = trajectory)

        # execute the motion
        try:
            self._client.send_goal(self._action_topic, action_goal)
        except Exception as e:
            Logger.logwarn('Failed to send trajectory action goal:\n%s' % str(e))
            self._failed = True

    def on_exit(self, userdata):
        '''Destructor'''
        # Cancel the goal if it is still in flight when the state is left.
        if not self._client.has_result(self._action_topic):
            self._client.cancel(self._action_topic)
            Logger.loginfo("Cancelled active action goal.")
| 31.457516 | 117 | 0.677748 | 606 | 4,813 | 5.112211 | 0.292079 | 0.038735 | 0.033893 | 0.02195 | 0.23725 | 0.178179 | 0.161394 | 0.109748 | 0.109748 | 0 | 0 | 0.01441 | 0.163723 | 4,813 | 152 | 118 | 31.664474 | 0.75528 | 0.206108 | 0 | 0.144578 | 0 | 0 | 0.244001 | 0.050957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.060241 | 0 | 0.216867 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d0559140c74316b4c9189d66f026718d182678f | 4,008 | py | Python | homeassistant/components/litterrobot/hub.py | kristianheljas/homeassistant-core | eb7220ff262fe81d53c929884107bcedc8af7850 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/litterrobot/hub.py | kristianheljas/homeassistant-core | eb7220ff262fe81d53c929884107bcedc8af7850 | [
"Apache-2.0"
] | 51 | 2020-10-14T01:19:07.000Z | 2022-03-31T06:02:48.000Z | homeassistant/components/litterrobot/hub.py | kristianheljas/homeassistant-core | eb7220ff262fe81d53c929884107bcedc8af7850 | [
"Apache-2.0"
] | 1 | 2021-08-16T02:53:15.000Z | 2021-08-16T02:53:15.000Z | """A wrapper 'hub' for the Litter-Robot API and base entity for common attributes."""
from datetime import time, timedelta
import logging
from types import MethodType
from typing import Any, Optional
from pylitterbot import Account, Robot
from pylitterbot.exceptions import LitterRobotException, LitterRobotLoginException
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)

# Seconds to wait after a commanded action before asking the coordinator
# to refresh robot state (gives the cloud API time to reflect the change).
REFRESH_WAIT_TIME = 12
# Polling interval, in seconds, for the DataUpdateCoordinator.
UPDATE_INTERVAL = 10
class LitterRobotHub:
    """A Litter-Robot hub wrapper class."""

    def __init__(self, hass: HomeAssistant, data: dict):
        """Initialize the Litter-Robot hub.

        data: config entry data; must contain CONF_USERNAME and
        CONF_PASSWORD (read later by login()).
        """
        self._data = data
        self.account = None
        self.logged_in = False

        async def _async_update_data():
            """Update all device states from the Litter-Robot API."""
            # NOTE(review): assumes login() has already run so that
            # self.account is a connected Account — confirm call order.
            await self.account.refresh_robots()
            return True

        self.coordinator = DataUpdateCoordinator(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_method=_async_update_data,
            update_interval=timedelta(seconds=UPDATE_INTERVAL),
        )

    async def login(self, load_robots: bool = False):
        """Login to Litter-Robot.

        Returns True on success. Logs and re-raises
        LitterRobotLoginException (bad credentials) and
        LitterRobotException (connection problems).
        """
        self.logged_in = False
        self.account = Account()
        try:
            await self.account.connect(
                username=self._data[CONF_USERNAME],
                password=self._data[CONF_PASSWORD],
                load_robots=load_robots,
            )
            self.logged_in = True
            return self.logged_in
        except LitterRobotLoginException as ex:
            _LOGGER.error("Invalid credentials")
            raise ex
        except LitterRobotException as ex:
            _LOGGER.error("Unable to connect to Litter-Robot API")
            raise ex
class LitterRobotEntity(CoordinatorEntity):
    """Generic Litter-Robot entity representing common data and methods."""

    def __init__(self, robot: Robot, entity_type: str, hub: LitterRobotHub):
        """Pass coordinator to CoordinatorEntity."""
        super().__init__(hub.coordinator)
        self.robot = robot
        # Fall back to an empty string so name/unique_id stay well-formed.
        self.entity_type = entity_type or ""
        self.hub = hub

    @property
    def name(self):
        """Return the name of this entity."""
        return f"{self.robot.name} {self.entity_type}"

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"{self.robot.serial}-{self.entity_type}"

    @property
    def device_info(self):
        """Return the device information for a Litter-Robot."""
        serial = self.robot.serial
        if serial.startswith("LR3C"):
            model = "Litter-Robot 3 Connect"
        else:
            model = "Other Litter-Robot Connected Device"
        return {
            "identifiers": {(DOMAIN, serial)},
            "name": self.robot.name,
            "manufacturer": "Litter-Robot",
            "model": model,
        }

    async def perform_action_and_refresh(self, action: MethodType, *args: Any):
        """Perform an action and initiates a refresh of the robot data after a few seconds."""
        await action(*args)
        # Delay the refresh so the cloud API has time to reflect the action.
        async_call_later(
            self.hass, REFRESH_WAIT_TIME, self.hub.coordinator.async_request_refresh
        )

    @staticmethod
    def parse_time_at_default_timezone(time_str: str) -> Optional[time]:
        """Parse a time string and add default timezone."""
        parsed = dt_util.parse_time(time_str)
        if parsed is None:
            return None
        # Rebuild at whole-second precision with the default timezone.
        return time(
            parsed.hour,
            parsed.minute,
            parsed.second,
            tzinfo=dt_util.DEFAULT_TIME_ZONE,
        )
| 32.585366 | 94 | 0.645709 | 456 | 4,008 | 5.491228 | 0.309211 | 0.048323 | 0.019169 | 0.013578 | 0.019968 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002043 | 0.267216 | 4,008 | 122 | 95 | 32.852459 | 0.850528 | 0.099551 | 0 | 0.078652 | 0 | 0 | 0.069281 | 0.011203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067416 | false | 0.022472 | 0.134831 | 0 | 0.303371 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |