max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/pkgcheck/checks/deprecated.py | chutz/pkgcheck | 0 | 6613051 | <reponame>chutz/pkgcheck<gh_stars>0
from snakeoil.mappings import ImmutableDict
from snakeoil.strings import pluralism as _pl
from .. import results
from . import Check
class DeprecatedEclass(results.VersionResult, results.Warning):
    """Package uses an eclass that is deprecated/abandoned."""
    def __init__(self, eclasses, **kwargs):
        """Record the (old_eclass, replacement) pairs found for the package."""
        super().__init__(**kwargs)
        self.eclasses = tuple(eclasses)
    @property
    def desc(self):
        """Readable summary listing each deprecated eclass and its migration path."""
        entries = [
            f'{old} (migrate to {new})' if new else f'{old} (no replacement)'
            for old, new in self.eclasses
        ]
        return "uses deprecated eclass%s: [ %s ]" % (
            _pl(entries, plural='es'), ', '.join(entries))
class DeprecatedEclassCheck(Check):
    """Check for ebuilds using deprecated eclasses."""
    known_results = frozenset([DeprecatedEclass])
    # Mapping of deprecated eclass name -> replacement.  Values are one of:
    #   None            -- no replacement exists
    #   str             -- replacement eclass name or migration hint
    #   (str, callable) -- replacement hint plus a predicate; the eclass is
    #                      only flagged when the predicate returns True for
    #                      the package (used for EAPI-dependent deprecations).
    blacklist = ImmutableDict({
        '64-bit': None,
        'autotools-multilib': 'multilib-minimal',
        'autotools-utils': None,
        'base': None,
        'bash-completion': 'bash-completion-r1',
        'boost-utils': None,
        'clutter': 'gnome2',
        'confutils': None,
        'darcs': None,
        'distutils': 'distutils-r1',
        'db4-fix': None,
        'debian': None,
        'embassy-2.10': None,
        'embassy-2.9': None,
        'epatch': (
            'eapply from EAPI 6',
            lambda pkg: 'eapply' in pkg.eapi.bash_funcs),
        'fdo-mime': 'xdg-utils',
        'games': None,
        'gems': 'ruby-fakegem',
        'git': 'git-r3',
        'git-2': 'git-r3',
        'gcc': None,
        'gnustep-old': None,
        'gpe': None,
        'gst-plugins-bad': 'gstreamer',
        'gst-plugins-base': 'gstreamer',
        'gst-plugins-good': 'gstreamer',
        'gst-plugins-ugly': 'gstreamer',
        'gst-plugins10': 'gstreamer',
        'gtk-engines': None,
        'gtk-engines2': None,
        'inherit': None,
        'jakarta-commons': None,
        'java-pkg': None,
        'java-utils': None,
        'kde-base': None,
        'kde-i18n': None,
        'kde4-meta-pkg': 'kde5-meta-pkg',
        'kde-source': None,
        'kmod': None,
        'koffice-i18n': None,
        'mono': 'mono-env',
        'mozconfig': None,
        'mozconfig-2': 'mozconfig-3',
        'mozcoreconf': 'mozcoreconf-2',
        'motif': None,
        'mozilla': None,
        'myth': None,
        'pcmcia': None,
        'perl-post': None,
        'php': None,
        'php-2': None,
        'php-ext': None,
        'php-ext-base': None,
        'php-ext-pecl': None,
        'php-ext-pecl-r1': 'php-ext-pecl-r2',
        'php-ext-source': None,
        'php-ext-source-r1': 'php-ext-source-r2',
        'php-lib': None,
        'php-pear': 'php-pear-r1',
        'php-sapi': None,
        'php5-sapi': None,
        'php5-sapi-r1': None,
        'php5-sapi-r2': None,
        'php5-sapi-r3': None,
        'python': 'python-r1 / python-single-r1 / python-any-r1',
        'python-distutils-ng': 'python-r1 + distutils-r1',
        'qt3': None,
        'qt4': 'qt4-r2',
        'ruby': 'ruby-ng',
        'ruby-gnome2': 'ruby-ng-gnome2',
        'tla': None,
        'ltprune': None,
        'user': 'acct-user/acct-group packages',
        'versionator': (
            'ver_* functions from EAPI 7',
            lambda pkg: 'ver_cut' in pkg.eapi.bash_funcs),
        'vim': None,
        'webapp-apache': None,
        'x-modular': 'xorg-2',
        'xfconf': None,
        'xfree': None,
    })
    # Append the full deprecated-eclass list to the class docstring so it
    # appears in generated documentation/help output.
    __doc__ = "Scan for deprecated eclass usage.\n\ndeprecated eclasses: %s\n" % \
        ", ".join(sorted(blacklist))
    def feed(self, pkg):
        """Yield a DeprecatedEclass result if *pkg* inherits blacklisted eclasses."""
        deprecated_eclasses = []
        for eclass in set(pkg.inherit).intersection(self.blacklist):
            replacement = self.blacklist[eclass]
            if isinstance(replacement, tuple):
                # Conditional deprecation: unpack (hint, predicate) and only
                # flag the eclass when the predicate holds for this package.
                replacement, conditional = replacement
                if not conditional(pkg):
                    continue
            deprecated_eclasses.append((eclass, replacement))
        if deprecated_eclasses:
            yield DeprecatedEclass(sorted(deprecated_eclasses), pkg=pkg)
| from snakeoil.mappings import ImmutableDict
from snakeoil.strings import pluralism as _pl
from .. import results
from . import Check
class DeprecatedEclass(results.VersionResult, results.Warning):
    """Package uses an eclass that is deprecated/abandoned."""
    def __init__(self, eclasses, **kwargs):
        """Record the (old_eclass, replacement) pairs found for the package."""
        super().__init__(**kwargs)
        self.eclasses = tuple(eclasses)
    @property
    def desc(self):
        """Readable summary listing each deprecated eclass and its migration path."""
        entries = [
            f'{old} (migrate to {new})' if new else f'{old} (no replacement)'
            for old, new in self.eclasses
        ]
        return "uses deprecated eclass%s: [ %s ]" % (
            _pl(entries, plural='es'), ', '.join(entries))
class DeprecatedEclassCheck(Check):
"""Check for ebuilds using deprecated eclasses."""
known_results = frozenset([DeprecatedEclass])
blacklist = ImmutableDict({
'64-bit': None,
'autotools-multilib': 'multilib-minimal',
'autotools-utils': None,
'base': None,
'bash-completion': 'bash-completion-r1',
'boost-utils': None,
'clutter': 'gnome2',
'confutils': None,
'darcs': None,
'distutils': 'distutils-r1',
'db4-fix': None,
'debian': None,
'embassy-2.10': None,
'embassy-2.9': None,
'epatch': (
'eapply from EAPI 6',
lambda pkg: 'eapply' in pkg.eapi.bash_funcs),
'fdo-mime': 'xdg-utils',
'games': None,
'gems': 'ruby-fakegem',
'git': 'git-r3',
'git-2': 'git-r3',
'gcc': None,
'gnustep-old': None,
'gpe': None,
'gst-plugins-bad': 'gstreamer',
'gst-plugins-base': 'gstreamer',
'gst-plugins-good': 'gstreamer',
'gst-plugins-ugly': 'gstreamer',
'gst-plugins10': 'gstreamer',
'gtk-engines': None,
'gtk-engines2': None,
'inherit': None,
'jakarta-commons': None,
'java-pkg': None,
'java-utils': None,
'kde-base': None,
'kde-i18n': None,
'kde4-meta-pkg': 'kde5-meta-pkg',
'kde-source': None,
'kmod': None,
'koffice-i18n': None,
'mono': 'mono-env',
'mozconfig': None,
'mozconfig-2': 'mozconfig-3',
'mozcoreconf': 'mozcoreconf-2',
'motif': None,
'mozilla': None,
'myth': None,
'pcmcia': None,
'perl-post': None,
'php': None,
'php-2': None,
'php-ext': None,
'php-ext-base': None,
'php-ext-pecl': None,
'php-ext-pecl-r1': 'php-ext-pecl-r2',
'php-ext-source': None,
'php-ext-source-r1': 'php-ext-source-r2',
'php-lib': None,
'php-pear': 'php-pear-r1',
'php-sapi': None,
'php5-sapi': None,
'php5-sapi-r1': None,
'php5-sapi-r2': None,
'php5-sapi-r3': None,
'python': 'python-r1 / python-single-r1 / python-any-r1',
'python-distutils-ng': 'python-r1 + distutils-r1',
'qt3': None,
'qt4': 'qt4-r2',
'ruby': 'ruby-ng',
'ruby-gnome2': 'ruby-ng-gnome2',
'tla': None,
'ltprune': None,
'user': 'acct-user/acct-group packages',
'versionator': (
'ver_* functions from EAPI 7',
lambda pkg: 'ver_cut' in pkg.eapi.bash_funcs),
'vim': None,
'webapp-apache': None,
'x-modular': 'xorg-2',
'xfconf': None,
'xfree': None,
})
__doc__ = "Scan for deprecated eclass usage.\n\ndeprecated eclasses: %s\n" % \
", ".join(sorted(blacklist))
def feed(self, pkg):
deprecated_eclasses = []
for eclass in set(pkg.inherit).intersection(self.blacklist):
replacement = self.blacklist[eclass]
if isinstance(replacement, tuple):
replacement, conditional = replacement
if not conditional(pkg):
continue
deprecated_eclasses.append((eclass, replacement))
if deprecated_eclasses:
yield DeprecatedEclass(sorted(deprecated_eclasses), pkg=pkg) | en | 0.758048 | Package uses an eclass that is deprecated/abandoned. Check for ebuilds using deprecated eclasses. | 2.041956 | 2 |
smooth/framework/functions/save_results.py | LauWien/smooth | 5 | 6613052 | import datetime
import pickle
def save_results(file_name, result_data):
    """Save the result of either a smooth run or an optimization run by the genetic algorithm.

    The data is pickled to ``<YYYY-mm-dd_HH-MM-SS>_<file_name>.pickle`` in the
    current working directory, so repeated runs never overwrite each other.

    :param file_name: name of the result pickle file
    :type file_name: string
    :param result_data: data to save
    """
    # Prefix the given name with the current date and time.
    name_pattern = "%Y-%m-%d_%H-%M-%S_{}.pickle".format(file_name)
    target_path = datetime.datetime.now().strftime(name_pattern)
    # Serialize the result data into the timestamped file.
    with open(target_path, 'wb') as save_file:
        pickle.dump(result_data, save_file)
| import datetime
import pickle
def save_results(file_name, result_data):
    """Save the result of either a smooth run or an optimization run by the genetic algorithm.

    The data is pickled to ``<YYYY-mm-dd_HH-MM-SS>_<file_name>.pickle`` in the
    current working directory, so repeated runs never overwrite each other.

    :param file_name: name of the result pickle file
    :type file_name: string
    :param result_data: data to save
    """
    # Prefix the given name with the current date and time.
    name_pattern = "%Y-%m-%d_%H-%M-%S_{}.pickle".format(file_name)
    target_path = datetime.datetime.now().strftime(name_pattern)
    # Serialize the result data into the timestamped file.
    with open(target_path, 'wb') as save_file:
        pickle.dump(result_data, save_file)
| en | 0.756255 | Save the result of either a smooth run or an optimization run by the genetic algorithm. :param file_name: name of the result pickle file :type file_name: string :param result_data: data to save # Create the name of result by using the current time and then "_smooth_optimization_result.pcl" # Create pointer to the file where the result will be saved. # Pickle the result. | 3.476845 | 3 |
nerodia/elements/date_time_field.py | harsh183/nerodia | 83 | 6613053 | <filename>nerodia/elements/date_time_field.py
import datetime
from inspect import stack
import six
from dateutil import parser
from nerodia.elements.html_elements import InputCollection
from .input import Input
from ..meta_elements import MetaHTMLElement
@six.add_metaclass(MetaHTMLElement)
class DateTimeField(Input):
    """Wrapper for a datetime input element."""
    def js_set(self, value):
        """
        Set datetime field to the given date and time

        Accepted values: a numeric POSIX timestamp, a ``datetime.time``
        (combined with today's date), a parseable date/time string, or a
        ``datetime.datetime``.  Anything else raises ``TypeError``.

        :param value: value to set to
        :type value: datetime or str
        :Example:

        browser.date_time_field(id='start_date').set('2018/01/31 14:00')
        """
        if isinstance(value, (float, int)):
            # Numeric input is treated as a POSIX timestamp.
            value = datetime.datetime.fromtimestamp(value)
        if isinstance(value, datetime.time):
            # A bare time is anchored to today's date.
            value = datetime.datetime.combine(datetime.date.today(), value)
        if isinstance(value, six.string_types):
            # fuzzy=True lets dateutil ignore surrounding noise in the string.
            value = parser.parse(value, fuzzy=True)
        if not isinstance(value, datetime.datetime):
            # stack()[0][3] reports the current function name in the message.
            raise TypeError('DateTimeField#{} only accepts instances of datetime or '
                            'time'.format(stack()[0][3]))
        # Format expected by datetime-local inputs: YYYY-MM-DDTHH:MM.
        date_time_string = value.strftime('%Y-%m-%dT%H:%M')
        self._element_call(lambda: self._execute_js('setValue', self.el, date_time_string),
                           precondition=self.wait_for_writable)
    set = js_set
    @property
    def value(self):
        # Current value of the element's 'value' attribute.
        return self.attribute_value('value')
    @value.setter  # setter is an alias for set()
    def value(self, *args):
        self.set(*args)
class DateTimeFieldCollection(InputCollection):
    """Collection of DateTimeField elements."""
    # private
    @property
    def _element_class(self):
        # Element type instantiated for each member of this collection.
        return DateTimeField
| <filename>nerodia/elements/date_time_field.py
import datetime
from inspect import stack
import six
from dateutil import parser
from nerodia.elements.html_elements import InputCollection
from .input import Input
from ..meta_elements import MetaHTMLElement
@six.add_metaclass(MetaHTMLElement)
class DateTimeField(Input):
    """Wrapper for a datetime input element."""
    def js_set(self, value):
        """
        Set datetime field to the given date and time

        Accepted values: a numeric POSIX timestamp, a ``datetime.time``
        (combined with today's date), a parseable date/time string, or a
        ``datetime.datetime``.  Anything else raises ``TypeError``.

        :param value: value to set to
        :type value: datetime or str
        :Example:

        browser.date_time_field(id='start_date').set('2018/01/31 14:00')
        """
        if isinstance(value, (float, int)):
            # Numeric input is treated as a POSIX timestamp.
            value = datetime.datetime.fromtimestamp(value)
        if isinstance(value, datetime.time):
            # A bare time is anchored to today's date.
            value = datetime.datetime.combine(datetime.date.today(), value)
        if isinstance(value, six.string_types):
            # fuzzy=True lets dateutil ignore surrounding noise in the string.
            value = parser.parse(value, fuzzy=True)
        if not isinstance(value, datetime.datetime):
            # stack()[0][3] reports the current function name in the message.
            raise TypeError('DateTimeField#{} only accepts instances of datetime or '
                            'time'.format(stack()[0][3]))
        # Format expected by datetime-local inputs: YYYY-MM-DDTHH:MM.
        date_time_string = value.strftime('%Y-%m-%dT%H:%M')
        self._element_call(lambda: self._execute_js('setValue', self.el, date_time_string),
                           precondition=self.wait_for_writable)
    set = js_set
    @property
    def value(self):
        # Current value of the element's 'value' attribute.
        return self.attribute_value('value')
    @value.setter  # setter is an alias for set()
    def value(self, *args):
        self.set(*args)
class DateTimeFieldCollection(InputCollection):
    """Collection of DateTimeField elements."""
    # private
    @property
    def _element_class(self):
        # Element type instantiated for each member of this collection.
        return DateTimeField
| en | 0.394932 | Set datetime field to the given date and time :param value: value to set to :type value: datetime or str :Example: browser.date_time_field(id='start_date').set('2018/01/31 14:00') #{} only accepts instances of datetime or ' # alias # private | 2.425374 | 2 |
ietf/secr/drafts/report_id_activity.py | wpjesus/codematch | 1 | 6613054 | <gh_stars>1-10
import os
# Point Django at the IETF settings module before importing any Django-backed code.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")
from ietf.secr.drafts.views import report_id_activity
import sys
# Python 2 script: print the ID activity report for the two date arguments
# given on the command line; the trailing comma suppresses the final newline.
print report_id_activity(sys.argv[1], sys.argv[2]),
| import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")
from ietf.secr.drafts.views import report_id_activity
import sys
print report_id_activity(sys.argv[1], sys.argv[2]), | none | 1 | 1.305382 | 1 | |
src/runQP.py | entn-at/QPNet | 0 | 6613055 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on a WaveNet script by <NAME> (Nagoya University)
# (https://github.com/kan-bayashi/PytorchWaveNetVocoder)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Pytorch QPNet script
Usage: runQP.py -w WAVLIST -a AUXLIST
[-hmr] [-f FS]
[-x UPWAVLIST] [-u UPAUXLIST]
[-y VALIDWAVLIST] [-v VALIDAUXLIST]
[-e EVALLIST]
[-g GPUID] [-n NETWORK] [-d DENSE]
[-I ITER] [-U UITER]
[-R RESUME] [-M MODEL]
[-1] [-2] [-3] [-4] [-5] [TESTSPK]
Options:
-h, --help Show the help
-r, --replace Over write the exist evaluation results
-m, --multi Multi-speaker QPNet generatiron
-w WAVLIST The list of the training waveform files
-a AUXLIST The list of the training auxiliary features
-x UPWAVLIST The list of the updating waveform files
-u UPAUXLIST The list of the updating auxiliary features
-y VALIDWAVLIST The list of the validation waveform files
-v VALIDAUXLIST The list of the validation auxiliary features
-e EVALLIST The list of the evaluation features
-f FS The sampling rate
-g GPUID The GPU device ID
-n NETWORK The name of the network structure ('d4r4')
-d DENSE The dense factor a
-I ITER The number of iteration
-U UITER The number if update iteration
-R RESUME The number of iteration to resume model
-M MODEL The number of iteration of model for testing
-1, --step1 Execute step1 (train QPNet)
-2, --step2 Execute step2 (update QPNet)
-3, --step3 Execute step3 (QPNet decode)
-4, --step4 Execute step4 (noiseshaping restored)
-5, --step5 Execute step5 (validation)
TESTSPK The speaker name of the evaluation list
"""
import os
import sys
import h5py
import math
import yaml
import numpy as np
from docopt import docopt
from utils.utils_pathlist import _path_initial, _path_check, _list_initial, _remove_temp_file
from utils.utils_pathlist import _templist, _templist_eval
from utils.param_model import qpwn_parameter
from utils.param_feat import acoustic_parameter
from utils.param_path import LIBRARY_DIR, CUDA_DIR, ROOT_DIR
from utils.param_path import PRJ_DIR, COP, COP_DIR, SCP_DIR, SRC_DIR
N_JOBS = 25
N_GPUS = 1
SEED = 1
DECODE_SEED = 100
DECODE_BATCH_SIZE = 20
# MAIN
if __name__ == "__main__":
args = docopt(__doc__)
print(args)
# STEP CONTRAL
execute_steps = [False] \
+ [args["--step{}".format(step_index)] for step_index in range(1, 6)]
if not any(execute_steps):
raise("Please specify steps with options")
# ENVIRONMET PARAMETER SETTING
os.environ['LD_LIBRARY_PATH'] += ":" + LIBRARY_DIR
os.environ['CUDA_HOME'] = CUDA_DIR
os.environ['PATH'] += (":" + SRC_DIR + "bin:" + SRC_DIR + "utils")
os.environ['PYTHONPATH'] = (SRC_DIR + "utils")
os.environ['PYTHONPATH'] += (":" + SRC_DIR + "nets")
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if args['-g'] is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = args['-g']
num_gpus = 1
else:
num_gpus = N_GPUS
# ACOUSTIC FEATURE & WAVEFORM SETTING
feat_format = "h5"
shiftms = 5
wav_mode = "noiseshaped"
synonym_wavtype = "wav_%s_ns" % (feat_format)
filter_version = "noise_restored"
mag = 0.5
pow_adjust = 1.0
fs = "22050"
restored_mode = "restored"
if args['-f'] is not None:
fs = args['-f']
feat_param = acoustic_parameter(fs, shiftms=shiftms)
# RUNNING SETTING
network = "qpnet"
synonym_root = "rootpath"
execution_root = "./bin"
execution_train = "%s/%s_train.py" % (execution_root, network)
execution_update = "%s/%s_update.py" % (execution_root, network)
execution_validate = "%s/%s_validate.py" % (execution_root, network)
execution_decode = "%s/%s_decode.py" % (execution_root, network)
execution_filter = "%s/%s.py" % (execution_root, filter_version)
# MODEL SETTING
if args['-d'] is not None:
dense_factor = np.int(args['-d'])
else:
dense_factor = 8
aux_version = os.path.basename(args['-a']).split(".")[0].split("-")[-1]
wav_version = os.path.basename(args['-w']).split(".")[0].split("-")[-1]
model_version = "A%s_W%s_d%d" % (aux_version, wav_version, dense_factor) # model name
net_name = "default" # network structure
iters = "200000" # number of training iteration
check_interval = "10000"
up_iters = "3000" # number of updating iteration
update_interval = "100"
model_iters = "final" # testing model version
if args['-n'] is not None:
net_name = args['-n']
if net_name != "default":
model_version = "%s_%s" % (model_version, net_name)
if args['-I'] is not None:
iters = args['-I']
if args['-U'] is not None:
up_iters = args['-U']
if args['-M'] is not None:
model_iters = args['-M']
model_param = qpwn_parameter(net_name,
aux=int(feat_param.aux_dim),
iters=int(iters),
update_iters= int(up_iters),
checkpoint_interval=int(check_interval),
update_interval=int(update_interval),
decode_batch_size=DECODE_BATCH_SIZE)
validation_intervel = range(model_param.checkpoint_interval,
model_param.iters+1,
model_param.checkpoint_interval)
# PATH INITIALIZATION
corpus_dir = COP_DIR
scp_dir = SCP_DIR
stats = "%sstats/%s_stats.%s" % (corpus_dir, wav_version, feat_format)
expdir = "%s%s_models/%s/" % (PRJ_DIR, network, model_version)
outdir = "%s%s_output/%s/" % (PRJ_DIR, network, model_version)
config = expdir + "model.conf"
tempdir = "%stemp/" % PRJ_DIR
_path_initial([tempdir])
_path_check([corpus_dir, stats])
# LIST INITIALIZATION
    def _get_list(auxlist, wavlist, modelver, setname):
        """Create temp list files for auxiliary features and waveforms.

        Rewrites the path placeholders in *auxlist*/*wavlist* into concrete
        corpus paths via _templist and returns the two temp-file names.
        Relies on the enclosing scope for tempdir/COP/corpus_dir and the
        synonym_* / feat_format settings.
        """
        # get auxiliary feat list
        aux_feats = "%s%s%s_%sauxfeats.tmp" % (tempdir, COP, modelver, setname)
        _templist(auxlist, aux_feats, "", [synonym_root, "wav"], [corpus_dir, feat_format])
        # get waveform list
        waveforms = "%s%s%s_%swaveforms.tmp" % (tempdir, COP, modelver, setname)
        keyword = [synonym_root, "wav", ".%s"%synonym_wavtype]
        subword = [corpus_dir, synonym_wavtype, ".wav"]
        _templist(wavlist, waveforms, "", keyword, subword)
        return aux_feats, waveforms
# get training auxiliary feat & waveform list
aux_feats, waveforms = _get_list(scp_dir + args['-a'],
scp_dir + args['-w'],
model_version, 'training')
# NETWORK TRAINING
if execute_steps[1]:
# resume setting
if args['-R'] is not None:
resume = expdir + "checkpoint-%s.pkl" % (args['-R'])
_path_check([resume])
else:
resume = "None"
# training
cmd = "python " + execution_train + \
" --waveforms " + waveforms + \
" --feats " + aux_feats + \
" --stats " + stats + \
" --expdir " + expdir + \
" --config " + config + \
" --n_quantize " + str(model_param.quantize) + \
" --n_aux " + str(model_param.aux) + \
" --n_resch " + str(model_param.resch) + \
" --n_skipch " + str(model_param.skipch) + \
" --dilationF_depth " + str(model_param.dilationF_depth) + \
" --dilationF_repeat " + str(model_param.dilationF_repeat) + \
" --dilationA_depth " + str(model_param.dilationA_depth) + \
" --dilationA_repeat " + str(model_param.dilationA_repeat) + \
" --kernel_size " + str(model_param.kernel_size) + \
" --dense_factor " + str(dense_factor) + \
" --upsampling_factor " + str(feat_param.upsampling_factor)+ \
" --feature_type " + str(feat_param.feature_type)+ \
" --feature_format " + str(feat_format) + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --f0_threshold " + str(model_param.f0_threshold) + \
" --lr " + str(model_param.lr) + \
" --weight_decay " + str(model_param.weight_decay) + \
" --iters " + str(model_param.iters) + \
" --checkpoint_interval " + str(model_param.checkpoint_interval) + \
" --seed " + str(SEED) + \
" --resume " + resume + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
#print(cmd)
os.system(cmd)
_remove_temp_file([waveforms, aux_feats])
# NETWORK ADAPTATION
if not args['--multi']:
if args['-u'] is None or args['-x'] is None:
print("Please assign the updating auxilary list by '-u UPAUXLIST' " + \
" and the corresponding wav list by '-x UPWAVLIST' " + \
"or select the multi speaker mode by '--multi'.")
sys.exit(0)
# check the pretrained checkpoint
pretrain_checkpoint = "%s/checkpoint-final.pkl" % (expdir)
_path_check([pretrain_checkpoint])
# get updating model version
upaux_version = os.path.basename(args['-u']).split(".")[0].split("-")[-1]
upwav_version = os.path.basename(args['-x']).split(".")[0].split("-")[-1]
model_version = "%s_U%s_V%s" % (model_version, upaux_version, upwav_version)
# get updating auxiliary feat & waveform list
upaux_feats, upwaveforms = _get_list(scp_dir + args['-u'],
scp_dir + args['-x'],
model_version, 'updating')
# update path
expdir = "%s%s_models/%s/" % (PRJ_DIR, network, model_version)
outdir = "%s%s_output/%s/" % (PRJ_DIR, network, model_version)
# update validation interval
validation_intervel = range(model_param.update_interval,
model_param.update_iters+1,
model_param.update_interval)
# resume setting
if args['-R'] is not None:
resume = expdir + "checkpoint-%s.pkl" % (args['-R'])
_path_check([resume])
else:
resume = "None"
# adaptation
if execute_steps[2]:
cmd = "python " + execution_update + \
" --waveforms " + upwaveforms + \
" --feats " + upaux_feats + \
" --stats " + stats + \
" --expdir " + expdir + \
" --config " + config + \
" --pretrain " + pretrain_checkpoint + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --f0_threshold " + str(model_param.f0_threshold) + \
" --lr " + str(model_param.lr) + \
" --weight_decay " + str(model_param.weight_decay) + \
" --iters " + str(model_param.update_iters) + \
" --checkpoint_interval " + str(model_param.update_interval) + \
" --resume " + resume + \
" --seed " + str(SEED) + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
# print(cmd)
os.system(cmd)
_remove_temp_file([upwaveforms, upaux_feats])
# EVALUATION
if args['-e'] is None:
print("(warning) test list is empty.")
else:
# testing settings initialization
if args['TESTSPK'] is None:
print("Pleas assign the evaluation speaker.")
sys.exit(0)
testspk = args['TESTSPK']
outdir_eval = os.path.join(outdir, wav_mode, testspk, model_iters, "feat_id.wav")
test_feats = "%s%s%s_testfeats.tmp" % (tempdir, COP, model_version)
tlist = scp_dir + args['-e']
keyword = [synonym_root, "wav"]
subword = [corpus_dir, feat_format]
f0_factor = 1.0 # f0 scaled factor (1.0 means unchanged)
extra_memory = False # set True will accelerate the decoding but consume lots of memory
# speech decoding
if execute_steps[3]:
final_checkpoint = "%s/checkpoint-%s.pkl" % (expdir, model_iters)
_path_check([final_checkpoint])
# check the evaluation list
if not _list_initial(args['--replace'], feat_format, tlist, test_feats, outdir_eval, keyword, subword):
print("%s is skipped" % (args['-e']))
else:
cmd = "python " + execution_decode + \
" --feats " + test_feats + \
" --stats " + stats + \
" --config " + config + \
" --outdir " + outdir_eval + \
" --checkpoint " + final_checkpoint + \
" --fs " + str(feat_param.fs) + \
" --batch_size " + str(model_param.decode_batch_size) + \
" --extra_memory " + str(extra_memory) + \
" --seed " + str(DECODE_SEED) + \
" --n_gpus " + str(num_gpus) + \
" --f0_factor " + str(f0_factor) + \
" --f0_dim_index " + str(feat_param.f0_dim_idx)
# print(cmd)
os.system(cmd)
# noise shaping restored
if execute_steps[4]:
_path_check([os.path.dirname(outdir_eval)])
writedir = outdir_eval.replace(wav_mode, restored_mode)
_templist(tlist, test_feats, outdir_eval, keyword, subword)
cmd = "python " + execution_filter + \
" --feats " + test_feats + \
" --stats " + stats + \
" --outdir " + outdir_eval + \
" --writedir " + writedir + \
" --feature_type " + str(feat_param.feature_type)+ \
" --feature_format " + feat_format + \
" --pow_adjust " + str(pow_adjust) + \
" --fs " + str(feat_param.fs) + \
" --shiftms " + str(feat_param.shiftms) + \
" --fftl " + str(feat_param.fftl) + \
" --mcep_dim_start " + str(feat_param.mcep_dim_start) + \
" --mcep_dim_end " + str(feat_param.mcep_dim_end) + \
" --mcep_alpha " + str(feat_param.mcep_alpha) + \
" --mag " + str(mag) + \
" --n_jobs " + str(N_JOBS) + \
" --inv false"
# print(cmd)
os.system(cmd)
_remove_temp_file([test_feats])
# NETWORK VALIDATION
if execute_steps[5]:
if args['-v'] is None or args['-y'] is None:
print("Please assign the validation auxilary list by '-v VALIDAUXLIST' " + \
" and the corresponding wav list by '-y VALIDWAVLIST' ")
sys.exit(0)
# get validation auxiliary feat & waveform list
validaux_feats, validwaveforms = _get_list(scp_dir + args['-v'],
scp_dir + args['-y'],
model_version, 'validation')
for model_iters in validation_intervel:
checkpoint = "%s/checkpoint-%s.pkl" % (expdir, model_iters)
_path_check([checkpoint])
cmd = "python " + execution_validate + \
" --waveforms " + validwaveforms + \
" --feats " + validaux_feats + \
" --stats " + stats + \
" --resultdir " + expdir + \
" --config " + config + \
" --checkpoint " + checkpoint + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
# print(cmd)
os.system(cmd)
_remove_temp_file([validwaveforms, validaux_feats]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on a WaveNet script by <NAME> (Nagoya University)
# (https://github.com/kan-bayashi/PytorchWaveNetVocoder)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Pytorch QPNet script
Usage: runQP.py -w WAVLIST -a AUXLIST
[-hmr] [-f FS]
[-x UPWAVLIST] [-u UPAUXLIST]
[-y VALIDWAVLIST] [-v VALIDAUXLIST]
[-e EVALLIST]
[-g GPUID] [-n NETWORK] [-d DENSE]
[-I ITER] [-U UITER]
[-R RESUME] [-M MODEL]
[-1] [-2] [-3] [-4] [-5] [TESTSPK]
Options:
-h, --help Show the help
-r, --replace Over write the exist evaluation results
-m, --multi Multi-speaker QPNet generatiron
-w WAVLIST The list of the training waveform files
-a AUXLIST The list of the training auxiliary features
-x UPWAVLIST The list of the updating waveform files
-u UPAUXLIST The list of the updating auxiliary features
-y VALIDWAVLIST The list of the validation waveform files
-v VALIDAUXLIST The list of the validation auxiliary features
-e EVALLIST The list of the evaluation features
-f FS The sampling rate
-g GPUID The GPU device ID
-n NETWORK The name of the network structure ('d4r4')
-d DENSE The dense factor a
-I ITER The number of iteration
-U UITER The number if update iteration
-R RESUME The number of iteration to resume model
-M MODEL The number of iteration of model for testing
-1, --step1 Execute step1 (train QPNet)
-2, --step2 Execute step2 (update QPNet)
-3, --step3 Execute step3 (QPNet decode)
-4, --step4 Execute step4 (noiseshaping restored)
-5, --step5 Execute step5 (validation)
TESTSPK The speaker name of the evaluation list
"""
import os
import sys
import h5py
import math
import yaml
import numpy as np
from docopt import docopt
from utils.utils_pathlist import _path_initial, _path_check, _list_initial, _remove_temp_file
from utils.utils_pathlist import _templist, _templist_eval
from utils.param_model import qpwn_parameter
from utils.param_feat import acoustic_parameter
from utils.param_path import LIBRARY_DIR, CUDA_DIR, ROOT_DIR
from utils.param_path import PRJ_DIR, COP, COP_DIR, SCP_DIR, SRC_DIR
N_JOBS = 25
N_GPUS = 1
SEED = 1
DECODE_SEED = 100
DECODE_BATCH_SIZE = 20
# MAIN
if __name__ == "__main__":
args = docopt(__doc__)
print(args)
# STEP CONTRAL
execute_steps = [False] \
+ [args["--step{}".format(step_index)] for step_index in range(1, 6)]
if not any(execute_steps):
raise("Please specify steps with options")
# ENVIRONMET PARAMETER SETTING
os.environ['LD_LIBRARY_PATH'] += ":" + LIBRARY_DIR
os.environ['CUDA_HOME'] = CUDA_DIR
os.environ['PATH'] += (":" + SRC_DIR + "bin:" + SRC_DIR + "utils")
os.environ['PYTHONPATH'] = (SRC_DIR + "utils")
os.environ['PYTHONPATH'] += (":" + SRC_DIR + "nets")
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if args['-g'] is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = args['-g']
num_gpus = 1
else:
num_gpus = N_GPUS
# ACOUSTIC FEATURE & WAVEFORM SETTING
feat_format = "h5"
shiftms = 5
wav_mode = "noiseshaped"
synonym_wavtype = "wav_%s_ns" % (feat_format)
filter_version = "noise_restored"
mag = 0.5
pow_adjust = 1.0
fs = "22050"
restored_mode = "restored"
if args['-f'] is not None:
fs = args['-f']
feat_param = acoustic_parameter(fs, shiftms=shiftms)
# RUNNING SETTING
network = "qpnet"
synonym_root = "rootpath"
execution_root = "./bin"
execution_train = "%s/%s_train.py" % (execution_root, network)
execution_update = "%s/%s_update.py" % (execution_root, network)
execution_validate = "%s/%s_validate.py" % (execution_root, network)
execution_decode = "%s/%s_decode.py" % (execution_root, network)
execution_filter = "%s/%s.py" % (execution_root, filter_version)
# MODEL SETTING
if args['-d'] is not None:
dense_factor = np.int(args['-d'])
else:
dense_factor = 8
aux_version = os.path.basename(args['-a']).split(".")[0].split("-")[-1]
wav_version = os.path.basename(args['-w']).split(".")[0].split("-")[-1]
model_version = "A%s_W%s_d%d" % (aux_version, wav_version, dense_factor) # model name
net_name = "default" # network structure
iters = "200000" # number of training iteration
check_interval = "10000"
up_iters = "3000" # number of updating iteration
update_interval = "100"
model_iters = "final" # testing model version
if args['-n'] is not None:
net_name = args['-n']
if net_name != "default":
model_version = "%s_%s" % (model_version, net_name)
if args['-I'] is not None:
iters = args['-I']
if args['-U'] is not None:
up_iters = args['-U']
if args['-M'] is not None:
model_iters = args['-M']
model_param = qpwn_parameter(net_name,
aux=int(feat_param.aux_dim),
iters=int(iters),
update_iters= int(up_iters),
checkpoint_interval=int(check_interval),
update_interval=int(update_interval),
decode_batch_size=DECODE_BATCH_SIZE)
validation_intervel = range(model_param.checkpoint_interval,
model_param.iters+1,
model_param.checkpoint_interval)
# PATH INITIALIZATION
corpus_dir = COP_DIR
scp_dir = SCP_DIR
stats = "%sstats/%s_stats.%s" % (corpus_dir, wav_version, feat_format)
expdir = "%s%s_models/%s/" % (PRJ_DIR, network, model_version)
outdir = "%s%s_output/%s/" % (PRJ_DIR, network, model_version)
config = expdir + "model.conf"
tempdir = "%stemp/" % PRJ_DIR
_path_initial([tempdir])
_path_check([corpus_dir, stats])
# LIST INITIALIZATION
def _get_list(auxlist, wavlist, modelver, setname):
# get auxiliary feat list
aux_feats = "%s%s%s_%sauxfeats.tmp" % (tempdir, COP, modelver, setname)
_templist(auxlist, aux_feats, "", [synonym_root, "wav"], [corpus_dir, feat_format])
# get waveform list
waveforms = "%s%s%s_%swaveforms.tmp" % (tempdir, COP, modelver, setname)
keyword = [synonym_root, "wav", ".%s"%synonym_wavtype]
subword = [corpus_dir, synonym_wavtype, ".wav"]
_templist(wavlist, waveforms, "", keyword, subword)
return aux_feats, waveforms
# get training auxiliary feat & waveform list
aux_feats, waveforms = _get_list(scp_dir + args['-a'],
scp_dir + args['-w'],
model_version, 'training')
# NETWORK TRAINING
if execute_steps[1]:
# resume setting
if args['-R'] is not None:
resume = expdir + "checkpoint-%s.pkl" % (args['-R'])
_path_check([resume])
else:
resume = "None"
# training
cmd = "python " + execution_train + \
" --waveforms " + waveforms + \
" --feats " + aux_feats + \
" --stats " + stats + \
" --expdir " + expdir + \
" --config " + config + \
" --n_quantize " + str(model_param.quantize) + \
" --n_aux " + str(model_param.aux) + \
" --n_resch " + str(model_param.resch) + \
" --n_skipch " + str(model_param.skipch) + \
" --dilationF_depth " + str(model_param.dilationF_depth) + \
" --dilationF_repeat " + str(model_param.dilationF_repeat) + \
" --dilationA_depth " + str(model_param.dilationA_depth) + \
" --dilationA_repeat " + str(model_param.dilationA_repeat) + \
" --kernel_size " + str(model_param.kernel_size) + \
" --dense_factor " + str(dense_factor) + \
" --upsampling_factor " + str(feat_param.upsampling_factor)+ \
" --feature_type " + str(feat_param.feature_type)+ \
" --feature_format " + str(feat_format) + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --f0_threshold " + str(model_param.f0_threshold) + \
" --lr " + str(model_param.lr) + \
" --weight_decay " + str(model_param.weight_decay) + \
" --iters " + str(model_param.iters) + \
" --checkpoint_interval " + str(model_param.checkpoint_interval) + \
" --seed " + str(SEED) + \
" --resume " + resume + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
#print(cmd)
os.system(cmd)
_remove_temp_file([waveforms, aux_feats])
# NETWORK ADAPTATION
if not args['--multi']:
if args['-u'] is None or args['-x'] is None:
print("Please assign the updating auxilary list by '-u UPAUXLIST' " + \
" and the corresponding wav list by '-x UPWAVLIST' " + \
"or select the multi speaker mode by '--multi'.")
sys.exit(0)
# check the pretrained checkpoint
pretrain_checkpoint = "%s/checkpoint-final.pkl" % (expdir)
_path_check([pretrain_checkpoint])
# get updating model version
upaux_version = os.path.basename(args['-u']).split(".")[0].split("-")[-1]
upwav_version = os.path.basename(args['-x']).split(".")[0].split("-")[-1]
model_version = "%s_U%s_V%s" % (model_version, upaux_version, upwav_version)
# get updating auxiliary feat & waveform list
upaux_feats, upwaveforms = _get_list(scp_dir + args['-u'],
scp_dir + args['-x'],
model_version, 'updating')
# update path
expdir = "%s%s_models/%s/" % (PRJ_DIR, network, model_version)
outdir = "%s%s_output/%s/" % (PRJ_DIR, network, model_version)
# update validation interval
validation_intervel = range(model_param.update_interval,
model_param.update_iters+1,
model_param.update_interval)
# resume setting
if args['-R'] is not None:
resume = expdir + "checkpoint-%s.pkl" % (args['-R'])
_path_check([resume])
else:
resume = "None"
# adaptation
if execute_steps[2]:
cmd = "python " + execution_update + \
" --waveforms " + upwaveforms + \
" --feats " + upaux_feats + \
" --stats " + stats + \
" --expdir " + expdir + \
" --config " + config + \
" --pretrain " + pretrain_checkpoint + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --f0_threshold " + str(model_param.f0_threshold) + \
" --lr " + str(model_param.lr) + \
" --weight_decay " + str(model_param.weight_decay) + \
" --iters " + str(model_param.update_iters) + \
" --checkpoint_interval " + str(model_param.update_interval) + \
" --resume " + resume + \
" --seed " + str(SEED) + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
# print(cmd)
os.system(cmd)
_remove_temp_file([upwaveforms, upaux_feats])
# EVALUATION
if args['-e'] is None:
print("(warning) test list is empty.")
else:
# testing settings initialization
if args['TESTSPK'] is None:
print("Pleas assign the evaluation speaker.")
sys.exit(0)
testspk = args['TESTSPK']
outdir_eval = os.path.join(outdir, wav_mode, testspk, model_iters, "feat_id.wav")
test_feats = "%s%s%s_testfeats.tmp" % (tempdir, COP, model_version)
tlist = scp_dir + args['-e']
keyword = [synonym_root, "wav"]
subword = [corpus_dir, feat_format]
f0_factor = 1.0 # f0 scaled factor (1.0 means unchanged)
extra_memory = False # set True will accelerate the decoding but consume lots of memory
# speech decoding
if execute_steps[3]:
final_checkpoint = "%s/checkpoint-%s.pkl" % (expdir, model_iters)
_path_check([final_checkpoint])
# check the evaluation list
if not _list_initial(args['--replace'], feat_format, tlist, test_feats, outdir_eval, keyword, subword):
print("%s is skipped" % (args['-e']))
else:
cmd = "python " + execution_decode + \
" --feats " + test_feats + \
" --stats " + stats + \
" --config " + config + \
" --outdir " + outdir_eval + \
" --checkpoint " + final_checkpoint + \
" --fs " + str(feat_param.fs) + \
" --batch_size " + str(model_param.decode_batch_size) + \
" --extra_memory " + str(extra_memory) + \
" --seed " + str(DECODE_SEED) + \
" --n_gpus " + str(num_gpus) + \
" --f0_factor " + str(f0_factor) + \
" --f0_dim_index " + str(feat_param.f0_dim_idx)
# print(cmd)
os.system(cmd)
# noise shaping restored
if execute_steps[4]:
_path_check([os.path.dirname(outdir_eval)])
writedir = outdir_eval.replace(wav_mode, restored_mode)
_templist(tlist, test_feats, outdir_eval, keyword, subword)
cmd = "python " + execution_filter + \
" --feats " + test_feats + \
" --stats " + stats + \
" --outdir " + outdir_eval + \
" --writedir " + writedir + \
" --feature_type " + str(feat_param.feature_type)+ \
" --feature_format " + feat_format + \
" --pow_adjust " + str(pow_adjust) + \
" --fs " + str(feat_param.fs) + \
" --shiftms " + str(feat_param.shiftms) + \
" --fftl " + str(feat_param.fftl) + \
" --mcep_dim_start " + str(feat_param.mcep_dim_start) + \
" --mcep_dim_end " + str(feat_param.mcep_dim_end) + \
" --mcep_alpha " + str(feat_param.mcep_alpha) + \
" --mag " + str(mag) + \
" --n_jobs " + str(N_JOBS) + \
" --inv false"
# print(cmd)
os.system(cmd)
_remove_temp_file([test_feats])
# NETWORK VALIDATION
if execute_steps[5]:
if args['-v'] is None or args['-y'] is None:
print("Please assign the validation auxilary list by '-v VALIDAUXLIST' " + \
" and the corresponding wav list by '-y VALIDWAVLIST' ")
sys.exit(0)
# get validation auxiliary feat & waveform list
validaux_feats, validwaveforms = _get_list(scp_dir + args['-v'],
scp_dir + args['-y'],
model_version, 'validation')
for model_iters in validation_intervel:
checkpoint = "%s/checkpoint-%s.pkl" % (expdir, model_iters)
_path_check([checkpoint])
cmd = "python " + execution_validate + \
" --waveforms " + validwaveforms + \
" --feats " + validaux_feats + \
" --stats " + stats + \
" --resultdir " + expdir + \
" --config " + config + \
" --checkpoint " + checkpoint + \
" --batch_length " + str(model_param.batch_length) + \
" --batch_size " + str(model_param.batch_size) + \
" --max_length " + str(model_param.max_length) + \
" --n_gpus " + str(num_gpus) + \
" --verbose 1 "
# print(cmd)
os.system(cmd)
_remove_temp_file([validwaveforms, validaux_feats]) | en | 0.576295 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2019 <NAME> (Nagoya University) # based on a WaveNet script by <NAME> (Nagoya University) # (https://github.com/kan-bayashi/PytorchWaveNetVocoder) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) Pytorch QPNet script Usage: runQP.py -w WAVLIST -a AUXLIST [-hmr] [-f FS] [-x UPWAVLIST] [-u UPAUXLIST] [-y VALIDWAVLIST] [-v VALIDAUXLIST] [-e EVALLIST] [-g GPUID] [-n NETWORK] [-d DENSE] [-I ITER] [-U UITER] [-R RESUME] [-M MODEL] [-1] [-2] [-3] [-4] [-5] [TESTSPK] Options: -h, --help Show the help -r, --replace Over write the exist evaluation results -m, --multi Multi-speaker QPNet generatiron -w WAVLIST The list of the training waveform files -a AUXLIST The list of the training auxiliary features -x UPWAVLIST The list of the updating waveform files -u UPAUXLIST The list of the updating auxiliary features -y VALIDWAVLIST The list of the validation waveform files -v VALIDAUXLIST The list of the validation auxiliary features -e EVALLIST The list of the evaluation features -f FS The sampling rate -g GPUID The GPU device ID -n NETWORK The name of the network structure ('d4r4') -d DENSE The dense factor a -I ITER The number of iteration -U UITER The number if update iteration -R RESUME The number of iteration to resume model -M MODEL The number of iteration of model for testing -1, --step1 Execute step1 (train QPNet) -2, --step2 Execute step2 (update QPNet) -3, --step3 Execute step3 (QPNet decode) -4, --step4 Execute step4 (noiseshaping restored) -5, --step5 Execute step5 (validation) TESTSPK The speaker name of the evaluation list # MAIN # STEP CONTRAL # ENVIRONMET PARAMETER SETTING # ACOUSTIC FEATURE & WAVEFORM SETTING # RUNNING SETTING # MODEL SETTING # model name # network structure # number of training iteration # number of updating iteration # testing model version # PATH INITIALIZATION # LIST INITIALIZATION # get auxiliary feat list 
# get waveform list # get training auxiliary feat & waveform list # NETWORK TRAINING # resume setting # training #print(cmd) # NETWORK ADAPTATION # check the pretrained checkpoint # get updating model version # get updating auxiliary feat & waveform list # update path # update validation interval # resume setting # adaptation # print(cmd) # EVALUATION # testing settings initialization # f0 scaled factor (1.0 means unchanged) # set True will accelerate the decoding but consume lots of memory # speech decoding # check the evaluation list # print(cmd) # noise shaping restored # print(cmd) # NETWORK VALIDATION # get validation auxiliary feat & waveform list # print(cmd) | 1.979033 | 2 |
backend/models/tests/test_gcsutils.py | choo/handwriting-scorer | 3 | 6613056 | from unittest import TestCase
from gcsutils import GCSUtils
TEST_BUCKET_NAME = 'handwriting-test-00'
SAMPLE_INPUT_IMAGE = './tests/sample.png'
class TestGCSUtils(TestCase):
def test_list_buckets(self):
gcs_utils = GCSUtils()
buckets = gcs_utils.list_buckets()
for bucket in buckets:
print(bucket.name)
#def test_create_bucket(self):
# gcs_utils = GCSUtils()
# gcs_utils.create_bucket('handwriting-test-0')
def test_upload_filepath(self):
gcs_utils = GCSUtils()
gcs_utils.upload_by_path(TEST_BUCKET_NAME,
SAMPLE_INPUT_IMAGE, 'sample_test_01.png')
def test_upload_file(self):
gcs_utils = GCSUtils()
with open(SAMPLE_INPUT_IMAGE, 'rb') as f:
gcs_utils.upload_by_file(TEST_BUCKET_NAME, f,
'sample_test_stream_01.png', {'hoge': 'fuga'})
def test_list_all_blobs(self):
gcs_utils = GCSUtils()
blobs = gcs_utils.list_all_blobs(TEST_BUCKET_NAME)
print('-------- all objects ---------- ')
for blob in blobs:
print(blob)
prefix = 'sample_test_stream'
print('--------- with prefix "{}" -------'.format(prefix))
blobs = gcs_utils.list_all_blobs(TEST_BUCKET_NAME, prefix=prefix)
for blob in blobs:
print(blob)
| from unittest import TestCase
from gcsutils import GCSUtils
TEST_BUCKET_NAME = 'handwriting-test-00'
SAMPLE_INPUT_IMAGE = './tests/sample.png'
class TestGCSUtils(TestCase):
def test_list_buckets(self):
gcs_utils = GCSUtils()
buckets = gcs_utils.list_buckets()
for bucket in buckets:
print(bucket.name)
#def test_create_bucket(self):
# gcs_utils = GCSUtils()
# gcs_utils.create_bucket('handwriting-test-0')
def test_upload_filepath(self):
gcs_utils = GCSUtils()
gcs_utils.upload_by_path(TEST_BUCKET_NAME,
SAMPLE_INPUT_IMAGE, 'sample_test_01.png')
def test_upload_file(self):
gcs_utils = GCSUtils()
with open(SAMPLE_INPUT_IMAGE, 'rb') as f:
gcs_utils.upload_by_file(TEST_BUCKET_NAME, f,
'sample_test_stream_01.png', {'hoge': 'fuga'})
def test_list_all_blobs(self):
gcs_utils = GCSUtils()
blobs = gcs_utils.list_all_blobs(TEST_BUCKET_NAME)
print('-------- all objects ---------- ')
for blob in blobs:
print(blob)
prefix = 'sample_test_stream'
print('--------- with prefix "{}" -------'.format(prefix))
blobs = gcs_utils.list_all_blobs(TEST_BUCKET_NAME, prefix=prefix)
for blob in blobs:
print(blob)
| en | 0.21752 | #def test_create_bucket(self): # gcs_utils = GCSUtils() # gcs_utils.create_bucket('handwriting-test-0') | 2.820798 | 3 |
scripts/simulate_lightcurves.py | lena-lin/emmanoulopoulos | 0 | 6613057 | from astropy.table import Table
import astropy.units as u
from emmanoulopoulos.emmanoulopoulos_lc_simulation import Emmanoulopoulos_Sampler
from emmanoulopoulos.lightcurve import LC
import json
from json import encoder
import logging
from pathlib import Path
from tqdm import tqdm
logger = logging.getLogger(__name__)
logging.getLogger(__name__).setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s:%(levelname)s:%(message)s", datefmt="%Y-%m-%dT%H:%M:%s"
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
def run_lc_simulation(time, flux, err, name, output_path, n=10,):
lc = LC(time, flux, err, tbin=10 * u.day)
lc.fit_PSD()
pdf_parameter = lc.fit_PDF()
pdf_positive = False
while not pdf_positive:
for k, v in pdf_parameter.to_dict().items():
if v < 0:
pdf_parameter = lc.fit_PDF()
break
else:
pdf_positive = True
logger.info(f"PDF parameters: {pdf_parameter.to_dict()}")
logger.info(f"PSD parameters: {lc.psd_parameter.to_dict()}")
simulated_lcs = {}
Emma = Emmanoulopoulos_Sampler()
for i in tqdm(range(n)):
lc_sim = Emma.sample_from_lc(lc)
simulated_lcs[i] = list(lc_sim.original_flux)
with open(str(Path(output_path, "sim_lc_{}_n{}.json".format(name, n))), 'w') as f:
json.dump(simulated_lcs, f, indent=4, separators=(", ", ": "), sort_keys=True)
def load_data_Fermi():
t_fermi = Table.read("data/lc_2008_2020.fits")
mjd = t_fermi['tmean']
time = (mjd - mjd.min()) * u.day
flux = t_fermi['flux']
err = t_fermi['flux_err']
return time, flux, err
if __name__ == "__main__":
output_path = "build"
time, flux, err = load_data_Fermi()
run_lc_simulation(time, flux, err, name="Fermi_2008_2020", n=3, output_path=output_path)
| from astropy.table import Table
import astropy.units as u
from emmanoulopoulos.emmanoulopoulos_lc_simulation import Emmanoulopoulos_Sampler
from emmanoulopoulos.lightcurve import LC
import json
from json import encoder
import logging
from pathlib import Path
from tqdm import tqdm
logger = logging.getLogger(__name__)
logging.getLogger(__name__).setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s:%(levelname)s:%(message)s", datefmt="%Y-%m-%dT%H:%M:%s"
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
def run_lc_simulation(time, flux, err, name, output_path, n=10,):
lc = LC(time, flux, err, tbin=10 * u.day)
lc.fit_PSD()
pdf_parameter = lc.fit_PDF()
pdf_positive = False
while not pdf_positive:
for k, v in pdf_parameter.to_dict().items():
if v < 0:
pdf_parameter = lc.fit_PDF()
break
else:
pdf_positive = True
logger.info(f"PDF parameters: {pdf_parameter.to_dict()}")
logger.info(f"PSD parameters: {lc.psd_parameter.to_dict()}")
simulated_lcs = {}
Emma = Emmanoulopoulos_Sampler()
for i in tqdm(range(n)):
lc_sim = Emma.sample_from_lc(lc)
simulated_lcs[i] = list(lc_sim.original_flux)
with open(str(Path(output_path, "sim_lc_{}_n{}.json".format(name, n))), 'w') as f:
json.dump(simulated_lcs, f, indent=4, separators=(", ", ": "), sort_keys=True)
def load_data_Fermi():
t_fermi = Table.read("data/lc_2008_2020.fits")
mjd = t_fermi['tmean']
time = (mjd - mjd.min()) * u.day
flux = t_fermi['flux']
err = t_fermi['flux_err']
return time, flux, err
if __name__ == "__main__":
output_path = "build"
time, flux, err = load_data_Fermi()
run_lc_simulation(time, flux, err, name="Fermi_2008_2020", n=3, output_path=output_path)
| none | 1 | 2.060952 | 2 | |
api/db_migrations/versions/fdbf608faf86_create_scan_table.py | cds-snc/scan-files | 0 | 6613058 | """create scan table
Revision ID: fdbf608faf86
Revises:
Create Date: 2021-11-15 20:45:58.403433
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "fdbf608faf86"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"scans",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("file_name", sa.String(), nullable=False),
sa.Column("file_size", sa.Numeric(), nullable=False),
sa.Column("save_path", sa.String(), nullable=False),
sa.Column("sha256", sa.String(), nullable=False),
sa.Column("scan_provider", sa.String(), nullable=False),
sa.Column("submitter", sa.String(), nullable=False),
sa.Column("verdict", sa.String(), nullable=True),
sa.Column("quarantine_path", sa.String(), nullable=True),
sa.Column("meta_data", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("submitted", sa.DateTime(), nullable=False),
sa.Column("completed", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("scans")
# ### end Alembic commands ###
| """create scan table
Revision ID: fdbf608faf86
Revises:
Create Date: 2021-11-15 20:45:58.403433
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "fdbf608faf86"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"scans",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("file_name", sa.String(), nullable=False),
sa.Column("file_size", sa.Numeric(), nullable=False),
sa.Column("save_path", sa.String(), nullable=False),
sa.Column("sha256", sa.String(), nullable=False),
sa.Column("scan_provider", sa.String(), nullable=False),
sa.Column("submitter", sa.String(), nullable=False),
sa.Column("verdict", sa.String(), nullable=True),
sa.Column("quarantine_path", sa.String(), nullable=True),
sa.Column("meta_data", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("submitted", sa.DateTime(), nullable=False),
sa.Column("completed", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("scans")
# ### end Alembic commands ###
| en | 0.499863 | create scan table Revision ID: fdbf608faf86 Revises: Create Date: 2021-11-15 20:45:58.403433 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.979043 | 2 |
src/pyasl/asl/localtime.py | mirofedurco/PyAstronomy | 98 | 6613059 | # -*- coding: utf-8 -*-
import numpy as np
from PyAstronomy.pyaC import pyaErrors as PE
def localTime(utc, lon, diff=True):
"""
Computes the Local Time for a given UTC at a given geographic longitude.
The local time is computed as UTC + LONGITUDE/15.
Parameters
----------
utc : float or array
The time in UTC in hours.
lon : float or array
The geographic (East) longitude in DEGREES for which
local time should be calculated.
diff : boolean, optional
If True (default), returns the difference in HOURS between
UTC and local time.
Returns
-------
Time : float or array
Local time in HOURS (0 - 24) for given geographic
longitude and UTC.
Time difference : float or array
The difference between local and UTC time in
hours (only returned if `diff` is True)
"""
utc = np.array(utc, ndmin=1)
lon = np.array(lon, ndmin=1)
if lon.size != utc.size:
raise(PE.PyAValError("You need to specify the same number of longitudes and times", \
solution="Make `lon` and `utc` arrays have the same length.", \
where="localTime"))
indi = np.where(np.logical_or(lon<0.0, lon>360.))[0]
if len(indi) > 0:
raise(PE.PyAValError("Longitude needs to be in the range 0-360.", \
solution="Change the input.", \
where="localTime"))
localtime = utc + lon/15.
localtime = localtime % 24.0
if diff == True:
return localtime, lon/15.
else:
return localtime
| # -*- coding: utf-8 -*-
import numpy as np
from PyAstronomy.pyaC import pyaErrors as PE
def localTime(utc, lon, diff=True):
"""
Computes the Local Time for a given UTC at a given geographic longitude.
The local time is computed as UTC + LONGITUDE/15.
Parameters
----------
utc : float or array
The time in UTC in hours.
lon : float or array
The geographic (East) longitude in DEGREES for which
local time should be calculated.
diff : boolean, optional
If True (default), returns the difference in HOURS between
UTC and local time.
Returns
-------
Time : float or array
Local time in HOURS (0 - 24) for given geographic
longitude and UTC.
Time difference : float or array
The difference between local and UTC time in
hours (only returned if `diff` is True)
"""
utc = np.array(utc, ndmin=1)
lon = np.array(lon, ndmin=1)
if lon.size != utc.size:
raise(PE.PyAValError("You need to specify the same number of longitudes and times", \
solution="Make `lon` and `utc` arrays have the same length.", \
where="localTime"))
indi = np.where(np.logical_or(lon<0.0, lon>360.))[0]
if len(indi) > 0:
raise(PE.PyAValError("Longitude needs to be in the range 0-360.", \
solution="Change the input.", \
where="localTime"))
localtime = utc + lon/15.
localtime = localtime % 24.0
if diff == True:
return localtime, lon/15.
else:
return localtime
| en | 0.746703 | # -*- coding: utf-8 -*- Computes the Local Time for a given UTC at a given geographic longitude. The local time is computed as UTC + LONGITUDE/15. Parameters ---------- utc : float or array The time in UTC in hours. lon : float or array The geographic (East) longitude in DEGREES for which local time should be calculated. diff : boolean, optional If True (default), returns the difference in HOURS between UTC and local time. Returns ------- Time : float or array Local time in HOURS (0 - 24) for given geographic longitude and UTC. Time difference : float or array The difference between local and UTC time in hours (only returned if `diff` is True) | 3.421757 | 3 |
discord_typings/resources/guild_scheduled_events.py | Middledot/discord-typings | 7 | 6613060 | from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
from typing_extensions import Literal, NotRequired, TypedDict, final
if TYPE_CHECKING:
from ..shared import Snowflake
from .guild import GuildMemberData
from .user import UserData
__all__ = (
'GuildScheduledEventData', 'GuildScheduledEventEntityMetadata',
'GuildScheduledEventUserData'
)
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-structure
class GuildScheduledEventBase(TypedDict):
id: Snowflake
guild_id: Snowflake
creator_id: Snowflake
name: str
description: NotRequired[str]
scheduled_start_time: str
privacy_level: Literal[2]
status: Literal[1, 2, 3, 4]
entity_type: Literal[1, 2, 3]
entity_id: Optional[Snowflake]
creator: UserData
user_count: NotRequired[int]
@final
class StageGuildScheduledEventData(GuildScheduledEventBase):
channel_id: Snowflake
entity_metadata: None
scheduled_end_time: NotRequired[str]
@final
class VoiceGuildScheduledEventData(GuildScheduledEventBase):
channel_id: Snowflake
entity_metadata: None
scheduled_end_time: NotRequired[str]
@final
class ExternalGuildScheduledEventData(GuildScheduledEventBase):
channel_id: None
entity_metadata: GuildScheduledEventEntityMetadata
scheduled_end_time: str
GuildScheduledEventData = Union[
StageGuildScheduledEventData,
VoiceGuildScheduledEventData,
ExternalGuildScheduledEventData
]
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-entity-metadata
@final
class GuildScheduledEventEntityMetadata(TypedDict):
location: NotRequired[str]
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-user-object-guild-scheduled-event-user-structure
@final
class GuildScheduledEventUserData(TypedDict):
guild_scheduled_event_id: Snowflake
user: UserData
member: NotRequired[GuildMemberData]
| from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
from typing_extensions import Literal, NotRequired, TypedDict, final
if TYPE_CHECKING:
from ..shared import Snowflake
from .guild import GuildMemberData
from .user import UserData
__all__ = (
'GuildScheduledEventData', 'GuildScheduledEventEntityMetadata',
'GuildScheduledEventUserData'
)
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-structure
class GuildScheduledEventBase(TypedDict):
id: Snowflake
guild_id: Snowflake
creator_id: Snowflake
name: str
description: NotRequired[str]
scheduled_start_time: str
privacy_level: Literal[2]
status: Literal[1, 2, 3, 4]
entity_type: Literal[1, 2, 3]
entity_id: Optional[Snowflake]
creator: UserData
user_count: NotRequired[int]
@final
class StageGuildScheduledEventData(GuildScheduledEventBase):
channel_id: Snowflake
entity_metadata: None
scheduled_end_time: NotRequired[str]
@final
class VoiceGuildScheduledEventData(GuildScheduledEventBase):
channel_id: Snowflake
entity_metadata: None
scheduled_end_time: NotRequired[str]
@final
class ExternalGuildScheduledEventData(GuildScheduledEventBase):
channel_id: None
entity_metadata: GuildScheduledEventEntityMetadata
scheduled_end_time: str
GuildScheduledEventData = Union[
StageGuildScheduledEventData,
VoiceGuildScheduledEventData,
ExternalGuildScheduledEventData
]
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-entity-metadata
@final
class GuildScheduledEventEntityMetadata(TypedDict):
location: NotRequired[str]
# https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-user-object-guild-scheduled-event-user-structure
@final
class GuildScheduledEventUserData(TypedDict):
guild_scheduled_event_id: Snowflake
user: UserData
member: NotRequired[GuildMemberData]
| en | 0.642829 | # https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-structure # https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-object-guild-scheduled-event-entity-metadata # https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event-user-object-guild-scheduled-event-user-structure | 1.979983 | 2 |
PythonExercicios/Mundo1/ex009.py | leonardocsrod/Python-Curso-em-Video | 0 | 6613061 | <reponame>leonardocsrod/Python-Curso-em-Video
ttable = int(input('Write a number: '))
print('-' * 15)
print('{} * {:2} = {:>2}'.format(ttable, 1, ttable * 1))
print('{} * {:2} = {}'.format(ttable, 2, ttable * 2))
print('{} * {:2} = {}'.format(ttable, 3, ttable * 3))
print('{} * {:2} = {}'.format(ttable, 4, ttable * 4))
print('{} * {:2} = {}'.format(ttable, 5, ttable * 5))
print('{} * {:2} = {}'.format(ttable, 6, ttable * 6))
print('{} * {:2} = {}'.format(ttable, 7, ttable * 7))
print('{} * {:2} = {}'.format(ttable, 8, ttable * 8))
print('{} * {:2} = {}'.format(ttable, 9, ttable * 9))
print('{} * {:2} = {}'.format(ttable, 10, ttable * 10))
print('-' * 15) | ttable = int(input('Write a number: '))
print('-' * 15)
print('{} * {:2} = {:>2}'.format(ttable, 1, ttable * 1))
print('{} * {:2} = {}'.format(ttable, 2, ttable * 2))
print('{} * {:2} = {}'.format(ttable, 3, ttable * 3))
print('{} * {:2} = {}'.format(ttable, 4, ttable * 4))
print('{} * {:2} = {}'.format(ttable, 5, ttable * 5))
print('{} * {:2} = {}'.format(ttable, 6, ttable * 6))
print('{} * {:2} = {}'.format(ttable, 7, ttable * 7))
print('{} * {:2} = {}'.format(ttable, 8, ttable * 8))
print('{} * {:2} = {}'.format(ttable, 9, ttable * 9))
print('{} * {:2} = {}'.format(ttable, 10, ttable * 10))
print('-' * 15) | none | 1 | 3.778658 | 4 | |
bot/main.py | Vilsepi/sc2-botto | 0 | 6613062 | from sc2.data import Result
import sc2
from bot.lib.army import ArmyManager
from bot.lib.build import BuildManager
from bot.lib.overlords import OverlordManager
from bot.lib.queens import QueenManager
from bot.lib.train import UnitTrainingManager
from bot.lib.upgrades import UpgradeManager
from bot.lib.workers import WorkerManager
from bot.util.logging import TerminalLogger
class Botto(sc2.BotAI):
async def on_start(self):
self.logger: TerminalLogger = TerminalLogger(self)
self.army_manager: ArmyManager = ArmyManager(self)
self.build_manager: BuildManager = BuildManager(self, self.logger)
self.train_manager: UnitTrainingManager = UnitTrainingManager(self, self.logger)
self.worker_manager: WorkerManager = WorkerManager(self)
self.upgrade_manager: UpgradeManager = UpgradeManager(self, self.logger)
self.overlord_manager: OverlordManager = OverlordManager(self)
self.queen_manager: QueenManager = QueenManager(self, self.logger)
async def on_step(self, iteration: int):
self.army_manager.manage_army(iteration)
if self.train_manager.manage_unit_training_from_larvae():
return
self.train_manager.manage_queen_training()
self.upgrade_manager.manage_tech_upgrades()
await self.build_manager.manage_build_projects()
self.worker_manager.manage_workers()
self.queen_manager.manage_queens()
self.overlord_manager.manage_overlords()
self.train_manager.set_hatchery_rally_points(iteration)
async def on_end(
self, game_result: Result
): # pyright: reportGeneralTypeIssues=false
self.logger.log_end_stats(game_result)
| from sc2.data import Result
import sc2
from bot.lib.army import ArmyManager
from bot.lib.build import BuildManager
from bot.lib.overlords import OverlordManager
from bot.lib.queens import QueenManager
from bot.lib.train import UnitTrainingManager
from bot.lib.upgrades import UpgradeManager
from bot.lib.workers import WorkerManager
from bot.util.logging import TerminalLogger
class Botto(sc2.BotAI):
async def on_start(self):
self.logger: TerminalLogger = TerminalLogger(self)
self.army_manager: ArmyManager = ArmyManager(self)
self.build_manager: BuildManager = BuildManager(self, self.logger)
self.train_manager: UnitTrainingManager = UnitTrainingManager(self, self.logger)
self.worker_manager: WorkerManager = WorkerManager(self)
self.upgrade_manager: UpgradeManager = UpgradeManager(self, self.logger)
self.overlord_manager: OverlordManager = OverlordManager(self)
self.queen_manager: QueenManager = QueenManager(self, self.logger)
async def on_step(self, iteration: int):
self.army_manager.manage_army(iteration)
if self.train_manager.manage_unit_training_from_larvae():
return
self.train_manager.manage_queen_training()
self.upgrade_manager.manage_tech_upgrades()
await self.build_manager.manage_build_projects()
self.worker_manager.manage_workers()
self.queen_manager.manage_queens()
self.overlord_manager.manage_overlords()
self.train_manager.set_hatchery_rally_points(iteration)
async def on_end(
self, game_result: Result
): # pyright: reportGeneralTypeIssues=false
self.logger.log_end_stats(game_result)
| en | 0.59211 | # pyright: reportGeneralTypeIssues=false | 2.312132 | 2 |
backend/todo/migrations/0003_auto_20181210_1024.py | jimbofreedman/naggingnelly-backend | 0 | 6613063 | <gh_stars>0
# Generated by Django 2.0.9 on 2018-12-10 10:24
from django.db import migrations
import recurrence.fields
class Migration(migrations.Migration):
dependencies = [
('todo', '0002_auto_20181210_0833'),
]
operations = [
migrations.AlterModelOptions(
name='todoitem',
options={'ordering': ('order',)},
),
migrations.AddField(
model_name='todoitem',
name='recurrence',
field=recurrence.fields.RecurrenceField(blank=True, null=True),
),
]
| # Generated by Django 2.0.9 on 2018-12-10 10:24
from django.db import migrations
import recurrence.fields
class Migration(migrations.Migration):
dependencies = [
('todo', '0002_auto_20181210_0833'),
]
operations = [
migrations.AlterModelOptions(
name='todoitem',
options={'ordering': ('order',)},
),
migrations.AddField(
model_name='todoitem',
name='recurrence',
field=recurrence.fields.RecurrenceField(blank=True, null=True),
),
] | en | 0.84978 | # Generated by Django 2.0.9 on 2018-12-10 10:24 | 1.616716 | 2 |
python/misc/dataFrameToDatabase/test_dataFrameToDatabase.py | jlucas-esri/Geospatial-Center-Code | 14 | 6613064 | <gh_stars>10-100
from dataFrameToDatabase import DataFrameToDatabase
import pytest
#not implemented yet
class TestDataFrameToDatabase:
pass
| from dataFrameToDatabase import DataFrameToDatabase
import pytest
#not implemented yet
class TestDataFrameToDatabase:
pass | en | 0.640491 | #not implemented yet | 1.117659 | 1 |
ch01/04.py | sai-kaneko-0311/nlp100_2020 | 0 | 6613065 | <filename>ch01/04.py
results = {}
for i, word in enumerate("Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.".split()):
j = i + 1
if j in [1, 5, 6, 7, 8, 9, 15, 16, 19]:
results[word[0]] = j
else:
results[word[:2]] = j
print(results) | <filename>ch01/04.py
results = {}
for i, word in enumerate("Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.".split()):
j = i + 1
if j in [1, 5, 6, 7, 8, 9, 15, 16, 19]:
results[word[0]] = j
else:
results[word[:2]] = j
print(results) | none | 1 | 3.229935 | 3 | |
neurolang/probabilistic/cplogic/tests/test_noisy_or_probability_provenance.py | hndgzkn/NeuroLang | 0 | 6613066 | import collections
from ....relational_algebra import (
NaturalJoin,
Projection,
str2columnstr_constant,
)
from .. import testing
from ..noisy_or_probability_provenance import (
NoisyORProbabilityProvenanceSolver,
)
def test_simple_noisy_or_projection():
prov_col = "foo"
columns = (prov_col, "bar", "baz")
iterable = [
(0.2, "a", "x"),
(0.5, "b", "y"),
(0.1, "a", "z"),
]
prov_set = testing.make_prov_set(iterable, columns)
projection = Projection(prov_set, (str2columnstr_constant("bar"),))
solver = NoisyORProbabilityProvenanceSolver()
result = solver.walk(projection)
expected_tuples = [(1 - 0.8 * 0.9, "a"), (0.5, "b")]
itertuple = collections.namedtuple("tuple", result.value.columns)
assert all(itertuple._make(nt) in result.value for nt in expected_tuples)
def test_noisy_or_projection_and_naturaljoin():
"""
A(x) <- ∃y Q(x, y)
B(x) <- ∃y Z(x, y)
C(x) <- A(x), B(x)
R_Q = | _p_ | x | y | R_Z = | _p_ | x | y |
| 0.2 | x1 | y1 | | 0.6 | x1 | y3 |
| 0.1 | x1 | y2 | | 0.9 | x3 | y3 |
| 0.9 | x2 | y2 |
R_A = | _p_ | x | R_B = | _p_ | x |
| 0.28 | x1 | | 0.6 | x1 |
| 0.9 | x2 | | 0.9 | x3 |
R_C = | _p_ | x |
| 0.168 | x1 |
"""
r_Q = testing.make_prov_set(
[(0.2, "x1", "y1"), (0.1, "x1", "y2"), (0.9, "x2", "y2")],
("_p_", "x", "y"),
)
r_Z = testing.make_prov_set(
[(0.6, "x1", "y3"), (0.9, "x3", "y3")], ("_p_", "x", "y"),
)
r_A = Projection(r_Q, (str2columnstr_constant("x"),))
r_B = Projection(r_Z, (str2columnstr_constant("x"),))
r_C = NaturalJoin(r_A, r_B)
result = NoisyORProbabilityProvenanceSolver().walk(r_A)
expected = testing.make_prov_set(
[(1 - 0.8 * 0.9, "x1"), (0.9, "x2")], ("_p_", "x")
)
assert result == expected
result = NoisyORProbabilityProvenanceSolver().walk(r_B)
expected = testing.make_prov_set([(0.6, "x1"), (0.9, "x3")], ("_p_", "x"))
assert result == expected
result = NoisyORProbabilityProvenanceSolver().walk(r_C)
expected = testing.make_prov_set(
[((1 - (1 - 0.2) * (1 - 0.1)) * 0.6, "x1")], ("_p_", "x"),
)
assert result == expected
| import collections
from ....relational_algebra import (
NaturalJoin,
Projection,
str2columnstr_constant,
)
from .. import testing
from ..noisy_or_probability_provenance import (
NoisyORProbabilityProvenanceSolver,
)
def test_simple_noisy_or_projection():
prov_col = "foo"
columns = (prov_col, "bar", "baz")
iterable = [
(0.2, "a", "x"),
(0.5, "b", "y"),
(0.1, "a", "z"),
]
prov_set = testing.make_prov_set(iterable, columns)
projection = Projection(prov_set, (str2columnstr_constant("bar"),))
solver = NoisyORProbabilityProvenanceSolver()
result = solver.walk(projection)
expected_tuples = [(1 - 0.8 * 0.9, "a"), (0.5, "b")]
itertuple = collections.namedtuple("tuple", result.value.columns)
assert all(itertuple._make(nt) in result.value for nt in expected_tuples)
def test_noisy_or_projection_and_naturaljoin():
"""
A(x) <- ∃y Q(x, y)
B(x) <- ∃y Z(x, y)
C(x) <- A(x), B(x)
R_Q = | _p_ | x | y | R_Z = | _p_ | x | y |
| 0.2 | x1 | y1 | | 0.6 | x1 | y3 |
| 0.1 | x1 | y2 | | 0.9 | x3 | y3 |
| 0.9 | x2 | y2 |
R_A = | _p_ | x | R_B = | _p_ | x |
| 0.28 | x1 | | 0.6 | x1 |
| 0.9 | x2 | | 0.9 | x3 |
R_C = | _p_ | x |
| 0.168 | x1 |
"""
r_Q = testing.make_prov_set(
[(0.2, "x1", "y1"), (0.1, "x1", "y2"), (0.9, "x2", "y2")],
("_p_", "x", "y"),
)
r_Z = testing.make_prov_set(
[(0.6, "x1", "y3"), (0.9, "x3", "y3")], ("_p_", "x", "y"),
)
r_A = Projection(r_Q, (str2columnstr_constant("x"),))
r_B = Projection(r_Z, (str2columnstr_constant("x"),))
r_C = NaturalJoin(r_A, r_B)
result = NoisyORProbabilityProvenanceSolver().walk(r_A)
expected = testing.make_prov_set(
[(1 - 0.8 * 0.9, "x1"), (0.9, "x2")], ("_p_", "x")
)
assert result == expected
result = NoisyORProbabilityProvenanceSolver().walk(r_B)
expected = testing.make_prov_set([(0.6, "x1"), (0.9, "x3")], ("_p_", "x"))
assert result == expected
result = NoisyORProbabilityProvenanceSolver().walk(r_C)
expected = testing.make_prov_set(
[((1 - (1 - 0.2) * (1 - 0.1)) * 0.6, "x1")], ("_p_", "x"),
)
assert result == expected
| en | 0.571827 | A(x) <- ∃y Q(x, y) B(x) <- ∃y Z(x, y) C(x) <- A(x), B(x) R_Q = | _p_ | x | y | R_Z = | _p_ | x | y | | 0.2 | x1 | y1 | | 0.6 | x1 | y3 | | 0.1 | x1 | y2 | | 0.9 | x3 | y3 | | 0.9 | x2 | y2 | R_A = | _p_ | x | R_B = | _p_ | x | | 0.28 | x1 | | 0.6 | x1 | | 0.9 | x2 | | 0.9 | x3 | R_C = | _p_ | x | | 0.168 | x1 | | 2.580493 | 3 |
src/dynamicprogramming/tests/test_subset_sum.py | seahrh/coding-interview | 0 | 6613067 | <reponame>seahrh/coding-interview
from dynamicprogramming.subset_sum import *
class TestSubsetSumDP:
def test_when_nothing_can_fit_then_return_empty_set(self):
assert subset_sum(capacity=1, weights=set()) == set()
def test_two_items_or_less(self):
assert subset_sum(capacity=1, weights={1}) == {1}
assert subset_sum(capacity=1, weights={2}) == set()
assert subset_sum(capacity=2, weights={1}) == set()
assert subset_sum(capacity=4, weights={1, 2}) == set()
assert subset_sum(capacity=3, weights={1, 2}) == {1, 2}
assert subset_sum(capacity=2, weights={1, 2}) == {2}
assert subset_sum(capacity=1, weights={1, 2}) == {1}
def test_case_1(self):
a = subset_sum(capacity=30, weights={5, 10, 12, 13, 15, 18})
assert a == {18, 12} or a == {10, 5, 15} or a == {13, 12, 5}
a = subset_sum(capacity=28, weights={5, 10, 12, 13, 15, 18})
assert a == {10, 13, 5} or a == {10, 18} or a == {13, 15}
assert subset_sum(capacity=29, weights={5, 10, 12, 13, 15, 18}) == set()
def test_case_2(self):
a = subset_sum(capacity=9, weights={3, 34, 4, 12, 5, 2})
assert a == {4, 5} or a == {2, 3, 4}
assert subset_sum(capacity=30, weights={3, 34, 4, 12, 5, 2}) == set()
| from dynamicprogramming.subset_sum import *
class TestSubsetSumDP:
def test_when_nothing_can_fit_then_return_empty_set(self):
assert subset_sum(capacity=1, weights=set()) == set()
def test_two_items_or_less(self):
assert subset_sum(capacity=1, weights={1}) == {1}
assert subset_sum(capacity=1, weights={2}) == set()
assert subset_sum(capacity=2, weights={1}) == set()
assert subset_sum(capacity=4, weights={1, 2}) == set()
assert subset_sum(capacity=3, weights={1, 2}) == {1, 2}
assert subset_sum(capacity=2, weights={1, 2}) == {2}
assert subset_sum(capacity=1, weights={1, 2}) == {1}
def test_case_1(self):
a = subset_sum(capacity=30, weights={5, 10, 12, 13, 15, 18})
assert a == {18, 12} or a == {10, 5, 15} or a == {13, 12, 5}
a = subset_sum(capacity=28, weights={5, 10, 12, 13, 15, 18})
assert a == {10, 13, 5} or a == {10, 18} or a == {13, 15}
assert subset_sum(capacity=29, weights={5, 10, 12, 13, 15, 18}) == set()
def test_case_2(self):
a = subset_sum(capacity=9, weights={3, 34, 4, 12, 5, 2})
assert a == {4, 5} or a == {2, 3, 4}
assert subset_sum(capacity=30, weights={3, 34, 4, 12, 5, 2}) == set() | none | 1 | 3.183011 | 3 | |
handlers/yum.py | ghowland/deployman | 0 | 6613068 | <filename>handlers/yum.py
"""
sysync: handlers: yum
Module installing yum
"""
from utility.log import Log
from utility.error import Error
from utility.run import Run, RunOnCommit
def GetKeys(section_item, options):
"""Returns the key used for the Work List/Data work_key"""
if 'name' not in section_item or section_item['name'] == None:
Error('Section Item does not have a "name" key: %s' % section_item, options)
# Returns List, always a single item for this handler
return [section_item['name']]
def Install(section_item, config, options):
# If we want to install it
if section_item['remove'] == False:
# See if we can check the cache
#TODO(g): Versions cant be checked. Split the names, or always take the penalty of trying to install!!!!
(status, output) = Run('rpm --nofiles --noscripts -V %s' % section_item['name'])
if status != 0:
Log('Yum: %s (%s)' % (section_item,output))
RunOnCommit('/usr/bin/yum install -y %s' % section_item['name'], 'Failed to install yum package: %s' % section_item['name'], options)
# Else, already installed
else:
Log('Yum: %s [installed]' % section_item)
# Else, we want to remove the package
else:
(status, output) = Run('rpm --nofiles --noscripts -V %s' % section_item['name'])
if status == 0:
Log('Yum Remove: %s (%s)' % (section_item,output))
RunOnCommit('/usr/bin/yum remove -y %s' % section_item['name'], 'Failed to install yum package: %s' % section_item['name'], options)
# Else, already not installed
else:
Log('Yum: %s [not installed]' % section_item)
| <filename>handlers/yum.py
"""
sysync: handlers: yum
Module installing yum
"""
from utility.log import Log
from utility.error import Error
from utility.run import Run, RunOnCommit
def GetKeys(section_item, options):
"""Returns the key used for the Work List/Data work_key"""
if 'name' not in section_item or section_item['name'] == None:
Error('Section Item does not have a "name" key: %s' % section_item, options)
# Returns List, always a single item for this handler
return [section_item['name']]
def Install(section_item, config, options):
# If we want to install it
if section_item['remove'] == False:
# See if we can check the cache
#TODO(g): Versions cant be checked. Split the names, or always take the penalty of trying to install!!!!
(status, output) = Run('rpm --nofiles --noscripts -V %s' % section_item['name'])
if status != 0:
Log('Yum: %s (%s)' % (section_item,output))
RunOnCommit('/usr/bin/yum install -y %s' % section_item['name'], 'Failed to install yum package: %s' % section_item['name'], options)
# Else, already installed
else:
Log('Yum: %s [installed]' % section_item)
# Else, we want to remove the package
else:
(status, output) = Run('rpm --nofiles --noscripts -V %s' % section_item['name'])
if status == 0:
Log('Yum Remove: %s (%s)' % (section_item,output))
RunOnCommit('/usr/bin/yum remove -y %s' % section_item['name'], 'Failed to install yum package: %s' % section_item['name'], options)
# Else, already not installed
else:
Log('Yum: %s [not installed]' % section_item)
| en | 0.756309 | sysync: handlers: yum Module installing yum Returns the key used for the Work List/Data work_key # Returns List, always a single item for this handler # If we want to install it # See if we can check the cache #TODO(g): Versions cant be checked. Split the names, or always take the penalty of trying to install!!!! # Else, already installed # Else, we want to remove the package # Else, already not installed | 2.334348 | 2 |
ebl/tests/transliteration/test_parse_image_dollar_line.py | ElectronicBabylonianLiterature/dictionary | 4 | 6613069 | import pytest
from ebl.transliteration.domain.dollar_line import ImageDollarLine
from ebl.transliteration.domain.lark_parser import parse_atf_lark
from ebl.transliteration.domain.text import Text
@pytest.mark.parametrize(
"line,expected_line",
[
("$ (image 1a = great)", ImageDollarLine("1", "a", "great")),
("$(image 1a = great)", ImageDollarLine("1", "a", "great")),
("$ (image 1a = great )", ImageDollarLine("1", "a", "great")),
("$(image 1a = great )", ImageDollarLine("1", "a", "great")),
(
"$ (image 15 = numbered diagram of triangle)",
ImageDollarLine("15", None, "numbered diagram of triangle"),
),
(
"$(image 15 = numbered diagram of triangle)",
ImageDollarLine("15", None, "numbered diagram of triangle"),
),
("$ ((image 1a = great))", ImageDollarLine("1", "a", "great")),
("$((image 1a = great))", ImageDollarLine("1", "a", "great")),
],
)
def test_parse_image_dollar_line(line, expected_line):
assert parse_atf_lark(line).lines == Text.of_iterable([expected_line]).lines
| import pytest
from ebl.transliteration.domain.dollar_line import ImageDollarLine
from ebl.transliteration.domain.lark_parser import parse_atf_lark
from ebl.transliteration.domain.text import Text
@pytest.mark.parametrize(
"line,expected_line",
[
("$ (image 1a = great)", ImageDollarLine("1", "a", "great")),
("$(image 1a = great)", ImageDollarLine("1", "a", "great")),
("$ (image 1a = great )", ImageDollarLine("1", "a", "great")),
("$(image 1a = great )", ImageDollarLine("1", "a", "great")),
(
"$ (image 15 = numbered diagram of triangle)",
ImageDollarLine("15", None, "numbered diagram of triangle"),
),
(
"$(image 15 = numbered diagram of triangle)",
ImageDollarLine("15", None, "numbered diagram of triangle"),
),
("$ ((image 1a = great))", ImageDollarLine("1", "a", "great")),
("$((image 1a = great))", ImageDollarLine("1", "a", "great")),
],
)
def test_parse_image_dollar_line(line, expected_line):
assert parse_atf_lark(line).lines == Text.of_iterable([expected_line]).lines
| none | 1 | 2.430682 | 2 | |
my/mal.py | seanbreckenridge/HPI | 36 | 6613070 | """
Parses the data directory for my MAL export
Uses https://github.com/seanbreckenridge/malexport/
"""
REQUIRES = ["git+https://github.com/seanbreckenridge/malexport"]
# see https://github.com/seanbreckenridge/dotfiles/blob/master/.config/my/my/config/__init__.py for an example
from my.config import mal as user_config # type: ignore[attr-defined]
from pathlib import Path
from datetime import datetime
from typing import Iterator, List, Tuple, NamedTuple
from functools import lru_cache
from my.core import Stats, LazyLogger, PathIsh, dataclass
from my.core.common import mcachew
from my.core.structure import match_structure
from malexport.parse.combine import combine, AnimeData, MangaData
from malexport.parse.forum import Post, iter_forum_posts
@dataclass
class config(user_config):
# path[s]/glob to the exported data
export_path: PathIsh
logger = LazyLogger(__name__, level="warning")
# malexport supports multiple accounts
# in its data directory structure
@lru_cache(maxsize=1)
def export_dirs() -> List[Path]:
base: Path = Path(config.export_path).expanduser().absolute()
with match_structure(base, expected="animelist.xml") as matches:
return list(matches)
def _history_depends_on() -> List[float]:
json_history_files: List[Path] = []
for p in export_dirs():
json_history_files.extend(list((p / "history").rglob("*.json")))
json_history_files.sort()
return [p.lstat().st_mtime for p in json_history_files]
def _forum_depends_on() -> List[float]:
indexes = []
for p in export_dirs():
indexes.append(p / "forum" / "index.json")
return [p.lstat().st_mtime for p in indexes]
Export = Tuple[List[AnimeData], List[MangaData]]
@lru_cache(maxsize=None)
def _read_malexport(username: str) -> Export:
return combine(username)
### Expose all the parsed information from malexport
def anime() -> Iterator[AnimeData]:
for path in export_dirs():
anime, _ = _read_malexport(path.stem)
yield from anime
def manga() -> Iterator[MangaData]:
for path in export_dirs():
_, manga = _read_malexport(path.stem)
yield from manga
class Episode(NamedTuple):
mal_id: int
title: str
episode: int
at: datetime
# use the combined data when reading history
# since it removes entries you may have deleted
# which still have local history files left over
@mcachew(depends_on=_history_depends_on, logger=logger)
def episodes() -> Iterator[Episode]:
for path in export_dirs():
anime, _ = _read_malexport(path.stem)
for a in anime:
for h in a.history:
yield Episode(
mal_id=a.id,
title=a.title,
episode=h.number,
at=h.at,
)
class Chapter(NamedTuple):
mal_id: int
title: str
chapter: int
at: datetime
@mcachew(depends_on=_history_depends_on, logger=logger)
def chapters() -> Iterator[Chapter]:
for path in export_dirs():
_, manga = _read_malexport(path.stem)
for m in manga:
for h in m.history:
yield Chapter(
mal_id=m.id,
title=m.title,
chapter=h.number,
at=h.at,
)
@mcachew(depends_on=_forum_depends_on, logger=logger)
def posts() -> Iterator[Post]:
for path in export_dirs():
yield from iter_forum_posts(path.stem)
def stats() -> Stats:
from my.core import stat
return {
**stat(anime),
**stat(manga),
**stat(chapters),
**stat(episodes),
**stat(posts),
}
| """
Parses the data directory for my MAL export
Uses https://github.com/seanbreckenridge/malexport/
"""
REQUIRES = ["git+https://github.com/seanbreckenridge/malexport"]
# see https://github.com/seanbreckenridge/dotfiles/blob/master/.config/my/my/config/__init__.py for an example
from my.config import mal as user_config # type: ignore[attr-defined]
from pathlib import Path
from datetime import datetime
from typing import Iterator, List, Tuple, NamedTuple
from functools import lru_cache
from my.core import Stats, LazyLogger, PathIsh, dataclass
from my.core.common import mcachew
from my.core.structure import match_structure
from malexport.parse.combine import combine, AnimeData, MangaData
from malexport.parse.forum import Post, iter_forum_posts
@dataclass
class config(user_config):
# path[s]/glob to the exported data
export_path: PathIsh
logger = LazyLogger(__name__, level="warning")
# malexport supports multiple accounts
# in its data directory structure
@lru_cache(maxsize=1)
def export_dirs() -> List[Path]:
base: Path = Path(config.export_path).expanduser().absolute()
with match_structure(base, expected="animelist.xml") as matches:
return list(matches)
def _history_depends_on() -> List[float]:
json_history_files: List[Path] = []
for p in export_dirs():
json_history_files.extend(list((p / "history").rglob("*.json")))
json_history_files.sort()
return [p.lstat().st_mtime for p in json_history_files]
def _forum_depends_on() -> List[float]:
indexes = []
for p in export_dirs():
indexes.append(p / "forum" / "index.json")
return [p.lstat().st_mtime for p in indexes]
Export = Tuple[List[AnimeData], List[MangaData]]
@lru_cache(maxsize=None)
def _read_malexport(username: str) -> Export:
return combine(username)
### Expose all the parsed information from malexport
def anime() -> Iterator[AnimeData]:
for path in export_dirs():
anime, _ = _read_malexport(path.stem)
yield from anime
def manga() -> Iterator[MangaData]:
for path in export_dirs():
_, manga = _read_malexport(path.stem)
yield from manga
class Episode(NamedTuple):
mal_id: int
title: str
episode: int
at: datetime
# use the combined data when reading history
# since it removes entries you may have deleted
# which still have local history files left over
@mcachew(depends_on=_history_depends_on, logger=logger)
def episodes() -> Iterator[Episode]:
for path in export_dirs():
anime, _ = _read_malexport(path.stem)
for a in anime:
for h in a.history:
yield Episode(
mal_id=a.id,
title=a.title,
episode=h.number,
at=h.at,
)
class Chapter(NamedTuple):
mal_id: int
title: str
chapter: int
at: datetime
@mcachew(depends_on=_history_depends_on, logger=logger)
def chapters() -> Iterator[Chapter]:
for path in export_dirs():
_, manga = _read_malexport(path.stem)
for m in manga:
for h in m.history:
yield Chapter(
mal_id=m.id,
title=m.title,
chapter=h.number,
at=h.at,
)
@mcachew(depends_on=_forum_depends_on, logger=logger)
def posts() -> Iterator[Post]:
for path in export_dirs():
yield from iter_forum_posts(path.stem)
def stats() -> Stats:
from my.core import stat
return {
**stat(anime),
**stat(manga),
**stat(chapters),
**stat(episodes),
**stat(posts),
}
| en | 0.733186 | Parses the data directory for my MAL export Uses https://github.com/seanbreckenridge/malexport/ # see https://github.com/seanbreckenridge/dotfiles/blob/master/.config/my/my/config/__init__.py for an example # type: ignore[attr-defined] # path[s]/glob to the exported data # malexport supports multiple accounts # in its data directory structure ### Expose all the parsed information from malexport # use the combined data when reading history # since it removes entries you may have deleted # which still have local history files left over | 2.322939 | 2 |
cockpit/instruments/utils_instruments.py | wx-b/cockpit | 367 | 6613071 | <reponame>wx-b/cockpit
"""Utility functions for the instruments."""
import warnings
import matplotlib as mpl
import seaborn as sns
def create_basic_plot(
    x,
    y,
    data,
    ax,
    EMA="",
    EMA_alpha=0.2,
    x_scale="linear",
    y_scale="linear",
    cmap=None,
    EMA_cmap=None,
    marker="o",
    EMA_marker=",",
    xlabel=None,
    ylabel=None,
    title="",
    xlim=None,
    ylim=None,
    fontweight="normal",
    facecolor=None,
    zero_lines=False,
    center=False,
):
    """Creates a basic plot of x vs. y values for the cockpit.

    Args:
        x (str): Name of the variable in data that should be plotted on the x-axis.
        y (str): Name of the variable in data that should be plotted on the y-axis.
        data (pandas.dataframe): Data Frame containing the plotting data.
            NOTE: if an EMA is requested, the computed ``EMA_*`` columns are
            added to this frame in place.
        ax (matplotlib.axis): Axis where the plot should be created.
        EMA (str, optional): Signifies over which variables an exponentially
            moving average should be computed. E.g. "xy" would be an exponentially
            moving average over both variables. Defaults to "".
        EMA_alpha (float, optional): Decay parameter of the exponentially moving
            average. Defaults to 0.2.
        x_scale (str, optional): Whether to use a linear or log scale for the x-axis.
            Defaults to "linear".
        y_scale (str, optional): Whether to use a linear or log scale for the y-axis.
            Defaults to "linear".
        cmap (matplotlib.cmap, optional): A colormap for the individual data points.
            Defaults to None.
        EMA_cmap (matplotlib.cmap, optional): A colormap for the EMA.
            Defaults to None.
        marker (str, optional): Marker type to use in the plot. Defaults to "o".
        EMA_marker (str, optional): Marker for the EMA. Defaults to ",".
        xlabel (str, optional): Label for the x-axis. Defaults to None, meaning
            it uses `x`.
        ylabel (str, optional): Label for the y-axis. Defaults to None, meaning
            it uses `y`.
        title (str, optional): Title of this subfigure. Defaults to "".
        xlim (str, list, optional): Limits for the x-axis. Can be a (list of)
            strings, None or numbers. "tight" would shrink the x-limits to the
            data, None would use the default scaling, and float would use this
            limit. If it is given as a list, the first value is used as the lower
            bound and the second one as an upper bound. Defaults to None.
        ylim (str, list, optional): Limits for the y-axis. Can be a (list of)
            strings, None or numbers. "tight" would shrink the y-limits to the
            data, None would use the default scaling, and float would use this
            limit. If it is given as a list, the first value is used as the lower
            bound and the second one as an upper bound. Defaults to None.
        fontweight (str, optional): Fontweight of the title. Defaults to "normal".
        facecolor (tuple, optional): Facecolor of the plot. Defaults to None,
            which does not apply any color.
        zero_lines (bool, optional): Whether to highlight the x and y = 0.
            Defaults to False.
        center (bool, optional): Whether to center the limits of the plot.
            Can also be given as a list, where the first element is applied to
            the x-axis and the second to the y-axis. Defaults to False.
    """

    def _scatter(x_col, y_col, palette, point_marker, size):
        """Scatter-plot a column pair, colored by "iteration" when possible.

        Falls back to an uncolored scatter plot when seaborn raises a
        TypeError for the ``hue``-based call (observed with some
        seaborn/data combinations — behavior kept from the original).
        """
        try:
            sns.scatterplot(
                x=x_col,
                y=y_col,
                hue="iteration",
                palette=palette,
                edgecolor=None,
                marker=point_marker,
                s=size,
                data=data,
                ax=ax,
            )
        except TypeError:
            sns.scatterplot(
                x=x_col,
                y=y_col,
                palette=palette,
                edgecolor=None,
                marker=point_marker,
                s=size,
                data=data,
                ax=ax,
            )

    # Raw data points.
    _scatter(x, y, cmap, marker, 10)

    # Save what is being plotted as labels, if not otherwise given.
    xlabel = x if xlabel is None else xlabel
    ylabel = y if ylabel is None else ylabel

    # Optionally overlay an exponentially moving average of the requested
    # variable(s). The EMA columns are written into the caller's ``data``
    # frame (in-place side effect kept for backward compatibility).
    if "y" in EMA:
        data["EMA_" + y] = data[y].ewm(alpha=EMA_alpha, adjust=False).mean()
        y = "EMA_" + y
    if "x" in EMA:
        data["EMA_" + x] = data[x].ewm(alpha=EMA_alpha, adjust=False).mean()
        x = "EMA_" + x
    if EMA != "":
        _scatter(x, y, EMA_cmap, EMA_marker, 1)

    _beautify_plot(
        ax=ax,
        x_scale=x_scale,
        y_scale=y_scale,
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
        xlim=xlim,
        ylim=ylim,
        fontweight=fontweight,
        facecolor=facecolor,
        zero_lines=zero_lines,
        center=center,
    )
def _beautify_plot(
    ax,
    x_scale=None,
    y_scale=None,
    xlabel=None,
    ylabel=None,
    title="",
    xlim=None,
    ylim=None,
    fontweight="normal",
    facecolor=None,
    zero_lines=False,
    center=False,
):
    """Apply title, scales, labels, limits and decorations to an axis.

    All arguments are optional; a ``None`` value leaves the corresponding
    axis property untouched.
    """
    ax.set_title(title, fontweight=fontweight, fontsize="large")

    # Drop any legend (e.g. the one seaborn adds for the hue variable).
    legend = ax.get_legend()
    if legend is not None:
        legend.remove()

    if x_scale is not None:
        ax.set_xscale(x_scale)
    if y_scale is not None:
        ax.set_yscale(y_scale)

    if xlabel is not None:
        label = "Iteration" if xlabel == "iteration" else xlabel
        ax.set_xlabel(label.replace("_", " "))
    if ylabel is not None:
        ax.set_ylabel(ylabel.replace("_", " "))

    xlim, ylim = _compute_plot_limits(ax, xlim, ylim, center)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    if facecolor is not None:
        ax.set_facecolor(facecolor)

    # Highlight the coordinate axes if requested.
    if zero_lines:
        for draw_line in (ax.axvline, ax.axhline):
            draw_line(0, ls="-", color="#ababba", linewidth=1.5, zorder=0)
def _compute_plot_limits(ax, xlim, ylim, center=False):
    """Resolve the requested axis limits of ``ax`` to concrete numbers.

    Args:
        ax (matplotlib.axes): Axis whose limits should be computed.
        xlim: Limit specification for the x-axis. Each bound can be ``None``
            (keep the autoscaled limit), ``"tight"`` (shrink to the data) or
            a number (use as-is). A non-list value is applied to both
            bounds, a two-element list to lower and upper bound.
        ylim: Limit specification for the y-axis, see ``xlim``.
        center (bool or list, optional): Whether to symmetrize the limits
            around zero, per axis. Defaults to False.

    Returns:
        tuple: Resolved ``(xlim, ylim)``, each a two-element list.
    """
    xlim = _extend_input(xlim)
    ylim = _extend_input(ylim)
    center = _extend_input(center)
    lims = [xlim, ylim]

    auto_lims = [ax.get_xlim(), ax.get_ylim()]
    ax.autoscale(enable=True, tight=True)
    tight_lims = [ax.get_xlim(), ax.get_ylim()]

    # Replace the symbolic bounds (None / "tight") by concrete numbers.
    for lim in range(len(lims)):
        for direction in range(len(lims[lim])):
            bound = lims[lim][direction]
            if bound is None:
                lims[lim][direction] = auto_lims[lim][direction]
            elif bound == "tight":
                lims[lim][direction] = tight_lims[lim][direction]
            elif isinstance(bound, (float, int)):
                # Explicit numeric limit, keep it as-is.
                pass
            else:
                warnings.warn(
                    "Unknown input for limits, it is neither None, nor tight,"
                    "nor a float ",
                    stacklevel=1,
                )

    # Symmetrize the limits around zero where requested.
    if center[0]:
        lims[0][1] = abs(max(lims[0], key=abs))
        lims[0][0] = -lims[0][1]
    if center[1]:
        lims[1][1] = abs(max(lims[1], key=abs))
        lims[1][0] = -lims[1][1]

    return lims[0], lims[1]
def _extend_input(shortend_input):
# extend shortend inputs
if type(shortend_input) is list:
pass
elif shortend_input is None:
shortend_input = [None, None]
elif shortend_input == "tight":
shortend_input = ["tight", "tight"]
elif shortend_input:
shortend_input = [True, True]
else:
shortend_input = [False, False]
return shortend_input
def _add_last_value_to_legend(ax, percentage=False):
"""Adds the last value of each line to the legend.
This function takes every line in a plot, checks its last value and adds it
in brackets to the corresponding label in the legend.
Args:
ax (matplotlib.axes): Axis of a matplotlib figure
percentage (bool): Whether the value represents a percentage
"""
# Formating
if percentage:
formating = "{0}: ({1:.2%})"
else:
formating = "{0}: ({1:.1E})"
# Fix Legend
lines, labels = ax.get_legend_handles_labels()
plot_labels = []
for line, label in zip(lines, labels):
plot_labels.append(formating.format(label, line.get_ydata()[-1]))
ax.get_legend().remove()
ax.legend(lines, plot_labels)
def check_data(data, requires, min_elements=1):
    """Checks if all elements of requires are available in data.

    Args:
        data (pandas.DataFrame): A dataframe holding the data.
        requires ([str]): A list of strings that should be part of data.
        min_elements (int, optional): Minimal number of non-NaN elements a
            required column must contain. Defaults to 1. Plotting generally
            needs enough elements so that seaborn can apply its colormap.

    Returns:
        bool: Check whether all elements of requires exist in data
    """
    # A required column passes only if it exists and holds enough values.
    return all(
        r in data.columns and len(data[r].dropna()) >= min_elements
        for r in requires
    )
def _ticks_formatter(ticklabels, format_str="{:.2f}"):
    """Format the ticklabels.

    Args:
        ticklabels ([mpl.text.Text]): List of ticklabels.
        format_str (str, optional): Formatting string for the labels.
            Defaults to "{:.2f}".

    Returns:
        [mpl.text.Text]: Reformatted list of ticklabels.
    """
    # Rebuild each tick at its original position with a reformatted label.
    return [
        mpl.text.Text(
            *tick.get_position(), format_str.format(float(tick.get_text()))
        )
        for tick in ticklabels
    ]
| """Utility functions for the instruments."""
import warnings
import matplotlib as mpl
import seaborn as sns
def create_basic_plot(
    x,
    y,
    data,
    ax,
    EMA="",
    EMA_alpha=0.2,
    x_scale="linear",
    y_scale="linear",
    cmap=None,
    EMA_cmap=None,
    marker="o",
    EMA_marker=",",
    xlabel=None,
    ylabel=None,
    title="",
    xlim=None,
    ylim=None,
    fontweight="normal",
    facecolor=None,
    zero_lines=False,
    center=False,
):
    """Creates a basic plot of x vs. y values for the cockpit.

    Args:
        x (str): Name of the variable in data that should be plotted on the x-axis.
        y (str): Name of the variable in data that should be plotted on the y-axis.
        data (pandas.dataframe): Data Frame containing the plotting data.
            NOTE: if an EMA is requested, the computed ``EMA_*`` columns are
            added to this frame in place.
        ax (matplotlib.axis): Axis where the plot should be created.
        EMA (str, optional): Signifies over which variables an exponentially
            moving average should be computed. E.g. "xy" would be an exponentially
            moving average over both variables. Defaults to "".
        EMA_alpha (float, optional): Decay parameter of the exponentially moving
            average. Defaults to 0.2.
        x_scale (str, optional): Whether to use a linear or log scale for the x-axis.
            Defaults to "linear".
        y_scale (str, optional): Whether to use a linear or log scale for the y-axis.
            Defaults to "linear".
        cmap (matplotlib.cmap, optional): A colormap for the individual data points.
            Defaults to None.
        EMA_cmap (matplotlib.cmap, optional): A colormap for the EMA.
            Defaults to None.
        marker (str, optional): Marker type to use in the plot. Defaults to "o".
        EMA_marker (str, optional): Marker for the EMA. Defaults to ",".
        xlabel (str, optional): Label for the x-axis. Defaults to None, meaning
            it uses `x`.
        ylabel (str, optional): Label for the y-axis. Defaults to None, meaning
            it uses `y`.
        title (str, optional): Title of this subfigure. Defaults to "".
        xlim (str, list, optional): Limits for the x-axis. Can be a (list of)
            strings, None or numbers. "tight" would shrink the x-limits to the
            data, None would use the default scaling, and float would use this
            limit. If it is given as a list, the first value is used as the lower
            bound and the second one as an upper bound. Defaults to None.
        ylim (str, list, optional): Limits for the y-axis. Can be a (list of)
            strings, None or numbers. "tight" would shrink the y-limits to the
            data, None would use the default scaling, and float would use this
            limit. If it is given as a list, the first value is used as the lower
            bound and the second one as an upper bound. Defaults to None.
        fontweight (str, optional): Fontweight of the title. Defaults to "normal".
        facecolor (tuple, optional): Facecolor of the plot. Defaults to None,
            which does not apply any color.
        zero_lines (bool, optional): Whether to highlight the x and y = 0.
            Defaults to False.
        center (bool, optional): Whether to center the limits of the plot.
            Can also be given as a list, where the first element is applied to
            the x-axis and the second to the y-axis. Defaults to False.
    """

    def _scatter(x_col, y_col, palette, point_marker, size):
        """Scatter-plot a column pair, colored by "iteration" when possible.

        Falls back to an uncolored scatter plot when seaborn raises a
        TypeError for the ``hue``-based call (observed with some
        seaborn/data combinations — behavior kept from the original).
        """
        try:
            sns.scatterplot(
                x=x_col,
                y=y_col,
                hue="iteration",
                palette=palette,
                edgecolor=None,
                marker=point_marker,
                s=size,
                data=data,
                ax=ax,
            )
        except TypeError:
            sns.scatterplot(
                x=x_col,
                y=y_col,
                palette=palette,
                edgecolor=None,
                marker=point_marker,
                s=size,
                data=data,
                ax=ax,
            )

    # Raw data points.
    _scatter(x, y, cmap, marker, 10)

    # Save what is being plotted as labels, if not otherwise given.
    xlabel = x if xlabel is None else xlabel
    ylabel = y if ylabel is None else ylabel

    # Optionally overlay an exponentially moving average of the requested
    # variable(s). The EMA columns are written into the caller's ``data``
    # frame (in-place side effect kept for backward compatibility).
    if "y" in EMA:
        data["EMA_" + y] = data[y].ewm(alpha=EMA_alpha, adjust=False).mean()
        y = "EMA_" + y
    if "x" in EMA:
        data["EMA_" + x] = data[x].ewm(alpha=EMA_alpha, adjust=False).mean()
        x = "EMA_" + x
    if EMA != "":
        _scatter(x, y, EMA_cmap, EMA_marker, 1)

    _beautify_plot(
        ax=ax,
        x_scale=x_scale,
        y_scale=y_scale,
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
        xlim=xlim,
        ylim=ylim,
        fontweight=fontweight,
        facecolor=facecolor,
        zero_lines=zero_lines,
        center=center,
    )
def _beautify_plot(
    ax,
    x_scale=None,
    y_scale=None,
    xlabel=None,
    ylabel=None,
    title="",
    xlim=None,
    ylim=None,
    fontweight="normal",
    facecolor=None,
    zero_lines=False,
    center=False,
):
    """Apply title, scales, labels, limits and decorations to an axis.

    All arguments are optional; a ``None`` value leaves the corresponding
    axis property untouched.
    """
    ax.set_title(title, fontweight=fontweight, fontsize="large")

    # Drop any legend (e.g. the one seaborn adds for the hue variable).
    legend = ax.get_legend()
    if legend is not None:
        legend.remove()

    if x_scale is not None:
        ax.set_xscale(x_scale)
    if y_scale is not None:
        ax.set_yscale(y_scale)

    if xlabel is not None:
        label = "Iteration" if xlabel == "iteration" else xlabel
        ax.set_xlabel(label.replace("_", " "))
    if ylabel is not None:
        ax.set_ylabel(ylabel.replace("_", " "))

    xlim, ylim = _compute_plot_limits(ax, xlim, ylim, center)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    if facecolor is not None:
        ax.set_facecolor(facecolor)

    # Highlight the coordinate axes if requested.
    if zero_lines:
        for draw_line in (ax.axvline, ax.axhline):
            draw_line(0, ls="-", color="#ababba", linewidth=1.5, zorder=0)
def _compute_plot_limits(ax, xlim, ylim, center=False):
    """Resolve user-specified axis limits against matplotlib's automatic ones.

    Each limit entry may be ``None`` (use matplotlib's current automatic
    limit), ``"tight"`` (shrink to the data), or a number (used verbatim).
    Scalar inputs are expanded to per-direction pairs via ``_extend_input``.

    Args:
        ax (matplotlib.axes): Axis whose current and tight limits are read.
        xlim: Limit specification for the x-axis (scalar or two-element list).
        ylim: Limit specification for the y-axis (scalar or two-element list).
        center (bool or list, optional): Whether to make the limits of an
            axis symmetric around zero. Defaults to False.

    Returns:
        tuple: The resolved ``(xlim, ylim)`` two-element lists.
    """
    xlim = _extend_input(xlim)
    ylim = _extend_input(ylim)
    center = _extend_input(center)
    lims = [xlim, ylim]
    # Limits chosen by matplotlib before, and after, tight autoscaling.
    auto_lims = [ax.get_xlim(), ax.get_ylim()]
    # NOTE(review): this switches the axis to tight autoscaling and never
    # restores the previous autoscale state — confirm this is intended.
    ax.autoscale(enable=True, tight=True)
    tight_limits = [ax.get_xlim(), ax.get_ylim()]
    # Replace each entry according to its specification.
    for axis_idx in range(len(lims)):
        for direction in range(len(lims[axis_idx])):
            value = lims[axis_idx][direction]
            if value is None:
                lims[axis_idx][direction] = auto_lims[axis_idx][direction]
            elif value == "tight":
                lims[axis_idx][direction] = tight_limits[axis_idx][direction]
            elif isinstance(value, (int, float)) and not isinstance(value, bool):
                # Explicit numeric limit: keep as-is. bools are excluded so
                # that, as before, they fall through to the warning below.
                pass
            else:
                warnings.warn(
                    "Unknown input for limits, it is neither None, nor tight,"
                    "nor a float ",
                    stacklevel=1,
                )
    # Optionally make each axis symmetric around zero.
    if center[0]:
        lims[0][1] = abs(max(lims[0], key=abs))
        lims[0][0] = -lims[0][1]
    if center[1]:
        lims[1][1] = abs(max(lims[1], key=abs))
        lims[1][0] = -lims[1][1]
    return lims[0], lims[1]
def _extend_input(shortend_input):
# extend shortend inputs
if type(shortend_input) is list:
pass
elif shortend_input is None:
shortend_input = [None, None]
elif shortend_input == "tight":
shortend_input = ["tight", "tight"]
elif shortend_input:
shortend_input = [True, True]
else:
shortend_input = [False, False]
return shortend_input
def _add_last_value_to_legend(ax, percentage=False):
"""Adds the last value of each line to the legend.
This function takes every line in a plot, checks its last value and adds it
in brackets to the corresponding label in the legend.
Args:
ax (matplotlib.axes): Axis of a matplotlib figure
percentage (bool): Whether the value represents a percentage
"""
# Formating
if percentage:
formating = "{0}: ({1:.2%})"
else:
formating = "{0}: ({1:.1E})"
# Fix Legend
lines, labels = ax.get_legend_handles_labels()
plot_labels = []
for line, label in zip(lines, labels):
plot_labels.append(formating.format(label, line.get_ydata()[-1]))
ax.get_legend().remove()
ax.legend(lines, plot_labels)
def check_data(data, requires, min_elements=1):
    """Checks if all elements of requires are available in data.

    Args:
        data (pandas.DataFrame): A dataframe holding the data.
        requires ([str]): A list of strings that should be part of data.
        min_elements (int, optional): Minimal number of non-NaN elements
            required for plotting. Defaults to 1. (In general at least two
            elements are necessary so that seaborn can apply its colormap.)

    Returns:
        bool: Whether every element of ``requires`` exists in ``data`` with
        at least ``min_elements`` non-NaN entries.
    """
    for column in requires:
        # Fail if the column is missing entirely ...
        if column not in data.columns:
            return False
        # ... or present but with too few non-NaN values.
        if len(data[column].dropna()) < min_elements:
            return False
    return True
def _ticks_formatter(ticklabels, format_str="{:.2f}"):
"""Format the ticklabels.
Args:
ticklabels ([mpl.text.Text]): List of ticklabels.
format_str (str, optional): Formatting string for the labels.
Defaults to "{:.2f}".
Returns:
[mpl.text.Text]: Reformatted list of ticklabels.
"""
new_ticks = []
for tick in ticklabels:
rounded_label = format_str.format(float(tick.get_text()))
new_tick = mpl.text.Text(*tick.get_position(), rounded_label)
new_ticks.append(new_tick)
return new_ticks | en | 0.773636 | Utility functions for the instruments. Creates a basic plot of x vs. y values for the cockpit. Args: x (str): Name of the variable in data that should be plotted on the x-axis. y (str): Name of the variable in data that should be plotted on the y-axis. data (pandas.dataframe): Data Frame containing the plotting data. ax (matplotlib.axis): Axis where the plot should be created. EMA (str, optional): Signifies over which variables an exponentially moving average should be computed.E.g. "xy" would be an exponentially moving average over both variables. Defaults to "". EMA_alpha (float, optional): Decay parameter of the exponentially moving average. Defaults to 0.2. x_scale (str, optional): Whether to use a linear or log scale for the x-axis. Defaults to "linear". y_scale (str, optional): Whether to use a linear or log scale for the y-axis. Defaults to "linear". cmap (matplotlib.cmap, optional): A colormap for the individual data points. Defaults to None. EMA_cmap (matplotlib.cmap, optional): A colormap for the EMA. Defaults to None. marker (str, optional): Marker type to use in the plot. Defaults to "o". EMA_marker (str, optional): Marker for the EMA. Defaults to ",". xlabel (str, optional): Label for the x-axis. Defaults to None, meaning it uses `x`. ylabel (str, optional): Label for the y-axis. Defaults to None, meaning it uses `y`. title (str, optional): Title of this subfigure. Defaults to "". xlim (str, list, optional): Limits for the x-axis. Can be a (list of) strings, None or numbers. "tight" would shrink the x-limits to the data, None would use the default scaling, and float would use this limit. If it is given as a list, the first value is used as the lower bound and the second one as an upper bound. Defaults to None. ylim (str, list, optional): Limits for the y-axis. Can be a (list of) strings, None or numbers. "tight" would shrink the y-limits to the data, None would use the default scaling, and float would use this limit. 
If it is given as a list, the first value is used as the lower bound and the second one as an upper bound. Defaults to None. fontweight (str, optional): Fontweight of the title. Defaults to "normal". facecolor (tuple, optional): Facecolor of the plot. Defaults to None, which does not apply any color. zero_lines (bool, optional): Whether to highligh the x and y = 0. Defaults to False. center (bool, optional): Whether to center the limits of the plot. Can also be given as a list, where the first element is applied to the x-axis and the second to the y-axis. Defaults to False. # Save what is being ploted as labels, if not otherwise given # Zero lines # replace values according to inputs # extend shortend inputs Adds the last value of each line to the legend. This function takes every line in a plot, checks its last value and adds it in brackets to the corresponding label in the legend. Args: ax (matplotlib.axes): Axis of a matplotlib figure percentage (bool): Whether the value represents a percentage # Formating # Fix Legend Checks if all elements of requires are available in data. Args: data (pandas.DataFrame): A dataframe holding the data. requires ([str]): A list of string that should be part of data. min_elements (int, optional): Minimal number of elements required for plotting. Defaults to 2. This is in general necessary, so that seaborn can apply its colormap. Returns: bool: Check whether all elements of requires exist in data # Check fails if element does not exists in the data frame # Or if it exists but has not enough elements Format the ticklabels. Args: ticklabels ([mpl.text.Text]): List of ticklabels. format_str (str, optional): Formatting string for the labels. Defaults to "{:.2f}". Returns: [mpl.text.Text]: Reformatted list of ticklabels. | 3.342644 | 3 |
third_party/blink/renderer/core/frame/PRESUBMIT.py | zipated/src | 2,151 | 6613072 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Blink frame presubmit script
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
CSS_PROPERTY_ID_HEADER_PATH = (
'third_party/blink/public/mojom/use_counter/css_property_id.mojom')
def _RunUseCounterChecks(input_api, output_api):
  """Check that UseCounter's largest CSSProperty bucket id stays in sync
  with kMaximumCSSSampleId in the css_property_id mojom header.

  Returns a single-element list with a presubmit message on mismatch (an
  error when committing, otherwise a prompt warning) and an empty list when
  either file is not part of the change or the ids agree.
  """
  use_counter_cpp_file = next(
      (f for f in input_api.AffectedFiles()
       if f.LocalPath().endswith('use_counter.cc')), None)
  if use_counter_cpp_file is None:
    return []
  # Matches lines such as "case CSSPropertyGrid: return 453;"
  bucket_finder = input_api.re.compile(
      r'case CSSProperty\w*?:\s+?return (\d+);',
      input_api.re.MULTILINE)
  # Matches lines such as "const int32 kMaximumCSSSampleId = 452;"
  expected_max_finder = input_api.re.compile(
      r'const int32 kMaximumCSSSampleId = (\d+);')
  header_file = next(
      (f for f in input_api.change.AffectedFiles()
       if f.AbsoluteLocalPath().endswith(CSS_PROPERTY_ID_HEADER_PATH)), None)
  if header_file is None:
    return []
  expected_max_match = expected_max_finder.search(
      '\n'.join(header_file.NewContents()))
  # Without a match the expected maximum stays 0, which forces a report.
  expected_max_bucket = 0
  if expected_max_match:
    expected_max_bucket = int(expected_max_match.group(1))
  largest_found_bucket = 0
  for bucket_match in bucket_finder.finditer(
      '\n'.join(use_counter_cpp_file.NewContents())):
    largest_found_bucket = max(largest_found_bucket,
                               int(bucket_match.group(1)))
  if largest_found_bucket == expected_max_bucket:
    return []
  if input_api.is_committing:
    message_type = output_api.PresubmitError
  else:
    message_type = output_api.PresubmitPromptWarning
  return [message_type(
      'Largest found CSSProperty bucket Id (%d) does not match '
      'maximumCSSSampleId (%d)' % (
          largest_found_bucket, expected_max_bucket),
      items=[use_counter_cpp_file.LocalPath(),
             CSS_PROPERTY_ID_HEADER_PATH])]
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook invoked when a change is uploaded for review."""
  return list(_RunUseCounterChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook invoked when a change is committed."""
  return list(_RunUseCounterChecks(input_api, output_api))
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Blink frame presubmit script
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
CSS_PROPERTY_ID_HEADER_PATH = (
'third_party/blink/public/mojom/use_counter/css_property_id.mojom')
def _RunUseCounterChecks(input_api, output_api):
    """Verify that the largest CSSProperty UseCounter bucket id matches
    kMaximumCSSSampleId in the css_property_id mojom header.

    Returns a single-element list with a presubmit message on mismatch
    (an error when committing, otherwise a prompt warning), and an empty
    list when either file is not part of the change or the ids agree.
    """

    def _first_match(affected_files, predicate):
        # First affected file satisfying |predicate|, or None.
        for affected in affected_files:
            if predicate(affected):
                return affected
        return None

    use_counter_cpp_file = _first_match(
        input_api.AffectedFiles(),
        lambda f: f.LocalPath().endswith('use_counter.cc'))
    if use_counter_cpp_file is None:
        return []
    # Matches lines such as "case CSSPropertyGrid: return 453;"
    bucket_finder = input_api.re.compile(
        r'case CSSProperty\w*?:\s+?return (\d+);',
        input_api.re.MULTILINE)
    # Matches lines such as "const int32 kMaximumCSSSampleId = 452;"
    expected_max_finder = input_api.re.compile(
        r'const int32 kMaximumCSSSampleId = (\d+);')
    header_file = _first_match(
        input_api.change.AffectedFiles(),
        lambda f: f.AbsoluteLocalPath().endswith(CSS_PROPERTY_ID_HEADER_PATH))
    if header_file is None:
        return []
    expected_max_match = expected_max_finder.search(
        '\n'.join(header_file.NewContents()))
    # Without a match the expected maximum stays 0, forcing a report.
    expected_max_bucket = 0
    if expected_max_match:
        expected_max_bucket = int(expected_max_match.group(1))
    largest_found_bucket = 0
    for bucket_match in bucket_finder.finditer(
            '\n'.join(use_counter_cpp_file.NewContents())):
        largest_found_bucket = max(
            largest_found_bucket, int(bucket_match.group(1)))
    if largest_found_bucket == expected_max_bucket:
        return []
    message_type = (output_api.PresubmitError if input_api.is_committing
                    else output_api.PresubmitPromptWarning)
    return [message_type(
        'Largest found CSSProperty bucket Id (%d) does not match '
        'maximumCSSSampleId (%d)' % (
            largest_found_bucket, expected_max_bucket),
        items=[use_counter_cpp_file.LocalPath(),
               CSS_PROPERTY_ID_HEADER_PATH])]
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook run on upload; delegates to the UseCounter check."""
    findings = []
    findings += _RunUseCounterChecks(input_api, output_api)
    return findings
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook run on commit; delegates to the UseCounter check."""
    findings = []
    findings += _RunUseCounterChecks(input_api, output_api)
    return findings
| en | 0.773493 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Blink frame presubmit script See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into gcl. # Looking for a line like "case CSSPropertyGrid: return 453;" # Looking for a line like "const int32 kMaximumCSSSampleId = 452;" | 1.892421 | 2 |
src/pyfx/model/common/jsonpath/JSONPathParser.py | cielong/pyfx | 9 | 6613073 | # Generated from JSONPath.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN for the JSONPath grammar as a string.

    The payload below was emitted by the ANTLR 4.8 code generator and is
    deserialized by ``ATNDeserializer`` when ``JSONPathParser`` is defined.
    Do not edit it by hand; regenerate from JSONPath.g4 instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\26")
        buf.write("\u00a1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\7\2%\n\2\f")
        buf.write("\2\16\2(\13\2\3\2\3\2\3\3\3\3\5\3.\n\3\3\4\3\4\3\4\3\4")
        buf.write("\5\4\64\n\4\3\5\3\5\3\5\3\5\5\5:\n\5\3\5\3\5\5\5>\n\5")
        buf.write("\3\5\3\5\5\5B\n\5\3\5\3\5\5\5F\n\5\3\5\5\5I\n\5\3\6\3")
        buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\5\6`\n\6\3\7\3\7\3\7\3\7\3\7")
        buf.write("\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n\3\n\6\ns")
        buf.write("\n\n\r\n\16\nt\3\n\3\n\3\13\3\13\5\13{\n\13\3\13\3\13")
        buf.write("\5\13\177\n\13\3\13\3\13\5\13\u0083\n\13\3\13\3\13\3\f")
        buf.write("\3\f\3\f\5\f\u008a\n\f\3\f\3\f\5\f\u008e\n\f\3\f\5\f\u0091")
        buf.write("\n\f\3\r\3\r\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3")
        buf.write("\20\3\20\3\21\3\21\3\21\2\2\22\2\4\6\b\n\f\16\20\22\24")
        buf.write("\26\30\32\34\36 \2\4\3\2\b\n\3\2\23\24\2\u00a6\2\"\3\2")
        buf.write("\2\2\4-\3\2\2\2\6\63\3\2\2\2\bH\3\2\2\2\n_\3\2\2\2\fa")
        buf.write("\3\2\2\2\16f\3\2\2\2\20k\3\2\2\2\22n\3\2\2\2\24x\3\2\2")
        buf.write("\2\26\u0090\3\2\2\2\30\u0092\3\2\2\2\32\u0094\3\2\2\2")
        buf.write("\34\u0098\3\2\2\2\36\u009c\3\2\2\2 \u009e\3\2\2\2\"&\7")
        buf.write("\17\2\2#%\5\4\3\2$#\3\2\2\2%(\3\2\2\2&$\3\2\2\2&\'\3\2")
        buf.write("\2\2\')\3\2\2\2(&\3\2\2\2)*\7\2\2\3*\3\3\2\2\2+.\5\b\5")
        buf.write("\2,.\5\6\4\2-+\3\2\2\2-,\3\2\2\2.\5\3\2\2\2/\60\7\22\2")
        buf.write("\2\60\64\5\30\r\2\61\62\7\22\2\2\62\64\5\32\16\2\63/\3")
        buf.write("\2\2\2\63\61\3\2\2\2\64\7\3\2\2\2\65I\5\26\f\2\66\67\7")
        buf.write("\21\2\2\67I\5\36\20\28:\7\21\2\298\3\2\2\29:\3\2\2\2:")
        buf.write(";\3\2\2\2;I\5 \21\2<>\7\21\2\2=<\3\2\2\2=>\3\2\2\2>?\3")
        buf.write("\2\2\2?I\5\n\6\2@B\7\21\2\2A@\3\2\2\2AB\3\2\2\2BC\3\2")
        buf.write("\2\2CI\5\24\13\2DF\7\21\2\2ED\3\2\2\2EF\3\2\2\2FG\3\2")
        buf.write("\2\2GI\5\22\n\2H\65\3\2\2\2H\66\3\2\2\2H9\3\2\2\2H=\3")
        buf.write("\2\2\2HA\3\2\2\2HE\3\2\2\2I\t\3\2\2\2JK\7\3\2\2KL\7\4")
        buf.write("\2\2LM\7\5\2\2MN\5\f\7\2NO\7\6\2\2OP\7\7\2\2P`\3\2\2\2")
        buf.write("QR\7\3\2\2RS\7\4\2\2ST\7\5\2\2TU\5\16\b\2UV\7\6\2\2VW")
        buf.write("\7\7\2\2W`\3\2\2\2XY\7\3\2\2YZ\7\4\2\2Z[\7\5\2\2[\\\5")
        buf.write("\20\t\2\\]\7\6\2\2]^\7\7\2\2^`\3\2\2\2_J\3\2\2\2_Q\3\2")
        buf.write("\2\2_X\3\2\2\2`\13\3\2\2\2ab\7\20\2\2bc\5\26\f\2cd\t\2")
        buf.write("\2\2de\7\25\2\2e\r\3\2\2\2fg\7\20\2\2gh\5\26\f\2hi\7\n")
        buf.write("\2\2ij\7\24\2\2j\17\3\2\2\2kl\7\20\2\2lm\5\26\f\2m\21")
        buf.write("\3\2\2\2no\7\3\2\2or\t\3\2\2pq\7\13\2\2qs\t\3\2\2rp\3")
        buf.write("\2\2\2st\3\2\2\2tr\3\2\2\2tu\3\2\2\2uv\3\2\2\2vw\7\7\2")
        buf.write("\2w\23\3\2\2\2xz\7\3\2\2y{\7\25\2\2zy\3\2\2\2z{\3\2\2")
        buf.write("\2{|\3\2\2\2|~\7\f\2\2}\177\7\25\2\2~}\3\2\2\2~\177\3")
        buf.write("\2\2\2\177\u0082\3\2\2\2\u0080\u0081\7\f\2\2\u0081\u0083")
        buf.write("\7\25\2\2\u0082\u0080\3\2\2\2\u0082\u0083\3\2\2\2\u0083")
        buf.write("\u0084\3\2\2\2\u0084\u0085\7\7\2\2\u0085\25\3\2\2\2\u0086")
        buf.write("\u0087\7\21\2\2\u0087\u0091\5\30\r\2\u0088\u008a\7\21")
        buf.write("\2\2\u0089\u0088\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u008b")
        buf.write("\3\2\2\2\u008b\u0091\5\32\16\2\u008c\u008e\7\21\2\2\u008d")
        buf.write("\u008c\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f\3\2\2\2")
        buf.write("\u008f\u0091\5\34\17\2\u0090\u0086\3\2\2\2\u0090\u0089")
        buf.write("\3\2\2\2\u0090\u008d\3\2\2\2\u0091\27\3\2\2\2\u0092\u0093")
        buf.write("\7\23\2\2\u0093\31\3\2\2\2\u0094\u0095\7\3\2\2\u0095\u0096")
        buf.write("\t\3\2\2\u0096\u0097\7\7\2\2\u0097\33\3\2\2\2\u0098\u0099")
        buf.write("\7\3\2\2\u0099\u009a\7\25\2\2\u009a\u009b\7\7\2\2\u009b")
        buf.write("\35\3\2\2\2\u009c\u009d\7\r\2\2\u009d\37\3\2\2\2\u009e")
        buf.write("\u009f\7\16\2\2\u009f!\3\2\2\2\22&-\639=AEH_tz~\u0082")
        buf.write("\u0089\u008d\u0090")
        return buf.getvalue()
class JSONPathParser(Parser):
grammarFileName = "JSONPath.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
sharedContextCache = PredictionContextCache()
literalNames = ["<INVALID>", "'['", "'?'", "'('", "')'", "']'", "'>'",
"'<'", "'=='", "','", "':'", "'*'", "'[*]'", "'$'",
"'@'", "'.'", "'..'"]
symbolicNames = [
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"ROOT",
"CURRENT",
"SINGLE_DOT",
"DOUBLE_DOT",
"LETTER",
"STRING",
"INT",
"WS"]
RULE_jsonpath = 0
RULE_expression = 1
RULE_doubleDotExpression = 2
RULE_singleDotExpression = 3
RULE_filters = 4
RULE_numericFilter = 5
RULE_stringFilter = 6
RULE_booleanFilter = 7
RULE_union = 8
RULE_arraySlice = 9
RULE_fieldAccessor = 10
RULE_field = 11
RULE_bracketField = 12
RULE_arrayIndex = 13
RULE_wildcard = 14
RULE_bracketWildcard = 15
ruleNames = [
"jsonpath",
"expression",
"doubleDotExpression",
"singleDotExpression",
"filters",
"numericFilter",
"stringFilter",
"booleanFilter",
"union",
"arraySlice",
"fieldAccessor",
"field",
"bracketField",
"arrayIndex",
"wildcard",
"bracketWildcard"]
EOF = Token.EOF
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
ROOT = 13
CURRENT = 14
SINGLE_DOT = 15
DOUBLE_DOT = 16
LETTER = 17
STRING = 18
INT = 19
WS = 20
def __init__(self, input: TokenStream, output: TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(
self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class JsonpathContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def ROOT(self):
return self.getToken(JSONPathParser.ROOT, 0)
def EOF(self):
return self.getToken(JSONPathParser.EOF, 0)
def expression(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(
JSONPathParser.ExpressionContext)
else:
return self.getTypedRuleContext(
JSONPathParser.ExpressionContext, i)
def getRuleIndex(self):
return JSONPathParser.RULE_jsonpath
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterJsonpath"):
listener.enterJsonpath(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitJsonpath"):
listener.exitJsonpath(self)
def jsonpath(self):
localctx = JSONPathParser.JsonpathContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_jsonpath)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 32
self.match(JSONPathParser.ROOT)
self.state = 36
self._errHandler.sync(self)
_la = self._input.LA(1)
while (
(
(_la) & ~0x3f) == 0 and (
(1 << _la) & (
(1 << JSONPathParser.T__0) | (
1 << JSONPathParser.T__11) | (
1 << JSONPathParser.SINGLE_DOT) | (
1 << JSONPathParser.DOUBLE_DOT))) != 0):
self.state = 33
self.expression()
self.state = 38
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 39
self.match(JSONPathParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def singleDotExpression(self):
return self.getTypedRuleContext(
JSONPathParser.SingleDotExpressionContext, 0)
def doubleDotExpression(self):
return self.getTypedRuleContext(
JSONPathParser.DoubleDotExpressionContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_expression
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterExpression"):
listener.enterExpression(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitExpression"):
listener.exitExpression(self)
def expression(self):
localctx = JSONPathParser.ExpressionContext(
self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_expression)
try:
self.state = 43
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [JSONPathParser.T__0, JSONPathParser.T__11,
JSONPathParser.SINGLE_DOT]:
self.enterOuterAlt(localctx, 1)
self.state = 41
self.singleDotExpression()
pass
elif token in [JSONPathParser.DOUBLE_DOT]:
self.enterOuterAlt(localctx, 2)
self.state = 42
self.doubleDotExpression()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DoubleDotExpressionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def DOUBLE_DOT(self):
return self.getToken(JSONPathParser.DOUBLE_DOT, 0)
def field(self):
return self.getTypedRuleContext(JSONPathParser.FieldContext, 0)
def bracketField(self):
return self.getTypedRuleContext(
JSONPathParser.BracketFieldContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_doubleDotExpression
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDoubleDotExpression"):
listener.enterDoubleDotExpression(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDoubleDotExpression"):
listener.exitDoubleDotExpression(self)
def doubleDotExpression(self):
localctx = JSONPathParser.DoubleDotExpressionContext(
self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_doubleDotExpression)
try:
self.state = 49
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 2, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 45
self.match(JSONPathParser.DOUBLE_DOT)
self.state = 46
self.field()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 47
self.match(JSONPathParser.DOUBLE_DOT)
self.state = 48
self.bracketField()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SingleDotExpressionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def fieldAccessor(self):
return self.getTypedRuleContext(
JSONPathParser.FieldAccessorContext, 0)
def SINGLE_DOT(self):
return self.getToken(JSONPathParser.SINGLE_DOT, 0)
def wildcard(self):
return self.getTypedRuleContext(JSONPathParser.WildcardContext, 0)
def bracketWildcard(self):
return self.getTypedRuleContext(
JSONPathParser.BracketWildcardContext, 0)
def filters(self):
return self.getTypedRuleContext(JSONPathParser.FiltersContext, 0)
def arraySlice(self):
return self.getTypedRuleContext(
JSONPathParser.ArraySliceContext, 0)
def union(self):
return self.getTypedRuleContext(JSONPathParser.UnionContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_singleDotExpression
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterSingleDotExpression"):
listener.enterSingleDotExpression(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitSingleDotExpression"):
listener.exitSingleDotExpression(self)
def singleDotExpression(self):
localctx = JSONPathParser.SingleDotExpressionContext(
self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_singleDotExpression)
self._la = 0 # Token type
try:
self.state = 70
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 7, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 51
self.fieldAccessor()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 52
self.match(JSONPathParser.SINGLE_DOT)
self.state = 53
self.wildcard()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 55
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 54
self.match(JSONPathParser.SINGLE_DOT)
self.state = 57
self.bracketWildcard()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 59
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 58
self.match(JSONPathParser.SINGLE_DOT)
self.state = 61
self.filters()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 63
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 62
self.match(JSONPathParser.SINGLE_DOT)
self.state = 65
self.arraySlice()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 67
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 66
self.match(JSONPathParser.SINGLE_DOT)
self.state = 69
self.union()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FiltersContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def numericFilter(self):
return self.getTypedRuleContext(
JSONPathParser.NumericFilterContext, 0)
def stringFilter(self):
return self.getTypedRuleContext(
JSONPathParser.StringFilterContext, 0)
def booleanFilter(self):
return self.getTypedRuleContext(
JSONPathParser.BooleanFilterContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_filters
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFilters"):
listener.enterFilters(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFilters"):
listener.exitFilters(self)
def filters(self):
localctx = JSONPathParser.FiltersContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_filters)
try:
self.state = 93
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 8, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 72
self.match(JSONPathParser.T__0)
self.state = 73
self.match(JSONPathParser.T__1)
self.state = 74
self.match(JSONPathParser.T__2)
self.state = 75
self.numericFilter()
self.state = 76
self.match(JSONPathParser.T__3)
self.state = 77
self.match(JSONPathParser.T__4)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 79
self.match(JSONPathParser.T__0)
self.state = 80
self.match(JSONPathParser.T__1)
self.state = 81
self.match(JSONPathParser.T__2)
self.state = 82
self.stringFilter()
self.state = 83
self.match(JSONPathParser.T__3)
self.state = 84
self.match(JSONPathParser.T__4)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 86
self.match(JSONPathParser.T__0)
self.state = 87
self.match(JSONPathParser.T__1)
self.state = 88
self.match(JSONPathParser.T__2)
self.state = 89
self.booleanFilter()
self.state = 90
self.match(JSONPathParser.T__3)
self.state = 91
self.match(JSONPathParser.T__4)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NumericFilterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def CURRENT(self):
return self.getToken(JSONPathParser.CURRENT, 0)
def fieldAccessor(self):
return self.getTypedRuleContext(
JSONPathParser.FieldAccessorContext, 0)
def INT(self):
return self.getToken(JSONPathParser.INT, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_numericFilter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterNumericFilter"):
listener.enterNumericFilter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitNumericFilter"):
listener.exitNumericFilter(self)
def numericFilter(self):
localctx = JSONPathParser.NumericFilterContext(
self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_numericFilter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 95
self.match(JSONPathParser.CURRENT)
self.state = 96
self.fieldAccessor()
self.state = 97
_la = self._input.LA(1)
if not (
(((_la) & ~0x3f) == 0 and (
(1 << _la) & (
(1 << JSONPathParser.T__5) | (
1 << JSONPathParser.T__6) | (
1 << JSONPathParser.T__7))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 98
self.match(JSONPathParser.INT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StringFilterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def CURRENT(self):
return self.getToken(JSONPathParser.CURRENT, 0)
def fieldAccessor(self):
return self.getTypedRuleContext(
JSONPathParser.FieldAccessorContext, 0)
def STRING(self):
return self.getToken(JSONPathParser.STRING, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_stringFilter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterStringFilter"):
listener.enterStringFilter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitStringFilter"):
listener.exitStringFilter(self)
def stringFilter(self):
localctx = JSONPathParser.StringFilterContext(
self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_stringFilter)
try:
self.enterOuterAlt(localctx, 1)
self.state = 100
self.match(JSONPathParser.CURRENT)
self.state = 101
self.fieldAccessor()
self.state = 102
self.match(JSONPathParser.T__7)
self.state = 103
self.match(JSONPathParser.STRING)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BooleanFilterContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def CURRENT(self):
return self.getToken(JSONPathParser.CURRENT, 0)
def fieldAccessor(self):
return self.getTypedRuleContext(
JSONPathParser.FieldAccessorContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_booleanFilter
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBooleanFilter"):
listener.enterBooleanFilter(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBooleanFilter"):
listener.exitBooleanFilter(self)
def booleanFilter(self):
localctx = JSONPathParser.BooleanFilterContext(
self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_booleanFilter)
try:
self.enterOuterAlt(localctx, 1)
self.state = 105
self.match(JSONPathParser.CURRENT)
self.state = 106
self.fieldAccessor()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnionContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def STRING(self, i: int = None):
if i is None:
return self.getTokens(JSONPathParser.STRING)
else:
return self.getToken(JSONPathParser.STRING, i)
def LETTER(self, i: int = None):
if i is None:
return self.getTokens(JSONPathParser.LETTER)
else:
return self.getToken(JSONPathParser.LETTER, i)
def getRuleIndex(self):
return JSONPathParser.RULE_union
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterUnion"):
listener.enterUnion(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitUnion"):
listener.exitUnion(self)
def union(self):
    """Parse `union : '[' (LETTER | STRING) (',' (LETTER | STRING))+ ']'`.

    Note the do-while shape below: at least one ',' element is required,
    so a union always names two or more fields.
    """
    localctx = JSONPathParser.UnionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_union)
    self._la = 0  # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 108
        self.match(JSONPathParser.T__0)  # '['
        self.state = 109
        _la = self._input.LA(1)
        # Inline token-set match: accept LETTER or STRING.
        if not (_la == JSONPathParser.LETTER or _la ==
                JSONPathParser.STRING):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 112
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while True:
            self.state = 110
            self.match(JSONPathParser.T__8)  # ','
            self.state = 111
            _la = self._input.LA(1)
            if not (_la == JSONPathParser.LETTER or _la ==
                    JSONPathParser.STRING):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 114
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if not (_la == JSONPathParser.T__8):
                break  # no further ',' -> list finished
        self.state = 116
        self.match(JSONPathParser.T__4)  # ']'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArraySliceContext(ParserRuleContext):
    """Parse-tree node for the `arraySlice` rule ('[start:end(:step)]')."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INT(self, i: int = None):
        # i is None -> all INT tokens of the slice; otherwise the i-th.
        if i is None:
            return self.getTokens(JSONPathParser.INT)
        else:
            return self.getToken(JSONPathParser.INT, i)

    def getRuleIndex(self):
        return JSONPathParser.RULE_arraySlice

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterArraySlice"):
            listener.enterArraySlice(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitArraySlice"):
            listener.exitArraySlice(self)
def arraySlice(self):
    """Parse `arraySlice : '[' INT? ':' INT? (':' INT)? ']'`.

    All three integers are optional except that the first ':' is
    mandatory; the third component requires its own ':' prefix.
    """
    localctx = JSONPathParser.ArraySliceContext(
        self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_arraySlice)
    self._la = 0  # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 118
        self.match(JSONPathParser.T__0)  # '['
        self.state = 120
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la == JSONPathParser.INT:  # optional start index
            self.state = 119
            self.match(JSONPathParser.INT)
        self.state = 122
        self.match(JSONPathParser.T__9)  # ':'
        self.state = 124
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la == JSONPathParser.INT:  # optional end index
            self.state = 123
            self.match(JSONPathParser.INT)
        self.state = 128
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la == JSONPathParser.T__9:  # optional ':' step
            self.state = 126
            self.match(JSONPathParser.T__9)
            self.state = 127
            self.match(JSONPathParser.INT)
        self.state = 130
        self.match(JSONPathParser.T__4)  # ']'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FieldAccessorContext(ParserRuleContext):
    """Parse-tree node for `fieldAccessor` (dot field, bracket field, or index)."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def SINGLE_DOT(self):
        # The '.' token; absent in the dot-less bracket alternatives.
        return self.getToken(JSONPathParser.SINGLE_DOT, 0)

    def field(self):
        return self.getTypedRuleContext(JSONPathParser.FieldContext, 0)

    def bracketField(self):
        return self.getTypedRuleContext(
            JSONPathParser.BracketFieldContext, 0)

    def arrayIndex(self):
        return self.getTypedRuleContext(
            JSONPathParser.ArrayIndexContext, 0)

    def getRuleIndex(self):
        return JSONPathParser.RULE_fieldAccessor

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterFieldAccessor"):
            listener.enterFieldAccessor(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitFieldAccessor"):
            listener.exitFieldAccessor(self)
def fieldAccessor(self):
    """Parse one of the three `fieldAccessor` alternatives:

        1. SINGLE_DOT field          (e.g. `.name`)
        2. SINGLE_DOT? bracketField  (e.g. `['name']`)
        3. SINGLE_DOT? arrayIndex    (e.g. `[0]`)

    The alternative is chosen by ATN adaptive prediction (decision 15).
    """
    localctx = JSONPathParser.FieldAccessorContext(
        self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_fieldAccessor)
    self._la = 0  # Token type
    try:
        self.state = 142
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input, 15, self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 132
            self.match(JSONPathParser.SINGLE_DOT)
            self.state = 133
            self.field()
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 135
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la == JSONPathParser.SINGLE_DOT:  # dot is optional here
                self.state = 134
                self.match(JSONPathParser.SINGLE_DOT)
            self.state = 137
            self.bracketField()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 139
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la == JSONPathParser.SINGLE_DOT:  # dot is optional here
                self.state = 138
                self.match(JSONPathParser.SINGLE_DOT)
            self.state = 141
            self.arrayIndex()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FieldContext(ParserRuleContext):
    """Parse-tree node for the `field` rule: a single LETTER identifier."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LETTER(self):
        return self.getToken(JSONPathParser.LETTER, 0)

    def getRuleIndex(self):
        return JSONPathParser.RULE_field

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterField"):
            listener.enterField(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitField"):
            listener.exitField(self)
def field(self):
    """Parse `field : LETTER` — a bare identifier after a dot."""
    localctx = JSONPathParser.FieldContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_field)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 144
        self.match(JSONPathParser.LETTER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BracketFieldContext(ParserRuleContext):
    """Parse-tree node for `bracketField`: '[' (LETTER | STRING) ']'."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def STRING(self):
        return self.getToken(JSONPathParser.STRING, 0)

    def LETTER(self):
        return self.getToken(JSONPathParser.LETTER, 0)

    def getRuleIndex(self):
        return JSONPathParser.RULE_bracketField

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBracketField"):
            listener.enterBracketField(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBracketField"):
            listener.exitBracketField(self)
def bracketField(self):
    """Parse `bracketField : '[' (LETTER | STRING) ']'`."""
    localctx = JSONPathParser.BracketFieldContext(
        self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_bracketField)
    self._la = 0  # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 146
        self.match(JSONPathParser.T__0)  # '['
        self.state = 147
        _la = self._input.LA(1)
        # Inline token-set match: accept LETTER or STRING.
        if not (_la == JSONPathParser.LETTER or _la ==
                JSONPathParser.STRING):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 148
        self.match(JSONPathParser.T__4)  # ']'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArrayIndexContext(ParserRuleContext):
    """Parse-tree node for `arrayIndex`: '[' INT ']'."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INT(self):
        return self.getToken(JSONPathParser.INT, 0)

    def getRuleIndex(self):
        return JSONPathParser.RULE_arrayIndex

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterArrayIndex"):
            listener.enterArrayIndex(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitArrayIndex"):
            listener.exitArrayIndex(self)
def arrayIndex(self):
    """Parse `arrayIndex : '[' INT ']'` — a single numeric subscript."""
    localctx = JSONPathParser.ArrayIndexContext(
        self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_arrayIndex)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 150
        self.match(JSONPathParser.T__0)  # '['
        self.state = 151
        self.match(JSONPathParser.INT)
        self.state = 152
        self.match(JSONPathParser.T__4)  # ']'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WildcardContext(ParserRuleContext):
    """Parse-tree node for `wildcard` (the '*' literal); no named children."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return JSONPathParser.RULE_wildcard

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterWildcard"):
            listener.enterWildcard(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitWildcard"):
            listener.exitWildcard(self)
def wildcard(self):
    """Parse `wildcard : '*'` (token T__10)."""
    localctx = JSONPathParser.WildcardContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_wildcard)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 154
        self.match(JSONPathParser.T__10)  # '*'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BracketWildcardContext(ParserRuleContext):
    """Parse-tree node for `bracketWildcard` (the '[*]' literal)."""

    def __init__(self, parser, parent: ParserRuleContext = None,
                 invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return JSONPathParser.RULE_bracketWildcard

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBracketWildcard"):
            listener.enterBracketWildcard(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBracketWildcard"):
            listener.exitBracketWildcard(self)
def bracketWildcard(self):
    """Parse `bracketWildcard : '[*]'` — lexed as one token (T__11)."""
    localctx = JSONPathParser.BracketWildcardContext(
        self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_bracketWildcard)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 156
        self.match(JSONPathParser.T__11)  # '[*]'
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
# Generated from JSONPath.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN for the JSONPath grammar.

    The escape strings below are ANTLR's machine-generated encoding of
    the parser's augmented transition network; they must never be
    edited by hand — regenerate from JSONPath.g4 instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\26")
        buf.write("\u00a1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\7\2%\n\2\f")
        buf.write("\2\16\2(\13\2\3\2\3\2\3\3\3\3\5\3.\n\3\3\4\3\4\3\4\3\4")
        buf.write("\5\4\64\n\4\3\5\3\5\3\5\3\5\5\5:\n\5\3\5\3\5\5\5>\n\5")
        buf.write("\3\5\3\5\5\5B\n\5\3\5\3\5\5\5F\n\5\3\5\5\5I\n\5\3\6\3")
        buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\5\6`\n\6\3\7\3\7\3\7\3\7\3\7")
        buf.write("\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\n\3\n\3\n\3\n\6\ns")
        buf.write("\n\n\r\n\16\nt\3\n\3\n\3\13\3\13\5\13{\n\13\3\13\3\13")
        buf.write("\5\13\177\n\13\3\13\3\13\5\13\u0083\n\13\3\13\3\13\3\f")
        buf.write("\3\f\3\f\5\f\u008a\n\f\3\f\3\f\5\f\u008e\n\f\3\f\5\f\u0091")
        buf.write("\n\f\3\r\3\r\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3")
        buf.write("\20\3\20\3\21\3\21\3\21\2\2\22\2\4\6\b\n\f\16\20\22\24")
        buf.write("\26\30\32\34\36 \2\4\3\2\b\n\3\2\23\24\2\u00a6\2\"\3\2")
        buf.write("\2\2\4-\3\2\2\2\6\63\3\2\2\2\bH\3\2\2\2\n_\3\2\2\2\fa")
        buf.write("\3\2\2\2\16f\3\2\2\2\20k\3\2\2\2\22n\3\2\2\2\24x\3\2\2")
        buf.write("\2\26\u0090\3\2\2\2\30\u0092\3\2\2\2\32\u0094\3\2\2\2")
        buf.write("\34\u0098\3\2\2\2\36\u009c\3\2\2\2 \u009e\3\2\2\2\"&\7")
        buf.write("\17\2\2#%\5\4\3\2$#\3\2\2\2%(\3\2\2\2&$\3\2\2\2&\'\3\2")
        buf.write("\2\2\')\3\2\2\2(&\3\2\2\2)*\7\2\2\3*\3\3\2\2\2+.\5\b\5")
        buf.write("\2,.\5\6\4\2-+\3\2\2\2-,\3\2\2\2.\5\3\2\2\2/\60\7\22\2")
        buf.write("\2\60\64\5\30\r\2\61\62\7\22\2\2\62\64\5\32\16\2\63/\3")
        buf.write("\2\2\2\63\61\3\2\2\2\64\7\3\2\2\2\65I\5\26\f\2\66\67\7")
        buf.write("\21\2\2\67I\5\36\20\28:\7\21\2\298\3\2\2\29:\3\2\2\2:")
        buf.write(";\3\2\2\2;I\5 \21\2<>\7\21\2\2=<\3\2\2\2=>\3\2\2\2>?\3")
        buf.write("\2\2\2?I\5\n\6\2@B\7\21\2\2A@\3\2\2\2AB\3\2\2\2BC\3\2")
        buf.write("\2\2CI\5\24\13\2DF\7\21\2\2ED\3\2\2\2EF\3\2\2\2FG\3\2")
        buf.write("\2\2GI\5\22\n\2H\65\3\2\2\2H\66\3\2\2\2H9\3\2\2\2H=\3")
        buf.write("\2\2\2HA\3\2\2\2HE\3\2\2\2I\t\3\2\2\2JK\7\3\2\2KL\7\4")
        buf.write("\2\2LM\7\5\2\2MN\5\f\7\2NO\7\6\2\2OP\7\7\2\2P`\3\2\2\2")
        buf.write("QR\7\3\2\2RS\7\4\2\2ST\7\5\2\2TU\5\16\b\2UV\7\6\2\2VW")
        buf.write("\7\7\2\2W`\3\2\2\2XY\7\3\2\2YZ\7\4\2\2Z[\7\5\2\2[\\\5")
        buf.write("\20\t\2\\]\7\6\2\2]^\7\7\2\2^`\3\2\2\2_J\3\2\2\2_Q\3\2")
        buf.write("\2\2_X\3\2\2\2`\13\3\2\2\2ab\7\20\2\2bc\5\26\f\2cd\t\2")
        buf.write("\2\2de\7\25\2\2e\r\3\2\2\2fg\7\20\2\2gh\5\26\f\2hi\7\n")
        buf.write("\2\2ij\7\24\2\2j\17\3\2\2\2kl\7\20\2\2lm\5\26\f\2m\21")
        buf.write("\3\2\2\2no\7\3\2\2or\t\3\2\2pq\7\13\2\2qs\t\3\2\2rp\3")
        buf.write("\2\2\2st\3\2\2\2tr\3\2\2\2tu\3\2\2\2uv\3\2\2\2vw\7\7\2")
        buf.write("\2w\23\3\2\2\2xz\7\3\2\2y{\7\25\2\2zy\3\2\2\2z{\3\2\2")
        buf.write("\2{|\3\2\2\2|~\7\f\2\2}\177\7\25\2\2~}\3\2\2\2~\177\3")
        buf.write("\2\2\2\177\u0082\3\2\2\2\u0080\u0081\7\f\2\2\u0081\u0083")
        buf.write("\7\25\2\2\u0082\u0080\3\2\2\2\u0082\u0083\3\2\2\2\u0083")
        buf.write("\u0084\3\2\2\2\u0084\u0085\7\7\2\2\u0085\25\3\2\2\2\u0086")
        buf.write("\u0087\7\21\2\2\u0087\u0091\5\30\r\2\u0088\u008a\7\21")
        buf.write("\2\2\u0089\u0088\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u008b")
        buf.write("\3\2\2\2\u008b\u0091\5\32\16\2\u008c\u008e\7\21\2\2\u008d")
        buf.write("\u008c\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f\3\2\2\2")
        buf.write("\u008f\u0091\5\34\17\2\u0090\u0086\3\2\2\2\u0090\u0089")
        buf.write("\3\2\2\2\u0090\u008d\3\2\2\2\u0091\27\3\2\2\2\u0092\u0093")
        buf.write("\7\23\2\2\u0093\31\3\2\2\2\u0094\u0095\7\3\2\2\u0095\u0096")
        buf.write("\t\3\2\2\u0096\u0097\7\7\2\2\u0097\33\3\2\2\2\u0098\u0099")
        buf.write("\7\3\2\2\u0099\u009a\7\25\2\2\u009a\u009b\7\7\2\2\u009b")
        buf.write("\35\3\2\2\2\u009c\u009d\7\r\2\2\u009d\37\3\2\2\2\u009e")
        buf.write("\u009f\7\16\2\2\u009f!\3\2\2\2\22&-\639=AEH_tz~\u0082")
        buf.write("\u0089\u008d\u0090")
        return buf.getvalue()
class JSONPathParser(Parser):
grammarFileName = "JSONPath.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]
sharedContextCache = PredictionContextCache()
literalNames = ["<INVALID>", "'['", "'?'", "'('", "')'", "']'", "'>'",
"'<'", "'=='", "','", "':'", "'*'", "'[*]'", "'$'",
"'@'", "'.'", "'..'"]
symbolicNames = [
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"<INVALID>",
"ROOT",
"CURRENT",
"SINGLE_DOT",
"DOUBLE_DOT",
"LETTER",
"STRING",
"INT",
"WS"]
RULE_jsonpath = 0
RULE_expression = 1
RULE_doubleDotExpression = 2
RULE_singleDotExpression = 3
RULE_filters = 4
RULE_numericFilter = 5
RULE_stringFilter = 6
RULE_booleanFilter = 7
RULE_union = 8
RULE_arraySlice = 9
RULE_fieldAccessor = 10
RULE_field = 11
RULE_bracketField = 12
RULE_arrayIndex = 13
RULE_wildcard = 14
RULE_bracketWildcard = 15
ruleNames = [
"jsonpath",
"expression",
"doubleDotExpression",
"singleDotExpression",
"filters",
"numericFilter",
"stringFilter",
"booleanFilter",
"union",
"arraySlice",
"fieldAccessor",
"field",
"bracketField",
"arrayIndex",
"wildcard",
"bracketWildcard"]
EOF = Token.EOF
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
ROOT = 13
CURRENT = 14
SINGLE_DOT = 15
DOUBLE_DOT = 16
LETTER = 17
STRING = 18
INT = 19
WS = 20
    def __init__(self, input: TokenStream, output: TextIO = sys.stdout):
        """Create a parser over *input*, writing diagnostics to *output*."""
        super().__init__(input, output)
        self.checkVersion("4.8")  # runtime must match the generator version
        self._interp = ParserATNSimulator(
            self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class JsonpathContext(ParserRuleContext):
        """Parse-tree node for the start rule: ROOT expression* EOF."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ROOT(self):
            # The leading '$' token.
            return self.getToken(JSONPathParser.ROOT, 0)

        def EOF(self):
            return self.getToken(JSONPathParser.EOF, 0)

        def expression(self, i: int = None):
            # i is None -> all child expressions; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(
                    JSONPathParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(
                    JSONPathParser.ExpressionContext, i)

        def getRuleIndex(self):
            return JSONPathParser.RULE_jsonpath

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterJsonpath"):
                listener.enterJsonpath(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitJsonpath"):
                listener.exitJsonpath(self)
    def jsonpath(self):
        """Parse the start rule `jsonpath : ROOT expression* EOF`.

        The while-loop continues as long as the lookahead is one of the
        tokens that can begin an expression: '[', '[*]', '.' or '..'
        (tested via the generated bitmask).
        """
        localctx = JSONPathParser.JsonpathContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_jsonpath)
        self._la = 0  # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 32
            self.match(JSONPathParser.ROOT)  # '$'
            self.state = 36
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while (((_la) & ~0x3f) == 0 and
                    ((1 << _la) & ((1 << JSONPathParser.T__0) | (
                        1 << JSONPathParser.T__11) | (
                        1 << JSONPathParser.SINGLE_DOT) | (
                        1 << JSONPathParser.DOUBLE_DOT))) != 0):
                self.state = 33
                self.expression()
                self.state = 38
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 39
            self.match(JSONPathParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExpressionContext(ParserRuleContext):
        """Parse-tree node for `expression`: a single- or double-dot step."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def singleDotExpression(self):
            return self.getTypedRuleContext(
                JSONPathParser.SingleDotExpressionContext, 0)

        def doubleDotExpression(self):
            return self.getTypedRuleContext(
                JSONPathParser.DoubleDotExpressionContext, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_expression

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterExpression"):
                listener.enterExpression(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitExpression"):
                listener.exitExpression(self)
    def expression(self):
        """Parse `expression : singleDotExpression | doubleDotExpression`.

        The alternative is chosen on one token of lookahead: '..' starts
        a double-dot (recursive-descent) step, anything else that can
        start an expression is a single-dot step.
        """
        localctx = JSONPathParser.ExpressionContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_expression)
        try:
            self.state = 43
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [JSONPathParser.T__0, JSONPathParser.T__11,
                         JSONPathParser.SINGLE_DOT]:
                self.enterOuterAlt(localctx, 1)
                self.state = 41
                self.singleDotExpression()
                pass
            elif token in [JSONPathParser.DOUBLE_DOT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 42
                self.doubleDotExpression()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class DoubleDotExpressionContext(ParserRuleContext):
        """Parse-tree node for `doubleDotExpression`: '..' field-or-bracket."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def DOUBLE_DOT(self):
            return self.getToken(JSONPathParser.DOUBLE_DOT, 0)

        def field(self):
            return self.getTypedRuleContext(JSONPathParser.FieldContext, 0)

        def bracketField(self):
            return self.getTypedRuleContext(
                JSONPathParser.BracketFieldContext, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_doubleDotExpression

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterDoubleDotExpression"):
                listener.enterDoubleDotExpression(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitDoubleDotExpression"):
                listener.exitDoubleDotExpression(self)
    def doubleDotExpression(self):
        """Parse `doubleDotExpression : '..' field | '..' bracketField`.

        Both alternatives begin with DOUBLE_DOT, so the choice is made by
        ATN adaptive prediction (decision 2).
        """
        localctx = JSONPathParser.DoubleDotExpressionContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_doubleDotExpression)
        try:
            self.state = 49
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input, 2, self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 45
                self.match(JSONPathParser.DOUBLE_DOT)
                self.state = 46
                self.field()
                pass
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 47
                self.match(JSONPathParser.DOUBLE_DOT)
                self.state = 48
                self.bracketField()
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SingleDotExpressionContext(ParserRuleContext):
        """Parse-tree node for `singleDotExpression`.

        Exactly one of the child accessors below is non-None, depending
        on which alternative was parsed.
        """

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def fieldAccessor(self):
            return self.getTypedRuleContext(
                JSONPathParser.FieldAccessorContext, 0)

        def SINGLE_DOT(self):
            return self.getToken(JSONPathParser.SINGLE_DOT, 0)

        def wildcard(self):
            return self.getTypedRuleContext(JSONPathParser.WildcardContext, 0)

        def bracketWildcard(self):
            return self.getTypedRuleContext(
                JSONPathParser.BracketWildcardContext, 0)

        def filters(self):
            return self.getTypedRuleContext(JSONPathParser.FiltersContext, 0)

        def arraySlice(self):
            return self.getTypedRuleContext(
                JSONPathParser.ArraySliceContext, 0)

        def union(self):
            return self.getTypedRuleContext(JSONPathParser.UnionContext, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_singleDotExpression

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterSingleDotExpression"):
                listener.enterSingleDotExpression(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitSingleDotExpression"):
                listener.exitSingleDotExpression(self)
    def singleDotExpression(self):
        """Parse one `singleDotExpression` alternative (decision 7):

            1. fieldAccessor
            2. SINGLE_DOT wildcard
            3. SINGLE_DOT? bracketWildcard
            4. SINGLE_DOT? filters
            5. SINGLE_DOT? arraySlice
            6. SINGLE_DOT? union

        In alternatives 3-6 the leading '.' is optional.
        """
        localctx = JSONPathParser.SingleDotExpressionContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_singleDotExpression)
        self._la = 0  # Token type
        try:
            self.state = 70
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input, 7, self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 51
                self.fieldAccessor()
                pass
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 52
                self.match(JSONPathParser.SINGLE_DOT)
                self.state = 53
                self.wildcard()
                pass
            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 55
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la == JSONPathParser.SINGLE_DOT:  # optional '.'
                    self.state = 54
                    self.match(JSONPathParser.SINGLE_DOT)
                self.state = 57
                self.bracketWildcard()
                pass
            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 59
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la == JSONPathParser.SINGLE_DOT:  # optional '.'
                    self.state = 58
                    self.match(JSONPathParser.SINGLE_DOT)
                self.state = 61
                self.filters()
                pass
            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 63
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la == JSONPathParser.SINGLE_DOT:  # optional '.'
                    self.state = 62
                    self.match(JSONPathParser.SINGLE_DOT)
                self.state = 65
                self.arraySlice()
                pass
            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 67
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la == JSONPathParser.SINGLE_DOT:  # optional '.'
                    self.state = 66
                    self.match(JSONPathParser.SINGLE_DOT)
                self.state = 69
                self.union()
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FiltersContext(ParserRuleContext):
        """Parse-tree node for `filters`: a '[?( ... )]' filter expression."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def numericFilter(self):
            return self.getTypedRuleContext(
                JSONPathParser.NumericFilterContext, 0)

        def stringFilter(self):
            return self.getTypedRuleContext(
                JSONPathParser.StringFilterContext, 0)

        def booleanFilter(self):
            return self.getTypedRuleContext(
                JSONPathParser.BooleanFilterContext, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_filters

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterFilters"):
                listener.enterFilters(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitFilters"):
                listener.exitFilters(self)
    def filters(self):
        """Parse `filters : '[' '?' '(' X ')' ']'` where X is a numeric,
        string, or boolean filter (alternatives chosen by decision 8).

        The three branches are identical except for the inner filter rule.
        """
        localctx = JSONPathParser.FiltersContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_filters)
        try:
            self.state = 93
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input, 8, self._ctx)
            if la_ == 1:
                # '[' '?' '(' numericFilter ')' ']'
                self.enterOuterAlt(localctx, 1)
                self.state = 72
                self.match(JSONPathParser.T__0)
                self.state = 73
                self.match(JSONPathParser.T__1)
                self.state = 74
                self.match(JSONPathParser.T__2)
                self.state = 75
                self.numericFilter()
                self.state = 76
                self.match(JSONPathParser.T__3)
                self.state = 77
                self.match(JSONPathParser.T__4)
                pass
            elif la_ == 2:
                # '[' '?' '(' stringFilter ')' ']'
                self.enterOuterAlt(localctx, 2)
                self.state = 79
                self.match(JSONPathParser.T__0)
                self.state = 80
                self.match(JSONPathParser.T__1)
                self.state = 81
                self.match(JSONPathParser.T__2)
                self.state = 82
                self.stringFilter()
                self.state = 83
                self.match(JSONPathParser.T__3)
                self.state = 84
                self.match(JSONPathParser.T__4)
                pass
            elif la_ == 3:
                # '[' '?' '(' booleanFilter ')' ']'
                self.enterOuterAlt(localctx, 3)
                self.state = 86
                self.match(JSONPathParser.T__0)
                self.state = 87
                self.match(JSONPathParser.T__1)
                self.state = 88
                self.match(JSONPathParser.T__2)
                self.state = 89
                self.booleanFilter()
                self.state = 90
                self.match(JSONPathParser.T__3)
                self.state = 91
                self.match(JSONPathParser.T__4)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NumericFilterContext(ParserRuleContext):
        """Parse-tree node for `numericFilter`: '@' field (>|<|==) INT."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CURRENT(self):
            return self.getToken(JSONPathParser.CURRENT, 0)

        def fieldAccessor(self):
            return self.getTypedRuleContext(
                JSONPathParser.FieldAccessorContext, 0)

        def INT(self):
            # Right-hand side of the numeric comparison.
            return self.getToken(JSONPathParser.INT, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_numericFilter

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterNumericFilter"):
                listener.enterNumericFilter(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitNumericFilter"):
                listener.exitNumericFilter(self)
    def numericFilter(self):
        """Parse `numericFilter : CURRENT fieldAccessor ('>'|'<'|'==') INT`.

        The comparison operator is matched as an inline token set via the
        generated bitmask (T__5='>', T__6='<', T__7='==').
        """
        localctx = JSONPathParser.NumericFilterContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_numericFilter)
        self._la = 0  # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 95
            self.match(JSONPathParser.CURRENT)  # '@'
            self.state = 96
            self.fieldAccessor()
            self.state = 97
            _la = self._input.LA(1)
            if not ((((_la) & ~0x3f) == 0 and (
                    (1 << _la) & ((1 << JSONPathParser.T__5) | (
                        1 << JSONPathParser.T__6) | (
                        1 << JSONPathParser.T__7))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 98
            self.match(JSONPathParser.INT)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StringFilterContext(ParserRuleContext):
        """Parse-tree node for `stringFilter`: '@' field '==' STRING."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CURRENT(self):
            return self.getToken(JSONPathParser.CURRENT, 0)

        def fieldAccessor(self):
            return self.getTypedRuleContext(
                JSONPathParser.FieldAccessorContext, 0)

        def STRING(self):
            # Right-hand side of the string equality comparison.
            return self.getToken(JSONPathParser.STRING, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_stringFilter

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterStringFilter"):
                listener.enterStringFilter(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitStringFilter"):
                listener.exitStringFilter(self)
    def stringFilter(self):
        """Parse `stringFilter : CURRENT fieldAccessor '==' STRING`."""
        localctx = JSONPathParser.StringFilterContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_stringFilter)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 100
            self.match(JSONPathParser.CURRENT)  # '@'
            self.state = 101
            self.fieldAccessor()
            self.state = 102
            self.match(JSONPathParser.T__7)  # '=='
            self.state = 103
            self.match(JSONPathParser.STRING)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class BooleanFilterContext(ParserRuleContext):
        """Parse-tree node for the `booleanFilter` rule: CURRENT fieldAccessor."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CURRENT(self):
            # The '@' token that introduces the filter target.
            return self.getToken(JSONPathParser.CURRENT, 0)

        def fieldAccessor(self):
            return self.getTypedRuleContext(
                JSONPathParser.FieldAccessorContext, 0)

        def getRuleIndex(self):
            return JSONPathParser.RULE_booleanFilter

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterBooleanFilter"):
                listener.enterBooleanFilter(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitBooleanFilter"):
                listener.exitBooleanFilter(self)
    def booleanFilter(self):
        """Parse `booleanFilter : CURRENT fieldAccessor` ('@' then a field)."""
        localctx = JSONPathParser.BooleanFilterContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_booleanFilter)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 105
            self.match(JSONPathParser.CURRENT)  # '@'
            self.state = 106
            self.fieldAccessor()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class UnionContext(ParserRuleContext):
        """Parse-tree node for the `union` rule (bracketed list of names)."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def STRING(self, i: int = None):
            # i is None -> all STRING tokens in the union; otherwise the i-th.
            if i is None:
                return self.getTokens(JSONPathParser.STRING)
            else:
                return self.getToken(JSONPathParser.STRING, i)

        def LETTER(self, i: int = None):
            # i is None -> all LETTER tokens in the union; otherwise the i-th.
            if i is None:
                return self.getTokens(JSONPathParser.LETTER)
            else:
                return self.getToken(JSONPathParser.LETTER, i)

        def getRuleIndex(self):
            return JSONPathParser.RULE_union

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterUnion"):
                listener.enterUnion(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitUnion"):
                listener.exitUnion(self)
    def union(self):
        """Parse `union : '[' (LETTER | STRING) (',' (LETTER | STRING))+ ']'`.

        The do-while shape requires at least one ',' element, so a union
        always names two or more fields.
        """
        localctx = JSONPathParser.UnionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_union)
        self._la = 0  # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 108
            self.match(JSONPathParser.T__0)  # '['
            self.state = 109
            _la = self._input.LA(1)
            # Inline token-set match: accept LETTER or STRING.
            if not (_la == JSONPathParser.LETTER or _la ==
                    JSONPathParser.STRING):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 112
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 110
                self.match(JSONPathParser.T__8)  # ','
                self.state = 111
                _la = self._input.LA(1)
                if not (_la == JSONPathParser.LETTER or _la ==
                        JSONPathParser.STRING):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
                self.state = 114
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la == JSONPathParser.T__8):
                    break  # no further ',' -> list finished
            self.state = 116
            self.match(JSONPathParser.T__4)  # ']'
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ArraySliceContext(ParserRuleContext):
        """Parse-tree node for the `arraySlice` rule ('[start:end(:step)]')."""

        def __init__(self, parser, parent: ParserRuleContext = None,
                     invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def INT(self, i: int = None):
            # i is None -> all INT tokens of the slice; otherwise the i-th.
            if i is None:
                return self.getTokens(JSONPathParser.INT)
            else:
                return self.getToken(JSONPathParser.INT, i)

        def getRuleIndex(self):
            return JSONPathParser.RULE_arraySlice

        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterArraySlice"):
                listener.enterArraySlice(self)

        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitArraySlice"):
                listener.exitArraySlice(self)
def arraySlice(self):
localctx = JSONPathParser.ArraySliceContext(
self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_arraySlice)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 118
self.match(JSONPathParser.T__0)
self.state = 120
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.INT:
self.state = 119
self.match(JSONPathParser.INT)
self.state = 122
self.match(JSONPathParser.T__9)
self.state = 124
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.INT:
self.state = 123
self.match(JSONPathParser.INT)
self.state = 128
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.T__9:
self.state = 126
self.match(JSONPathParser.T__9)
self.state = 127
self.match(JSONPathParser.INT)
self.state = 130
self.match(JSONPathParser.T__4)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FieldAccessorContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def SINGLE_DOT(self):
return self.getToken(JSONPathParser.SINGLE_DOT, 0)
def field(self):
return self.getTypedRuleContext(JSONPathParser.FieldContext, 0)
def bracketField(self):
return self.getTypedRuleContext(
JSONPathParser.BracketFieldContext, 0)
def arrayIndex(self):
return self.getTypedRuleContext(
JSONPathParser.ArrayIndexContext, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_fieldAccessor
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterFieldAccessor"):
listener.enterFieldAccessor(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitFieldAccessor"):
listener.exitFieldAccessor(self)
def fieldAccessor(self):
localctx = JSONPathParser.FieldAccessorContext(
self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_fieldAccessor)
self._la = 0 # Token type
try:
self.state = 142
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 15, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 132
self.match(JSONPathParser.SINGLE_DOT)
self.state = 133
self.field()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 135
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 134
self.match(JSONPathParser.SINGLE_DOT)
self.state = 137
self.bracketField()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == JSONPathParser.SINGLE_DOT:
self.state = 138
self.match(JSONPathParser.SINGLE_DOT)
self.state = 141
self.arrayIndex()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FieldContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def LETTER(self):
return self.getToken(JSONPathParser.LETTER, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_field
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterField"):
listener.enterField(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitField"):
listener.exitField(self)
def field(self):
localctx = JSONPathParser.FieldContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_field)
try:
self.enterOuterAlt(localctx, 1)
self.state = 144
self.match(JSONPathParser.LETTER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BracketFieldContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def STRING(self):
return self.getToken(JSONPathParser.STRING, 0)
def LETTER(self):
return self.getToken(JSONPathParser.LETTER, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_bracketField
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBracketField"):
listener.enterBracketField(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBracketField"):
listener.exitBracketField(self)
def bracketField(self):
localctx = JSONPathParser.BracketFieldContext(
self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_bracketField)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 146
self.match(JSONPathParser.T__0)
self.state = 147
_la = self._input.LA(1)
if not (_la == JSONPathParser.LETTER or _la ==
JSONPathParser.STRING):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 148
self.match(JSONPathParser.T__4)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArrayIndexContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def INT(self):
return self.getToken(JSONPathParser.INT, 0)
def getRuleIndex(self):
return JSONPathParser.RULE_arrayIndex
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterArrayIndex"):
listener.enterArrayIndex(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitArrayIndex"):
listener.exitArrayIndex(self)
def arrayIndex(self):
localctx = JSONPathParser.ArrayIndexContext(
self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_arrayIndex)
try:
self.enterOuterAlt(localctx, 1)
self.state = 150
self.match(JSONPathParser.T__0)
self.state = 151
self.match(JSONPathParser.INT)
self.state = 152
self.match(JSONPathParser.T__4)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class WildcardContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return JSONPathParser.RULE_wildcard
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterWildcard"):
listener.enterWildcard(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitWildcard"):
listener.exitWildcard(self)
def wildcard(self):
localctx = JSONPathParser.WildcardContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_wildcard)
try:
self.enterOuterAlt(localctx, 1)
self.state = 154
self.match(JSONPathParser.T__10)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BracketWildcardContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None,
invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return JSONPathParser.RULE_bracketWildcard
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterBracketWildcard"):
listener.enterBracketWildcard(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitBracketWildcard"):
listener.exitBracketWildcard(self)
def bracketWildcard(self):
localctx = JSONPathParser.BracketWildcardContext(
self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_bracketWildcard)
try:
self.enterOuterAlt(localctx, 1)
self.state = 156
self.match(JSONPathParser.T__11)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| en | 0.321912 | # Generated from JSONPath.g4 by ANTLR 4.8 # encoding: utf-8 #%\5\4\3\2$#\3\2\2\2%(\3\2\2\2&$\3\2\2\2&\'\3\2") # Token type # Token type # Token type # Token type # Token type # Token type # Token type | 1.449932 | 1 |
tasks/__init__.py | andreformento/querido-diario-data-processing | 3 | 6613074 | <filename>tasks/__init__.py<gh_stars>1-10
from .gazette_text_extraction import extract_text_pending_gazettes, upload_gazette_raw_text
from .interfaces import DatabaseInterface, StorageInterface, IndexInterface, TextExtractorInterface
| <filename>tasks/__init__.py<gh_stars>1-10
from .gazette_text_extraction import extract_text_pending_gazettes, upload_gazette_raw_text
from .interfaces import DatabaseInterface, StorageInterface, IndexInterface, TextExtractorInterface
| none | 1 | 1.269782 | 1 | |
music/views.py | madhu0309/music-app-django | 0 | 6613075 | from django.shortcuts import get_object_or_404, render
from .models import Song
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView, CreateView, DeleteView
from django.views.generic.list import ListView
from django.utils import html
@login_required
def special(request):
return HttpResponse("You are logged in !")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def register(request):
registered = False
if request.method == 'POST':
print("post")
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save()
user.set_password(<PASSWORD>)
user = user.save()
else:
print(user_form.errors)
else:
user_form = UserForm()
return render(request, 'registration.html', {'user_form': user_form, 'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('song_list'))
else:
return HttpResponse("Your account was inactive.")
else:
print("Someone tried to login and failed.")
print("They used username: {} and password:{}".format(
username, password))
return HttpResponse("Invalid login details given")
else:
return render(request, 'login.html')
class SongListView(ListView):
model = Song
context_object_name = "song_list"
template_name = "song_list.html"
def song_detail(request, id):
song = get_object_or_404(Song, id=id)
is_favourite = False
if song.favourite.filter(id=request.user.id).exists():
is_favourite = True
context = {
'song': song,
'is_favourite': is_favourite,
}
if request.is_ajax():
return JsonResponse({'form': html})
return render(request, 'song_detail.html', context)
def song_favourite_list(request):
user = request.user
favourite_songs = user.favourite.all()
context = {
'favourite_songs': favourite_songs,
}
return render(request, 'song_favourite_list.html', context)
def favourite_song(request, id):
song = get_object_or_404(Song, id=id)
if song.favourite.filter(id=request.user.id).exists():
song.favourite.remove(request.user)
else:
song.favourite.add(request.user)
return HttpResponseRedirect(song.get_absolute_url())
| from django.shortcuts import get_object_or_404, render
from .models import Song
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView, CreateView, DeleteView
from django.views.generic.list import ListView
from django.utils import html
@login_required
def special(request):
return HttpResponse("You are logged in !")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def register(request):
registered = False
if request.method == 'POST':
print("post")
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save()
user.set_password(<PASSWORD>)
user = user.save()
else:
print(user_form.errors)
else:
user_form = UserForm()
return render(request, 'registration.html', {'user_form': user_form, 'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('song_list'))
else:
return HttpResponse("Your account was inactive.")
else:
print("Someone tried to login and failed.")
print("They used username: {} and password:{}".format(
username, password))
return HttpResponse("Invalid login details given")
else:
return render(request, 'login.html')
class SongListView(ListView):
model = Song
context_object_name = "song_list"
template_name = "song_list.html"
def song_detail(request, id):
song = get_object_or_404(Song, id=id)
is_favourite = False
if song.favourite.filter(id=request.user.id).exists():
is_favourite = True
context = {
'song': song,
'is_favourite': is_favourite,
}
if request.is_ajax():
return JsonResponse({'form': html})
return render(request, 'song_detail.html', context)
def song_favourite_list(request):
user = request.user
favourite_songs = user.favourite.all()
context = {
'favourite_songs': favourite_songs,
}
return render(request, 'song_favourite_list.html', context)
def favourite_song(request, id):
song = get_object_or_404(Song, id=id)
if song.favourite.filter(id=request.user.id).exists():
song.favourite.remove(request.user)
else:
song.favourite.add(request.user)
return HttpResponseRedirect(song.get_absolute_url())
| none | 1 | 2.416896 | 2 | |
books/forms.py | Mulham/Django-Project | 0 | 6613076 | from django import forms
from .models import Book
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ("title", "categories", "image", 'description', 'file', )
class CommentForm(forms.Form):
author = forms.CharField(
max_length=60,
widget=forms.TextInput(attrs={
"class": "form-control",
"placeholder": "<NAME>"
})
)
body = forms.CharField(widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Leave a comment!"
})
) | from django import forms
from .models import Book
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ("title", "categories", "image", 'description', 'file', )
class CommentForm(forms.Form):
author = forms.CharField(
max_length=60,
widget=forms.TextInput(attrs={
"class": "form-control",
"placeholder": "<NAME>"
})
)
body = forms.CharField(widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Leave a comment!"
})
) | none | 1 | 2.644825 | 3 | |
external/model-preparation-algorithm/mpa_tasks/apis/classification/__init__.py | opencv/openvino_training_extensions | 775 | 6613077 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from .config import ClassificationConfig
from .task import ClassificationInferenceTask, ClassificationTrainTask
# Load relevant extensions to registry
import mpa_tasks.extensions.datasets.mpa_cls_dataset
import mpa_tasks.extensions.datasets.pipelines.mpa_cls_pipeline
| # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from .config import ClassificationConfig
from .task import ClassificationInferenceTask, ClassificationTrainTask
# Load relevant extensions to registry
import mpa_tasks.extensions.datasets.mpa_cls_dataset
import mpa_tasks.extensions.datasets.pipelines.mpa_cls_pipeline
| en | 0.417668 | # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # Load relevant extensions to registry | 1.098084 | 1 |
gpsimy/models/__init__.py | gamikun/gpsimy | 1 | 6613078 | <reponame>gamikun/gpsimy
from command import Command
from event import *
from position import Position | from command import Command
from event import *
from position import Position | none | 1 | 1.145532 | 1 | |
examples/koopman_nonlinear_damped_dual_mass.py | HaldexBrake/ReducedOrderModeling | 2 | 6613079 | # TensorFlow
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU') # Force CPU
tf.keras.backend.set_floatx('float64')
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
import sys
sys.path.append('../')
import numpy as np
from numpy.linalg import norm
np.set_printoptions(precision=5,linewidth=1000)
import matplotlib.pyplot as plt
from dmd import dmd
from koopman import koopman
from koopman.koopman import Net
if __name__ == '__main__':
# Simulation params
t_start = 0.0 # start simulation time
t_stop = 40.0 # stop simulations time
t_stop_train = 20.0 # only train with data up to this time
ncp = 4000 # number of communication points
amp = 30.0 # amplitude of input signal
freq = 0.1 # frequency of input signal
inp = 'sin' # input type: delta or sin
# NN and training params
lr = 8e-5
epochs = 400
pre_lr = 5e-3
pre_epochs = 300
n = 21
w_hidden = 60
############################################################################
# Simulate and generate data
############################################################################
# Construct time vector
t = np.linspace(t_start,t_stop, ncp+1)
dt = (t_stop-t_start)/ncp # Step size
# Calc stop idx for training
if not t_stop_train:
m_stop = ncp+1
elif t_stop_train>t_stop:
raise ValueError('t_stop_train must be <= t_stop.')
else:
m_stop = np.argmin(t<t_stop_train)
# Compute input from time vector t
u = dmd.create_input_vec(t, inp_type=inp, amp=amp, freq=freq)
# Get snapshots, both simulated and analytical
data_sim = dmd.get_snapshots_stop_friction(
t_start, t_stop, ncp , input_force=u, time_vec=t, states=['mass1.s', 'mass1.v', 'mass2.s', 'mass2.v'])
x1_target = data_sim[0,:]
x2_target = data_sim[2,:]
# Get data matrices
X,Y = dmd.get_data_matrices(data_sim, m_stop=m_stop, u=u)
############################################################################
# Construct and train NN
############################################################################
# Convert arrays to tensors
X_tensor = tf.constant(X.T, dtype=tf.float64)
Y_tensor = tf.constant(Y.T, dtype=tf.float64)
if u is not None:
u_tensor = tf.constant(u, dtype=tf.float64)
else:
u_tensor = None
# NN and training params
m_stop, n_x = X_tensor.shape
# Define networks
g = Net(n_x, n, w_hidden=w_hidden, activation='relu')
h = Net(n, n_x, w_hidden=w_hidden, activation='relu')
# Initiate weights
g(tf.expand_dims(tf.ones(n_x, dtype=tf.dtypes.float64),0))
h(tf.expand_dims(tf.ones(n, dtype=tf.dtypes.float64),0))
# Create optimizer objects
pre_opt_g = Adam(learning_rate=pre_lr)
pre_opt_h = Adam(learning_rate=pre_lr)
# Pre-train network
print('Pre-training the network as an autoencoder...')
pre_loss_history = koopman.pre_train_networks(pre_opt_g, pre_opt_h, g,
h, Y_tensor, EPOCHS=tf.constant(pre_epochs))
print('FINISHED with final pre-train loss: ',pre_loss_history[-1].numpy())
# Train networks with new optimizer objects
opt_g = Adam(learning_rate=lr)
opt_h = Adam(learning_rate=lr)
print('Training the network with Koopman loss...')
loss_history, rec_loss_hist, lin_loss_hist, pred_loss_hist = koopman.train_networks(
opt_g, opt_h, g, h, X_tensor, Y_tensor, u_tensor, EPOCHS=tf.constant(epochs))
print('FINISHED with final train loss: ',loss_history[-1].numpy())
############################################################################
# Make new predictions
############################################################################
gX = g(X_tensor)
gY = g(Y_tensor)
# Calc DMD modes and eigenvalues
lam_tensor,w_tensor,v_tensor, _ = koopman.get_dmd_modes_tensors(gX,gY)
# Predict the system
Yhat_tensor,_ = koopman.predict_koopman(lam_tensor, w_tensor, v_tensor, X_tensor[0,:],
tf.constant(ncp), g, h, u=u_tensor)
Yhat = Yhat_tensor.numpy().T
x1_koopman = Yhat[0,:]
x2_koopman = Yhat[2,:]
############################################################################
# Perform DMD for comparison
############################################################################
lam_dmd,w_dmd,v_dmd,_ = dmd.get_dmd_modes(X,Y)
Yhat_dmd = dmd.predict(lam_dmd, w_dmd, v_dmd, X[:,0], ncp, u=u)
Yhat_dmd = Yhat_dmd
x1_dmd = Yhat_dmd[0,:]
x2_dmd = Yhat_dmd[2,:]
############################################################################
# Errors, prints and plots
############################################################################
print('Train error Koopman')
print(' x1: ', 1/m_stop*norm(x1_koopman[:m_stop]-x1_target[:m_stop],2))
print(' x2: ', 1/m_stop*norm(x2_koopman[:m_stop]-x2_target[:m_stop],2))
print('Train error DMD')
print(' x1: ', 1/m_stop*norm(x1_dmd[:m_stop]-x1_target[:m_stop],2))
print(' x2: ', 1/m_stop*norm(x2_dmd[:m_stop]-x2_target[:m_stop],2))
if m_stop<ncp:
print('Test error Koopman')
print(' x1: ', 1/(ncp-m_stop)*norm(x1_koopman[m_stop:]-x1_target[m_stop:],2))
print(' x2: ', 1/(ncp-m_stop)*norm(x2_koopman[m_stop:]-x2_target[m_stop:],2))
print('Test error DMD')
print(' x1: ', 1/(ncp-m_stop)*norm(x1_dmd[m_stop:]-x1_target[m_stop:],2))
print(' x2: ', 1/(ncp-m_stop)*norm(x2_dmd[m_stop:]-x2_target[m_stop:],2))
plt.figure()
plt.plot(t,x1_target,'-b')
plt.plot(t,x1_dmd,'--r')
plt.plot(t,x1_koopman,'-.k')
plt.xlim(0,dt*ncp)
plt.xlabel('Time (s)')
plt.ylabel('Possition of mass 1 (m)')
plt.grid(True)
plt.legend(('Simulation','DMD','Koopman'),bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=2)
plt.figure()
plt.plot(t,x2_target,'-b')
plt.plot(t,x2_dmd,'--r')
plt.plot(t,x2_koopman,'-.k')
plt.xlim(0,dt*ncp)
plt.xlabel('Time (s)')
plt.ylabel('Possition of mass 2 (m)')
plt.grid(True)
plt.legend(('Simulation','DMD','Koopman'),bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=2)
plt.show()
| # TensorFlow
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU') # Force CPU
tf.keras.backend.set_floatx('float64')
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
import sys
sys.path.append('../')
import numpy as np
from numpy.linalg import norm
np.set_printoptions(precision=5,linewidth=1000)
import matplotlib.pyplot as plt
from dmd import dmd
from koopman import koopman
from koopman.koopman import Net
if __name__ == '__main__':
# Simulation params
t_start = 0.0 # start simulation time
t_stop = 40.0 # stop simulations time
t_stop_train = 20.0 # only train with data up to this time
ncp = 4000 # number of communication points
amp = 30.0 # amplitude of input signal
freq = 0.1 # frequency of input signal
inp = 'sin' # input type: delta or sin
# NN and training params
lr = 8e-5
epochs = 400
pre_lr = 5e-3
pre_epochs = 300
n = 21
w_hidden = 60
############################################################################
# Simulate and generate data
############################################################################
# Construct time vector
t = np.linspace(t_start,t_stop, ncp+1)
dt = (t_stop-t_start)/ncp # Step size
# Calc stop idx for training
if not t_stop_train:
m_stop = ncp+1
elif t_stop_train>t_stop:
raise ValueError('t_stop_train must be <= t_stop.')
else:
m_stop = np.argmin(t<t_stop_train)
# Compute input from time vector t
u = dmd.create_input_vec(t, inp_type=inp, amp=amp, freq=freq)
# Get snapshots, both simulated and analytical
data_sim = dmd.get_snapshots_stop_friction(
t_start, t_stop, ncp , input_force=u, time_vec=t, states=['mass1.s', 'mass1.v', 'mass2.s', 'mass2.v'])
x1_target = data_sim[0,:]
x2_target = data_sim[2,:]
# Get data matrices
X,Y = dmd.get_data_matrices(data_sim, m_stop=m_stop, u=u)
############################################################################
# Construct and train NN
############################################################################
# Convert arrays to tensors
X_tensor = tf.constant(X.T, dtype=tf.float64)
Y_tensor = tf.constant(Y.T, dtype=tf.float64)
if u is not None:
u_tensor = tf.constant(u, dtype=tf.float64)
else:
u_tensor = None
# NN and training params
m_stop, n_x = X_tensor.shape
# Define networks
g = Net(n_x, n, w_hidden=w_hidden, activation='relu')
h = Net(n, n_x, w_hidden=w_hidden, activation='relu')
# Initiate weights
g(tf.expand_dims(tf.ones(n_x, dtype=tf.dtypes.float64),0))
h(tf.expand_dims(tf.ones(n, dtype=tf.dtypes.float64),0))
# Create optimizer objects
pre_opt_g = Adam(learning_rate=pre_lr)
pre_opt_h = Adam(learning_rate=pre_lr)
# Pre-train network
print('Pre-training the network as an autoencoder...')
pre_loss_history = koopman.pre_train_networks(pre_opt_g, pre_opt_h, g,
h, Y_tensor, EPOCHS=tf.constant(pre_epochs))
print('FINISHED with final pre-train loss: ',pre_loss_history[-1].numpy())
# Train networks with new optimizer objects
opt_g = Adam(learning_rate=lr)
opt_h = Adam(learning_rate=lr)
print('Training the network with Koopman loss...')
loss_history, rec_loss_hist, lin_loss_hist, pred_loss_hist = koopman.train_networks(
opt_g, opt_h, g, h, X_tensor, Y_tensor, u_tensor, EPOCHS=tf.constant(epochs))
print('FINISHED with final train loss: ',loss_history[-1].numpy())
############################################################################
# Make new predictions
############################################################################
gX = g(X_tensor)
gY = g(Y_tensor)
# Calc DMD modes and eigenvalues
lam_tensor,w_tensor,v_tensor, _ = koopman.get_dmd_modes_tensors(gX,gY)
# Predict the system
Yhat_tensor,_ = koopman.predict_koopman(lam_tensor, w_tensor, v_tensor, X_tensor[0,:],
tf.constant(ncp), g, h, u=u_tensor)
Yhat = Yhat_tensor.numpy().T
x1_koopman = Yhat[0,:]
x2_koopman = Yhat[2,:]
############################################################################
# Perform DMD for comparison
############################################################################
lam_dmd,w_dmd,v_dmd,_ = dmd.get_dmd_modes(X,Y)
Yhat_dmd = dmd.predict(lam_dmd, w_dmd, v_dmd, X[:,0], ncp, u=u)
Yhat_dmd = Yhat_dmd
x1_dmd = Yhat_dmd[0,:]
x2_dmd = Yhat_dmd[2,:]
############################################################################
# Errors, prints and plots
############################################################################
print('Train error Koopman')
print(' x1: ', 1/m_stop*norm(x1_koopman[:m_stop]-x1_target[:m_stop],2))
print(' x2: ', 1/m_stop*norm(x2_koopman[:m_stop]-x2_target[:m_stop],2))
print('Train error DMD')
print(' x1: ', 1/m_stop*norm(x1_dmd[:m_stop]-x1_target[:m_stop],2))
print(' x2: ', 1/m_stop*norm(x2_dmd[:m_stop]-x2_target[:m_stop],2))
if m_stop<ncp:
print('Test error Koopman')
print(' x1: ', 1/(ncp-m_stop)*norm(x1_koopman[m_stop:]-x1_target[m_stop:],2))
print(' x2: ', 1/(ncp-m_stop)*norm(x2_koopman[m_stop:]-x2_target[m_stop:],2))
print('Test error DMD')
print(' x1: ', 1/(ncp-m_stop)*norm(x1_dmd[m_stop:]-x1_target[m_stop:],2))
print(' x2: ', 1/(ncp-m_stop)*norm(x2_dmd[m_stop:]-x2_target[m_stop:],2))
plt.figure()
plt.plot(t,x1_target,'-b')
plt.plot(t,x1_dmd,'--r')
plt.plot(t,x1_koopman,'-.k')
plt.xlim(0,dt*ncp)
plt.xlabel('Time (s)')
plt.ylabel('Possition of mass 1 (m)')
plt.grid(True)
plt.legend(('Simulation','DMD','Koopman'),bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=2)
plt.figure()
plt.plot(t,x2_target,'-b')
plt.plot(t,x2_dmd,'--r')
plt.plot(t,x2_koopman,'-.k')
plt.xlim(0,dt*ncp)
plt.xlabel('Time (s)')
plt.ylabel('Possition of mass 2 (m)')
plt.grid(True)
plt.legend(('Simulation','DMD','Koopman'),bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0, ncol=2)
plt.show()
| de | 0.547794 | # TensorFlow # Force CPU # Simulation params # start simulation time # stop simulations time # only train with data up to this time # number of communication points # amplitude of input signal # frequency of input signal # input type: delta or sin # NN and training params ############################################################################ # Simulate and generate data ############################################################################ # Construct time vector # Step size # Calc stop idx for training # Compute input from time vector t # Get snapshots, both simulated and analytical # Get data matrices ############################################################################ # Construct and train NN ############################################################################ # Convert arrays to tensors # NN and training params # Define networks # Initiate weights # Create optimizer objects # Pre-train network # Train networks with new optimizer objects ############################################################################ # Make new predictions ############################################################################ # Calc DMD modes and eigenvalues # Predict the system ############################################################################ # Perform DMD for comparison ############################################################################ ############################################################################ # Errors, prints and plots ############################################################################ | 2.141093 | 2 |
romania_astar.py | Guilherme-Valle/ai-Algorithms | 1 | 6613080 | <reponame>Guilherme-Valle/ai-Algorithms
import heapq
mapa_romania = {
'Arad': {'cidade': 'Arad',
'conexoes': [{'cidade': 'Zerind', 'distancia': 75},
{'cidade': 'Timisoara', 'distancia': 118},
{'cidade': 'Sibiu', 'distancia': 140}]},
'Sibiu': {'cidade': 'Sibiu',
'conexoes': [{'cidade': 'Fagaras', 'distancia': 99},
{'cidade': 'Rimnicu Vilcea', 'distancia': 80},
{'cidade': 'Oradea', 'distancia': 151},
{'cidade': 'Arad', 'distancia': 140}]},
'Fagaras': {'cidade': 'Fagaras',
'conexoes': [{'cidade': 'Bucharest', 'distancia': 211},
{'cidade': 'Sibiu', 'distancia': 99}]},
'Rimnicu Vilcea': {
'cidade': 'Rimnicu Vilcea',
'conexoes': [{'cidade': 'Craiova', 'distancia': 146},
{'cidade': 'Sibiu', 'distancia': 80},
{'cidade': 'Pitesti', 'distancia': 97}]},
'Pitesti': {'cidade': 'Pitesti',
'conexoes': [{'cidade': 'Bucharest', 'distancia': 101},
{'cidade': 'Rimnicu Vilcea', 'distancia': 97},
{'cidade': 'Craiova', 'distancia': 138}]},
'Bucharest': {'cidade': 'Bucharest',
'conexoes': [{'cidade': 'Fagaras', 'distancia': 211},
{'cidade': 'Pitesti', 'distancia': 101},
{'cidade': 'Urziceni', 'distancia': 85},
{'cidade': 'Giurgiu', 'distancia': 90}]},
'Giurgiu': {'cidade': 'Giurgiu',
'conexoes': [{'cidade': 'Bucharest', 'distancia': 90}]},
'Urziceni': {'cidade': 'Urziceni',
'conexoes': [{'cidade': 'Bucharest', 'distancia': 85},
{'cidade': 'Vaslui', 'distancia': 142},
{'cidade': 'Hirsova', 'distancia': 98},
]},
'Hirsova': {
'cidade': 'Hirsova',
'conexoes': [{'cidade': 'Urziceni', 'distancia': 98},
{'cidade': 'Eforie', 'distancia': 86}]},
'Eforie': {
'cidade': 'Eforie',
'conexoes': [{'cidade': 'Hirsova', 'distancia': 86}]},
'Vaslui': {
'cidade': 'Vaslui',
'conexoes': [{'cidade': 'Urziceni', 'distancia': 142},
{'cidade': 'Iasi', 'distancia': 92}]},
'Iasi': {
'cidade': 'Iasi',
'conexoes': [{'cidade': 'Neamt', 'distancia': 87},
{'cidade': 'Vaslui', 'distancia': 92}]},
'Neamt': {'cidade': 'Neamt',
'conexoes': [{'cidade': 'Iasi', 'distancia': 87}]},
'Craiova': {'cidade': 'Craiova',
'conexoes': [{'cidade': 'Pitesti', 'distancia': 138},
{'cidade': 'Rimnicu Vilcea', 'distancia': 146},
{'cidade': 'Drobeta', 'distancia': 120}]},
'Drobeta': {
'cidade': 'Drobeta',
'conexoes': [{'cidade': 'Craiova', 'distancia': 120},
{'cidade': 'Mehadia', 'distancia': 75}]},
'Mehadia': {
'cidade': 'Mehadia',
'conexoes': [{'cidade': 'Drobeta', 'distancia': 75},
{'cidade': 'Lugoj', 'distancia': 70}]},
'Lugoj': {
'cidade': 'Lugoj',
'conexoes': [{'cidade': 'Mehadia', 'distancia': 70},
{'cidade': 'Timisoara', 'distancia': 111}]},
'Timisoara': {
'cidade': 'Timisoara',
'conexoes': [{'cidade': 'Arad', 'distancia': 118},
{'cidade': 'Lugoj', 'distancia': 111}]},
'Zerind': {
'cidade': 'Zerind',
'conexoes': [{'cidade': 'Arad', 'distancia': 75},
{'cidade': 'Oradea', 'distancia': 71}]},
'Oradea': {'cidade': 'Zerind',
'conexoes': [{'cidade': 'Zerind', 'distancia': 71},
{'cidade': 'Sibiu', 'distancia': 151}]}
}
heuristica_romania = {
'Arad': 366,
'Bucharest': 0,
'Craiova': 160,
'Drobeta': 242,
'Eforie': 161,
'Fagaras': 176,
'Giurgiu': 77,
'Hirsova': 151,
'Iasi': 226,
'Lugoj': 244,
'Mehadia': 241,
'Neamt': 234,
'Oradea': 380,
'Pitesti': 100,
'<NAME>': 193,
'Sibiu': 253,
'Timisoara': 329,
'Urziceni': 80,
'Vaslui': 199,
'Zerind': 374
}
def romania_astar(inicio, objetivo, grafo=None, heuristica=None):
    """A* search from *inicio* to *objetivo* over the Romania road map.

    Args:
        inicio: name of the start city.
        objetivo: name of the goal city.
        grafo: adjacency map {city: {'cidade': name, 'conexoes': [...]}};
            defaults to the module-level ``mapa_romania``.
        heuristica: {city: estimated distance to the goal}; defaults to
            the module-level ``heuristica_romania``.

    Returns:
        List of city names from *inicio* to *objetivo* inclusive, or
        ``None`` when the goal is unreachable.

    Fixes over the original version: the goal test only broke the inner
    loop so the search never stopped or reported a result; the
    predecessor map stored every city as its own parent
    (``caminho[c] = c``) making path reconstruction impossible; and the
    result was never returned (a module-level ``print(caminho)`` of a
    function-local name raised NameError at import time).
    """
    if grafo is None:
        grafo = mapa_romania
    if heuristica is None:
        heuristica = heuristica_romania
    custo_g = {inicio: 0}   # best known cost from inicio to each city
    pai = {}                # predecessor map for path reconstruction
    fila_aberta = [(heuristica.get(inicio, 0), inicio)]
    expandidos = set()
    while fila_aberta:
        _, atual = heapq.heappop(fila_aberta)
        if atual == objetivo:
            caminho = [atual]
            while atual in pai:
                atual = pai[atual]
                caminho.append(atual)
            caminho.reverse()
            return caminho
        if atual in expandidos:
            # Stale heap entry for an already-expanded city; skip it.
            continue
        expandidos.add(atual)
        for sucessor in grafo[atual]['conexoes']:
            cidade = sucessor['cidade']
            g = custo_g[atual] + int(sucessor['distancia'])
            if cidade not in custo_g or g < custo_g[cidade]:
                custo_g[cidade] = g
                pai[cidade] = atual
                heapq.heappush(fila_aberta,
                               (g + heuristica.get(cidade, 0), cidade))
    return None


if __name__ == "__main__":
    # Demo preserved from the original script, guarded so importing the
    # module no longer executes (and previously crashed on) a bare print.
    print(romania_astar('Arad', 'Bucharest'))
| import heapq
# Romania road map (Russell & Norvig): for each city, its own name plus the
# list of neighbouring cities with road distances.  Every road is listed in
# both directions.
mapa_romania = {
    'Arad': {'cidade': 'Arad',
             'conexoes': [{'cidade': 'Zerind', 'distancia': 75},
                          {'cidade': 'Timisoara', 'distancia': 118},
                          {'cidade': 'Sibiu', 'distancia': 140}]},
    'Sibiu': {'cidade': 'Sibiu',
              'conexoes': [{'cidade': 'Fagaras', 'distancia': 99},
                           {'cidade': 'Rimnicu Vilcea', 'distancia': 80},
                           {'cidade': 'Oradea', 'distancia': 151},
                           {'cidade': 'Arad', 'distancia': 140}]},
    'Fagaras': {'cidade': 'Fagaras',
                'conexoes': [{'cidade': 'Bucharest', 'distancia': 211},
                             {'cidade': 'Sibiu', 'distancia': 99}]},
    'Rimnicu Vilcea': {'cidade': 'Rimnicu Vilcea',
                       'conexoes': [{'cidade': 'Craiova', 'distancia': 146},
                                    {'cidade': 'Sibiu', 'distancia': 80},
                                    {'cidade': 'Pitesti', 'distancia': 97}]},
    'Pitesti': {'cidade': 'Pitesti',
                'conexoes': [{'cidade': 'Bucharest', 'distancia': 101},
                             {'cidade': 'Rimnicu Vilcea', 'distancia': 97},
                             {'cidade': 'Craiova', 'distancia': 138}]},
    'Bucharest': {'cidade': 'Bucharest',
                  'conexoes': [{'cidade': 'Fagaras', 'distancia': 211},
                               {'cidade': 'Pitesti', 'distancia': 101},
                               {'cidade': 'Urziceni', 'distancia': 85},
                               {'cidade': 'Giurgiu', 'distancia': 90}]},
    'Giurgiu': {'cidade': 'Giurgiu',
                'conexoes': [{'cidade': 'Bucharest', 'distancia': 90}]},
    'Urziceni': {'cidade': 'Urziceni',
                 'conexoes': [{'cidade': 'Bucharest', 'distancia': 85},
                              {'cidade': 'Vaslui', 'distancia': 142},
                              {'cidade': 'Hirsova', 'distancia': 98}]},
    'Hirsova': {'cidade': 'Hirsova',
                'conexoes': [{'cidade': 'Urziceni', 'distancia': 98},
                             {'cidade': 'Eforie', 'distancia': 86}]},
    'Eforie': {'cidade': 'Eforie',
               'conexoes': [{'cidade': 'Hirsova', 'distancia': 86}]},
    'Vaslui': {'cidade': 'Vaslui',
               'conexoes': [{'cidade': 'Urziceni', 'distancia': 142},
                            {'cidade': 'Iasi', 'distancia': 92}]},
    'Iasi': {'cidade': 'Iasi',
             'conexoes': [{'cidade': 'Neamt', 'distancia': 87},
                          {'cidade': 'Vaslui', 'distancia': 92}]},
    'Neamt': {'cidade': 'Neamt',
              'conexoes': [{'cidade': 'Iasi', 'distancia': 87}]},
    'Craiova': {'cidade': 'Craiova',
                'conexoes': [{'cidade': 'Pitesti', 'distancia': 138},
                             {'cidade': 'Rimnicu Vilcea', 'distancia': 146},
                             {'cidade': 'Drobeta', 'distancia': 120}]},
    'Drobeta': {'cidade': 'Drobeta',
                'conexoes': [{'cidade': 'Craiova', 'distancia': 120},
                             {'cidade': 'Mehadia', 'distancia': 75}]},
    'Mehadia': {'cidade': 'Mehadia',
                'conexoes': [{'cidade': 'Drobeta', 'distancia': 75},
                             {'cidade': 'Lugoj', 'distancia': 70}]},
    'Lugoj': {'cidade': 'Lugoj',
              'conexoes': [{'cidade': 'Mehadia', 'distancia': 70},
                           {'cidade': 'Timisoara', 'distancia': 111}]},
    'Timisoara': {'cidade': 'Timisoara',
                  'conexoes': [{'cidade': 'Arad', 'distancia': 118},
                               {'cidade': 'Lugoj', 'distancia': 111}]},
    'Zerind': {'cidade': 'Zerind',
               'conexoes': [{'cidade': 'Arad', 'distancia': 75},
                            {'cidade': 'Oradea', 'distancia': 71}]},
    # Bug fix: this entry was mislabelled 'cidade': 'Zerind' in the original.
    'Oradea': {'cidade': 'Oradea',
               'conexoes': [{'cidade': 'Zerind', 'distancia': 71},
                            {'cidade': 'Sibiu', 'distancia': 151}]}
}
# Straight-line distances to Bucharest (admissible A* heuristic), from the
# classic Russell & Norvig Romania road-map example.
heuristica_romania = {
    'Arad': 366,
    'Bucharest': 0,
    'Craiova': 160,
    'Drobeta': 242,
    'Eforie': 161,
    'Fagaras': 176,
    'Giurgiu': 77,
    'Hirsova': 151,
    'Iasi': 226,
    'Lugoj': 244,
    'Mehadia': 241,
    'Neamt': 234,
    'Oradea': 380,
    'Pitesti': 100,
    'Rimnicu Vilcea': 193,  # was the scrubbed placeholder '<NAME>'
    'Sibiu': 253,
    'Timisoara': 329,
    'Urziceni': 80,
    'Vaslui': 199,
    'Zerind': 374
}
def romania_astar(inicio, objetivo, grafo=None, heuristica=None):
    """A* search from *inicio* to *objetivo* over the Romania road map.

    Args:
        inicio: name of the start city.
        objetivo: name of the goal city.
        grafo: adjacency map {city: {'cidade': name, 'conexoes': [...]}};
            defaults to the module-level ``mapa_romania``.
        heuristica: {city: estimated distance to the goal}; defaults to
            the module-level ``heuristica_romania``.

    Returns:
        List of city names from *inicio* to *objetivo* inclusive, or
        ``None`` when the goal is unreachable.

    Fixes over the original version: the goal test only broke the inner
    loop so the search never stopped or reported a result; the
    predecessor map stored every city as its own parent
    (``caminho[c] = c``) making path reconstruction impossible; and the
    result was never returned (a module-level ``print(caminho)`` of a
    function-local name raised NameError at import time).
    """
    if grafo is None:
        grafo = mapa_romania
    if heuristica is None:
        heuristica = heuristica_romania
    custo_g = {inicio: 0}   # best known cost from inicio to each city
    pai = {}                # predecessor map for path reconstruction
    fila_aberta = [(heuristica.get(inicio, 0), inicio)]
    expandidos = set()
    while fila_aberta:
        _, atual = heapq.heappop(fila_aberta)
        if atual == objetivo:
            caminho = [atual]
            while atual in pai:
                atual = pai[atual]
                caminho.append(atual)
            caminho.reverse()
            return caminho
        if atual in expandidos:
            # Stale heap entry for an already-expanded city; skip it.
            continue
        expandidos.add(atual)
        for sucessor in grafo[atual]['conexoes']:
            cidade = sucessor['cidade']
            g = custo_g[atual] + int(sucessor['distancia'])
            if cidade not in custo_g or g < custo_g[cidade]:
                custo_g[cidade] = g
                pai[cidade] = atual
                heapq.heappush(fila_aberta,
                               (g + heuristica.get(cidade, 0), cidade))
    return None


if __name__ == "__main__":
    # Demo preserved from the original script, guarded so importing the
    # module no longer executes (and previously crashed on) a bare print.
    print(romania_astar('Arad', 'Bucharest'))
cifar10/adversarial_networks/models/__init__.py | whxbergkamp/RobustDL_GAN | 22 | 6613081 | import tensorflow as tf
from adversarial_networks.models import (
resnet18_2,
resnet18_2_wide,
)
# Maps a model name to the function that builds its generator network.
generator_dict = {
    'resnet18_2': resnet18_2.generator,
    'resnet18_2_wide': resnet18_2_wide.generator,
}
# Maps a model name to the function that builds its discriminator network.
discriminator_dict = {
    'resnet18_2': resnet18_2.discriminator,
    'resnet18_2_wide': resnet18_2_wide.discriminator,
}
def get_generator(model_name, scope='generator', **kwargs):
    """Look up the generator builder for *model_name* and wrap it in a
    variable-sharing ``tf.make_template`` under *scope*.

    Extra keyword arguments are forwarded to ``tf.make_template``.
    Raises KeyError for an unknown model name.
    """
    return tf.make_template(scope, generator_dict[model_name], **kwargs)
def get_discriminator(model_name, scope='discriminator', **kwargs):
    """Look up the discriminator builder for *model_name* and wrap it in a
    variable-sharing ``tf.make_template`` under *scope*.

    Extra keyword arguments are forwarded to ``tf.make_template``.
    Raises KeyError for an unknown model name.
    """
    return tf.make_template(scope, discriminator_dict[model_name], **kwargs)
| import tensorflow as tf
from adversarial_networks.models import (
resnet18_2,
resnet18_2_wide,
)
# Maps a model name to the function that builds its generator network.
generator_dict = {
    'resnet18_2': resnet18_2.generator,
    'resnet18_2_wide': resnet18_2_wide.generator,
}
# Maps a model name to the function that builds its discriminator network.
discriminator_dict = {
    'resnet18_2': resnet18_2.discriminator,
    'resnet18_2_wide': resnet18_2_wide.discriminator,
}
def get_generator(model_name, scope='generator', **kwargs):
    """Look up the generator builder for *model_name* and wrap it in a
    variable-sharing ``tf.make_template`` under *scope*.

    Extra keyword arguments are forwarded to ``tf.make_template``.
    Raises KeyError for an unknown model name.
    """
    return tf.make_template(scope, generator_dict[model_name], **kwargs)
def get_discriminator(model_name, scope='discriminator', **kwargs):
    """Look up the discriminator builder for *model_name* and wrap it in a
    variable-sharing ``tf.make_template`` under *scope*.

    Extra keyword arguments are forwarded to ``tf.make_template``.
    Raises KeyError for an unknown model name.
    """
    return tf.make_template(scope, discriminator_dict[model_name], **kwargs)
| none | 1 | 2.293204 | 2 | |
math/distribution/bernoulliDistribution.py | parmalatinter/ai | 0 | 6613082 | #!/usr/bin/env python
# ベルヌーイ分布見方
# 常に同じ値与える分布
# 1回の試行で、2種類のどちらかの事象しか起こらない試行
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# 期待値:E(x) = p
# 分散: Var(x) = p * (1 - p)
# 確率関数: f(x) = { p , x = 1 \n 1 - p, z = 0}
# * 確率変数Xがパラメターpのベルヌーイ分布に従うとき X ~ B(1, p)と表現する
# 参考
import numpy as np
import sympy as sym
# グラフ
def Graph(xArray, yArray):
    """Draw the distribution as a bar chart and show it.

    The x axis is forced to use integer tick labels only (the outcomes
    of the distribution are integers).
    """
    x_axis = plt.gca().get_xaxis()
    x_axis.set_major_locator(ticker.MaxNLocator(integer=True))
    plt.bar(xArray, yArray)
    plt.show()
# 標準偏差
def StandardDeviation(variance):
    """Return the standard deviation, i.e. the square root of *variance*."""
    sigma = math.sqrt(variance)
    return sigma
def main():
    """Plot a Bernoulli(p=0.7) distribution and print its key statistics."""
    # Bug fix: the original built xArray from range(n) with n = 1, i.e. a
    # single x position for the two probabilities below, which makes
    # plt.bar(xArray, yArray) fail on mismatched lengths.  A Bernoulli
    # variable always has exactly two outcomes, x = 1 and x = 0.
    # yArray[0] is treated as p = P(X=1) below, so pair it with x = 1.
    xArray = [1, 0]
    print(xArray)
    yArray = [0.7, 0.3]
    Graph(xArray, yArray)
    p = yArray[0]  # success probability p
    # E(X) = p for a Bernoulli distribution
    print("期待値:E(x)", yArray[0])
    var = p * (1 - p)  # Var(X) = p(1 - p)
    print("分散:Var(x)", var)
    σ = StandardDeviation(var)
    print("標準偏差:σ", σ)
    fx = 1 - p  # probability mass at x = 0: f(0) = 1 - p
    print("確率関数:f(x)", fx)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# ベルヌーイ分布見方
# 常に同じ値与える分布
# 1回の試行で、2種類のどちらかの事象しか起こらない試行
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# 期待値:E(x) = p
# 分散: Var(x) = p * (1 - p)
# 確率関数: f(x) = { p , x = 1 \n 1 - p, z = 0}
# * 確率変数Xがパラメターpのベルヌーイ分布に従うとき X ~ B(1, p)と表現する
# 参考
import numpy as np
import sympy as sym
# グラフ
def Graph(xArray, yArray):
    """Draw the distribution as a bar chart and show it.

    The x axis is forced to use integer tick labels only (the outcomes
    of the distribution are integers).
    """
    x_axis = plt.gca().get_xaxis()
    x_axis.set_major_locator(ticker.MaxNLocator(integer=True))
    plt.bar(xArray, yArray)
    plt.show()
# 標準偏差
def StandardDeviation(variance):
    """Return the standard deviation, i.e. the square root of *variance*."""
    sigma = math.sqrt(variance)
    return sigma
def main():
    """Plot a Bernoulli(p=0.7) distribution and print its key statistics."""
    # Bug fix: the original built xArray from range(n) with n = 1, i.e. a
    # single x position for the two probabilities below, which makes
    # plt.bar(xArray, yArray) fail on mismatched lengths.  A Bernoulli
    # variable always has exactly two outcomes, x = 1 and x = 0.
    # yArray[0] is treated as p = P(X=1) below, so pair it with x = 1.
    xArray = [1, 0]
    print(xArray)
    yArray = [0.7, 0.3]
    Graph(xArray, yArray)
    p = yArray[0]  # success probability p
    # E(X) = p for a Bernoulli distribution
    print("期待値:E(x)", yArray[0])
    var = p * (1 - p)  # Var(X) = p(1 - p)
    print("分散:Var(x)", var)
    σ = StandardDeviation(var)
    print("標準偏差:σ", σ)
    fx = 1 - p  # probability mass at x = 0: f(0) = 1 - p
    print("確率関数:f(x)", fx)
if __name__ == "__main__":
main()
| ja | 0.980269 | #!/usr/bin/env python # ベルヌーイ分布見方 # 常に同じ値与える分布 # 1回の試行で、2種類のどちらかの事象しか起こらない試行 # 期待値:E(x) = p # 分散: Var(x) = p * (1 - p) # 確率関数: f(x) = { p , x = 1 \n 1 - p, z = 0} # * 確率変数Xがパラメターpのベルヌーイ分布に従うとき X ~ B(1, p)と表現する # 参考 # グラフ # X軸の数字が必ず整数になるようにする # 標準偏差 | 4.024263 | 4 |
meerkat_backend_interface/katcp_server.py | rubyvanrooyen/meerkat-backend-interface | 0 | 6613083 | r"""
Adapted from the reynard package (MIT license) on June 22, 2018:
https://github.com/ewanbarr/reynard
___ _ _ _ _ _ _ _
| _ )_ _ ___ __ _| |_| |_| |_ _ _ ___ _ _ __ _| |_ | | (_)__| |_ ___ _ _
| _ \ '_/ -_) _` | / / _| ' \| '_/ _ \ || / _` | ' \ | |__| (_-< _/ -_) ' \
|___/_| \___\__,_|_\_\\__|_||_|_| \___/\_,_\__, |_||_| |____|_/__/\__\___|_||_|
|___/
______________ ,-.
/ /| / \ `. __..-,O
/ / | : \ --''_..-'.'
/____________ / | | . .-' `. '.
| ___________ | | : . .`.'
|| || | \ `. / ..
|| || | \ `. ' .
|| || | `, `. \
||___________|| | ,|,`. `-.\
| _______ | / '.|| ``-...__..-`
/| (_______) | /____________ | |
( |_____________|/ \ |__|
\ \ /||\
.=======================. \ //||\\
| :::::::::::::::: ::: | \ // || \\
| ::::::::::::::[] ::: | \___//__||__\\___
| ----------- ::: | '--------------'
\-----------------------
"""
from __future__ import print_function
import sys
import json
import time
from katcp import Sensor, AsyncDeviceServer, AsyncReply
from katcp.kattypes import request, return_reply, Int, Str
from reynard.utils import unpack_dict
import redis
from redis_tools import REDIS_CHANNELS, write_pair_redis, write_list_redis, publish_to_redis
# to handle halt request
from concurrent.futures import Future
from tornado import gen
from tornado.concurrent import chain_future
from .logger import log
class BLBackendInterface(AsyncDeviceServer):
"""Breakthrough Listen's KATCP Server Backend Interface
This server responds to requests sent from CAM, most notably:
@ configue
@ capture-init
@ capture-start
@ capture-stop
@ capture-done
@ deconfigure
But because it inherits from AsyncDeviceServer, also responds to:
* halt
* help
* log-level
* restart [#restartf1]_
* client-list
* sensor-list
* sensor-sampling
* sensor-value
* watchdog
* version-list (only standard in KATCP v5 or later)
* request-timeout-hint (pre-standard only if protocol flags indicates
timeout hints, supported for KATCP v5.1 or later)
* sensor-sampling-clear (non-standard)
"""
VERSION_INFO = ("BLUSE-katcp-interface", 1, 0)
BUILD_INFO = ("BLUSE-katcp-implementation", 1, 0, "rc?")
DEVICE_STATUSES = ["ok", "fail", "degraded"]
    def __init__(self, server_host, server_port):
        """Create the KATCP server and a client to the local redis server.

        Args:
            server_host (str): interface/host the KATCP server binds to.
            server_port (int): TCP port the KATCP server listens on.
        """
        self.port = server_port
        # Connection to the local redis instance used to fan CAM requests
        # out to the rest of the backend (default host/port/db).
        self.redis_server = redis.StrictRedis()
        super(BLBackendInterface, self).__init__(
            server_host, server_port)
    def start(self):
        """Start the server and print a startup banner.

        Based on the passed configuration object this is
        where the clients for subordinate nodes will be
        set up.
        """
        super(BLBackendInterface, self).start()
        # Banner is a raw string; the only .format() placeholders are the
        # {} after "Version:" and "Port:" below.
        print(R"""
       ,'''''-._
      ;  ,.  <> `-._ 
      ;  \'   _,--'"
      ;      (
      ;   ,   ` \
      ;,  ,     \        MeerKAT BL Backend Interface:
      ;  |,   |  \            KATCP Server
      ; |, |  |   \               Version: {}
      ;  |  |  |   \              Port: {}
      |.-\ ,\ |\   :
      |.| `. `-. | ||
      :.|   `-. \ ';;
      .- ,   \;;|
      ;   ,  |  ,\
      ; ,    ; \ https://github.com/ejmichaud/meerkat-backend-interface
      ;    , /`. , )
   __,;,   ,' \  ,|
 _,--''__,|   /  \ :
,'_,-''    | ,/    | :
/ /        | ;     ; |
| |      __,-| |--..__,--| |---.--....___
  ___,-| |----''   / |      `._`-.          `----
\      \ `'''    '''      --
 `.`.             --'
  `.`-._      _,  ,-  __,-
     `-.`.
        --' `;
    """.format("{}.{}".format(self.VERSION_INFO[1], self.VERSION_INFO[2]), self.port))
@request(Str(), Str(), Int(), Str(), Str())
@return_reply()
def request_configure(self, req, product_id, antennas_csv,
n_channels, streams_json, proxy_name):
"""Receive metadata for upcoming observation.
In order to allow BLUSE to make an estimate of its ability
to process a particular data product, this command should
be used to configure a BLUSE instance when a new subarray is activated.
Args:
product_id (str): This is a name for the data product,
which is a useful tag to include in the data,
but should not be analysed further.
For example "array_1_bc856M4k". This value will
be unique across all subarrays. However, it is
not a globally unique identifier for the lifetime
of the telescope. The exact same value may be provided
at a later time when the same subarray is activated again.
antennas_csv (str): A comma separated list of physical antenna names
used in particular sub-array to which the data products belongs.
n_channels (int): The integer number of frequency channels provided by the CBF.
streams_json (str) is a JSON struct containing config keys and
values describing the streams. For example:
{'stream_type1': {
'stream_name1': 'stream_address1',
'stream_name2': 'stream_address2',
...},
'stream_type2': {
'stream_name1': 'stream_address1',
'stream_name2': 'stream_address2',
...},
...}
The steam type keys indicate the source of the data and the type, e.g. cam.http.
stream_address will be a URI. For SPEAD streams, the format will be
spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups.
When a single logical stream requires too much bandwidth to accommodate
as a single multicast group, the count parameter indicates the number of
additional consecutively numbered multicast group ip addresses, and
sharing the same UDP port number.
stream_name is the name used to identify the stream in CAM.
A Python example is shown below, for five streams:
One CAM stream, with type cam.http. The camdata stream provides the
connection string for katportalclient (for the subarray that this
BLUSE instance is being configured on).
One F-engine stream, with type: cbf.antenna_channelised_voltage.
One X-engine stream, with type: cbf.baseline_correlation_products.
Two beam streams, with type: cbf.tied_array_channelised_voltage.
The stream names ending in x are horizontally polarised, and those
ending in y are vertically polarised.
proxy_name (str): The CAM name for the instance of the BLUSE data
proxy that is being configured. For example, "BLUSE_3". This
can be used to query sensors on the correct proxy. Note that for
BLUSE there will only be a single instance of the proxy in a subarray.
Returns:
None... but replies with "ok" or "fail" and logs either info or error
Writes:
- subbarry1_abc65555:timestamp" -> "1534657577373.23423" :: Redis String
- subarray1_abc65555:antennas" -> [1,2,3,4] :: Redis List
- subarray1_abc65555:n_channels" -> "4096" :: Redis String
- subarray1_abc65555:proxy_name "-> "BLUSE_whatever" :: Redis String
- subarray1_abc65555:streams" -> {....} :: Redis Hash !!!CURRENTLY A STRING!!!
- current:obs:id -> "subbary1_abc65555"
Publishes:
redis-channel: 'alerts' <-- "configure"
Examples:
> ?configure array_1_bc856M4k a1,a2,a3,a4 128000 {"cam.http":{"camdata":"http://monctl.devnmk.camlab.kat.ac.za/api/client/2"},"stream_type2":{"stream_name1":"stream_address1","stream_name2":"stream_address2"}} BLUSE_3
"""
try:
antennas_list = antennas_csv.split(",")
json_dict = unpack_dict(streams_json)
cam_url = json_dict['cam.http']['camdata']
except Exception as e:
log.error(e)
return ("fail", e)
statuses = []
statuses.append(write_pair_redis(self.redis_server, "{}:timestamp".format(product_id), time.time()))
statuses.append(write_list_redis(self.redis_server, "{}:antennas".format(product_id), antennas_list))
statuses.append(write_pair_redis(self.redis_server, "{}:n_channels".format(product_id), n_channels))
statuses.append(write_pair_redis(self.redis_server, "{}:proxy_name".format(product_id), proxy_name))
statuses.append(write_pair_redis(self.redis_server, "{}:streams".format(product_id), json.dumps(json_dict)))
statuses.append(write_pair_redis(self.redis_server, "{}:cam:url".format(product_id), cam_url))
statuses.append(write_pair_redis(self.redis_server, "current:obs:id", product_id))
msg = "configure:{}".format(product_id)
statuses.append(publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg))
if all(statuses):
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_init(self, req, product_id):
"""Signals that an observation will start soon
Publishes a message to the 'alerts' channel of the form:
capture-init:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
to get ready for data
"""
msg = "capture-init:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_start(self, req, product_id):
"""Signals that an observation is starting now
Publishes a message to the 'alerts' channel of the form:
capture-start:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that they need to be collecting data now
"""
msg = "capture-start:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_stop(self, req, product_id):
"""Signals that an observation is has stopped
Publishes a message to the 'alerts' channel of the form:
capture-stop:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that they should stop collecting data now
"""
msg = "capture-stop:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_done(self, req, product_id):
"""Signals that an observation has finished
Publishes a message to the 'alerts' channel of the form:
capture-done:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that their data streams are ending
"""
msg = "capture-done:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_deconfigure(self, req, product_id):
"""Signals that the current data product is done.
Deconfigure the BLUSE instance that was created by the call
to ?configure with the corresponding product_id. Note: CAM is
expected to have sent a ?capture-done request before deconfiguring,
in order to ensure that all data has been written. If BLUSE uses an
instance of katportalclient to get information from CAM for this
BLUSE instance, then it should disconnect at this time.
Publishes a message to the 'alerts' channel of the form:
deconfigure:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that their data streams are ending
"""
msg = "deconfigure:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
    def setup_sensors(self):
        """
        @brief Set up monitoring sensors.

        @note These sensors are made available on top of the default
            sensors implemented in AsyncDeviceServer and its base classes:

            device-status:     health of BLUSE and associated devices
                               (one of DEVICE_STATUSES).
            local-time-synced: whether BLUSE is NTP synchronised.
            version:           the running BLUSE version string.
        """
        self._device_status = Sensor.discrete(
            "device-status",
            description="Health status of BLUSE",
            params=self.DEVICE_STATUSES,
            default="ok",
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._device_status)
        self._local_time_synced = Sensor.boolean(
            "local-time-synced",
            description="Indicates BLUSE is NTP syncronised.",
            default=True,  # TODO: implement actual NTP synchronization request
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._local_time_synced)
        self._version = Sensor.string(
            "version",
            description="Reports the current BLUSE version",
            # e.g. VERSION_INFO[1:] == (1, 0) -> "(1, 0)" -> "1.0"
            default=str(self.VERSION_INFO[1:]).strip('()').replace(' ', '').replace(",", '.'),
            initial_status=Sensor.NOMINAL)
        self.add_sensor(self._version)
def request_halt(self, req, msg):
"""Halts the server, logs to syslog and slack, and exits the program
Returns
-------
success : {'ok', 'fail'}
Whether scheduling the halt succeeded.
Examples
--------
::
?halt
!halt ok
TODO:
- Call halt method on superclass to avoid copy paste
Doing this caused an issue:
File "/Users/Eric/Berkeley/seti/packages/meerkat/lib/python2.7/site-packages/katcp/server.py", line 1102, in handle_request
assert (reply.mtype == Message.REPLY)
AttributeError: 'NoneType' object has no attribute 'mtype'
"""
f = Future()
@gen.coroutine
def _halt():
req.reply("ok")
yield gen.moment
self.stop(timeout=None)
raise AsyncReply
self.ioloop.add_callback(lambda: chain_future(_halt(), f))
log.critical("HALTING SERVER!!!")
# TODO: uncomment when you deploy
# notify_slack("KATCP server at MeerKAT has halted. Might want to check that!")
sys.exit(0)
    @request()
    @return_reply(Str())
    def request_find_alien(self, req):
        """Finds an alien.

        Easter-egg request: always replies "ok" with a block of ASCII art.
        """
        return ("ok", R"""
.  .   .   .    . .  . .    .
    . .  :  .  .. :. .___---------___.
     .  .   .    .  :.:. _".^ .^ ^.  '.. :"-_. .
  .  :       .  .  .:../:            . .^  :.:\.
      . :    .. :....:: a:                   . .:\
   .  :    .     . _ :::/:               .  ^ .  . .:\
.  :. :  .  :.  . /:  \                     .  .  .:\
     . : .   . :  .:: |                        .   . :\
  .   . .  .   . \:|                           . : . :|
    .  :  .  .  .. ::\(                          :)/
   .  : . : .:.|. ######              .#######::|
    :.. .  :-  : .:  ::|.#######..########:|
   .  .  .  ..  .  .. :\ ########:########.:/
     .+ :: : -.:\ ########. ########.:/
 .+    .  . . . . :.:\. ####### #######..:/
   :: . . . . ::.:..:.\           .   .   ..:/
.   .   .  .. :  -::::.\.       | |     . .:/
   .  :  .  .  .-:.":.::.\             ..:/
.      -.   . . . .: .:::.:.\.           .:/
.   .   .  :      : ....::_:..:\   ___.  :/
  .   .  .. :.  .. .  .: :.:.:\       :/
    +   .   .   : . ::. :.:. .:.|\  .:/|
  .         +   .  .  ...:: ..|  --.:|
.  . . .   .  .  . ... :..:.."(  ..)"
 .   .       .      :  .   .: ::/  .  .::\
        """)
| r"""
Adapted from the reynard package (MIT license) on June 22, 2018:
https://github.com/ewanbarr/reynard
___ _ _ _ _ _ _ _
| _ )_ _ ___ __ _| |_| |_| |_ _ _ ___ _ _ __ _| |_ | | (_)__| |_ ___ _ _
| _ \ '_/ -_) _` | / / _| ' \| '_/ _ \ || / _` | ' \ | |__| (_-< _/ -_) ' \
|___/_| \___\__,_|_\_\\__|_||_|_| \___/\_,_\__, |_||_| |____|_/__/\__\___|_||_|
|___/
______________ ,-.
/ /| / \ `. __..-,O
/ / | : \ --''_..-'.'
/____________ / | | . .-' `. '.
| ___________ | | : . .`.'
|| || | \ `. / ..
|| || | \ `. ' .
|| || | `, `. \
||___________|| | ,|,`. `-.\
| _______ | / '.|| ``-...__..-`
/| (_______) | /____________ | |
( |_____________|/ \ |__|
\ \ /||\
.=======================. \ //||\\
| :::::::::::::::: ::: | \ // || \\
| ::::::::::::::[] ::: | \___//__||__\\___
| ----------- ::: | '--------------'
\-----------------------
"""
from __future__ import print_function
import sys
import json
import time
from katcp import Sensor, AsyncDeviceServer, AsyncReply
from katcp.kattypes import request, return_reply, Int, Str
from reynard.utils import unpack_dict
import redis
from redis_tools import REDIS_CHANNELS, write_pair_redis, write_list_redis, publish_to_redis
# to handle halt request
from concurrent.futures import Future
from tornado import gen
from tornado.concurrent import chain_future
from .logger import log
class BLBackendInterface(AsyncDeviceServer):
"""Breakthrough Listen's KATCP Server Backend Interface
This server responds to requests sent from CAM, most notably:
@ configue
@ capture-init
@ capture-start
@ capture-stop
@ capture-done
@ deconfigure
But because it inherits from AsyncDeviceServer, also responds to:
* halt
* help
* log-level
* restart [#restartf1]_
* client-list
* sensor-list
* sensor-sampling
* sensor-value
* watchdog
* version-list (only standard in KATCP v5 or later)
* request-timeout-hint (pre-standard only if protocol flags indicates
timeout hints, supported for KATCP v5.1 or later)
* sensor-sampling-clear (non-standard)
"""
VERSION_INFO = ("BLUSE-katcp-interface", 1, 0)
BUILD_INFO = ("BLUSE-katcp-implementation", 1, 0, "rc?")
DEVICE_STATUSES = ["ok", "fail", "degraded"]
    def __init__(self, server_host, server_port):
        """Create the KATCP server and a client to the local redis server.

        Args:
            server_host (str): interface/host the KATCP server binds to.
            server_port (int): TCP port the KATCP server listens on.
        """
        self.port = server_port
        # Connection to the local redis instance used to fan CAM requests
        # out to the rest of the backend (default host/port/db).
        self.redis_server = redis.StrictRedis()
        super(BLBackendInterface, self).__init__(
            server_host, server_port)
    def start(self):
        """Start the server and print a startup banner.

        Based on the passed configuration object this is
        where the clients for subordinate nodes will be
        set up.
        """
        super(BLBackendInterface, self).start()
        # Banner is a raw string; the only .format() placeholders are the
        # {} after "Version:" and "Port:" below.
        print(R"""
       ,'''''-._
      ;  ,.  <> `-._ 
      ;  \'   _,--'"
      ;      (
      ;   ,   ` \
      ;,  ,     \        MeerKAT BL Backend Interface:
      ;  |,   |  \            KATCP Server
      ; |, |  |   \               Version: {}
      ;  |  |  |   \              Port: {}
      |.-\ ,\ |\   :
      |.| `. `-. | ||
      :.|   `-. \ ';;
      .- ,   \;;|
      ;   ,  |  ,\
      ; ,    ; \ https://github.com/ejmichaud/meerkat-backend-interface
      ;    , /`. , )
   __,;,   ,' \  ,|
 _,--''__,|   /  \ :
,'_,-''    | ,/    | :
/ /        | ;     ; |
| |      __,-| |--..__,--| |---.--....___
  ___,-| |----''   / |      `._`-.          `----
\      \ `'''    '''      --
 `.`.             --'
  `.`-._      _,  ,-  __,-
     `-.`.
        --' `;
    """.format("{}.{}".format(self.VERSION_INFO[1], self.VERSION_INFO[2]), self.port))
@request(Str(), Str(), Int(), Str(), Str())
@return_reply()
def request_configure(self, req, product_id, antennas_csv,
n_channels, streams_json, proxy_name):
"""Receive metadata for upcoming observation.
In order to allow BLUSE to make an estimate of its ability
to process a particular data product, this command should
be used to configure a BLUSE instance when a new subarray is activated.
Args:
product_id (str): This is a name for the data product,
which is a useful tag to include in the data,
but should not be analysed further.
For example "array_1_bc856M4k". This value will
be unique across all subarrays. However, it is
not a globally unique identifier for the lifetime
of the telescope. The exact same value may be provided
at a later time when the same subarray is activated again.
antennas_csv (str): A comma separated list of physical antenna names
used in particular sub-array to which the data products belongs.
n_channels (int): The integer number of frequency channels provided by the CBF.
streams_json (str) is a JSON struct containing config keys and
values describing the streams. For example:
{'stream_type1': {
'stream_name1': 'stream_address1',
'stream_name2': 'stream_address2',
...},
'stream_type2': {
'stream_name1': 'stream_address1',
'stream_name2': 'stream_address2',
...},
...}
The steam type keys indicate the source of the data and the type, e.g. cam.http.
stream_address will be a URI. For SPEAD streams, the format will be
spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups.
When a single logical stream requires too much bandwidth to accommodate
as a single multicast group, the count parameter indicates the number of
additional consecutively numbered multicast group ip addresses, and
sharing the same UDP port number.
stream_name is the name used to identify the stream in CAM.
A Python example is shown below, for five streams:
One CAM stream, with type cam.http. The camdata stream provides the
connection string for katportalclient (for the subarray that this
BLUSE instance is being configured on).
One F-engine stream, with type: cbf.antenna_channelised_voltage.
One X-engine stream, with type: cbf.baseline_correlation_products.
Two beam streams, with type: cbf.tied_array_channelised_voltage.
The stream names ending in x are horizontally polarised, and those
ending in y are vertically polarised.
proxy_name (str): The CAM name for the instance of the BLUSE data
proxy that is being configured. For example, "BLUSE_3". This
can be used to query sensors on the correct proxy. Note that for
BLUSE there will only be a single instance of the proxy in a subarray.
Returns:
None... but replies with "ok" or "fail" and logs either info or error
Writes:
- subbarry1_abc65555:timestamp" -> "1534657577373.23423" :: Redis String
- subarray1_abc65555:antennas" -> [1,2,3,4] :: Redis List
- subarray1_abc65555:n_channels" -> "4096" :: Redis String
- subarray1_abc65555:proxy_name "-> "BLUSE_whatever" :: Redis String
- subarray1_abc65555:streams" -> {....} :: Redis Hash !!!CURRENTLY A STRING!!!
- current:obs:id -> "subbary1_abc65555"
Publishes:
redis-channel: 'alerts' <-- "configure"
Examples:
> ?configure array_1_bc856M4k a1,a2,a3,a4 128000 {"cam.http":{"camdata":"http://monctl.devnmk.camlab.kat.ac.za/api/client/2"},"stream_type2":{"stream_name1":"stream_address1","stream_name2":"stream_address2"}} BLUSE_3
"""
try:
antennas_list = antennas_csv.split(",")
json_dict = unpack_dict(streams_json)
cam_url = json_dict['cam.http']['camdata']
except Exception as e:
log.error(e)
return ("fail", e)
statuses = []
statuses.append(write_pair_redis(self.redis_server, "{}:timestamp".format(product_id), time.time()))
statuses.append(write_list_redis(self.redis_server, "{}:antennas".format(product_id), antennas_list))
statuses.append(write_pair_redis(self.redis_server, "{}:n_channels".format(product_id), n_channels))
statuses.append(write_pair_redis(self.redis_server, "{}:proxy_name".format(product_id), proxy_name))
statuses.append(write_pair_redis(self.redis_server, "{}:streams".format(product_id), json.dumps(json_dict)))
statuses.append(write_pair_redis(self.redis_server, "{}:cam:url".format(product_id), cam_url))
statuses.append(write_pair_redis(self.redis_server, "current:obs:id", product_id))
msg = "configure:{}".format(product_id)
statuses.append(publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg))
if all(statuses):
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_init(self, req, product_id):
"""Signals that an observation will start soon
Publishes a message to the 'alerts' channel of the form:
capture-init:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
to get ready for data
"""
msg = "capture-init:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_start(self, req, product_id):
"""Signals that an observation is starting now
Publishes a message to the 'alerts' channel of the form:
capture-start:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that they need to be collecting data now
"""
msg = "capture-start:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_stop(self, req, product_id):
"""Signals that an observation is has stopped
Publishes a message to the 'alerts' channel of the form:
capture-stop:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that they should stop collecting data now
"""
msg = "capture-stop:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_capture_done(self, req, product_id):
"""Signals that an observation has finished
Publishes a message to the 'alerts' channel of the form:
capture-done:product_id
The product_id should match what what was sent in the ?configure request
This alert should notify all backend processes (such as beamformer)
that their data streams are ending
"""
msg = "capture-done:{}".format(product_id)
success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
if success:
return ("ok",)
else:
return ("fail", "Failed to publish to our local redis server")
@request(Str())
@return_reply()
def request_deconfigure(self, req, product_id):
    """Signals that the current data product is done.

    Deconfigure the BLUSE instance that was created by the call
    to ?configure with the corresponding product_id. Note: CAM is
    expected to have sent a ?capture-done request before deconfiguring,
    in order to ensure that all data has been written. If BLUSE uses an
    instance of katportalclient to get information from CAM for this
    BLUSE instance, then it should disconnect at this time.

    Publishes a message to the 'alerts' channel of the form:
        deconfigure:product_id
    The product_id should match what was sent in the ?configure request.
    This alert should notify all backend processes (such as the beamformer)
    that their data streams are ending.
    """
    msg = "deconfigure:{}".format(product_id)
    # Best-effort publish: reply "fail" (rather than raising) when the
    # local Redis server cannot be reached.
    success = publish_to_redis(self.redis_server, REDIS_CHANNELS.alerts, msg)
    if success:
        return ("ok",)
    else:
        return ("fail", "Failed to publish to our local redis server")
def setup_sensors(self):
    """Set up monitoring sensors.

    The following sensors are made available on top of the default sensors
    implemented in AsyncDeviceServer and its base classes:

        device-status:     overall health status of BLUSE; among other
                           things reports HW failure, SW failure and
                           observation failure.
        local-time-synced: whether the local clock is NTP synchronised.
        version:           the current BLUSE version string.

    NOTE(review): the original docstring said FBFUSE here — looks
    copy-pasted; all sensors below describe BLUSE. Confirm.
    """
    self._device_status = Sensor.discrete(
        "device-status",
        description="Health status of BLUSE",
        params=self.DEVICE_STATUSES,
        default="ok",
        initial_status=Sensor.NOMINAL)
    self.add_sensor(self._device_status)
    self._local_time_synced = Sensor.boolean(
        "local-time-synced",
        description="Indicates BLUSE is NTP syncronised.",
        default=True,  # TODO: implement actual NTP synchronization request
        initial_status=Sensor.NOMINAL)
    self.add_sensor(self._local_time_synced)
    self._version = Sensor.string(
        "version",
        description="Reports the current BLUSE version",
        # Render VERSION_INFO[1:] (a tuple such as (1, 0)) as a dotted
        # string, e.g. '1.0'.
        default=str(self.VERSION_INFO[1:]).strip('()').replace(' ', '').replace(",", '.'),
        initial_status=Sensor.NOMINAL)
    self.add_sensor(self._version)
def request_halt(self, req, msg):
    """Halts the server, logs to syslog and slack, and exits the program.

    Returns
    -------
    success : {'ok', 'fail'}
        Whether scheduling the halt succeeded.

    Examples
    --------
    ::

        ?halt
        !halt ok

    TODO:
        - Call halt method on superclass to avoid copy paste.
          Doing this caused an issue:
          File ".../katcp/server.py", line 1102, in handle_request
              assert (reply.mtype == Message.REPLY)
          AttributeError: 'NoneType' object has no attribute 'mtype'
    """
    f = Future()

    @gen.coroutine
    def _halt():
        req.reply("ok")
        yield gen.moment
        self.stop(timeout=None)
        # NOTE(review): raising AsyncReply from inside the coroutine
        # (rather than from request_halt itself) is unusual for katcp —
        # confirm against AsyncDeviceServer.request_halt.
        raise AsyncReply
    self.ioloop.add_callback(lambda: chain_future(_halt(), f))
    log.critical("HALTING SERVER!!!")
    # TODO: uncomment when you deploy
    # notify_slack("KATCP server at MeerKAT has halted. Might want to check that!")
    # NOTE(review): sys.exit(0) runs immediately after scheduling _halt on
    # the ioloop, so the "ok" reply may never actually be sent — verify
    # the intended shutdown ordering.
    sys.exit(0)
@request()
@return_reply(Str())
def request_find_alien(self, req):
    """Finds an alien.

    Easter-egg request: always replies "ok" together with an ASCII-art
    picture (returned as a raw string so the backslashes survive).
    """
    return ("ok", R"""
 . . . . . . . . . + .
. . : . .. :. .___---------___.
 . . . . :.:. _".^ .^ ^. '.. :"-_. .
. : . . .:../: . .^ :.:\.
. . :: +. :.:/: . . . . . .:\
. : . . _ :::/: . ^ . . .:\
.. . . . - : :.:./. . .:\
. . . :..|: . . ^. .:|
. . : : ..|| . . . !:|
. . . . ::. ::\( . :)/
. . : . : .:.|. ###### .#######::|
:.. . :- : .: ::|.####### ..########:|
. . . .. . .. :\ ######## :######## :/
. .+ :: : -.:\ ######## . ########.:/
. .+ . . . . :.:\. ####### #######..:/
 :: . . . . ::.:..:.\ . . ..:/
. . . .. : -::::.\. | | . .:/
 . : . . .-:.":.::.\ ..:/
. -. . . . .: .:::.:.\. .:/
. . . : : ....::_:..:\ ___. :/
. . . .:. .. . .: :.:.:\ :/
+ . . : . ::. :.:. .:.|\ .:/|
. + . . ...:: ..| --.:|
. . . . . . . ... :..:.."( ..)"
. . . : . .: ::/ . .::\
    """)
| en | 0.679206 | Adapted from the reynard package (MIT license) on June 22, 2018: https://github.com/ewanbarr/reynard ___ _ _ _ _ _ _ _ | _ )_ _ ___ __ _| |_| |_| |_ _ _ ___ _ _ __ _| |_ | | (_)__| |_ ___ _ _ | _ \ '_/ -_) _` | / / _| ' \| '_/ _ \ || / _` | ' \ | |__| (_-< _/ -_) ' \ |___/_| \___\__,_|_\_\\__|_||_|_| \___/\_,_\__, |_||_| |____|_/__/\__\___|_||_| |___/ ______________ ,-. / /| / \ `. __..-,O / / | : \ --''_..-'.' /____________ / | | . .-' `. '. | ___________ | | : . .`.' || || | \ `. / .. || || | \ `. ' . || || | `, `. \ ||___________|| | ,|,`. `-.\ | _______ | / '.|| ``-...__..-` /| (_______) | /____________ | | ( |_____________|/ \ |__| \ \ /||\ .=======================. \ //||\\ | :::::::::::::::: ::: | \ // || \\ | ::::::::::::::[] ::: | \___//__||__\\___ | ----------- ::: | '--------------' \----------------------- # to handle halt request Breakthrough Listen's KATCP Server Backend Interface This server responds to requests sent from CAM, most notably: @ configue @ capture-init @ capture-start @ capture-stop @ capture-done @ deconfigure But because it inherits from AsyncDeviceServer, also responds to: * halt * help * log-level * restart [#restartf1]_ * client-list * sensor-list * sensor-sampling * sensor-value * watchdog * version-list (only standard in KATCP v5 or later) * request-timeout-hint (pre-standard only if protocol flags indicates timeout hints, supported for KATCP v5.1 or later) * sensor-sampling-clear (non-standard) Start the server Based on the passed configuration object this is where the clients for suboridnates nodes will be set up. ,'''''-._ ; ,. <> `-._ ; \' _,--'" ; ( ; , ` \ ;, , \ ; | | MeerKAT BL Backend Interface: ; |, | |\ KATCP Server ; | | | \ Version: {} |.-\ ,\ |\ : Port: {} |.| `. `-. | || :.| `-. \ ';; .- , \;;| ; , | ,\ ; , ; \ https://github.com/ejmichaud/meerkat-backend-interface ; , /`. , ) __,;, ,' \ ,| _,--''__,| / \ : ,'_,-'' | ,/ | : / / | ; ; | | | __,-| |--..__,--| |---.--....___ ___,-| |----'' / | `._`-. 
`---- \ \ `''' ''' -- `.`. --' `.`-._ _, ,- __,- `-.`. --' `; Receive metadata for upcoming observation. In order to allow BLUSE to make an estimate of its ability to process a particular data product, this command should be used to configure a BLUSE instance when a new subarray is activated. Args: product_id (str): This is a name for the data product, which is a useful tag to include in the data, but should not be analysed further. For example "array_1_bc856M4k". This value will be unique across all subarrays. However, it is not a globally unique identifier for the lifetime of the telescope. The exact same value may be provided at a later time when the same subarray is activated again. antennas_csv (str): A comma separated list of physical antenna names used in particular sub-array to which the data products belongs. n_channels (int): The integer number of frequency channels provided by the CBF. streams_json (str) is a JSON struct containing config keys and values describing the streams. For example: {'stream_type1': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, 'stream_type2': { 'stream_name1': 'stream_address1', 'stream_name2': 'stream_address2', ...}, ...} The steam type keys indicate the source of the data and the type, e.g. cam.http. stream_address will be a URI. For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>, representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth to accommodate as a single multicast group, the count parameter indicates the number of additional consecutively numbered multicast group ip addresses, and sharing the same UDP port number. stream_name is the name used to identify the stream in CAM. A Python example is shown below, for five streams: One CAM stream, with type cam.http. The camdata stream provides the connection string for katportalclient (for the subarray that this BLUSE instance is being configured on). 
One F-engine stream, with type: cbf.antenna_channelised_voltage. One X-engine stream, with type: cbf.baseline_correlation_products. Two beam streams, with type: cbf.tied_array_channelised_voltage. The stream names ending in x are horizontally polarised, and those ending in y are vertically polarised. proxy_name (str): The CAM name for the instance of the BLUSE data proxy that is being configured. For example, "BLUSE_3". This can be used to query sensors on the correct proxy. Note that for BLUSE there will only be a single instance of the proxy in a subarray. Returns: None... but replies with "ok" or "fail" and logs either info or error Writes: - subbarry1_abc65555:timestamp" -> "1534657577373.23423" :: Redis String - subarray1_abc65555:antennas" -> [1,2,3,4] :: Redis List - subarray1_abc65555:n_channels" -> "4096" :: Redis String - subarray1_abc65555:proxy_name "-> "BLUSE_whatever" :: Redis String - subarray1_abc65555:streams" -> {....} :: Redis Hash !!!CURRENTLY A STRING!!! - current:obs:id -> "subbary1_abc65555" Publishes: redis-channel: 'alerts' <-- "configure" Examples: > ?configure array_1_bc856M4k a1,a2,a3,a4 128000 {"cam.http":{"camdata":"http://monctl.devnmk.camlab.kat.ac.za/api/client/2"},"stream_type2":{"stream_name1":"stream_address1","stream_name2":"stream_address2"}} BLUSE_3 Signals that an observation will start soon Publishes a message to the 'alerts' channel of the form: capture-init:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) to get ready for data Signals that an observation is starting now Publishes a message to the 'alerts' channel of the form: capture-start:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that they need to be collecting data now Signals that an observation is has stopped Publishes a message to the 'alerts' channel of the 
form: capture-stop:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that they should stop collecting data now Signals that an observation has finished Publishes a message to the 'alerts' channel of the form: capture-done:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that their data streams are ending Signals that the current data product is done. Deconfigure the BLUSE instance that was created by the call to ?configure with the corresponding product_id. Note: CAM is expected to have sent a ?capture-done request before deconfiguring, in order to ensure that all data has been written. If BLUSE uses an instance of katportalclient to get information from CAM for this BLUSE instance, then it should disconnect at this time. Publishes a message to the 'alerts' channel of the form: deconfigure:product_id The product_id should match what what was sent in the ?configure request This alert should notify all backend processes (such as beamformer) that their data streams are ending @brief Set up monitoring sensors. @note The following sensors are made available on top of defaul sensors implemented in AsynDeviceServer and its base classes. device-status: Reports the health status of the FBFUSE and associated devices: Among other things report HW failure, SW failure and observation failure. # TODO: implement actual NTP synchronization request # e.g. '1.0' Halts the server, logs to syslog and slack, and exits the program Returns ------- success : {'ok', 'fail'} Whether scheduling the halt succeeded. 
Examples -------- :: ?halt !halt ok TODO: - Call halt method on superclass to avoid copy paste Doing this caused an issue: File "/Users/Eric/Berkeley/seti/packages/meerkat/lib/python2.7/site-packages/katcp/server.py", line 1102, in handle_request assert (reply.mtype == Message.REPLY) AttributeError: 'NoneType' object has no attribute 'mtype' # TODO: uncomment when you deploy # notify_slack("KATCP server at MeerKAT has halted. Might want to check that!") Finds an alien. . . . . . . . . . + . . . : . .. :. .___---------___. . . . . :.:. _".^ .^ ^. '.. :"-_. . . : . . .:../: . .^ :.:\. . . :: +. :.:/: . . . . . .:\ . : . . _ :::/: . ^ . . .:\ .. . . . - : :.:./. . .:\ . . . :..|: . . ^. .:| . . : : ..|| . . . !:| . . . . ::. ::\( . :)/ . . : . : .:.|. ###### .#######::| :.. . :- : .: ::|.####### ..########:| . . . .. . .. :\ ######## :######## :/ . .+ :: : -.:\ ######## . ########.:/ . .+ . . . . :.:\. ####### #######..:/ :: . . . . ::.:..:.\ . . ..:/ . . . .. : -::::.\. | | . .:/ . : . . .-:.":.::.\ ..:/ . -. . . . .: .:::.:.\. .:/ . . . : : ....::_:..:\ ___. :/ . . . .:. .. . .: :.:.:\ :/ + . . : . ::. :.:. .:.|\ .:/| . + . . ...:: ..| --.:| . . . . . . . ... :..:.."( ..)" . . . : . .: ::/ . .::\ | 2.099316 | 2 |
pycpa/nxamalthea.py | IDA-TUBS/pycpa | 8 | 6613084 | <gh_stars>1-10
import xml.etree.ElementTree as ET
import networkx as nx
from . import util
from . import model
from . import schedulers
import csv
# XML namespace helpers for the xsi:type attribute used throughout the
# AMALTHEA serialisation.
xsi = '{http://www.w3.org/2001/XMLSchema-instance}'
XSI_TYPE = '{http://www.w3.org/2001/XMLSchema-instance}type'
# Node/edge attribute values used to tag the networkx graph.
MAPPING = 'mapping'
ACCESS = 'ACCESS'
READ = 'read'
WRITE = 'write'
TASK = 'task'
RUNNABLE = 'runnable'
LABEL = 'label'
RESSOURCE = 'ressource'
TYPE = 'TYPE'
PRIO = 'scheduling_parameter'
# Bug fix: parse_runnable_sequence() references RUNNABLE_CALL, which was
# never defined (NameError at runtime). Define it alongside the other tags.
RUNNABLE_CALL = 'runnable_call'
class NxAmaltheaParser(object):
    """Parse an AMALTHEA XML model into a networkx MultiDiGraph.

    The graph built by :meth:`parse_all` contains:
      * runnable nodes (TYPE == RUNNABLE) with 'bcet'/'wcet' attributes,
        converted from instruction counts to time in ``self.time_base``,
      * label nodes (TYPE == LABEL), connected to runnables by ACCESS
        edges (label -> runnable for reads, runnable -> label for writes),
      * task nodes (TYPE == TASK) carrying the stimulus (event model)
        parameters and the scheduling priority, MAPPING-linked in both
        directions to their runnables and to their scheduler,
      * scheduler nodes (TYPE == RESSOURCE) — one per task scheduler of
        the osModel, used as a stand-in for the core,
      * runnable-order edges (TYPE == 'runnable_call') describing the
        call sequence inside each task.
    """

    def __init__(self, xml_file, scale=1.0):
        """Load the AMALTHEA XML model and prepare an empty graph.

        :param xml_file: path of the AMALTHEA XML file
        :param scale: multiplicative factor applied to all runnable
                      execution times
        """
        root = ET.parse(xml_file).getroot()
        self.mappingModel = root.find('mappingModel')
        self.sw_model = root.find('swModel')
        self.hw_model = root.find('hwModel')
        self.stim_model = root.find('stimuliModel')
        self.constr_model = root.find('constraintsModel')
        self.os_model = root.find('osModel')
        self.time_base = util.ns
        self.scale = scale
        self.time_per_instruction = self._set_time_per_instruction()
        self.G = nx.MultiDiGraph()

    def clean_xml_string(self, s=None):
        """Strip the '?type=...' suffix from an AMALTHEA reference string.

        Robustness fix: the original ``s[:s.index('?')]`` raised
        ValueError for strings without a '?'. ``str.partition`` returns
        the identical prefix when '?' is present and the whole string
        otherwise.
        """
        return s.partition('?')[0]

    def parse_runnables_and_labels_to_nx(self):
        """Add runnable and label nodes plus read/write ACCESS edges.

        BCET/WCET come from the runnable's instruction-count deviation
        bounds, scaled by ``self.time_per_instruction`` and ``self.scale``.
        """
        for r in self.sw_model.iter('runnables'):
            r_name = r.get('name')
            bcet = int(float(r.find('runnableItems/default/deviation/lowerBound').get('value')) * float(self.time_per_instruction) * self.scale)
            wcet = int(float(r.find('runnableItems/default/deviation/upperBound').get('value')) * float(self.time_per_instruction) * self.scale)
            # Adding a label/runnable multiple times doesn't matter:
            # every node is identified by its (hashable) name string.
            self.G.add_node(r_name, bcet=bcet, wcet=wcet, TYPE=RUNNABLE)
            for ri in r.iter('runnableItems'):
                value = ri.get(XSI_TYPE)
                if value:
                    prefix, tag = value.split(":")
                    if tag == 'LabelAccess':
                        label = self.clean_xml_string(ri.get('data'))
                        self.G.add_node(label, TYPE=LABEL)
                        access = ri.get('access')
                        # Reads point label -> runnable; everything else
                        # is treated as a write, runnable -> label.
                        if access == "read":
                            self.G.add_edge(label, r_name, TYPE=ACCESS, ACCESS=READ)
                        else:
                            self.G.add_edge(r_name, label, TYPE=ACCESS, ACCESS=WRITE)
        return self.G

    def _number_of_labels_in_xml(self):
        """Count <labels> entries in the swModel; report (via print) any
        label that is missing from the graph. Diagnostic helper."""
        n = 0
        for label in self.sw_model.iter('labels'):
            n = n + 1
            if not self.G.has_node(label.get('name')):
                print("%s not in the Graph" % label.get('name'))
        return n

    def _get_stimulus_params(self, stimulus):
        """Extract the event-model parameters of a stimulus element.

        :returns: dict with 'EMType' ('Periodic' or 'Sporadic') plus
                  either the recurrence attributes ('value'/'unit') or
                  the 'lowerBound'/'upperBound' attribute dicts
        :raises ValueError: for unsupported stimulus types
        """
        _, stim_type = stimulus.get(XSI_TYPE).split(':')
        s_param = dict()
        if stim_type == "Periodic":
            # .attrib is a dict with 'value' and 'unit' keys
            s_param = stimulus.find('recurrence').attrib
        elif stim_type == "Sporadic":
            s_param['lowerBound'] = stimulus.find('stimulusDeviation').find('lowerBound').attrib
            s_param['upperBound'] = stimulus.find('stimulusDeviation').find('upperBound').attrib
        else:
            raise ValueError("unsupported stimulus type: %s" % stim_type)
        s_param['EMType'] = stim_type
        return s_param

    def parse_tasks_and_cores_to_nx(self):
        """Add task and scheduler nodes plus their MAPPING edges."""
        for t in self.sw_model.iter('tasks'):
            t_name = t.get('name')
            t_prio = t.get('priority')
            # Find the event model: look up the task's stimulus by name.
            stimulus_name = self.clean_xml_string(t.get('stimuli'))
            for stimulus in self.stim_model.iter('stimuli'):
                if stimulus.get('name') == stimulus_name:
                    stim_params = self._get_stimulus_params(stimulus)
            self.G.add_node(t_name, TYPE=TASK, event_model=stim_params, scheduling_parameter=t_prio)
            # Map the task to its runnables (edges in both directions).
            graphEntries = t.find('callGraph/graphEntries')
            prefix, tag = graphEntries.get(XSI_TYPE).split(":")
            if tag == 'CallSequence':
                for call in graphEntries.iter('calls'):
                    r = self.clean_xml_string(call.get('runnable'))
                    self.G.add_edge(r, t_name, TYPE=MAPPING)
                    self.G.add_edge(t_name, r, TYPE=MAPPING)
        # TODO: In principle the cores of the hwModel should be parsed, but
        # we omit the indirection and take the scheduler as the "core":
        # typically there is one task scheduler per core, which is a
        # shortcut for the task allocation.
        for sched in self.os_model.find('operatingSystems').iter('taskSchedulers'):
            s_name = sched.get('name')
            _, sched_algo = sched.find('schedulingAlgorithm').get(XSI_TYPE).split(':')
            self.G.add_node(s_name, TYPE=RESSOURCE, schedulingAlgorithm=sched_algo)
        for ta in self.mappingModel.iter('taskAllocation'):
            task = self.clean_xml_string(ta.get('task'))
            sched = self.clean_xml_string(ta.get('scheduler'))
            self.G.add_edge(task, sched, TYPE=MAPPING)
            self.G.add_edge(sched, task, TYPE=MAPPING)
        return self.G

    def parse_runnable_sequence(self):
        """Add edges encoding the order of runnables within each task.

        Assumes runnables and tasks have already been parsed.
        """
        for t in self.sw_model.iter('tasks'):
            t_name = t.get('name')
            graphEntries = t.find('callGraph/graphEntries')
            prefix, tag = graphEntries.get(XSI_TYPE).split(":")
            if tag == 'CallSequence':
                first_runnable = True
                for call in graphEntries.iter('calls'):
                    cur_r = self.clean_xml_string(call.get('runnable'))
                    # Link consecutive runnables; the first one has no
                    # predecessor within the task.
                    if not first_runnable:
                        # Bug fix: the original referenced an undefined
                        # RUNNABLE_CALL constant (NameError at runtime);
                        # tag order edges with the intended string value.
                        self.G.add_edge(prev_r, cur_r, TYPE='runnable_call')
                    first_runnable = False
                    prev_r = cur_r
        return self.G

    def _set_time_per_instruction(self):
        """Derive the duration of one instruction from the hardware model.

        Supports only models with a single microcontroller element and one
        instruction per cycle. The quartz (PLL) frequency is assumed to be
        the CPU clock (i.e. prescaler clockRatio == 1 for each core).
        """
        assert (int(self.hw_model.find('coreTypes').get('instructionsPerCycle')) == 1)
        pll_freq = int(float(self.hw_model.find('system/ecus/microcontrollers/quartzes/frequency').get('value')))
        self.time_per_instruction = util.cycles_to_time(value=1, freq=pll_freq, base_time=self.time_base)
        return self.time_per_instruction

    def get_cpa_sys(self, G):
        """Unimplemented placeholder; use NxConverter.get_cpa_sys instead."""
        pass

    def parse_all(self):
        """Run all parse steps and return the populated graph."""
        self.parse_runnables_and_labels_to_nx()
        self.parse_tasks_and_cores_to_nx()
        self.parse_runnable_sequence()
        return self.G
class NxConverter(object):
    """Convert a networkx task/runnable graph (as produced by
    NxAmaltheaParser) into a pyCPA system, or export it to CSV.

    NOTE(review): this class uses the ``G.node[...]`` attribute-access
    API, which exists only in networkx < 2.4 — confirm the pinned
    networkx version before upgrading.
    """

    def __init__(self, G):
        """Store the graph and fix the pyCPA time base (nanoseconds)."""
        self.G = G
        self.cpa_base = util.ns

    def get_cpa_sys(self, reverse_prios=True):
        """Return a pyCPA system based on the stored networkx graph.

        Reversing priorities ensures that AMALTHEA models parsed to nx
        are compatible with pyCPA (see get_reverse_prio).
        """
        s = model.System()
        for n, d in self.G.nodes(data=True):
            if d['TYPE'] == RESSOURCE:
                # For the time being we only support SPP scheduling.
                r = s.bind_resource(model.Resource(n, schedulers.SPPScheduler()))
                # Get the neighbours of n that have a MAPPING edge to a task.
                for u, v, d_edge in self.G.out_edges(n, data=True):
                    if d_edge[TYPE] == MAPPING:
                        # v is a task node (identified by its name)
                        assert (self.G.node[v][TYPE] == TASK)
                        task_params = self.get_task_params(v, reverse_prios)
                        t = r.bind_task(model.Task(name=v, **task_params))
                        t.in_event_model = self.construct_event_model(v)
        return s

    def get_task_params(self, t, reverse_prios=True):
        """Return a dict with 'wcet', 'bcet' and 'scheduling_parameter'
        for task node ``t``; WCET/BCET are summed over its runnables.
        """
        t_params = dict()
        t_params['wcet'] = 0
        t_params['bcet'] = 0
        # pyCPA treats 1 as the highest priority; AMALTHEA does it the
        # other way around (like OSEK), hence the optional reversal.
        if reverse_prios == True:
            t_params['scheduling_parameter'] = self.get_reverse_prio(t)
        else:
            t_params['scheduling_parameter'] = self.G.node[t]['scheduling_parameter']
        # Filter out a subgraph that only contains runnables and tasks
        # (mapping edges between them survive the induced subgraph).
        tasks_runnables = [n for n, d in self.G.nodes(data=True) if (d[TYPE] ==
            RUNNABLE or d[TYPE] == TASK)]
        H = self.G.subgraph(tasks_runnables)
        # Iterate over the mapped runnables and sum their WCET/BCET.
        for u, v, d in H.out_edges(t, data=True):
            if (d[TYPE] == MAPPING and self.G.node[v][TYPE] == RUNNABLE):
                t_params['wcet'] = int(self.G.node[v]['wcet']) + int(t_params['wcet'])
                t_params['bcet'] = int(self.G.node[v]['bcet']) + int(t_params['bcet'])
        return t_params

    def construct_event_model(self, task=None):
        """Build a pyCPA PJd event model from the task node's stimulus.

        TODO: In principle we would have to check whether the task in
        fact has an event model or whether it is activated by another
        task; in that case the dict key 'event_model' must not exist.
        """
        if self.G.node[task]['event_model']['EMType'] == 'Periodic':
            s_param = self.G.node[task]['event_model']
            P = util.time_to_time(int(s_param['value']), base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return model.PJdEventModel(P=P, J=0)
        elif self.G.node[task]['event_model']['EMType'] == 'Sporadic':
            # NOTE(review): a sporadic stimulus is modelled by its
            # lowerBound only, treated as a period with J=0; upperBound
            # is ignored — confirm this approximation is intended.
            s_param = self.G.node[task]['event_model']['lowerBound']
            P = util.time_to_time(int(s_param['value']), base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return model.PJdEventModel(P=P, J=0)
        else:
            raise ValueError

    def get_reverse_prio(self, task):
        """Map a task's AMALTHEA priority to the pyCPA convention.

        In pyCPA, 1 is the highest priority; AMALTHEA sorts the other
        way (1 is the lowest), so the collected priority list is
        reversed.

        NOTE(review): this pairs the i-th task (in node-iteration order)
        with the i-th element of the reversed priority list, which is a
        true per-task inversion only if tasks happen to be ordered by
        priority — verify against the models in use. In principle this
        mapping could also be cached.
        """
        prio_list = list()
        name_list = list()
        for n, d in self.G.nodes(data=True):
            if d[TYPE] == TASK:
                name_list.append(n)
                prio_list.append(d[PRIO])
        prio_list.reverse()
        prio_cache = dict()
        for i in range(len(name_list)):
            prio_cache[name_list[i]] = prio_list[i]
        return prio_cache[task]

    def _get_event_model_params(self, task=None):
        """Return (period, jitter) instead of a pyCPA event model object.

        WARNING: only returns periods at the moment (jitter is always 0).
        """
        if self.G.node[task]['event_model']['EMType'] == 'Periodic':
            s_param = self.G.node[task]['event_model']
            P = util.time_to_time(int(s_param['value']), base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return (P, 0)
        elif self.G.node[task]['event_model']['EMType'] == 'Sporadic':
            lB = self.G.node[task]['event_model']['lowerBound']
            uB = self.G.node[task]['event_model']['upperBound']
            # TODO: lB/uB are fetched but unused — the sporadic stimulus
            # is reduced to its lowerBound, treated as a period.
            s_param = self.G.node[task]['event_model']['lowerBound']
            P = util.time_to_time(int(s_param['value']), base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return (P, 0)
        else:
            raise ValueError

    def write_to_csv(self, filename, reverse_prios=True):
        """Write one CSV row per mapped task: task_name, resource, bcet,
        wcet, scheduling_parameter, period, jitter.

        WARNING: forces P,J as event model parameters!
        NOTE(review): the csv module recommends open(..., newline='')
        to avoid blank lines on Windows — confirm before changing.
        """
        with open(filename, 'w') as csvfile:
            fieldnames = ['task_name', 'resource', 'bcet', 'wcet', 'scheduling_parameter', 'period', 'jitter']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for n, d in self.G.nodes(data=True):
                if d['TYPE'] == RESSOURCE:
                    # Get the neighbours of n that MAPPING-link to a task.
                    for u, v, d_edge in self.G.out_edges(n, data=True):
                        if d_edge[TYPE] == MAPPING:
                            # v is a task node (i.e. the task name)
                            assert (self.G.node[v][TYPE] == TASK)
                            task_params = self.get_task_params(v, reverse_prios)
                            task_params['task_name'] = v
                            task_params['resource'] = n
                            task_params['period'], task_params['jitter'] = self._get_event_model_params(v)
                            writer.writerow(task_params)
| import xml.etree.ElementTree as ET
import networkx as nx
from . import util
from . import model
from . import schedulers
import csv
xsi='{http://www.w3.org/2001/XMLSchema-instance}'
XSI_TYPE='{http://www.w3.org/2001/XMLSchema-instance}type'
MAPPING = 'mapping'
ACCESS = 'ACCESS'
READ = 'read'
WRITE = 'write'
TASK = 'task'
RUNNABLE = 'runnable'
LABEL = 'label'
RESSOURCE = 'ressource'
TYPE = 'TYPE'
PRIO = 'scheduling_parameter'
class NxAmaltheaParser(object):
def __init__(self, xml_file, scale=1.0):
root = ET.parse(xml_file).getroot()
self.mappingModel= root.find('mappingModel')
self.sw_model = root.find('swModel')
self.hw_model = root.find('hwModel')
self.stim_model = root.find('stimuliModel')
self.constr_model = root.find('constraintsModel')
self.os_model = root.find('osModel')
self.time_base = util.ns
self.scale = scale
self.time_per_instruction = self._set_time_per_instruction()
self.G = nx.MultiDiGraph()
def clean_xml_string(self, s=None):
#remove type substring from xml strings
return s[:s.index('?')]
def parse_runnables_and_labels_to_nx(self):
for r in self.sw_model.iter('runnables'):
r_name = r.get('name')
bcet = int(float(r.find('runnableItems/default/deviation/lowerBound').get('value')) * float(self.time_per_instruction) * self.scale)
wcet = int(float(r.find('runnableItems/default/deviation/upperBound').get('value')) * float(self.time_per_instruction) * self.scale)
#self.G.add_node(r_name, **{ 'bcet' : bcet , 'wcet' : wcet , TYPE : RUNNABLE })
self.G.add_node(r_name, bcet= bcet , wcet=wcet, TYPE = RUNNABLE )
#Adding a label/runnable multiple times doesn't matter.
#Every "node" is a hashable object, i.e. the string identfying the node
for ri in r.iter('runnableItems'):
value = ri.get(XSI_TYPE)
if value:
prefix, tag = value.split(":")
if tag == 'LabelAccess':
label = self.clean_xml_string(ri.get('data'))
self.G.add_node(label, TYPE = LABEL)
access = ri.get('access')
if access == "read":
self.G.add_edge(label,r_name,TYPE = ACCESS, ACCESS = READ)
else:
self.G.add_edge(r_name,label,TYPE = ACCESS, ACCESS = WRITE)
return self.G
def _number_of_labels_in_xml(self):
n = 0
for label in self.sw_model.iter('labels'):
n = n + 1
if not self.G.has_node(label.get('name')):
print("%s not in the Graph" % label.get('name'))
return n
def _get_stimulus_params(self, stimulus):
_,stim_type = stimulus.get(XSI_TYPE).split(':')
s_param = dict()
if stim_type == "Periodic":
#returns a dict vwith value and unit as keys
s_param = stimulus.find('recurrence').attrib
elif stim_type == "Sporadic":
s_param['lowerBound'] = stimulus.find('stimulusDeviation').find('lowerBound').attrib
s_param['upperBound'] = stimulus.find('stimulusDeviation').find('upperBound').attrib
else:
raise ValueError
s_param['EMType'] = stim_type
return s_param
def parse_tasks_and_cores_to_nx(self):
for t in self.sw_model.iter('tasks'):
t_name = t.get('name')
t_prio = t.get('priority')
# find event model
stimulus_name = self.clean_xml_string(t.get('stimuli'))
for stimulus in self.stim_model.iter('stimuli'):
if stimulus.get('name') == stimulus_name:
stim_params = self._get_stimulus_params(stimulus)
self.G.add_node(t_name, TYPE=TASK, event_model=stim_params, scheduling_parameter=t_prio)
#Map task to runnables
graphEntries = t.find('callGraph/graphEntries')
prefix,tag = graphEntries.get(XSI_TYPE).split(":")
if tag == 'CallSequence':
for call in graphEntries.iter('calls'):
#each runnable is linked to a task
r = self.clean_xml_string(call.get('runnable'))
self.G.add_edge(r,t_name, TYPE=MAPPING)
self.G.add_edge(t_name,r, TYPE=MAPPING)
#TODO: In principle this is right but we omit the indirection and assume the Scheduler to be the
#core
#for core in self.hw_model.find('system/ecus/microcontrollers').iter('cores'):
# c_name = core.get('name')
# self.G.add_node(c_name)
#Get all the schedulers in the Model - typically one per core; shortcut for task allocation
for sched in self.os_model.find('operatingSystems').iter('taskSchedulers'):
s_name = sched.get('name')
_,sched_algo = sched.find('schedulingAlgorithm').get(XSI_TYPE).split(':')
self.G.add_node(s_name, TYPE = RESSOURCE, schedulingAlgorithm = sched_algo)
for ta in self.mappingModel.iter('taskAllocation'):
task = self.clean_xml_string(ta.get('task'))
sched = self.clean_xml_string(ta.get('scheduler'))
self.G.add_edge(task,sched, TYPE=MAPPING)
self.G.add_edge(sched,task, TYPE=MAPPING)
return self.G
def parse_runnable_sequence(self):
# adds edges to the graph G that specify the sequence of runnables in a task
# assumes that runnables and tasks are already parsed
for t in self.sw_model.iter('tasks'):
t_name = t.get('name')
graphEntries = t.find('callGraph/graphEntries')
prefix,tag = graphEntries.get(XSI_TYPE).split(":")
if tag == 'CallSequence':
first_runnable = True
for call in graphEntries.iter('calls'):
# Get the runnable
cur_r = self.clean_xml_string(call.get('runnable'))
# Link the runnables in order
if not first_runnable:
self.G.add_edge(prev_r, cur_r, TYPE=RUNNABLE_CALL)
first_runnable = False # The first runnable has no predecessor in the task
prev_r = cur_r
return self.G
def _set_time_per_instruction(self):
assert ( int(self.hw_model.find('coreTypes').get('instructionsPerCycle')) == 1 )
#Supports only models with one microcontroller element!
pll_freq = int(float(self.hw_model.find('system/ecus/microcontrollers/quartzes/frequency').get('value')))
#Assumption: pll_freq is the CPU clock, i.e. prescaler clockRation=1 for each core)
self.time_per_instruction = util.cycles_to_time(value=1,freq=pll_freq, base_time=self.time_base)
return self.time_per_instruction
def get_cpa_sys(self,G):
pass
def parse_all(self):
self.parse_runnables_and_labels_to_nx()
self.parse_tasks_and_cores_to_nx()
self.parse_runnable_sequence()
return self.G
class NxConverter(object):
def __init__(self,G):
""" This class manages the conversion of a networkx task/runnable system to a pyCPA system
"""
self.G = G
self.cpa_base = util.ns
def get_cpa_sys(self, reverse_prios=True):
""" returns a pyCPA system based on the stored networkx graph
reversing prios ensures that Amalthea Models parsed to nx are compatible with pyCPA
"""
s = model.System()
for n,d in self.G.nodes(data=True):
if d['TYPE'] == RESSOURCE:
#for the time being we only support SPP
#r = s.bind_resource(model.Resource(self.G.node[n], schedulers.SPPScheduler()))
r = s.bind_resource(model.Resource(n, schedulers.SPPScheduler()))
# get the neigbors of n that have a MAPPING to a task
for u,v,d_edge in self.G.out_edges(n,data=True):
if d_edge[TYPE] == MAPPING:
#v is a task
assert (self.G.node[v][TYPE] == TASK )
task_params = self.get_task_params(v,reverse_prios)
t = r.bind_task(model.Task(name=v, **task_params))
t.in_event_model = self.construct_event_model(v)
return s
def get_task_params(self,t,reverse_prios=True):
""" returns dict with wcet, bcet, scheduling_parameter
"""
t_params = dict()
t_params['wcet'] = 0
t_params['bcet'] = 0
# pyCPA starts with 1 as the highest one; amalthe does it the other way around (like OSEK)
if reverse_prios == True:
t_params['scheduling_parameter'] = self.get_reverse_prio(t)
else:
t_params['scheduling_parameter'] = self.G.node[t]['scheduling_parameter']
#Filter out a subgraph that only contains runnables, tasks and mapping edges
tasks_runnables = [ n for n,d in self.G.nodes(data=True) if (d[TYPE] ==
RUNNABLE or d[TYPE] == TASK)]
H = self.G.subgraph( tasks_runnables )
#Iterate over the runnables and compute WCET/BCET as a sum over the neigbors!
for u,v,d in H.out_edges(t,data=True):
if (d[TYPE] == MAPPING and self.G.node[v][TYPE] == RUNNABLE):
#print(u,v,d)
t_params['wcet'] = int(self.G.node[v]['wcet']) + int(t_params['wcet'])
t_params['bcet'] = int(self.G.node[v]['bcet']) + int(t_params['bcet'])
#print(t_params)
return t_params
def construct_event_model(self, task=None):
#TODO: In principle we would have to check whether the task in fact has an event model
# or whether it is activated by another task; in that case the dict key event_model must not
# exist
if self.G.node[task]['event_model']['EMType'] == 'Periodic':
s_param = self.G.node[task]['event_model']
P = util.time_to_time( int(s_param['value']) , base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
return model.PJdEventModel(P=P, J=0)
elif self.G.node[task]['event_model']['EMType'] == 'Sporadic':
s_param = self.G.node[task]['event_model']['lowerBound']
P = util.time_to_time( int(s_param['value']) , base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
return model.PJdEventModel(P=P, J=0)
else:
raise ValueError
def get_reverse_prio(self, task):
# in pyCPA 1 is the highest priority - Amalthea sorts the other way, i.e. 1 is the lowest
# in principle this can be cached!
prio_list = list()
name_list = list()
for n,d in self.G.nodes(data=True):
if d[TYPE] == TASK:
name_list.append(n)
prio_list.append(d[PRIO])
prio_list.reverse()
prio_cache = dict()
for i in range(len(name_list)):
prio_cache[name_list[i]] = prio_list[i]
return prio_cache[task]
    def _get_event_model_params(self, task=None):
        """ Instead of return a cpa event model just return the parameters
        (P, J) as a tuple, converted to self.cpa_base time units.
        WARNING: Only returns periods at the moment; jitter is always 0.
        """
        if self.G.node[task]['event_model']['EMType'] == 'Periodic':
            s_param = self.G.node[task]['event_model']
            P = util.time_to_time( int(s_param['value']) , base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return (P,0)
        elif self.G.node[task]['event_model']['EMType'] == 'Sporadic':
            # lB/uB are fetched but currently unused placeholders -- see TODO.
            lB = self.G.node[task]['event_model']['lowerBound']
            uB = self.G.node[task]['event_model']['upperBound']
            #TODO! derive a proper (P, J) pair from lowerBound/upperBound;
            # for now the lower bound alone is used as the period.
            s_param = self.G.node[task]['event_model']['lowerBound']
            P = util.time_to_time( int(s_param['value']) , base_in=util.str_to_time_base(s_param['unit']), base_out=self.cpa_base)
            return (P,0)
        else:
            # Unknown/unsupported EMType for this task.
            raise ValueError
    def write_to_csv(self,filename, reverse_prios=True):
        """ Dump one CSV row per task mapped to a resource.
        WARNING: Forces P,J as Event Model Parameters! """
        with open(filename, 'w') as csvfile:
            fieldnames = ['task_name', 'resource', 'bcet', 'wcet', 'scheduling_parameter', 'period' , 'jitter']
            writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
            writer.writeheader()
            for n,d in self.G.nodes(data=True):
                # NOTE(review): this uses the string literal 'TYPE' while the
                # rest of the class indexes node data with the TYPE constant --
                # confirm the constant's value is actually 'TYPE'.
                if d['TYPE'] == RESSOURCE:
                    # get the neigbors of n that have a MAPPING to a task
                    for u,v,d_edge in self.G.out_edges(n,data=True):
                        if d_edge[TYPE] == MAPPING:
                            #v is a task (i.e. the name)
                            assert (self.G.node[v][TYPE] == TASK )
                            # reverse_prios is forwarded so Amalthea priorities
                            # can be flipped into pyCPA ordering.
                            task_params = self.get_task_params(v,reverse_prios)
                            task_params['task_name'] = v
                            task_params['resource'] = n
                            task_params['period'], task_params['jitter'] = self._get_event_model_params(v)
writer.writerow(task_params) | en | 0.825605 | #remove type substring from xml strings #self.G.add_node(r_name, **{ 'bcet' : bcet , 'wcet' : wcet , TYPE : RUNNABLE }) #Adding a label/runnable multiple times doesn't matter. #Every "node" is a hashable object, i.e. the string identfying the node #returns a dict vwith value and unit as keys # find event model #Map task to runnables #each runnable is linked to a task #TODO: In principle this is right but we omit the indirection and assume the Scheduler to be the #core #for core in self.hw_model.find('system/ecus/microcontrollers').iter('cores'): # c_name = core.get('name') # self.G.add_node(c_name) #Get all the schedulers in the Model - typically one per core; shortcut for task allocation # adds edges to the graph G that specify the sequence of runnables in a task # assumes that runnables and tasks are already parsed # Get the runnable # Link the runnables in order # The first runnable has no predecessor in the task #Supports only models with one microcontroller element! #Assumption: pll_freq is the CPU clock, i.e. prescaler clockRation=1 for each core) This class manages the conversion of a networkx task/runnable system to a pyCPA system returns a pyCPA system based on the stored networkx graph reversing prios ensures that Amalthea Models parsed to nx are compatible with pyCPA #for the time being we only support SPP #r = s.bind_resource(model.Resource(self.G.node[n], schedulers.SPPScheduler())) # get the neigbors of n that have a MAPPING to a task #v is a task returns dict with wcet, bcet, scheduling_parameter # pyCPA starts with 1 as the highest one; amalthe does it the other way around (like OSEK) #Filter out a subgraph that only contains runnables, tasks and mapping edges #Iterate over the runnables and compute WCET/BCET as a sum over the neigbors! 
#print(u,v,d) #print(t_params) #TODO: In principle we would have to check whether the task in fact has an event model # or whether it is activated by another task; in that case the dict key event_model must not # exist # in pyCPA 1 is the highest priority - Amalthea sorts the other way, i.e. 1 is the lowest # in principle this can be cached! Instead of return a cpa event model just return the parameters WARNING: Only returns periods at the moment #TODO! WARNING: Forces P,J as Event Model Parameters! # get the neigbors of n that have a MAPPING to a task #v is a task (i.e. the name) | 2.197335 | 2 |
SampleScripts/Static_Only/solutions.py | kyspencer/GAMMA-PC-A-Greedy-Memetic-Algorithm-for-Storing-Cooling-Objects | 0 | 6613085 | <gh_stars>0
# solutions.py
# This python file contains modules save solutions.
# Author: <NAME>
# Date: March 31, 2016
import binpacking as bp
import mop
import random
from constraints import concheck
from numpy import zeros
from operator import itemgetter
def main():
    """Entry point: print a short description of this module."""
    message = 'This file saves solutions in a bpp optimization.'
    print(message)
def process(idnum, t, chromosome, bpp, items):
    """Decode a chromosome, verify constraints, and wrap it in a MultiSol.

    The chromosome is decoded into the x/y bin-packing matrices, checked
    against the problem constraints, scored with the multi-objective
    fitness calculator, and returned as a solution object with rank 0 and
    a crowding distance of 0.0.
    """
    matrix_x, matrix_y = bp.ed(idnum, chromosome, bpp, items)
    concheck(idnum, matrix_x, bpp)
    fitness = mop.calcfits(matrix_x, matrix_y, items)
    return MultiSol(idnum, chromosome, matrix_x, matrix_y, t, fitness, 0, 0.0)
def oldnew(archive, q, genes):
    """Split the new generation into already-known and brand-new members.

    Walks the chromosomes in *genes* and compares each against the genes of
    every solution currently in *archive*:

      * if a chromosome is NOT found in the archive, its position is recorded
        in ``new`` and the matching member is deleted from *q* in place;
      * if it already exists, the member stays in *q*.

    Args:
        archive: list of all existing solution objects (must offer getgenes()).
        q: the new generation, parallel to *genes*; mutated in place.
        genes: the chromosome portion of *q*.

    Returns:
        (new, q): ``new`` holds the indices (into *genes*) of chromosomes
        that still need solution objects created; ``q`` retains only the
        members whose genes already exist in the archive.
    """
    new = []
    archgenes = []
    # Snapshot every chromosome currently stored in the archive.
    for m in range(len(archive)):
        archgenes.append(archive[m].getgenes())
    # k tracks the current position in q, which shrinks as members are
    # deleted; the "del q[k]; k -= 1 ... k += 1" dance keeps k aligned.
    k = 0
    for p in range(len(genes)):
        count = archgenes.count(genes[p])
        if count == 0:
            new.append(p)
            del q[k]
            k -= 1
        k += 1
    return new, q
def reduce(archive, p, q):
    """Trim *archive* (in place) to keep memory bounded during a run.

    If the archive holds more than 1200 solutions, the 1000 best-ranked
    ones are kept, plus any worse-ranked solution that is still a member
    of the parent generation *p* or the offspring generation *q*.  The
    archive is finally re-sorted by solution index and returned.
    """
    from operator import attrgetter
    if len(archive) > 1200:
        archive.sort(key=attrgetter('rank'))
        survivors = archive[:1000] + [s for s in archive[1000:]
                                      if s in p or s in q]
        # Slice-assign so the caller's list object is mutated, matching the
        # in-place behaviour callers may rely on.
        archive[:] = survivors
        archive.sort(key=attrgetter('index'))
    return archive
class Sol:
    """Base container for one candidate solution of the bin-packing GA.

    Attributes:
        index: unique identifier of this solution.
        genes: chromosome the solution was decoded from.
        n: number of genes in the chromosome.
        x: item-to-bin assignment matrix.
        y: open-bin indicator vector.
        t: generation number the solution was created in.
        fits: vector of fitness values.
        rank: Pareto rank used during selection.
    """
    def __init__(self, index, chromosome, x, y, t, fitvals, prank):
        self.index = index
        self.genes = chromosome
        self.n = len(self.genes)  # number of genes
        self.x = x
        self.y = y
        self.t = int(t)  # coerce the generation counter to int
        self.fits = fitvals
        self.rank = prank
    # Simple accessors/mutators, kept for API compatibility with callers.
    def getindex(self):
        return self.index
    def updateid(self, idnum):
        self.index = idnum
    def getgenes(self):
        return self.genes
    def getx(self):
        return self.x
    def gety(self):
        return self.y
    def getgen(self):
        return self.t
    def getfits(self):
        return self.fits
    def getrank(self):
        return self.rank
    def updaterank(self, prank):
        self.rank = prank
class MultiSol(Sol):
    """Solution specialised for multi-objective runs.

    Caches the three fitness components individually and stores a
    crowding-distance value used during selection.
    """
    def __init__(self, index, chromosome, x, y, t, fitvals, prank, idist):
        Sol.__init__(self, index, chromosome, x, y, t, fitvals, prank)
        # Unpack the three objective values for direct access.
        self.fit0, self.fit1, self.fit2 = fitvals[0], fitvals[1], fitvals[2]
        self.cd = idist

    def getbins(self):
        """Return objective 0 (number of bins)."""
        return self.fit0

    def getmaxh(self):
        """Return objective 1 (maximum height)."""
        return self.fit1

    def getavgw(self):
        """Return objective 2 (average weight)."""
        return self.fit2

    def getcd(self):
        """Return the crowding distance of this solution."""
        return self.cd

    def updatecd(self, idist):
        """Store a new crowding-distance value."""
        self.cd = idist
class GAMMASol(Sol):
    """Solution with a variable-length representation (vlrep) of the bins.

    ``vlrep`` is a list of lists: vlrep[i] holds the item indices packed
    into bin i.  The x-matrix / y-vector from Sol are kept in sync with
    vlrep by every mutating method below.
    """
    def __init__(self, index, x, y, vlrep, t, chrom=None, prank=0):
        self.cd = 0.0  # Initialize crowded distance value
        self.vlrep = vlrep
        self.bin_weights = zeros(len(y))  # Initialize bin weight array
        self.bin_heights = zeros(len(y))  # Initialize bin height array
        fitvals = zeros(3)  # Initialize fitness vector
        # Derive the chromosome from vlrep unless one was supplied.
        if not chrom:
            chrom = self.vlrep2chrom(vlrep)
        Sol.__init__(self, index, chrom, x, y, t, fitvals, prank)
        self.openbins = 0
        self.initopenbins()
    def updatefitvals(self, fitvals):
        """Store a new fitness vector and cache its three components."""
        self.fits = fitvals
        self.fit0 = fitvals[0]
        self.fit1 = fitvals[1]
        self.fit2 = fitvals[2]
    def set_weights(self, weights):
        # NOTE(review): on a length mismatch this only prints an error and
        # still assigns -- confirm that silent continuation is intended.
        if len(weights) != len(self.bin_weights):
            print('Error! The length of the bin weight array is not ', self.n)
        self.bin_weights = weights
    def set_heights(self, heights):
        # NOTE(review): same print-and-continue behaviour as set_weights.
        if len(heights) != len(self.bin_heights):
            print('Error! The length of the bin height array is not ', self.n)
        self.bin_heights = heights
    def swapitems(self, i1, j1, i2, j2):
        """Swap the cookies j1 and j2 between boxes i1 and i2."""
        # Swap in the x-matrix
        self.x[i1, j1] = 0
        self.x[i2, j1] = 1
        self.x[i2, j2] = 0
        self.x[i1, j2] = 1
        # Swap in the variable length representation
        self.vlrep[i1].remove(j1)
        self.vlrep[i2].append(j1)
        self.vlrep[i2].remove(j2)
        self.vlrep[i1].append(j2)
        # Resort the bins to keep js in order
        self.vlrep[i1].sort()
        self.vlrep[i2].sort()
    def moveitem(self, i, j, inew):
        """Move cookie j from box i to box inew, closing box i if emptied."""
        # Move in variable length representation
        self.vlrep[i].remove(j)
        self.vlrep[inew].append(j)
        # Move in x-matrix
        self.x[i, j] = 0
        self.x[inew, j] = 1
        # Resort bin inew to keep js in order
        self.vlrep[inew].sort()
        # Check y-matrix
        if not self.vlrep[i]:
            # closebin() shifts all rows above i up by one, so the x-bit set
            # above moves with them; inew is decremented to track that shift.
            self.closebin(i)
            if inew > i:
                inew -= 1
        if self.y[inew] == 0:
            self.y[inew] = 1
    def opennewbin(self, i, j):
        """Move cookie j out of box i into a brand-new box."""
        inew = len(self.vlrep)
        # Move in x-matrix:
        self.x[i, j] = 0
        self.x[inew, j] = 1
        # Open new box in y-matrix:
        self.y[inew] = 1
        # Open new box in vlrep
        self.vlrep[i].remove(j)
        self.vlrep.append([j])
        self.openbins = len(self.vlrep)
    def closebin(self, i):
        """Close bin i after it has been emptied, compacting x and y."""
        if self.vlrep[i] == []:
            del self.vlrep[i]
        # Move to close empty rows: shift every row above i up by one.
        for imove in range(i, self.n - 1):
            self.y[imove] = self.y[imove + 1]
            self.x[imove, :] = self.x[imove + 1, :]
        self.y[-1] = 0
        self.x[-1, :] = 0
        self.initopenbins()
    def vlrep2chrom(self, vlrep):
        """Flatten vlrep into the chromosome (single list) representation."""
        chrom = list(vlrep[0])
        for i in range(1, len(vlrep)):
            chrom.extend(list(vlrep[i]))
        return chrom
    def removeunneededbins(self):
        """Drop empty trailing bins from vlrep so it matches openbins."""
        for i in range(self.openbins, self.n):
            if len(self.vlrep) == self.openbins:
                break
            if self.vlrep[self.openbins] == []:
                del self.vlrep[self.openbins]
            else:
                print('Error: y does not match vlrep in solution', self.index)
    def initopenbins(self):
        """Recompute the number of open bins from the y-vector."""
        self.openbins = int(sum(self.y))
    def getbins(self):
        return self.fit0
    def getmaxh(self):
        return self.fit1
    def get_heights(self):
        return self.bin_heights
    def getavgw(self):
        return self.fit2
    def get_weights(self):
        return self.bin_weights
    def getvlrep(self, i=None):
        # ``if i`` is False for bin 0, hence the explicit elif branch below.
        if i:
            return self.vlrep[i]
        elif i == 0:
            return self.vlrep[0]
        else:
            # i is None (or another falsy value): return the whole structure.
            return self.vlrep
    def getopenbins(self):
        return self.openbins
    def getcd(self):
        return self.cd
    def updatecd(self, idist):
        self.cd = idist
class PSOSol(Sol):
    """Solution subclass built specifically for a PSO algorithm.

    Besides the cached fitness components it tracks a niche count and a
    variable-length bin representation with per-bin weight/height totals.
    """
    def __init__(self, index, chromosome, x, y, t, fitvals, prank, niche):
        Sol.__init__(self, index, chromosome, x, y, t, fitvals, prank)
        self.fit0 = fitvals[0]
        self.fit1 = fitvals[1]
        self.fit2 = fitvals[2]
        self.niche = niche
        self.vlrep = []
        self.binws = []  # individual bin weights
        self.binhs = []  # individual bin heights
    def makevlrep(self, vlrep, items):
        """Store vlrep and accumulate each bin's total weight and height."""
        # Note: vlrep has item index in it, which is j + 1, so items is
        # addressed with index - 1 below.
        self.vlrep = list(vlrep)
        for i in range(len(self.vlrep)):
            weight = 0
            height = 0
            for j in range(len(self.vlrep[i])):
                index = self.vlrep[i][j]
                weight += items[index - 1].getweight()
                height += items[index - 1].getheight()
            self.binws.append(weight)
            self.binhs.append(height)
    def getpbest(self):
        """Return the item list of the most filled bin.

        "Most filled" is decided randomly between the heaviest bin and
        the tallest bin.
        """
        wmaxindex, wmax = max(enumerate(self.binws), key=itemgetter(1))
        hmaxindex, hmax = max(enumerate(self.binhs), key=itemgetter(1))
        wmaxbin = self.vlrep[wmaxindex]
        hmaxbin = self.vlrep[hmaxindex]
        binitems = random.choice([wmaxbin, hmaxbin])
        return binitems
    def getbins(self):
        return self.fit0
    def getmaxh(self):
        return self.fit1
    def getavgw(self):
        return self.fit2
    def getniche(self):
        return self.niche
    def updateniche(self, nichecount):
        self.niche = nichecount
    def getvlrep(self):
        return self.vlrep
# Run the demo entry point only when executed as a script.
if __name__ == '__main__':
    main()
| # solutions.py
# This python file contains modules save solutions.
# Author: <NAME>
# Date: March 31, 2016
import binpacking as bp
import mop
import random
from constraints import concheck
from numpy import zeros
from operator import itemgetter
def main():
print('This file saves solutions in a bpp optimization.')
def process(idnum, t, chromosome, bpp, items):
# Process solutions (decode, check constraints, calculate
# fitness values, make solution object)
x, y = bp.ed(idnum, chromosome, bpp, items)
concheck(idnum, x, bpp)
fit = mop.calcfits(x, y, items)
a = MultiSol(idnum, chromosome, x, y, t, fit, 0, 0.0)
return a
def oldnew(archive, q, genes):
# This module checks the new generation to see if its
# members already exist or need to be created.
# - archive is the set of all current solutions
# - q is the new generation
# - genes is only the chromosome portion of q
# - members is the number of individuals in a gen.
new = []
archgenes = []
for m in range(len(archive)):
archgenes.append(archive[m].getgenes())
k = 0
for p in range(len(genes)):
count = archgenes.count(genes[p])
if count == 0:
new.append(p)
del q[k]
k -= 1
k += 1
return new, q
def reduce(archive, p, q):
    """Trim *archive* (in place) to keep memory bounded during a run.

    If the archive holds more than 1200 solutions, the 1000 best-ranked
    ones are kept, plus any worse-ranked solution that is still a member
    of the parent generation *p* or the offspring generation *q*.  The
    archive is finally re-sorted by solution index and returned.
    """
    from operator import attrgetter
    if len(archive) > 1200:
        archive.sort(key=attrgetter('rank'))
        survivors = archive[:1000] + [s for s in archive[1000:]
                                      if s in p or s in q]
        # Slice-assign so the caller's list object is mutated, matching the
        # in-place behaviour callers may rely on.
        archive[:] = survivors
        archive.sort(key=attrgetter('index'))
    return archive
class Sol:
def __init__(self, index, chromosome, x, y, t, fitvals, prank):
self.index = index
self.genes = chromosome
self.n = len(self.genes)
self.x = x
self.y = y
self.t = int(t)
self.fits = fitvals
self.rank = prank
def getindex(self):
return self.index
def updateid(self, idnum):
self.index = idnum
def getgenes(self):
return self.genes
def getx(self):
return self.x
def gety(self):
return self.y
def getgen(self):
return self.t
def getfits(self):
return self.fits
def getrank(self):
return self.rank
def updaterank(self, prank):
self.rank = prank
class MultiSol(Sol):
def __init__(self, index, chromosome, x, y, t, fitvals, prank, idist):
Sol.__init__(self, index, chromosome, x, y, t, fitvals, prank)
self.fit0 = fitvals[0]
self.fit1 = fitvals[1]
self.fit2 = fitvals[2]
self.cd = idist
def getbins(self):
return self.fit0
def getmaxh(self):
return self.fit1
def getavgw(self):
return self.fit2
def getcd(self):
return self.cd
def updatecd(self, idist):
self.cd = idist
class GAMMASol(Sol):
def __init__(self, index, x, y, vlrep, t, chrom=None, prank=0):
self.cd = 0.0 # Initialize crowded distance value
self.vlrep = vlrep
self.bin_weights = zeros(len(y)) # Initialize bin weight array
self.bin_heights = zeros(len(y)) # Initialize bin height array
fitvals = zeros(3) # Initialize fitness vector
if not chrom:
chrom = self.vlrep2chrom(vlrep)
Sol.__init__(self, index, chrom, x, y, t, fitvals, prank)
self.openbins = 0
self.initopenbins()
def updatefitvals(self, fitvals):
self.fits = fitvals
self.fit0 = fitvals[0]
self.fit1 = fitvals[1]
self.fit2 = fitvals[2]
def set_weights(self, weights):
if len(weights) != len(self.bin_weights):
print('Error! The length of the bin weight array is not ', self.n)
self.bin_weights = weights
def set_heights(self, heights):
if len(heights) != len(self.bin_heights):
print('Error! The length of the bin height array is not ', self.n)
self.bin_heights = heights
def swapitems(self, i1, j1, i2, j2):
# This function swaps the cookies j1 and j2 between boxes i1 and i2
# Swap in the x-matrix
self.x[i1, j1] = 0
self.x[i2, j1] = 1
self.x[i2, j2] = 0
self.x[i1, j2] = 1
# Swap in the variable length representation
self.vlrep[i1].remove(j1)
self.vlrep[i2].append(j1)
self.vlrep[i2].remove(j2)
self.vlrep[i1].append(j2)
# Resort the bins to keep js in order
self.vlrep[i1].sort()
self.vlrep[i2].sort()
def moveitem(self, i, j, inew):
# This function moves cookie j2 from box i2 to box i
# Move in variable length representation
self.vlrep[i].remove(j)
self.vlrep[inew].append(j)
# Move in x-matrix
self.x[i, j] = 0
self.x[inew, j] = 1
# Resort bin inew to keep js in order
self.vlrep[inew].sort()
# Check y-matrix
if not self.vlrep[i]:
self.closebin(i)
if inew > i:
inew -= 1
if self.y[inew] == 0:
self.y[inew] = 1
def opennewbin(self, i, j):
# This function moves cookie j from box i into box inew at time t
inew = len(self.vlrep)
# Move in x-matrix:
self.x[i, j] = 0
self.x[inew, j] = 1
# Open new box in y-matrix:
self.y[inew] = 1
# Open new box in vlrep
self.vlrep[i].remove(j)
self.vlrep.append([j])
self.openbins = len(self.vlrep)
def closebin(self, i):
# This function closes bin i after it has been emptied
if self.vlrep[i] == []:
del self.vlrep[i]
# Move to close empty rows
for imove in range(i, self.n - 1):
self.y[imove] = self.y[imove + 1]
self.x[imove, :] = self.x[imove + 1, :]
self.y[-1] = 0
self.x[-1, :] = 0
self.initopenbins()
def vlrep2chrom(self, vlrep):
# This function reforms vlrep into the chromosome representation
chrom = list(vlrep[0])
for i in range(1, len(vlrep)):
chrom.extend(list(vlrep[i]))
return chrom
def removeunneededbins(self):
# This function removes empty bins from the end of the vlrep list
for i in range(self.openbins, self.n):
if len(self.vlrep) == self.openbins:
break
if self.vlrep[self.openbins] == []:
del self.vlrep[self.openbins]
else:
print('Error: y does not match vlrep in solution', self.index)
def initopenbins(self):
# This function determines the number of open bins based on the y-matrix
self.openbins = int(sum(self.y))
def getbins(self):
return self.fit0
def getmaxh(self):
return self.fit1
def get_heights(self):
return self.bin_heights
def getavgw(self):
return self.fit2
def get_weights(self):
return self.bin_weights
def getvlrep(self, i=None):
if i:
return self.vlrep[i]
elif i == 0:
return self.vlrep[0]
else:
return self.vlrep
def getopenbins(self):
return self.openbins
def getcd(self):
return self.cd
def updatecd(self, idist):
self.cd = idist
class PSOSol(Sol):
# This subclass of solutions is built specifically for a PSO algorithm.
def __init__(self, index, chromosome, x, y, t, fitvals, prank, niche):
Sol.__init__(self, index, chromosome, x, y, t, fitvals, prank)
self.fit0 = fitvals[0]
self.fit1 = fitvals[1]
self.fit2 = fitvals[2]
self.niche = niche
self.vlrep = []
self.binws = [] # individual bin weights
self.binhs = [] # individual bin heights
def makevlrep(self, vlrep, items):
# Note: vlrep has item index in it, which is j + 1
self.vlrep = list(vlrep)
for i in range(len(self.vlrep)):
weight = 0
height = 0
for j in range(len(self.vlrep[i])):
index = self.vlrep[i][j]
weight += items[index - 1].getweight()
height += items[index - 1].getheight()
self.binws.append(weight)
self.binhs.append(height)
def getpbest(self):
# This function returns the most filled bin
# Randomly chooses "most filled" from weight or height
wmaxindex, wmax = max(enumerate(self.binws), key=itemgetter(1))
hmaxindex, hmax = max(enumerate(self.binhs), key=itemgetter(1))
wmaxbin = self.vlrep[wmaxindex]
hmaxbin = self.vlrep[hmaxindex]
binitems = random.choice([wmaxbin, hmaxbin])
return binitems
def getbins(self):
return self.fit0
def getmaxh(self):
return self.fit1
def getavgw(self):
return self.fit2
def getniche(self):
return self.niche
def updateniche(self, nichecount):
self.niche = nichecount
def getvlrep(self):
return self.vlrep
if __name__ == '__main__':
main() | en | 0.84441 | # solutions.py # This python file contains modules save solutions. # Author: <NAME> # Date: March 31, 2016 # Process solutions (decode, check constraints, calculate # fitness values, make solution object) # This module checks the new generation to see if its # members already exist or need to be created. # - archive is the set of all current solutions # - q is the new generation # - genes is only the chromosome portion of q # - members is the number of individuals in a gen. # This module keeps the length of the archive below 1000 individual # solutions to save computer memory during runtime. # - archive is the list of all solutions # - p is the parent generation # - q is the next generation # Initialize crowded distance value # Initialize bin weight array # Initialize bin height array # Initialize fitness vector # This function swaps the cookies j1 and j2 between boxes i1 and i2 # Swap in the x-matrix # Swap in the variable length representation # Resort the bins to keep js in order # This function moves cookie j2 from box i2 to box i # Move in variable length representation # Move in x-matrix # Resort bin inew to keep js in order # Check y-matrix # This function moves cookie j from box i into box inew at time t # Move in x-matrix: # Open new box in y-matrix: # Open new box in vlrep # This function closes bin i after it has been emptied # Move to close empty rows # This function reforms vlrep into the chromosome representation # This function removes empty bins from the end of the vlrep list # This function determines the number of open bins based on the y-matrix # This subclass of solutions is built specifically for a PSO algorithm. # individual bin weights # individual bin heights # Note: vlrep has item index in it, which is j + 1 # This function returns the most filled bin # Randomly chooses "most filled" from weight or height | 2.942693 | 3 |
emp-tax/main.py | piotrzegarek/Employment-Tax | 0 | 6613086 | from calc import PaymentCalc, TaxCalc
from employees import BazaDanych, Employee
import pandas as pd
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
@app.route("/")
def index():
return 'Hello World!'
if __name__ == "__main__":
app.run(debug=True) | from calc import PaymentCalc, TaxCalc
from employees import BazaDanych, Employee
import pandas as pd
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
@app.route("/")
def index():
return 'Hello World!'
if __name__ == "__main__":
app.run(debug=True) | none | 1 | 2.120065 | 2 | |
src/go/Google_gcj_tools/lib/utils.py | veltzerdoron/GCJ | 0 | 6613087 | <filename>src/go/Google_gcj_tools/lib/utils.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods used to simplify code in the library and scripts."""
import datetime
import sys
def AskConfirmation(message, action, skip, sinput=sys.stdin, output=sys.stdout):
  """Show *message* and ask the user to confirm *action*.

  Args:
    message: Text shown to the user before asking.
    action: Verb describing the action to be confirmed.
    skip: If true, do not prompt and treat the action as confirmed.
    sinput: File-like object the answer is read from.
    output: File-like object prompts are written to.

  Returns:
    True if the user answered affirmatively or the check was skipped.
  """
  output.write(message)
  if skip:
    # Nothing to ask; just terminate the message line.
    output.write('\n')
    return True
  output.write(' {0}? (y/N) '.format(action))
  answer = sinput.readline()
  # Only the first character matters; anything but 'y'/'Y' means "no".
  return answer[:1].lower() == 'y'
def AskConfirmationOrDie(message, action, skip, sinput=sys.stdin,
                         output=sys.stdout, exit_value=1):
  """Ask for confirmation and terminate the process on a negative answer.

  Args:
    message: Text shown to the user before asking.
    action: Verb describing the action to be confirmed.
    skip: If true, do not prompt and treat the action as confirmed.
    sinput: File-like object the answer is read from.
    output: File-like object prompts are written to.
    exit_value: Process return value used when the user did not confirm.
  """
  confirmed = AskConfirmation(message, action, skip, sinput, output)
  if confirmed:
    return
  output.write('Aborted.\n')
  sys.exit(exit_value)
def _AppendTimeToken(value, unit, tokens):
  """Append '<value> <unit>[s]' to *tokens*, skipping non-positive values."""
  if value <= 0:
    return
  suffix = '' if value == 1 else 's'
  tokens.append('%s %s%s' % (value, unit, suffix))


def FormatHumanTime(seconds):
  """Format the number of seconds into a human readable string.

  This function expects to receive small values only (on the order of
  minutes), and will display minutes as the biggest unit.

  Args:
    seconds: Number of seconds to be formatted.

  Returns:
    A string with the formatted timestamp.
  """
  # If the seconds is null, just return 0 seconds.
  if seconds == 0:
    return '0 seconds'
  # Split into whole minutes and leftover seconds with plain integer
  # arithmetic.  The previous implementation went through
  # datetime.fromtimestamp(), which interprets the value in the host's
  # LOCAL timezone and therefore produced wrong minute/second fields on
  # machines with a non-zero UTC offset (and silently dropped hours for
  # values >= 3600).
  minutes, secs = divmod(int(seconds), 60)
  tokens = []
  _AppendTimeToken(minutes, 'minute', tokens)
  _AppendTimeToken(secs, 'second', tokens)
  return ', '.join(tokens)
def GetIndexFromInputId(input_spec, input_id):
  """Get the 0-based index of the input_id inside input_spec.

  Args:
    input_spec: Dictionary with the input specification, mapping from input
      name to another dictionary with an 'input_id' key.
    input_id: Id whose index must be retrieved.

  Returns:
    0-based index of the input_id inside input_spec, or None if the id is
    not present in the specification.
  """
  # Collect all known ids once instead of scanning the dict twice; using
  # .values() (instead of the Python-2-only iteritems()) keeps this working
  # on both Python 2 and Python 3.
  input_ids = [input_data['input_id'] for input_data in input_spec.values()]
  if input_id not in input_ids:
    return None
  # The index of input_id is equivalent to the number of ids lower than it.
  return sum(1 for existing_id in input_ids if existing_id < input_id)
| <filename>src/go/Google_gcj_tools/lib/utils.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods used to simplify code in the library and scripts."""
import datetime
import sys
def AskConfirmation(message, action, skip, sinput=sys.stdin, output=sys.stdout):
"""Ask for confirmation about something and return the user response.
Args:
message: Message shown to the user before asking.
action: Verb with the action to be performed upon confirmation.
skip: Boolean indicating if check should be skipped or not.
sinput: Input file where confirmation should be read from.
output: Output file where messages should be written to.
Returns:
True if the user confirmed the action of a skip was requested.
"""
# If the confirmation should be skipped just show the message. Otherwise,
# show the message and ask for confirmation.
output.write(message)
if skip:
output.write('\n')
return True
else:
output.write(' {0}? (y/N) '.format(action))
user_input = sinput.readline()
return user_input[:1].lower() == 'y'
def AskConfirmationOrDie(message, action, skip, sinput=sys.stdin,
output=sys.stdout, exit_value=1):
"""Ask for confirmation about something and abort on a negative answer.
Args:
message: Message shown to the user before asking.
action: Verb with the action to be performed upon confirmation.
skip: Boolean indicating if check should be skipped or not.
sinput: Input file where confirmation should be read from.
output: Output file where messages should be written to.
exit_value: Process' return value to use when user didn't confirm.
"""
# Ask for confirmation and exit if user didn't confirm.
if not AskConfirmation(message, action, skip, sinput, output):
output.write('Aborted.\n')
sys.exit(exit_value)
def _AppendTimeToken(value, unit, tokens):
  """Append '<value> <unit>[s]' to *tokens*, skipping non-positive values."""
  if value <= 0:
    return
  suffix = '' if value == 1 else 's'
  tokens.append('%s %s%s' % (value, unit, suffix))


def FormatHumanTime(seconds):
  """Format the number of seconds into a human readable string.

  This function expects to receive small values only (on the order of
  minutes), and will display minutes as the biggest unit.

  Args:
    seconds: Number of seconds to be formatted.

  Returns:
    A string with the formatted timestamp.
  """
  # If the seconds is null, just return 0 seconds.
  if seconds == 0:
    return '0 seconds'
  # Split into whole minutes and leftover seconds with plain integer
  # arithmetic.  The previous implementation went through
  # datetime.fromtimestamp(), which interprets the value in the host's
  # LOCAL timezone and therefore produced wrong minute/second fields on
  # machines with a non-zero UTC offset (and silently dropped hours for
  # values >= 3600).
  minutes, secs = divmod(int(seconds), 60)
  tokens = []
  _AppendTimeToken(minutes, 'minute', tokens)
  _AppendTimeToken(secs, 'second', tokens)
  return ', '.join(tokens)
def GetIndexFromInputId(input_spec, input_id):
  """Get the 0-based index of the input_id inside input_spec.

  Args:
    input_spec: Dictionary with the input specification, mapping from input
      name to another dictionary with an 'input_id' key.
    input_id: Id whose index must be retrieved.

  Returns:
    0-based index of the input_id inside input_spec, or None if the id is
    not present in the specification.
  """
  # Collect all known ids once instead of scanning the dict twice; using
  # .values() (instead of the Python-2-only iteritems()) keeps this working
  # on both Python 2 and Python 3.
  input_ids = [input_data['input_id'] for input_data in input_spec.values()]
  if input_id not in input_ids:
    return None
  # The index of input_id is equivalent to the number of ids lower than it.
  return sum(1 for existing_id in input_ids if existing_id < input_id)
| en | 0.839493 | #!/usr/bin/env python2 # -*- coding: utf-8 -*- # # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utility methods used to simplify code in the library and scripts. Ask for confirmation about something and return the user response. Args: message: Message shown to the user before asking. action: Verb with the action to be performed upon confirmation. skip: Boolean indicating if check should be skipped or not. sinput: Input file where confirmation should be read from. output: Output file where messages should be written to. Returns: True if the user confirmed the action of a skip was requested. # If the confirmation should be skipped just show the message. Otherwise, # show the message and ask for confirmation. Ask for confirmation about something and abort on a negative answer. Args: message: Message shown to the user before asking. action: Verb with the action to be performed upon confirmation. skip: Boolean indicating if check should be skipped or not. sinput: Input file where confirmation should be read from. output: Output file where messages should be written to. exit_value: Process' return value to use when user didn't confirm. # Ask for confirmation and exit if user didn't confirm. # Only append non-null tokens, being careful for plural terminations. Format the number of seconds into a human readable string. This function expects to receive small values only (on the order of minutes), and will display minutes as the biggest unit. 
Args: seconds: Number of seconds to be formatted. Returns: A string with the formatted timestamp. # If the seconds is null, just return 0 seconds. # Put the seconds into a datetime object and extract the minute and second # values, which then are returned in a human readable string. Get the 0-based index of the input_id inside input_spec. Args: input_spec: Dictionary with the input specification, mapping from input name to another dictionary with a 'time_limit' key. input_id: Id whose index must be retrieved. Returns: 0-based index of the input_id inside input_spec. # The index computation does not check if the input_id exists, so make a # separate check for it. # The index of input_id is equivalent to the number of ids lower than it. | 2.746356 | 3 |
webserver.py | jackneil/medSpellCheck | 0 | 6613088 | <filename>webserver.py
from flask import Flask, request
from OpenSSL import SSL
import jamspell, json, re
from json2html import *
print('Starting up ...')
corrector = jamspell.TSpellCorrector()
print('Loading medical model ...')
corrector.LoadLangModel('c:/_aidata/medSpellCheck/model_medical.v1.bin')
MSC = Flask(__name__) #medSpellCheck
formcode = "<form name='checkme' method='get' action=''>\n" + \
"Enter some medical text: <font size='-1'>(please enter at least 4 words ... this spellchecker uses context!)</font><br><textarea id='text' name='text' rows='10' cols='60'></textarea>\n" + \
"<br><br>Limit: <input id='limit' name='limit' type='number' min='1' max='10' value='2' /> <- only applies if 'candidates' selected as return type\n" + \
"<br><br>Return type: <select name='route' id='route'><option value='fix'>Fix</option><option value='candidates'>Candidates</option></select>\n" + \
"<br><br><input type='hidden' name='html' value='1'/><input type='submit' value='Correct It!' /></form>\n\n"
@MSC.route("/")
def hello():
route = request.args.get('route', default="", type=str)
if route == 'candidates': return candidates()
elif route == 'fix': return fix()
#else
return "<html><head><title>Charting.ai - Medical Spell Check - Powered by Hank Ai, Inc.</title></head>\n" + \
"<body>Welcome to Medical Spell Check As-A-Service, " + \
" powered by <a href='https://hank.ai'>Hank AI, Inc.</a><br><br>" + \
"My API endpoints are: <br>1) charting.ai/fix?html=0&text=i am a medical recrd yes i am. i have dibetes and rumatoid arthritis" + \
"<br>2) charting.ai/candidates?html=0&limit=2&text=i am a medical recrd yes i am. i have dibetes and rumatoid arthritis<br><br><br>" + \
"Or try me out:<br><br>" + formcode
@MSC.route("/fix")
def fix():
text = request.args.get('text', default = "", type = str)
htmlflag = request.args.get('html', default=0, type = int)
if text == "":
return "No text received. Usage: url/fix?html=0&text=texttomedicalspellcheck"
rval = {}
rval['input']= text
rval['results'] = corrector.FixFragment(text)
if text == rval['results']: rval['results']='CORRECT'
print(htmlflag)
if bool(htmlflag): return json2html.convert(json.dumps(rval)) + "<br><br><br>Try me out: <br><br>" + formcode
else: return json.dumps(rval)
@MSC.route("/candidates")
def candidates():
text = request.args.get('text', default = "", type = str)
limit = request.args.get('limit', default = 5, type = int)
htmlflag = request.args.get('html', default=0, type = int)
rval = {}
rval['input'] = text
runningOffset=0
if text == "":
return "No text received. Usage: url/candidates?html=0&limit=2&text=texttomedicalspellcheck"
respJSONstring = corrector.GetALLCandidatesScoredJSON(text)
print(respJSONstring)
rval = json.loads(respJSONstring)
for result in rval['results']:
result['candidates'] = result['candidates'][:limit]
if 'results' not in rval.keys() or len(rval['results'])==0: rval['results']='CORRECT'
if bool(htmlflag): return json2html.convert(json.dumps(rval)) + "<br><br><br>Try me out: <br><br>" + formcode
else: return json.dumps(rval,indent=2)
if __name__ == '__main__':
context=('c:/charting_ai.crt', 'c:/charting_ai.pem')
MSC.run(debug=True, host='0.0.0.0', port=443, ssl_context=context, threaded=True) | <filename>webserver.py
from flask import Flask, request
from OpenSSL import SSL
import jamspell, json, re
from json2html import *
print('Starting up ...')
corrector = jamspell.TSpellCorrector()
print('Loading medical model ...')
corrector.LoadLangModel('c:/_aidata/medSpellCheck/model_medical.v1.bin')
MSC = Flask(__name__) #medSpellCheck
formcode = "<form name='checkme' method='get' action=''>\n" + \
"Enter some medical text: <font size='-1'>(please enter at least 4 words ... this spellchecker uses context!)</font><br><textarea id='text' name='text' rows='10' cols='60'></textarea>\n" + \
"<br><br>Limit: <input id='limit' name='limit' type='number' min='1' max='10' value='2' /> <- only applies if 'candidates' selected as return type\n" + \
"<br><br>Return type: <select name='route' id='route'><option value='fix'>Fix</option><option value='candidates'>Candidates</option></select>\n" + \
"<br><br><input type='hidden' name='html' value='1'/><input type='submit' value='Correct It!' /></form>\n\n"
@MSC.route("/")
def hello():
route = request.args.get('route', default="", type=str)
if route == 'candidates': return candidates()
elif route == 'fix': return fix()
#else
return "<html><head><title>Charting.ai - Medical Spell Check - Powered by Hank Ai, Inc.</title></head>\n" + \
"<body>Welcome to Medical Spell Check As-A-Service, " + \
" powered by <a href='https://hank.ai'>Hank AI, Inc.</a><br><br>" + \
"My API endpoints are: <br>1) charting.ai/fix?html=0&text=i am a medical recrd yes i am. i have dibetes and rumatoid arthritis" + \
"<br>2) charting.ai/candidates?html=0&limit=2&text=i am a medical recrd yes i am. i have dibetes and rumatoid arthritis<br><br><br>" + \
"Or try me out:<br><br>" + formcode
@MSC.route("/fix")
def fix():
text = request.args.get('text', default = "", type = str)
htmlflag = request.args.get('html', default=0, type = int)
if text == "":
return "No text received. Usage: url/fix?html=0&text=texttomedicalspellcheck"
rval = {}
rval['input']= text
rval['results'] = corrector.FixFragment(text)
if text == rval['results']: rval['results']='CORRECT'
print(htmlflag)
if bool(htmlflag): return json2html.convert(json.dumps(rval)) + "<br><br><br>Try me out: <br><br>" + formcode
else: return json.dumps(rval)
@MSC.route("/candidates")
def candidates():
text = request.args.get('text', default = "", type = str)
limit = request.args.get('limit', default = 5, type = int)
htmlflag = request.args.get('html', default=0, type = int)
rval = {}
rval['input'] = text
runningOffset=0
if text == "":
return "No text received. Usage: url/candidates?html=0&limit=2&text=texttomedicalspellcheck"
respJSONstring = corrector.GetALLCandidatesScoredJSON(text)
print(respJSONstring)
rval = json.loads(respJSONstring)
for result in rval['results']:
result['candidates'] = result['candidates'][:limit]
if 'results' not in rval.keys() or len(rval['results'])==0: rval['results']='CORRECT'
if bool(htmlflag): return json2html.convert(json.dumps(rval)) + "<br><br><br>Try me out: <br><br>" + formcode
else: return json.dumps(rval,indent=2)
if __name__ == '__main__':
context=('c:/charting_ai.crt', 'c:/charting_ai.pem')
MSC.run(debug=True, host='0.0.0.0', port=443, ssl_context=context, threaded=True) | en | 0.218417 | #medSpellCheck #else | 2.878708 | 3 |
before/main.py | ArjanCodes/2021-even-more-code-smells | 3 | 6613089 | <reponame>ArjanCodes/2021-even-more-code-smells
from pos.order import Order
from pos.system import POSSystem
def main() -> None:
    """Demo entry point: wire up the POS system and push one order through it."""
    # create the POS system and setup the payment processor
    system = POSSystem()
    system.setup_payment_processor("https://api.stripe.com/v2")
    # create the order (id, customer name, street address, zip, city, email)
    order = Order(
        12345, "Arjan", "Sesame street 104", "1234", "Amsterdam", "<EMAIL>"
    )
    # line items: (name, quantity, unit price -- presumably cents; confirm in Order)
    order.create_line_item("Keyboard", 1, 5000)
    order.create_line_item("SSD", 1, 15000)
    order.create_line_item("USB cable", 2, 500)
    # register and process the order
    system.register_order(order)
    system.process_order(order)
if __name__ == "__main__":
main()
| from pos.order import Order
from pos.system import POSSystem
def main() -> None:
# create the POS system and setup the payment processor
system = POSSystem()
system.setup_payment_processor("https://api.stripe.com/v2")
# create the order
order = Order(
12345, "Arjan", "Sesame street 104", "1234", "Amsterdam", "<EMAIL>"
)
order.create_line_item("Keyboard", 1, 5000)
order.create_line_item("SSD", 1, 15000)
order.create_line_item("USB cable", 2, 500)
# register and process the order
system.register_order(order)
system.process_order(order)
if __name__ == "__main__":
main() | en | 0.796874 | # create the POS system and setup the payment processor # create the order # register and process the order | 2.692002 | 3 |
app/gaelo_processing/controller/dicom_zip_controller.py | salimkanoun/Rest_Radiomics | 0 | 6613090 | <filename>app/gaelo_processing/controller/dicom_zip_controller.py<gh_stars>0
import os
import shutil
import tempfile
import hashlib
from zipfile import ZipFile
from django.conf import settings
from dicom_to_cnn.tools.pre_processing import series
from django.http import HttpResponse
def handle(request):
    """Django view: accept a POSTed zip of a DICOM series and convert it.

    The raw request body is the zip archive; get_dicom_zip() stores the
    series on disk and builds the NIfTI image.

    NOTE(review): non-POST requests are also acknowledged with 200 without
    doing any work, and the variable below actually receives the created
    *image id* from get_dicom_zip(), not a filename -- both look like
    oversights worth confirming with the callers.
    """
    method = request.method
    if(method=='POST'):
        zip_file=request.read()
        filename=get_dicom_zip(zip_file)
    return HttpResponse(status=200)
def get_dicom_zip(zip_file):
    """Store a zipped DICOM series, unzip it and export a NIfTI image.

    Args:
        zip_file (bytes): raw content of the uploaded zip archive.

    Returns:
        str: md5-derived id of the NIfTI image that was created.
    """
    data_path = settings.STORAGE_DIR
    destination=tempfile.mkdtemp(prefix='dicom_zip_')
    file = open(destination+'/dicom.zip', 'wb')
    file.write(zip_file)
    file.close()
    #unzip_file and save dicom series
    # NOTE(review): the series id is derived from the temp file *path* (random
    # per upload), not from the zip content -- confirm that is intentional.
    image_md5 = hashlib.md5(str(file.name).encode())
    dicom_id = image_md5.hexdigest()
    os.mkdir(settings.STORAGE_DIR+'/dicom/dicom_serie_'+dicom_id)
    destination=settings.STORAGE_DIR+'/dicom/dicom_serie_'+dicom_id
    with ZipFile(file.name) as my_zip:
        for member in my_zip.namelist():
            filename = os.path.basename(member)
            # skip directories
            if not filename:
                continue
            # copy file (taken from zipfile's extract); flattens any
            # subdirectory structure into the series folder
            source = my_zip.open(member)
            target = open(os.path.join(destination, filename), "wb")
            with source, target:
                shutil.copyfileobj(source, target)
    #create and save nifti
    nifti=series.get_series_object(destination)
    # crude type sniffing: slice the object's repr and compare it against the
    # known SeriesCT / SeriesPT class paths (brittle if the module moves)
    nifti_str=str(nifti)
    nifti_str=nifti_str[1:44]
    if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT':
        nifti.get_instances_ordered()
        nifti.get_numpy_array()
        image_md5 = hashlib.md5(str(nifti).encode())
        image_id = image_md5.hexdigest()
        img=nifti.export_nifti(data_path+'/image/image_'+image_id+'_CT.nii')
    if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':
        nifti.get_instances_ordered()
        nifti.get_numpy_array()
        # PT images are exported in SUV units
        nifti.set_ExportType('suv')
        image_md5 = hashlib.md5(str(nifti).encode())
        image_id = image_md5.hexdigest()
        img=nifti.export_nifti(data_path+'/image/image_'+image_id+'_PT.nii')
    # NOTE(review): if the series is neither CT nor PT, image_id is never
    # bound and this return raises NameError -- confirm callers only send CT/PT.
    return image_id
import os
import shutil
import tempfile
import hashlib
from zipfile import ZipFile
from django.conf import settings
from dicom_to_cnn.tools.pre_processing import series
from django.http import HttpResponse
def handle(request):
method = request.method
if(method=='POST'):
zip_file=request.read()
filename=get_dicom_zip(zip_file)
return HttpResponse(status=200)
def get_dicom_zip(zip_file):
"""[Get dicom series zip, unzip files and create nifti]
Args:
zip_file ([byte]): [content of zip file]
Returns:
[id_image]: [The id of the nifti image created]
"""
data_path = settings.STORAGE_DIR
destination=tempfile.mkdtemp(prefix='dicom_zip_')
file = open(destination+'/dicom.zip', 'wb')
file.write(zip_file)
file.close()
#unzip_file and save dicom series
image_md5 = hashlib.md5(str(file.name).encode())
dicom_id = image_md5.hexdigest()
os.mkdir(settings.STORAGE_DIR+'/dicom/dicom_serie_'+dicom_id)
destination=settings.STORAGE_DIR+'/dicom/dicom_serie_'+dicom_id
with ZipFile(file.name) as my_zip:
for member in my_zip.namelist():
filename = os.path.basename(member)
# skip directories
if not filename:
continue
# copy file (taken from zipfile's extract)
source = my_zip.open(member)
target = open(os.path.join(destination, filename), "wb")
with source, target:
shutil.copyfileobj(source, target)
#create and save nifti
nifti=series.get_series_object(destination)
nifti_str=str(nifti)
nifti_str=nifti_str[1:44]
if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT':
nifti.get_instances_ordered()
nifti.get_numpy_array()
image_md5 = hashlib.md5(str(nifti).encode())
image_id = image_md5.hexdigest()
img=nifti.export_nifti(data_path+'/image/image_'+image_id+'_CT.nii')
if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':
nifti.get_instances_ordered()
nifti.get_numpy_array()
nifti.set_ExportType('suv')
image_md5 = hashlib.md5(str(nifti).encode())
image_id = image_md5.hexdigest()
img=nifti.export_nifti(data_path+'/image/image_'+image_id+'_PT.nii')
return image_id | en | 0.765843 | [Get dicom series zip, unzip files and create nifti] Args: zip_file ([byte]): [content of zip file] Returns: [id_image]: [The id of the nifti image created] #unzip_file and save dicom series # skip directories # copy file (taken from zipfile's extract) #create and save nifti | 2.20286 | 2 |
corehq/messaging/smsbackends/unicel/views.py | kkrampa/commcare-hq | 1 | 6613091 | <filename>corehq/messaging/smsbackends/unicel/views.py
from __future__ import absolute_import
from __future__ import unicode_literals
from django.http import HttpResponse
from corehq.apps.sms.views import IncomingBackendView
from corehq.messaging.smsbackends.unicel.models import create_from_request, SQLUnicelBackend
import json
def incoming(request, backend_id=None):
    """
    The inbound endpoint for UNICEL's API.

    Records the incoming SMS in the message log and acknowledges it with a
    JSON payload containing the stored message's id; no further processing
    happens here.
    """
    # for now just save this information in the message log and return
    message = create_from_request(request, backend_id=backend_id)
    return HttpResponse(json.dumps({'status': 'success', 'message_id': message.couch_id}), 'text/json')
class UnicelIncomingSMSView(IncomingBackendView):
    """URL-dispatched wrapper around incoming() for the Unicel SMS backend."""
    urlname = 'unicel_sms'
    @property
    def backend_class(self):
        # tells IncomingBackendView which backend model this view resolves
        return SQLUnicelBackend
    def get(self, request, api_key, *args, **kwargs):
        # api_key comes from the URL; presumably validated by the base
        # IncomingBackendView -- confirm before relying on it here
        return incoming(request, backend_id=self.backend_couch_id)
| <filename>corehq/messaging/smsbackends/unicel/views.py
from __future__ import absolute_import
from __future__ import unicode_literals
from django.http import HttpResponse
from corehq.apps.sms.views import IncomingBackendView
from corehq.messaging.smsbackends.unicel.models import create_from_request, SQLUnicelBackend
import json
def incoming(request, backend_id=None):
"""
The inbound endpoint for UNICEL's API.
"""
# for now just save this information in the message log and return
message = create_from_request(request, backend_id=backend_id)
return HttpResponse(json.dumps({'status': 'success', 'message_id': message.couch_id}), 'text/json')
class UnicelIncomingSMSView(IncomingBackendView):
urlname = 'unicel_sms'
@property
def backend_class(self):
return SQLUnicelBackend
def get(self, request, api_key, *args, **kwargs):
return incoming(request, backend_id=self.backend_couch_id)
| en | 0.676879 | The inbound endpoint for UNICEL's API. # for now just save this information in the message log and return | 2.140925 | 2 |
main.py | TheJakester42/JakeLanguage | 0 | 6613092 | <reponame>TheJakester42/JakeLanguage
import re
import Tokenize
import Token
keyword_dict = {
"make": "declares a variable",
"if": "if control flow",
"else": "else control flow",
"return": "returns out of a method",
"class": "declares a class",
"method": "declares a method",
}
def classifyToken(word):
    """Classify a lexeme into a Token with a human-readable description.

    The Tokenize.* predicates are tried in priority order (keywords before
    strings, operators, statement terminators, numbers, identifiers); the
    first match wins. Unmatched lexemes are wrapped as-is.
    """
    if(Tokenize.TokenizeKeywords(word)):
        return Token.Token(word + ": is a keyword of type ~:~ " + keyword_dict[word])
    elif(Tokenize.TokenizeStrings(word)):
        return Token.Token(word + ": is a string")
    elif(Tokenize.TokenizeOperators(word)):
        return Token.Token(word + ": is an operator")
    elif(Tokenize.TokenizeEndOfStatment(word)):
        # fixed output typo: "statment" -> "statement"
        return Token.Token("; : is an end of statement")
    elif(Tokenize.TokenizeDigits(word)):
        return Token.Token(word + ": is a number")
    elif(Tokenize.TokenizeIdentifiers(word)):
        # fixed output typo: "identefier" -> "identifier"
        return Token.Token(word + ": is an identifier")
    else:
        # unknown lexeme: keep the raw text as the description
        return Token.Token(word)
contents = ""
try:
codeFile = open("myCode.txt","r")
contents = codeFile.read()
codeFile.close()
except:
print("there was an issue reading the file")
comments = Tokenize.TokenizeComments(contents)
for word in comments:
print(word)
print("is a comment and is now removed")
print
print
contents = Tokenize.cleanComments(contents)
words = Tokenize.TokenizeOutWords(contents)
tokens = [classifyToken(word) for word in words]
for token in tokens:
print(token.description) | import re
import Tokenize
import Token
keyword_dict = {
"make": "declares a variable",
"if": "if control flow",
"else": "else control flow",
"return": "returns out of a method",
"class": "declares a class",
"method": "declares a method",
}
def classifyToken(word):
if(Tokenize.TokenizeKeywords(word)):
return Token.Token(word + ": is a keyword of type ~:~ " + keyword_dict[word])
elif(Tokenize.TokenizeStrings(word)):
return Token.Token(word + ": is a string")
elif(Tokenize.TokenizeOperators(word)):
return Token.Token(word + ": is an operator")
elif(Tokenize.TokenizeEndOfStatment(word)):
return Token.Token("; : is an end of statment")
elif(Tokenize.TokenizeDigits(word)):
return Token.Token(word + ": is a number")
elif(Tokenize.TokenizeIdentifiers(word)):
return Token.Token(word + ": is an identefier")
else:
return Token.Token(word)
contents = ""
try:
codeFile = open("myCode.txt","r")
contents = codeFile.read()
codeFile.close()
except:
print("there was an issue reading the file")
comments = Tokenize.TokenizeComments(contents)
for word in comments:
print(word)
print("is a comment and is now removed")
print
print
contents = Tokenize.cleanComments(contents)
words = Tokenize.TokenizeOutWords(contents)
tokens = [classifyToken(word) for word in words]
for token in tokens:
print(token.description) | none | 1 | 3.480819 | 3 | |
leetcode/155.py | windniw/just-for-fun | 1 | 6613093 | """
link: https://leetcode-cn.com/problems/min-stack
problem: 实现栈
solution: 数组模拟
"""
class MinStack:
    """Stack supporting push/pop/top and constant-time minimum retrieval.

    A parallel stack of running minima makes getMin O(1) instead of
    re-scanning the whole stack, and pop O(1) instead of rebuilding the
    backing list with a slice copy.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.data = []
        # mins[i] == min(data[:i+1]); top of mins is the current minimum
        self.mins = []

    def push(self, x: int) -> None:
        self.data.append(x)
        # new running minimum is the smaller of x and the previous minimum
        self.mins.append(x if not self.mins else min(x, self.mins[-1]))

    def pop(self) -> None:
        # keep both stacks in lockstep; like the original, return the
        # popped value (LeetCode ignores it, existing callers may not)
        self.mins.pop()
        return self.data.pop()

    def top(self) -> int:
        return self.data[-1]

    def getMin(self) -> int:
        return self.mins[-1]
| """
link: https://leetcode-cn.com/problems/min-stack
problem: 实现栈
solution: 数组模拟
"""
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.data = []
def push(self, x: int) -> None:
self.data.append(x)
def pop(self) -> None:
t = self.data[-1]
self.data = self.data[0:-1]
return t
def top(self) -> int:
return self.data[-1]
def getMin(self) -> int:
return min(self.data)
| en | 0.716204 | link: https://leetcode-cn.com/problems/min-stack problem: 实现栈 solution: 数组模拟 initialize your data structure here. | 3.7246 | 4 |
performance/2017_06_performance_basics/images/mmm_naive.py | fastats/learning | 4 | 6613094 | import os
import matplotlib.pyplot as plt
print('PID: ', str(os.getpid()))
data =[
16843164,
17095332,
16924717,
16529307,
16284980
]
plt.plot(data, label='Naive MMM')
plt.xlabel('Repeats')
plt.ylabel('Clock cycles')
plt.legend(loc='upper right')
plt.show() | import os
import matplotlib.pyplot as plt
print('PID: ', str(os.getpid()))
data =[
16843164,
17095332,
16924717,
16529307,
16284980
]
plt.plot(data, label='Naive MMM')
plt.xlabel('Repeats')
plt.ylabel('Clock cycles')
plt.legend(loc='upper right')
plt.show() | none | 1 | 2.842175 | 3 | |
jingyun_cli/jingd/sample.py | meisanggou/jingyun | 0 | 6613095 | #! /usr/bin/env python
# coding: utf-8
import sys
import os
import argparse
try:
from .help import g_help, error_and_exit, jy_input
except ValueError:
from help import g_help, error_and_exit, jy_input
from jingyun_cli.jingd import request_jingd
def request_sample(method, url, data):
    """Thin wrapper: route the call to the jingd 'sample' service."""
    return request_jingd("sample", method, url, data)
def req_process():
    """Return the list of all analysis processes known to the sample service."""
    url = "/sample/process/"
    r_data = request_sample("GET", url, None)
    all_process = r_data["data"]
    return all_process
def req_process_detail(process_no):
    """Return the parameter definitions for the given process number."""
    url = "/sample/process/detail/"
    r_data = request_sample("POST", url, dict(process_no=process_no))
    params = r_data["data"]["params"]
    return params
def req_sample_info(sample_no):
    """Return the comma-separated seq_files string recorded for a sample."""
    url = "/sample/info/"
    r_data = request_sample("GET", url, dict(sample_no=sample_no))
    seq_files = r_data["data"]["seq_files"]
    return seq_files
def req_sample_right(sample_no):
    """Return the access-rights entries (account/role dicts) for a sample."""
    url = "/sample/right/"
    r_data = request_sample("GET", url, dict(sample_no=sample_no))
    rights = r_data["data"]
    return rights
def req_analysis(account, sample_no, seq_files, bucket, process_no):
    """Submit an analysis request after printing the payload and asking for
    interactive confirmation; any answer other than y/yes aborts silently."""
    url = "/sample/analysis/v2/"
    data = dict(sample_no=sample_no, seq_files=seq_files, bucket=bucket, process_no=process_no, account=account)
    print(data)
    confirm = jy_input("Confirm?").lower()
    if confirm in ["y", "yes"]:
        r_data = request_sample("POST", url, data)
def re_run(sample_no, account=None):
    """Re-submit the analysis for an existing sample.

    Reads the sample's recorded seq_files string, whose format is
    "<process_name>,<bucket>:<path>,<bucket>:<path>,...". Resolves the
    owning account (role == 0) when none is given, matches the process by
    name, maps the remaining file entries onto the process parameters in
    order, and finally calls req_analysis() (which asks for confirmation).
    """
    seq_files = req_sample_info(sample_no)
    if seq_files is None:
        error_and_exit("Not Found seq_files")
    files = seq_files.split(",")
    if account is None:
        # fall back to the sample owner (role == 0)
        rights = req_sample_right(sample_no)
        for item in rights:
            if item["role"] == 0:
                account = item["account"]
        if account is None:
            error_and_exit("Auto Find Account Fail, Please Set Account")
    # the first seq_files entry is the process name; resolve its number
    process_no = -1
    all_process = req_process()
    for p in all_process:
        if p["process_name"] == files[0]:
            process_no = p["process_no"]
    if process_no == -1:
        error_and_exit("Not Found Process Name Is %s" % files[0])
    params = req_process_detail(process_no)
    # FIX: materialize the parameter names as a list. The old code indexed
    # r_seq_files.keys()[i], which raises TypeError under Python 3 because
    # dict view objects are not subscriptable.
    param_names = [p["param_name"] for p in params]
    if len(param_names) != len(files) - 1:
        print(param_names)
        print(files)
        error_and_exit("Please Check input file")
    r_seq_files = dict()
    bucket = None
    for i, name in enumerate(param_names):
        # each remaining entry is "<bucket>:<file_path>"; the last bucket
        # seen is passed along (matches the original behaviour)
        bucket, file_path = files[i + 1].split(":", 1)
        r_seq_files[name] = file_path
    req_analysis(account, sample_no, r_seq_files, bucket, process_no)
def cli_main():
    """Parse CLI arguments (positional sample_no, optional -a account) and re-run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--account", dest="account", help=g_help("user"))
    parser.add_argument("sample_no", help=g_help("sample_no"))
    if len(sys.argv) <= 1:
        # no arguments at all: show usage instead of failing on sample_no
        sys.argv.append("-h")
    args = parser.parse_args()
    sample_no = int(args.sample_no)
    re_run(sample_no, args.account)
if __name__ == "__main__":
sys.argv.extend(["205"])
cli_main()
| #! /usr/bin/env python
# coding: utf-8
import sys
import os
import argparse
try:
from .help import g_help, error_and_exit, jy_input
except ValueError:
from help import g_help, error_and_exit, jy_input
from jingyun_cli.jingd import request_jingd
def request_sample(method, url, data):
return request_jingd("sample", method, url, data)
def req_process():
url = "/sample/process/"
r_data = request_sample("GET", url, None)
all_process = r_data["data"]
return all_process
def req_process_detail(process_no):
url = "/sample/process/detail/"
r_data = request_sample("POST", url, dict(process_no=process_no))
params = r_data["data"]["params"]
return params
def req_sample_info(sample_no):
url = "/sample/info/"
r_data = request_sample("GET", url, dict(sample_no=sample_no))
seq_files = r_data["data"]["seq_files"]
return seq_files
def req_sample_right(sample_no):
url = "/sample/right/"
r_data = request_sample("GET", url, dict(sample_no=sample_no))
rights = r_data["data"]
return rights
def req_analysis(account, sample_no, seq_files, bucket, process_no):
url = "/sample/analysis/v2/"
data = dict(sample_no=sample_no, seq_files=seq_files, bucket=bucket, process_no=process_no, account=account)
print(data)
confirm = jy_input("Confirm?").lower()
if confirm in ["y", "yes"]:
r_data = request_sample("POST", url, data)
def re_run(sample_no, account=None):
seq_files = req_sample_info(sample_no)
if seq_files is None:
error_and_exit("Not Found seq_files")
files = seq_files.split(",")
if account is None:
rights = req_sample_right(sample_no)
for item in rights:
if item["role"] == 0:
account = item["account"]
if account is None:
error_and_exit("Auto Find Account Fail, Please Set Account")
process_no = -1
all_process = req_process()
for p in all_process:
if p["process_name"] == files[0]:
process_no = p["process_no"]
if process_no == -1:
error_and_exit("Not Found Process Name Is %s" % files[0])
params = req_process_detail(process_no)
r_seq_files = dict()
for p in params:
r_seq_files[p["param_name"]] = ""
keys = r_seq_files.keys()
if len(keys) != len(files) - 1:
print(keys)
print(files)
error_and_exit("Please Check input file")
bucket = None
for i in range(len(keys)):
bucket, file_path = files[i + 1].split(":", 1)
r_seq_files[keys[i]] = file_path
req_analysis(account, sample_no, r_seq_files, bucket, process_no)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--account", dest="account", help=g_help("user"))
parser.add_argument("sample_no", help=g_help("sample_no"))
if len(sys.argv) <= 1:
sys.argv.append("-h")
args = parser.parse_args()
sample_no = int(args.sample_no)
re_run(sample_no, args.account)
if __name__ == "__main__":
sys.argv.extend(["205"])
cli_main()
| en | 0.321189 | #! /usr/bin/env python # coding: utf-8 | 2.165973 | 2 |
nnunet/utilities/find_duplicate_series.py | Janetteeeeeeee/nnUNet | 0 | 6613096 | import os
import pydicom
from batchgenerators.utilities.file_and_folder_operations import join, subfiles
if __name__ == '__main__':
bladder_series_dir = r'E:\AutoSeg_Bladder_data\Dicom\Train&Valid'
rectum_series_dir = r'E:\AutoSeg_Rectum_data\Train&Valid\6th_Eclips'
series_list = os.listdir(rectum_series_dir)
position_dict = {}
for series in series_list:
dicom_files = subfiles(join(rectum_series_dir, series), prefix='1.', suffix='.dcm')
if len(dicom_files) > 0:
dicom_file = pydicom.read_file(dicom_files[0])
exposure_time = dicom_file[0x0018, 0x1150]
position = dicom_file[0x0020, 0x0032].value
if (position[0], position[1], position[2]) in position_dict:
print(series)
print(position_dict[(position[0], position[1], position[2])])
else:
position_dict[(position[0], position[1], position[2])] = series
| import os
import pydicom
from batchgenerators.utilities.file_and_folder_operations import join, subfiles
if __name__ == '__main__':
bladder_series_dir = r'E:\AutoSeg_Bladder_data\Dicom\Train&Valid'
rectum_series_dir = r'E:\AutoSeg_Rectum_data\Train&Valid\6th_Eclips'
series_list = os.listdir(rectum_series_dir)
position_dict = {}
for series in series_list:
dicom_files = subfiles(join(rectum_series_dir, series), prefix='1.', suffix='.dcm')
if len(dicom_files) > 0:
dicom_file = pydicom.read_file(dicom_files[0])
exposure_time = dicom_file[0x0018, 0x1150]
position = dicom_file[0x0020, 0x0032].value
if (position[0], position[1], position[2]) in position_dict:
print(series)
print(position_dict[(position[0], position[1], position[2])])
else:
position_dict[(position[0], position[1], position[2])] = series
| none | 1 | 2.165159 | 2 | |
__computeSumofASCIIUpperCase.py | simdevex/01.Basics | 0 | 6613097 | <gh_stars>0
'''
Write a Python program to compute the sum of the ASCII values of the upper-case characters in a given string.
Input:
<NAME>
Output:
373
Input:
JavaScript
Output:
157
'''
#License: https://bit.ly/3oLErEI
def test(strs):
return sum(map(ord,filter(str.isupper,strs)))
strs = "PytHon ExerciSEs"
print("Original strings:")
print(strs)
print("Sum of the ASCII values of the upper-case characters in the said string:")
print(test(strs))
strs = "JavaScript"
print("\nOriginal strings:")
print(strs)
print("Sum of the ASCII values of the upper-case characters in the said string:")
print(test(strs))
| '''
Write a Python program to compute the sum of the ASCII values of the upper-case characters in a given string.
Input:
<NAME>
Output:
373
Input:
JavaScript
Output:
157
'''
#License: https://bit.ly/3oLErEI
def test(strs):
return sum(map(ord,filter(str.isupper,strs)))
strs = "PytHon ExerciSEs"
print("Original strings:")
print(strs)
print("Sum of the ASCII values of the upper-case characters in the said string:")
print(test(strs))
strs = "JavaScript"
print("\nOriginal strings:")
print(strs)
print("Sum of the ASCII values of the upper-case characters in the said string:")
print(test(strs)) | en | 0.58417 | Write a Python program to compute the sum of the ASCII values of the upper-case characters in a given string. Input: <NAME> Output: 373 Input: JavaScript Output: 157 #License: https://bit.ly/3oLErEI | 3.992384 | 4 |
esp8266/micropython/read_temperature.py | SERC-IoT/Starter-MLX90614-IR-Temperature-Sensor | 2 | 6613098 | <filename>esp8266/micropython/read_temperature.py
import time
import mlx90614
from machine import SoftI2C, Pin
# define software I2C bus (needed for ESP8266).
# alternatively hardware I2C bus (ESP32 only) can be used by passing 0 or 1 to
# constructor, i.e.: i2c = I2C(0, scl=Pin(5), sda=Pin(4), freq=100000)
# any input pins can be defined for the i2c interface
# note, sensor doesn't work at default 400k bus speed
i2c = SoftI2C(scl=Pin(5), sda=Pin(4), freq=100000)
# create sensor object
sensor = mlx90614.MLX90614(i2c)
print("* MLX90614 Temperature *")
print("Object | Ambient")
while True:
    # read sensor values (object = IR target, ambient = sensor die)
    object_temp = sensor.read_object_temp()
    ambient_temp = sensor.read_ambient_temp()
    # print readings to console
    # {} is used in conjunction with format() for substitution.
    # .2f - format to 2 decimal places.
    # end='\r' rewrites the same console line on every refresh
    print("{0:>5.2f}C | {1:>5.2f}C".format(object_temp, ambient_temp), end='\r')
    time.sleep_ms(1000)
| <filename>esp8266/micropython/read_temperature.py
import time
import mlx90614
from machine import SoftI2C, Pin
# define software I2C bus (needed for ESP8266).
# alternatively hardware I2C bus (ESP32 only) can be used by passing 0 or 1 to
# constructor, i.e.: i2c = I2C(0, scl=Pin(5), sda=Pin(4), freq=100000)
# any input pins can be defined for the i2c interface
# note, sensor doesn't work at default 400k bus speed
i2c = SoftI2C(scl=Pin(5), sda=Pin(4), freq=100000)
# create snesor object
sensor = mlx90614.MLX90614(i2c)
print("* MLX90614 Temperature *")
print("Object | Ambient")
while True:
# read sensor values
object_temp = sensor.read_object_temp()
ambient_temp = sensor.read_ambient_temp()
# print readings to console
# {} is used in conjunction with format() for substitution.
# .2f - format to 2 decimal places.
print("{0:>5.2f}C | {1:>5.2f}C".format(object_temp, ambient_temp), end='\r')
time.sleep_ms(1000)
| en | 0.722123 | # define software I2C bus (needed for ESP8266). # alternatively hardware I2C bus (ESP32 only) can be used by passing 0 or 1 to # constructor, i.e.: i2c = I2C(0, scl=Pin(5), sda=Pin(4), freq=100000) # any input pins can be defined for the i2c interface # note, sensor doesn't work at default 400k bus speed # create snesor object # read sensor values # print readings to console # {} is used in conjunction with format() for substitution. # .2f - format to 2 decimal places. | 3.610938 | 4 |
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic mass unit-hartree relationship.py | kuanpern/jupyterlab-snippets-multimenus | 0 | 6613099 | <filename>example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic mass unit-hartree relationship.py
constants.physical_constants["atomic mass unit-hartree relationship"] | <filename>example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic mass unit-hartree relationship.py
constants.physical_constants["atomic mass unit-hartree relationship"] | none | 1 | 1.198013 | 1 | |
EstruturaDeRepeticao/43.py | TheCarvalho/atividades-wikipython | 0 | 6613100 | <filename>EstruturaDeRepeticao/43.py
'''
43. A snack bar's menu is as follows (exercise statement):

    Item               Code    Price
    Cachorro Quente    100     R$ 1.20
    Bauru Simples      101     R$ 1.30
    Bauru com ovo      102     R$ 1.50
    Hambúrguer         103     R$ 1.20
    Cheeseburguer      104     R$ 1.30
    Refrigerante       105     R$ 1.00

Read the ordered item codes and quantities, compute the amount due per item
(price * quantity) and the overall total. The customer signals the end of
the order (code 0 here).
'''
from os import system
from time import sleep

# code -> (name shown on the receipt, unit price); replaces the long
# if/elif chain and restores the names lost to placeholder/typo damage
# ('<NAME>' for code 100, 'Redrigerante' for code 105)
MENU = {
    100: ('Cachorro Quente', 1.20),
    101: ('Bauru Simples', 1.30),
    102: ('Bauru com ovo', 1.50),
    103: ('Hambúrguer', 1.20),
    104: ('Cheeseburguer', 1.30),
    105: ('Refrigerante', 1.00),
}

comanda = dict()
total = 0

print('{:<20}{:<14}{}'.format('ESPECIFICAÇÃO', 'CÓDIGO', 'PREÇO\n'))
print('{:<20}{:<14}{}'.format('Cachorro Quente', '100', 'R$ 1,20'))
print('{:<20}{:<14}{}'.format('Bauru Simples', '101', 'R$ 1,30'))
print('{:<20}{:<14}{}'.format('Bauru com ovo', '102', 'R$ 1,50'))
print('{:<20}{:<14}{}'.format('Hambúrguer', '103', 'R$ 1,20'))
print('{:<20}{:<14}{}'.format('Cheeseburguer', '104', 'R$ 1,30'))
print('{:<20}{:<14}{}'.format('Refrigerante', '105', 'R$ 1,00'))

while True:
    pedido = int(input('\nItem: '))
    if pedido == 0:
        break
    if pedido not in MENU:
        print('Opção inválida, tente novamente!')
        sleep(2)
        continue
    nome, preco = MENU[pedido]
    quantidade = int(input('Quantidade: '))
    # accumulate: ordering the same item twice adds to it instead of
    # overwriting the previous quantity (the old dict assignment lost it)
    comanda[nome] = comanda.get(nome, 0) + quantidade
    total += preco * quantidade

system('cls')
print('====== COMANDA ======')
for item, quantidade in comanda.items():
    print(f'{quantidade}x - {item}')
print('=' * 21)
print(f'Total: R$ {total:.2f}')
| <filename>EstruturaDeRepeticao/43.py
'''
43. O cardápio de uma lanchonete é o seguinte:
Especificação Código Preço
Cachorro Quente 100 R$ 1,20
Bauru Simples 101 R$ 1,30
Bauru com ovo 102 R$ 1,50
Hambúrguer 103 R$ 1,20
Cheeseburguer 104 R$ 1,30
Refrigerante 105 R$ 1,00
Faça um programa que leia o código dos itens pedidos e as quantidades desejadas. Calcule e mostre o valor a ser
pago por item (preço * quantidade) e o total geral do pedido. Considere que o cliente deve informar quando o pedido
deve ser encerrado.
'''
from os import system
from time import sleep
comanda = dict()
total = 0
print('{:<20}{:<14}{}'.format('ESPECIFICAÇÃO', 'CÓDIGO', 'PREÇO\n'))
print('{:<20}{:<14}{}'.format('Cachorro Quente', '100', 'R$ 1,20'))
print('{:<20}{:<14}{}'.format('Bauru Simples', '101', 'R$ 1,30'))
print('{:<20}{:<14}{}'.format('Bauru com ovo', '102', 'R$ 1,50'))
print('{:<20}{:<14}{}'.format('Hambúguer', '103', 'R$ 1,20'))
print('{:<20}{:<14}{}'.format('Cheeseburguer', '104', 'R$ 1,30'))
print('{:<20}{:<14}{}'.format('Refrigerante', '105', 'R$ 1,00'))
while True:
pedido = int(input('\nItem: '))
if pedido == 0:
break
elif pedido == 100:
pedido = '<NAME>'
preco = 1.20
elif pedido == 101:
pedido = 'Bauru Simples'
preco = 1.30
elif pedido == 102:
pedido = 'Bauru com ovo'
preco = 1.50
elif pedido == 103:
pedido = 'Hambúrguer'
preco = 1.20
elif pedido == 104:
pedido = 'Cheeseburguer'
preco = 1.30
elif pedido == 105:
pedido = 'Redrigerante'
preco = 1
else:
print('Opção inválida, tente novamente!')
sleep(2)
continue
quantidade = int(input('Quantidade: '))
comanda[pedido] = quantidade
total += preco*quantidade
system('cls')
print('====== COMANDA ======')
for item, quantidade in comanda.items():
print(f'{quantidade}x - {item}')
print('='*21)
print(f'Total: R$ {total:.2f}')
| pt | 0.956395 | 43. O cardápio de uma lanchonete é o seguinte: Especificação Código Preço Cachorro Quente 100 R$ 1,20 Bauru Simples 101 R$ 1,30 Bauru com ovo 102 R$ 1,50 Hambúrguer 103 R$ 1,20 Cheeseburguer 104 R$ 1,30 Refrigerante 105 R$ 1,00 Faça um programa que leia o código dos itens pedidos e as quantidades desejadas. Calcule e mostre o valor a ser pago por item (preço * quantidade) e o total geral do pedido. Considere que o cliente deve informar quando o pedido deve ser encerrado. | 3.924362 | 4 |
braze/client.py | dtatarkin/braze-client | 1 | 6613101 | <reponame>dtatarkin/braze-client
import time
import requests
from tenacity import retry
from tenacity import stop_after_attempt
from tenacity import wait_random_exponential
DEFAULT_API_URL = "https://rest.iad-02.braze.com"
USER_TRACK_ENDPOINT = "/users/track"
USER_DELETE_ENDPOINT = "/users/delete"
USER_EXPORT_ENDPOINT = "/users/export/ids"
#: Endpoint for Scheduled Trigger Campaign Sends
CAMPAIGN_TRIGGER_SCHEDULE_CREATE = "/campaigns/trigger/schedule/create"
MAX_RETRIES = 3
# Max time to wait between API call retries
MAX_WAIT_SECONDS = 1.25
class BrazeClientError(Exception):
"""
Represents any Braze Client Error.
https://www.braze.com/docs/developer_guide/rest_api/user_data/#user-track-responses
"""
pass
class BrazeRateLimitError(BrazeClientError):
def __init__(self, reset_epoch_s):
"""
A rate limit error was encountered.
:param float reset_epoch_s: Unix timestamp for when the API may be called again.
"""
self.reset_epoch_s = reset_epoch_s
super(BrazeRateLimitError, self).__init__()
class BrazeInternalServerError(BrazeClientError):
"""
Used for Braze API responses where response code is of type 5XX suggesting
Braze side server errors.
"""
pass
def _wait_random_exp_or_rate_limit():
"""Creates a tenacity wait callback that accounts for explicit rate limits."""
random_exp = wait_random_exponential(multiplier=1, max=MAX_WAIT_SECONDS)
def check(retry_state):
"""
Waits with either a random exponential backoff or attempts to obey rate limits
that Braze returns.
:param tenacity.RetryCallState retry_state: Info about current retry invocation
:raises BrazeRateLimitError: If the rate limit reset time is too long
:returns: Time to wait, in seconds.
:rtype: float
"""
exc = retry_state.outcome.exception()
if isinstance(exc, BrazeRateLimitError):
sec_to_reset = exc.reset_epoch_s - float(time.time())
if sec_to_reset >= MAX_WAIT_SECONDS:
raise exc
return max(0.0, sec_to_reset)
return random_exp(retry_state=retry_state)
return check
class BrazeClient(object):
"""
Client for Appboy public API. Support user_track.
usage:
from braze.client import BrazeClient
client = BrazeClient(api_key='Place your API key here')
r = client.user_track(
attributes=[{
'external_id': '1',
'first_name': '<NAME>',
'last_name': '<NAME>',
'email': '<EMAIL>',
'status': 'Active',
}],
events=None,
purchases=None,
)
if r['success']:
print 'Success!'
print r
else:
print r['client_error']
print r['errors']
"""
def __init__(self, api_key, api_url=None, use_auth_header=False):
self.api_key = api_key
self.api_url = api_url or DEFAULT_API_URL
self.use_auth_header = use_auth_header
self.session = requests.Session()
self.request_url = ""
def user_track(self, attributes=None, events=None, purchases=None):
"""
Record custom events, user attributes, and purchases for users.
:param attributes: dict or list of user attributes dict (external_id, first_name, email)
:param events: dict or list of user events dict (external_id, app_id, name, time, properties)
:param purchases: dict or list of user purchases dict (external_id, app_id, product_id, currency, price)
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
if attributes is events is purchases is None:
raise ValueError(
"Bad arguments, at least one of attributes, events or purchases must be "
"non None"
)
self.request_url = self.api_url + USER_TRACK_ENDPOINT
payload = {}
if events:
payload["events"] = events
else:
payload["events"] = []
if attributes:
payload["attributes"] = attributes
else:
payload["attributes"] = []
if purchases:
payload["purchases"] = purchases
else:
payload["purchases"] = []
return self.__create_request(payload=payload)
def user_delete(self, external_ids):
"""
Delete user from braze.
:param external_ids: dict or list of user external ids
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
if not external_ids:
raise ValueError("No external ids specified")
self.request_url = self.api_url + USER_DELETE_ENDPOINT
payload = {"external_ids": external_ids}
return self.__create_request(payload=payload)
def user_export(self, external_ids=None, email=None, fields_to_export=None):
"""
Export user profiles from braze. One or both of ``external_ids`` or ``email``
must be provided. Braze allows exporting multiple user profiles through
``external_ids`` but only one with the ``email`` argument.
ref: https://www.braze.com/docs/developer_guide/rest_api/export/
:param list[str] external_ids:
optional list of braze external ids whose profiles are to be exported.
:param str email:
optional email for a braze profile whose data will be exported.
:param list[str] fields_to_export:
optional list of fields to export. If not specified braze exports all fields,
with a warning that this may slow down the API response time. See API doc for
list of valid fields.
:return: json dict response from braze
"""
if external_ids is email is None:
raise ValueError("At least one of external_ids or email must be specified")
self.request_url = self.api_url + USER_EXPORT_ENDPOINT
payload = {}
if external_ids:
payload["external_ids"] = external_ids
elif email:
payload["email_address"] = email
if fields_to_export:
payload["fields_to_export"] = fields_to_export
return self.__create_request(payload)
def __create_request(self, payload):
if not self.use_auth_header:
payload["api_key"] = self.api_key
response = {"errors": []}
r = self._post_request_with_retries(payload)
response.update(r.json())
response["status_code"] = r.status_code
message = response["message"]
response["success"] = (
message in ("success", "queued") and not response["errors"]
)
if message != "success":
# message contains the fatal error message from Braze
raise BrazeClientError(message, response["errors"])
if "status_code" not in response:
response["status_code"] = 0
if "message" not in response:
response["message"] = ""
return response
@retry(
reraise=True,
wait=_wait_random_exp_or_rate_limit(),
stop=stop_after_attempt(MAX_RETRIES),
)
def _post_request_with_retries(self, payload):
"""
:param dict payload:
:rtype: requests.Response
"""
headers = {}
# Prior to April 2020, API keys would be included as a part of the API request body or within the request URL
# as a parameter. Braze now has updated the way in which we read API keys. API keys are now set with the HTTP
# Authorization request header, making your API keys more secure.
# https://www.braze.com/docs/api/api_key/#how-can-i-use-it
if self.use_auth_header:
headers["Authorization"] = "Bearer {}".format(self.api_key)
r = self.session.post(
self.request_url, json=payload, timeout=2, headers=headers
)
# https://www.braze.com/docs/developer_guide/rest_api/messaging/#fatal-errors
if r.status_code == 429:
reset_epoch_s = float(r.headers.get("X-RateLimit-Reset", 0))
raise BrazeRateLimitError(reset_epoch_s)
elif str(r.status_code).startswith("5"):
raise BrazeInternalServerError
return r
def campaign_trigger_schedule_create(
self,
campaign_id,
schedule,
send_id=None,
broadcast=None,
audience=None,
recipients=None,
):
"""
Send Messages via API Triggered Delivery at a specified time
ref: https://www.braze.com/docs/developer_guide/rest_api/messaging/#schedule-endpoints
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
self.request_url = self.api_url + CAMPAIGN_TRIGGER_SCHEDULE_CREATE
payload = {"campaign_id": campaign_id, "schedule": schedule}
if send_id is not None:
payload["send_id"] = send_id
if broadcast is not None:
payload["broadcast"] = broadcast
if audience is not None:
payload["audience"] = audience
if recipients is not None:
payload["recipients"] = recipients
return self.__create_request(payload)
| import time
import requests
from tenacity import retry
from tenacity import stop_after_attempt
from tenacity import wait_random_exponential
DEFAULT_API_URL = "https://rest.iad-02.braze.com"
USER_TRACK_ENDPOINT = "/users/track"
USER_DELETE_ENDPOINT = "/users/delete"
USER_EXPORT_ENDPOINT = "/users/export/ids"
#: Endpoint for Scheduled Trigger Campaign Sends
CAMPAIGN_TRIGGER_SCHEDULE_CREATE = "/campaigns/trigger/schedule/create"
MAX_RETRIES = 3
# Max time to wait between API call retries
MAX_WAIT_SECONDS = 1.25
class BrazeClientError(Exception):
"""
Represents any Braze Client Error.
https://www.braze.com/docs/developer_guide/rest_api/user_data/#user-track-responses
"""
pass
class BrazeRateLimitError(BrazeClientError):
def __init__(self, reset_epoch_s):
"""
A rate limit error was encountered.
:param float reset_epoch_s: Unix timestamp for when the API may be called again.
"""
self.reset_epoch_s = reset_epoch_s
super(BrazeRateLimitError, self).__init__()
class BrazeInternalServerError(BrazeClientError):
"""
Used for Braze API responses where response code is of type 5XX suggesting
Braze side server errors.
"""
pass
def _wait_random_exp_or_rate_limit():
"""Creates a tenacity wait callback that accounts for explicit rate limits."""
random_exp = wait_random_exponential(multiplier=1, max=MAX_WAIT_SECONDS)
def check(retry_state):
"""
Waits with either a random exponential backoff or attempts to obey rate limits
that Braze returns.
:param tenacity.RetryCallState retry_state: Info about current retry invocation
:raises BrazeRateLimitError: If the rate limit reset time is too long
:returns: Time to wait, in seconds.
:rtype: float
"""
exc = retry_state.outcome.exception()
if isinstance(exc, BrazeRateLimitError):
sec_to_reset = exc.reset_epoch_s - float(time.time())
if sec_to_reset >= MAX_WAIT_SECONDS:
raise exc
return max(0.0, sec_to_reset)
return random_exp(retry_state=retry_state)
return check
class BrazeClient(object):
"""
Client for Appboy public API. Support user_track.
usage:
from braze.client import BrazeClient
client = BrazeClient(api_key='Place your API key here')
r = client.user_track(
attributes=[{
'external_id': '1',
'first_name': '<NAME>',
'last_name': '<NAME>',
'email': '<EMAIL>',
'status': 'Active',
}],
events=None,
purchases=None,
)
if r['success']:
print 'Success!'
print r
else:
print r['client_error']
print r['errors']
"""
def __init__(self, api_key, api_url=None, use_auth_header=False):
self.api_key = api_key
self.api_url = api_url or DEFAULT_API_URL
self.use_auth_header = use_auth_header
self.session = requests.Session()
self.request_url = ""
def user_track(self, attributes=None, events=None, purchases=None):
"""
Record custom events, user attributes, and purchases for users.
:param attributes: dict or list of user attributes dict (external_id, first_name, email)
:param events: dict or list of user events dict (external_id, app_id, name, time, properties)
:param purchases: dict or list of user purchases dict (external_id, app_id, product_id, currency, price)
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
if attributes is events is purchases is None:
raise ValueError(
"Bad arguments, at least one of attributes, events or purchases must be "
"non None"
)
self.request_url = self.api_url + USER_TRACK_ENDPOINT
payload = {}
if events:
payload["events"] = events
else:
payload["events"] = []
if attributes:
payload["attributes"] = attributes
else:
payload["attributes"] = []
if purchases:
payload["purchases"] = purchases
else:
payload["purchases"] = []
return self.__create_request(payload=payload)
def user_delete(self, external_ids):
"""
Delete user from braze.
:param external_ids: dict or list of user external ids
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
if not external_ids:
raise ValueError("No external ids specified")
self.request_url = self.api_url + USER_DELETE_ENDPOINT
payload = {"external_ids": external_ids}
return self.__create_request(payload=payload)
def user_export(self, external_ids=None, email=None, fields_to_export=None):
"""
Export user profiles from braze. One or both of ``external_ids`` or ``email``
must be provided. Braze allows exporting multiple user profiles through
``external_ids`` but only one with the ``email`` argument.
ref: https://www.braze.com/docs/developer_guide/rest_api/export/
:param list[str] external_ids:
optional list of braze external ids whose profiles are to be exported.
:param str email:
optional email for a braze profile whose data will be exported.
:param list[str] fields_to_export:
optional list of fields to export. If not specified braze exports all fields,
with a warning that this may slow down the API response time. See API doc for
list of valid fields.
:return: json dict response from braze
"""
if external_ids is email is None:
raise ValueError("At least one of external_ids or email must be specified")
self.request_url = self.api_url + USER_EXPORT_ENDPOINT
payload = {}
if external_ids:
payload["external_ids"] = external_ids
elif email:
payload["email_address"] = email
if fields_to_export:
payload["fields_to_export"] = fields_to_export
return self.__create_request(payload)
def __create_request(self, payload):
if not self.use_auth_header:
payload["api_key"] = self.api_key
response = {"errors": []}
r = self._post_request_with_retries(payload)
response.update(r.json())
response["status_code"] = r.status_code
message = response["message"]
response["success"] = (
message in ("success", "queued") and not response["errors"]
)
if message != "success":
# message contains the fatal error message from Braze
raise BrazeClientError(message, response["errors"])
if "status_code" not in response:
response["status_code"] = 0
if "message" not in response:
response["message"] = ""
return response
@retry(
reraise=True,
wait=_wait_random_exp_or_rate_limit(),
stop=stop_after_attempt(MAX_RETRIES),
)
def _post_request_with_retries(self, payload):
"""
:param dict payload:
:rtype: requests.Response
"""
headers = {}
# Prior to April 2020, API keys would be included as a part of the API request body or within the request URL
# as a parameter. Braze now has updated the way in which we read API keys. API keys are now set with the HTTP
# Authorization request header, making your API keys more secure.
# https://www.braze.com/docs/api/api_key/#how-can-i-use-it
if self.use_auth_header:
headers["Authorization"] = "Bearer {}".format(self.api_key)
r = self.session.post(
self.request_url, json=payload, timeout=2, headers=headers
)
# https://www.braze.com/docs/developer_guide/rest_api/messaging/#fatal-errors
if r.status_code == 429:
reset_epoch_s = float(r.headers.get("X-RateLimit-Reset", 0))
raise BrazeRateLimitError(reset_epoch_s)
elif str(r.status_code).startswith("5"):
raise BrazeInternalServerError
return r
def campaign_trigger_schedule_create(
self,
campaign_id,
schedule,
send_id=None,
broadcast=None,
audience=None,
recipients=None,
):
"""
Send Messages via API Triggered Delivery at a specified time
ref: https://www.braze.com/docs/developer_guide/rest_api/messaging/#schedule-endpoints
:return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""}
"""
self.request_url = self.api_url + CAMPAIGN_TRIGGER_SCHEDULE_CREATE
payload = {"campaign_id": campaign_id, "schedule": schedule}
if send_id is not None:
payload["send_id"] = send_id
if broadcast is not None:
payload["broadcast"] = broadcast
if audience is not None:
payload["audience"] = audience
if recipients is not None:
payload["recipients"] = recipients
return self.__create_request(payload) | en | 0.724982 | #: Endpoint for Scheduled Trigger Campaign Sends # Max time to wait between API call retries Represents any Braze Client Error. https://www.braze.com/docs/developer_guide/rest_api/user_data/#user-track-responses A rate limit error was encountered. :param float reset_epoch_s: Unix timestamp for when the API may be called again. Used for Braze API responses where response code is of type 5XX suggesting Braze side server errors. Creates a tenacity wait callback that accounts for explicit rate limits. Waits with either a random exponential backoff or attempts to obey rate limits that Braze returns. :param tenacity.RetryCallState retry_state: Info about current retry invocation :raises BrazeRateLimitError: If the rate limit reset time is too long :returns: Time to wait, in seconds. :rtype: float Client for Appboy public API. Support user_track. usage: from braze.client import BrazeClient client = BrazeClient(api_key='Place your API key here') r = client.user_track( attributes=[{ 'external_id': '1', 'first_name': '<NAME>', 'last_name': '<NAME>', 'email': '<EMAIL>', 'status': 'Active', }], events=None, purchases=None, ) if r['success']: print 'Success!' print r else: print r['client_error'] print r['errors'] Record custom events, user attributes, and purchases for users. :param attributes: dict or list of user attributes dict (external_id, first_name, email) :param events: dict or list of user events dict (external_id, app_id, name, time, properties) :param purchases: dict or list of user purchases dict (external_id, app_id, product_id, currency, price) :return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""} Delete user from braze. :param external_ids: dict or list of user external ids :return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""} Export user profiles from braze. 
One or both of ``external_ids`` or ``email`` must be provided. Braze allows exporting multiple user profiles through ``external_ids`` but only one with the ``email`` argument. ref: https://www.braze.com/docs/developer_guide/rest_api/export/ :param list[str] external_ids: optional list of braze external ids whose profiles are to be exported. :param str email: optional email for a braze profile whose data will be exported. :param list[str] fields_to_export: optional list of fields to export. If not specified braze exports all fields, with a warning that this may slow down the API response time. See API doc for list of valid fields. :return: json dict response from braze # message contains the fatal error message from Braze :param dict payload: :rtype: requests.Response # Prior to April 2020, API keys would be included as a part of the API request body or within the request URL # as a parameter. Braze now has updated the way in which we read API keys. API keys are now set with the HTTP # Authorization request header, making your API keys more secure. # https://www.braze.com/docs/api/api_key/#how-can-i-use-it # https://www.braze.com/docs/developer_guide/rest_api/messaging/#fatal-errors Send Messages via API Triggered Delivery at a specified time ref: https://www.braze.com/docs/developer_guide/rest_api/messaging/#schedule-endpoints :return: json dict response, for example: {"message": "success", "errors": [], "client_error": ""} | 2.799436 | 3 |
books_library/books/apis/views.py | Ilyes-Hammadi/books-library | 9 | 6613102 | <reponame>Ilyes-Hammadi/books-library<gh_stars>1-10
from django.urls import reverse
from rest_framework import viewsets, filters
from rest_framework.decorators import api_view
from rest_framework.response import Response
from books_library.books.apis.paginators import BookSearchSetPagination
from books_library.navigation.models import BookHistory
from books_library.navigation.sentiment import get_sentiment, POSITIVE
from books_library.recomendation.views import get_rec
from .serializers import BookSerializer, CommentSerializer, CategorySerializer, BookSearchSerializer, \
BookSimilarSerializer
from ..models import Book, Comment, Category
from books_library.users.models import User
class BookViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
def get_object(self):
"""Return the requested book and save the navigation"""
# Get the object from the super class method
book = super(BookViewSet, self).get_object()
# If the user is logged in, save the book history actions
if self.request.user.is_authenticated():
# Get the logged in user
user = self.request.user
# If the user has not viewed the book, create a new BookAction model and save
# in the user history field
if not user.history.books_action.filter(book=book).exists():
book_actions = BookHistory(book=book, viewed=True)
book_actions.score += 1
book_actions.save()
user.history.books_action.add(book_actions)
else:
books_action = user.history.books_action.get(book=book)
if not books_action.viewed:
books_action.viewed = True
books_action.score += 1
books_action.save()
return book
class BookSearchViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSearchSerializer
pagination_class = BookSearchSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = ('^name', '$name')
class BookSimilarViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
def get_queryset(self):
books = super(BookSimilarViewSet, self).get_queryset()
try:
book_id = self.request.GET.get('book_id')
book_ids = get_rec(book_id)
return books.filter(id__in=book_ids)
except:
return books.all()[:10]
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
@api_view(['GET'])
def book_like(request, id):
"""Take the id of the book to like"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if not book_history.liked:
book_history.liked = True
book_history.score += 1
book_history.save()
book.likes.add(user)
book.save()
else:
res = Response({'message': "Can't like a book more then one time"})
res.status_code = 400
return res
else:
book_history = BookHistory(book=book)
book_history.liked = True
book_history.score += 1
book_history.save()
user.history.books_action.add(book_history)
# Increse the like by one
book.likes.add(user)
book.save()
return Response({'message': 'book {0} is liked by the user {1}'.format(book.name, user.username),
'likes': book.likes.count()})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def book_dislike(request, id):
"""Take the id of the book to dislike"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if book_history.liked:
book_history.liked = False
book.likes.remove(user)
book.save()
else:
res = Response({'message': "Can't dislike a book more then one time"})
res.status_code = 400
return res
book_history.save()
return Response({'message': 'book {0} is disliked by the user {1}'.format(book.name, user.username),
'likes': book.likes.count()})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def book_bookmark(request, id):
"""Take the id of the book to bookmark"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if not book_history.bookmarked:
book_history.bookmarked = True
book_history.score += 1
book_history.save()
else:
res = Response({'message': "Can't bookmark a book more then one time"})
res.status_code = 400
return res
else:
book_history = BookHistory(book=book)
book_history.bookmarked = True
book_history.score += 1
book_history.save()
user.history.books_action.add(book_history)
return Response({'message': 'book {0} is bookmarked by the user {1}'.format(book.name, user.username)})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def add_comment(request):
try:
username = request.GET.get('username')
book_id = request.GET.get('book_id')
comment_content = request.GET.get('comment_content')
print('User {}'.format(username))
print('Book {}'.format(book_id))
print('Comment {}'.format(comment_content))
user = User.objects.get(username=username)
book = Book.objects.get(id=book_id)
comment = Comment(user=user, content=comment_content)
sentiment = get_sentiment(comment.content)
comment.sentiment = sentiment
comment.save()
book.comments.add(comment)
if sentiment == POSITIVE:
books_action = request.user.history.books_action.get(book=book)
books_action.score += 1
books_action.save()
book.comments.add(comment)
users = User.objects.filter(history__books_action__book=book).distinct().exclude(
username__exact=request.user.username)
link = reverse('books:detail', kwargs={'slug': book.slug}) + '#{0}'.format(comment.id)
content = 'has commented on {1}'.format(request.user.username, book.name)
for user in users:
user.notify(sender=request.user, content=content, link=link)
return Response({'status': 'success'})
except:
return Response({'status': 'error'})
| from django.urls import reverse
from rest_framework import viewsets, filters
from rest_framework.decorators import api_view
from rest_framework.response import Response
from books_library.books.apis.paginators import BookSearchSetPagination
from books_library.navigation.models import BookHistory
from books_library.navigation.sentiment import get_sentiment, POSITIVE
from books_library.recomendation.views import get_rec
from .serializers import BookSerializer, CommentSerializer, CategorySerializer, BookSearchSerializer, \
BookSimilarSerializer
from ..models import Book, Comment, Category
from books_library.users.models import User
class BookViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
def get_object(self):
"""Return the requested book and save the navigation"""
# Get the object from the super class method
book = super(BookViewSet, self).get_object()
# If the user is logged in, save the book history actions
if self.request.user.is_authenticated():
# Get the logged in user
user = self.request.user
# If the user has not viewed the book, create a new BookAction model and save
# in the user history field
if not user.history.books_action.filter(book=book).exists():
book_actions = BookHistory(book=book, viewed=True)
book_actions.score += 1
book_actions.save()
user.history.books_action.add(book_actions)
else:
books_action = user.history.books_action.get(book=book)
if not books_action.viewed:
books_action.viewed = True
books_action.score += 1
books_action.save()
return book
class BookSearchViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSearchSerializer
pagination_class = BookSearchSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = ('^name', '$name')
class BookSimilarViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
def get_queryset(self):
books = super(BookSimilarViewSet, self).get_queryset()
try:
book_id = self.request.GET.get('book_id')
book_ids = get_rec(book_id)
return books.filter(id__in=book_ids)
except:
return books.all()[:10]
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
@api_view(['GET'])
def book_like(request, id):
"""Take the id of the book to like"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if not book_history.liked:
book_history.liked = True
book_history.score += 1
book_history.save()
book.likes.add(user)
book.save()
else:
res = Response({'message': "Can't like a book more then one time"})
res.status_code = 400
return res
else:
book_history = BookHistory(book=book)
book_history.liked = True
book_history.score += 1
book_history.save()
user.history.books_action.add(book_history)
# Increse the like by one
book.likes.add(user)
book.save()
return Response({'message': 'book {0} is liked by the user {1}'.format(book.name, user.username),
'likes': book.likes.count()})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def book_dislike(request, id):
"""Take the id of the book to dislike"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if book_history.liked:
book_history.liked = False
book.likes.remove(user)
book.save()
else:
res = Response({'message': "Can't dislike a book more then one time"})
res.status_code = 400
return res
book_history.save()
return Response({'message': 'book {0} is disliked by the user {1}'.format(book.name, user.username),
'likes': book.likes.count()})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def book_bookmark(request, id):
"""Take the id of the book to bookmark"""
try:
# Get the logged in user
user = request.user
# Get the Book to read
book = Book.objects.get(pk=id)
# If the book is in the user history
if user.history.books_action.filter(book=book).exists():
book_history = user.history.books_action.get(book=book)
if not book_history.bookmarked:
book_history.bookmarked = True
book_history.score += 1
book_history.save()
else:
res = Response({'message': "Can't bookmark a book more then one time"})
res.status_code = 400
return res
else:
book_history = BookHistory(book=book)
book_history.bookmarked = True
book_history.score += 1
book_history.save()
user.history.books_action.add(book_history)
return Response({'message': 'book {0} is bookmarked by the user {1}'.format(book.name, user.username)})
except:
res = Response({'message': 'error'})
res.status_code = 400
return res
@api_view(['GET'])
def add_comment(request):
try:
username = request.GET.get('username')
book_id = request.GET.get('book_id')
comment_content = request.GET.get('comment_content')
print('User {}'.format(username))
print('Book {}'.format(book_id))
print('Comment {}'.format(comment_content))
user = User.objects.get(username=username)
book = Book.objects.get(id=book_id)
comment = Comment(user=user, content=comment_content)
sentiment = get_sentiment(comment.content)
comment.sentiment = sentiment
comment.save()
book.comments.add(comment)
if sentiment == POSITIVE:
books_action = request.user.history.books_action.get(book=book)
books_action.score += 1
books_action.save()
book.comments.add(comment)
users = User.objects.filter(history__books_action__book=book).distinct().exclude(
username__exact=request.user.username)
link = reverse('books:detail', kwargs={'slug': book.slug}) + '#{0}'.format(comment.id)
content = 'has commented on {1}'.format(request.user.username, book.name)
for user in users:
user.notify(sender=request.user, content=content, link=link)
return Response({'status': 'success'})
except:
return Response({'status': 'error'}) | en | 0.922752 | Return the requested book and save the navigation # Get the object from the super class method # If the user is logged in, save the book history actions # Get the logged in user # If the user has not viewed the book, create a new BookAction model and save # in the user history field Take the id of the book to like # Get the logged in user # Get the Book to read # If the book is in the user history # Increse the like by one Take the id of the book to dislike # Get the logged in user # Get the Book to read # If the book is in the user history Take the id of the book to bookmark # Get the logged in user # Get the Book to read # If the book is in the user history | 2.323443 | 2 |
pint/facets/formatting/objects.py | fernandezc/pint | 0 | 6613103 | """
pint.facets.formatting.objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2022 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import annotations
import re
from typing import Any
from ...compat import babel_parse, ndarray, np
from ...formatting import (
_pretty_fmt_exponent,
extract_custom_flags,
format_unit,
ndarray_to_latex,
remove_custom_flags,
siunitx_format_unit,
split_format,
)
from ...util import iterable
from ..plain import UnitsContainer
class FormattingQuantity:
_exp_pattern = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)")
def __format__(self, spec: str) -> str:
if self._REGISTRY.fmt_locale is not None:
return self.format_babel(spec)
mspec, uspec = split_format(
spec, self.default_format, self._REGISTRY.separate_format_defaults
)
# If Compact is selected, do it at the beginning
if "#" in spec:
# TODO: don't replace '#'
mspec = mspec.replace("#", "")
uspec = uspec.replace("#", "")
obj = self.to_compact()
else:
obj = self
if "L" in uspec:
allf = plain_allf = r"{}\ {}"
elif "H" in uspec:
allf = plain_allf = "{} {}"
if iterable(obj.magnitude):
# Use HTML table instead of plain text template for array-likes
allf = (
"<table><tbody>"
"<tr><th>Magnitude</th>"
"<td style='text-align:left;'>{}</td></tr>"
"<tr><th>Units</th><td style='text-align:left;'>{}</td></tr>"
"</tbody></table>"
)
else:
allf = plain_allf = "{} {}"
if "Lx" in uspec:
# the LaTeX siunitx code
# TODO: add support for extracting options
opts = ""
ustr = siunitx_format_unit(obj.units._units, obj._REGISTRY)
allf = r"\SI[%s]{{{}}}{{{}}}" % opts
else:
# Hand off to unit formatting
# TODO: only use `uspec` after completing the deprecation cycle
ustr = format(obj.units, mspec + uspec)
# mspec = remove_custom_flags(spec)
if "H" in uspec:
# HTML formatting
if hasattr(obj.magnitude, "_repr_html_"):
# If magnitude has an HTML repr, nest it within Pint's
mstr = obj.magnitude._repr_html_()
else:
if isinstance(self.magnitude, ndarray):
# Use custom ndarray text formatting with monospace font
formatter = "{{:{}}}".format(mspec)
# Need to override for scalars, which are detected as iterable,
# and don't respond to printoptions.
if self.magnitude.ndim == 0:
allf = plain_allf = "{} {}"
mstr = formatter.format(obj.magnitude)
else:
with np.printoptions(
formatter={"float_kind": formatter.format}
):
mstr = (
"<pre>"
+ format(obj.magnitude).replace("\n", "<br>")
+ "</pre>"
)
elif not iterable(obj.magnitude):
# Use plain text for scalars
mstr = format(obj.magnitude, mspec)
else:
# Use monospace font for other array-likes
mstr = (
"<pre>"
+ format(obj.magnitude, mspec).replace("\n", "<br>")
+ "</pre>"
)
elif isinstance(self.magnitude, ndarray):
if "L" in uspec:
# Use ndarray LaTeX special formatting
mstr = ndarray_to_latex(obj.magnitude, mspec)
else:
# Use custom ndarray text formatting--need to handle scalars differently
# since they don't respond to printoptions
formatter = "{{:{}}}".format(mspec)
if obj.magnitude.ndim == 0:
mstr = formatter.format(obj.magnitude)
else:
with np.printoptions(formatter={"float_kind": formatter.format}):
mstr = format(obj.magnitude).replace("\n", "")
else:
mstr = format(obj.magnitude, mspec).replace("\n", "")
if "L" in uspec and "Lx" not in uspec:
mstr = self._exp_pattern.sub(r"\1\\times 10^{\2\3}", mstr)
elif "H" in uspec or "P" in uspec:
m = self._exp_pattern.match(mstr)
_exp_formatter = (
_pretty_fmt_exponent if "P" in uspec else lambda s: f"<sup>{s}</sup>"
)
if m:
exp = int(m.group(2) + m.group(3))
mstr = self._exp_pattern.sub(r"\1×10" + _exp_formatter(exp), mstr)
if allf == plain_allf and ustr.startswith("1 /"):
# Write e.g. "3 / s" instead of "3 1 / s"
ustr = ustr[2:]
return allf.format(mstr, ustr).strip()
def _repr_pretty_(self, p, cycle):
if cycle:
super()._repr_pretty_(p, cycle)
else:
p.pretty(self.magnitude)
p.text(" ")
p.pretty(self.units)
def format_babel(self, spec: str = "", **kwspec: Any) -> str:
spec = spec or self.default_format
# standard cases
if "#" in spec:
spec = spec.replace("#", "")
obj = self.to_compact()
else:
obj = self
kwspec = dict(kwspec)
if "length" in kwspec:
kwspec["babel_length"] = kwspec.pop("length")
loc = kwspec.get("locale", self._REGISTRY.fmt_locale)
if loc is None:
raise ValueError("Provide a `locale` value to localize translation.")
kwspec["locale"] = babel_parse(loc)
kwspec["babel_plural_form"] = kwspec["locale"].plural_form(obj.magnitude)
return "{} {}".format(
format(obj.magnitude, remove_custom_flags(spec)),
obj.units.format_babel(spec, **kwspec),
).replace("\n", "")
def __str__(self) -> str:
if self._REGISTRY.fmt_locale is not None:
return self.format_babel()
return format(self)
class FormattingUnit:
def __str__(self):
return format(self)
def __format__(self, spec) -> str:
_, uspec = split_format(
spec, self.default_format, self._REGISTRY.separate_format_defaults
)
if "~" in uspec:
if not self._units:
return ""
units = UnitsContainer(
dict(
(self._REGISTRY._get_symbol(key), value)
for key, value in self._units.items()
)
)
uspec = uspec.replace("~", "")
else:
units = self._units
return format_unit(units, uspec, registry=self._REGISTRY)
def format_babel(self, spec="", locale=None, **kwspec: Any) -> str:
spec = spec or extract_custom_flags(self.default_format)
if "~" in spec:
if self.dimensionless:
return ""
units = UnitsContainer(
dict(
(self._REGISTRY._get_symbol(key), value)
for key, value in self._units.items()
)
)
spec = spec.replace("~", "")
else:
units = self._units
locale = self._REGISTRY.fmt_locale if locale is None else locale
if locale is None:
raise ValueError("Provide a `locale` value to localize translation.")
else:
kwspec["locale"] = babel_parse(locale)
return units.format_babel(spec, registry=self._REGISTRY, **kwspec)
| """
pint.facets.formatting.objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2022 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import annotations
import re
from typing import Any
from ...compat import babel_parse, ndarray, np
from ...formatting import (
_pretty_fmt_exponent,
extract_custom_flags,
format_unit,
ndarray_to_latex,
remove_custom_flags,
siunitx_format_unit,
split_format,
)
from ...util import iterable
from ..plain import UnitsContainer
class FormattingQuantity:
_exp_pattern = re.compile(r"([0-9]\.?[0-9]*)e(-?)\+?0*([0-9]+)")
def __format__(self, spec: str) -> str:
if self._REGISTRY.fmt_locale is not None:
return self.format_babel(spec)
mspec, uspec = split_format(
spec, self.default_format, self._REGISTRY.separate_format_defaults
)
# If Compact is selected, do it at the beginning
if "#" in spec:
# TODO: don't replace '#'
mspec = mspec.replace("#", "")
uspec = uspec.replace("#", "")
obj = self.to_compact()
else:
obj = self
if "L" in uspec:
allf = plain_allf = r"{}\ {}"
elif "H" in uspec:
allf = plain_allf = "{} {}"
if iterable(obj.magnitude):
# Use HTML table instead of plain text template for array-likes
allf = (
"<table><tbody>"
"<tr><th>Magnitude</th>"
"<td style='text-align:left;'>{}</td></tr>"
"<tr><th>Units</th><td style='text-align:left;'>{}</td></tr>"
"</tbody></table>"
)
else:
allf = plain_allf = "{} {}"
if "Lx" in uspec:
# the LaTeX siunitx code
# TODO: add support for extracting options
opts = ""
ustr = siunitx_format_unit(obj.units._units, obj._REGISTRY)
allf = r"\SI[%s]{{{}}}{{{}}}" % opts
else:
# Hand off to unit formatting
# TODO: only use `uspec` after completing the deprecation cycle
ustr = format(obj.units, mspec + uspec)
# mspec = remove_custom_flags(spec)
if "H" in uspec:
# HTML formatting
if hasattr(obj.magnitude, "_repr_html_"):
# If magnitude has an HTML repr, nest it within Pint's
mstr = obj.magnitude._repr_html_()
else:
if isinstance(self.magnitude, ndarray):
# Use custom ndarray text formatting with monospace font
formatter = "{{:{}}}".format(mspec)
# Need to override for scalars, which are detected as iterable,
# and don't respond to printoptions.
if self.magnitude.ndim == 0:
allf = plain_allf = "{} {}"
mstr = formatter.format(obj.magnitude)
else:
with np.printoptions(
formatter={"float_kind": formatter.format}
):
mstr = (
"<pre>"
+ format(obj.magnitude).replace("\n", "<br>")
+ "</pre>"
)
elif not iterable(obj.magnitude):
# Use plain text for scalars
mstr = format(obj.magnitude, mspec)
else:
# Use monospace font for other array-likes
mstr = (
"<pre>"
+ format(obj.magnitude, mspec).replace("\n", "<br>")
+ "</pre>"
)
elif isinstance(self.magnitude, ndarray):
if "L" in uspec:
# Use ndarray LaTeX special formatting
mstr = ndarray_to_latex(obj.magnitude, mspec)
else:
# Use custom ndarray text formatting--need to handle scalars differently
# since they don't respond to printoptions
formatter = "{{:{}}}".format(mspec)
if obj.magnitude.ndim == 0:
mstr = formatter.format(obj.magnitude)
else:
with np.printoptions(formatter={"float_kind": formatter.format}):
mstr = format(obj.magnitude).replace("\n", "")
else:
mstr = format(obj.magnitude, mspec).replace("\n", "")
if "L" in uspec and "Lx" not in uspec:
mstr = self._exp_pattern.sub(r"\1\\times 10^{\2\3}", mstr)
elif "H" in uspec or "P" in uspec:
m = self._exp_pattern.match(mstr)
_exp_formatter = (
_pretty_fmt_exponent if "P" in uspec else lambda s: f"<sup>{s}</sup>"
)
if m:
exp = int(m.group(2) + m.group(3))
mstr = self._exp_pattern.sub(r"\1×10" + _exp_formatter(exp), mstr)
if allf == plain_allf and ustr.startswith("1 /"):
# Write e.g. "3 / s" instead of "3 1 / s"
ustr = ustr[2:]
return allf.format(mstr, ustr).strip()
def _repr_pretty_(self, p, cycle):
if cycle:
super()._repr_pretty_(p, cycle)
else:
p.pretty(self.magnitude)
p.text(" ")
p.pretty(self.units)
def format_babel(self, spec: str = "", **kwspec: Any) -> str:
spec = spec or self.default_format
# standard cases
if "#" in spec:
spec = spec.replace("#", "")
obj = self.to_compact()
else:
obj = self
kwspec = dict(kwspec)
if "length" in kwspec:
kwspec["babel_length"] = kwspec.pop("length")
loc = kwspec.get("locale", self._REGISTRY.fmt_locale)
if loc is None:
raise ValueError("Provide a `locale` value to localize translation.")
kwspec["locale"] = babel_parse(loc)
kwspec["babel_plural_form"] = kwspec["locale"].plural_form(obj.magnitude)
return "{} {}".format(
format(obj.magnitude, remove_custom_flags(spec)),
obj.units.format_babel(spec, **kwspec),
).replace("\n", "")
def __str__(self) -> str:
if self._REGISTRY.fmt_locale is not None:
return self.format_babel()
return format(self)
class FormattingUnit:
def __str__(self):
return format(self)
def __format__(self, spec) -> str:
_, uspec = split_format(
spec, self.default_format, self._REGISTRY.separate_format_defaults
)
if "~" in uspec:
if not self._units:
return ""
units = UnitsContainer(
dict(
(self._REGISTRY._get_symbol(key), value)
for key, value in self._units.items()
)
)
uspec = uspec.replace("~", "")
else:
units = self._units
return format_unit(units, uspec, registry=self._REGISTRY)
def format_babel(self, spec="", locale=None, **kwspec: Any) -> str:
spec = spec or extract_custom_flags(self.default_format)
if "~" in spec:
if self.dimensionless:
return ""
units = UnitsContainer(
dict(
(self._REGISTRY._get_symbol(key), value)
for key, value in self._units.items()
)
)
spec = spec.replace("~", "")
else:
units = self._units
locale = self._REGISTRY.fmt_locale if locale is None else locale
if locale is None:
raise ValueError("Provide a `locale` value to localize translation.")
else:
kwspec["locale"] = babel_parse(locale)
return units.format_babel(spec, registry=self._REGISTRY, **kwspec)
| en | 0.636993 | pint.facets.formatting.objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: 2022 by Pint Authors, see AUTHORS for more details. :license: BSD, see LICENSE for more details. # If Compact is selected, do it at the beginning # TODO: don't replace '#' # Use HTML table instead of plain text template for array-likes # the LaTeX siunitx code # TODO: add support for extracting options # Hand off to unit formatting # TODO: only use `uspec` after completing the deprecation cycle # mspec = remove_custom_flags(spec) # HTML formatting # If magnitude has an HTML repr, nest it within Pint's # Use custom ndarray text formatting with monospace font # Need to override for scalars, which are detected as iterable, # and don't respond to printoptions. # Use plain text for scalars # Use monospace font for other array-likes # Use ndarray LaTeX special formatting # Use custom ndarray text formatting--need to handle scalars differently # since they don't respond to printoptions # Write e.g. "3 / s" instead of "3 1 / s" # standard cases | 2.281587 | 2 |
info/urls.py | yaquake/mcsite | 0 | 6613104 | from django.urls import path
from .models import *
from .views import (
why_us,
send_appraisal,
tenancy,
ContactView,
ApplyView,
AboutView,
ServicesListView
)
app_name = 'info'
urlpatterns = [
path('whyus/', why_us, name='whyus'),
path('appraisal/', send_appraisal, name='appraisal'),
path('tenancy_guide/', tenancy, name='tenancy'),
path('contact/', ContactView.as_view(), name='contact'),
path('apply/', ApplyView.as_view(), name='apply'),
path('about/', AboutView.as_view(), name='about'),
path('services/', ServicesListView.as_view(), name='services'),
]
| from django.urls import path
from .models import *
from .views import (
why_us,
send_appraisal,
tenancy,
ContactView,
ApplyView,
AboutView,
ServicesListView
)
app_name = 'info'
urlpatterns = [
path('whyus/', why_us, name='whyus'),
path('appraisal/', send_appraisal, name='appraisal'),
path('tenancy_guide/', tenancy, name='tenancy'),
path('contact/', ContactView.as_view(), name='contact'),
path('apply/', ApplyView.as_view(), name='apply'),
path('about/', AboutView.as_view(), name='about'),
path('services/', ServicesListView.as_view(), name='services'),
]
| none | 1 | 1.800521 | 2 | |
archiTop/deck_builder/multi_card_deck_builder.py | Julian-Brendel/archiTop | 2 | 6613105 | """Sourcefile containing deck builder class for decks with multiple cards"""
from collections import OrderedDict
from copy import deepcopy
from archiTop.base_classes import DeckBuilder
from archiTop.resources import (card_asset_template, card_deck_template,
card_template)
from archiTop.scryfall.data_types import ScryfallCard
class MultiCardDeckBuilder(DeckBuilder):
""""MultiCardDeckBuilder class implementing abstract DeckBuilder class.
Used for card decks with multiple cards."""
def __init__(self, *args):
self.card_deck_json = deepcopy(card_deck_template)
self.contained_objects = []
self.deck_ids = []
self.custom_deck = OrderedDict()
super().__init__(*args)
def __repr__(self):
unique_cards = len(set(self.deck_ids))
return f'CardDeck({len(self.deck_ids)} total cards, {unique_cards} unique cards)'
def _populate_card_template(self, card: ScryfallCard):
"""Creates a new TableTop card object and fills information from card class.
Each card in deck needs one card object, therefore cards with quantity > 1 will be
duplicated.
Same cards, even when duplicated will keep the same ID.
Once populated, card object is inserted into contained_objects and id added to deck_ids.
Args:
card: Card to create card object for
"""
card_json = deepcopy(card_template)
card_json['CardID'] = self.current_card_id
card_json['Nickname'] = card.tabletop_name
# create one object per quantity
for _ in range(card.quantity):
self.contained_objects.append(card_json)
self.deck_ids.append(self.current_card_id)
self.current_card_id += 100
def _populate_card_asset_template(self, card: ScryfallCard):
"""Creates a new TableTop card asset object and fills with information from card class.
There should only exist on card asset template for each unique card.
Therefor cards with quantity > 1 do only get one card asset.
Asset matching is done with insertion order of asset objects.
Order in the ContainedObjects, DeckID's must match the order of card assets.
Once populated, card asset is inserted in custom deck and asset id is incremented.
Args:
card: Card to create asset for
"""
card_asset_json = deepcopy(card_asset_template)
card_asset_json['FaceURL'] = card.image_url
card_asset_json['BackURL'] = self.card_back_url
self.custom_deck[str(self.current_card_asset_id)] = card_asset_json
self.current_card_asset_id += 1
def create_deck(self) -> dict:
"""Create the json structure for the card deck containing multiple cards.
Returns:
TableTop card deck json containing multiple cards
"""
for card in self.cards:
self._populate_card_template(card)
self._populate_card_asset_template(card)
self.card_deck_json['ContainedObjects'] = self.contained_objects
self.card_deck_json['DeckIDs'] = self.deck_ids
self.card_deck_json['CustomDeck'] = self.custom_deck
self.card_deck_json['Transform']['rotZ'] = 180 if self.hidden else 0
return self.card_deck_json
| """Sourcefile containing deck builder class for decks with multiple cards"""
from collections import OrderedDict
from copy import deepcopy
from archiTop.base_classes import DeckBuilder
from archiTop.resources import (card_asset_template, card_deck_template,
card_template)
from archiTop.scryfall.data_types import ScryfallCard
class MultiCardDeckBuilder(DeckBuilder):
""""MultiCardDeckBuilder class implementing abstract DeckBuilder class.
Used for card decks with multiple cards."""
def __init__(self, *args):
self.card_deck_json = deepcopy(card_deck_template)
self.contained_objects = []
self.deck_ids = []
self.custom_deck = OrderedDict()
super().__init__(*args)
def __repr__(self):
unique_cards = len(set(self.deck_ids))
return f'CardDeck({len(self.deck_ids)} total cards, {unique_cards} unique cards)'
def _populate_card_template(self, card: ScryfallCard):
"""Creates a new TableTop card object and fills information from card class.
Each card in deck needs one card object, therefore cards with quantity > 1 will be
duplicated.
Same cards, even when duplicated will keep the same ID.
Once populated, card object is inserted into contained_objects and id added to deck_ids.
Args:
card: Card to create card object for
"""
card_json = deepcopy(card_template)
card_json['CardID'] = self.current_card_id
card_json['Nickname'] = card.tabletop_name
# create one object per quantity
for _ in range(card.quantity):
self.contained_objects.append(card_json)
self.deck_ids.append(self.current_card_id)
self.current_card_id += 100
def _populate_card_asset_template(self, card: ScryfallCard):
"""Creates a new TableTop card asset object and fills with information from card class.
There should only exist on card asset template for each unique card.
Therefor cards with quantity > 1 do only get one card asset.
Asset matching is done with insertion order of asset objects.
Order in the ContainedObjects, DeckID's must match the order of card assets.
Once populated, card asset is inserted in custom deck and asset id is incremented.
Args:
card: Card to create asset for
"""
card_asset_json = deepcopy(card_asset_template)
card_asset_json['FaceURL'] = card.image_url
card_asset_json['BackURL'] = self.card_back_url
self.custom_deck[str(self.current_card_asset_id)] = card_asset_json
self.current_card_asset_id += 1
def create_deck(self) -> dict:
"""Create the json structure for the card deck containing multiple cards.
Returns:
TableTop card deck json containing multiple cards
"""
for card in self.cards:
self._populate_card_template(card)
self._populate_card_asset_template(card)
self.card_deck_json['ContainedObjects'] = self.contained_objects
self.card_deck_json['DeckIDs'] = self.deck_ids
self.card_deck_json['CustomDeck'] = self.custom_deck
self.card_deck_json['Transform']['rotZ'] = 180 if self.hidden else 0
return self.card_deck_json
| en | 0.878534 | Sourcefile containing deck builder class for decks with multiple cards "MultiCardDeckBuilder class implementing abstract DeckBuilder class. Used for card decks with multiple cards. Creates a new TableTop card object and fills information from card class. Each card in deck needs one card object, therefore cards with quantity > 1 will be duplicated. Same cards, even when duplicated will keep the same ID. Once populated, card object is inserted into contained_objects and id added to deck_ids. Args: card: Card to create card object for # create one object per quantity Creates a new TableTop card asset object and fills with information from card class. There should only exist on card asset template for each unique card. Therefor cards with quantity > 1 do only get one card asset. Asset matching is done with insertion order of asset objects. Order in the ContainedObjects, DeckID's must match the order of card assets. Once populated, card asset is inserted in custom deck and asset id is incremented. Args: card: Card to create asset for Create the json structure for the card deck containing multiple cards. Returns: TableTop card deck json containing multiple cards | 3.102113 | 3 |
status.py | scorot/indi_dome_scripts | 0 | 6613106 | <reponame>scorot/indi_dome_scripts<filename>status.py
#!/usr/bin/python
#
# Status script for INDI Dome Scripting Gateway
#
# Arguments: file name to save current state and coordinates (parked ra dec)
# Exit code: 0 for success, 1 for failure
#
import sys
script, path = sys.argv
coordinates = open('/tmp/indi-status', 'r')
status = open(path, 'w')
status.truncate()
status.write(coordinates.readline())
status.close()
sys.exit(0)
| #!/usr/bin/python
#
# Status script for INDI Dome Scripting Gateway
#
# Arguments: file name to save current state and coordinates (parked ra dec)
# Exit code: 0 for success, 1 for failure
#
import sys
script, path = sys.argv
coordinates = open('/tmp/indi-status', 'r')
status = open(path, 'w')
status.truncate()
status.write(coordinates.readline())
status.close()
sys.exit(0) | en | 0.695725 | #!/usr/bin/python # # Status script for INDI Dome Scripting Gateway # # Arguments: file name to save current state and coordinates (parked ra dec) # Exit code: 0 for success, 1 for failure # | 2.262289 | 2 |
reference/multivariate_dataset_examples.py | jeffrey82221/gluonts_fund_price_forecast | 1 | 6613107 | <filename>reference/multivariate_dataset_examples.py
'''
Organize nav curves of multiple funds into the multi-timeseries objects offered by gluonts.
'''
import os
import inspect
import sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from gluonts.dataset.common import ListDataset
import numpy as np
import pandas as pd
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.dataset.repository.datasets import get_dataset
# exchange_rate_nips, electricity_nips, traffic_nips, solar_nips,
# wiki-rolling_nips, ## taxi_30min is buggy still
dataset = get_dataset("electricity_nips", regenerate=True)
train_grouper = MultivariateGrouper(max_target_dim=min(
2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test) / len(dataset.train)),
max_target_dim=min(2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
dataset_train = train_grouper(dataset.train)
dataset_test = test_grouper(dataset.test)
print('Example Success')
ts_jsons = []
for i in range(10):
ts_jsons.append(
{
"start": pd.Timestamp('2021-01-01', freq='D'),
"target": np.arange(300 + i),
}
)
dataset = ListDataset(ts_jsons, freq='D')
print(next(iter(dataset)))
train_grouper = MultivariateGrouper(max_target_dim=10)
grouped_dataset = train_grouper(dataset)
print(len(grouped_dataset))
print(next(iter(grouped_dataset)))
print('Own version success')
| <filename>reference/multivariate_dataset_examples.py
'''
Organize nav curves of multiple funds into the multi-timeseries objects offered by gluonts.
'''
import os
import inspect
import sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from gluonts.dataset.common import ListDataset
import numpy as np
import pandas as pd
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.dataset.repository.datasets import get_dataset
# exchange_rate_nips, electricity_nips, traffic_nips, solar_nips,
# wiki-rolling_nips, ## taxi_30min is buggy still
dataset = get_dataset("electricity_nips", regenerate=True)
train_grouper = MultivariateGrouper(max_target_dim=min(
2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test) / len(dataset.train)),
max_target_dim=min(2000, int(dataset.metadata.feat_static_cat[0].cardinality)))
dataset_train = train_grouper(dataset.train)
dataset_test = test_grouper(dataset.test)
print('Example Success')
ts_jsons = []
for i in range(10):
ts_jsons.append(
{
"start": pd.Timestamp('2021-01-01', freq='D'),
"target": np.arange(300 + i),
}
)
dataset = ListDataset(ts_jsons, freq='D')
print(next(iter(dataset)))
train_grouper = MultivariateGrouper(max_target_dim=10)
grouped_dataset = train_grouper(dataset)
print(len(grouped_dataset))
print(next(iter(grouped_dataset)))
print('Own version success')
| en | 0.817116 | Organize nav curves of multiple funds into the multi-timeseries objects offered by gluonts. # exchange_rate_nips, electricity_nips, traffic_nips, solar_nips, # wiki-rolling_nips, ## taxi_30min is buggy still | 2.619112 | 3 |
server/__init__.py | nathanIL/openews | 2 | 6613108 | import os
from flask import Flask
from flask.ext.pymongo import PyMongo
server_app = Flask(__name__)
if 'OPENEWS_DEVELOPMENT_ENV' in os.environ:
server_app.config.from_object('config-development')
else:
server_app.config.from_object('config-production')
mongo = PyMongo(server_app)
| import os
from flask import Flask
from flask.ext.pymongo import PyMongo
server_app = Flask(__name__)
if 'OPENEWS_DEVELOPMENT_ENV' in os.environ:
server_app.config.from_object('config-development')
else:
server_app.config.from_object('config-production')
mongo = PyMongo(server_app)
| none | 1 | 2.037163 | 2 | |
combat_width/Simulator.py | isamaru/combat_width | 1 | 6613109 | import random, yaml, math, csv, os
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
""" HoI4 NSB damage dealing simulator
Copyright 2021 <NAME>, <EMAIL>
"""
class Params:
def __init__(self, data, **kwargs) -> None:
def value(name):
return kwargs.get(name) or data[name]
self.attacker_width = value('attacker_width')
self.defender_width = value('defender_width')
self.combat_width = value('combat_width')
self.attack_per_width = value('attack_per_width')
self.attack_base = value('attack_base')
self.defense_per_width = value('defense_per_width')
self.defense_base = value('defense_base')
self.effective_coordination = value('effective_coordination')
self.overwidth_penalty_rate = value('overwidth_penalty_rate')
self.overwidth_penalty_max = value('overwidth_penalty_max')
self.stacking_limit_per_width = value('stacking_limit_per_width')
self.stacking_penalty_per_division = value('stacking_penalty_per_division')
self.attackers_count = Params.units_per_front(
self.combat_width, self.attacker_width, self.overwidth_penalty_rate, self.overwidth_penalty_max)
self.defenders_count = Params.units_per_front(
self.combat_width, self.defender_width, self.overwidth_penalty_rate, self.overwidth_penalty_max)
self.attack_overwidth_factor = Params.overwidth_penalty(
self.combat_width, self.attacker_width, self.attackers_count, self.overwidth_penalty_rate)
self.defense_overwidth_factor = Params.overwidth_penalty(
self.combat_width, self.defender_width, self.defenders_count, self.overwidth_penalty_rate)
self.attack_overstack_factor = Params.overstack_penalty(
self.combat_width, self.attackers_count, self.stacking_limit_per_width, self.stacking_penalty_per_division)
self.defense_overstack_factor = Params.overstack_penalty(
self.combat_width, self.defenders_count, self.stacking_limit_per_width, self.stacking_penalty_per_division)
self.attack = (
(self.attack_base + (self.attacker_width * self.attack_per_width))
* self.attack_overwidth_factor * self.attack_overstack_factor)
self.defense = (
(self.defense_base + (self.defender_width * self.defense_per_width))
* self.defense_overwidth_factor * self.defense_overstack_factor)
self.target_pool_size = min(max(1, (2 * self.attacker_width) // self.defender_width), self.defenders_count)
self.focused_attacks = self.effective_coordination * self.attack
self.spread_attacks = math.floor(((1 - self.effective_coordination) * self.attack) / self.target_pool_size)
    @staticmethod
    def units_per_front(combat_width, unit_width, penalty_rate, penalty_max):
        """Number of divisions of *unit_width* that join a front of *combat_width*.

        At most enough divisions to fill the width (ceil), capped by the point
        where the over-width penalty would exceed *penalty_max*.
        """
        # Widest combined frontage allowed before the over-width penalty caps out.
        max_overflow = (1 + (penalty_max / penalty_rate)) * combat_width
        return min(math.ceil(combat_width / unit_width), math.floor(max_overflow / unit_width))
    @staticmethod
    def overwidth_penalty(combat_width, unit_width, unit_count, penalty_rate):
        """Multiplicative penalty factor for exceeding the combat width (1.0 = none)."""
        # Fractional overflow of the combined unit frontage beyond the combat width.
        overflow = max(((unit_width * unit_count / combat_width) - 1), 0)
        return 1 - (overflow * penalty_rate)
    # Bugged for NSB's combat widths
    @staticmethod
    def overstack_penalty(combat_width, unit_count, limit_per_width, penalty_per_division):
        """Multiplicative penalty for stacking beyond the per-width limit (1.0 = none)."""
        # One penalty step per division above floor(combat_width * limit_per_width).
        overflow = max(unit_count - math.floor(combat_width * limit_per_width), 0)
        return 1 - (overflow * penalty_per_division)
    @staticmethod
    def load_base(**kwargs):
        """Build a Params from ./base_params.yaml, applying keyword overrides."""
        with open(r'./base_params.yaml') as file:
            return Params(yaml.load(file, Loader=yaml.FullLoader), **kwargs)
class Simulation:
    """Monte-Carlo driver: runs combat rounds and averages the damage dealt."""
    def __init__(self) -> None:
        super().__init__()
        # Number of simulate()/simulate_old() requests served so far.
        self.counter = 0
    def simulate(self, params, rounds, value_function=None):
        """Average total damage over *rounds* NSB-style combat rounds."""
        return self.process_rounds(self.simulate_round, params, rounds, value_function)
    def damage_received(self, attacks_received, params):
        """Map raw per-defender attack totals to damage dice: attacks above the
        defense threshold count four-fold; everything is scaled by 1/10."""
        return [(attacks + (max(0, attacks - params.defense) * 3)) / 10 for attacks in attacks_received]
    def process_rounds(self, simulate_round_function, params, rounds, value_function=None):
        """Run *rounds* rounds and return the mean total damage, optionally
        mapped through value_function(mean, params)."""
        self.counter += 1
        mean_result = sum((sum(simulate_round_function(params)) for _ in range(rounds))) / rounds
        if value_function is not None:
            return value_function(mean_result, params)
        return mean_result
    def simulate_round(self, params: Params):
        """One NSB round: each attacker samples a target pool, lands the focused
        share on the lowest-indexed target and spreads the rest over the pool."""
        attacks_received = [0] * params.defenders_count
        spread_attacks = params.spread_attacks
        focused_attacks = params.focused_attacks
        for attacker in range(params.attackers_count):
            target_pool = random.sample(
                range(params.defenders_count),
                k=params.target_pool_size)
            # Focused damage always goes to the lowest-indexed sampled defender.
            attacks_received[min(target_pool)] += focused_attacks
            for target in target_pool:
                attacks_received[target] += spread_attacks
        return self.damage_received(attacks_received, params)
    def simulate_old(self, params, rounds, value_function=None):
        """Average total damage over *rounds* pre-NSB combat rounds."""
        return self.process_rounds(self.simulate_round_old, params, rounds, value_function)
    def simulate_round_old(self, params: Params, value_function=None):
        # Pre-NSB rules: each attacker drops its whole attack on one random defender.
        # NOTE(review): value_function is unused here; process_rounds applies it.
        attacks_received = [0] * params.defenders_count
        for attacker in range(params.attackers_count):
            attacks_received[random.choice(range(params.defenders_count))] += params.attack
        return self.damage_received(attacks_received, params)
def run_simulations(
        rounds,
        ax_range, ax_property,
        x_range, x_property,
        series_range, series_property,
        old_series_range=None, old_series_property=None,
        value_function=None,
        **kwargs):
    """Run the NSB simulation grid, plus an optional pre-NSB comparison grid.

    Returns one entry per ax_range value; each entry is a list over x_range of
    rows holding one mean result per series value (NSB series first, then the
    pre-NSB 'old' series, if any).
    """
    s = Simulation()
    axs_data = []
    for ax_value in ax_range:
        dataset = [
            [
                s.simulate(
                    Params.load_base(**{ax_property: ax_value, x_property: x_value, series_property: series_value}),
                    rounds, value_function)
                for series_value in series_range
            ] for x_value in x_range
        ]
        # None/empty disables the pre-NSB comparison columns entirely.
        if not old_series_range:
            old_series_range = []
        old_dataset = [
            [
                s.simulate_old(
                    Params.load_base(**{
                        ax_property: ax_value, x_property: x_value,
                        old_series_property: old_series_value,
                        # Over-width rate forced to 2.0 for the legacy runs --
                        # presumably the pre-NSB value; confirm against game data.
                        'overwidth_penalty_rate': 2.0
                    }),
                    rounds, value_function)
                for old_series_value in old_series_range
            ] for x_value in x_range
        ]
        # Concatenate NSB and pre-NSB result columns row by row.
        axs_data.append([data + old_data for (data, old_data) in zip(dataset, old_dataset)])
    print("Simulation phase done, ran %d simulations with %d rounds " % (s.counter, rounds))
    return axs_data
def display_results(data,
                    title,
                    ax_range, ax_property,
                    x_range, x_property,
                    series_labels, y_label='damage dice dealt', **kwargs):
    """Plot one subplot per ax_range value, one line per series label."""
    fig, axs = plt.subplots(len(ax_range))
    # plt.subplots(1) returns a bare Axes instead of an array; normalise so the
    # loop below also works when ax_range has a single entry.
    if len(ax_range) == 1:
        axs = [axs]
    for i, ax in enumerate(axs):
        ax.set_title(str(ax_range[i]) + ' ' + ax_property)
        ax.set_xticks(x_range)
        ax.set_ylabel(y_label)
        ax.plot(x_range, data[i], label=series_labels)
        ax.yaxis.set_major_locator(MultipleLocator(50))
        ax.yaxis.set_minor_locator(AutoMinorLocator(5))
        ax.grid(which='major', linestyle='--')
        ax.grid(which='minor', linestyle=':')
        ax.legend()
    plt.suptitle(title)
    plt.show()
def export_results(data, file_prefix, ax_range, ax_property, x_range, x_property, series_labels, **kwargs):
    """Write one CSV per ax_range value into ../output/ (created if missing)."""
    for i, ax_value in enumerate(ax_range):
        filename = '../output/%s_%s_%s.csv' % (file_prefix, ax_property, ax_value)
        # exist_ok avoids the racy "check then create" of the original code.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow([x_property] + series_labels)  # header row
            for x_value, data_row in zip(x_range, data[i]):
                writer.writerow([x_value] + data_row)  # one data row per x value
def attacker_scenario():
    """Scenario: attacker damage vs. attacker width for several coordination values."""
    series_range = (0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65)
    return {
        'title': "Average total damage dice dealt by attackers (80 + 26 attack per width) to 20w defenders (700 defense) on a filled frontage across 10000 runs",
        'file_prefix': 'attackers',
        'rounds': 10000,
        'ax_range': (80, 120, 160),
        'ax_property': 'combat_width',
        'x_range': list(range(10, 50)),
        'x_property': 'attacker_width',
        'series_range': series_range,
        'series_property': 'effective_coordination',
        'old_series_range': (0,),
        'old_series_property': '',
        # round() before %d: (0.45 - 0.35) * 100 is 9.999... in binary floating
        # point, so plain %d truncation rendered "9" (and 0.35*100 -> "34%").
        'series_labels': ["Coordination = %d (%d%%)" % (round((x - 0.35) * 100), round(x * 100)) for x in series_range] + ['20w pre-NSB rules']
    }
def coordination_scenario():
    """Scenario: damage vs. effective coordination for several attacker widths."""
    widths = (20, 25, 29, 30, 35, 39, 40, 45)
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width) to 20w defenders (700 defense) on a filled frontage across 10000 runs",
        file_prefix='coordination',
        rounds=10000,
        ax_range=(80, 120, 160),
        ax_property='combat_width',
        x_range=[c / 100 for c in range(35, 70)],
        x_property='effective_coordination',
        series_range=widths,
        series_property='attacker_width',
        old_series_range=None,
        old_series_property='',
        series_labels=["Attacker width = %d" % w for w in widths],
    )
def defenders_scenario():
    """Scenario: damage received by defenders vs. defender width."""
    nsb_widths = (16, 20, 27, 30, 40)
    legacy_widths = (20, 40)
    labels = ["Attacker width = %d" % w for w in nsb_widths]
    labels += ["Pre-NSB attacker width = %d" % w for w in legacy_widths]
    return dict(
        title="Average total damage dice received by defenders (80 + 31 defense per width) by attackers (80 + 26 attack per width, 0.45 effective coordination) on a filled frontage across 10000 runs",
        file_prefix='defenders',
        rounds=10000,
        ax_range=(80, 120, 160),
        ax_property='combat_width',
        x_range=list(range(2, 50)),
        x_property='defender_width',
        series_range=nsb_widths,
        series_property='attacker_width',
        old_series_range=legacy_widths,
        old_series_property='attacker_width',
        series_labels=labels,
    )
def defenders_org_scenario():
    """defenders_scenario variant, normalised to relative ORG damage."""
    def to_relative_org(mean_damage, params: "Params"):
        # ORG-per-division curve as a function of division width.
        org = round(80 - (140 / (2 + (params.defender_width / 2))))
        return 100 * mean_damage / (params.defenders_count * org)
    overrides = dict(
        x_range=list(range(2, 50, 2)),
        file_prefix='defenders_org',
        title="Average normalized ORG damage received by defenders (80 + 31 defense per width) by attackers (80 + 26 attack per width, 0.45 effective coordination) on a filled frontage across 10000 runs",
        y_label='relative ORG damage received',
        value_function=to_relative_org,
    )
    return dict(defenders_scenario(), **overrides)
def terrain_scenario():
    """Scenario: damage vs. attacker width for the base terrain combat widths."""
    terrain_widths = (80, 84, 90, 78, 96, 75)
    terrain_labels = [
        'hills (80)', 'forest/jungle (84)', 'plains/desert (90)', 'marsh (78)', 'urban (96)', 'mountain (75)'
    ]
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width, 0.45 eff. coordination) to defenders (80 + 31 defense per width) across 10000 runs",
        file_prefix='terrains',
        rounds=10000,
        ax_range=(10, 16, 21, 25, 30),
        ax_property='defender_width',
        x_range=list(range(12, 50)),
        x_property='attacker_width',
        series_range=terrain_widths,
        series_property='combat_width',
        old_series_range=None,
        old_series_property='',
        series_labels=terrain_labels,
    )
def terrain_plus_scenario():
    """Scenario: like terrain_scenario, but with +1/+2 combat-width modifiers."""
    modified_widths = (
        120, 126, 135, 104, 128, 100,
        160, 168, 180, 130, 125
    )
    modified_labels = [
        'hills + 1 (120)', 'forest/jungle + 1 (126)', 'plains/desert + 1 (135)', 'marsh + 1 (104)', 'urban + 1 (128)', 'mountain + 1 (100)',
        'hills/urban + 2 (160)', 'forest/jungle + 2 (168)', 'plains/desert + 2 (180)', 'marsh + 2 (130)', 'mountain + 2 (125)'
    ]
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width, 0.45 eff. coordination) to defenders (80 + 31 defense per width) across 10000 runs",
        file_prefix='terrains_plus',
        rounds=10000,
        ax_range=(12, 16, 21, 25, 30),
        ax_property='defender_width',
        x_range=list(range(12, 50)),
        x_property='attacker_width',
        series_range=modified_widths,
        series_property='combat_width',
        old_series_range=None,
        old_series_property='',
        series_labels=modified_labels,
    )
def main():
    # Run one scenario end-to-end: simulate, export CSVs, then plot.
    scenario = terrain_plus_scenario()
    data = run_simulations(**scenario)
    export_results(data, **scenario)
    display_results(data, **scenario)
if __name__ == '__main__':
    main()
import random, yaml, math, csv, os
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
""" HoI4 NSB damage dealing simulator
Copyright 2021 <NAME>, <EMAIL>
"""
class Params:
    """Combat parameters derived from a base data dict plus keyword overrides.

    All derived quantities (unit counts, penalty factors, attack/defense
    totals, targeting pool) are computed once in __init__.
    """
    def __init__(self, data, **kwargs) -> None:
        def value(name):
            # Prefer an explicit keyword override; fall back to the base data
            # only when no override (or None) was given.  The previous
            # `kwargs.get(name) or data[name]` silently discarded falsy
            # overrides such as 0 or 0.0, reverting them to the base value.
            override = kwargs.get(name)
            return data[name] if override is None else override
        self.attacker_width = value('attacker_width')
        self.defender_width = value('defender_width')
        self.combat_width = value('combat_width')
        self.attack_per_width = value('attack_per_width')
        self.attack_base = value('attack_base')
        self.defense_per_width = value('defense_per_width')
        self.defense_base = value('defense_base')
        self.effective_coordination = value('effective_coordination')
        self.overwidth_penalty_rate = value('overwidth_penalty_rate')
        self.overwidth_penalty_max = value('overwidth_penalty_max')
        self.stacking_limit_per_width = value('stacking_limit_per_width')
        self.stacking_penalty_per_division = value('stacking_penalty_per_division')
        # How many divisions each side actually fields on this front.
        self.attackers_count = Params.units_per_front(
            self.combat_width, self.attacker_width, self.overwidth_penalty_rate, self.overwidth_penalty_max)
        self.defenders_count = Params.units_per_front(
            self.combat_width, self.defender_width, self.overwidth_penalty_rate, self.overwidth_penalty_max)
        self.attack_overwidth_factor = Params.overwidth_penalty(
            self.combat_width, self.attacker_width, self.attackers_count, self.overwidth_penalty_rate)
        self.defense_overwidth_factor = Params.overwidth_penalty(
            self.combat_width, self.defender_width, self.defenders_count, self.overwidth_penalty_rate)
        self.attack_overstack_factor = Params.overstack_penalty(
            self.combat_width, self.attackers_count, self.stacking_limit_per_width, self.stacking_penalty_per_division)
        self.defense_overstack_factor = Params.overstack_penalty(
            self.combat_width, self.defenders_count, self.stacking_limit_per_width, self.stacking_penalty_per_division)
        # Per-division attack/defense after both penalty factors.
        self.attack = (
            (self.attack_base + (self.attacker_width * self.attack_per_width))
            * self.attack_overwidth_factor * self.attack_overstack_factor)
        self.defense = (
            (self.defense_base + (self.defender_width * self.defense_per_width))
            * self.defense_overwidth_factor * self.defense_overstack_factor)
        # Attackers can target roughly twice their own width worth of defenders.
        self.target_pool_size = min(max(1, (2 * self.attacker_width) // self.defender_width), self.defenders_count)
        self.focused_attacks = self.effective_coordination * self.attack
        self.spread_attacks = math.floor(((1 - self.effective_coordination) * self.attack) / self.target_pool_size)
    @staticmethod
    def units_per_front(combat_width, unit_width, penalty_rate, penalty_max):
        """Divisions of *unit_width* that fit: ceil to fill the width, capped by
        where the over-width penalty would exceed *penalty_max*."""
        max_overflow = (1 + (penalty_max / penalty_rate)) * combat_width
        return min(math.ceil(combat_width / unit_width), math.floor(max_overflow / unit_width))
    @staticmethod
    def overwidth_penalty(combat_width, unit_width, unit_count, penalty_rate):
        """Multiplicative penalty for exceeding the combat width (1.0 = none)."""
        overflow = max(((unit_width * unit_count / combat_width) - 1), 0)
        return 1 - (overflow * penalty_rate)
    # Bugged for NSB's combat widths
    @staticmethod
    def overstack_penalty(combat_width, unit_count, limit_per_width, penalty_per_division):
        """Multiplicative penalty per division over the stacking limit (1.0 = none)."""
        overflow = max(unit_count - math.floor(combat_width * limit_per_width), 0)
        return 1 - (overflow * penalty_per_division)
    @staticmethod
    def load_base(**kwargs):
        """Build a Params from ./base_params.yaml, applying keyword overrides."""
        with open(r'./base_params.yaml') as file:
            return Params(yaml.load(file, Loader=yaml.FullLoader), **kwargs)
class Simulation:
    """Monte-Carlo driver for NSB (and pre-NSB) combat rounds.

    Keeps a running ``counter`` of how many simulations were requested.
    """
    def __init__(self) -> None:
        super().__init__()
        self.counter = 0
    def simulate(self, params, rounds, value_function=None):
        """Average total damage over *rounds* NSB-style combat rounds."""
        return self.process_rounds(self.simulate_round, params, rounds, value_function)
    def damage_received(self, attacks_received, params):
        """Raw attack totals -> damage dice: attacks over the defense threshold
        count four-fold (1 + 3 extra), everything scaled by 1/10."""
        return [
            (raw + 3 * max(0, raw - params.defense)) / 10
            for raw in attacks_received
        ]
    def process_rounds(self, simulate_round_function, params, rounds, value_function=None):
        """Run *rounds* rounds; return the mean total damage, optionally mapped
        through value_function(mean, params)."""
        self.counter += 1
        total = 0
        for _ in range(rounds):
            total += sum(simulate_round_function(params))
        mean_result = total / rounds
        if value_function is None:
            return mean_result
        return value_function(mean_result, params)
    def simulate_round(self, params: "Params"):
        """One NSB round: every attacker samples a target pool, drops its
        focused share on the lowest-indexed target and spreads the rest."""
        damage_pool = [0] * params.defenders_count
        for _ in range(params.attackers_count):
            pool = random.sample(
                range(params.defenders_count),
                k=params.target_pool_size)
            # Focused damage always lands on the lowest-indexed sampled target.
            damage_pool[min(pool)] += params.focused_attacks
            for idx in pool:
                damage_pool[idx] += params.spread_attacks
        return self.damage_received(damage_pool, params)
    def simulate_old(self, params, rounds, value_function=None):
        """Average total damage over *rounds* pre-NSB combat rounds."""
        return self.process_rounds(self.simulate_round_old, params, rounds, value_function)
    def simulate_round_old(self, params: "Params", value_function=None):
        # Pre-NSB rules: the whole attack lands on one random defender.
        # value_function is unused here; process_rounds applies it.
        damage_pool = [0] * params.defenders_count
        for _ in range(params.attackers_count):
            damage_pool[random.choice(range(params.defenders_count))] += params.attack
        return self.damage_received(damage_pool, params)
def run_simulations(
        rounds,
        ax_range, ax_property,
        x_range, x_property,
        series_range, series_property,
        old_series_range=None, old_series_property=None,
        value_function=None,
        **kwargs):
    """Run the NSB simulation grid, plus an optional pre-NSB comparison grid.

    Returns one entry per ax_range value; each entry is a list over x_range of
    rows holding one mean result per series value (NSB series first, then the
    pre-NSB 'old' series, if any).
    """
    s = Simulation()
    axs_data = []
    for ax_value in ax_range:
        dataset = [
            [
                s.simulate(
                    Params.load_base(**{ax_property: ax_value, x_property: x_value, series_property: series_value}),
                    rounds, value_function)
                for series_value in series_range
            ] for x_value in x_range
        ]
        # None/empty disables the pre-NSB comparison columns entirely.
        if not old_series_range:
            old_series_range = []
        old_dataset = [
            [
                s.simulate_old(
                    Params.load_base(**{
                        ax_property: ax_value, x_property: x_value,
                        old_series_property: old_series_value,
                        # Over-width rate forced to 2.0 for the legacy runs --
                        # presumably the pre-NSB value; confirm against game data.
                        'overwidth_penalty_rate': 2.0
                    }),
                    rounds, value_function)
                for old_series_value in old_series_range
            ] for x_value in x_range
        ]
        # Concatenate NSB and pre-NSB result columns row by row.
        axs_data.append([data + old_data for (data, old_data) in zip(dataset, old_dataset)])
    print("Simulation phase done, ran %d simulations with %d rounds " % (s.counter, rounds))
    return axs_data
def display_results(data,
                    title,
                    ax_range, ax_property,
                    x_range, x_property,
                    series_labels, y_label='damage dice dealt', **kwargs):
    """Plot one subplot per ax_range value, one line per series label."""
    fig, axs = plt.subplots(len(ax_range))
    # plt.subplots(1) returns a bare Axes instead of an array; normalise so the
    # loop below also works when ax_range has a single entry.
    if len(ax_range) == 1:
        axs = [axs]
    for i, ax in enumerate(axs):
        ax.set_title(str(ax_range[i]) + ' ' + ax_property)
        ax.set_xticks(x_range)
        ax.set_ylabel(y_label)
        ax.plot(x_range, data[i], label=series_labels)
        ax.yaxis.set_major_locator(MultipleLocator(50))
        ax.yaxis.set_minor_locator(AutoMinorLocator(5))
        ax.grid(which='major', linestyle='--')
        ax.grid(which='minor', linestyle=':')
        ax.legend()
    plt.suptitle(title)
    plt.show()
def export_results(data, file_prefix, ax_range, ax_property, x_range, x_property, series_labels, **kwargs):
    """Write one CSV per ax_range value into ../output/ (created if missing)."""
    for i, ax_value in enumerate(ax_range):
        filename = '../output/%s_%s_%s.csv' % (file_prefix, ax_property, ax_value)
        # exist_ok avoids the racy "check then create" of the original code.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow([x_property] + series_labels)  # header row
            for x_value, data_row in zip(x_range, data[i]):
                writer.writerow([x_value] + data_row)  # one data row per x value
def attacker_scenario():
    """Scenario: attacker damage vs. attacker width for several coordination values."""
    series_range = (0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65)
    return {
        'title': "Average total damage dice dealt by attackers (80 + 26 attack per width) to 20w defenders (700 defense) on a filled frontage across 10000 runs",
        'file_prefix': 'attackers',
        'rounds': 10000,
        'ax_range': (80, 120, 160),
        'ax_property': 'combat_width',
        'x_range': list(range(10, 50)),
        'x_property': 'attacker_width',
        'series_range': series_range,
        'series_property': 'effective_coordination',
        'old_series_range': (0,),
        'old_series_property': '',
        # round() before %d: (0.45 - 0.35) * 100 is 9.999... in binary floating
        # point, so plain %d truncation rendered "9" (and 0.35*100 -> "34%").
        'series_labels': ["Coordination = %d (%d%%)" % (round((x - 0.35) * 100), round(x * 100)) for x in series_range] + ['20w pre-NSB rules']
    }
def coordination_scenario():
    """Scenario: damage vs. effective coordination for several attacker widths."""
    widths = (20, 25, 29, 30, 35, 39, 40, 45)
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width) to 20w defenders (700 defense) on a filled frontage across 10000 runs",
        file_prefix='coordination',
        rounds=10000,
        ax_range=(80, 120, 160),
        ax_property='combat_width',
        x_range=[c / 100 for c in range(35, 70)],
        x_property='effective_coordination',
        series_range=widths,
        series_property='attacker_width',
        old_series_range=None,
        old_series_property='',
        series_labels=["Attacker width = %d" % w for w in widths],
    )
def defenders_scenario():
    """Scenario: damage received by defenders vs. defender width."""
    nsb_widths = (16, 20, 27, 30, 40)
    legacy_widths = (20, 40)
    labels = ["Attacker width = %d" % w for w in nsb_widths]
    labels += ["Pre-NSB attacker width = %d" % w for w in legacy_widths]
    return dict(
        title="Average total damage dice received by defenders (80 + 31 defense per width) by attackers (80 + 26 attack per width, 0.45 effective coordination) on a filled frontage across 10000 runs",
        file_prefix='defenders',
        rounds=10000,
        ax_range=(80, 120, 160),
        ax_property='combat_width',
        x_range=list(range(2, 50)),
        x_property='defender_width',
        series_range=nsb_widths,
        series_property='attacker_width',
        old_series_range=legacy_widths,
        old_series_property='attacker_width',
        series_labels=labels,
    )
def defenders_org_scenario():
    """defenders_scenario variant, normalised to relative ORG damage."""
    def to_relative_org(mean_damage, params: "Params"):
        # ORG-per-division curve as a function of division width.
        org = round(80 - (140 / (2 + (params.defender_width / 2))))
        return 100 * mean_damage / (params.defenders_count * org)
    overrides = dict(
        x_range=list(range(2, 50, 2)),
        file_prefix='defenders_org',
        title="Average normalized ORG damage received by defenders (80 + 31 defense per width) by attackers (80 + 26 attack per width, 0.45 effective coordination) on a filled frontage across 10000 runs",
        y_label='relative ORG damage received',
        value_function=to_relative_org,
    )
    return dict(defenders_scenario(), **overrides)
def terrain_scenario():
    """Scenario: damage vs. attacker width for the base terrain combat widths."""
    terrain_widths = (80, 84, 90, 78, 96, 75)
    terrain_labels = [
        'hills (80)', 'forest/jungle (84)', 'plains/desert (90)', 'marsh (78)', 'urban (96)', 'mountain (75)'
    ]
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width, 0.45 eff. coordination) to defenders (80 + 31 defense per width) across 10000 runs",
        file_prefix='terrains',
        rounds=10000,
        ax_range=(10, 16, 21, 25, 30),
        ax_property='defender_width',
        x_range=list(range(12, 50)),
        x_property='attacker_width',
        series_range=terrain_widths,
        series_property='combat_width',
        old_series_range=None,
        old_series_property='',
        series_labels=terrain_labels,
    )
def terrain_plus_scenario():
    """Scenario: like terrain_scenario, but with +1/+2 combat-width modifiers."""
    modified_widths = (
        120, 126, 135, 104, 128, 100,
        160, 168, 180, 130, 125
    )
    modified_labels = [
        'hills + 1 (120)', 'forest/jungle + 1 (126)', 'plains/desert + 1 (135)', 'marsh + 1 (104)', 'urban + 1 (128)', 'mountain + 1 (100)',
        'hills/urban + 2 (160)', 'forest/jungle + 2 (168)', 'plains/desert + 2 (180)', 'marsh + 2 (130)', 'mountain + 2 (125)'
    ]
    return dict(
        title="Average total damage dice dealt by attackers (80 + 26 attack per width, 0.45 eff. coordination) to defenders (80 + 31 defense per width) across 10000 runs",
        file_prefix='terrains_plus',
        rounds=10000,
        ax_range=(12, 16, 21, 25, 30),
        ax_property='defender_width',
        x_range=list(range(12, 50)),
        x_property='attacker_width',
        series_range=modified_widths,
        series_property='combat_width',
        old_series_range=None,
        old_series_property='',
        series_labels=modified_labels,
    )
def main():
    # Run one scenario end-to-end: simulate, export CSVs, then plot.
    scenario = terrain_plus_scenario()
    data = run_simulations(**scenario)
    export_results(data, **scenario)
    display_results(data, **scenario)
if __name__ == '__main__':
    main()
| en | 0.634023 | HoI4 NSB damage dealing simulator Copyright 2021 <NAME>, <EMAIL> # Bugged for NSB's combat widths # Create a figure containing a single axes. # ax.set_xlabel(x_property) # headers # headers | 2.574438 | 3 |
src/util.py | kelhaji/geit | 14 | 6613110 | <filename>src/util.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
class Util:
    """Small static helpers shared across the project."""
    @staticmethod
    def parse_git_datetime(timestamp):
        """Parse a git date string (e.g. 'Mon Jan 1 00:00:00 2018 +0000') into a
        POSIX timestamp (float); the offset makes the result timezone-aware."""
        return datetime.strptime(timestamp, '%a %b %d %H:%M:%S %Y %z').timestamp()
    @staticmethod
    def add_count_to_identifier(dictionary, identifier, add):
        """Accumulate *add* onto dictionary[identifier], creating the entry at 0."""
        dictionary.setdefault(identifier, 0)
        dictionary[identifier] += add
    @staticmethod
    def add_object_to_identifier(dictionary, identifier, add):
        """Same behaviour as add_count_to_identifier; the body was an exact
        duplicate, so it now delegates.  NOTE(review): despite the name it
        still initialises to 0 and uses +=, so *add* must support int + add."""
        Util.add_count_to_identifier(dictionary, identifier, add)
| <filename>src/util.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
class Util:
    """Small static helpers shared across the project."""
    @staticmethod
    def parse_git_datetime(timestamp):
        """Parse a git date string (e.g. 'Mon Jan 1 00:00:00 2018 +0000') into a
        POSIX timestamp (float); the offset makes the result timezone-aware."""
        return datetime.strptime(timestamp, '%a %b %d %H:%M:%S %Y %z').timestamp()
    @staticmethod
    def add_count_to_identifier(dictionary, identifier, add):
        """Accumulate *add* onto dictionary[identifier], creating the entry at 0."""
        dictionary.setdefault(identifier, 0)
        dictionary[identifier] += add
    @staticmethod
    def add_object_to_identifier(dictionary, identifier, add):
        """Same behaviour as add_count_to_identifier; the body was an exact
        duplicate, so it now delegates.  NOTE(review): despite the name it
        still initialises to 0 and uses +=, so *add* must support int + add."""
        Util.add_count_to_identifier(dictionary, identifier, add)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.538367 | 3 |
networks/__init__.py | EagleMIT/m-i-d | 19 | 6613111 | <filename>networks/__init__.py
from .deeplabv3_plus import DeepLabV3Plus
from .ENet import ENet
from .ERFNet import ERFNet
from .ESPNet import ESPNet
from .mobilenetv2 import MobileNetV2
from .NestedUNet import NestedUNet
from .RAUNet import RAUNet
from .resnet18 import Resnet18
from .UNet import U_Net
from .PspNet.pspnet import PSPNet
def get_model(model_name: str, channels: int):
    """Instantiate a segmentation model by (case-insensitive) name.

    Args:
        model_name: one of the names in the assert below.
        channels: number of output classes/channels for the model head.
    """
    name = model_name.lower()
    assert name in ['deeplabv3+', 'enet', 'erfnet', 'espnet', 'mobilenetv2',
                    'unet++', 'raunet', 'resnet18', 'unet', 'pspnet']
    if name == 'deeplabv3+':
        model = DeepLabV3Plus(num_class=channels)
    elif name == 'unet':
        model = U_Net(in_ch=1, out_ch=channels)
    elif name == 'resnet18':
        # Was 'resnet', which the assert never admits, so requesting
        # 'resnet18' fell through every branch and hit an unbound `model`.
        model = Resnet18(num_classes=channels)
    elif name == 'raunet':
        model = RAUNet(num_classes=channels)
    elif name == 'pspnet':
        # NOTE(review): hard-coded to 2 classes instead of `channels` --
        # confirm whether PSPNet should honour the requested channel count.
        model = PSPNet(num_classes=2)
    elif name == 'mobilenetv2':
        model = MobileNetV2(num_classes=channels)
    elif name == 'espnet':
        model = ESPNet(classes=channels)
    elif name == 'erfnet':
        model = ERFNet(num_classes=channels)
    elif name == 'enet':
        model = ENet(nclass=channels)
    elif name == 'unet++':
        model = NestedUNet(in_ch=1, out_ch=channels)
    return model
from .deeplabv3_plus import DeepLabV3Plus
from .ENet import ENet
from .ERFNet import ERFNet
from .ESPNet import ESPNet
from .mobilenetv2 import MobileNetV2
from .NestedUNet import NestedUNet
from .RAUNet import RAUNet
from .resnet18 import Resnet18
from .UNet import U_Net
from .PspNet.pspnet import PSPNet
def get_model(model_name: str, channels: int):
    """Instantiate a segmentation model by (case-insensitive) name.

    Args:
        model_name: one of the names in the assert below.
        channels: number of output classes/channels for the model head.
    """
    name = model_name.lower()
    assert name in ['deeplabv3+', 'enet', 'erfnet', 'espnet', 'mobilenetv2',
                    'unet++', 'raunet', 'resnet18', 'unet', 'pspnet']
    if name == 'deeplabv3+':
        model = DeepLabV3Plus(num_class=channels)
    elif name == 'unet':
        model = U_Net(in_ch=1, out_ch=channels)
    elif name == 'resnet18':
        # Was 'resnet', which the assert never admits, so requesting
        # 'resnet18' fell through every branch and hit an unbound `model`.
        model = Resnet18(num_classes=channels)
    elif name == 'raunet':
        model = RAUNet(num_classes=channels)
    elif name == 'pspnet':
        # NOTE(review): hard-coded to 2 classes instead of `channels` --
        # confirm whether PSPNet should honour the requested channel count.
        model = PSPNet(num_classes=2)
    elif name == 'mobilenetv2':
        model = MobileNetV2(num_classes=channels)
    elif name == 'espnet':
        model = ESPNet(classes=channels)
    elif name == 'erfnet':
        model = ERFNet(num_classes=channels)
    elif name == 'enet':
        model = ENet(nclass=channels)
    elif name == 'unet++':
        model = NestedUNet(in_ch=1, out_ch=channels)
    return model
src/Role/Domain/UseCases/SaveRoleUseCase.py | DigiChanges/python-experience | 0 | 6613112 | from src.Role.Domain.Entities.Role import Role
from src.Role.InterfaceAdapters.IRoleRepository import IRoleRepository
from src.Role.InterfaceAdapters.Payloads.RoleRepPayload import RoleRepPayload
from src.lazyInject import lazyInject
class SaveRoleUseCase:
    """Build a Role entity from a payload and persist it via the repository."""
    repository: IRoleRepository = lazyInject.get(IRoleRepository)
    def handle(self, payload: RoleRepPayload):
        """Map *payload* onto a new Role and save it; returns the repository result."""
        new_role = Role()
        new_role.name = payload.getName()
        new_role.slug = payload.getSlug()
        new_role.enable = payload.getEnable()
        new_role.permissions = payload.getPermissions()
        return self.repository.save(new_role)
| from src.Role.Domain.Entities.Role import Role
from src.Role.InterfaceAdapters.IRoleRepository import IRoleRepository
from src.Role.InterfaceAdapters.Payloads.RoleRepPayload import RoleRepPayload
from src.lazyInject import lazyInject
class SaveRoleUseCase:
    """Build a Role entity from a payload and persist it via the repository."""
    repository: IRoleRepository = lazyInject.get(IRoleRepository)
    def handle(self, payload: RoleRepPayload):
        """Map *payload* onto a new Role and save it; returns the repository result."""
        new_role = Role()
        new_role.name = payload.getName()
        new_role.slug = payload.getSlug()
        new_role.enable = payload.getEnable()
        new_role.permissions = payload.getPermissions()
        return self.repository.save(new_role)
| none | 1 | 2.002846 | 2 | |
magpy/scripts/mptest.py | geomagpy/magpy-git | 27 | 6613113 | #!/usr/bin/env python
"""
MagPy - Basic Runtime tests including durations
"""
from __future__ import print_function
from magpy.stream import *
from magpy.database import *
import magpy.transfer as tr
import magpy.absolutes as di
import magpy.mpplot as mp
import magpy.opt.emd as emd
import magpy.opt.cred as cred
import os, getopt
def getfilenames(path):
    """Return the full path of every file found under *path* (recursive walk)."""
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(path)
        for name in names
    ]
def loadtest(filename, fullheader=False):
    """Try to read *filename* with magpy's read(); report stats and return
    True on a non-empty stream, False otherwise.

    With fullheader=True every header key/value pair is also printed.
    """
    print (" ---------------------------------")
    print (" --------- READ TEST -------------")
    print (" ---------------------------------")
    try:
        t1 = datetime.utcnow()
        stream = read(filename,debug=fullheader)
    # NOTE(review): bare except hides the real failure reason (and catches
    # KeyboardInterrupt); consider `except Exception as e` and logging it.
    except:
        print ("Reading data {} failed".format(filename))
        return False
    if stream.length()[0] > 0:
        print ("Success for {}: {}".format(stream.header.get('DataFormat'),filename))
        # NOTE(review): "lenght" typo is in the user-visible string below.
        print (" - lenght: {}".format(stream.length()[0]))
        print (" - keys: {}".format(stream._get_key_headers()))
        t2 = datetime.utcnow()
        print (" - load duration: {}".format(t2-t1))
        print (" - SensorID: {}".format(stream.header.get('SensorID',"!! not available !!")))
        if fullheader:
            print (" ------------------------------")
            print (" - Extended HEADER Information:")
            for dat in stream.header:
                print (" -> Key: {} -- Value: {}".format(dat, stream.header[dat]))
            print (" ------------------------------")
        return True
    else:
        return False
def writetest(source,destination,fmt):
    """Read *source* and write it to *destination* in format *fmt*.

    Returns True and prints the elapsed time on success, False otherwise.
    """
    print (" ---------------------------------")
    print (" --------- WRITE TEST ------------")
    print (" ---------------------------------")
    stream = read(source)
    t1 = datetime.utcnow()
    print ("Writing format {}".format(fmt))
    if stream.write(destination,filenamebegins="mptest_",format_type=fmt):
        print("Writing successful for {} to {}".format(fmt,destination))
        t2 = datetime.utcnow()
        print (" - Needed {} sec for {} datapoints".format(t2-t1,stream.length()[0]))
        return True
    else:
        return False
def writeDBtest(db,source):
    """Read *source* and write the stream into database *db*.

    Returns True and prints the elapsed time on success, False otherwise.
    """
    print (" ---------------------------------")
    print (" -------- DB WRITE TEST ----------")
    print (" ---------------------------------")
    stream = read(source)
    t1 = datetime.utcnow()
    if stream.writeDB(db):
        # The original message referenced `fmt` and `destination`, which do not
        # exist in this function (copy/paste from writetest) and raised a
        # NameError on every successful write.
        print("Writing successful for {} to database".format(source))
        t2 = datetime.utcnow()
        print (" - Needed {} sec for {} datapoints".format(t2-t1,stream.length()[0]))
        return True
    else:
        return False
def main(argv):
    """Command-line entry point: parse options, run read tests over a path
    and/or write-format tests for a single source file, then print a summary.
    """
    fmtlist = ''
    source = ''
    destination = ''
    path = ''
    # NOTE(review): `database` is collected from -d but never used below.
    database = ''
    intensive = False
    # Collect every format magpy can write ('w' capability flag).
    fmts = ['']
    for fmt in PYMAG_SUPPORTED_FORMATS:
        if 'w' in (PYMAG_SUPPORTED_FORMATS[fmt][0]):
            fmts.append(fmt)
    try:
        opts, args = getopt.getopt(argv,"hp:s:o:f:d:i",["path=","source=","destination=","fmtlist=","database=",])
    except getopt.GetoptError:
        print ('mptest.py -p <path> -s <source> -o <destination> -f <fmtlist> -d <database> -i <intensive>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print ('-------------------------------------')
            print ('Description:')
            print ('MagPy test programm for platform test')
            print ('-------------------------------------')
            print ('Usage:')
            print ('mptest.py -p <path> -s <source> -o <destination> -f <fmtlist> -d <database> -d <database>')
            print ('-------------------------------------')
            print ('Options:')
            print ('-p : path for test data files')
            print ('-s : single source file for write format tests')
            print ('-o : output destination path')
            print ('-f : list of formats to be checked (not yet available)')
            print ('-d : name of a database for db type')
            print ('-i (-) : intensive')
            print ('-------------------------------------')
            print ('Examples:')
            print ('python mptest.py -p mypath')
            print ('python mptest.py -p "/media/leon/237A-97B1/MagPyTest/TestFiles" -f IMAGCDF -s /media/leon/237A-97B1/MagPyTest/TestFiles/bsl20150113dsec.sec -o /tmp/')
            sys.exit()
        elif opt in ("-p", "--path"):
            path = arg
        elif opt in ("-s", "--source"):
            source = arg
        elif opt in ("-d", "--database"):
            database = arg
        elif opt in ("-o", "--destination"):
            destination = arg
        elif opt in ("-f", "--fmtlist"):
            fmtlist = arg
            # Keep only the requested formats that magpy can actually write.
            try:
                fml= []
                fm = fmtlist.split(',')
                for el in fm:
                    if el in fmts:
                        fml.append(el)
                    else:
                        print (" - Selected format {} is not available for writing".format(el))
                fmts = [el for el in fml]
            except:
                print (" - error in option fmtlist - using default formats")
        elif opt in ("-i", "--intensive"):
            # Intensive mode also dumps the full header of each loaded file.
            intensive = True
    # Drop the '' placeholder used when building the default format list.
    fmts = [el for el in fmts if not el == '']
    # Summary info:
    failedload = []
    failedwrite = []
    print ("\n----------------------------------")
    print ("-------- Testing MagPy ----------")
    print ("-------- Version: {} ---------".format(magpyversion))
    if path == '' and source == '':
        print ('Specify a path and/or single data source:')
        print ('-- check mptest.py -h for more options and requirements')
        sys.exit()
    # Read test: try to load every file found below `path`.
    if not path == '':
        filelist = getfilenames(path)
        for f in filelist:
            val = loadtest(f, fullheader=intensive)
            if not val:
                failedload.append(f)
    # Write test: re-write `source` to `destination` in each selected format.
    if not destination == '' and not source == '':
        for fmt in fmts:
            val = writetest(source,destination,fmt)
            if not val:
                failedwrite.append(fmt)
    print ("\n----------------------------------")
    print ("----------- SUMMARY ------------")
    print ("----------------------------------")
    if not len(failedload) > 0 and not len(failedwrite) > 0:
        print ("\nALL TESTS SUCCESSFULLY PASSED!")
    else:
        print ("\nFailed to load (files): {}".format(failedload))
        print ("\nFailed to write (formats): {}".format(failedwrite))
if __name__ == "__main__":
    main(sys.argv[1:])
| #!/usr/bin/env python
"""
MagPy - Basic Runtime tests including durations
"""
from __future__ import print_function
from magpy.stream import *
from magpy.database import *
import magpy.transfer as tr
import magpy.absolutes as di
import magpy.mpplot as mp
import magpy.opt.emd as emd
import magpy.opt.cred as cred
import os, getopt
def getfilenames(path):
    """Return the full path of every file found under *path* (recursive walk)."""
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(path)
        for name in names
    ]
def loadtest(filename, fullheader=False):
    """Try to read *filename* with MagPy's read() and report timing/header info.

    Parameters
    ----------
    filename : str
        Path of the data file to load.
    fullheader : bool
        When True, also dump every header key/value pair after a successful
        load (passed through to read() as its debug flag).

    Returns
    -------
    bool
        True when the file loads and contains at least one data point,
        False on any read error or an empty stream.
    """
    print (" ---------------------------------")
    print (" --------- READ TEST -------------")
    print (" ---------------------------------")
    try:
        t1 = datetime.utcnow()
        stream = read(filename,debug=fullheader)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; any read failure is reported and counted.
        print ("Reading data {} failed".format(filename))
        return False
    if stream.length()[0] > 0:
        print ("Success for {}: {}".format(stream.header.get('DataFormat'),filename))
        # BUG FIX: output previously misspelled "lenght".
        print (" - length: {}".format(stream.length()[0]))
        print (" - keys: {}".format(stream._get_key_headers()))
        t2 = datetime.utcnow()
        print (" - load duration: {}".format(t2-t1))
        print (" - SensorID: {}".format(stream.header.get('SensorID',"!! not available !!")))
        if fullheader:
            print (" ------------------------------")
            print (" - Extended HEADER Information:")
            for dat in stream.header:
                print (" -> Key: {} -- Value: {}".format(dat, stream.header[dat]))
            print (" ------------------------------")
        return True
    else:
        return False
def writetest(source, destination, fmt):
    """Read *source* and write it to *destination* in format *fmt*.

    Returns True and prints the write duration on success, False otherwise.
    """
    print (" ---------------------------------")
    print (" --------- WRITE TEST ------------")
    print (" ---------------------------------")
    data = read(source)
    started = datetime.utcnow()
    print ("Writing format {}".format(fmt))
    # Guard clause: bail out early when the write fails.
    if not data.write(destination, filenamebegins="mptest_", format_type=fmt):
        return False
    print("Writing successful for {} to {}".format(fmt, destination))
    finished = datetime.utcnow()
    print (" - Needed {} sec for {} datapoints".format(finished - started, data.length()[0]))
    return True
def writeDBtest(db,source):
    """Read *source* and write the resulting stream into database *db*.

    Returns True and prints the write duration on success, False otherwise.
    """
    print (" ---------------------------------")
    print (" -------- DB WRITE TEST ----------")
    print (" ---------------------------------")
    stream = read(source)
    t1 = datetime.utcnow()
    if stream.writeDB(db):
        # BUG FIX: the original success message referenced undefined names
        # 'fmt' and 'destination' (copied from writetest), so a successful
        # DB write raised NameError here.
        print("Writing successful for {} to database".format(source))
        t2 = datetime.utcnow()
        print (" - Needed {} sec for {} datapoints".format(t2-t1,stream.length()[0]))
        return True
    else:
        return False
def main(argv):
    """Parse command line options and run the MagPy load/write test suite.

    Options:
        -p/--path         directory scanned recursively for test data files
        -s/--source       single source file for the write format tests
        -o/--destination  output directory for the write tests
        -f/--fmtlist      comma separated list of formats to test
        -d/--database     database name (parsed but not used below)
        -i                enable intensive mode (full header dump on load)
    """
    fmtlist = ''
    source = ''
    destination = ''
    path = ''
    database = ''
    intensive = False
    # Collect every format MagPy can write ('w' flag in the format registry).
    # The '' placeholder keeps the original seeding behaviour; it is dropped
    # again after option parsing.
    fmts = ['']
    for fmt in PYMAG_SUPPORTED_FORMATS:
        if 'w' in (PYMAG_SUPPORTED_FORMATS[fmt][0]):
            fmts.append(fmt)
    try:
        opts, args = getopt.getopt(argv,"hp:s:o:f:d:i",["path=","source=","destination=","fmtlist=","database=",])
    except getopt.GetoptError:
        print ('mptest.py -p <path> -s <source> -o <destination> -f <fmtlist> -d <database> -i <intensive>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print ('-------------------------------------')
            print ('Description:')
            print ('MagPy test programm for platform test')
            print ('-------------------------------------')
            print ('Usage:')
            # BUG FIX: the usage line previously repeated '-d <database>'
            # twice; now consistent with the getopt error message above.
            print ('mptest.py -p <path> -s <source> -o <destination> -f <fmtlist> -d <database> -i <intensive>')
            print ('-------------------------------------')
            print ('Options:')
            print ('-p : path for test data files')
            print ('-s : single source file for write format tests')
            print ('-o : output destination path')
            print ('-f : list of formats to be checked (not yet available)')
            print ('-d : name of a database for db type')
            print ('-i (-) : intensive')
            print ('-------------------------------------')
            print ('Examples:')
            print ('python mptest.py -p mypath')
            print ('python mptest.py -p "/media/leon/237A-97B1/MagPyTest/TestFiles" -f IMAGCDF -s /media/leon/237A-97B1/MagPyTest/TestFiles/bsl20150113dsec.sec -o /tmp/')
            sys.exit()
        elif opt in ("-p", "--path"):
            path = arg
        elif opt in ("-s", "--source"):
            source = arg
        elif opt in ("-d", "--database"):
            database = arg
        elif opt in ("-o", "--destination"):
            destination = arg
        elif opt in ("-f", "--fmtlist"):
            fmtlist = arg
            try:
                # Keep only requested formats that are actually writable.
                selected = []
                for el in fmtlist.split(','):
                    if el in fmts:
                        selected.append(el)
                    else:
                        print (" - Selected format {} is not available for writing".format(el))
                fmts = selected
            except Exception:
                # Narrowed from a bare 'except:'; fall back to the defaults.
                print (" - error in option fmtlist - using default formats")
        elif opt in ("-i", "--intensive"):
            intensive = True
    # Drop the empty placeholder entry used to seed the format list.
    fmts = [el for el in fmts if el != '']
    # Summary info:
    failedload = []
    failedwrite = []
    print ("\n----------------------------------")
    print ("-------- Testing MagPy ----------")
    print ("-------- Version: {} ---------".format(magpyversion))
    if path == '' and source == '':
        print ('Specify a path and/or single data source:')
        print ('-- check mptest.py -h for more options and requirements')
        sys.exit()
    if path != '':
        for f in getfilenames(path):
            if not loadtest(f, fullheader=intensive):
                failedload.append(f)
    if destination != '' and source != '':
        for fmt in fmts:
            if not writetest(source, destination, fmt):
                failedwrite.append(fmt)
    print ("\n----------------------------------")
    print ("----------- SUMMARY ------------")
    print ("----------------------------------")
    if not failedload and not failedwrite:
        print ("\nALL TESTS SUCCESSFULLY PASSED!")
    else:
        print ("\nFailed to load (files): {}".format(failedload))
        print ("\nFailed to write (formats): {}".format(failedwrite))
# Script entry point: run the test suite with the CLI arguments
# (argv[0], the script name, is stripped before parsing).
if __name__ == "__main__":
    main(sys.argv[1:])
| en | 0.317365 | #!/usr/bin/env python MagPy - Basic Runtime tests including durations #if file.endswith(".txt"): #print(os.path.join(root, file)) # Summary info: | 2.322249 | 2 |
batterydispatch/agent/functions/helpers.py | danbolinson/BatteryAgent | 2 | 6613114 | import warnings
def first_over(val, array):
'''Returns the first value in the array that is equal to or over the given value val.
The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked.
If the given value is greater than any value in the array, teh maximum array value is returned with a warning.'''
if val > max(array):
warnings.warn(
"The value {} given is greater than the max value in array {}. The max value will be returned.".format(val,
array))
return array[-1]
first = next(a[1] for a in enumerate(array) if a[1] >= val)
return first
def last_under(val, array):
'''Returns the first value in the array that is less than the given value val.
If all values in the array are smaller than the given value, the greatest (assumed last) value in the array is returned.
The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked.'''
if val >= max(array):
return array[-1]
try:
first_ix = next(a[0] for a in enumerate(array) if a[1] > val)
if first_ix - 1 < 0:
warnings.warn(
"The value {} given is less than the first value in array {}. The min value will be returned.".format(
val, array))
return array[0]
else:
return array[first_ix - 1]
except StopIteration:
raise StopIteration(
"Unexpected StopIteration error raised looking for value {} in array {}.".format(val, array))
| import warnings
def first_over(val, array):
'''Returns the first value in the array that is equal to or over the given value val.
The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked.
If the given value is greater than any value in the array, teh maximum array value is returned with a warning.'''
if val > max(array):
warnings.warn(
"The value {} given is greater than the max value in array {}. The max value will be returned.".format(val,
array))
return array[-1]
first = next(a[1] for a in enumerate(array) if a[1] >= val)
return first
def last_under(val, array):
'''Returns the first value in the array that is less than the given value val.
If all values in the array are smaller than the given value, the greatest (assumed last) value in the array is returned.
The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked.'''
if val >= max(array):
return array[-1]
try:
first_ix = next(a[0] for a in enumerate(array) if a[1] > val)
if first_ix - 1 < 0:
warnings.warn(
"The value {} given is less than the first value in array {}. The min value will be returned.".format(
val, array))
return array[0]
else:
return array[first_ix - 1]
except StopIteration:
raise StopIteration(
"Unexpected StopIteration error raised looking for value {} in array {}.".format(val, array))
| en | 0.824001 | Returns the first value in the array that is equal to or over the given value val. The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked. If the given value is greater than any value in the array, teh maximum array value is returned with a warning. Returns the first value in the array that is less than the given value val. If all values in the array are smaller than the given value, the greatest (assumed last) value in the array is returned. The function assumes the array is sorted and the largest value is in the 'last' index. This is not checked. | 4.103507 | 4 |
planemo/commands/cmd_virtualenv.py | pvanheus/planemo | 1 | 6613115 | """Module describing the planemo ``virtualenv`` command."""
import click
from planemo import virtualenv
from planemo.cli import command_function
VIRTUALENV_PATH_TYPE = click.Path(
exists=False,
writable=True,
resolve_path=True,
)
@click.command("virtualenv")
@click.option("-p", "--python",
metavar="PYTHON_EXE")
@click.argument("virtualenv_path",
metavar="VIRTUALENV_PATH",
type=VIRTUALENV_PATH_TYPE)
@command_function
def cli(ctx, virtualenv_path, **kwds):
"""Create a virtualenv.
Use virtualenv as library to create a virtualenv for Galaxy if virtualenv
is not available on the PATH.
"""
virtualenv.create_and_exit(virtualenv_path, **kwds)
| """Module describing the planemo ``virtualenv`` command."""
import click
from planemo import virtualenv
from planemo.cli import command_function
VIRTUALENV_PATH_TYPE = click.Path(
exists=False,
writable=True,
resolve_path=True,
)
@click.command("virtualenv")
@click.option("-p", "--python",
metavar="PYTHON_EXE")
@click.argument("virtualenv_path",
metavar="VIRTUALENV_PATH",
type=VIRTUALENV_PATH_TYPE)
@command_function
def cli(ctx, virtualenv_path, **kwds):
"""Create a virtualenv.
Use virtualenv as library to create a virtualenv for Galaxy if virtualenv
is not available on the PATH.
"""
virtualenv.create_and_exit(virtualenv_path, **kwds)
| en | 0.355781 | Module describing the planemo ``virtualenv`` command. Create a virtualenv. Use virtualenv as library to create a virtualenv for Galaxy if virtualenv is not available on the PATH. | 2.551888 | 3 |
python/cnnnetwork.py | iflyings/tensorflow-flower | 0 | 6613116 | #!/home/iflyings/VSCode/venv/tensorflow-venv python
# -*- coding:utf-8 -*-
# Author: iflyings
import tensorflow as tf
class CnnNetwork:
def __init__(self, input_x, batch_size, n_classes, train):
self.name = "cnnnetwork"
self.input_x = input_x
self.batch_size = batch_size
self.n_classes = n_classes
self.train = train
def __conv_wrapper(self, input, filters=32, train=True, activation=tf.nn.relu, name="1"):
conv = tf.layers.conv2d(
inputs=input,
filters=filters,
kernel_size=[3, 3],
padding="same",
activation=activation,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
name="conv_"+name)
'''
bn = tf.layers.batch_normalization(conv,
momentum=0.9,
epsilon=1e-5,
scale=True,
training=train,
name="bn_"+name)
'''
return conv
def __pool_wrapper(self, input, name='1'):
return tf.layers.max_pooling2d(
inputs=input,
pool_size=[2, 2],
strides=2,
name="pool_"+name)
def __dense_wrapper(self, input, units, activation=tf.nn.relu, name='1'):
return tf.layers.dense(inputs=input,
units=units,
activation=activation,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003),
name="dense_"+name)
def create(self):
# 第一个卷积层
#with tf.compat.v1.variable_scope('conv1') as scope:
conv1_1 = self.__conv_wrapper(self.input_x,filters=32,train=True,name="conv1_1")
pool1 = self.__pool_wrapper(conv1_1,name="pool1")
# 第二个卷积层
#with tf.compat.v1.variable_scope('conv2') as scope:
conv2_1 = self.__conv_wrapper(pool1,filters=64,train=True,name="conv2_1")
pool2 = self.__pool_wrapper(conv2_1,name="pool2")
# 第三个卷积层
#with tf.compat.v1.variable_scope('conv3') as scope:
conv3_1 = self.__conv_wrapper(pool2,filters=128,train=True,name="conv3_1")
pool3 = self.__pool_wrapper(conv3_1,name="pool3")
# 第四个卷积层
#with tf.compat.v1.variable_scope('conv4') as scope:
conv4_1 = self.__conv_wrapper(pool3,filters=256,train=True,name="conv4_1")
pool4 = self.__pool_wrapper(conv4_1,name="pool4")
#dense0 = tf.reshape(pool4, [-1, 6 * 6 * 128])
flatten = tf.layers.flatten(pool4)
# 防止过拟合,加入dropout
#re1 = tf.layers.dropout(inputs=re1, rate=0.5)
# 全连接层
#with tf.compat.v1.variable_scope('dense1') as scope:
dense1 = self.__dense_wrapper(flatten,512,name="dense1")
#with tf.compat.v1.variable_scope('dense2') as scope:
dense2 = self.__dense_wrapper(dense1,256,name="dense2")
#with tf.compat.v1.variable_scope('dense3') as scope:
logits = self.__dense_wrapper(dense2,self.n_classes,name="logits")
return logits
### 四个卷积层,两个全连接层,一个softmax层组成。
### 在每一层的卷积后面加入 batch_normalization, relu, 池化
### batch_normalization 层很好用,加了它之后,有效防止了梯度消失和爆炸,还加速了收敛。 | #!/home/iflyings/VSCode/venv/tensorflow-venv python
# -*- coding:utf-8 -*-
# Author: iflyings
import tensorflow as tf
class CnnNetwork:
def __init__(self, input_x, batch_size, n_classes, train):
self.name = "cnnnetwork"
self.input_x = input_x
self.batch_size = batch_size
self.n_classes = n_classes
self.train = train
def __conv_wrapper(self, input, filters=32, train=True, activation=tf.nn.relu, name="1"):
conv = tf.layers.conv2d(
inputs=input,
filters=filters,
kernel_size=[3, 3],
padding="same",
activation=activation,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
name="conv_"+name)
'''
bn = tf.layers.batch_normalization(conv,
momentum=0.9,
epsilon=1e-5,
scale=True,
training=train,
name="bn_"+name)
'''
return conv
def __pool_wrapper(self, input, name='1'):
return tf.layers.max_pooling2d(
inputs=input,
pool_size=[2, 2],
strides=2,
name="pool_"+name)
def __dense_wrapper(self, input, units, activation=tf.nn.relu, name='1'):
return tf.layers.dense(inputs=input,
units=units,
activation=activation,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003),
name="dense_"+name)
def create(self):
# 第一个卷积层
#with tf.compat.v1.variable_scope('conv1') as scope:
conv1_1 = self.__conv_wrapper(self.input_x,filters=32,train=True,name="conv1_1")
pool1 = self.__pool_wrapper(conv1_1,name="pool1")
# 第二个卷积层
#with tf.compat.v1.variable_scope('conv2') as scope:
conv2_1 = self.__conv_wrapper(pool1,filters=64,train=True,name="conv2_1")
pool2 = self.__pool_wrapper(conv2_1,name="pool2")
# 第三个卷积层
#with tf.compat.v1.variable_scope('conv3') as scope:
conv3_1 = self.__conv_wrapper(pool2,filters=128,train=True,name="conv3_1")
pool3 = self.__pool_wrapper(conv3_1,name="pool3")
# 第四个卷积层
#with tf.compat.v1.variable_scope('conv4') as scope:
conv4_1 = self.__conv_wrapper(pool3,filters=256,train=True,name="conv4_1")
pool4 = self.__pool_wrapper(conv4_1,name="pool4")
#dense0 = tf.reshape(pool4, [-1, 6 * 6 * 128])
flatten = tf.layers.flatten(pool4)
# 防止过拟合,加入dropout
#re1 = tf.layers.dropout(inputs=re1, rate=0.5)
# 全连接层
#with tf.compat.v1.variable_scope('dense1') as scope:
dense1 = self.__dense_wrapper(flatten,512,name="dense1")
#with tf.compat.v1.variable_scope('dense2') as scope:
dense2 = self.__dense_wrapper(dense1,256,name="dense2")
#with tf.compat.v1.variable_scope('dense3') as scope:
logits = self.__dense_wrapper(dense2,self.n_classes,name="logits")
return logits
### 四个卷积层,两个全连接层,一个softmax层组成。
### 在每一层的卷积后面加入 batch_normalization, relu, 池化
### batch_normalization 层很好用,加了它之后,有效防止了梯度消失和爆炸,还加速了收敛。 | en | 0.447688 | #!/home/iflyings/VSCode/venv/tensorflow-venv python # -*- coding:utf-8 -*- # Author: iflyings bn = tf.layers.batch_normalization(conv, momentum=0.9, epsilon=1e-5, scale=True, training=train, name="bn_"+name) # 第一个卷积层 #with tf.compat.v1.variable_scope('conv1') as scope: # 第二个卷积层 #with tf.compat.v1.variable_scope('conv2') as scope: # 第三个卷积层 #with tf.compat.v1.variable_scope('conv3') as scope: # 第四个卷积层 #with tf.compat.v1.variable_scope('conv4') as scope: #dense0 = tf.reshape(pool4, [-1, 6 * 6 * 128]) # 防止过拟合,加入dropout #re1 = tf.layers.dropout(inputs=re1, rate=0.5) # 全连接层 #with tf.compat.v1.variable_scope('dense1') as scope: #with tf.compat.v1.variable_scope('dense2') as scope: #with tf.compat.v1.variable_scope('dense3') as scope: ### 四个卷积层,两个全连接层,一个softmax层组成。 ### 在每一层的卷积后面加入 batch_normalization, relu, 池化 ### batch_normalization 层很好用,加了它之后,有效防止了梯度消失和爆炸,还加速了收敛。 | 2.640025 | 3 |
benchmarks/cec2013/cec2013_convert.py | buctlab/NIO | 4 | 6613117 | from numpy import zeros
from benchmarks.benchmark import Benchmark
from benchmarks.cec2013.cec2013.cec2013 import CEC2013
class CEC2013Convert(Benchmark):
def __init__(self, i):
self.f = CEC2013(i)
dimension = self.f.get_dimension()
min_values, max_values = [0] * dimension, [0] * dimension
for k in range(dimension):
min_values[k] = self.f.get_lbound(k)
max_values[k] = self.f.get_ubound(k)
super(CEC2013Convert, self).__init__(min_values, max_values, dimension)
def get_optimum(self):
# [[], [], ..., []], min_value
# print(__functions_[1])
return self.f.get_no_goptima(), self.f.get_fitness_goptima()
def eval(self, sol):
return self.f.evaluate(sol)
def evaluate(self, sol):
return self.eval(sol)
def get_rho(self):
return self.f.get_rho()
def get_fitness_goptima(self):
return self.f.get_fitness_goptima()
def get_no_goptima(self):
return self.f.get_no_goptima()
if __name__ == '__main__':
for i in range(1, 21):
f = CEC2013Convert(i)
# f.plot(scale=0.32)
no_optimum, goptima = f.get_optimum()
print("f", i, ":", no_optimum, goptima)
arr = [1] * f.dimension
print("val=", f.eval(arr))
| from numpy import zeros
from benchmarks.benchmark import Benchmark
from benchmarks.cec2013.cec2013.cec2013 import CEC2013
class CEC2013Convert(Benchmark):
def __init__(self, i):
self.f = CEC2013(i)
dimension = self.f.get_dimension()
min_values, max_values = [0] * dimension, [0] * dimension
for k in range(dimension):
min_values[k] = self.f.get_lbound(k)
max_values[k] = self.f.get_ubound(k)
super(CEC2013Convert, self).__init__(min_values, max_values, dimension)
def get_optimum(self):
# [[], [], ..., []], min_value
# print(__functions_[1])
return self.f.get_no_goptima(), self.f.get_fitness_goptima()
def eval(self, sol):
return self.f.evaluate(sol)
def evaluate(self, sol):
return self.eval(sol)
def get_rho(self):
return self.f.get_rho()
def get_fitness_goptima(self):
return self.f.get_fitness_goptima()
def get_no_goptima(self):
return self.f.get_no_goptima()
if __name__ == '__main__':
for i in range(1, 21):
f = CEC2013Convert(i)
# f.plot(scale=0.32)
no_optimum, goptima = f.get_optimum()
print("f", i, ":", no_optimum, goptima)
arr = [1] * f.dimension
print("val=", f.eval(arr))
| en | 0.227447 | # [[], [], ..., []], min_value # print(__functions_[1]) # f.plot(scale=0.32) | 2.577934 | 3 |
Weverse/models/post.py | MujyKun/Weverse | 7 | 6613118 | from typing import Optional, List
from . import Video, Photo, Artist, Comment
class Post:
r"""A Post object that represents a Weverse Post.
It is not suggested to create a Post manually, but rather through the
following method: :class:`Weverse.objects.create_post_objects`
The information retrieved on a Post is directly from the Weverse API and altered to fit this class.
.. container:: operations
.. describe:: x == y
Checks if two Post objects have the same ID.
.. describe:: x != y
Checks if two Post objects do not have the same ID.
.. describe:: str(x)
Returns the Post body message.
.. describe:: len(x)
Returns the amount of images (not videos) available.
Parameters
----------
id: int
The ID of the post.
community_tab_id: int
The tab the post is under.
type: str
The type of Post.
body: str
Body Message on the Post.
comment_count: int
Current amount of comments on the Post
like_count: int
Current amount of likes on the Post
max_comment_count: int
Maximum amount of comments that can be on the Post
has_my_like: bool
If the client user has the post liked.
has_my_bookmark: bool
If the client user has the post bookmarked.
created_at:
When the post was created
updated_at:
When the post was last modified.
is_locked: bool
Whether the post is locked.
is_blind: bool
Whether the post is visible?? Unknown
is_active: bool
Whether the post is active.
is_private: bool
Whether the post is private.
photos: List[:ref:`Photo`]
A list of photos under the post.
videos: List[:ref:`Video`]
A list of videos under the post.
is_hot_trending_post: bool
If the post is trending.
is_limit_comment: bool
If the comments are limited.
artist_comments: List[:ref:`Comment`]
The Artist comments under the post.
community_artist_id: int
The Community Artist ID that made the post.
artist_id: int
The ID of the Artist that made the post.
Attributes
-----------
id: int
The ID of the post.
community_tab_id: int
The tab the post is under.
type: str
The type of Post.
body: str
Body Message on the Post.
comment_count: int
Current amount of comments on the Post
like_count: int
Current amount of likes on the Post
max_comment_count: int
Maximum amount of comments that can be on the Post
has_my_like: bool
If the client user has the post liked.
has_my_bookmark: bool
If the client user has the post bookmarked.
created_at:
When the post was created
updated_at:
When the post was last modified.
is_locked: bool
Whether the post is locked.
is_blind: bool
Whether the post is visible?? Unknown
is_active: bool
Whether the post is active.
is_private: bool
Whether the post is private.
photos: List[:ref:`Photo`]
A list of photos under the post.
videos: List[:ref:`Video`]
A list of videos under the post.
is_hot_trending_post: bool
If the post is trending.
is_limit_comment: bool
If the comments are limited.
artist_comments: List[:ref:`Comment`]
The Artist comments under the post.
community_artist_id: int
The Community Artist ID that made the post.
artist_id: int
The ID of the Artist that made the post.
artist: Artist
The Artist Object the post belongs to.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('post_id')
self.community_tab_id = kwargs.get('community_tab_id')
self.type = kwargs.get('post_type')
self.body = kwargs.get('body')
self.comment_count = kwargs.get('comment_count')
self.like_count = kwargs.get('like_count')
self.max_comment_count = kwargs.get('max_comment_count')
self.has_my_like = kwargs.get('has_my_like')
self.has_my_bookmark = kwargs.get('has_my_bookmark')
self.created_at = kwargs.get('created_at')
self.updated_at = kwargs.get('updated_at')
self.is_locked = kwargs.get('is_locked')
self.is_blind = kwargs.get('is_blind')
self.is_active = kwargs.get('is_active')
self.is_private = kwargs.get('is_private')
self.photos: List[Photo] = kwargs.get('photos')
self.videos: List[Video] = kwargs.get('videos')
self.is_hot_trending_post = kwargs.get('is_hot_trending_post')
self.is_limit_comment = kwargs.get('is_limit_comment')
self.artist_comments: Optional[List[Comment]] = kwargs.get('artist_comments')
self.community_artist_id = kwargs.get('community_artist_id')
self.artist_id = kwargs.get('artist_id')
self.artist: Optional[Artist] = None
def __eq__(self, other):
"""Check if the IDs of the Post objects are equal."""
if not isinstance(other, Post):
return NotImplemented
return self.id == other.id
def __ne__(self, other):
"""Check if the IDs of the Post objects are not equal."""
return not self == other
def __str__(self):
"""Returns the Post body message."""
return f"{self.body}"
def __len__(self):
"""Returns the amount of images (not videos) available."""
return len(self.photos)
| from typing import Optional, List
from . import Video, Photo, Artist, Comment
class Post:
r"""A Post object that represents a Weverse Post.
It is not suggested to create a Post manually, but rather through the
following method: :class:`Weverse.objects.create_post_objects`
The information retrieved on a Post is directly from the Weverse API and altered to fit this class.
.. container:: operations
.. describe:: x == y
Checks if two Post objects have the same ID.
.. describe:: x != y
Checks if two Post objects do not have the same ID.
.. describe:: str(x)
Returns the Post body message.
.. describe:: len(x)
Returns the amount of images (not videos) available.
Parameters
----------
id: int
The ID of the post.
community_tab_id: int
The tab the post is under.
type: str
The type of Post.
body: str
Body Message on the Post.
comment_count: int
Current amount of comments on the Post
like_count: int
Current amount of likes on the Post
max_comment_count: int
Maximum amount of comments that can be on the Post
has_my_like: bool
If the client user has the post liked.
has_my_bookmark: bool
If the client user has the post bookmarked.
created_at:
When the post was created
updated_at:
When the post was last modified.
is_locked: bool
Whether the post is locked.
is_blind: bool
Whether the post is visible?? Unknown
is_active: bool
Whether the post is active.
is_private: bool
Whether the post is private.
photos: List[:ref:`Photo`]
A list of photos under the post.
videos: List[:ref:`Video`]
A list of videos under the post.
is_hot_trending_post: bool
If the post is trending.
is_limit_comment: bool
If the comments are limited.
artist_comments: List[:ref:`Comment`]
The Artist comments under the post.
community_artist_id: int
The Community Artist ID that made the post.
artist_id: int
The ID of the Artist that made the post.
Attributes
-----------
id: int
The ID of the post.
community_tab_id: int
The tab the post is under.
type: str
The type of Post.
body: str
Body Message on the Post.
comment_count: int
Current amount of comments on the Post
like_count: int
Current amount of likes on the Post
max_comment_count: int
Maximum amount of comments that can be on the Post
has_my_like: bool
If the client user has the post liked.
has_my_bookmark: bool
If the client user has the post bookmarked.
created_at:
When the post was created
updated_at:
When the post was last modified.
is_locked: bool
Whether the post is locked.
is_blind: bool
Whether the post is visible?? Unknown
is_active: bool
Whether the post is active.
is_private: bool
Whether the post is private.
photos: List[:ref:`Photo`]
A list of photos under the post.
videos: List[:ref:`Video`]
A list of videos under the post.
is_hot_trending_post: bool
If the post is trending.
is_limit_comment: bool
If the comments are limited.
artist_comments: List[:ref:`Comment`]
The Artist comments under the post.
community_artist_id: int
The Community Artist ID that made the post.
artist_id: int
The ID of the Artist that made the post.
artist: Artist
The Artist Object the post belongs to.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('post_id')
self.community_tab_id = kwargs.get('community_tab_id')
self.type = kwargs.get('post_type')
self.body = kwargs.get('body')
self.comment_count = kwargs.get('comment_count')
self.like_count = kwargs.get('like_count')
self.max_comment_count = kwargs.get('max_comment_count')
self.has_my_like = kwargs.get('has_my_like')
self.has_my_bookmark = kwargs.get('has_my_bookmark')
self.created_at = kwargs.get('created_at')
self.updated_at = kwargs.get('updated_at')
self.is_locked = kwargs.get('is_locked')
self.is_blind = kwargs.get('is_blind')
self.is_active = kwargs.get('is_active')
self.is_private = kwargs.get('is_private')
self.photos: List[Photo] = kwargs.get('photos')
self.videos: List[Video] = kwargs.get('videos')
self.is_hot_trending_post = kwargs.get('is_hot_trending_post')
self.is_limit_comment = kwargs.get('is_limit_comment')
self.artist_comments: Optional[List[Comment]] = kwargs.get('artist_comments')
self.community_artist_id = kwargs.get('community_artist_id')
self.artist_id = kwargs.get('artist_id')
self.artist: Optional[Artist] = None
def __eq__(self, other):
"""Check if the IDs of the Post objects are equal."""
if not isinstance(other, Post):
return NotImplemented
return self.id == other.id
def __ne__(self, other):
"""Check if the IDs of the Post objects are not equal."""
return not self == other
def __str__(self):
"""Returns the Post body message."""
return f"{self.body}"
def __len__(self):
"""Returns the amount of images (not videos) available."""
return len(self.photos)
| en | 0.824892 | A Post object that represents a Weverse Post. It is not suggested to create a Post manually, but rather through the following method: :class:`Weverse.objects.create_post_objects` The information retrieved on a Post is directly from the Weverse API and altered to fit this class. .. container:: operations .. describe:: x == y Checks if two Post objects have the same ID. .. describe:: x != y Checks if two Post objects do not have the same ID. .. describe:: str(x) Returns the Post body message. .. describe:: len(x) Returns the amount of images (not videos) available. Parameters ---------- id: int The ID of the post. community_tab_id: int The tab the post is under. type: str The type of Post. body: str Body Message on the Post. comment_count: int Current amount of comments on the Post like_count: int Current amount of likes on the Post max_comment_count: int Maximum amount of comments that can be on the Post has_my_like: bool If the client user has the post liked. has_my_bookmark: bool If the client user has the post bookmarked. created_at: When the post was created updated_at: When the post was last modified. is_locked: bool Whether the post is locked. is_blind: bool Whether the post is visible?? Unknown is_active: bool Whether the post is active. is_private: bool Whether the post is private. photos: List[:ref:`Photo`] A list of photos under the post. videos: List[:ref:`Video`] A list of videos under the post. is_hot_trending_post: bool If the post is trending. is_limit_comment: bool If the comments are limited. artist_comments: List[:ref:`Comment`] The Artist comments under the post. community_artist_id: int The Community Artist ID that made the post. artist_id: int The ID of the Artist that made the post. Attributes ----------- id: int The ID of the post. community_tab_id: int The tab the post is under. type: str The type of Post. body: str Body Message on the Post. 
comment_count: int Current amount of comments on the Post like_count: int Current amount of likes on the Post max_comment_count: int Maximum amount of comments that can be on the Post has_my_like: bool If the client user has the post liked. has_my_bookmark: bool If the client user has the post bookmarked. created_at: When the post was created updated_at: When the post was last modified. is_locked: bool Whether the post is locked. is_blind: bool Whether the post is visible?? Unknown is_active: bool Whether the post is active. is_private: bool Whether the post is private. photos: List[:ref:`Photo`] A list of photos under the post. videos: List[:ref:`Video`] A list of videos under the post. is_hot_trending_post: bool If the post is trending. is_limit_comment: bool If the comments are limited. artist_comments: List[:ref:`Comment`] The Artist comments under the post. community_artist_id: int The Community Artist ID that made the post. artist_id: int The ID of the Artist that made the post. artist: Artist The Artist Object the post belongs to. Check if the IDs of the Post objects are equal. Check if the IDs of the Post objects are not equal. Returns the Post body message. Returns the amount of images (not videos) available. | 3.080465 | 3 |
smilesmerge/operators/crossover/smiles_merge/merge_functions/mapping_class.py | Jacob-Spiegel/SMILESMerge | 1 | 6613119 | <filename>smilesmerge/operators/crossover/smiles_merge/merge_functions/mapping_class.py
"""
This script holds the Mapping class.
This is used when mapping most common substructure (MCS)
to combine two molecules.
"""
import __future__
import random
import copy
class Mapping(object):
    """
    Bookkeeping for choosing B-groups when merging two parent ligands via
    their most common substructure (MCS).

    # Notes on terminology:
        -most common substructure (MCS): the substructure shared between
            the two parent ligands.
        -node/anchor/I: an atom in the MCS which has 1 or more atom(s)
            connected to it which are not part of the MCS. Anchors are
            labeled by their isotope numbers (10,000 or higher) because
            isotope labels, unlike atom Idx, are not modified by many
            RDKit functions; the same label is applied to the MCS and to
            ligands 1 and 2 so everything is trackable.
        -R-group: a chain of one or more atoms connected to a single node.
            If an anchor has dimethyls which are not part of the MCS then
            each methyl is considered its own R-group.
        -B-group: 1 or more R-groups which branch off a single node. The
            dimethyls above, taken together, form a single B-group.

    B-group naming scheme: '{first_number}B{second_number}'
        -first_number: the parent ligand the B-group is derived from.
        -second_number: the order (indexed to 1) in which that B-group was
            determined when condensing the R-groups into B-groups.
        ie) 1B1 is the 1st B-group from parent ligand 1
            1B2 is the 2nd B-group from parent ligand 1
            2B1 is the 1st B-group from parent ligand 2

    This class handles the mapping between Bs and Is and makes every
    B-group choice used to build a child molecule. That matters because a
    B-group connected to more than one anchor determines the selection at
    every anchor it touches:
    ie) if 1B1 connects to anchors 10003 and 10004, and 2B1 connects to
        10003 and 10005, then the B-group chosen for anchor 10003
        constrains the viable options for anchors 10004 and 10005 too.
    """

    def __init__(self, b_to_is, i_to_bs):
        """
        Store deep copies of the two input dictionaries so that the
        destructive pruning done by chose_b_from_i() never mutates the
        caller's data.

        Inputs:
        :param dict b_to_is: Dictionary converting B-groups to
            anchor/node/I atoms, for both parent molecules; the inverse of
            i_to_bs. Keys are B-groups; items are anchor isotope labels.
            ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003],
                 '2B1': [10002]}
        :param dict i_to_bs: Dictionary converting anchor/node/I atoms to
            the corresponding B-groups, for both parent molecules; the
            inverse of b_to_is. Keys are anchor isotope labels; items are
            B-groups. ie) {10002: ['2B1'], 10003: ['1B1', '2B2'],
            10005: ['2B3']}
        """
        self.b_to_is = copy.deepcopy(b_to_is)  # B-I mapping dictionary
        self.i_to_bs = copy.deepcopy(i_to_bs)  # I-B mapping dictionary

    def locate_b(self, i):
        """
        Given a specified anchor/I, return a list of all the B-groups from
        both parent ligands bound to that anchor.

        Inputs:
        :param int i: the isolabel of an anchor atom to look up in
            self.i_to_bs. Raises KeyError if absent.

        Returns:
        :returns: list self.i_to_bs[i]: all B-groups, from both parents,
            bound to that anchor. ie) ['1B1', '2B1']
        """
        return self.i_to_bs[i]

    def locate_i(self, b):
        """
        Given a specified B-group, return the anchor/I/node(s) it
        connects to.

        Inputs:
        :param str b: the name of a B-group within self.b_to_is.
            Raises KeyError if absent.

        Returns:
        :returns: list self.b_to_is[b]: the anchor isotope label(s) the
            given B-group is bound to. ie) [10001]
        """
        return self.b_to_is[b]

    def delete_b(self, b):
        """
        Remove B-group b entirely: delete its key from b_to_is and remove
        every reference to it from the item lists of i_to_bs.

        Inputs:
        :param str b: the B-group to remove.
        """
        for i in self.locate_i(b):
            # list.remove() mutates in place and returns None, so the
            # return value is intentionally discarded.
            self.i_to_bs[i].remove(b)
        del self.b_to_is[b]

    def delete_i(self, i):
        """
        Remove anchor i entirely: delete its key from i_to_bs and remove
        every reference to it from the item lists of b_to_is.

        Inputs:
        :param int i: an integer isolabel of the anchor/node/i atom to
            remove.
        """
        for b in self.locate_b(i):
            self.b_to_is[b].remove(i)
        del self.i_to_bs[i]

    def chose_b_from_i(self, i):
        """
        Choose the B-group that anchor i will carry in the child molecule,
        then prune every anchor and B-group that this choice determines.

        Current implementation has no null choice options: if an anchor
        has only one candidate B-group it must take it, even when that
        forces future decisions at other anchors the B-group touches.
        Consequently, when one parent has a B-group bound to an anchor and
        the other parent has nothing there, the B-group is always kept.
        Lack of a B-group bound to an anchor is not considered a B-group.

        Inputs:
        :param int i: an integer isolabel of an anchor/node/i atom to
            decide a B-group for.

        Returns:
        :returns: str b_x: the name of the chosen B-group; the string
            "None" when i is no longer in the dictionaries or has no
            candidates left.
        """
        # Developer notes -- alternative designs, not implemented:
        # The no-Null policy biases toward larger child molecules and
        # weights against multi-connection B-groups (one decision at one
        # node can eliminate them everywhere).
        # 1) Dominant Nulls: add a permanent Null option wherever an
        #    anchor has only one candidate; best added at the R- to
        #    B-group consolidation step so no extra handling is needed
        #    here. Easier to implement, but a single decision more
        #    dramatically impacts chains with many connections.
        # 2) Recessive Nulls: a Null may be chosen but is overridden if
        #    the skipped multi-anchor B-group is later chosen at another
        #    anchor. More balanced, but makes the decision tree and the
        #    code considerably more complicated.
        # There is no right answer; the current approach is the most code-
        # and compute-efficient, with no distinct preference for
        # multi-chain B-groups, but with some bias against shrinking the
        # child molecule.
        if i not in self.i_to_bs:
            # Anchor was already settled (pruned) by an earlier choice.
            return "None"
        options = self.locate_b(i)
        if not options:
            # Anchor still exists but all of its candidates were pruned.
            return "None"
        b_x = options[0] if len(options) == 1 else random.choice(options)

        # Every anchor b_x touches is now determined, and every B-group
        # competing at any of those anchors is now impossible; must be
        # computed before any deletion.
        determined_anchors = self.locate_i(b_x)
        bs_to_remove = set()
        for anchor in determined_anchors:
            bs_to_remove.update(self.locate_b(anchor))
        for b in bs_to_remove:
            self.delete_b(b)
        for anchor in determined_anchors:
            self.delete_i(anchor)
        return b_x

    def testing_function_return_self_dicts(self):
        """
        Return the internal dictionaries (testing hook).

        Returns:
        :returns: dict b_to_is: current B-group -> anchor labels mapping.
            ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003],
                 '2B1': [10002]}
        :returns: dict i_to_bs: current anchor label -> B-groups mapping.
            ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']}
        """
        return self.b_to_is, self.i_to_bs
# i_dict = {10000: ['1B1', '2B1'], 10004: ['2B2'], 10005: ['2B3'], 10006: \
# ['2B4'], 10007: ['1B3'], 10008: ['1B2']}
# b_dict = {'1B1': [10000], '1B2': [10008], '1B3': [10007], '2B4': [10006], \
# '2B3': [10005], '2B2': [10004], '2B1': [10000]}
def run_mapping(b_dict, i_dict):
    """
    Run the Mapping class to determine which B-groups/R-groups will be
    appended in SmileMerge.

    Inputs:
    :param dict b_dict: Dictionary converting B-groups to anchor/node/I
        atoms, for both parent molecules; the inverse of i_dict. Keys are
        B-groups; items are anchor isotope labels. ie) {'1B1': [10003],
        '2B3': [10005], '2B2': [10003], '2B1': [10002]}
    :param dict i_dict: Dictionary converting anchor/node/I atoms to the
        corresponding B-groups, for both parent molecules; the inverse of
        b_dict. Keys are anchor isotope labels; items are B-groups.
        ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']}

    Returns:
    :returns: list bs_chosen: all chosen B-groups to be used to generate
        a child molecule later (deduplicated, "None" placeholders
        dropped).
    """
    a_mapping_object = Mapping(b_dict, i_dict)
    bs_chosen = [a_mapping_object.chose_b_from_i(i) for i in i_dict]
    # Deduplicate, then drop the "None" placeholders returned for anchors
    # that were already settled by an earlier choice. Filtering into a new
    # list avoids the original's remove-while-iterating anti-pattern.
    return [b for b in set(bs_chosen) if b != "None"]
#
#
| <filename>smilesmerge/operators/crossover/smiles_merge/merge_functions/mapping_class.py
"""
This script holds the Mapping class.
This is used when mapping most common substructure (MCS)
to combine two molecules.
"""
import __future__
import random
import copy
class Mapping(object):
    """
    Tracks which B-groups attach to which anchor atoms when merging two
    parent ligands through their most common substructure (MCS), and
    performs all B-group selections for the child molecule.

    Terminology:
        MCS  -- the substructure shared between the two parent ligands.
        node/anchor/I -- an MCS atom with 1+ attached atoms outside the
            MCS. Anchors are labeled via isotope numbers (>= 10,000),
            which, unlike atom Idx, survive many RDKit operations; the
            same label is applied to the MCS and both ligands so
            everything stays trackable.
        R-group -- a chain of one or more atoms hanging off a single
            node (dimethyls off one anchor are two separate R-groups).
        B-group -- all R-groups branching off a single node taken
            together (the dimethyls above form one B-group).

    Naming: '{parent}B{index}', e.g. 1B2 is the second B-group found for
    parent ligand 1; 2B1 is the first found for parent ligand 2 (indexed
    to 1).

    All B-group decisions live here because a B-group touching several
    anchors fixes the selection at every one of them: if 1B1 spans
    anchors 10003/10004 and 2B1 spans 10003/10005, the decision made at
    10003 constrains 10004 and 10005 as well.
    """

    def __init__(self, b_to_is, i_to_bs):
        """
        Keep private deep copies of both lookup tables so the pruning
        performed during selection never mutates the caller's dicts.

        Inputs:
        :param dict b_to_is: B-group name -> list of anchor isotope
            labels (both parents); inverse of i_to_bs.
            ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003],
                 '2B1': [10002]}
        :param dict i_to_bs: anchor isotope label -> list of B-group
            names (both parents); inverse of b_to_is.
            ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']}
        """
        self.b_to_is = copy.deepcopy(b_to_is)  # B -> I table (private copy)
        self.i_to_bs = copy.deepcopy(i_to_bs)  # I -> B table (private copy)

    def locate_b(self, i):
        """
        Look up every B-group (either parent) bound to anchor i.

        Inputs:
        :param int i: isotope label of an anchor atom.

        Returns:
        :returns: list: B-groups bound to that anchor, ie) ['1B1', '2B1']
        """
        return self.i_to_bs[i]

    def locate_i(self, b):
        """
        Look up the anchor(s) that B-group b connects to.

        Inputs:
        :param str b: a B-group name present in self.b_to_is.

        Returns:
        :returns: list: anchor isotope labels b is bound to, ie) [10001]
        """
        return self.b_to_is[b]

    def delete_b(self, b):
        """
        Drop B-group b: purge it from every anchor's candidate list and
        delete its own entry.

        Inputs:
        :param str b: the B-group to remove.
        """
        for anchor in self.locate_i(b):
            self.i_to_bs[anchor].remove(b)
        del self.b_to_is[b]

    def delete_i(self, i):
        """
        Drop anchor i: purge it from every B-group's anchor list and
        delete its own entry.

        Inputs:
        :param int i: isotope label of the anchor atom to remove.
        """
        for group in self.locate_b(i):
            self.b_to_is[group].remove(i)
        del self.i_to_bs[i]

    def chose_b_from_i(self, i):
        """
        Decide which B-group anchor i carries in the child molecule, then
        prune everything that decision settles.

        No null choices exist in this implementation: an anchor with a
        single candidate must take it, even when that forces the outcome
        at other anchors the chosen B-group spans. An anchor with a
        B-group from only one parent therefore always keeps that group.

        (Design alternatives -- Dominant Nulls, where a permanent Null
        option is injected for single-candidate anchors, and Recessive
        Nulls, where a Null can later be overridden -- were considered
        but rejected as either biasing against multi-connection B-groups
        or overcomplicating the decision tree. The current approach is
        the simplest and cheapest, at the cost of never shrinking the
        child molecule.)

        Inputs:
        :param int i: isotope label of the anchor to decide.

        Returns:
        :returns: str: the chosen B-group name, or the string "None" when
            the anchor is gone from the tables or has no candidates left.
        """
        if i not in self.i_to_bs:
            return "None"
        candidates = self.locate_b(i)
        if not candidates:
            return "None"
        # Single candidate: forced pick. Multiple: pick at random.
        chosen = candidates[0] if len(candidates) == 1 else random.choice(candidates)

        # The chosen group settles every anchor it spans, which in turn
        # eliminates every competing B-group at those anchors. Snapshot
        # the spanned anchors before mutating anything.
        settled_anchors = self.locate_i(chosen)
        doomed_groups = set()
        for anchor in settled_anchors:
            doomed_groups.update(self.locate_b(anchor))
        for group in doomed_groups:
            self.delete_b(group)
        for anchor in settled_anchors:
            self.delete_i(anchor)
        return chosen

    def testing_function_return_self_dicts(self):
        """
        Expose the internal tables (testing hook).

        Returns:
        :returns: dict: current B-group -> anchor labels table.
        :returns: dict: current anchor label -> B-groups table.
        """
        return self.b_to_is, self.i_to_bs
# i_dict = {10000: ['1B1', '2B1'], 10004: ['2B2'], 10005: ['2B3'], 10006: \
# ['2B4'], 10007: ['1B3'], 10008: ['1B2']}
# b_dict = {'1B1': [10000], '1B2': [10008], '1B3': [10007], '2B4': [10006], \
# '2B3': [10005], '2B2': [10004], '2B1': [10000]}
def run_mapping(b_dict, i_dict):
    """
    Drive the Mapping class to pick the B-groups/R-groups that
    SmileMerge will append to the child molecule.

    Inputs:
    :param dict b_dict: B-group name -> anchor isotope labels, for both
        parents; inverse of i_dict. ie) {'1B1': [10003], '2B3': [10005],
        '2B2': [10003], '2B1': [10002]}
    :param dict i_dict: anchor isotope label -> B-group names, for both
        parents; inverse of b_dict. ie) {10002: ['2B1'],
        10003: ['1B1', '2B2'], 10005: ['2B3']}

    Returns:
    :returns: list: the chosen B-groups (deduplicated, with the "None"
        placeholder dropped) to be used when generating a child molecule.
    """
    mapper = Mapping(b_dict, i_dict)
    picks = [mapper.chose_b_from_i(anchor) for anchor in i_dict]
    # Deduplicate; anchors settled by an earlier choice return "None",
    # which at most appears once after the set() pass and is discarded.
    deduped = list(set(picks))
    if "None" in deduped:
        deduped.remove("None")
    return deduped
#
#
| en | 0.875024 | This script holds the Mapping class. This is used when mapping most common substructure (MCS) to combine two molecules. # Notes on terminology: -most common substructure (MCS): The substructure shared between the two parent ligands -node/anchor/I: an atom in the MCS which has 1 or more atom(s) connected to it which are not part of the MCS -the anchors are labeled by their Isotope numbers as those do not get modified, where as atom Idx are modified by many Rdkit functions. Anchors have Isotope labels of 10,000 or higher and that label is applied to the MCS and ligands 1 and 2 so everything is trackable. -R-group: a chain of one or more atoms connected to a single node -if an anchor has dimethyl's which are not part of MCS then each methyl is considered its own R-group -B-group: 1 or more R-groups which branch off a single node. -if an anchor has dimethyl's which are not part of MCS then the combination of both methyls is considered a single B-group. B-group Naming scheme: '{first_number}B{second_number}' -first_number: the number before the B corresponds to the parent ligand from which the B-group is derived. -second_number: the number which follows the B is the order for which that B-group was determined when condensing the R-groups into B-groups. Numbering is indexed to 1. So the 1st three B groups for parent ligand 1 are: 1B1,1B2,1B3 ie) 1B1 is the 1st B-group from parent ligand 1 1B2 is the second B-group from parent ligand 1 2B1 is the 1st B-group from parent ligand 2 2B2 is the second B-group from parent ligand 2 This class handles mapping for Bs and Is to chose B-groups which will later be used to make a child molecule. All the choices for B-groups are handled here. This is important because if a B-group connects to more than one anchor atom, the selection of that B-group determines the selection of both anchor atoms. 
ie) if 1B1 connects to anchor atom 10003 and 10004; and 2B1 connects to 10003 and 10005 then the decision for which B-group is chosen for anchor 10003 determines the options which will be viable for anchor atoms 10003,10004, and 10005. These type of decisions are handled by this class. When a Mapping object is initialized, it imports 2 input dictionaries, which can be referenced throughout the class. Inputs: :param dict b_to_is: Dictionary converting B-groups to anchor/node/I atoms. This contains groups from both parent molecules. This is the inverse of i_to_bs. keys are B-groups; items are the anchor atoms isotope label. ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003], '2B1': [10002]} :param dict i_to_bs: Dictionary converting Anchor/node/I atoms to corresponding B-groups. This contains groups from both parent molecules. This is the inverse of b_to_is. keys are the anchor atoms isotope labels; items are B-groups ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']} # B-I mapping dictionary from outside class # I-B mapping dictionary from outside class Given a specified anchor/I return a list of all the B-groups from both parent ligands, bound to that anchor Inputs: :param int i: the isolabel of an anchor atom which will be used to search for B-groups within self.i_to_bs. Returns :returns: list self.i_to_bs[i]: A list of all the B-groups, from both parent ligands which are bound to that anchor. ie) ['1B1','2B1'] # Given a specified B-group return the anchor/I/node it connects to. Inputs: :param str b: the name of a B-groups within self.b_to_is. Returns :returns: list self.b_to_is[b]: A list of the anchor the given B-groups is bound. ie) [10001] # Removes the b from b_to_is and all references to b in i_to_bs. b is a Key in b_to_is. B is one or more items in i_to_bs. Inputs: :param str b: A B-group to be removed from the b_to_is and B in i_to_bs dicts. # Removes the i from i_to_bs and all references to i in b_to_is. i is a Key in i_to_bs. 
i is one or more items in b_to_is. Inputs: :param int i: An interger representing the isolabel for an anchor/node/i atom to be removed from the b_to_is and b in i_to_bs dicts. # Chose your B from a given i. This makes the decision which B-group will be chosen for a specific i. Current implementation is that there are no null choice options. ie. if an anchor has only 1 option to pick from then it must pick that B-group. It can not chose nothing or to leave it blank, even if choosing that B-group forces the future decision because of it's connections to anchors which have yet to have B-group decisions. this has bearings on B-groups which connect to multiple anchors as well as on anchors which have B-groups from only one parent ligand, but the other parent has nothing connected to that anchor. in the case of one parent having a B-group attached to an anchor but nothing attached to the anchor for the other parent, this implementation will always chose to keep the B-group and never can leave it blank. ie (if 1B1 is connected to multiple anchors) Lack of an B-groups bound to an anchor is not considered a B-group Inputs: :param int i: An interger representing the isolabel for an anchor/node/i atom. This function choses which B-group will be bound to this anchor in the child molecule. Returns: :returns: str b_x: A string of the name of a chosen B-group; None if not in the dictionary or if there is no available choices # Developers Notes # Current implementation has no Null/None as choices this means that # if one parent has a B-group bound to anchor and the other has # nothing bound to that anchor, then the program will always chose to # add the B-group, resulting in a larger child molecule. # This biases the output. It also results in B-groups with multiple # connections are weighted against because 1 decision on 1 node will # determine if they can't be chosen... 
# Two alternative implementations which could be added are listed # below, both with advantages and disadvantages: # 1) Dominant Nulls: (Null is an option which cannot be override) # When we reach an anchor which only has only one B-group choice then # we add a Null B-group. This Null group means nothing can be added # to that anchor. # This could be implemented in this step in mapping_class.py or this # could be implemented at the R-groups to B-group consolidation step # -implemented it at the R-group to B-group consolidation may be a # better option because it will simplify issues of when is it # appropriate to add Null. # ie) if parent lig_1 has 1B1 attached to anchors 10000,10001,10002 # 1B2 attached to anchors 10003 # if parent lig_2 has 2B1 at 10000 and 10003 # 2B at 10002 # if 1B1 is chosen 1st using anchor 10000, then anchors 10000,10001,10002 # are determined # THIS ALSO MEANS THAT 2B1 is impossible eliminating 2B1 # from an option for anchor 10003 # When the program needs to make a decision for anchor # 10003 what should its option be????: # - It should have to chose 1B2 # IF WE implement THE NULLS IN THIS PART OF THE CODE WE # WOULD HAVE TO CODE IN THAT AS A CONSIDERATION IF WE # implement NULLS AT THE R- TO B-GROUP CONSOLIDATION PHASE # WE WOULDN'T NEED TO ADD EXTRA CODE HERE TO PREVENT A NULL # FROM BEING ADDED # If a dominant null group (which is only added when an anchor has no # R-groups attached for 1 parent but some on the other) then when the # decision for B-group is occuring here; if a null is chosen then # nothing can be added # Effects: # 1) easier to implement # 2) affects the weighting of multi connection point B-groups -A # single decision more dramatically impacts chains with many # connections # 3) a Null is permanent so its easier to code and process # 2) Recessive Nulls: (A Null can be chosen but it can be overriden) # (soft Null). 
If a recessive null is chosen instead of a B-group # with multiple connections to the MCS then the B-group which # wasn't chosen does not get removed from the dictionaries. # -Then the next anchor that the not chosen B-group is connected # to is assessed, there is still the option to chose that group. # -If that group is chosen then we write over the Null option. # -If that group is not chosen then the null remains as the choice # for the 1st anchor # Effects: # -Recessive Nulls favors the selection of B-groups with multiple # connections, but still allows for a null to be chosen. # -A more balanced option between No-Nulls (the current # implementation) and Dominant Nulls. But this does bias the # statistics of choices # -this also makes the decision tree more complicated and makes # coding this more difficult # There's no right answer to this but there are certain pro's and # con's to each approach. The current approach is justified as the # most code and computational efficient method, with no distict # preference for multi-chain B-groups, but certainly with some biases # against shrinking the child molecule # Select an B to keep # convert list to set to list to remove redundant B's # delete the B's and I's # the i is not in list(self.i_to_bs.keys()) # return the string "None" # Return the properties: self.b_to_is and self.i_to_bs Returns: :returns: dict b_to_is: Dictionary converting B-groups to anchor/node/I atoms. This contains groups from both parent molecules. This is the inverse of i_to_bs. keys are B-groups; items are the anchor atoms isotope label. ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003], '2B1': [10002]} :returns: dict i_to_bs: Dictionary converting Anchor/node/I atoms to corresponding B-groups. This contains groups from both parent molecules. This is the inverse of b_to_is. keys are the anchor atoms isotope labels; items are B-groups. 
ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']} # i_dict = {10000: ['1B1', '2B1'], 10004: ['2B2'], 10005: ['2B3'], 10006: \ # ['2B4'], 10007: ['1B3'], 10008: ['1B2']} # b_dict = {'1B1': [10000], '1B2': [10008], '1B3': [10007], '2B4': [10006], \ # '2B3': [10005], '2B2': [10004], '2B1': [10000]} This runs the mapping class which can determine which B-groups/R-groups we will append in SmileMerge. Inputs: :param dict b_dict: Dictionary converting B-groups to anchor/node/I atoms. This contains groups from both parent molecules. This is the inverse of i_to_bs. keys are B-groups; items are the anchor atoms isotope label. ie) {'1B1': [10003], '2B3': [10005], '2B2': [10003], '2B1': [10002]} :param dict i_dict: Dictionary converting Anchor/node/I atoms to corresponding B-groups. This contains groups from both parent molecules. This is the inverse of b_to_is. keys are the anchor atoms isotope labels; items are B-groups. ie) {10002: ['2B1'], 10003: ['1B1', '2B2'], 10005: ['2B3']} Returns: :returns: list bs_chosen: A list of all the chosen B-groups to be used to generate a child molecule later. # # | 2.616345 | 3 |
corehq/apps/callcenter/tests/test_models.py | kkrampa/commcare-hq | 1 | 6613120 | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.callcenter import const
from corehq.apps.callcenter.const import WEEK1, WEEK0, MONTH0
from corehq.apps.callcenter.models import (
TypedIndicator, BasicIndicator, ByTypeWithTotal, CallCenterIndicatorConfig
)
from django.test import SimpleTestCase
def get_indicator_slugs_from_config(config, all_types=None):
def legacy_slugs(key, date_ranges):
for date_range in date_ranges:
yield '{}{}'.format(key, date_range.title())
def basic_slugs(key, date_ranges):
for date_range in date_ranges:
yield '{}_{}'.format(key, date_range)
def typed_slugs(key, type_, date_ranges):
for date_range in date_ranges:
yield '{}_{}_{}'.format(key, type_, date_range)
slugs = []
if config.forms_submitted.enabled:
slugs.extend(basic_slugs(const.FORMS_SUBMITTED, config.forms_submitted.date_ranges))
for indicator in config.custom_form:
slugs.append('{}{}'.format(indicator.type, indicator.date_range.title()))
for key in ['cases_total', 'cases_active', 'cases_opened', 'cases_closed']:
indicator_config = getattr(config, key)
if indicator_config.totals.enabled:
slugs.extend(basic_slugs(key, indicator_config.totals.date_ranges))
if indicator_config.all_types:
for type_ in all_types:
slugs.extend(typed_slugs(key, type_, const.DATE_RANGES))
for type_config in indicator_config.by_type:
if type_config.enabled:
slugs.extend(typed_slugs(key, type_config.type, type_config.date_ranges))
if config.legacy_forms_submitted:
slugs.extend(legacy_slugs(const.LEGACY_FORMS_SUBMITTED, config.forms_submitted.date_ranges))
if config.legacy_cases_total:
slugs.append(const.LEGACY_TOTAL_CASES)
if config.legacy_cases_active:
slugs.extend(legacy_slugs(const.LEGACY_CASES_UPDATED, config.cases_active.totals.date_ranges))
return slugs
class ModelTests(SimpleTestCase):
def test_types_by_date_range(self):
by_type = ByTypeWithTotal(by_type=[
TypedIndicator(enabled=True, date_ranges={WEEK0, WEEK1}, type='dog'),
TypedIndicator(enabled=True, date_ranges={WEEK0}, type='cat'),
TypedIndicator(enabled=True, date_ranges={WEEK1}, type='canary'),
TypedIndicator(enabled=True, date_ranges={WEEK1, MONTH0}, type='fish'),
TypedIndicator(enabled=False, date_ranges={MONTH0}, type='whale'),
])
self.assertEqual(by_type.types_by_date_range(), {
WEEK0: {'dog', 'cat'},
WEEK1: {'dog', 'canary', 'fish'},
MONTH0: {'fish'},
})
def test_real_example(self):
config = CallCenterIndicatorConfig(
domain='domain',
forms_submitted=BasicIndicator(enabled=True, date_ranges={MONTH0}),
cases_total=ByTypeWithTotal(
totals=BasicIndicator(enabled=False),
by_type=[
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='caregiver'),
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='beneficiary'),
]
),
cases_active=ByTypeWithTotal(
totals=BasicIndicator(enabled=False),
by_type=[
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='caregiver'),
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='beneficiary'),
]
)
)
self.assertEqual(set(get_indicator_slugs_from_config(config)), {
'forms_submitted_month0',
'cases_total_caregiver_month0',
'cases_active_caregiver_month0',
'cases_total_beneficiary_month0',
'cases_active_beneficiary_month0'
})
def test_empty(self):
self.assertEqual(get_indicator_slugs_from_config(CallCenterIndicatorConfig()), [])
def test_default(self):
indicators = get_indicator_slugs_from_config(
CallCenterIndicatorConfig.default_config(), all_types=['t1', 't2']
)
self.assertEqual(len(indicators), 61)
| from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.callcenter import const
from corehq.apps.callcenter.const import WEEK1, WEEK0, MONTH0
from corehq.apps.callcenter.models import (
TypedIndicator, BasicIndicator, ByTypeWithTotal, CallCenterIndicatorConfig
)
from django.test import SimpleTestCase
def get_indicator_slugs_from_config(config, all_types=None):
def legacy_slugs(key, date_ranges):
for date_range in date_ranges:
yield '{}{}'.format(key, date_range.title())
def basic_slugs(key, date_ranges):
for date_range in date_ranges:
yield '{}_{}'.format(key, date_range)
def typed_slugs(key, type_, date_ranges):
for date_range in date_ranges:
yield '{}_{}_{}'.format(key, type_, date_range)
slugs = []
if config.forms_submitted.enabled:
slugs.extend(basic_slugs(const.FORMS_SUBMITTED, config.forms_submitted.date_ranges))
for indicator in config.custom_form:
slugs.append('{}{}'.format(indicator.type, indicator.date_range.title()))
for key in ['cases_total', 'cases_active', 'cases_opened', 'cases_closed']:
indicator_config = getattr(config, key)
if indicator_config.totals.enabled:
slugs.extend(basic_slugs(key, indicator_config.totals.date_ranges))
if indicator_config.all_types:
for type_ in all_types:
slugs.extend(typed_slugs(key, type_, const.DATE_RANGES))
for type_config in indicator_config.by_type:
if type_config.enabled:
slugs.extend(typed_slugs(key, type_config.type, type_config.date_ranges))
if config.legacy_forms_submitted:
slugs.extend(legacy_slugs(const.LEGACY_FORMS_SUBMITTED, config.forms_submitted.date_ranges))
if config.legacy_cases_total:
slugs.append(const.LEGACY_TOTAL_CASES)
if config.legacy_cases_active:
slugs.extend(legacy_slugs(const.LEGACY_CASES_UPDATED, config.cases_active.totals.date_ranges))
return slugs
class ModelTests(SimpleTestCase):
def test_types_by_date_range(self):
by_type = ByTypeWithTotal(by_type=[
TypedIndicator(enabled=True, date_ranges={WEEK0, WEEK1}, type='dog'),
TypedIndicator(enabled=True, date_ranges={WEEK0}, type='cat'),
TypedIndicator(enabled=True, date_ranges={WEEK1}, type='canary'),
TypedIndicator(enabled=True, date_ranges={WEEK1, MONTH0}, type='fish'),
TypedIndicator(enabled=False, date_ranges={MONTH0}, type='whale'),
])
self.assertEqual(by_type.types_by_date_range(), {
WEEK0: {'dog', 'cat'},
WEEK1: {'dog', 'canary', 'fish'},
MONTH0: {'fish'},
})
def test_real_example(self):
config = CallCenterIndicatorConfig(
domain='domain',
forms_submitted=BasicIndicator(enabled=True, date_ranges={MONTH0}),
cases_total=ByTypeWithTotal(
totals=BasicIndicator(enabled=False),
by_type=[
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='caregiver'),
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='beneficiary'),
]
),
cases_active=ByTypeWithTotal(
totals=BasicIndicator(enabled=False),
by_type=[
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='caregiver'),
TypedIndicator(enabled=True, date_ranges={MONTH0}, type='beneficiary'),
]
)
)
self.assertEqual(set(get_indicator_slugs_from_config(config)), {
'forms_submitted_month0',
'cases_total_caregiver_month0',
'cases_active_caregiver_month0',
'cases_total_beneficiary_month0',
'cases_active_beneficiary_month0'
})
def test_empty(self):
self.assertEqual(get_indicator_slugs_from_config(CallCenterIndicatorConfig()), [])
def test_default(self):
indicators = get_indicator_slugs_from_config(
CallCenterIndicatorConfig.default_config(), all_types=['t1', 't2']
)
self.assertEqual(len(indicators), 61)
| none | 1 | 1.861787 | 2 | |
main.py | willemneal/Docky | 0 | 6613121 | from flask import Flask, request, json
app = Flask(__name__)
app.config['DEBUG'] = True
from builder import buildCode
from formatter import highlightCode
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Docky-py!'
@app.route('/eval',methods=["POST","GET"])
def runCode():
"""runs the code sent to the server. Also redirects stdout and stderr"""
return json.dumps(buildCode(request.args['text']))
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
@app.route('/prettify',methods=["GET"])
def makePretty():
'''returns styling '''
return json.dumps({'text':highlightCode(request.args['text'],request.args['style'])})
| from flask import Flask, request, json
app = Flask(__name__)
app.config['DEBUG'] = True
from builder import buildCode
from formatter import highlightCode
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Docky-py!'
@app.route('/eval',methods=["POST","GET"])
def runCode():
"""runs the code sent to the server. Also redirects stdout and stderr"""
return json.dumps(buildCode(request.args['text']))
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
@app.route('/prettify',methods=["GET"])
def makePretty():
'''returns styling '''
return json.dumps({'text':highlightCode(request.args['text'],request.args['style'])})
| en | 0.803355 | # Note: We don't need to call run() since our application is embedded within # the App Engine WSGI application server. Return a friendly HTTP greeting. runs the code sent to the server. Also redirects stdout and stderr Return a custom 404 error. returns styling | 2.649278 | 3 |
migrate/scripts/any__to__dry_run_only.py | xyla-io/datadragon | 0 | 6613122 | <reponame>xyla-io/datadragon
import datetime
for rule in db.rules.find({'lastRun': {'$exists': 1, '$ne': None}}, {'lastRun': 1}):
db.rules.update({'_id': rule['_id']}, {'$set': {'lastRun': rule['lastRun'] - datetime.timedelta(seconds=120) }}) | import datetime
for rule in db.rules.find({'lastRun': {'$exists': 1, '$ne': None}}, {'lastRun': 1}):
db.rules.update({'_id': rule['_id']}, {'$set': {'lastRun': rule['lastRun'] - datetime.timedelta(seconds=120) }}) | none | 1 | 2.387942 | 2 | |
lib/git_unittest.py | bpsinc-native/src_third_party_chromite | 0 | 6613123 | <filename>lib/git_unittest.py<gh_stars>0
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for chromite.lib.git and helpers for testing that module."""
import functools
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.lib import cros_build_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import partial_mock
from chromite.lib import patch_unittest
import mock
class ManifestMock(partial_mock.PartialMock):
"""Partial mock for git.Manifest."""
TARGET = 'chromite.lib.git.Manifest'
ATTRS = ('_RunParser',)
def _RunParser(self, *_args):
pass
class ManifestCheckoutMock(partial_mock.PartialMock):
"""Partial mock for git.ManifestCheckout."""
TARGET = 'chromite.lib.git.ManifestCheckout'
ATTRS = ('_GetManifestsBranch',)
def _GetManifestsBranch(self, _root):
return 'default'
class NormalizeRefTest(cros_test_lib.TestCase):
"""Test the Normalize*Ref functions."""
def _TestNormalize(self, functor, tests):
"""Helper function for testing Normalize*Ref functions.
Args:
functor: Normalize*Ref functor that only needs the input
ref argument.
tests: Dict of test inputs to expected test outputs.
"""
for test_input, test_output in tests.iteritems():
result = functor(test_input)
msg = ('Expected %s to translate %r to %r, but got %r.' %
(functor.__name__, test_input, test_output, result))
self.assertEquals(test_output, result, msg)
def testNormalizeRef(self):
"""Test git.NormalizeRef function."""
tests = {
# These should all get 'refs/heads/' prefix.
'foo': 'refs/heads/foo',
'foo-bar-123': 'refs/heads/foo-bar-123',
# If input starts with 'refs/' it should be left alone.
'refs/foo/bar': 'refs/foo/bar',
'refs/heads/foo': 'refs/heads/foo',
# Plain 'refs' is nothing special.
'refs': 'refs/heads/refs',
None: None,
}
self._TestNormalize(git.NormalizeRef, tests)
def testNormalizeRemoteRef(self):
"""Test git.NormalizeRemoteRef function."""
remote = 'TheRemote'
tests = {
# These should all get 'refs/remotes/TheRemote' prefix.
'foo': 'refs/remotes/%s/foo' % remote,
'foo-bar-123': 'refs/remotes/%s/foo-bar-123' % remote,
# These should be translated from local to remote ref.
'refs/heads/foo': 'refs/remotes/%s/foo' % remote,
'refs/heads/foo-bar-123': 'refs/remotes/%s/foo-bar-123' % remote,
# These should be moved from one remote to another.
'refs/remotes/OtherRemote/foo': 'refs/remotes/%s/foo' % remote,
# These should be left alone.
'refs/remotes/%s/foo' % remote: 'refs/remotes/%s/foo' % remote,
'refs/foo/bar': 'refs/foo/bar',
# Plain 'refs' is nothing special.
'refs': 'refs/remotes/%s/refs' % remote,
None: None,
}
# Add remote arg to git.NormalizeRemoteRef.
functor = functools.partial(git.NormalizeRemoteRef, remote)
functor.__name__ = git.NormalizeRemoteRef.__name__
self._TestNormalize(functor, tests)
class ProjectCheckoutTest(cros_test_lib.TestCase):
"""Tests for git.ProjectCheckout"""
def setUp(self):
self.fake_unversioned_patchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='remotes/for/master'))
self.fake_unversioned_unpatchable = git.ProjectCheckout(
dict(name='chromite',
path='src/platform/somethingsomething/chromite',
# Pinned to a SHA1.
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf'))
self.fake_versioned_patchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf',
upstream='remotes/for/master'))
self.fake_versioned_unpatchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf',
upstream='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf'))
def testIsPatchable(self):
self.assertTrue(self.fake_unversioned_patchable.IsPatchable())
self.assertFalse(self.fake_unversioned_unpatchable.IsPatchable())
self.assertTrue(self.fake_versioned_patchable.IsPatchable())
self.assertFalse(self.fake_versioned_unpatchable.IsPatchable())
class GitPushTest(cros_test_lib.MockTestCase):
"""Tests for git.GitPush function."""
# Non fast-forward push error message.
NON_FF_PUSH_ERROR = ('To https://localhost/repo.git\n'
'! [remote rejected] master -> master (non-fast-forward)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n')
# List of possible GoB transient errors.
TRANSIENT_ERRORS = (
# Hook error when creating a new branch from SHA1 ref.
('remote: Processing changes: (-)To https://localhost/repo.git\n'
'! [remote rejected] 6c78ca083c3a9d64068c945fd9998eb1e0a3e739 -> '
'stabilize-4636.B (error in hook)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# 'failed to lock' error when creating a new branch from SHA1 ref.
('remote: Processing changes: done\nTo https://localhost/repo.git\n'
'! [remote rejected] 4ea09c129b5fedb261bae2431ce2511e35ac3923 -> '
'stabilize-daisy-4319.96.B (failed to lock)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# Hook error when pushing branch.
('remote: Processing changes: (\)To https://localhost/repo.git\n'
'! [remote rejected] temp_auto_checkin_branch -> '
'master (error in hook)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# Another kind of error when pushing a branch.
'fatal: remote error: Internal Server Error',
# crbug.com/298189
('error: gnutls_handshake() failed: A TLS packet with unexpected length '
'was received. while accessing '
'http://localhost/repo.git/info/refs?service=git-upload-pack\n'
'fatal: HTTP request failed'),
# crbug.com/298189
('fatal: unable to access \'https://localhost/repo.git\': GnuTLS recv '
'error (-9): A TLS packet with unexpected length was received.'),
)
def setUp(self):
self.StartPatcher(mock.patch('time.sleep'))
@staticmethod
def _RunGitPush():
"""Runs git.GitPush with some default arguments."""
git.GitPush('some_repo_path', 'local-ref',
git.RemoteRef('some-remote', 'remote-ref'),
dryrun=True, retry=True)
def testPushSuccess(self):
"""Test handling of successful git push."""
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=0)
self._RunGitPush()
def testNonFFPush(self):
"""Non fast-forward push error propagates to the caller."""
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=128,
error=self.NON_FF_PUSH_ERROR)
self.assertRaises(cros_build_lib.RunCommandError, self._RunGitPush)
def testPersistentTransientError(self):
"""GitPush fails if transient error occurs multiple times."""
for error in self.TRANSIENT_ERRORS:
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=128,
error=error)
self.assertRaises(cros_build_lib.RunCommandError, self._RunGitPush)
def testOneTimeTransientError(self):
"""GitPush retries transient errors."""
for error in self.TRANSIENT_ERRORS:
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
results = [
rc_mock.CmdResult(128, '', error),
rc_mock.CmdResult(0, 'success', ''),
]
side_effect = lambda *_args, **_kwargs: results.pop(0)
rc_mock.AddCmdResult(partial_mock.In('push'), side_effect=side_effect)
self._RunGitPush()
class GitBranchDetectionTest(patch_unittest.GitRepoPatchTestCase):
"""Tests that git library functions related to branch detection work."""
def testDoesCommitExistInRepoWithAmbiguousBranchName(self):
git1 = self._MakeRepo('git1', self.source)
git.CreateBranch(git1, 'peach', track=True)
self.CommitFile(git1, 'peach', 'Keep me.')
self.assertTrue(git.DoesCommitExistInRepo(git1, 'peach'))
if __name__ == '__main__':
cros_test_lib.main()
| <filename>lib/git_unittest.py<gh_stars>0
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for chromite.lib.git and helpers for testing that module."""
import functools
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.lib import cros_build_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import partial_mock
from chromite.lib import patch_unittest
import mock
class ManifestMock(partial_mock.PartialMock):
"""Partial mock for git.Manifest."""
TARGET = 'chromite.lib.git.Manifest'
ATTRS = ('_RunParser',)
def _RunParser(self, *_args):
pass
class ManifestCheckoutMock(partial_mock.PartialMock):
"""Partial mock for git.ManifestCheckout."""
TARGET = 'chromite.lib.git.ManifestCheckout'
ATTRS = ('_GetManifestsBranch',)
def _GetManifestsBranch(self, _root):
return 'default'
class NormalizeRefTest(cros_test_lib.TestCase):
"""Test the Normalize*Ref functions."""
def _TestNormalize(self, functor, tests):
"""Helper function for testing Normalize*Ref functions.
Args:
functor: Normalize*Ref functor that only needs the input
ref argument.
tests: Dict of test inputs to expected test outputs.
"""
for test_input, test_output in tests.iteritems():
result = functor(test_input)
msg = ('Expected %s to translate %r to %r, but got %r.' %
(functor.__name__, test_input, test_output, result))
self.assertEquals(test_output, result, msg)
def testNormalizeRef(self):
"""Test git.NormalizeRef function."""
tests = {
# These should all get 'refs/heads/' prefix.
'foo': 'refs/heads/foo',
'foo-bar-123': 'refs/heads/foo-bar-123',
# If input starts with 'refs/' it should be left alone.
'refs/foo/bar': 'refs/foo/bar',
'refs/heads/foo': 'refs/heads/foo',
# Plain 'refs' is nothing special.
'refs': 'refs/heads/refs',
None: None,
}
self._TestNormalize(git.NormalizeRef, tests)
def testNormalizeRemoteRef(self):
"""Test git.NormalizeRemoteRef function."""
remote = 'TheRemote'
tests = {
# These should all get 'refs/remotes/TheRemote' prefix.
'foo': 'refs/remotes/%s/foo' % remote,
'foo-bar-123': 'refs/remotes/%s/foo-bar-123' % remote,
# These should be translated from local to remote ref.
'refs/heads/foo': 'refs/remotes/%s/foo' % remote,
'refs/heads/foo-bar-123': 'refs/remotes/%s/foo-bar-123' % remote,
# These should be moved from one remote to another.
'refs/remotes/OtherRemote/foo': 'refs/remotes/%s/foo' % remote,
# These should be left alone.
'refs/remotes/%s/foo' % remote: 'refs/remotes/%s/foo' % remote,
'refs/foo/bar': 'refs/foo/bar',
# Plain 'refs' is nothing special.
'refs': 'refs/remotes/%s/refs' % remote,
None: None,
}
# Add remote arg to git.NormalizeRemoteRef.
functor = functools.partial(git.NormalizeRemoteRef, remote)
functor.__name__ = git.NormalizeRemoteRef.__name__
self._TestNormalize(functor, tests)
class ProjectCheckoutTest(cros_test_lib.TestCase):
"""Tests for git.ProjectCheckout"""
def setUp(self):
self.fake_unversioned_patchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='remotes/for/master'))
self.fake_unversioned_unpatchable = git.ProjectCheckout(
dict(name='chromite',
path='src/platform/somethingsomething/chromite',
# Pinned to a SHA1.
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf'))
self.fake_versioned_patchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf',
upstream='remotes/for/master'))
self.fake_versioned_unpatchable = git.ProjectCheckout(
dict(name='chromite',
path='src/chromite',
revision='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf',
upstream='1deadbeeaf1deadbeeaf1deadbeeaf1deadbeeaf'))
def testIsPatchable(self):
self.assertTrue(self.fake_unversioned_patchable.IsPatchable())
self.assertFalse(self.fake_unversioned_unpatchable.IsPatchable())
self.assertTrue(self.fake_versioned_patchable.IsPatchable())
self.assertFalse(self.fake_versioned_unpatchable.IsPatchable())
class GitPushTest(cros_test_lib.MockTestCase):
"""Tests for git.GitPush function."""
# Non fast-forward push error message.
NON_FF_PUSH_ERROR = ('To https://localhost/repo.git\n'
'! [remote rejected] master -> master (non-fast-forward)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n')
# List of possible GoB transient errors.
TRANSIENT_ERRORS = (
# Hook error when creating a new branch from SHA1 ref.
('remote: Processing changes: (-)To https://localhost/repo.git\n'
'! [remote rejected] 6c78ca083c3a9d64068c945fd9998eb1e0a3e739 -> '
'stabilize-4636.B (error in hook)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# 'failed to lock' error when creating a new branch from SHA1 ref.
('remote: Processing changes: done\nTo https://localhost/repo.git\n'
'! [remote rejected] 4ea09c129b5fedb261bae2431ce2511e35ac3923 -> '
'stabilize-daisy-4319.96.B (failed to lock)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# Hook error when pushing branch.
('remote: Processing changes: (\)To https://localhost/repo.git\n'
'! [remote rejected] temp_auto_checkin_branch -> '
'master (error in hook)\n'
'error: failed to push some refs to \'https://localhost/repo.git\'\n'),
# Another kind of error when pushing a branch.
'fatal: remote error: Internal Server Error',
# crbug.com/298189
('error: gnutls_handshake() failed: A TLS packet with unexpected length '
'was received. while accessing '
'http://localhost/repo.git/info/refs?service=git-upload-pack\n'
'fatal: HTTP request failed'),
# crbug.com/298189
('fatal: unable to access \'https://localhost/repo.git\': GnuTLS recv '
'error (-9): A TLS packet with unexpected length was received.'),
)
def setUp(self):
self.StartPatcher(mock.patch('time.sleep'))
@staticmethod
def _RunGitPush():
"""Runs git.GitPush with some default arguments."""
git.GitPush('some_repo_path', 'local-ref',
git.RemoteRef('some-remote', 'remote-ref'),
dryrun=True, retry=True)
def testPushSuccess(self):
"""Test handling of successful git push."""
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=0)
self._RunGitPush()
def testNonFFPush(self):
"""Non fast-forward push error propagates to the caller."""
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=128,
error=self.NON_FF_PUSH_ERROR)
self.assertRaises(cros_build_lib.RunCommandError, self._RunGitPush)
def testPersistentTransientError(self):
"""GitPush fails if transient error occurs multiple times."""
for error in self.TRANSIENT_ERRORS:
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
rc_mock.AddCmdResult(partial_mock.In('push'), returncode=128,
error=error)
self.assertRaises(cros_build_lib.RunCommandError, self._RunGitPush)
def testOneTimeTransientError(self):
"""GitPush retries transient errors."""
for error in self.TRANSIENT_ERRORS:
with cros_build_lib_unittest.RunCommandMock() as rc_mock:
results = [
rc_mock.CmdResult(128, '', error),
rc_mock.CmdResult(0, 'success', ''),
]
side_effect = lambda *_args, **_kwargs: results.pop(0)
rc_mock.AddCmdResult(partial_mock.In('push'), side_effect=side_effect)
self._RunGitPush()
class GitBranchDetectionTest(patch_unittest.GitRepoPatchTestCase):
"""Tests that git library functions related to branch detection work."""
def testDoesCommitExistInRepoWithAmbiguousBranchName(self):
git1 = self._MakeRepo('git1', self.source)
git.CreateBranch(git1, 'peach', track=True)
self.CommitFile(git1, 'peach', 'Keep me.')
self.assertTrue(git.DoesCommitExistInRepo(git1, 'peach'))
if __name__ == '__main__':
cros_test_lib.main()
| en | 0.740419 | #!/usr/bin/python # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Unit tests for chromite.lib.git and helpers for testing that module. Partial mock for git.Manifest. Partial mock for git.ManifestCheckout. Test the Normalize*Ref functions. Helper function for testing Normalize*Ref functions. Args: functor: Normalize*Ref functor that only needs the input ref argument. tests: Dict of test inputs to expected test outputs. Test git.NormalizeRef function. # These should all get 'refs/heads/' prefix. # If input starts with 'refs/' it should be left alone. # Plain 'refs' is nothing special. Test git.NormalizeRemoteRef function. # These should all get 'refs/remotes/TheRemote' prefix. # These should be translated from local to remote ref. # These should be moved from one remote to another. # These should be left alone. # Plain 'refs' is nothing special. # Add remote arg to git.NormalizeRemoteRef. Tests for git.ProjectCheckout # Pinned to a SHA1. Tests for git.GitPush function. # Non fast-forward push error message. # List of possible GoB transient errors. # Hook error when creating a new branch from SHA1 ref. # 'failed to lock' error when creating a new branch from SHA1 ref. # Hook error when pushing branch. # Another kind of error when pushing a branch. # crbug.com/298189 # crbug.com/298189 Runs git.GitPush with some default arguments. Test handling of successful git push. Non fast-forward push error propagates to the caller. GitPush fails if transient error occurs multiple times. GitPush retries transient errors. Tests that git library functions related to branch detection work. | 2.416572 | 2 |
objectheatmap/utils.py | hqbao/dlp_tf | 0 | 6613124 | <reponame>hqbao/dlp_tf
def box2frame(box, apoint=[0.5, 0.5]):
'''
Convert [y1, x1, y2, x2] to [x, y, w, h]
'''
return [
(box[1] + apoint[1]*(box[3]-box[1])),
(box[0] + apoint[0]*(box[2]-box[0])),
(box[3] - box[1]),
(box[2] - box[0])
] | def box2frame(box, apoint=[0.5, 0.5]):
'''
Convert [y1, x1, y2, x2] to [x, y, w, h]
'''
return [
(box[1] + apoint[1]*(box[3]-box[1])),
(box[0] + apoint[0]*(box[2]-box[0])),
(box[3] - box[1]),
(box[2] - box[0])
] | en | 0.580567 | Convert [y1, x1, y2, x2] to [x, y, w, h] | 2.901917 | 3 |
main.py | iPr0ger/mdr-fastapi | 0 | 6613125 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routers import search
from routers.api import rest, es_query_based, graphql
from elasticsearch import AsyncElasticsearch
from configs.es_configs import ELASTICSEARCH_HOST
app = FastAPI()
es = AsyncElasticsearch(hosts=[ELASTICSEARCH_HOST])
origins = [
"*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["GET", "POST"],
allow_headers=["*"],
)
app.include_router(search.router)
app.include_router(rest.router)
app.include_router(es_query_based.router)
app.include_router(graphql.router)
@app.on_event("shutdown")
async def app_shutdown():
await es.close()
| from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routers import search
from routers.api import rest, es_query_based, graphql
from elasticsearch import AsyncElasticsearch
from configs.es_configs import ELASTICSEARCH_HOST
app = FastAPI()
es = AsyncElasticsearch(hosts=[ELASTICSEARCH_HOST])
origins = [
"*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["GET", "POST"],
allow_headers=["*"],
)
app.include_router(search.router)
app.include_router(rest.router)
app.include_router(es_query_based.router)
app.include_router(graphql.router)
@app.on_event("shutdown")
async def app_shutdown():
await es.close()
| none | 1 | 2.048873 | 2 | |
ggmodel_dev/models/transport/VEHC_scenario.py | Global-Green-Growth-Institute/GraphModels | 0 | 6613126 | <reponame>Global-Green-Growth-Institute/GraphModels<filename>ggmodel_dev/models/transport/VEHC_scenario.py<gh_stars>0
from ggmodel_dev.projection import *
from ggmodel_dev.models.transport.VEHC import model_dictionnary
MODEL = model_dictionnary['VEHC_model']
def run_scenario(data_dict, MAX_sat=1000, GDPC_rate=1.05):
data_dict = data_dict.copy()
data_dict['MAX_sat'] = pd.Series(
data=data_dict['MAX_sat'].values[0], index=data_dict['GDPC'].index, name='MAX_sat')
scenario_projection_dict = {
'MAX_sat': lambda x: apply_target_projection(x, MAX_sat),
'GDPC': lambda x: apply_annual_rate_projection(x, GDPC_rate),
}
data_dict = run_projection(scenario_projection_dict, data_dict)
results = MODEL.run(data_dict)
return results
def run_all_scenarios(data_dict, args_dict_1, args_dict_2):
scenarios_results = {}
scenarios_results['BAU'] = run_scenario(data_dict, MAX_sat=data_dict['MAX_sat'], GDPC_rate=1.02)
scenarios_results['scenario_one'] = run_scenario(data_dict, **args_dict_1)
scenarios_results['scenario_two'] = run_scenario(data_dict, **args_dict_2)
return scenarios_results
| from ggmodel_dev.projection import *
from ggmodel_dev.models.transport.VEHC import model_dictionnary
MODEL = model_dictionnary['VEHC_model']
def run_scenario(data_dict, MAX_sat=1000, GDPC_rate=1.05):
data_dict = data_dict.copy()
data_dict['MAX_sat'] = pd.Series(
data=data_dict['MAX_sat'].values[0], index=data_dict['GDPC'].index, name='MAX_sat')
scenario_projection_dict = {
'MAX_sat': lambda x: apply_target_projection(x, MAX_sat),
'GDPC': lambda x: apply_annual_rate_projection(x, GDPC_rate),
}
data_dict = run_projection(scenario_projection_dict, data_dict)
results = MODEL.run(data_dict)
return results
def run_all_scenarios(data_dict, args_dict_1, args_dict_2):
scenarios_results = {}
scenarios_results['BAU'] = run_scenario(data_dict, MAX_sat=data_dict['MAX_sat'], GDPC_rate=1.02)
scenarios_results['scenario_one'] = run_scenario(data_dict, **args_dict_1)
scenarios_results['scenario_two'] = run_scenario(data_dict, **args_dict_2)
return scenarios_results | none | 1 | 2.172107 | 2 | |
LuoguCodes/AT504.py | Anguei/OI-Codes | 0 | 6613127 | a1, a2, a3=map(int, raw_input().split())
b1, b2, b3=map(int, raw_input().split())
ans = max((a1 / b1) * (a2 / b2) * (a3 / b3), (a1 / b1) * (a3 / b2) * (a2 / b3), (a2 / b1) * (a1 / b2) * (a3 / b3), (a2 / b1) * (a3 / b2) * (a1 / b3), (a3 / b1) * (a1 / b2) * (a2 / b3), (a3 / b1) * (a2 / b2) * (a1 / b3))
print ans
| a1, a2, a3=map(int, raw_input().split())
b1, b2, b3=map(int, raw_input().split())
ans = max((a1 / b1) * (a2 / b2) * (a3 / b3), (a1 / b1) * (a3 / b2) * (a2 / b3), (a2 / b1) * (a1 / b2) * (a3 / b3), (a2 / b1) * (a3 / b2) * (a1 / b3), (a3 / b1) * (a1 / b2) * (a2 / b3), (a3 / b1) * (a2 / b2) * (a1 / b3))
print ans
| none | 1 | 2.579808 | 3 | |
examples/script-injection.py | PerformLine/webfriend | 32 | 6613128 | <filename>examples/script-injection.py
#!/usr/bin/env python
import logging
from webfriend.browser import Chrome
from webfriend.scripting.environment import Environment
logging.basicConfig(level=logging.DEBUG)
with Chrome(foreground=True) as browser:
commands = Environment(browser=browser)
# navigate to Hacker News
www = commands.core.go('https://news.ycombinator.com')
# set a list of patterns for text to extract from the page
# this is used by the injected script below
commands.vars.set('terms', [
'/(\d+) points/',
])
# inject and execute this JavaScript. Scripts are wrapped in a dynamically-generated
# function that lives on the "window" object. Values returned from the injected script
# will be converted to native types and be accessible as the command's return value
#
# Everything between the 'begin' and 'end' keywords will be passed to the browser.
#
matches = commands.core.javascript('''
if(this.terms) {
// we're going to start walking from the <body> tag down
var bodyTag = document.getElementsByTagName('body')[0];
var walker = document.createTreeWalker(bodyTag, NodeFilter.SHOW_TEXT, null, false);
var patterns = [];
// parse incoming terms into patterns or exact string matches
for(var i = 0; i < this.terms.length; i++) {
var term = this.terms[i];
if(term.length > 2 && term.indexOf('/') == 0 && term.lastIndexOf('/') > 0) {
var rx = term.slice(1, term.lastIndexOf('/'));
var opts = term.slice(term.lastIndexOf('/') + 1);
patterns.push(new RegExp(rx, opts));
} else {
patterns.push(term);
}
}
var results = [];
// iterate through all text nodes on the page
while(node = walker.nextNode()) {
// for each pattern...
for(var i = 0; i < patterns.length; i++) {
var pattern = patterns[i];
if(pattern instanceof RegExp) {
// if it's a regular expression, apply it
var match = node.nodeValue.match(pattern);
if(match) {
if(match.length > 1) {
// if groups were used, add each result
match = match.slice(1);
for(var j = 0; j < match.length; j++) {
results.push(parseInt(match[j]));
}
} else {
// otherwise, use the whole match
results.push(match[0]);
}
break;
}
} else if(node.nodeValue == pattern) {
// otherwise, exact matches only
results.push(node.nodeValue);
break;
}
}
}
// return results
return results;
} else {
throw 'Must provide a list of terms in the $terms variable.';
}
''')
# show the result of the script execution
logging.info(matches)
| <filename>examples/script-injection.py
#!/usr/bin/env python
import logging
from webfriend.browser import Chrome
from webfriend.scripting.environment import Environment
logging.basicConfig(level=logging.DEBUG)
with Chrome(foreground=True) as browser:
commands = Environment(browser=browser)
# navigate to Hacker News
www = commands.core.go('https://news.ycombinator.com')
# set a list of patterns for text to extract from the page
# this is used by the injected script below
commands.vars.set('terms', [
'/(\d+) points/',
])
# inject and execute this JavaScript. Scripts are wrapped in a dynamically-generated
# function that lives on the "window" object. Values returned from the injected script
# will be converted to native types and be accessible as the command's return value
#
# Everything between the 'begin' and 'end' keywords will be passed to the browser.
#
matches = commands.core.javascript('''
if(this.terms) {
// we're going to start walking from the <body> tag down
var bodyTag = document.getElementsByTagName('body')[0];
var walker = document.createTreeWalker(bodyTag, NodeFilter.SHOW_TEXT, null, false);
var patterns = [];
// parse incoming terms into patterns or exact string matches
for(var i = 0; i < this.terms.length; i++) {
var term = this.terms[i];
if(term.length > 2 && term.indexOf('/') == 0 && term.lastIndexOf('/') > 0) {
var rx = term.slice(1, term.lastIndexOf('/'));
var opts = term.slice(term.lastIndexOf('/') + 1);
patterns.push(new RegExp(rx, opts));
} else {
patterns.push(term);
}
}
var results = [];
// iterate through all text nodes on the page
while(node = walker.nextNode()) {
// for each pattern...
for(var i = 0; i < patterns.length; i++) {
var pattern = patterns[i];
if(pattern instanceof RegExp) {
// if it's a regular expression, apply it
var match = node.nodeValue.match(pattern);
if(match) {
if(match.length > 1) {
// if groups were used, add each result
match = match.slice(1);
for(var j = 0; j < match.length; j++) {
results.push(parseInt(match[j]));
}
} else {
// otherwise, use the whole match
results.push(match[0]);
}
break;
}
} else if(node.nodeValue == pattern) {
// otherwise, exact matches only
results.push(node.nodeValue);
break;
}
}
}
// return results
return results;
} else {
throw 'Must provide a list of terms in the $terms variable.';
}
''')
# show the result of the script execution
logging.info(matches)
| en | 0.446304 | #!/usr/bin/env python # navigate to Hacker News # set a list of patterns for text to extract from the page # this is used by the injected script below # inject and execute this JavaScript. Scripts are wrapped in a dynamically-generated # function that lives on the "window" object. Values returned from the injected script # will be converted to native types and be accessible as the command's return value # # Everything between the 'begin' and 'end' keywords will be passed to the browser. # if(this.terms) { // we're going to start walking from the <body> tag down var bodyTag = document.getElementsByTagName('body')[0]; var walker = document.createTreeWalker(bodyTag, NodeFilter.SHOW_TEXT, null, false); var patterns = []; // parse incoming terms into patterns or exact string matches for(var i = 0; i < this.terms.length; i++) { var term = this.terms[i]; if(term.length > 2 && term.indexOf('/') == 0 && term.lastIndexOf('/') > 0) { var rx = term.slice(1, term.lastIndexOf('/')); var opts = term.slice(term.lastIndexOf('/') + 1); patterns.push(new RegExp(rx, opts)); } else { patterns.push(term); } } var results = []; // iterate through all text nodes on the page while(node = walker.nextNode()) { // for each pattern... for(var i = 0; i < patterns.length; i++) { var pattern = patterns[i]; if(pattern instanceof RegExp) { // if it's a regular expression, apply it var match = node.nodeValue.match(pattern); if(match) { if(match.length > 1) { // if groups were used, add each result match = match.slice(1); for(var j = 0; j < match.length; j++) { results.push(parseInt(match[j])); } } else { // otherwise, use the whole match results.push(match[0]); } break; } } else if(node.nodeValue == pattern) { // otherwise, exact matches only results.push(node.nodeValue); break; } } } // return results return results; } else { throw 'Must provide a list of terms in the $terms variable.'; } # show the result of the script execution | 3.062332 | 3 |
source/New folder/dcpower_test1_12.py | MrDotJ/repetition-paper | 0 | 6613129 | import matplotlib.pyplot as plt
from DCpower.config import *
from DCpower.config3 import *
def factory():
player1 = getPlayer(player0_info)
player2 = getPlayer(player1_info)
player3 = getPlayer(player2_info)
player4 = getPlayer(player3_info)
player5 = getPlayer(player4_info)
playerN1 = playerNp1()
return [player1, player2, player3, player4, player5, playerN1]
def calculate_NE():
global g_lam
count_best_response = 0
g_angles_old = 0
while count_best_response < 10:
# TODO: maybe 30 is a little small
g_angles_old = copy.deepcopy(g_angles)
for i, player in enumerate(g_players):
# get the data for the player i
player.update_model(g_tao) # 填充x_i 以及lam_i
player_i_result = player.optimize_model()
g_angles[i] = player_i_result.copy()
# update the lam_dual variable
g_lam = g_playerN1.optimize(g_tao).copy()
# update the response
if sub_norm(g_angles_old, g_angles) < 0.001:
print(count_best_response)
break
count_best_response = count_best_response + 1
def set_oldValue():
for i, player in enumerate(g_players):
player.set_old_value(g_angles[i].copy())
g_playerN1.set_old_value(g_lam.copy())
def start():
global g_angles
result_plt = []
result_plt1 = []
result_plt2 = []
result_plt3 = []
# initial
for player in g_players:
player.build_model()
# start the outer loop
outer_loop_count = 0
while outer_loop_count < 300:
print(outer_loop_count)
# give xn, lam_n, calculate the equilibrium
calculate_NE()
# 现在我们得到了一个新的NE,我们应该把这个NE设为参照值
set_oldValue()
outer_loop_count = outer_loop_count + 1
# result_plt.append(g_angles[0][0])
# result_plt1.append(g_angles[1][0])
# result_plt2.append(g_angles[0][0] - g_angles[1][0])
result_plt.append(injection[0][0][0])
result_plt1.append(injection[1][0][0])
result_plt2.append(injection[0][0][0] + injection[1][0][0])
# result_plt3.append(injection[2][0][0] + injection[0][1][0])
# result_plt2.append(g_lam[0] + g_lam[1])
# set all value in g_ex to zero
if outer_loop_count != 600:
# reset(g_angles)
namejqy
plt.plot(result_plt, label='0->1')
plt.plot(result_plt1, '-r', label='1->0')
# plt.plot(result_plt2, '-g', label='diff')
# plt.plot(result_plt3, '*b', label='diff')
plt.legend(loc='best')
plt.show()
# plt.savefig('x-node-micro-grid.svg')
if __name__ == '__main__':
all_players = factory()
g_players = all_players[:player_num]
g_playerN1 = all_players[player_num]
start()
| import matplotlib.pyplot as plt
from DCpower.config import *
from DCpower.config3 import *
def factory():
player1 = getPlayer(player0_info)
player2 = getPlayer(player1_info)
player3 = getPlayer(player2_info)
player4 = getPlayer(player3_info)
player5 = getPlayer(player4_info)
playerN1 = playerNp1()
return [player1, player2, player3, player4, player5, playerN1]
def calculate_NE():
global g_lam
count_best_response = 0
g_angles_old = 0
while count_best_response < 10:
# TODO: maybe 30 is a little small
g_angles_old = copy.deepcopy(g_angles)
for i, player in enumerate(g_players):
# get the data for the player i
player.update_model(g_tao) # 填充x_i 以及lam_i
player_i_result = player.optimize_model()
g_angles[i] = player_i_result.copy()
# update the lam_dual variable
g_lam = g_playerN1.optimize(g_tao).copy()
# update the response
if sub_norm(g_angles_old, g_angles) < 0.001:
print(count_best_response)
break
count_best_response = count_best_response + 1
def set_oldValue():
for i, player in enumerate(g_players):
player.set_old_value(g_angles[i].copy())
g_playerN1.set_old_value(g_lam.copy())
def start():
global g_angles
result_plt = []
result_plt1 = []
result_plt2 = []
result_plt3 = []
# initial
for player in g_players:
player.build_model()
# start the outer loop
outer_loop_count = 0
while outer_loop_count < 300:
print(outer_loop_count)
# give xn, lam_n, calculate the equilibrium
calculate_NE()
# 现在我们得到了一个新的NE,我们应该把这个NE设为参照值
set_oldValue()
outer_loop_count = outer_loop_count + 1
# result_plt.append(g_angles[0][0])
# result_plt1.append(g_angles[1][0])
# result_plt2.append(g_angles[0][0] - g_angles[1][0])
result_plt.append(injection[0][0][0])
result_plt1.append(injection[1][0][0])
result_plt2.append(injection[0][0][0] + injection[1][0][0])
# result_plt3.append(injection[2][0][0] + injection[0][1][0])
# result_plt2.append(g_lam[0] + g_lam[1])
# set all value in g_ex to zero
if outer_loop_count != 600:
# reset(g_angles)
namejqy
plt.plot(result_plt, label='0->1')
plt.plot(result_plt1, '-r', label='1->0')
# plt.plot(result_plt2, '-g', label='diff')
# plt.plot(result_plt3, '*b', label='diff')
plt.legend(loc='best')
plt.show()
# plt.savefig('x-node-micro-grid.svg')
if __name__ == '__main__':
all_players = factory()
g_players = all_players[:player_num]
g_playerN1 = all_players[player_num]
start()
| en | 0.290755 | # TODO: maybe 30 is a little small # get the data for the player i # 填充x_i 以及lam_i # update the lam_dual variable # update the response # initial # start the outer loop # give xn, lam_n, calculate the equilibrium # 现在我们得到了一个新的NE,我们应该把这个NE设为参照值 # result_plt.append(g_angles[0][0]) # result_plt1.append(g_angles[1][0]) # result_plt2.append(g_angles[0][0] - g_angles[1][0]) # result_plt3.append(injection[2][0][0] + injection[0][1][0]) # result_plt2.append(g_lam[0] + g_lam[1]) # set all value in g_ex to zero # reset(g_angles) # plt.plot(result_plt2, '-g', label='diff') # plt.plot(result_plt3, '*b', label='diff') # plt.savefig('x-node-micro-grid.svg') | 2.78878 | 3 |
train.py | vader-coder/Grade-Predictor | 0 | 6613130 | #Import Relevant Libraries
import numpy
import pandas
from sklearn import linear_model
import sklearn
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
import time as t
def split (x, y, defaultTestSize):
return sklearn.model_selection.train_test_split(x, y, test_size=defaultTestSize)
def remStrArr (data):
length = len(data[0])#numpy.size(data, axis=0)
i = 0
while (i<length):#loop through columns
if (isinstance(data[0, i], str)):#if type in row 0, column i is string.
data = numpy.delete(data, i, axis=1);#drop that column
length -= 1
i -= 1
i+=1
return data
start = t.monotonic()
path = "student-mat.csv"
data = pandas.read_csv(path, sep=";")
target = "G3" #what category we will predict
#eliminate = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob', 'reason', 'guardian',
#'schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'romantic', ]
labels = numpy.array(data[target])#convert object to array
features = remStrArr(numpy.array(data.drop([target], 1)))
#data.drop() removes target from array.
#next line splits data into training and test sections.
topModel = 0
topScore = 0
for i in range(100):
featuresTrain, featuresTest, labelsTrain, labelsTest = split(features, labels, 0.1)
model = linear_model.LinearRegression() #select linear regression model
model.fit(featuresTrain, labelsTrain) #draw line of best fit using training data
modelScore = model.score(featuresTest, labelsTest) #determine how well model performs.
if (modelScore > topScore):
topScore = modelScore
topModel = model
file = open('topModel.pickle', 'wb')
pickle.dump(topModel, file)
file.close()
print("best score: " + str(topScore))
print("Time: " + str(t.monotonic()-start))
| #Import Relevant Libraries
import numpy
import pandas
from sklearn import linear_model
import sklearn
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
import time as t
def split (x, y, defaultTestSize):
return sklearn.model_selection.train_test_split(x, y, test_size=defaultTestSize)
def remStrArr (data):
length = len(data[0])#numpy.size(data, axis=0)
i = 0
while (i<length):#loop through columns
if (isinstance(data[0, i], str)):#if type in row 0, column i is string.
data = numpy.delete(data, i, axis=1);#drop that column
length -= 1
i -= 1
i+=1
return data
start = t.monotonic()
path = "student-mat.csv"
data = pandas.read_csv(path, sep=";")
target = "G3" #what category we will predict
#eliminate = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob', 'reason', 'guardian',
#'schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'romantic', ]
labels = numpy.array(data[target])#convert object to array
features = remStrArr(numpy.array(data.drop([target], 1)))
#data.drop() removes target from array.
#next line splits data into training and test sections.
topModel = 0
topScore = 0
for i in range(100):
featuresTrain, featuresTest, labelsTrain, labelsTest = split(features, labels, 0.1)
model = linear_model.LinearRegression() #select linear regression model
model.fit(featuresTrain, labelsTrain) #draw line of best fit using training data
modelScore = model.score(featuresTest, labelsTest) #determine how well model performs.
if (modelScore > topScore):
topScore = modelScore
topModel = model
file = open('topModel.pickle', 'wb')
pickle.dump(topModel, file)
file.close()
print("best score: " + str(topScore))
print("Time: " + str(t.monotonic()-start))
| en | 0.568634 | #Import Relevant Libraries #numpy.size(data, axis=0) #loop through columns #if type in row 0, column i is string. #drop that column #what category we will predict #eliminate = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob', 'reason', 'guardian', #'schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'romantic', ] #convert object to array #data.drop() removes target from array. #next line splits data into training and test sections. #select linear regression model #draw line of best fit using training data #determine how well model performs. | 3.443811 | 3 |
tasks/tasks.py | rvalien/orbbot | 0 | 6613131 | import asyncio
import datetime
import discord
import os
from discord.ext import tasks
from moduls import random_gif
from itertools import cycle
apikey = os.environ["TENSOR_API_KEY"]
delay = int(os.environ["DELAY"])
CHANNELS = {
"books": 825411232159760405,
"づ。◕‿‿◕。づ": 774365764190732309,
"dev🛠": 811505442252521492,
}
@tasks.loop(hours=5.0)
async def change_status(self):
status = cycle(
["Quake Champions", "Control", "Hollow knight", "Alien isolation", "Banner saga", "Divinity: Original sin 2"]
)
while not self.is_closed():
await self.change_presence(activity=discord.Game(next(status)))
@tasks.loop(hours=5.0)
async def bdays_check(self):
if 10 <= datetime.datetime.utcnow().hour <= 20:
query = """
select user_id
from bdays
where extract(month from bday) = (SELECT date_part('month', (SELECT current_timestamp)))
and extract(day from bday) = (SELECT date_part('day', (SELECT current_timestamp)))
"""
party_dude = await self.pg_con.fetchval(query)
if party_dude:
user = self.get_user(party_dude)
channel = self.get_channel(CHANNELS.get("づ。◕‿‿◕。づ"))
embed = discord.Embed()
url = random_gif(apikey, "birth day")
embed.set_image(url=url)
async with channel.typing():
await asyncio.sleep(0.10)
await channel.send(f"{user.mention} happy BD, **{user.name}**! We Love you!", embed=embed)
@tasks.loop(hours=1)
async def deadline_check(self, redis_client):
# channel = self.get_channel(757694875096449029) тестовый канал на тестовом сервере
channel = self.get_channel(CHANNELS.get("books"))
keyword = "book_club_notify_timestamp"
utc_now = datetime.datetime.utcnow()
timestamp = redis_client.get(keyword)
if 8 <= utc_now.hour <= 15:
if timestamp is None or datetime.datetime.fromtimestamp(int(timestamp)).date() != utc_now.date():
days = await self.pg_con.fetchval("select deadline - current_date from book_club_deadline")
if days and days < 0:
await self.bot.pg_con.execute("truncate table book_club_deadline")
elif days and days <= 7:
redis_client.set(keyword, int(utc_now.timestamp()))
await channel.send(f"Дней до обсуждения: {days}")
| import asyncio
import datetime
import discord
import os
from discord.ext import tasks
from moduls import random_gif
from itertools import cycle
apikey = os.environ["TENSOR_API_KEY"]
delay = int(os.environ["DELAY"])
CHANNELS = {
"books": 825411232159760405,
"づ。◕‿‿◕。づ": 774365764190732309,
"dev🛠": 811505442252521492,
}
@tasks.loop(hours=5.0)
async def change_status(self):
status = cycle(
["Quake Champions", "Control", "Hollow knight", "Alien isolation", "Banner saga", "Divinity: Original sin 2"]
)
while not self.is_closed():
await self.change_presence(activity=discord.Game(next(status)))
@tasks.loop(hours=5.0)
async def bdays_check(self):
if 10 <= datetime.datetime.utcnow().hour <= 20:
query = """
select user_id
from bdays
where extract(month from bday) = (SELECT date_part('month', (SELECT current_timestamp)))
and extract(day from bday) = (SELECT date_part('day', (SELECT current_timestamp)))
"""
party_dude = await self.pg_con.fetchval(query)
if party_dude:
user = self.get_user(party_dude)
channel = self.get_channel(CHANNELS.get("づ。◕‿‿◕。づ"))
embed = discord.Embed()
url = random_gif(apikey, "birth day")
embed.set_image(url=url)
async with channel.typing():
await asyncio.sleep(0.10)
await channel.send(f"{user.mention} happy BD, **{user.name}**! We Love you!", embed=embed)
@tasks.loop(hours=1)
async def deadline_check(self, redis_client):
# channel = self.get_channel(757694875096449029) тестовый канал на тестовом сервере
channel = self.get_channel(CHANNELS.get("books"))
keyword = "book_club_notify_timestamp"
utc_now = datetime.datetime.utcnow()
timestamp = redis_client.get(keyword)
if 8 <= utc_now.hour <= 15:
if timestamp is None or datetime.datetime.fromtimestamp(int(timestamp)).date() != utc_now.date():
days = await self.pg_con.fetchval("select deadline - current_date from book_club_deadline")
if days and days < 0:
await self.bot.pg_con.execute("truncate table book_club_deadline")
elif days and days <= 7:
redis_client.set(keyword, int(utc_now.timestamp()))
await channel.send(f"Дней до обсуждения: {days}")
| en | 0.392429 | select user_id from bdays where extract(month from bday) = (SELECT date_part('month', (SELECT current_timestamp))) and extract(day from bday) = (SELECT date_part('day', (SELECT current_timestamp))) # channel = self.get_channel(757694875096449029) тестовый канал на тестовом сервере | 2.552584 | 3 |
exercises/practice/atbash-cipher/.meta/example.py | Stigjb/python | 1,177 | 6613132 | <filename>exercises/practice/atbash-cipher/.meta/example.py
from string import ascii_lowercase
BLOCK_SIZE = 5
trtbl = str.maketrans(ascii_lowercase, ascii_lowercase[::-1])
def base_trans(text):
return ''.join([character for character in text if character.isalnum()]).lower().translate(trtbl)
def encode(plain):
cipher = base_trans(plain)
return ' '.join(cipher[idx:idx + BLOCK_SIZE]
for idx in range(0, len(cipher), BLOCK_SIZE))
def decode(ciphered):
return base_trans(ciphered)
| <filename>exercises/practice/atbash-cipher/.meta/example.py
from string import ascii_lowercase
BLOCK_SIZE = 5
trtbl = str.maketrans(ascii_lowercase, ascii_lowercase[::-1])
def base_trans(text):
return ''.join([character for character in text if character.isalnum()]).lower().translate(trtbl)
def encode(plain):
cipher = base_trans(plain)
return ' '.join(cipher[idx:idx + BLOCK_SIZE]
for idx in range(0, len(cipher), BLOCK_SIZE))
def decode(ciphered):
return base_trans(ciphered)
| none | 1 | 3.684155 | 4 | |
base/views.py | pyprism/Hiren-Notes | 1 | 6613133 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib import auth
from django.contrib import messages
from django.db.utils import IntegrityError
from .models import Account, Setting
from django.shortcuts import get_object_or_404
from django.contrib.auth import logout as fuckoff
def login(request):
"""
Handles authentication
:param request:
:return:
"""
if request.user.is_authenticated:
return redirect('secret_code')
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
if user:
auth.login(request, user)
return redirect('secret_code')
else:
messages.warning(request, 'Username/Password is not valid!')
return redirect('login')
else:
return render(request, 'base/login.html')
def signup(request):
"""
Handles signup
:param request:
:return:
"""
if request.user.is_authenticated:
return redirect('secret_code')
if request.method == "POST":
sign_up, created = Setting.objects.get_or_create(task='S')
if sign_up.active:
username = request.POST.get('username')
password = request.POST.get('password')
acc = Account(username=username, password=make_password(password))
try:
acc.save()
except IntegrityError:
messages.warning(request, "Username is not available!")
return redirect('signup')
messages.success(request, 'Account has been created successfully!')
return redirect('login')
else:
messages.warning(request, 'Signup is disabled!')
return redirect('signup')
else:
return render(request, 'base/signup.html')
@login_required
def secret_code(request):
return render(request, 'base/secret_code.html')
@login_required
def settings(request):
users = Account.objects.all()
sign_up, created = Setting.objects.get_or_create(task='S')
return render(request, 'base/settings.html', {'users': users, 'signup': sign_up})
@login_required
def create_user(request):
"""
Create non-admin user
:param request:
:return:
"""
if request.user.is_admin:
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
acc = Account(username=username, password=make_password(password))
try:
acc.save()
except IntegrityError:
messages.error(request, "username is not available!")
return redirect('create_user')
messages.success(request, 'Account created successfully!')
return redirect('create_user')
return render(request, 'base/create_user.html')
@login_required
def signup_settings(request):
if request.user.is_admin:
if request.method == 'POST':
sign_up = Setting.objects.get(task='S')
if request.POST.get('enable'):
sign_up.active = False
elif request.POST.get('disable'):
sign_up.active = True
sign_up.save()
return redirect('settings')
@login_required
def update_user(request, username):
"""
update password
:param request:
:param username:
:return:
"""
if request.user.is_admin:
if request.method == 'POST':
user = get_object_or_404(Account, username=username)
user.set_password(request.POST.get('password'))
user.save()
messages.success(request, 'Password updated.')
return redirect('update_user', username=username)
return render(request, 'base/update_user.html')
def logout(request):
"""
fuck off and logout
:param request:
:return:
"""
fuckoff(request)
return redirect("login")
| from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib import auth
from django.contrib import messages
from django.db.utils import IntegrityError
from .models import Account, Setting
from django.shortcuts import get_object_or_404
from django.contrib.auth import logout as fuckoff
def login(request):
"""
Handles authentication
:param request:
:return:
"""
if request.user.is_authenticated:
return redirect('secret_code')
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
if user:
auth.login(request, user)
return redirect('secret_code')
else:
messages.warning(request, 'Username/Password is not valid!')
return redirect('login')
else:
return render(request, 'base/login.html')
def signup(request):
"""
Handles signup
:param request:
:return:
"""
if request.user.is_authenticated:
return redirect('secret_code')
if request.method == "POST":
sign_up, created = Setting.objects.get_or_create(task='S')
if sign_up.active:
username = request.POST.get('username')
password = request.POST.get('password')
acc = Account(username=username, password=make_password(password))
try:
acc.save()
except IntegrityError:
messages.warning(request, "Username is not available!")
return redirect('signup')
messages.success(request, 'Account has been created successfully!')
return redirect('login')
else:
messages.warning(request, 'Signup is disabled!')
return redirect('signup')
else:
return render(request, 'base/signup.html')
@login_required
def secret_code(request):
return render(request, 'base/secret_code.html')
@login_required
def settings(request):
users = Account.objects.all()
sign_up, created = Setting.objects.get_or_create(task='S')
return render(request, 'base/settings.html', {'users': users, 'signup': sign_up})
@login_required
def create_user(request):
"""
Create non-admin user
:param request:
:return:
"""
if request.user.is_admin:
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
acc = Account(username=username, password=make_password(password))
try:
acc.save()
except IntegrityError:
messages.error(request, "username is not available!")
return redirect('create_user')
messages.success(request, 'Account created successfully!')
return redirect('create_user')
return render(request, 'base/create_user.html')
@login_required
def signup_settings(request):
if request.user.is_admin:
if request.method == 'POST':
sign_up = Setting.objects.get(task='S')
if request.POST.get('enable'):
sign_up.active = False
elif request.POST.get('disable'):
sign_up.active = True
sign_up.save()
return redirect('settings')
@login_required
def update_user(request, username):
"""
update password
:param request:
:param username:
:return:
"""
if request.user.is_admin:
if request.method == 'POST':
user = get_object_or_404(Account, username=username)
user.set_password(request.POST.get('password'))
user.save()
messages.success(request, 'Password updated.')
return redirect('update_user', username=username)
return render(request, 'base/update_user.html')
def logout(request):
"""
fuck off and logout
:param request:
:return:
"""
fuckoff(request)
return redirect("login")
| en | 0.693404 | Handles authentication :param request: :return: Handles signup :param request: :return: Create non-admin user :param request: :return: update password :param request: :param username: :return: fuck off and logout :param request: :return: | 2.293937 | 2 |
fzfaws/utils/pyfzf.py | kazhala/fawsf | 66 | 6613134 | <filename>fzfaws/utils/pyfzf.py<gh_stars>10-100
"""This module contains the wrapper class to interacte with fzf.
The fzf class should be used for all occasion when fzf needs
to be launched. fzfaws comes with 4 fzf binary files and will
be used if user doesn't specify to use system fzf in config file.
"""
import os
import subprocess
import sys
from typing import Any, Dict, Generator, List, Optional, Union
from fzfaws.utils.exceptions import EmptyList, NoSelectionMade
class Pyfzf:
    r"""A simple wrapper class for fzf utilizing the subprocess module.

    To create an entry in fzf, use Pyfzf.append_fzf() and pass in the string.
    To create multiple entries, manually pass in "\n" to separate each entry.
    For a list of responses from boto3, it is recommended to use the
    process_list() method.

    Example:
        fzf = Pyfzf()
        s3 = boto3.client('s3')
        response = s3.list_buckets()
        fzf.process_list(response["Buckets"], "Name")
        selected_bucket = fzf.execute_fzf(multi_select=False)

    The above example processes the list of buckets in the response and makes
    "Name" the return value.  The selected_bucket will be a bucket name.
    """

    def __init__(self) -> None:
        """Construct the Pyfzf instance.

        Credit to https://github.com/pmazurek/aws-fuzzy-finder for the
        binary detection method.
        """
        # Accumulated entries that will be piped to fzf's stdin.
        self.fzf_string: str = ""

        # Detect host architecture/OS so the matching bundled binary is used.
        arch = "amd64" if sys.maxsize > 2 ** 32 else "386"
        if sys.platform.startswith("darwin"):
            system = "darwin"
        elif sys.platform.startswith("linux"):
            system = "linux"
        else:
            print(
                "fzfaws currently is only compatible with python3.6+ on MacOS or Linux"
            )
            sys.exit(1)

        # Honour the user's configured system fzf; otherwise fall back to the
        # bundled binary shipped under ../libs.
        self.fzf_path: str = (
            "fzf"
            if os.getenv("FZFAWS_FZF_EXECUTABLE", "binary") == "system"
            else "%s/../libs/fzf-0.21.1-%s_%s"
            % (os.path.dirname(os.path.abspath(__file__)), system, arch)
        )

    def append_fzf(self, new_string: str) -> None:
        r"""Append strings to fzf_string.

        To have multiple entries, separate them by '\n'.

        Example:
            fzf.append_fzf('hello')
            fzf.append_fzf('\n')
            fzf.append_fzf('world')

        :param new_string: string to append to the fzf entry
        :type new_string: str
        """
        self.fzf_string += new_string

    def execute_fzf(
        self,
        empty_allow: bool = False,
        print_col: int = 2,
        preview: Optional[str] = None,
        multi_select: bool = False,
        header: Optional[str] = None,
        delimiter: Optional[str] = None,
    ) -> Union[List[Any], List[str], str]:
        r"""Execute fzf against the accumulated entries and return the result.

        Example:
            fzf = Pyfzf()
            fzf.append_fzf('Hello: hello\n')
            fzf.append_fzf('World: world\n')
            print(fzf.execute_fzf(empty_allow=True, print_col=1))

        If the first entry is selected, print_col=1 returns 'Hello:' and
        print_col=2 returns 'hello'.

        :param empty_allow: determine if empty selection is allowed
        :type empty_allow: bool, optional
        :param print_col: which column of the result to return (awk-style);
            0 returns the entire entry
        :type print_col: int, optional
        :param preview: preview command for fzf, e.g. "echo 'hello'"
        :type preview: str, optional
        :param multi_select: enable fzf multi selection
        :type multi_select: bool, optional
        :param header: header to display in fzf
        :type header: str, optional
        :param delimiter: delimiter separating the columns (None splits on
            whitespace, like awk's default)
        :type delimiter: Optional[str]
        :raises NoSelectionMade: when the user made no selection and
            empty_allow is False
        :return: selected entry (or list of entries when multi_select)
        :rtype: Union[List[Any], List[str], str]
        """
        # Remove trailing spaces/newlines so fzf does not show empty entries.
        self.fzf_string = str(self.fzf_string).rstrip()

        cmd_list: List[str] = self._construct_fzf_cmd()
        if header:
            cmd_list.append("--header=%s" % header)
        cmd_list.append("--multi" if multi_select else "--no-multi")
        if preview:
            cmd_list.extend(["--preview", preview])

        selection_str: str = ""
        try:
            # Feed the entries straight to fzf's stdin via ``input=`` instead
            # of spawning an extra "echo" process (the previous Popen was
            # never waited on and leaked a child process).
            selection = subprocess.check_output(
                cmd_list, input=self.fzf_string.encode("utf-8")
            )
            selection_str = str(selection, "utf-8")
            if not selection and not empty_allow:
                raise NoSelectionMade
            # fzf is launched with --expect=ctrl-c; abort fzfaws entirely when
            # the first output line reports that key.
            self._check_ctrl_c(selection_str)
        except subprocess.CalledProcessError:
            # fzf exits non-zero when the user cancels without a selection.
            if not empty_allow:
                raise NoSelectionMade
            return [] if multi_select else ""

        if multi_select:
            # Multi select returns one entry per line.
            return [
                self._get_col(item, print_col, delimiter)
                for item in selection_str.strip().splitlines()
            ]
        return self._get_col(selection_str.strip(), print_col, delimiter)

    def get_local_file(
        self,
        search_from_root: bool = False,
        cloudformation: bool = False,
        directory: bool = False,
        hidden: bool = False,
        empty_allow: bool = False,
        multi_select: bool = False,
        header: Optional[str] = None,
    ) -> Union[List[Any], List[str], str]:
        """Get local file paths through fzf.

        Populates local files into fzf; if search_from_root is True the
        search starts from the user's home directory.

        Note: can be extremely slow to search from root if fd is not
        installed.

        :param search_from_root: search files from the home directory
        :type search_from_root: bool, optional
        :param cloudformation: only search yaml/yml/json files
        :type cloudformation: bool, optional
        :param directory: search directories instead of files
        :type directory: bool, optional
        :param hidden: include hidden files (only effective with fd)
        :type hidden: bool, optional
        :param empty_allow: allow empty selection
        :type empty_allow: bool, optional
        :param multi_select: allow multi selection
        :type multi_select: bool, optional
        :param header: header to display in fzf
        :type header: str, optional
        :raises NoSelectionMade: when empty_allow=False and no selection made
        :return: selected file path(s) or folder path(s)
        :rtype: Union[List[Any], List[str], str]
        """
        if search_from_root:
            os.chdir(os.path.expanduser("~"))
        if not header and directory:
            header = r"Selecting ./ will use current directory"

        # Prefer fd when available; it is much faster than find.
        if self._check_fd():
            if directory:
                cmd = "echo \033[33m./\033[0m; fd --type d"
            elif cloudformation:
                cmd = "fd --type f --regex '(yaml|yml|json)$'"
            else:
                cmd = "fd --type f"
            if hidden:
                cmd += " -H"
        else:
            if directory:
                cmd = "echo \033[33m./\033[0m; find * -type d"
            elif cloudformation:
                cmd = 'find * -type f -name "*.json" -o -name "*.yaml" -o -name "*.yml"'
            else:
                cmd = "find * -type f"

        list_file = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True
        )
        selected_file_path_str: str = ""
        try:
            cmd_list: List[str] = self._construct_fzf_cmd()
            if header:
                cmd_list.append("--header=%s" % header)
            cmd_list.append("-m" if multi_select else "+m")
            selected_file_path = subprocess.check_output(
                cmd_list, stdin=list_file.stdout
            )
            selected_file_path_str = str(selected_file_path, "utf-8")
            if not empty_allow and not selected_file_path:
                raise NoSelectionMade
            self._check_ctrl_c(selected_file_path_str)
        except subprocess.CalledProcessError:
            # fzf exits non-zero when the user presses esc to exit.
            if not empty_allow:
                raise NoSelectionMade
            # BUGFIX: previously returned [] even in single-select mode;
            # mirror execute_fzf and return "" when multi_select is off.
            return [] if multi_select else ""

        if multi_select:
            # Multi select returns one path per line.
            return selected_file_path_str.strip().splitlines()
        return selected_file_path_str.strip()

    def _construct_fzf_cmd(self) -> List[str]:
        """Construct the base command list for invoking fzf.

        FZFAWS_FZF_OPTS and FZFAWS_FZF_KEYS environment variables are
        appended so users can customise fzf's behaviour.

        :return: command list processable by subprocess
        :rtype: List[str]
        """
        cmd_list: List[str] = [self.fzf_path, "--ansi", "--expect=ctrl-c"]
        fzf_opts = os.getenv("FZFAWS_FZF_OPTS", "")
        if fzf_opts:
            # split() (not split(" ")) so runs of spaces do not produce
            # empty arguments.
            cmd_list.extend(fzf_opts.split())
        fzf_keys = os.getenv("FZFAWS_FZF_KEYS", "")
        if fzf_keys:
            cmd_list.append(fzf_keys)
        return cmd_list

    def _check_ctrl_c(self, fzf_result: str) -> None:
        """Raise KeyboardInterrupt when ctrl-c was pressed inside fzf.

        fzf is always launched with --expect=ctrl-c, so when the user pressed
        that key the first output line is the literal string "ctrl-c".

        :param fzf_result: the str output of the fzf subprocess
        :type fzf_result: str
        :raises KeyboardInterrupt: when ctrl-c was pressed
        """
        lines = fzf_result.splitlines()
        if lines and lines[0] == "ctrl-c":
            raise KeyboardInterrupt

    def _check_fd(self) -> bool:
        """Check if fd is installed on the machine.

        BUGFIX: the previous implementation ran ``["command", "-v", "fd"]``
        through subprocess without a shell, but ``command`` is a shell
        builtin, not an executable, so the check raised FileNotFoundError
        (silenced by a bare ``except:``) and always reported fd as missing.
        shutil.which performs the same PATH lookup reliably.

        :return: True when an ``fd`` executable is found on PATH
        :rtype: bool
        """
        return shutil.which("fd") is not None

    def process_list(
        self,
        response_list: Union[list, Generator],
        key_name: str,
        *arg_keys,
        empty_allow: bool = False
    ) -> None:
        """Process the list passed in and format it for fzf.

        Prepares the fzf entries; invoke execute_fzf() afterwards to launch
        fzf and obtain the user's selection.

        Example:
            entries = [{'Name': 1, 'Mame': 2}, {'Name': 2, 'Mame': 3}]
            fzf.process_list(entries, 'Name', 'Mame')
            fzf.execute_fzf(empty_allow=False)

        In the above example, if the first entry is selected, it returns 1.

        :param response_list: list (or generator) of dict-like items
        :type response_list: Union[list, Generator]
        :param key_name: primary key to search and add into the entry
        :type key_name: str
        :param arg_keys: additional keys appended to each entry, separated
            by " | "
        :param empty_allow: when False, raise if nothing was processed
        :type empty_allow: bool, optional
        :raises EmptyList: when the processed list produced no entries and
            empty_allow is False
        """
        for item in response_list:
            self.append_fzf("%s: %s" % (key_name, item.get(key_name)))
            for arg in arg_keys:
                self.append_fzf(" | ")
                self.append_fzf("%s: %s" % (arg, item.get(arg)))
            self.append_fzf("\n")
        if not self.fzf_string and not empty_allow:
            raise EmptyList("Result list was empty")

    def format_selected_to_dict(self, selected_str: str) -> Dict[str, Any]:
        """Format a selected entry into a dictionary.

        Only useful with fzf.execute_fzf(print_col=0) in conjunction with
        process_list(): the full "key: value | key: value" entry is parsed
        back into a dict.  The literal string "None" is mapped back to None.

        Example:
            result = fzf.execute_fzf(print_col=0)
            result_dict = fzf.format_selected_to_dict(result)

        :param selected_str: the selected entry from fzf.execute_fzf
        :type selected_str: str
        :return: formatted details in dict form
        :rtype: Dict[str, Any]
        """
        formatted_dict: Dict[str, Any] = {}
        for key_value in selected_str.split(" | "):
            # Split on the first ": " only, so values that themselves contain
            # ": " no longer raise ValueError.
            key, value = key_value.split(": ", 1)
            formatted_dict[key] = None if value == "None" else value
        return formatted_dict

    def _get_col(self, string: str, print_col: int, delimiter: Optional[str]) -> str:
        """Return the wanted column of the given string (awk-like).

        :param string: string to process
        :type string: str
        :param print_col: column to return; 0 returns the entire string
        :type print_col: int
        :param delimiter: delimiter separating the columns (None splits on
            whitespace, like awk's default)
        :type delimiter: Optional[str]
        :return: the selected column of the string
        :rtype: str
        """
        if print_col == 0:
            return string
        delimited_str = string.split(delimiter)
        # awk-like behaviour: when the requested column exceeds the number of
        # available columns, return the entire string.  BUGFIX: the previous
        # comparison (print_col - 1 > len) was off by one and raised
        # IndexError when print_col == number_of_columns + 1.
        # NOTE(review): print_col=-1 is documented elsewhere as "everything
        # except the first col" but has always returned the second-to-last
        # column; that behaviour is preserved as-is.
        if print_col - 1 >= len(delimited_str):
            return string
        return delimited_str[print_col - 1]
| <filename>fzfaws/utils/pyfzf.py<gh_stars>10-100
"""This module contains the wrapper class to interacte with fzf.
The fzf class should be used for all occasion when fzf needs
to be launched. fzfaws comes with 4 fzf binary files and will
be used if user doesn't specify to use system fzf in config file.
"""
import os
import subprocess
import sys
from typing import Any, Dict, Generator, List, Optional, Union
from fzfaws.utils.exceptions import EmptyList, NoSelectionMade
class Pyfzf:
r"""A simple wrapper class for fzf utilizing subprocess module.
To create a entry into fzf, use Pyfzf.append_fzf() and pass in the string.
To create mutiple entries, would require manually pass in \n to seperate each entry.
For a list of response from boto3, it is recommended to use the process_list() function.
Example:
fzf = Pyfzf()
s3 = boto3.client('s3')
response = s3.list_buckets()
fzf.process_list(response["Buckets"], "Name")
selected_bucket = fzf.execute_fzf(multi_select=False)
The above example process the list of buckets in response and make "Name" the return value.
The selected_bucket will be a bucket name.
"""
def __init__(self) -> None:
"""Construct the Pyfzf instance.
Credit to https://github.com/pmazurek/aws-fuzzy-finder for the binary detection
method.
"""
self.fzf_string: str = ""
if sys.maxsize > 2 ** 32:
arch = "amd64"
else:
arch = "386"
if sys.platform.startswith("darwin"):
system = "darwin"
elif sys.platform.startswith("linux"):
system = "linux"
else:
print(
"fzfaws currently is only compatible with python3.6+ on MacOS or Linux"
)
sys.exit(1)
self.fzf_path: str = (
"fzf"
if os.getenv("FZFAWS_FZF_EXECUTABLE", "binary") == "system"
else "%s/../libs/fzf-0.21.1-%s_%s"
% (os.path.dirname(os.path.abspath(__file__)), system, arch)
)
def append_fzf(self, new_string: str) -> None:
r"""Append stings to fzf_string.
To have mutiple entries, seperate them by '\n'
Example:fzf.append_fzf('hello')
fzf.append_fzf('\n')
fzf.append_fzf('world')
:param new_string: strings to append to fzf entry
:type new_string: str
"""
self.fzf_string += new_string
def execute_fzf(
self,
empty_allow: bool = False,
print_col: int = 2,
preview: Optional[str] = None,
multi_select: bool = False,
header: Optional[str] = None,
delimiter: Optional[str] = None,
) -> Union[List[Any], List[str], str]:
r"""Execute fzf and return formated string.
Example:
fzf = Pyfzf()
fzf.append_fzf('Hello: hello')
fzf.append_fzf('\n')
fzf.append_fzf('World: world')
fzf.append_fzf('\n')
print(fzf.execute_fzf(empty_allow=True, print_col=1, preview='cat {}', multi_select=True))
The selected string would look like "Hello: hello".
Above example would return 'Hello:'' if the first entry is selected, print col is 1,
if print_col was 2, 'hello' would be printed.
:param empty_allow: determine if empty selection is allowed
:type empty_allow: bool, optional
:param print_col: which column of the result to print (used by awk), -1 print everything except first col
:type print_col: int, optional
:param preview: display preview in fzf, e.g.(echo 'hello')
:type preview: str, optional
:param multi_select: enable fzf multi selection
:type multi_select: bool, optional
:param header: header to display in fzf
:type header: str, optional
:param delimiter: the delimiter to seperate print_col, like awk number
:type delimiter: Optional[str]
:raises NoSelectionMade: when user did not make a selection and empty_allow is False
:return: selected entry from fzf
:rtype: Union[list[Any], list[str], str]
"""
# remove trailing spaces/lines
self.fzf_string = str(self.fzf_string).rstrip()
fzf_input = subprocess.Popen(("echo", self.fzf_string), stdout=subprocess.PIPE)
cmd_list: list = self._construct_fzf_cmd()
selection: bytes = b""
selection_str: str = ""
if header:
cmd_list.append("--header=%s" % header)
if multi_select:
cmd_list.append("--multi")
else:
cmd_list.append("--no-multi")
if preview:
cmd_list.extend(["--preview", preview])
try:
selection = subprocess.check_output(cmd_list, stdin=fzf_input.stdout)
selection_str = str(selection, "utf-8")
if not selection and not empty_allow:
raise NoSelectionMade
# if first line contains ctrl-c, exit
self._check_ctrl_c(selection_str)
except subprocess.CalledProcessError:
# this exception may happend if user didn't make a selection in fzf
# thus ending with non zero exit code
if not empty_allow:
raise NoSelectionMade
elif empty_allow:
if multi_select:
return []
else:
return ""
if multi_select:
return_list: List[str] = []
# multi_select would return everything seperate by \n
selections: List[str] = selection_str.strip().splitlines()
for item in selections:
processed_str = self._get_col(item, print_col, delimiter)
return_list.append(processed_str)
return return_list
else:
return self._get_col(selection_str.strip(), print_col, delimiter)
def get_local_file(
self,
search_from_root: bool = False,
cloudformation: bool = False,
directory: bool = False,
hidden: bool = False,
empty_allow: bool = False,
multi_select: bool = False,
header: Optional[str] = None,
) -> Union[List[Any], List[str], str]:
"""Get local files through fzf.
Populate the local files into fzf, if search_from_root is true
all files would be populated.
Note: could be extremely slow to seach from root if fd not installed.
:param search_from_root: search files from root
:type search_from_root: bool, optional
:param cloudformation: only search yaml or json
:type cloudformation: bool, optional
:param directory: search directory
:type directory: bool, optional
:param hidden: search hidden file, only has effect when fd installed
:type hidden: bool, optional
:param empty_allow: allow empty selection
:type empty_allow: bool, optional
:param multi_select: allow multi selection
:type multi_select: bool, optional
:param header: header display in fzf
:type header: str, optional
:raises NoSelectionMade: when empty_allow=False and no selection was made
:return: selected file path or folder path
:rtype: Union[list[Any], list[str], str]
"""
if search_from_root:
home_path = os.path.expanduser("~")
os.chdir(home_path)
if not header and directory:
header = r"Selecting ./ will use current directory"
cmd: str = ""
if self._check_fd():
if directory:
cmd = "echo \033[33m./\033[0m; fd --type d"
elif cloudformation:
cmd = "fd --type f --regex '(yaml|yml|json)$'"
else:
cmd = "fd --type f"
if hidden:
cmd += " -H"
else:
if directory:
cmd = "echo \033[33m./\033[0m; find * -type d"
elif cloudformation:
cmd = 'find * -type f -name "*.json" -o -name "*.yaml" -o -name "*.yml"'
else:
cmd = "find * -type f"
list_file = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True
)
selected_file_path: bytes = b""
selected_file_path_str: str = ""
try:
cmd_list: list = self._construct_fzf_cmd()
if header:
cmd_list.append("--header=%s" % header)
if multi_select:
cmd_list.append("-m")
else:
cmd_list.append("+m")
selected_file_path = subprocess.check_output(
cmd_list, stdin=list_file.stdout
)
selected_file_path_str = str(selected_file_path, "utf-8")
if not empty_allow and not selected_file_path:
raise NoSelectionMade
self._check_ctrl_c(selected_file_path_str)
except subprocess.CalledProcessError:
# subprocess exception will raise when user press ecs to exit fzf
if not empty_allow:
raise NoSelectionMade
elif empty_allow:
return [] if empty_allow else ""
if multi_select:
# multi_select would return everything seperate by \n
return selected_file_path_str.strip().splitlines()
else:
return selected_file_path_str.strip()
def _construct_fzf_cmd(self) -> List[str]:
"""Construct command for fzf.
:return: command list processable by subprocess
:rtype: list[str]
"""
cmd_list: list = [self.fzf_path, "--ansi", "--expect=ctrl-c"]
if os.getenv("FZFAWS_FZF_OPTS"):
cmd_list.extend(os.getenv("FZFAWS_FZF_OPTS").split(" "))
if os.getenv("FZFAWS_FZF_KEYS"):
cmd_list.append(os.getenv("FZFAWS_FZF_KEYS", ""))
return cmd_list
def _check_ctrl_c(self, fzf_result: str) -> None:
"""Check if ctrl_c is pressed during fzf invokation.
If ctrl_c is pressed, exit entire fzfaws program instead of
keep moving forward.
:param fzf_result: the str output of fzf subprocess
:type fzf_result: tr
"""
result = fzf_result.splitlines()
if len(result) < 1:
return
if result[0] == "ctrl-c":
raise KeyboardInterrupt
def _check_fd(self):
"""Check if fd is intalled on the machine."""
try:
subprocess.run(
["command", "-v", "fd"], stdout=subprocess.DEVNULL, check=True
)
return True
except:
return False
def process_list(
self,
response_list: Union[list, Generator],
key_name: str,
*arg_keys,
empty_allow: bool = False
) -> None:
"""Process list passed in and formatted for fzf.
Processes the list passed into it and prepare the fzf operation
Note: need to invoke fzf.execute_fzf() to pop the fzf
process and get the user selection.
Example:
list = [{'Name': 1, 'Mame': 2}, {'Name': 2, 'Mame': 3}]
fzf.process_list(list, 'Name', 'Mame')
fzf.execute_fzf(empty_allow=False)
In the above example, if first entry is selected, it will return 1.
:param response_list: list to process
:type response_list: list
:param key_name: key_name to search and add into response
:type key_name: str
:param gap: gap between each key
:type gap: int, optional
:raises EmptyList: when the list is empty and did not get any result
"""
for item in response_list:
self.append_fzf("%s: %s" % (key_name, item.get(key_name)))
for arg in arg_keys:
self.append_fzf(" | ")
self.append_fzf("%s: %s" % (arg, item.get(arg)))
self.append_fzf("\n")
if not self.fzf_string and not empty_allow:
raise EmptyList("Result list was empty")
def format_selected_to_dict(self, selected_str: str) -> Dict[str, Any]:
"""Format the selected option into a proper dictionary.
This is only useful if fzf.execute_fzf(print_col=0).
This is useful to use in conjuction with process_list, process_list
might contain a lot of information but printing all of them into
a str may not be useful enough.
Example:
fzf.process_list(
response_generator,
"InstanceId",
"Status",
"InstanceType",
"Name",
"KeyName",
"PublicDnsName",
"PublicIpAddress",
"PrivateIpAddress",
)
result = fzf.execute_fzf(multi_select=multi_select, header=header, print_col=0)
result_dict = fzf.format_selected_to_dict(result)
:param selected_str: the selected str from fzf.execute_fzf
:type selected_str: str
:return: formatted instance details in dict form
:rtype: Dict[str, Any]
"""
formatted_dict: Dict[str, Any] = {}
selected_list = selected_str.split(" | ")
for key_value in selected_list:
key, value = key_value.split(": ")
if value == "None":
formatted_dict.update({key: None})
else:
formatted_dict.update({key: value})
return formatted_dict
def _get_col(self, string: str, print_col: int, delimiter: Optional[str]) -> str:
"""Return the wanted col of the given str.
:param string: string to process
:type string: str
:param print_col: column to return
:type print_col: int
:param delimiter: delimiter that seperate the column
:type delimiter: Optional[str]
:return: the print_col of the string
:rtype: str
"""
if print_col == 0:
return string
else:
delimited_str = string.split(delimiter)
if print_col - 1 > len(delimited_str):
# somewhat similar to awk behavior?
# when the print col exceed the col number, awk return the entire string
return string
return delimited_str[print_col - 1]
| en | 0.659763 | This module contains the wrapper class to interacte with fzf. The fzf class should be used for all occasion when fzf needs to be launched. fzfaws comes with 4 fzf binary files and will be used if user doesn't specify to use system fzf in config file. A simple wrapper class for fzf utilizing subprocess module. To create a entry into fzf, use Pyfzf.append_fzf() and pass in the string. To create mutiple entries, would require manually pass in \n to seperate each entry. For a list of response from boto3, it is recommended to use the process_list() function. Example: fzf = Pyfzf() s3 = boto3.client('s3') response = s3.list_buckets() fzf.process_list(response["Buckets"], "Name") selected_bucket = fzf.execute_fzf(multi_select=False) The above example process the list of buckets in response and make "Name" the return value. The selected_bucket will be a bucket name. Construct the Pyfzf instance. Credit to https://github.com/pmazurek/aws-fuzzy-finder for the binary detection method. Append stings to fzf_string. To have mutiple entries, seperate them by '\n' Example:fzf.append_fzf('hello') fzf.append_fzf('\n') fzf.append_fzf('world') :param new_string: strings to append to fzf entry :type new_string: str Execute fzf and return formated string. Example: fzf = Pyfzf() fzf.append_fzf('Hello: hello') fzf.append_fzf('\n') fzf.append_fzf('World: world') fzf.append_fzf('\n') print(fzf.execute_fzf(empty_allow=True, print_col=1, preview='cat {}', multi_select=True)) The selected string would look like "Hello: hello". Above example would return 'Hello:'' if the first entry is selected, print col is 1, if print_col was 2, 'hello' would be printed. 
:param empty_allow: determine if empty selection is allowed :type empty_allow: bool, optional :param print_col: which column of the result to print (used by awk), -1 print everything except first col :type print_col: int, optional :param preview: display preview in fzf, e.g.(echo 'hello') :type preview: str, optional :param multi_select: enable fzf multi selection :type multi_select: bool, optional :param header: header to display in fzf :type header: str, optional :param delimiter: the delimiter to seperate print_col, like awk number :type delimiter: Optional[str] :raises NoSelectionMade: when user did not make a selection and empty_allow is False :return: selected entry from fzf :rtype: Union[list[Any], list[str], str] # remove trailing spaces/lines # if first line contains ctrl-c, exit # this exception may happend if user didn't make a selection in fzf # thus ending with non zero exit code # multi_select would return everything seperate by \n Get local files through fzf. Populate the local files into fzf, if search_from_root is true all files would be populated. Note: could be extremely slow to seach from root if fd not installed. 
:param search_from_root: search files from root :type search_from_root: bool, optional :param cloudformation: only search yaml or json :type cloudformation: bool, optional :param directory: search directory :type directory: bool, optional :param hidden: search hidden file, only has effect when fd installed :type hidden: bool, optional :param empty_allow: allow empty selection :type empty_allow: bool, optional :param multi_select: allow multi selection :type multi_select: bool, optional :param header: header display in fzf :type header: str, optional :raises NoSelectionMade: when empty_allow=False and no selection was made :return: selected file path or folder path :rtype: Union[list[Any], list[str], str] # subprocess exception will raise when user press ecs to exit fzf # multi_select would return everything seperate by \n Construct command for fzf. :return: command list processable by subprocess :rtype: list[str] Check if ctrl_c is pressed during fzf invokation. If ctrl_c is pressed, exit entire fzfaws program instead of keep moving forward. :param fzf_result: the str output of fzf subprocess :type fzf_result: tr Check if fd is intalled on the machine. Process list passed in and formatted for fzf. Processes the list passed into it and prepare the fzf operation Note: need to invoke fzf.execute_fzf() to pop the fzf process and get the user selection. Example: list = [{'Name': 1, 'Mame': 2}, {'Name': 2, 'Mame': 3}] fzf.process_list(list, 'Name', 'Mame') fzf.execute_fzf(empty_allow=False) In the above example, if first entry is selected, it will return 1. :param response_list: list to process :type response_list: list :param key_name: key_name to search and add into response :type key_name: str :param gap: gap between each key :type gap: int, optional :raises EmptyList: when the list is empty and did not get any result Format the selected option into a proper dictionary. This is only useful if fzf.execute_fzf(print_col=0). 
This is useful to use in conjuction with process_list, process_list might contain a lot of information but printing all of them into a str may not be useful enough. Example: fzf.process_list( response_generator, "InstanceId", "Status", "InstanceType", "Name", "KeyName", "PublicDnsName", "PublicIpAddress", "PrivateIpAddress", ) result = fzf.execute_fzf(multi_select=multi_select, header=header, print_col=0) result_dict = fzf.format_selected_to_dict(result) :param selected_str: the selected str from fzf.execute_fzf :type selected_str: str :return: formatted instance details in dict form :rtype: Dict[str, Any] Return the wanted col of the given str. :param string: string to process :type string: str :param print_col: column to return :type print_col: int :param delimiter: delimiter that seperate the column :type delimiter: Optional[str] :return: the print_col of the string :rtype: str # somewhat similar to awk behavior? # when the print col exceed the col number, awk return the entire string | 2.812168 | 3 |
boards/admin.py | rpdeo/discussion-boards | 0 | 6613135 | from django.contrib import admin
from .models import Board, Topic, Post
# Register the forum models so they can be managed via the Django admin site.
for forum_model in (Board, Topic, Post):
    admin.site.register(forum_model)
| from django.contrib import admin
from .models import Board, Topic, Post
admin.site.register(Board)
admin.site.register(Topic)
admin.site.register(Post)
| none | 1 | 1.317246 | 1 | |
regex/ip-address-validation.py | znxster/hackerrank | 0 | 6613136 | <gh_stars>0
import re

# One IPv4 octet: 0-255, longest alternatives first.
# BUGFIX: the original pattern's final-octet alternation ended with the
# literal "255" instead of "25[0-5]", so addresses ending in 250-254
# (e.g. "1.1.1.250") were wrongly reported as "Neither".
# NOTE(review): like the original, leading zeros (e.g. "01") are accepted.
_IPV4_OCTET = r"(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{2}|[0-9])"
IPV4_PATTERN = re.compile(r"^(?:%s\.){3}%s$" % (_IPV4_OCTET, _IPV4_OCTET))
# Full (uncompressed) IPv6 form only: eight 1-4 digit hex groups.
IPV6_PATTERN = re.compile(r"^(?:[0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}$")


def classify_address(line: str) -> str:
    """Classify *line* as "IPv4", "IPv6", "Both" or "Neither".

    :param line: the candidate address string (matched in full)
    :return: the classification label printed by the script
    """
    is_v4 = IPV4_PATTERN.match(line) is not None
    is_v6 = IPV6_PATTERN.match(line) is not None
    if is_v4 and is_v6:
        # Unreachable for these anchored patterns; kept for parity with the
        # original script's output branches.
        return "Both"
    if is_v4:
        return "IPv4"
    if is_v6:
        return "IPv6"
    return "Neither"


def main() -> None:
    """Read a line count, then classify and print each following line."""
    for _ in range(int(input())):
        print(classify_address(input()))


if __name__ == "__main__":
    # Guarding the interactive loop lets the module be imported for testing
    # without blocking on stdin.
    main()
| import re
lines = []
total_matches = 0
ipv4_pattern = r'^(([0-9]|[0-9]{2}|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9]{2}|1[0-9]{2}|2[0-4][0-9]|255)$'
ipv6_pattern = r'^([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}$'
line_count = int(input())
while line_count > 0:
line = input()
ipv4_matches = re.findall(ipv4_pattern, line)
ipv6_matches = re.findall(ipv6_pattern, line)
if len(ipv4_matches) > 0 and len(ipv6_matches) > 0:
print("Both")
elif len(ipv4_matches) > 0:
print("IPv4")
elif len(ipv6_matches) > 0:
print("IPv6")
else:
print("Neither")
line_count -= 1 | none | 1 | 3.261722 | 3 | |
utils/ddp/__init__.py | UESTC-Liuxin/CVMI_Sementic_Segmentation | 0 | 6613137 | '''
Author: <NAME>
Date: 2021-11-18 09:58:40
LastEditors: <NAME>
LastEditTime: 2021-11-26 12:13:14
Description: file content
FilePath: /CVMI_Sementic_Segmentation/utils/ddp/__init__.py
'''
from utils.ddp.dist_utils import get_dist_info, setup_distributed, convert_sync_bn, mkdirs
from utils.ddp.mmdistributed_ddp import MMDistributedDataParallel | '''
Author: <NAME>
Date: 2021-11-18 09:58:40
LastEditors: <NAME>
LastEditTime: 2021-11-26 12:13:14
Description: file content
FilePath: /CVMI_Sementic_Segmentation/utils/ddp/__init__.py
'''
from utils.ddp.dist_utils import get_dist_info, setup_distributed, convert_sync_bn, mkdirs
from utils.ddp.mmdistributed_ddp import MMDistributedDataParallel | en | 0.573963 | Author: <NAME> Date: 2021-11-18 09:58:40 LastEditors: <NAME> LastEditTime: 2021-11-26 12:13:14 Description: file content FilePath: /CVMI_Sementic_Segmentation/utils/ddp/__init__.py | 1.305191 | 1 |
aula38/aula38.py | jessicsous/Curso_Python | 1 | 6613138 | '''
Iteráveis
Iteradores
Geradores
'''
# Demo: which objects are iterable.
lista = [1,2,3,4,5,6]
lista1 = 123456
lista2 = 'string'
print(hasattr(lista, '__iter__'))  # check whether the list is an iterable object
print(hasattr(lista1, '__iter__'))  # an int has no __iter__
print(hasattr(lista2, '__iter__'))  # a string does
# how to turn a list into an iterator
lista3 = [10,20,30,40,50,60]
lista3 = iter(lista)  # NOTE(review): iterates `lista` (1..6), not the [10..60] list above -- likely a typo in the lesson; kept as-is to preserve the printed output
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
# generators (they help avoid memory consumption)
import sys
lista4 = list(range(1000))
print(lista4)
print(sys.getsizeof(lista4))  # how many bytes the list is consuming in memory
import time  # module used to watch what happens on screen
def gera():  # creating a function
    r = []  # create an accumulator list
    for n in range(100):  # iterate over range 0..99
        r.append(n)  # append the value of each loop iteration to the empty list
        time.sleep(0.1)  # sleep 0.1 second
    return r  # after everything, return the value
g = gera()
for v in g:
    print(v)
# to return one value at a time
def gerad():
    for n in range(100):
        yield n
        time.sleep(0.1)
g1 = gerad()
for v in g1:
    print(v)
# manual version without the for loop
def gerador():
    variavel = 'valor 1'
    yield variavel
    variavel = 'valor 2'
    yield variavel
g2 = gerador()
print(next(g2))
print(next(g2))
# simplest way to create a generator
l1 = [x for x in range(100)]  # stores all the values in memory
print(type(l1))
l2 = (x for x in range(100))  # does not store all values in memory, only yields values on demand; request them with next or a for loop
print(type(l2))
print(sys.getsizeof(l1))  # size of the list
print(sys.getsizeof(l2))  # size of the generator / although they look alike, their sizes differ
| '''
Iteráveis
Iteradores
Geradores
'''
lista = [1,2,3,4,5,6]
lista1 = 123456
lista2 = 'string'
print(hasattr(lista, '__iter__')) # verificação se a lista é um objeto iterável
print(hasattr(lista1, '__iter__'))
print(hasattr(lista2, '__iter__'))
# como transformar uma lista em um iterador
lista3 = [10,20,30,40,50,60]
lista3 = iter(lista)
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
print(next(lista3))
# geradores (servem para evitar o consulmo de memoria)
import sys
lista4 = list(range(1000))
print(lista4)
print(sys.getsizeof(lista4)) # quantos bytes a lista ta consulmindo de memoria
import time # módulo para vêr oq acontece na tela
def gera(): # criando uma função
r = [] # cria uma raw
for n in range(100): # faz uma ieração da função range de 0 a 99
r.append(n) # colocando os valores, a cada iteração do laço, no raw vazio
time.sleep(0.1) # dormir 0.1 segundo
return r # depois de tudo retorna o valor
g = gera()
for v in g:
print(v)
# para retornar um valor de cada vez
def gerad():
for n in range(100):
yield n
time.sleep(0.1)
g1 = gerad()
for v in g1:
print(v)
# código manual sem o laço for
def gerador():
    """Hand-written generator: yields two fixed strings, one per next() call."""
    for valor in ('valor 1', 'valor 2'):
        yield valor
g2 = gerador()
print(next(g2))  # 'valor 1'
print(next(g2))  # 'valor 2'
# simplest way to create a generator: a generator expression
l1 = [x for x in range(100)] # list comprehension: every value is stored in memory
print(type(l1))
l2 = (x for x in range(100)) # generator expression: nothing stored; values are produced on demand via next() or a for loop
print(type(l2))
print(sys.getsizeof(l1)) # size of the list in bytes
print(sys.getsizeof(l2)) # size of the generator; far smaller even though both cover the same values
| pt | 0.987784 | Iteráveis Iteradores Geradores # verificação se a lista é um objeto iterável # como transformar uma lista em um iterador # geradores (servem para evitar o consulmo de memoria) # quantos bytes a lista ta consulmindo de memoria # módulo para vêr oq acontece na tela # criando uma função # cria uma raw # faz uma ieração da função range de 0 a 99 # colocando os valores, a cada iteração do laço, no raw vazio # dormir 0.1 segundo # depois de tudo retorna o valor # para retornar um valor de cada vez # código manual sem o laço for # forma mais simples para criar gerador # salva todos os valores na memória # não salva todos valores na memória, só entrega os valores pedidos. para pedir pode ser utilizado o next ou o for # tamanho da lista # tamanho da lista / apesar de identicas tem tamanhos diferentes | 3.816016 | 4 |
test/test.py | SENERGY-Platform/analytics-operator-local-estimator | 0 | 6613139 | # Copyright 2020 InfAI (CC SES)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone, timedelta
import unittest
import numpy as np
from dateutil.relativedelta import *
import main
class TestMainMethods(unittest.TestCase):
def test_predict(self):
n_samples = 0
max_samples = 744
data = np.random.randint(6, size=max_samples)
target_value = np.sum(data)
today = datetime.utcnow().date()
dt = datetime(today.year, 1, 1)
target = datetime(today.year, 2, 1)
target_timestamp = target.replace(tzinfo=timezone.utc).timestamp()
val = 0
while n_samples < max_samples:
val = val + data[n_samples]
timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
x, y = np.array([[timestamp, 1]]), np.array([val])
print(x, y)
main.train(val, timestamp)
pred = main.predict(target_timestamp)
print(pred)
print(pred / target_value)
print("*******************")
dt = dt + timedelta(hours=1)
n_samples += 1
def test_date(self):
today = datetime(2013, 12, 31)
eoy = datetime(today.year, 12, 31)
eom = datetime(today.year, today.month, 1)+relativedelta(months=1)
eod = datetime(today.year, today.month, today.day)+timedelta(days=1)
print(eoy)
print(eom)
print(eod)
| # Copyright 2020 InfAI (CC SES)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone, timedelta
import unittest
import numpy as np
from dateutil.relativedelta import *
import main
class TestMainMethods(unittest.TestCase):
def test_predict(self):
n_samples = 0
max_samples = 744
data = np.random.randint(6, size=max_samples)
target_value = np.sum(data)
today = datetime.utcnow().date()
dt = datetime(today.year, 1, 1)
target = datetime(today.year, 2, 1)
target_timestamp = target.replace(tzinfo=timezone.utc).timestamp()
val = 0
while n_samples < max_samples:
val = val + data[n_samples]
timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
x, y = np.array([[timestamp, 1]]), np.array([val])
print(x, y)
main.train(val, timestamp)
pred = main.predict(target_timestamp)
print(pred)
print(pred / target_value)
print("*******************")
dt = dt + timedelta(hours=1)
n_samples += 1
def test_date(self):
today = datetime(2013, 12, 31)
eoy = datetime(today.year, 12, 31)
eom = datetime(today.year, today.month, 1)+relativedelta(months=1)
eod = datetime(today.year, today.month, today.day)+timedelta(days=1)
print(eoy)
print(eom)
print(eod)
| en | 0.849015 | # Copyright 2020 InfAI (CC SES) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.391685 | 2 |
examples/benchmarks/json/parsers/parsy_json.py | eerimoq/textparser | 23 | 6613140 | import timeit
import parsy
from parsy import generate
from parsy import regex
from parsy import string
whitespace = regex(r'\s*')
lexeme = lambda p: p << whitespace
lbrace = lexeme(string('{'))
rbrace = lexeme(string('}'))
lbrack = lexeme(string('['))
rbrack = lexeme(string(']'))
colon = lexeme(string(':'))
comma = lexeme(string(','))
true = lexeme(string('true'))
false = lexeme(string('false'))
null = lexeme(string('null'))
number = lexeme(
regex(r'-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?')
)
string_part = regex(r'[^"\\]+')
string_esc = string('\\') >> (
string('\\')
| string('/')
| string('"')
| string('b')
| string('f')
| string('n')
| string('r')
| string('t')
| regex(r'u[0-9a-fA-F]{4}').map(lambda s: chr(int(s[1:], 16)))
)
quoted = lexeme(string('"') >> (string_part | string_esc).many().concat() << string('"'))
# Circular dependency between array and value means we use `generate`
# form here.
@generate
def array():
yield lbrack
elements = yield value.sep_by(comma)
yield rbrack
return elements
@generate
def object_pair():
key = yield quoted
yield colon
val = yield value
return (key, val)
json_object = lbrace >> object_pair.sep_by(comma) << rbrace
value = quoted | number | json_object | array | true | false | null
json = whitespace >> value
def parse_time(json_string, iterations):
def _parse():
json.parse(json_string)
return timeit.timeit(_parse, number=iterations)
def parse(json_string):
return json.parse(json_string)
def version():
return parsy.__version__
| import timeit
import parsy
from parsy import generate
from parsy import regex
from parsy import string
whitespace = regex(r'\s*')
lexeme = lambda p: p << whitespace
lbrace = lexeme(string('{'))
rbrace = lexeme(string('}'))
lbrack = lexeme(string('['))
rbrack = lexeme(string(']'))
colon = lexeme(string(':'))
comma = lexeme(string(','))
true = lexeme(string('true'))
false = lexeme(string('false'))
null = lexeme(string('null'))
number = lexeme(
regex(r'-?(0|[1-9][0-9]*)([.][0-9]+)?([eE][+-]?[0-9]+)?')
)
string_part = regex(r'[^"\\]+')
string_esc = string('\\') >> (
string('\\')
| string('/')
| string('"')
| string('b')
| string('f')
| string('n')
| string('r')
| string('t')
| regex(r'u[0-9a-fA-F]{4}').map(lambda s: chr(int(s[1:], 16)))
)
quoted = lexeme(string('"') >> (string_part | string_esc).many().concat() << string('"'))
# Circular dependency between array and value means we use `generate`
# form here.
@generate
def array():
yield lbrack
elements = yield value.sep_by(comma)
yield rbrack
return elements
@generate
def object_pair():
key = yield quoted
yield colon
val = yield value
return (key, val)
json_object = lbrace >> object_pair.sep_by(comma) << rbrace
value = quoted | number | json_object | array | true | false | null
json = whitespace >> value
def parse_time(json_string, iterations):
def _parse():
json.parse(json_string)
return timeit.timeit(_parse, number=iterations)
def parse(json_string):
return json.parse(json_string)
def version():
return parsy.__version__
| en | 0.895902 | # Circular dependency between array and value means we use `generate` # form here. | 2.835296 | 3 |
day17/d17p3.py | Akankshasharmaa/100DaysOfCode | 2 | 6613141 | import math
x = list(range(10,0, -1))
y = [math.ceil(math.sqrt(item)) for item in x]
ziplist = list(zip(x,y))
print(ziplist)
print([(10, 4), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 2), (3, 2), (2, 2), (1, 1)]) | import math
x = list(range(10,0, -1))
y = [math.ceil(math.sqrt(item)) for item in x]
ziplist = list(zip(x,y))
print(ziplist)
print([(10, 4), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 2), (3, 2), (2, 2), (1, 1)]) | none | 1 | 3.582025 | 4 | |
dataloader/blender_render/obj_blender.py | zgjslc/Film-Recovery-master1 | 3 | 6613142 | <filename>dataloader/blender_render/obj_blender.py
import bpy, _cycles
import bmesh
import random
import math
import numpy as np
from mathutils import Vector, Euler
import os
import addon_utils
import string
import pickle
from bpy_extras.object_utils import world_to_camera_view
############################################################################################
# Adding Film object and Shader
############################################################################################
def add_film(film_path):
# bpy.ops.object.mode_set(mode="OBJECT")
film_name = film_path.split('/')[-1]
root_path = os.path.dirname(film_path)
bpy.ops.import_image.to_plane(files=[{"name":film_name,"name":film_name}],directory=root_path,align_axis='Z+', relative=False)
label = film_name.split('.')[0]
film = bpy.data.objects[label]
select_object(film)
film.data.materials.append(bpy.data.materials[label])
mat = bpy.data.materials[label]
film.active_material = mat
# pos_x = random.rand(0.5,1.5)
# pos_y = random.random(0.5,1.5)
# pos_z = random.random(2,5)
# film.location = (pos_x, pos_y, pos_z)
nodes = mat.node_tree.nodes
links = mat.node_tree.links
texture_node = nodes['Image Texture']
texture_node.extension='EXTEND'
output_node = nodes['Material Output']
# Notes: modified by trans
trans_bsdf = nodes.new(type='ShaderNodeBsdfTransparent')
mix_shader = nodes.new(type='ShaderNodeMixShader')
bsdf_node = nodes['Principled BSDF']
bsdf_node.inputs['Metallic'].default_value=0.#random.uniform(0.6,0.7)
bsdf_node.inputs['Roughness'].default_value=0.08
bsdf_node.inputs['IOR'].default_value=random.uniform(1.1,1.6)
bsdf_node.inputs['Transmission'].default_value=1.0
bsdf_node.inputs['Specular'].default_value= 0.08
bsdf_node.inputs['Clearcoat'].default_value=0.03 #0.3
bsdf_node.inputs['Alpha'].default_value=1.0
#trans_node = nodes.new(type='ShaderNodeBsdfTransparent')
#mix_node = nodes.new(type='ShaderNodeMixShader')
#mix_node.inputs[0].default_value = 0.018
texturecoord_node = nodes.new(type='ShaderNodeTexCoord')
for link in links:
links.remove(link)
links.new(texture_node.outputs['Color'],bsdf_node.inputs['Base Color'])
# Notes: trans
links.new(texture_node.outputs['Alpha'], mix_shader.inputs['Fac'])
links.new(trans_bsdf.outputs['BSDF'], mix_shader.inputs['Shader'])
links.new(bsdf_node.outputs['BSDF'], mix_shader.inputs['Shader'])
links.new(mix_shader.outputs['Shader'], output_node.inputs['Surface'])
# links.new(bsdf_node.outputs['BSDF'],output_node.inputs[0])
links.new(texture_node.inputs[0],texturecoord_node.outputs[2])
return film
#############################################################################################
# Physics Simulation
#############################################################################################
def simulation_softbody(obj, frame = 10, sub_num = 12, choice_num = 5):
select_object(obj)
subdivide(obj, sub_num)
bpy.ops.object.mode_set(mode="EDIT")
label = obj.name
mesh = bpy.data.meshes[label]
bm = bmesh.from_edit_mesh(mesh)
choice_verts = np.random.choice(bm.verts,choice_num)
index_list = [v.index for v in choice_verts]
bpy.ops.object.mode_set(mode="OBJECT")
group = obj.vertex_groups.new(name = 'Group')
control = obj.vertex_groups.new(name = 'Control')
if np.random.rand():
for i in index_list:
group.add([i], random.choice([0.75,0.85,0.8,0.9,1]), 'REPLACE')
else:
group.add(index_list, random.choice([0.75,0.85,0.8,0.9]), 'REPLACE')
control.add([v.index for v in mesh.vertices], 1., 'REPLACE')
bpy.ops.object.modifier_add(type='SOFT_BODY')
bpy.context.object.modifiers["Softbody"].settings.vertex_group_goal = "Group"
bpy.context.object.modifiers["Softbody"].settings.goal_spring = 0.9
bpy.context.object.modifiers["Softbody"].settings.goal_default = 1
bpy.context.object.modifiers["Softbody"].settings.goal_max = 1
bpy.context.object.modifiers["Softbody"].settings.goal_min = 0.65
bpy.context.object.modifiers["Softbody"].settings.pull = 0.9
bpy.context.object.modifiers["Softbody"].settings.push = 0.9
bpy.context.object.modifiers["Softbody"].settings.damping = 0
bpy.context.object.modifiers["Softbody"].settings.bend = 10
bpy.context.object.modifiers["Softbody"].settings.spring_length = 100
bpy.context.object.modifiers["Softbody"].settings.use_stiff_quads = True
bpy.context.object.modifiers["Softbody"].settings.use_self_collision = True
bpy.ops.ptcache.bake_all(bake=True)
bpy.context.scene.frame_set(frame)
count = 0
v = True
while not too_unwarp(obj):
count = count + 1
frame = random.uniform(10,150)
bpy.context.scene.frame_set(frame)
if count == 50:
v = False
break
# for i in range(1,frame+1):
# bpy.context.scene.frame_set(i)
select_contour(obj)
bpy.ops.object.modifier_add(type='SUBSURF')
bpy.context.object.modifiers["Subdivision"].render_levels = 6
bpy.context.object.modifiers["Subdivision"].quality = 6
reset_camera(obj)
# bpy.ops.ptcache.bake_all(bake=False)
# reset_camera(label=obj.name)
# group2 = obj.vertex_groups.new(name = 'Control')
# all_index = [v.index for v in bm.verts]
# group2.add(all_index, 1., 'REPLACE')
return v
def simulation_cloth(obj, frame = 10, sub_num = 12, choice_num = 5):
select_object(obj)
subdivide(obj, sub_num)
bpy.ops.object.mode_set(mode="EDIT")
label = obj.name
mesh = bpy.data.meshes[label]
bm = bmesh.from_edit_mesh(mesh)
choice_verts = np.random.choice(bm.verts,choice_num)
index_list = [v.index for v in choice_verts]
bpy.ops.object.mode_set(mode="OBJECT")
group = obj.vertex_groups.new(name = 'Group')
if np.random.rand()<0.6:
for i in index_list:
group.add([i], random.choice([0.6,0.7,0.8,0.9,1]), 'REPLACE')
else:
group.add(index_list, random.choice([0.6,0.7,0.8,0.9]), 'REPLACE')
bpy.ops.object.modifier_add(type='CLOTH')
bpy.context.object.modifiers["Cloth"].settings.quality = 15
bpy.context.object.modifiers["Cloth"].settings.mass = 0.4
bpy.context.object.modifiers["Cloth"].settings.tension_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.compression_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.shear_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.bending_stiffness = 150
bpy.context.object.modifiers["Cloth"].settings.tension_damping = 25
bpy.context.object.modifiers["Cloth"].settings.compression_damping = 25
bpy.context.object.modifiers["Cloth"].settings.shear_damping = 25
bpy.context.object.modifiers["Cloth"].settings.air_damping = 1
bpy.context.object.modifiers["Cloth"].settings.vertex_group_mass = "Group"
bpy.context.scene.frame_set(frame)
bpy.ops.ptcache.bake_all(bake=False)
bpy.context.view_layer.update()
# group2 = obj.vertex_groups.new(name = 'Control')
# all_index = [v.index for v in bm.verts]
# group2.add(all_index, 1., 'REPLACE')
return
| <filename>dataloader/blender_render/obj_blender.py
import bpy, _cycles
import bmesh
import random
import math
import numpy as np
from mathutils import Vector, Euler
import os
import addon_utils
import string
import pickle
from bpy_extras.object_utils import world_to_camera_view
############################################################################################
# Adding Film object and Shader
############################################################################################
def add_film(film_path):
# bpy.ops.object.mode_set(mode="OBJECT")
film_name = film_path.split('/')[-1]
root_path = os.path.dirname(film_path)
bpy.ops.import_image.to_plane(files=[{"name":film_name,"name":film_name}],directory=root_path,align_axis='Z+', relative=False)
label = film_name.split('.')[0]
film = bpy.data.objects[label]
select_object(film)
film.data.materials.append(bpy.data.materials[label])
mat = bpy.data.materials[label]
film.active_material = mat
# pos_x = random.rand(0.5,1.5)
# pos_y = random.random(0.5,1.5)
# pos_z = random.random(2,5)
# film.location = (pos_x, pos_y, pos_z)
nodes = mat.node_tree.nodes
links = mat.node_tree.links
texture_node = nodes['Image Texture']
texture_node.extension='EXTEND'
output_node = nodes['Material Output']
# Notes: modified by trans
trans_bsdf = nodes.new(type='ShaderNodeBsdfTransparent')
mix_shader = nodes.new(type='ShaderNodeMixShader')
bsdf_node = nodes['Principled BSDF']
bsdf_node.inputs['Metallic'].default_value=0.#random.uniform(0.6,0.7)
bsdf_node.inputs['Roughness'].default_value=0.08
bsdf_node.inputs['IOR'].default_value=random.uniform(1.1,1.6)
bsdf_node.inputs['Transmission'].default_value=1.0
bsdf_node.inputs['Specular'].default_value= 0.08
bsdf_node.inputs['Clearcoat'].default_value=0.03 #0.3
bsdf_node.inputs['Alpha'].default_value=1.0
#trans_node = nodes.new(type='ShaderNodeBsdfTransparent')
#mix_node = nodes.new(type='ShaderNodeMixShader')
#mix_node.inputs[0].default_value = 0.018
texturecoord_node = nodes.new(type='ShaderNodeTexCoord')
for link in links:
links.remove(link)
links.new(texture_node.outputs['Color'],bsdf_node.inputs['Base Color'])
# Notes: trans
links.new(texture_node.outputs['Alpha'], mix_shader.inputs['Fac'])
links.new(trans_bsdf.outputs['BSDF'], mix_shader.inputs['Shader'])
links.new(bsdf_node.outputs['BSDF'], mix_shader.inputs['Shader'])
links.new(mix_shader.outputs['Shader'], output_node.inputs['Surface'])
# links.new(bsdf_node.outputs['BSDF'],output_node.inputs[0])
links.new(texture_node.inputs[0],texturecoord_node.outputs[2])
return film
#############################################################################################
# Physics Simulation
#############################################################################################
def simulation_softbody(obj, frame = 10, sub_num = 12, choice_num = 5):
select_object(obj)
subdivide(obj, sub_num)
bpy.ops.object.mode_set(mode="EDIT")
label = obj.name
mesh = bpy.data.meshes[label]
bm = bmesh.from_edit_mesh(mesh)
choice_verts = np.random.choice(bm.verts,choice_num)
index_list = [v.index for v in choice_verts]
bpy.ops.object.mode_set(mode="OBJECT")
group = obj.vertex_groups.new(name = 'Group')
control = obj.vertex_groups.new(name = 'Control')
if np.random.rand():
for i in index_list:
group.add([i], random.choice([0.75,0.85,0.8,0.9,1]), 'REPLACE')
else:
group.add(index_list, random.choice([0.75,0.85,0.8,0.9]), 'REPLACE')
control.add([v.index for v in mesh.vertices], 1., 'REPLACE')
bpy.ops.object.modifier_add(type='SOFT_BODY')
bpy.context.object.modifiers["Softbody"].settings.vertex_group_goal = "Group"
bpy.context.object.modifiers["Softbody"].settings.goal_spring = 0.9
bpy.context.object.modifiers["Softbody"].settings.goal_default = 1
bpy.context.object.modifiers["Softbody"].settings.goal_max = 1
bpy.context.object.modifiers["Softbody"].settings.goal_min = 0.65
bpy.context.object.modifiers["Softbody"].settings.pull = 0.9
bpy.context.object.modifiers["Softbody"].settings.push = 0.9
bpy.context.object.modifiers["Softbody"].settings.damping = 0
bpy.context.object.modifiers["Softbody"].settings.bend = 10
bpy.context.object.modifiers["Softbody"].settings.spring_length = 100
bpy.context.object.modifiers["Softbody"].settings.use_stiff_quads = True
bpy.context.object.modifiers["Softbody"].settings.use_self_collision = True
bpy.ops.ptcache.bake_all(bake=True)
bpy.context.scene.frame_set(frame)
count = 0
v = True
while not too_unwarp(obj):
count = count + 1
frame = random.uniform(10,150)
bpy.context.scene.frame_set(frame)
if count == 50:
v = False
break
# for i in range(1,frame+1):
# bpy.context.scene.frame_set(i)
select_contour(obj)
bpy.ops.object.modifier_add(type='SUBSURF')
bpy.context.object.modifiers["Subdivision"].render_levels = 6
bpy.context.object.modifiers["Subdivision"].quality = 6
reset_camera(obj)
# bpy.ops.ptcache.bake_all(bake=False)
# reset_camera(label=obj.name)
# group2 = obj.vertex_groups.new(name = 'Control')
# all_index = [v.index for v in bm.verts]
# group2.add(all_index, 1., 'REPLACE')
return v
def simulation_cloth(obj, frame = 10, sub_num = 12, choice_num = 5):
select_object(obj)
subdivide(obj, sub_num)
bpy.ops.object.mode_set(mode="EDIT")
label = obj.name
mesh = bpy.data.meshes[label]
bm = bmesh.from_edit_mesh(mesh)
choice_verts = np.random.choice(bm.verts,choice_num)
index_list = [v.index for v in choice_verts]
bpy.ops.object.mode_set(mode="OBJECT")
group = obj.vertex_groups.new(name = 'Group')
if np.random.rand()<0.6:
for i in index_list:
group.add([i], random.choice([0.6,0.7,0.8,0.9,1]), 'REPLACE')
else:
group.add(index_list, random.choice([0.6,0.7,0.8,0.9]), 'REPLACE')
bpy.ops.object.modifier_add(type='CLOTH')
bpy.context.object.modifiers["Cloth"].settings.quality = 15
bpy.context.object.modifiers["Cloth"].settings.mass = 0.4
bpy.context.object.modifiers["Cloth"].settings.tension_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.compression_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.shear_stiffness = 80
bpy.context.object.modifiers["Cloth"].settings.bending_stiffness = 150
bpy.context.object.modifiers["Cloth"].settings.tension_damping = 25
bpy.context.object.modifiers["Cloth"].settings.compression_damping = 25
bpy.context.object.modifiers["Cloth"].settings.shear_damping = 25
bpy.context.object.modifiers["Cloth"].settings.air_damping = 1
bpy.context.object.modifiers["Cloth"].settings.vertex_group_mass = "Group"
bpy.context.scene.frame_set(frame)
bpy.ops.ptcache.bake_all(bake=False)
bpy.context.view_layer.update()
# group2 = obj.vertex_groups.new(name = 'Control')
# all_index = [v.index for v in bm.verts]
# group2.add(all_index, 1., 'REPLACE')
return
| de | 0.234178 | ############################################################################################ # Adding Film object and Shader ############################################################################################ # bpy.ops.object.mode_set(mode="OBJECT") # pos_x = random.rand(0.5,1.5) # pos_y = random.random(0.5,1.5) # pos_z = random.random(2,5) # film.location = (pos_x, pos_y, pos_z) # Notes: modified by trans #random.uniform(0.6,0.7) #0.3 #trans_node = nodes.new(type='ShaderNodeBsdfTransparent') #mix_node = nodes.new(type='ShaderNodeMixShader') #mix_node.inputs[0].default_value = 0.018 # Notes: trans # links.new(bsdf_node.outputs['BSDF'],output_node.inputs[0]) ############################################################################################# # Physics Simulation ############################################################################################# # for i in range(1,frame+1): # bpy.context.scene.frame_set(i) # bpy.ops.ptcache.bake_all(bake=False) # reset_camera(label=obj.name) # group2 = obj.vertex_groups.new(name = 'Control') # all_index = [v.index for v in bm.verts] # group2.add(all_index, 1., 'REPLACE') # group2 = obj.vertex_groups.new(name = 'Control') # all_index = [v.index for v in bm.verts] # group2.add(all_index, 1., 'REPLACE') | 2.234621 | 2 |
optimizers/benchmark.py | Coricos/Challenger | 15 | 6613143 | <gh_stars>10-100
# Author: <NAME>
# Date: 25/11/2019
# Project: optimizers
try:
from optimizers.parzen import *
from optimizers.bayesian import *
from optimizers.optunas import *
except:
from parzen import *
from bayesian import *
from optunas import *
class Benchmark:
def __init__(self, problem, logger, seed=42, boundaries=None):
self.seed = seed
self.logger = logger
self.problem = problem
if not (boundaries is None): self.boundaries = boundaries
else: self.boundaries = self.problem.loadBoundaries()
def run(self, iterations=100):
scores = dict()
opt = Parzen(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_iter=iterations)
scores['parzen'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Bayesian(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_init=iterations, n_iter=0)
scores['random'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Bayesian(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_init=int(0.8*iterations), n_iter=int(0.2*iterations))
scores['bayesian'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Evolutive(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_tpe=int(0.2*iterations), n_random=int(0.6*iterations), n_bayesian=int(0.2*iterations))
scores['evolutive'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
return scores
| # Author: <NAME>
# Date: 25/11/2019
# Project: optimizers
try:
from optimizers.parzen import *
from optimizers.bayesian import *
from optimizers.optunas import *
except:
from parzen import *
from bayesian import *
from optunas import *
class Benchmark:
def __init__(self, problem, logger, seed=42, boundaries=None):
self.seed = seed
self.logger = logger
self.problem = problem
if not (boundaries is None): self.boundaries = boundaries
else: self.boundaries = self.problem.loadBoundaries()
def run(self, iterations=100):
scores = dict()
opt = Parzen(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_iter=iterations)
scores['parzen'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Bayesian(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_init=iterations, n_iter=0)
scores['random'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Bayesian(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_init=int(0.8*iterations), n_iter=int(0.2*iterations))
scores['bayesian'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
opt = Evolutive(self.problem, self.boundaries, self.logger, seed=self.seed)
opt.run(n_tpe=int(0.2*iterations), n_random=int(0.6*iterations), n_bayesian=int(0.2*iterations))
scores['evolutive'] = {'scores': self.problem.score, 'bestParams': self.problem.bestParameters()}
self.problem.reset()
return scores | en | 0.667952 | # Author: <NAME> # Date: 25/11/2019 # Project: optimizers | 2.508925 | 3 |
2019/day09.py | gcalmettes/AdventOfCode2017 | 1 | 6613144 | <gh_stars>1-10
"""
https://adventofcode.com/2019/day/9
"""
from intcode_final import run_intcode, listToDict
p = listToDict([109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99])
assert f'{list(p.values())}' == f'{run_intcode(p, inputs=[])}'
assert len(str(run_intcode([1102,34915192,34915192,7,4,7,99,0], inputs=[])[0])) == 16
p = listToDict([104,1125899906842624,99])
assert run_intcode([104,1125899906842624,99], inputs=[]) == [p[1]]
with open('day09_input.txt') as f:
program = listToDict([int(x) for x in f.read().strip().split(',')])
part1 = run_intcode(program, inputs=[1])
print(f'part 1: {part1}')
part2 = run_intcode(program, inputs=[2])
print(f'part 2: {part2}') | """
https://adventofcode.com/2019/day/9
"""
from intcode_final import run_intcode, listToDict
p = listToDict([109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99])
assert f'{list(p.values())}' == f'{run_intcode(p, inputs=[])}'
assert len(str(run_intcode([1102,34915192,34915192,7,4,7,99,0], inputs=[])[0])) == 16
p = listToDict([104,1125899906842624,99])
assert run_intcode([104,1125899906842624,99], inputs=[]) == [p[1]]
with open('day09_input.txt') as f:
program = listToDict([int(x) for x in f.read().strip().split(',')])
part1 = run_intcode(program, inputs=[1])
print(f'part 1: {part1}')
part2 = run_intcode(program, inputs=[2])
print(f'part 2: {part2}') | en | 0.790036 | https://adventofcode.com/2019/day/9 | 3.034706 | 3 |
python/testData/inspections/PyAbstractClassInspection/quickFix/AddImportedABCToSuperclasses/main.py | jnthn/intellij-community | 2 | 6613145 | <reponame>jnthn/intellij-community
from abc import ABC, abstractmethod
class A1(ABC):
@abstractmethod
def m1(self):
pass
class A<caret>2(A1):
pass | from abc import ABC, abstractmethod
class A1(ABC):
@abstractmethod
def m1(self):
pass
class A<caret>2(A1):
pass | none | 1 | 3.250295 | 3 | |
record/record.py | GearDragon-Software/Record | 0 | 6613146 | <filename>record/record.py
# Copyright (c) GearDragon Software 2016
class Record(object):
Unused = 0
FORMAT = lambda n: "%08d" % (n,)
Serialized = dict()
def __init__(self, name="null")
self.name = name
self.serial = Record.FORMAT(Record.Unused)
Record.Unused += 1
Record.Serialized.update({self.serial: self})
def __repr__(self):
return repr([self.name])
def __setattr__(self, name, value):
if name == "serial":
raise AttributeError("cannot manually set serial attribute of a record")
else:
super(object).__setattr__(self, name, value)
Record.Serialized.update({self.serial: self})
def make_callable(self, action=lambda: none): # TODO: lint
CallableRecord.__init__(self, action, **vars(self))
class CallableRecord(Record):
def __init__(self, name="null", action=lambda: None, **kwargs):
Record.__init__(self, name)
self.__action__ = action
for kw, arg in kwargs:
setattr(self, kw, arg)
def __repr__(self):
return repr([(self.name, self.__action__)])
def __call__(self, *args, **kwargs):
return self.__action__(*args, **kwargs)
def make_uncallable(self): # TODO: lint
del self.__action__
del self.__call__
| <filename>record/record.py
# Copyright (c) GearDragon Software 2016
class Record(object):
Unused = 0
FORMAT = lambda n: "%08d" % (n,)
Serialized = dict()
def __init__(self, name="null")
self.name = name
self.serial = Record.FORMAT(Record.Unused)
Record.Unused += 1
Record.Serialized.update({self.serial: self})
def __repr__(self):
return repr([self.name])
def __setattr__(self, name, value):
if name == "serial":
raise AttributeError("cannot manually set serial attribute of a record")
else:
super(object).__setattr__(self, name, value)
Record.Serialized.update({self.serial: self})
def make_callable(self, action=lambda: none): # TODO: lint
CallableRecord.__init__(self, action, **vars(self))
class CallableRecord(Record):
def __init__(self, name="null", action=lambda: None, **kwargs):
Record.__init__(self, name)
self.__action__ = action
for kw, arg in kwargs:
setattr(self, kw, arg)
def __repr__(self):
return repr([(self.name, self.__action__)])
def __call__(self, *args, **kwargs):
return self.__action__(*args, **kwargs)
def make_uncallable(self): # TODO: lint
del self.__action__
del self.__call__
| en | 0.334792 | # Copyright (c) GearDragon Software 2016 # TODO: lint # TODO: lint | 2.869916 | 3 |
deliver/ia369/iacolorhist.py | mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso | 7 | 6613147 | <reponame>mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso
# -*- encoding: utf-8 -*-
# Module iacolorhist
def iacolorhist(f, mask=None):
    """Build a mosaic of the three 2-D marginal color histograms
    (red-green, blue-green and red-blue) of a 3-band uint8 image.

    f    : uint8 array of shape (3, ...) holding the r, g, b bands.
    mask : optional binary mask; only pixels where mask is non-zero
           contribute to the histograms.
    Returns a 2-D float array containing the three 256x256 histograms
    separated by frames of constant (maximum) value.
    """
    import numpy as np
    from iahistogram import iahistogram
    WFRAME = 5  # width of the separating frame, in pixels
    f = np.asarray(f)
    if len(f.shape) == 1: f = f[np.newaxis,:]
    if not f.dtype == 'uint8':
        # was `raise Exception, '...'` -- Python-2-only raise syntax
        raise Exception('error, can only process uint8 images')
    if not f.shape[0] == 3:
        raise Exception('error, can only process 3-band images')
    # widen to int so the 256*band arithmetic below cannot wrap
    # (np.int was removed from modern NumPy; builtin int is equivalent)
    r, g, b = f[0].astype(int), f[1].astype(int), f[2].astype(int)
    n_zeros = 0
    if mask is not None:  # `if mask:` raises ValueError for arrays
        # Number of masked-out pixels per band; they all collapse into
        # histogram bin 0 and are subtracted again below.
        # (was `f.shape[0]*f.shape[1] - len(np.nonzero(...))`:
        # len() of the nonzero tuple is the number of axes, not the
        # pixel count, and shape[0]*shape[1] is not the band size)
        n_zeros = f[0].size - np.count_nonzero(mask)
        r, g, b = mask*r, mask*g, mask*b
    hrg = np.zeros((256,256), np.int32); hbg = hrg+0; hrb = hrg+0
    # Combine band pairs into a single 16-bit code (256*hi + lo) so a
    # 1-D histogram of the code is the 2-D joint histogram, flattened.
    img = 256*r + g; m1 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hrg), range(m1+1), aux)
    img = 256*b + g; m2 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hbg), range(m2+1), aux)
    img = 256*r + b; m3 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hrb), range(m3+1), aux)
    # Assemble the mosaic: frames at the histograms' maximum value.
    m = max(hrg.max(), hbg.max(), hrb.max())
    hc = m*np.ones((3*WFRAME+2*256, 3*WFRAME+2*256))
    hc[WFRAME:WFRAME+256, WFRAME:WFRAME+256] = np.transpose(hrg)
    hc[WFRAME:WFRAME+256, 2*WFRAME+256:2*WFRAME+512] = np.transpose(hbg)
    hc[2*WFRAME+256:2*WFRAME+512, WFRAME:WFRAME+256] = np.transpose(hrb)
    return hc
| # -*- encoding: utf-8 -*-
# Module iacolorhist
def iacolorhist(f, mask=None):
    """Build a mosaic of the three 2-D marginal color histograms
    (red-green, blue-green and red-blue) of a 3-band uint8 image.

    f    : uint8 array of shape (3, ...) holding the r, g, b bands.
    mask : optional binary mask; only pixels where mask is non-zero
           contribute to the histograms.
    Returns a 2-D float array containing the three 256x256 histograms
    separated by frames of constant (maximum) value.
    """
    import numpy as np
    from iahistogram import iahistogram
    WFRAME = 5  # width of the separating frame, in pixels
    f = np.asarray(f)
    if len(f.shape) == 1: f = f[np.newaxis,:]
    if not f.dtype == 'uint8':
        # was `raise Exception, '...'` -- Python-2-only raise syntax
        raise Exception('error, can only process uint8 images')
    if not f.shape[0] == 3:
        raise Exception('error, can only process 3-band images')
    # widen to int so the 256*band arithmetic below cannot wrap
    # (np.int was removed from modern NumPy; builtin int is equivalent)
    r, g, b = f[0].astype(int), f[1].astype(int), f[2].astype(int)
    n_zeros = 0
    if mask is not None:  # `if mask:` raises ValueError for arrays
        # Number of masked-out pixels per band; they all collapse into
        # histogram bin 0 and are subtracted again below.
        # (was `f.shape[0]*f.shape[1] - len(np.nonzero(...))`:
        # len() of the nonzero tuple is the number of axes, not the
        # pixel count, and shape[0]*shape[1] is not the band size)
        n_zeros = f[0].size - np.count_nonzero(mask)
        r, g, b = mask*r, mask*g, mask*b
    hrg = np.zeros((256,256), np.int32); hbg = hrg+0; hrb = hrg+0
    # Combine band pairs into a single 16-bit code (256*hi + lo) so a
    # 1-D histogram of the code is the 2-D joint histogram, flattened.
    img = 256*r + g; m1 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hrg), range(m1+1), aux)
    img = 256*b + g; m2 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hbg), range(m2+1), aux)
    img = 256*r + b; m3 = img.max()
    aux = iahistogram(img.astype(np.int32)); aux[0] = aux[0] - n_zeros
    np.put(np.ravel(hrb), range(m3+1), aux)
    # Assemble the mosaic: frames at the histograms' maximum value.
    m = max(hrg.max(), hbg.max(), hrb.max())
    hc = m*np.ones((3*WFRAME+2*256, 3*WFRAME+2*256))
    hc[WFRAME:WFRAME+256, WFRAME:WFRAME+256] = np.transpose(hrg)
    hc[WFRAME:WFRAME+256, 2*WFRAME+256:2*WFRAME+512] = np.transpose(hbg)
    hc[2*WFRAME+256:2*WFRAME+512, WFRAME:WFRAME+256] = np.transpose(hrb)
    return hc
africanus/util/requirements.py | JoshVStaden/codex-africanus | 13 | 6613148 | # -*- coding: utf-8 -*-
import importlib
from decorator import decorate
from africanus.util.docs import on_rtd
from africanus.util.testing import in_pytest, force_missing_pkg_exception
def _missing_packages(fn, packages, import_errors):
if len(import_errors) > 0:
import_err_str = "\n".join((str(e) for e in import_errors))
return ("%s requires installation of "
"the following packages: %s.\n"
"%s" % (fn, packages, import_err_str))
else:
return ("%s requires installation of the following packages: %s. "
% (fn, tuple(packages)))
class MissingPackageException(Exception):
    """Raised when a decorated function is called while its optional
    package requirements are not installed."""
def requires_optional(*requirements):
    """
    Decorator returning either the original function,
    or a dummy function raising a
    :class:`MissingPackageException` when called,
    depending on whether the supplied ``requirements``
    are present.

    If packages are missing and called within a test, the
    dummy function will call :func:`pytest.skip`.

    Used in the following way:

    .. code-block:: python

        try:
            from scipy import interpolate
        except ImportError as e:
            # https://stackoverflow.com/a/29268974/1611416, pep 3110 and 344
            scipy_import_error = e
        else:
            scipy_import_error = None

        @requires_optional('scipy', scipy_import_error)
        def function(*args, **kwargs):
            return interpolate(...)

    Parameters
    ----------
    requirements : iterable of string, None or ImportError
        Sequence of package names required by the decorated function.
        ImportError exceptions (or None, indicating their absence)
        may also be supplied and will be immediately re-raised within
        the decorator. This is useful for tracking down problems
        in user import logic.

    Returns
    -------
    callable
        Either the original function if all ``requirements``
        are available or a dummy function that throws
        a :class:`MissingPackageException` or skips a pytest.
    """
    # Return a bare wrapper if we're on readthedocs
    if on_rtd():
        def _function_decorator(fn):
            def _wrapper(*args, **kwargs):
                pass
            return decorate(fn, _wrapper)
        return _function_decorator

    have_requirements = True
    missing_requirements = []
    honour_pytest_marker = True
    actual_imports = []
    import_errors = []

    # Classify each requirement
    for requirement in requirements:
        # Ignore
        if requirement is None:
            continue
        # Reraise any supplied ImportErrors
        elif isinstance(requirement, ImportError):
            import_errors.append(requirement)
        # We should force exceptions, even if we're in a pytest test case.
        # NOTE: this check must precede the generic string branch below --
        # if the sentinel is itself a string, the previous ordering would
        # have tried to import it as a package instead.
        elif requirement == force_missing_pkg_exception:
            honour_pytest_marker = False
        # An actual package, try to import it
        elif isinstance(requirement, str):
            try:
                importlib.import_module(requirement)
            except ImportError:
                missing_requirements.append(requirement)
                have_requirements = False
            else:
                actual_imports.append(requirement)
        # Just wrong
        else:
            raise TypeError("requirements must be "
                            "None, strings or ImportErrors. "
                            "Received %s" % requirement)

    # Requested requirement import succeeded, but there were user
    # import errors that we now re-raise
    # (message previously misspelt "ocurred")
    if have_requirements and len(import_errors) > 0:
        raise ImportError("Successfully imported %s "
                          "but the following user-supplied "
                          "ImportErrors occurred: \n%s" %
                          (actual_imports,
                           '\n'.join((str(e) for e in import_errors))))

    def _function_decorator(fn):
        # We have requirements, return the original function
        if have_requirements:
            return fn

        # We don't have requirements, produce a failing wrapper
        def _wrapper(*args, **kwargs):
            """ Empty docstring """
            # We're running test cases: skip rather than fail
            if honour_pytest_marker and in_pytest():
                try:
                    import pytest
                except ImportError as e:
                    raise ImportError("Marked as in a pytest "
                                      "test case, but pytest cannot "
                                      "be imported! %s" % str(e))
                else:
                    msg = _missing_packages(
                        fn.__name__, missing_requirements, import_errors)
                    pytest.skip(msg)
            # Raise the exception
            else:
                msg = _missing_packages(
                    fn.__name__, missing_requirements, import_errors)
                raise MissingPackageException(msg)

        return decorate(fn, _wrapper)

    return _function_decorator
| # -*- coding: utf-8 -*-
import importlib
from decorator import decorate
from africanus.util.docs import on_rtd
from africanus.util.testing import in_pytest, force_missing_pkg_exception
def _missing_packages(fn, packages, import_errors):
if len(import_errors) > 0:
import_err_str = "\n".join((str(e) for e in import_errors))
return ("%s requires installation of "
"the following packages: %s.\n"
"%s" % (fn, packages, import_err_str))
else:
return ("%s requires installation of the following packages: %s. "
% (fn, tuple(packages)))
class MissingPackageException(Exception):
    """Raised when a decorated function is called while its optional
    package requirements are not installed."""
def requires_optional(*requirements):
    """
    Decorator returning either the original function,
    or a dummy function raising a
    :class:`MissingPackageException` when called,
    depending on whether the supplied ``requirements``
    are present.

    If packages are missing and called within a test, the
    dummy function will call :func:`pytest.skip`.

    Used in the following way:

    .. code-block:: python

        try:
            from scipy import interpolate
        except ImportError as e:
            # https://stackoverflow.com/a/29268974/1611416, pep 3110 and 344
            scipy_import_error = e
        else:
            scipy_import_error = None

        @requires_optional('scipy', scipy_import_error)
        def function(*args, **kwargs):
            return interpolate(...)

    Parameters
    ----------
    requirements : iterable of string, None or ImportError
        Sequence of package names required by the decorated function.
        ImportError exceptions (or None, indicating their absence)
        may also be supplied and will be immediately re-raised within
        the decorator. This is useful for tracking down problems
        in user import logic.

    Returns
    -------
    callable
        Either the original function if all ``requirements``
        are available or a dummy function that throws
        a :class:`MissingPackageException` or skips a pytest.
    """
    # Return a bare wrapper if we're on readthedocs
    if on_rtd():
        def _function_decorator(fn):
            def _wrapper(*args, **kwargs):
                pass
            return decorate(fn, _wrapper)
        return _function_decorator

    have_requirements = True
    missing_requirements = []
    honour_pytest_marker = True
    actual_imports = []
    import_errors = []

    # Classify each requirement
    for requirement in requirements:
        # Ignore
        if requirement is None:
            continue
        # Reraise any supplied ImportErrors
        elif isinstance(requirement, ImportError):
            import_errors.append(requirement)
        # We should force exceptions, even if we're in a pytest test case.
        # NOTE: this check must precede the generic string branch below --
        # if the sentinel is itself a string, the previous ordering would
        # have tried to import it as a package instead.
        elif requirement == force_missing_pkg_exception:
            honour_pytest_marker = False
        # An actual package, try to import it
        elif isinstance(requirement, str):
            try:
                importlib.import_module(requirement)
            except ImportError:
                missing_requirements.append(requirement)
                have_requirements = False
            else:
                actual_imports.append(requirement)
        # Just wrong
        else:
            raise TypeError("requirements must be "
                            "None, strings or ImportErrors. "
                            "Received %s" % requirement)

    # Requested requirement import succeeded, but there were user
    # import errors that we now re-raise
    # (message previously misspelt "ocurred")
    if have_requirements and len(import_errors) > 0:
        raise ImportError("Successfully imported %s "
                          "but the following user-supplied "
                          "ImportErrors occurred: \n%s" %
                          (actual_imports,
                           '\n'.join((str(e) for e in import_errors))))

    def _function_decorator(fn):
        # We have requirements, return the original function
        if have_requirements:
            return fn

        # We don't have requirements, produce a failing wrapper
        def _wrapper(*args, **kwargs):
            """ Empty docstring """
            # We're running test cases: skip rather than fail
            if honour_pytest_marker and in_pytest():
                try:
                    import pytest
                except ImportError as e:
                    raise ImportError("Marked as in a pytest "
                                      "test case, but pytest cannot "
                                      "be imported! %s" % str(e))
                else:
                    msg = _missing_packages(
                        fn.__name__, missing_requirements, import_errors)
                    pytest.skip(msg)
            # Raise the exception
            else:
                msg = _missing_packages(
                    fn.__name__, missing_requirements, import_errors)
                raise MissingPackageException(msg)

        return decorate(fn, _wrapper)

    return _function_decorator
| en | 0.747452 | # -*- coding: utf-8 -*- Decorator returning either the original function, or a dummy function raising a :class:`MissingPackageException` when called, depending on whether the supplied ``requirements`` are present. If packages are missing and called within a test, the dummy function will call :func:`pytest.skip`. Used in the following way: .. code-block:: python try: from scipy import interpolate except ImportError as e: # https://stackoverflow.com/a/29268974/1611416, pep 3110 and 344 scipy_import_error = e else: scipy_import_error = None @requires_optional('scipy', scipy_import_error) def function(*args, **kwargs): return interpolate(...) Parameters ---------- requirements : iterable of string, None or ImportError Sequence of package names required by the decorated function. ImportError exceptions (or None, indicating their absence) may also be supplied and will be immediately re-raised within the decorator. This is useful for tracking down problems in user import logic. Returns ------- callable Either the original function if all ``requirements`` are available or a dummy function that throws a :class:`MissingPackageException` or skips a pytest. # Return a bare wrapper if we're on readthedocs # Try imports # Ignore # Reraise any supplied ImportErrors # An actual package, try to import it # We should force exceptions, even if we're in a pytest test case # Just wrong # Requested requirement import succeeded, but there were user # import errors that we now re-raise # We have requirements, return the original function # We don't have requirements, produce a failing wrapper Empty docstring # We're running test cases # Raise the exception | 2.764302 | 3 |
src/old/mpas-source/testing_and_setup/compass/utility_scripts/make_parameter_study_configs.py | meteorologytoday/E3SM-sicn | 0 | 6613149 | <reponame>meteorologytoday/E3SM-sicn<filename>src/old/mpas-source/testing_and_setup/compass/utility_scripts/make_parameter_study_configs.py
#!/usr/bin/env python
"""
Writes out a series of config files to be used to perform a parameter
study. A template file, specified with the -t flag, contains dummy
strings prefixed with '@' to be replaced with parameter values from a list.
The resulting config files are numbered consecutively with a prefix povided
with the -o flag. Parameter names and values are provided as a list with
the -p flag using syntax as in this example:
-p param1=1,2,3 param2=1e3,1e4,1e5 param3='a','b','c' \\
param4=.true.,.false.,.true.
The number of parameter values must be the same for all parameters and all
parameters are varied simultaneously.
"""
import argparse
def write_from_template(inFile, outFile, replacements):
    """Copy *inFile* to *outFile* line by line, replacing every
    occurrence of each key in *replacements* with its value."""
    # `with` guarantees both files are closed even on error;
    # dict.items() replaces the Python-2-only iteritems().
    with open(inFile) as inID, open(outFile, 'w') as outID:
        for line in inID:
            for src, target in replacements.items():
                line = line.replace(src, target)
            outID.write(line)
# Define and process input arguments
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-t", "--template", dest="template", help="A config file in which to add or modify a parameter", metavar="TEMPLATE", required=True)
parser.add_argument("-o", "--out_prefix", dest="out_prefix", help="The prefix for the output config file", metavar="PREFIX", required=True)
parser.add_argument("-p", "--parameters", dest="parameters", help="A list of parameters and comma-separated values", metavar="PARAMETERS", nargs="+", required=True)
args = parser.parse_args()
# Parse each "name=v1,v2,..." argument into parameters[name] = [v1, v2, ...].
# All parameters must supply the same number of values, since they are
# varied simultaneously.
parameters = {}
first = True
for parameterString in args.parameters:
    (parameter, valueString) = parameterString.split('=',1)
    values = valueString.split(',')
    if first:
        # The first parameter fixes the expected number of values
        valueCount = len(values)
        first = False
    else:
        assert(len(values) == valueCount)
    parameters[parameter] = values
# Write one numbered config file per value index, replacing each '@name'
# placeholder in the template with that parameter's value at this index.
for valueIndex in range(valueCount):
    outFileName = '%s_%02i.xml'%(args.out_prefix, valueIndex)
    replacements = {}
    for parameter in parameters:
        replacements['@%s'%parameter] = parameters[parameter][valueIndex]
    write_from_template(args.template, outFileName, replacements)
| #!/usr/bin/env python
"""
Writes out a series of config files to be used to perform a parameter
study. A template file, specified with the -t flag, contains dummy
strings prefixed with '@' to be replaced with parameter values from a list.
The resulting config files are numbered consecutively with a prefix povided
with the -o flag. Parameter names and values are provided as a list with
the -p flag using syntax as in this example:
-p param1=1,2,3 param2=1e3,1e4,1e5 param3='a','b','c' \\
param4=.true.,.false.,.true.
The number of parameter values must be the same for all parameters and all
parameters are varied simultaneously.
"""
import argparse
def write_from_template(inFile, outFile, replacements):
    """Copy *inFile* to *outFile* line by line, replacing every
    occurrence of each key in *replacements* with its value."""
    # `with` guarantees both files are closed even on error;
    # dict.items() replaces the Python-2-only iteritems().
    with open(inFile) as inID, open(outFile, 'w') as outID:
        for line in inID:
            for src, target in replacements.items():
                line = line.replace(src, target)
            outID.write(line)
# Define and process input arguments
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-t", "--template", dest="template", help="A config file in which to add or modify a parameter", metavar="TEMPLATE", required=True)
parser.add_argument("-o", "--out_prefix", dest="out_prefix", help="The prefix for the output config file", metavar="PREFIX", required=True)
parser.add_argument("-p", "--parameters", dest="parameters", help="A list of parameters and comma-separated values", metavar="PARAMETERS", nargs="+", required=True)
args = parser.parse_args()
# Parse each "name=v1,v2,..." argument into parameters[name] = [v1, v2, ...].
# All parameters must supply the same number of values, since they are
# varied simultaneously.
parameters = {}
first = True
for parameterString in args.parameters:
    (parameter, valueString) = parameterString.split('=',1)
    values = valueString.split(',')
    if first:
        # The first parameter fixes the expected number of values
        valueCount = len(values)
        first = False
    else:
        assert(len(values) == valueCount)
    parameters[parameter] = values
# Write one numbered config file per value index, replacing each '@name'
# placeholder in the template with that parameter's value at this index.
for valueIndex in range(valueCount):
    outFileName = '%s_%02i.xml'%(args.out_prefix, valueIndex)
    replacements = {}
    for parameter in parameters:
        replacements['@%s'%parameter] = parameters[parameter][valueIndex]
    write_from_template(args.template, outFileName, replacements)
simplelist/listfromtxt.py | NeilShah2026/SimpleList | 0 | 6613150 | <filename>simplelist/listfromtxt.py
def listfromtxt(file):
    """Read a text file and return its lines as a list of stripped strings."""
    stripped = []
    with open(file, 'r') as handle:
        for line in handle:
            stripped.append(line.strip())
    return stripped
| <filename>simplelist/listfromtxt.py
def listfromtxt(file):
    """Read a text file and return its lines as a list of stripped strings."""
    stripped = []
    with open(file, 'r') as handle:
        for line in handle:
            stripped.append(line.strip())
    return stripped
| none | 1 | 3.610847 | 4 |