id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3388316 | import hashlib
import io
import unittest
import numpy as np
import pandas as pd
from evt.dataset import Dataset
from evt.estimators.gpdmle import GPDMLE
from evt.methods.peaks_over_threshold import PeaksOverThreshold
import matplotlib.pyplot as plt
class TestGPDMLE(unittest.TestCase):
    """Unit tests for the GPD maximum-likelihood estimator (GPDMLE)."""

    def setUp(self) -> None:
        # Geometrically growing sample e^0 .. e^4; every point exceeds the
        # peaks-over-threshold cutoff of 0, so all five values are used.
        self.series = pd.Series([np.exp(exponent) for exponent in range(5)])
        self.peaks_over_threshold = PeaksOverThreshold(
            Dataset(self.series),
            threshold=0
        )
        self.gpdmle = GPDMLE(self.peaks_over_threshold)

    def test_gpdmle(self):
        # estimate() returns (estimate, ci_lower, ci_upper) tuples for the
        # tail index and the scale parameter; constants below are regression anchors.
        (
            (tail_index_estimate, tail_index_ci_lower, tail_index_ci_upper),
            (scale_estimate, scale_ci_lower, scale_ci_upper),
        ) = self.gpdmle.estimate()
        self.assertAlmostEqual(0.4850660575136194, tail_index_estimate)
        self.assertAlmostEqual(-0.816627816142266, tail_index_ci_lower)
        self.assertAlmostEqual(1.7867599311695048, tail_index_ci_upper)
        self.assertAlmostEqual(10.164192059236363, scale_estimate)
        self.assertAlmostEqual(8.594893265235651, scale_ci_lower)
        self.assertAlmostEqual(11.733490853237075, scale_ci_upper)

    def test_runtime_error(self):
        # Plotting before estimate() has been called must raise RuntimeError.
        fig = plt.figure(figsize=(8, 6))
        ax = plt.gca()
        out_file = io.BytesIO()
        with self.assertRaises(RuntimeError):
            self.gpdmle.plot_qq_gpd(ax)

    def test_plot_qq_gev(self):
        # NOTE(review): comparing an MD5 of raw figure bytes is brittle across
        # matplotlib versions/backends — confirm the environment is pinned.
        self.gpdmle.estimate()
        fig = plt.figure(figsize=(8, 6))
        ax = plt.gca()
        out_file = io.BytesIO()
        self.gpdmle.plot_qq_gpd(ax)
        fig.savefig(out_file, format='raw')
        out_file.seek(0)
        hashed = hashlib.md5(out_file.read()).digest()
        self.assertEqual(
            b'y\n\x8b\xaa\x17\x85\x01\x85\xe8m\xc7\xb6<-\x03F',
            hashed
        )
| StarcoderdataPython |
1713338 | <filename>reviews/Bokeh/sliders.py
# start bokeh app
# # bokeh serve sliders.py
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))

# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
              tools="crosshair,pan,reset,save,wheel_zoom",
              x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)

# Frequency slider: k in sin(k*x), adjustable between 0.1 and 5.1.
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)

def update_data(attrname, old, new):
    """Bokeh on_change callback: recompute the curve for the new frequency."""
    k = freq.value
    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = np.sin(k*x)
    # Replacing source.data wholesale triggers a client-side redraw.
    source.data = dict(x=x, y=y)

freq.on_change('value', update_data)

# Set up layouts and add to document
widget_box = widgetbox(freq)
curdoc().add_root(row(widget_box, plot, width=800))
curdoc().title = "Sliders"

# def change_slider_in_python():
#     freq.value = np.random.uniform(0.1, 5.1)
#
# curdoc().add_periodic_callback(change_slider_in_python, 2000)
| StarcoderdataPython |
3226454 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 30 13:05:14 2014
@author: stevo
"""
from __future__ import print_function
import logging
import sys
import os
import cPickle
import numpy as np
from scipy.sparse import dok_matrix
from scipy.io import mmwrite, mmread
import text_entail.dictionary as td
import text_entail.io as tio
def w1Asfeature(d_triples, d_w1):
    """Build a sparse one-hot matrix marking each triple's w1 value.

    Rows correspond to triples, columns to distinct w1 strings registered
    in *d_w1*; entry (row, col) is 1 when the triple at that row carries
    the w1 word assigned to that column.
    """
    indicator = dok_matrix((len(d_triples), len(d_triples._m2ids)))
    for word, row_ids in d_triples._m2ids.items():
        col = d_w1.add(word)
        for row in row_ids:
            indicator[row, col] = 1
    return indicator
def w2Asfeature(d_triples, d_w2):
    """Build a sparse one-hot matrix marking each triple's w2 value.

    Rows are triples, columns are distinct w2 strings; (i, j) is 1 when
    triple i carries the w2 word assigned to column j by *d_w2*.
    """
    w2_mat = dok_matrix((len(d_triples), len(d_triples._r2ids)))
    for w2, ids in d_triples._r2ids.items():
        j = d_w2.add(w2)
        for i in ids:
            w2_mat[i,j] = 1
    return w2_mat
def ctxAsfeature(d_triples, d_ctx):
    """Build a sparse one-hot matrix marking each triple's context value.

    Rows are triples, columns are distinct context strings; (i, j) is 1
    when triple i carries the context assigned to column j by *d_ctx*.
    """
    ctx_mat = dok_matrix((len(d_triples), len(d_triples._l2ids)))
    for ctx, ids in d_triples._l2ids.items():
        j = d_ctx.add(ctx)
        for i in ids:
            ctx_mat[i,j] = 1
    return ctx_mat
def binarize_sparse_matrix(mat):
    """Replace every stored nonzero entry of *mat* with 1.0.

    The matrix is cast to bool (nonzero -> True) and back to float64,
    yielding a 0/1-valued matrix of the same shape and sparsity.
    """
    logging.info('binarizing feature matrix')
    binarized = mat.astype(bool).astype(np.float64)
    logging.info('finished binarizing feature matrix')
    return binarized
def pred_vectors_with_context(preds_file, has_header=True):
    """Read contextualized predicate pairs and build the 0/1 entailment vector.

    Returns ``(vec, d_triples)``: *vec* is a float64 label array
    (1.0 = 'true') and *d_triples* maps each unique (context, pred_l,
    pred_r) triple to its row index. Duplicate triples are skipped with a
    warning to stderr, and duplicates with contradicting labels are counted.
    """
    logging.info("creating predicate pairs class vector '{}'".format(preds_file))
    temp = []
    xy_predl_predr_entail = tio.read_preds_w_ctx(preds_file, has_header=has_header)
    d_triples = td.TripleDict() # rows
    duplicates = 0
    contradicting_duplicates = 0
    for ctx_X, ctx_Y, pred_l, pred_r, entailing in xy_predl_predr_entail:
        ctx = '{}\t{}'.format(ctx_X, ctx_Y)
        i = d_triples.add((ctx, pred_l, pred_r))
        if i < len(temp):
            # Triple already seen: add() returned an existing row index.
            label = 1 if entailing.strip().lower() == 'true' else 0
            print("omitting duplicate example: '{} {} {} {}' ".format(ctx, pred_l, pred_r, entailing) ,file=sys.stderr)
            duplicates += 1
            if temp[i] != label:
                print("duplicate example has different label: '{}' vs. '{}'".format(temp[i], label) ,file=sys.stderr)
                contradicting_duplicates += 1
        else:
            temp.append(1 if entailing.strip().lower() == 'true' else 0)
    vec = np.array(temp, dtype=np.float64)
    logging.info("finished creating arg pairs class vector '{}'".format(preds_file))
    logging.info("found {} duplicate examples with {} having contradicting labels.".format(duplicates, contradicting_duplicates))
    return vec, d_triples
def arg_l_arg_r_pairs_vector(args_file, file_contains_context=False, has_header=True):
    """Read argument pairs (with or without context) and build the 0/1 entailment vector.

    Returns ``(vec, d_triples)`` analogous to pred_vectors_with_context;
    when the file has no context column an empty context string is used.
    """
    logging.info("creating arg pairs class vector '{}'".format(args_file))
    temp = []
    if file_contains_context:
        ctx_argl_argr_entail = tio.read_args_w_ctx(args_file, has_header=has_header)
    else:
        argl_argr_entail = tio.read_args_wo_ctx(args_file, has_header=has_header)
        def append_empty_context(tuples):
            # Normalize (l, r, e) tuples to (ctx, l, r, e) with an empty context.
            for l,r,e in tuples:
                yield '', l, r, e
        ctx_argl_argr_entail = append_empty_context(argl_argr_entail)
    d_triples = td.TripleDict() # rows
    duplicates = 0
    contradicting_duplicates = 0
    for ctx, arg_l, arg_r, entailing in ctx_argl_argr_entail:
        i = d_triples.add((ctx, arg_l, arg_r))
        if i < len(temp):
            # Triple already seen: add() returned an existing row index.
            label = 1 if entailing.strip().lower() == 'true' else 0
            print("omitting duplicate example: '{} {} {} {}' ".format(ctx, arg_l, arg_r, entailing) ,file=sys.stderr)
            duplicates += 1
            if temp[i] != label:
                print("duplicate example has different label: '{}' vs. '{}'".format(temp[i], label) ,file=sys.stderr)
                contradicting_duplicates += 1
        else:
            temp.append(1 if entailing.strip().lower() == 'true' else 0)
    vec = np.array(temp, dtype=np.float64)
    logging.info("finished creating arg pairs class vector '{}'".format(args_file))
    logging.info("found {} duplicate examples with {} having contradicting labels.".format(duplicates, contradicting_duplicates))
    return vec, d_triples
def arg_l_arg_r_asjo_matrix(
        row_indices,
        jb_file,
        num_rows,
        col_indices,
        transform_w1 = lambda w1 : (w1[:w1.find('::@')], w1[w1.find('@::')+3:]),
        transform_w2sig = lambda w2sig : w2sig,
        mmfile_presuffix = '',
        reload = False):
    """Build (or load a cached) sparse JoBim feature matrix for argument pairs.

    *row_indices* maps a transformed w1 key to the row ids it occupies;
    *col_indices* is grown as new features are encountered. The matrix is
    cached next to *jb_file* as a MatrixMarket '.mm' file (plus a pickled
    column index in '.mmi') and reused unless *reload* is true.
    """
    mm_file = os.path.splitext( jb_file )[0] + mmfile_presuffix + '.mm'
    if not reload:
        # legacy condition ( for files with file extension inside filename )
        if not os.path.exists(mm_file):
            mm_file = jb_file + mmfile_presuffix + '.mm'
        if os.path.exists(mm_file) and os.path.isfile(mm_file):
            logging.info("corresponding matrix file already exists for '{}'.".format(jb_file))
            logging.info("loading '{}'.".format(mm_file))
            mat = mmread(mm_file)
            with open(mm_file+'i','r') as f:
                col_indices._id2w = cPickle.load(f)
            # Rebuild the reverse word->id mapping from the pickled id list.
            for i, w in enumerate(col_indices._id2w):
                col_indices._w2id[w] = i
            logging.info("finished loading '{}'.".format(mm_file))
            return mat
    logging.info("creating arg pair feature matrix '{}'".format(jb_file))
    mat = dok_matrix((num_rows,1),dtype=np.float64) # len(d_pairs) = number of rows
    j_bs = tio.read_jb_file_filter_by_jo(jb_file, lambda jo : transform_w1(jo) in row_indices)
    for j, bs in j_bs:
        ks = row_indices[transform_w1(j)]
        for b, s in transform_w2sig(bs):
            l = col_indices.add(b)
            # Grow the matrix column-wise as new feature ids appear.
            if mat.shape[1] <= l:
                mat.resize((mat.shape[0],l+1))
            for k in ks:
                mat[k,l] = float(s)
    logging.info("finished creating arg pair feature matrix '{}'".format(jb_file))
    logging.info("saving matrix to '{}'.".format(mm_file))
    with open(mm_file,'w') as f:
        mmwrite(f, mat)
    with open(mm_file+'i','w') as f:
        cPickle.dump(col_indices._id2w, f)
    logging.info("finshed saving matrix")
    return mat
def arg_asjo_matrix(
        row_indices,
        col_indices,
        jb_file,
        num_rows,
        transform_w1 = lambda w1 : w1,
        transform_w2sig = lambda w2sig : w2sig,
        mmfile_presuffix = '',
        reload = False):
    """Build (or load a cached) sparse JoBim feature matrix for single arguments.

    Like arg_l_arg_r_asjo_matrix, but rows are keyed by a single transformed
    word instead of a word pair. Cached next to *jb_file* as '.mm'/'.mmi'
    and reused unless *reload* is true.
    """
    mm_file = os.path.splitext( jb_file )[0] + mmfile_presuffix + '.mm'
    if not reload:
        # legacy condition ( for files with file extension inside filename )
        if not os.path.exists(mm_file):
            mm_file = jb_file + mmfile_presuffix + '.mm'
        if os.path.exists(mm_file) and os.path.isfile(mm_file):
            logging.info("corresponding matrix file already exists for '{}'.".format(jb_file))
            logging.info("loading '{}'.".format(mm_file))
            mat = mmread(mm_file)
            with open(mm_file+'i','r') as f:
                col_indices._id2w = cPickle.load(f)
            # Rebuild the reverse word->id mapping from the pickled id list.
            for i, w in enumerate(col_indices._id2w):
                col_indices._w2id[w] = i
            logging.info("finished loading '{}'.".format(mm_file))
            return mat
    logging.info("creating arg feature matrix '{}'".format(jb_file))
    mat = dok_matrix((num_rows,1),dtype=np.float64) # number of rows x 1
    j_bs = tio.read_jb_file_filter_by_jo(jb_file, lambda jo : transform_w1(jo) in row_indices)
    for j, bs in j_bs:
        j = transform_w1(j)
        ks = row_indices[j]
        for b, s in transform_w2sig(bs):
            l = col_indices.add(b)
            # Grow the matrix column-wise as new feature ids appear.
            if mat.shape[1] <= l:
                mat.resize((mat.shape[0],l+1))
            for k in ks:
                mat[k,l] = float(s)
    logging.info("finished creating arg feature matrix '{}'".format(jb_file))
    logging.info("saving matrix to '{}'.".format(mm_file))
    with open(mm_file,'w') as f:
        mmwrite(f, mat)
    with open(mm_file+'i','w') as f:
        cPickle.dump(col_indices._id2w, f)
    logging.info("finshed saving matrix")
    return mat
def arg_to_topic_matrix(
        args,
        word2topic_file,
        num_rows,
        transform_w = lambda w: w,
        mmfile_presuffix = '',
        reload = False):
    """Build (or load a cached) sparse row-by-topic indicator matrix.

    Each word in *word2topic_file* is mapped through *transform_w*; every
    row listed for that word in *args* gets a 1 in the word's topic column.
    The result is cached next to the input file as a MatrixMarket '.mm'
    file and reused unless *reload* is true.
    """
    mm_file = os.path.splitext( word2topic_file )[0] + mmfile_presuffix + '.mm'
    if not reload:
        # legacy condition ( for files with file extension inside filename )
        if not os.path.exists(mm_file):
            mm_file = word2topic_file + mmfile_presuffix + '.mm'
        if os.path.exists(mm_file) and os.path.isfile(mm_file):
            logging.info("corresponding matrix file already exists for '{}'.".format(word2topic_file))
            logging.info("loading '{}'.".format(mm_file))
            mat = mmread(mm_file)
            logging.info("finished loading '{}'.".format(mm_file))
            return mat
    logging.info("creating topic feature matrix '{}'".format(word2topic_file))
    mat = dok_matrix((num_rows,1),dtype=np.float64) # number of rows x 1
    w2t = tio.read_word2topicfile(word2topic_file)
    for w, t in w2t:
        w = transform_w(w)
        if w not in args:
            continue
        ks = args[w]
        # Grow the matrix column-wise as higher topic ids appear.
        if mat.shape[1] <= t:
            mat.resize((mat.shape[0],t+1))
        for k in ks:
            mat[k,t] = 1
    logging.info("finished creating topic feature matrix '{}'".format(word2topic_file))
    # Bug fix: log the actual output path (previously logged the input file).
    logging.info("saving matrix to '{}'.".format(mm_file))
    with open(mm_file,'w') as f:
        mmwrite(f, mat)
    logging.info("finished saving matrix")
    return mat
def arg_l_arg_r_to_topic_matrix(
        row_indices,
        pair2topic_file,
        num_rows,
        transform_w = lambda w1 : (w1[:w1.find('::@')], w1[w1.find('@::')+3:]),
        mmfile_presuffix = '',
        reload = False):
    """Build (or load a cached) sparse row-by-topic indicator matrix for word pairs.

    Like arg_to_topic_matrix, but each line's word is split into a
    (left, right) pair key via *transform_w* before the row lookup.
    """
    mm_file = os.path.splitext( pair2topic_file )[0] + mmfile_presuffix + '.mm'
    if not reload:
        # legacy condition ( for files with file extension inside filename )
        if not os.path.exists(mm_file):
            mm_file = pair2topic_file + mmfile_presuffix + '.mm'
        if os.path.exists(mm_file) and os.path.isfile(mm_file):
            logging.info("corresponding matrix file already exists for '{}'.".format(pair2topic_file))
            logging.info("loading '{}'.".format(mm_file))
            mat = mmread(mm_file)
            logging.info("finished loading '{}'.".format(mm_file))
            return mat
    logging.info("creating topic feature matrix '{}'".format(pair2topic_file))
    mat = dok_matrix((num_rows,1),dtype=np.float64) # number of rows x 1
    w2t = tio.read_word2topicfile(pair2topic_file)
    for w, t in w2t:
        p = transform_w(w)
        if p not in row_indices:
            continue
        ks = row_indices[p]
        # Grow the matrix column-wise as higher topic ids appear.
        if mat.shape[1] <= t:
            mat.resize((mat.shape[0],t+1))
        for k in ks:
            mat[k,t] = 1
    logging.info("finished creating topic feature matrix '{}'".format(pair2topic_file))
    # NOTE(review): logs the input path but actually writes to mm_file —
    # probably meant to log mm_file; confirm before changing.
    logging.info("saving matrix to '{}'.".format(pair2topic_file))
    with open(mm_file,'w') as f:
        mmwrite(f, mat)
    logging.info("finished saving matrix")
    return mat
def topic_vector_matrix(
        row_indices,
        word2topicvector_file,
        num_rows,
        transform_w = lambda w: w,
        mmfile_presuffix = '',
        reload = False):
    """Build (or load a cached) per-row topic vector matrix.

    Each word's space-separated topic vector from *word2topicvector_file*
    is written into every row listed for that word in *row_indices*. The
    result is cached as a MatrixMarket '.mm' file and reused unless
    *reload* is true.
    """
    mm_file = os.path.splitext(word2topicvector_file)[0] + mmfile_presuffix + '.mm'
    if not reload:
        # # legacy condition ( for files with file extension inside filename )
        # if not os.path.exists(mm_file):
        #     mm_file = word2topic_file + mmfile_presuffix + '.mm'
        if os.path.exists(mm_file) and os.path.isfile(mm_file):
            logging.info("corresponding matrix file already exists for '{}'.".format(word2topicvector_file))
            logging.info("loading '{}'.".format(mm_file))
            mat = mmread(mm_file)
            logging.info("finished loading '{}'.".format(mm_file))
            return mat
    logging.info("creating topic vector feature matrix '{}'".format(word2topicvector_file))
    mat = dok_matrix((num_rows,1),dtype=np.float64) # number of rows x 1
    w2t = tio.read_word2topicvectorfile(word2topicvector_file)
    for w, t in w2t:
        w = transform_w(w)
        if w not in row_indices:
            continue
        # Bug fix: np.float was a deprecated alias for the builtin float
        # (i.e. float64) and was removed in NumPy 1.24; np.float64 is the
        # behavior-preserving replacement.
        t = np.array(t.split(' '), dtype=np.float64)
        ks = row_indices[w]
        # Grow the matrix to the vector's width on first use.
        if mat.shape[1] < len(t):
            mat.resize((mat.shape[0],len(t)))
        for k in ks:
            mat[k,:] = t
    logging.info("finished creating topic feature matrix '{}'".format(word2topicvector_file))
    # Bug fix: log the actual output path (previously logged the input file).
    logging.info("saving matrix to '{}'.".format(mm_file))
    with open(mm_file,'w') as f:
        mmwrite(f, mat)
    logging.info("finished saving matrix")
    return mat
81575 | <gh_stars>0
import os
import re
import sys
from typing import (
List,
Optional,
Tuple,
)
import alembic.config
from alembic.config import Config
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from galaxy.model.database_utils import is_one_database
from galaxy.model.migrations import (
AlembicManager,
DatabaseConfig,
DatabaseStateCache,
GXY,
IncorrectVersionError,
SQLALCHEMYMIGRATE_LAST_VERSION_GXY,
TSI,
)
from galaxy.util.properties import (
find_config_file,
get_data_dir,
load_app_properties,
)
DEFAULT_CONFIG_NAMES = ["galaxy", "universe_wsgi"]
CONFIG_FILE_ARG = "--galaxy-config"
CONFIG_DIR_NAME = "config"
GXY_CONFIG_PREFIX = "GALAXY_CONFIG_"
TSI_CONFIG_PREFIX = "GALAXY_INSTALL_CONFIG_"
def get_configuration(argv: List[str], cwd: str) -> Tuple[DatabaseConfig, DatabaseConfig, bool]:
    """
    Return a 3-item-tuple with configuration values used for managing databases:
    the gxy (galaxy) database config, the tsi (tool shed install) database
    config, and the database_auto_migrate flag.
    """
    config_file = _pop_config_file(argv)
    if config_file is None:
        # Fall back to the well-known config names in cwd and cwd/config.
        cwds = [cwd, os.path.join(cwd, CONFIG_DIR_NAME)]
        config_file = find_config_file(DEFAULT_CONFIG_NAMES, dirs=cwds)

    # load gxy properties and auto-migrate
    properties = load_app_properties(config_file=config_file, config_prefix=GXY_CONFIG_PREFIX)
    default_url = f"sqlite:///{os.path.join(get_data_dir(properties), 'universe.sqlite')}?isolation_level=IMMEDIATE"
    url = properties.get("database_connection", default_url)
    template = properties.get("database_template", None)
    encoding = properties.get("database_encoding", None)
    is_auto_migrate = properties.get("database_auto_migrate", False)
    gxy_config = DatabaseConfig(url, template, encoding)

    # load tsi properties; the install database defaults to the gxy url
    properties = load_app_properties(config_file=config_file, config_prefix=TSI_CONFIG_PREFIX)
    default_url = gxy_config.url
    url = properties.get("install_database_connection", default_url)
    template = properties.get("database_template", None)
    encoding = properties.get("database_encoding", None)
    tsi_config = DatabaseConfig(url, template, encoding)

    return (gxy_config, tsi_config, is_auto_migrate)
def _pop_config_file(argv: List[str]) -> Optional[str]:
    """Remove the '--galaxy-config <path>' pair from *argv*, returning the path.

    Returns None (leaving *argv* untouched) when the flag is absent.
    """
    if CONFIG_FILE_ARG not in argv:
        return None
    pos = argv.index(CONFIG_FILE_ARG)
    argv.pop(pos)  # drop the flag itself
    return argv.pop(pos)  # the value now occupies the flag's old slot
def add_db_urls_to_command_arguments(argv: List[str], gxy_url: str, tsi_url: str) -> None:
    """Expose both database urls to Alembic as '-x' options (gxy ends up first)."""
    _insert_x_argument(argv, "tsi_url", tsi_url)
    _insert_x_argument(argv, "gxy_url", gxy_url)
def _insert_x_argument(argv, key: str, value: str) -> None:
# `_insert_x_argument('mykey', 'myval')` transforms `foo -a 1` into `foo -x mykey=myval -a 42`
argv.insert(1, f"{key}={value}")
argv.insert(1, "-x")
def invoke_alembic() -> None:
    """
    Invoke the Alembic command line runner.

    Accept 'heads' as the target revision argument to enable upgrading both gxy and tsi in one command.
    This is consistent with Alembic's CLI, which allows `upgrade heads`. However, this would not work for
    separate gxy and tsi databases: we can't attach a database url to a revision after Alembic has been
    invoked with the 'upgrade' command and the 'heads' argument. So, instead we invoke Alembic for each head.
    """
    if "heads" in sys.argv and "upgrade" in sys.argv:
        # Run Alembic once per branch, rewriting the target argument in place.
        i = sys.argv.index("heads")
        sys.argv[i] = f"{GXY}@head"
        alembic.config.main()
        sys.argv[i] = f"{TSI}@head"
        alembic.config.main()
    else:
        alembic.config.main()
class LegacyScriptsException(Exception):
    """Signals miscellaneous errors caused by incorrect arguments passed to a legacy script."""

    def __init__(self, message: str) -> None:
        # Delegate to Exception so str(exc) yields the message unchanged.
        super().__init__(message)
class LegacyScripts:
    """Adapter that rewrites legacy manage_db-style CLI arguments into the
    argument format expected by Alembic, including database selection and
    version/revision handling. All conversions mutate ``self.argv`` in place.
    """

    LEGACY_CONFIG_FILE_ARG_NAMES = ["-c", "--config", "--config-file"]
    ALEMBIC_CONFIG_FILE_ARG = "--alembic-config"  # alembic config file, set in the calling script
    DEFAULT_DB_ARG = "default"

    def __init__(self, argv: List[str], cwd: Optional[str] = None) -> None:
        self.argv = argv
        self.cwd = cwd or os.getcwd()
        self.database = self.DEFAULT_DB_ARG

    def run(self) -> None:
        """
        Convert legacy arguments to current spec required by Alembic,
        then add db url arguments required by Alembic
        """
        self.convert_args()
        add_db_urls_to_command_arguments(self.argv, self.gxy_url, self.tsi_url)

    def convert_args(self) -> None:
        """
        Convert legacy arguments to current spec required by Alembic.
        Note: The following method calls must be done in this sequence.
        """
        self.pop_database_argument()
        self.rename_config_argument()
        self.rename_alembic_config_argument()
        self.load_db_urls()
        self.convert_version_argument()

    def pop_database_argument(self) -> None:
        """
        If last argument is a valid database name, pop and assign it; otherwise assign default.
        """
        arg = self.argv[-1]
        if arg in ["galaxy", "install"]:
            self.database = self.argv.pop()

    def rename_config_argument(self) -> None:
        """
        Rename the optional config argument: we can't use '-c' because that option is used by Alembic.
        """
        for arg in self.LEGACY_CONFIG_FILE_ARG_NAMES:
            if arg in self.argv:
                self._rename_arg(arg, CONFIG_FILE_ARG)
                return

    def rename_alembic_config_argument(self) -> None:
        """
        Rename argument name: `--alembic-config` to `-c`. There should be no `-c` argument present.
        """
        if "-c" in self.argv:
            raise LegacyScriptsException("Cannot rename alembic config argument: `-c` argument present.")
        self._rename_arg(self.ALEMBIC_CONFIG_FILE_ARG, "-c")

    def convert_version_argument(self) -> None:
        """
        Convert legacy version argument to current spec required by Alembic.
        """
        if "--version" in self.argv:
            # Just remove it: the following argument should be the version/revision identifier.
            pos = self.argv.index("--version")
            self.argv.pop(pos)
        else:
            # If we find --version=foo, extract foo and replace arg with foo (which is the revision identifier)
            p = re.compile(r"--version=([0-9A-Fa-f]+)")
            for i, arg in enumerate(self.argv):
                m = p.match(arg)
                if m:
                    self.argv[i] = m.group(1)
                    return
            # No version argument found: construct argument for an upgrade operation.
            # Raise exception otherwise.
            # NOTE(review): "requried" is a typo in the message text; left unchanged here.
            if "upgrade" not in self.argv:
                raise LegacyScriptsException("If no `--version` argument supplied, `upgrade` argument is requried")
            if self._is_one_database():  # upgrade both regardless of database argument
                self.argv.append("heads")
            else:  # for separate databases, choose one
                if self.database in ["galaxy", self.DEFAULT_DB_ARG]:
                    self.argv.append("gxy@head")
                elif self.database == "install":
                    self.argv.append("tsi@head")

    def _rename_arg(self, old_name, new_name) -> None:
        # Replace the first occurrence of old_name in argv, in place.
        pos = self.argv.index(old_name)
        self.argv[pos] = new_name

    def load_db_urls(self) -> None:
        # Resolve both database urls from the (possibly popped) config file.
        gxy_config, tsi_config, _ = get_configuration(self.argv, self.cwd)
        self.gxy_url = gxy_config.url
        self.tsi_url = tsi_config.url

    def _is_one_database(self):
        # True when gxy and tsi point at the same database.
        return is_one_database(self.gxy_url, self.tsi_url)
class LegacyManageDb:
    """Backwards-compatible implementation of the legacy manage_db operations
    (version reporting and upgrades) on top of Alembic."""

    def __init__(self):
        self._set_db_urls()

    def get_gxy_version(self):
        """
        Get the head revision for the gxy branch from the Alembic script directory.
        (previously referred to as "max/repository version")
        """
        script_directory = self._get_script_directory()
        heads = script_directory.get_heads()
        for head in heads:
            revision = script_directory.get_revision(head)
            if revision and GXY in revision.branch_labels:
                return head
        return None

    def get_gxy_db_version(self, gxy_db_url=None):
        """
        Get the head revision for the gxy branch from the galaxy database. If there
        is no alembic_version table, get the sqlalchemy migrate version. Raise error
        if that version is not the latest.
        (previously referred to as "database version")
        """
        db_url = gxy_db_url or self.gxy_db_url
        # Bug fix: create the engine *before* the try block. Previously a failed
        # create_engine() left `engine` unbound, and the finally clause raised
        # NameError, masking the original exception.
        engine = create_engine(db_url)
        try:
            version = self._get_gxy_alembic_db_version(engine)
            if not version:
                version = self._get_gxy_sam_db_version(engine)
                if version != SQLALCHEMYMIGRATE_LAST_VERSION_GXY:
                    raise IncorrectVersionError(GXY, SQLALCHEMYMIGRATE_LAST_VERSION_GXY)
            return version
        finally:
            engine.dispose()

    def run_upgrade(self, gxy_db_url=None, tsi_db_url=None):
        """
        Alembic will upgrade both branches, gxy and tsi, to their head revisions.
        """
        gxy_db_url = gxy_db_url or self.gxy_db_url
        tsi_db_url = tsi_db_url or self.tsi_db_url
        self._upgrade(gxy_db_url, GXY)
        self._upgrade(tsi_db_url, TSI)

    def _upgrade(self, db_url, model):
        # Same unbound-`engine` fix as in get_gxy_db_version.
        engine = create_engine(db_url)
        try:
            am = get_alembic_manager(engine)
            am.upgrade(model)
        finally:
            engine.dispose()

    def _set_db_urls(self):
        # Reuse LegacyScripts to resolve both db urls from legacy CLI arguments.
        ls = LegacyScripts(sys.argv, os.getcwd())
        ls.rename_config_argument()
        ls.load_db_urls()
        self.gxy_db_url = ls.gxy_url
        self.tsi_db_url = ls.tsi_url

    def _get_gxy_sam_db_version(self, engine):
        # sqlalchemy-migrate era version, read from the database state cache.
        dbcache = DatabaseStateCache(engine)
        return dbcache.sqlalchemymigrate_version

    def _get_script_directory(self):
        alembic_cfg = self._get_alembic_cfg()
        return ScriptDirectory.from_config(alembic_cfg)

    def _get_alembic_cfg(self):
        # alembic.ini lives next to this module.
        config_file = os.path.join(os.path.dirname(__file__), "alembic.ini")
        config_file = os.path.abspath(config_file)
        return Config(config_file)

    def _get_gxy_alembic_db_version(self, engine):
        # We may get 2 values, one for each branch (gxy and tsi). So we need to
        # determine which one is the gxy head.
        with engine.connect() as conn:
            context = MigrationContext.configure(conn)
            db_heads = context.get_current_heads()
            if db_heads:
                gxy_revisions = self._get_all_gxy_revisions()
                for db_head in db_heads:
                    if db_head in gxy_revisions:
                        return db_head
        return None

    def _get_all_gxy_revisions(self):
        gxy_revisions = set()
        script_directory = self._get_script_directory()
        for rev in script_directory.walk_revisions():
            if GXY in rev.branch_labels:
                gxy_revisions.add(rev.revision)
        return gxy_revisions
def get_alembic_manager(engine: Engine) -> AlembicManager:
    """Return an AlembicManager bound to *engine*."""
    return AlembicManager(engine)
| StarcoderdataPython |
60191 | <gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matlablib import *
plt.ion()  # interactive mode: plot calls do not block by default
tic()

# Two captures of the same signal at different averaging times (2s vs 5s).
fname_1=r"70dB-2sec.csv"
fname_2=r'70dB-5sec.csv'
pd1=pd.read_csv(fname_1)
pd2=pd.read_csv(fname_2)
print('loaded')
toc()

# RL1_1310: the measurement column compared between both captures
# (presumably return loss at 1310 nm — confirm against the instrument export).
y1 = pd1.RL1_1310
y2 = pd2.RL1_1310
x1 = np.linspace(1, len(y1), len(y1))
x2 = np.linspace(1, len(y2), len(y2))
# print(y1)

# Mean level of 2s
mean_2s = np.mean(y1)
mean_5s = np.mean(y2)
#%%
def makePlot():
    """Plot both series with mean/std in the legend; blocks until the window closes."""
    plt.figure() # Make a new figure
    line_2s = plt.plot(x1, y1, label=("2s mean=" + str(round(mean_2s, 2))
                                      + ", std=" + str(round(np.std(y1), 2))))
    line_5s = plt.plot(x2, y2, label="5s mean=" + str(round(mean_5s, 2))
                                     + ", std=" + str(round(np.std(y2), 2)))
    plt.grid(1, which='major', axis='both')
    plt.title('2s vs 5s ATIME')
    # plt.legend(handles=[line_2s, line_5s])
    plt.legend()
    toc()
    plt.show(block=True)
##
#%%
makePlot()
| StarcoderdataPython |
3295868 | from fs_data import FSData
if __name__=="__main__":
    # RL hyper-parameters.
    # NOTE(review): 'alhpa' looks like a typo for 'alpha'; it is left as-is
    # because parameter values are also resolved by name via locals() below,
    # so any rename must be done consistently.
    alhpa = 0.1
    gamma = 0.99
    epsilon = 0.01
    # BSO parameters (bee swarm optimization).
    flip = 5
    max_chance = 3
    bees_number = 10
    maxIterations = 10
    locIterations = 10
    # Test type
    typeOfAlgo = 1
    nbr_exec = 1
    dataset = "Iris"
    data_loc_path = "./datasets/"
    location = data_loc_path + dataset + ".csv"
    method = "qbso_simple"
    test_param = "rl"
    param = "gamma"
    # Resolve the tested parameter's current value by variable name.
    val = str(locals()[param])
    classifier = "knn"
    instance = FSData(typeOfAlgo,location,nbr_exec,method,test_param,param,val,classifier,alhpa,gamma,epsilon)
    instance.run(flip,max_chance,bees_number,maxIterations,locIterations)
3261451 | <filename>tensorflow_tts/utils/griffin_lim.py
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME> (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Griffin-Lim phase reconstruction algorithm from mel spectrogram."""
import os
import librosa
import numpy as np
import soundfile as sf
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
def griffin_lim_lb(
    mel_spec, stats_path, dataset_config, n_iter=32, output_dir=None, wav_name="lb"
):
    """Generate wave from mel spectrogram with Griffin-Lim algorithm using Librosa.

    Args:
        mel_spec (ndarray): array representing the normalized mel spectrogram.
        stats_path (str): path to the `stats.npy` file containing norm statistics.
        dataset_config (Dict): dataset configuration parameters.
        n_iter (int): number of iterations for GL.
        output_dir (str): output directory where audio file will be saved.
        wav_name (str): name of the output file.

    Returns:
        gl_lb (ndarray): generated wave.
    """
    # De-normalize: undo the StandardScaler, then invert the log10 scaling;
    # transpose so frequency bins come first for the librosa calls below.
    scaler = StandardScaler()
    scaler.mean_, scaler.scale_ = np.load(stats_path)
    mel_spec = np.power(10.0, scaler.inverse_transform(mel_spec)).T
    mel_basis = librosa.filters.mel(
        dataset_config["sampling_rate"],
        n_fft=dataset_config["fft_size"],
        n_mels=dataset_config["num_mels"],
        fmin=dataset_config["fmin"],
        fmax=dataset_config["fmax"],
    )
    # Pseudo-inverse maps mel bins back to the linear spectrogram, floored at 1e-10.
    mel_to_linear = np.maximum(1e-10, np.dot(np.linalg.pinv(mel_basis), mel_spec))
    gl_lb = librosa.griffinlim(
        mel_to_linear,
        n_iter=n_iter,
        hop_length=dataset_config["hop_size"],
        win_length=dataset_config["win_length"] or dataset_config["fft_size"],
    )
    if output_dir:
        output_path = os.path.join(output_dir, f"{wav_name}.wav")
        sf.write(output_path, gl_lb, dataset_config["sampling_rate"], "PCM_16")
    return gl_lb
class TFGriffinLim(tf.keras.layers.Layer):
    """Griffin-Lim algorithm for phase reconstruction from mel spectrogram magnitude."""

    def __init__(self, stats_path, dataset_config):
        """Init GL params.

        Args:
            stats_path (str): path to the `stats.npy` file containing norm statistics.
            dataset_config (Dict): dataset configuration parameters.
        """
        super().__init__()
        scaler = StandardScaler()
        scaler.mean_, scaler.scale_ = np.load(stats_path)
        self.scaler = scaler
        self.ds_config = dataset_config
        # Mel filter bank, pseudo-inverted in call() to map mel -> linear spectrogram.
        self.mel_basis = librosa.filters.mel(
            self.ds_config["sampling_rate"],
            n_fft=self.ds_config["fft_size"],
            n_mels=self.ds_config["num_mels"],
            fmin=self.ds_config["fmin"],
            fmax=self.ds_config["fmax"],
        )  # [num_mels, fft_size // 2 + 1]

    def save_wav(self, gl_tf, output_dir, wav_name):
        """Generate WAV file and save it.

        Args:
            gl_tf (tf.Tensor): reconstructed signal from GL algorithm (single or batched).
            output_dir (str): output directory where audio file will be saved.
            wav_name (str or List[str]): name(s) of the output file(s).
        """
        encode_fn = lambda x: tf.audio.encode_wav(x, self.ds_config["sampling_rate"])
        gl_tf = tf.expand_dims(gl_tf, -1)  # add the channel dimension for encode_wav
        if not isinstance(wav_name, list):
            wav_name = [wav_name]
        if len(gl_tf.shape) > 2:
            # Batched input: write one wav file per batch element.
            bs, *_ = gl_tf.shape
            assert bs == len(wav_name), "Batch and 'wav_name' have different size."
            tf_wav = tf.map_fn(encode_fn, gl_tf, dtype=tf.string)
            for idx in tf.range(bs):
                output_path = os.path.join(output_dir, f"{wav_name[idx]}.wav")
                tf.io.write_file(output_path, tf_wav[idx])
        else:
            tf_wav = encode_fn(gl_tf)
            tf.io.write_file(os.path.join(output_dir, f"{wav_name[0]}.wav"), tf_wav)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),
            tf.TensorSpec(shape=[], dtype=tf.int32),
        ]
    )
    def call(self, mel_spec, n_iter=32):
        """Apply GL algorithm to batched mel spectrograms.

        Args:
            mel_spec (tf.Tensor): normalized mel spectrogram; rank 3, last
                axis must be num_mels (required by the matmul below).
            n_iter (int): number of iterations to run GL algorithm.

        Returns:
            (tf.Tensor): reconstructed signal from GL algorithm.
        """
        # de-normalize mel spectogram (undo StandardScaler, then invert log10)
        mel_spec = tf.math.pow(10.0, mel_spec * self.scaler.scale_ + self.scaler.mean_)
        inverse_mel = tf.linalg.pinv(self.mel_basis)
        # [:, num_mels] @ [fft_size // 2 + 1, num_mels].T
        mel_to_linear = tf.linalg.matmul(mel_spec, inverse_mel, transpose_b=True)
        mel_to_linear = tf.cast(tf.math.maximum(1e-10, mel_to_linear), tf.complex64)
        # Start from a random phase and refine it over n_iter STFT round trips.
        init_phase = tf.cast(
            tf.random.uniform(tf.shape(mel_to_linear), maxval=1), tf.complex64
        )
        phase = tf.math.exp(2j * np.pi * init_phase)
        for _ in tf.range(n_iter):
            inverse = tf.signal.inverse_stft(
                mel_to_linear * phase,
                frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
                frame_step=self.ds_config["hop_size"],
                fft_length=self.ds_config["fft_size"],
                window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
            )
            phase = tf.signal.stft(
                inverse,
                self.ds_config["win_length"] or self.ds_config["fft_size"],
                self.ds_config["hop_size"],
                self.ds_config["fft_size"],
            )
            # Keep only the phase: normalize magnitude to 1 (floored to avoid /0).
            phase /= tf.cast(tf.maximum(1e-10, tf.abs(phase)), tf.complex64)
        return tf.signal.inverse_stft(
            mel_to_linear * phase,
            frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
            frame_step=self.ds_config["hop_size"],
            fft_length=self.ds_config["fft_size"],
            window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
        )
| StarcoderdataPython |
1607689 | import rover
class Planet:
    """A wrapping grid world for rovers: coordinates wrap at the edges and
    obstacles are tracked as a set of position keys."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.obstacles = set()

    def createRover(self, x, y, orientation):
        """Place a new rover on this planet at (x, y) facing *orientation*."""
        return rover.Rover(x, y, orientation, self)

    def wrap_x(self, x):
        """Wrap an x coordinate onto [0, width)."""
        return x % self.width

    def wrap_y(self, y):
        """Wrap a y coordinate onto [0, height)."""
        return y % self.height

    def createPositionKey(self, x, y):
        """Encode (x, y) as the string key used by the obstacle set."""
        return "{:d}:{:d}".format(x, y)

    def setObstacle(self, x, y):
        """Mark position (x, y) as blocked."""
        self.obstacles.add(self.createPositionKey(x, y))

    def checkObstacle(self, x, y):
        """Return True when position (x, y) is blocked."""
        return self.createPositionKey(x, y) in self.obstacles
3312427 | <filename>tests/test_compatibility_patch.py
from django.test import SimpleTestCase
from bootstrap_datepicker_plus._compatibility import BaseRenderer
from bootstrap_datepicker_plus._helpers import get_base_input
class CustomCompatibleDatePickerInput(get_base_input(True)):
    # Subclass overriding the template, used to verify custom-template rendering.
    template_name = "myapp/custom_input/date-picker.html"
class TestCompatibilityPatch(SimpleTestCase):
    """Tests for the compatibility shims around the date-picker base input."""

    def setUp(self):
        self.CompatibleDatePickerInput = get_base_input(True)
        self.dp_input = self.CompatibleDatePickerInput()

    def test_raise_on_get_template(self):
        # The compatibility BaseRenderer must leave get_template unimplemented.
        self.assertRaises(
            NotImplementedError, lambda: BaseRenderer().get_template("test")
        )

    def test_format_value_method(self):
        # Empty values are normalized to None.
        self.assertEqual(self.dp_input.format_value(""), None)

    def test_get_context(self):
        # The widget context exposes the input's name and value.
        context = self.dp_input.get_context("input_name", "2018-04-12", {})
        self.assertEqual(context["widget"]["name"], "input_name")
        self.assertEqual(context["widget"]["value"], "2018-04-12")

    def test_compatible_input_render(self):
        # Rendering should produce non-empty HTML.
        html = self.dp_input.render("input_name", "2018-04-12", {})
        self.assertGreater(len(html), 0)

    def test_compatible_custom_input_render(self):
        # Rendering with a custom template should also produce non-empty HTML.
        dp_input = CustomCompatibleDatePickerInput()
        html = dp_input.render("input_name", "2018-04-12", {})
        self.assertGreater(len(html), 0)
| StarcoderdataPython |
1663235 | # -*- coding: utf-8 -*-
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to
SUITE = {
    "description": "Testing correct work of contract with 'eth_accuracy:True'"
}


@lcc.disabled()
@lcc.prop("main", "type")
@lcc.tags("scenarios", "eth_accuracy")
@lcc.suite("Check scenario 'eth_accuracy'")
class EthAccuracy(BaseTest):
    """Scenario: deploy two contracts created with 'eth_accuracy:True',
    fund each with one echo asset, then withdraw to a contract address and
    to an account address, checking balances (in ETH-accuracy units) at
    every step."""

    def __init__(self):
        super().__init__()
        self.__database_api_identifier = None
        self.__registration_api_identifier = None
        self.__history_api_identifier = None
        self.echo_acc0 = None
        # Compiled bytecode of the test contract and of the two methods
        # the scenario calls.
        self.contract = self.get_byte_code("eth_accuracy", "code")
        self.balance = self.get_byte_code("eth_accuracy", "balance()")
        self.withdraw = self.get_byte_code("eth_accuracy", "withdraw(address,uint256)")

    def setup_suite(self):
        """Connect to the node, resolve API identifiers and test accounts."""
        super().setup_suite()
        self._connect_to_echopy_lib()
        lcc.set_step("Setup for {}".format(self.__class__.__name__))
        self.__database_api_identifier = self.get_identifier("database")
        self.__registration_api_identifier = self.get_identifier("registration")
        self.__history_api_identifier = self.get_identifier("history")
        lcc.log_info(
            "API identifiers are: database='{}', registration='{}', "
            "history='{}'".format(
                self.__database_api_identifier, self.__registration_api_identifier, self.__history_api_identifier
            )
        )
        self.echo_acc0 = self.get_account_id(
            self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
        )
        self.echo_acc1 = self.get_account_id(
            self.accounts[1], self.__database_api_identifier, self.__registration_api_identifier
        )
        lcc.log_info("Echo account is '{}' '{}'".format(self.echo_acc0, self.echo_acc1))

    def teardown_suite(self):
        """Disconnect from the node once the suite finishes."""
        self._disconnect_to_echopy_lib()
        super().teardown_suite()

    @lcc.test("The scenario describes the work of contract with 'eth_accuracy:True'")
    def eth_accuracy(self):
        echo_accuracy_amount = 1
        contract_ids = []
        withdraw_amount = 1
        # One echo asset expressed in ETH-accuracy units.
        eth_accuracy_balance = 10000000000
        for i in range(2):
            lcc.set_step("Create contract in the Echo network and get its contract id")
            contract_id = self.utils.get_contract_id(
                self,
                self.echo_acc0,
                self.contract,
                self.__database_api_identifier,
                eth_accuracy=True,
                log_broadcast=False
            )
            lcc.log_info("contract id: {}".format(contract_id))
            contract_ids.append(contract_id)
            lcc.set_step(
                "Call 'payable' method = add {} assets to contact {}".format(echo_accuracy_amount, contract_id)
            )
            operation = self.echo_ops.get_contract_call_operation(
                echo=self.echo,
                registrar=self.echo_acc0,
                bytecode="",
                callee=contract_id,
                value_amount=echo_accuracy_amount
            )
            collected_operation = self.collect_operations(operation, self.__database_api_identifier)
            self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
            lcc.set_step("Call 'balanceof()' method = Get contact {} balance".format(contract_id))
            operation = self.echo_ops.get_contract_call_operation(
                echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_id
            )
            collected_operation = self.collect_operations(operation, self.__database_api_identifier)
            broadcast_result = self.echo_ops.broadcast(
                echo=self.echo, list_operations=collected_operation, log_broadcast=False
            )
            contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
            contract_output = self.get_contract_output(contract_result, output_type=int)
            lcc.log_info("contract_output: {}".format(contract_output))
        # Bug fix: the next two step messages originally formatted
        # contract_id[0] / contract_id[1] -- single characters of the last
        # created id string -- instead of contract_ids[0] / contract_ids[1].
        lcc.set_step("Call 'balanceof()' method = Get contact {} balance".format(contract_ids[0]))
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_ids[0]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        contract_output = self.get_contract_output(contract_result, output_type=int)
        check_that("contract balance", contract_output, equal_to(eth_accuracy_balance))
        lcc.set_step("Call 'balanceof()' method = Get contact {} balance".format(contract_ids[1]))
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_ids[1]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        contract_output = self.get_contract_output(contract_result, output_type=int)
        check_that("contract balance", contract_output, equal_to(eth_accuracy_balance))
        lcc.set_step(
            "Call 'withdraw()' to contract address method with {} amount of contract {}".format(
                withdraw_amount, contract_ids[0]
            )
        )
        # ABI-encode (address, uint256): pad the second contract's instance
        # number into the address word, amount is 0x2540BE400 (10**10).
        contract_id_hex = hex(int(contract_ids[1].split(".")[-1])).split("x")[-1]
        bytecode = (
            '{}{}{}{}'.format(
                str(self.withdraw), "00000000000000000000000001000000000000000000000000000000000000", contract_id_hex,
                "00000000000000000000000000000000000000000000000000000002540BE400"
            )
        )
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, value_amount=0, bytecode=bytecode, callee=contract_ids[0]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier, fee_amount=200)
        broadcast_result = self.echo_ops.broadcast(
            echo=self.echo, list_operations=collected_operation, log_broadcast=False
        )
        if not self.is_operation_completed(broadcast_result, expected_static_variant=1):
            raise Exception("Error: can't add balance to new account, response:\n{}".format(broadcast_result))
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        lcc.log_info(" {}".format(contract_result))
        self.produce_block(self.__database_api_identifier)
        lcc.set_step("Get contact {} balance after withdrawal to contract address".format(contract_ids[1]))
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_ids[1]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        contract_output = self.get_contract_output(contract_result, output_type=int)
        check_that("contract balance", contract_output, equal_to(eth_accuracy_balance * 2))
        lcc.set_step("Get contact {} balance after withdrawal to contract address".format(contract_ids[0]))
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_ids[0]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        contract_output = self.get_contract_output(contract_result, output_type=int)
        check_that("contract balance", contract_output, equal_to(0))
        # NOTE(review): the step text says account '1.2.6' but the request
        # below queries '1.2.9' -- confirm which account is intended.
        lcc.set_step("Get balances of '1.2.6' account before withdrawal")
        params = ["1.2.9", ["1.3.0"]]
        response_id = self.send_request(
            self.get_request("get_account_balances", params), self.__database_api_identifier
        )
        balance = self.get_response(response_id)["result"][0]["amount"]
        lcc.log_info("Balance is {}".format(balance))
        lcc.set_step(
            "Call 'withdraw()' to '1.2.6' account address method with {} amount of contract {}".format(
                withdraw_amount, contract_ids[1]
            )
        )
        bytecode = (
            '{}{}'.format(
                str(self.withdraw),
                "000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000002540BE400"
            )
        )
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, value_amount=0, bytecode=bytecode, callee=contract_ids[1]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier, fee_amount=200)
        broadcast_result = self.echo_ops.broadcast(
            echo=self.echo, list_operations=collected_operation, log_broadcast=False
        )
        if not self.is_operation_completed(broadcast_result, expected_static_variant=1):
            raise Exception("Error: can't add balance to new account, response:\n{}".format(broadcast_result))
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        lcc.log_info(" {}".format(contract_result))
        self.produce_block(self.__database_api_identifier)
        lcc.set_step("Get contact {} balance after withdrawal to account address".format(contract_ids[1]))
        operation = self.echo_ops.get_contract_call_operation(
            echo=self.echo, registrar=self.echo_acc0, bytecode=self.balance, callee=contract_ids[1]
        )
        collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
        contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
        contract_output = self.get_contract_output(contract_result, output_type=int)
        check_that("contract balance", contract_output, equal_to(eth_accuracy_balance))
        lcc.set_step("Get balances of '1.2.6' account after withdrawal")
        params = ["1.2.9", ["1.3.0"]]
        response_id = self.send_request(
            self.get_request("get_account_balances", params), self.__database_api_identifier
        )
        updated_balance = self.get_response(response_id)["result"][0]["amount"]
        check_that("contract balance", int(updated_balance), equal_to(int(balance) + 1))
| StarcoderdataPython |
33921 | import itertools
import pytest
from iterators.invalid_iter import InvalidIter
def _grouper_to_keys(grouper):
    # Flatten an itertools.groupby iterator into just its keys.
    keys = []
    for key, _group in grouper:
        keys.append(key)
    return keys
def _grouper_to_groups(grouper):
    # Materialise each lazy group from an itertools.groupby iterator.
    groups = []
    for _key, group in grouper:
        groups.append(list(group))
    return groups
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
    (lambda x: x, [], []),
    (lambda x: x, [1, 2, 3], [1, 2, 3]),
    (lambda x: x, [1, 2, 2, 2, 3, 3], [1, 2, 3]),
    (lambda x: x, "", []),
    (lambda x: x, "ABC", ["A", "B", "C"]),
    (lambda x: x, "ABBBCC", ["A", "B", "C"]),
])
def test_groupby_basic_case_keys(keyfunc, data, expected_keys):
    # With the identity key, groupby emits one key per run of equal
    # consecutive elements, preserving input order.
    grouper = itertools.groupby(data, keyfunc)
    assert _grouper_to_keys(grouper) == expected_keys


@pytest.mark.parametrize("keyfunc, data, expected_groups", [
    (lambda x: x, [], []),
    (lambda x: x, [1, 2, 3], [[1], [2], [3]]),
    (lambda x: x, [1, 2, 2, 2, 3, 3], [[1], [2, 2, 2], [3, 3]]),
    (lambda x: x, "", []),
    (lambda x: x, "ABC", [["A"], ["B"], ["C"]]),
    (lambda x: x, "ABBBCC", [["A"], ["B", "B", "B"], ["C", "C"]]),
])
def test_groupby_basic_case_groups(keyfunc, data, expected_groups):
    # Each run of equal consecutive elements becomes one group list.
    grouper = itertools.groupby(data, keyfunc)
    assert _grouper_to_groups(grouper) == expected_groups
@pytest.mark.parametrize("keyfunc, data, exception_message", [
    (lambda x: x, 1, "'int' object is not iterable"),
    (lambda x: x, min, "'builtin_function_or_method' object is not iterable"),
    (lambda x: x, InvalidIter(), "'InvalidIter' object is not iterable")
])
def test_groupby_basic_case_invalid_data(keyfunc, data, exception_message):
    # Non-iterable inputs must raise TypeError at construction time, with
    # CPython's standard "not iterable" message.
    with pytest.raises(TypeError) as excinfo:
        itertools.groupby(data, keyfunc)
    assert excinfo.value.args[0] == exception_message
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
    (lambda x: x % 2, [], []),
    (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [1, 0]),
    (lambda x: x % 2, [1, 2, 3, 4, 5], [1, 0, 1, 0, 1]),
    (lambda x: True, [], []),
    (lambda x: True, [1, 2, 3, 4], [True]),
    (lambda x: True, "ABCDEF", [True]),
])
def test_groupby_different_keyfunc_keys(keyfunc, data, expected_keys):
    # Non-identity keys: groupby only merges *consecutive* items with the
    # same key, so alternating parity yields one key per element.
    grouper = itertools.groupby(data, keyfunc)
    assert _grouper_to_keys(grouper) == expected_keys


@pytest.mark.parametrize("keyfunc, data, expected_groups", [
    (lambda x: x % 2, [], []),
    (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [[1, 3, 5, 7], [2, 4, 6, 8]]),
    (lambda x: x % 2, [1, 2, 3, 4, 5], [[1], [2], [3], [4], [5]]),
    (lambda x: True, [], []),
    (lambda x: True, [1, 2, 3, 4], [[1, 2, 3, 4]]),
    (lambda x: True, "ABCDEF", [["A", "B", "C", "D", "E", "F"]]),
])
def test_groupby_different_keyfunc_groups(keyfunc, data, expected_groups):
    # A constant key collapses the whole input into a single group.
    grouper = itertools.groupby(data, keyfunc)
    assert _grouper_to_groups(grouper) == expected_groups
| StarcoderdataPython |
72644 | <reponame>AustEcon/bitcoinX<filename>tests/test_packing.py
from io import BytesIO
import pytest
from bitcoinx.packing import *
from struct import error as struct_error
# (pack_function_name, input_value, expected_packed_bytes) triples; the
# unpack/read tests below derive their fixtures from this same table.
pack_cases = [
    ('pack_le_int32', -258, b'\xfe\xfe\xff\xff'),
    ('pack_le_int32', 258, b'\x02\x01\x00\x00'),
    ('pack_le_int64', -2345684275723, b'\xf5mR\xda\xdd\xfd\xff\xff'),
    ('pack_le_int64', 1234567890123456, b'\xc0\xba\x8a<\xd5b\x04\x00'),
    ('pack_le_uint16', 987, b'\xdb\x03'),
    ('pack_le_uint32', 4000000, b'\x00\t=\x00'),
    ('pack_le_uint64', 3615905184676284416, b'\x00\x88\x1f\x8a\x9bF.2'),
    ('pack_be_uint16', 12345, b'09'),
    ('pack_be_uint32', 123456789, b'\x07[\xcd\x15'),
    ('pack_be_uint64', 1234567890123456, bytes(reversed(pack_le_uint64(1234567890123456)))),
    ('pack_byte', 144, b'\x90'),
    ('pack_port', 8333, b' \x8d'),
    ('pack_varint', 0, b'\x00'),
    ('pack_varint', 252, b'\xfc'),
    ('pack_varint', 253, b'\xfd\xfd\x00'),
    ('pack_varint', 31000, b'\xfd\x18y'),
    ('pack_varint', 65535, b'\xfd\xff\xff'),
    ('pack_varint', 65536, b'\xfe\x00\x00\x01\x00'),
    ('pack_varint', 1234567890, b'\xfe\xd2\x02\x96I'),
    ('pack_varint', 4294967295, b'\xfe\xff\xff\xff\xff'),
    ('pack_varint', 12345678901234567890, b'\xff\xd2\n\x1f\xeb\x8c\xa9T\xab'),
    # Operator precedence makes this 1 << 63 (2**63), which matches the
    # expected little-endian bytes below.
    ('pack_varint', 1 << 64 - 1, b'\xff\x00\x00\x00\x00\x00\x00\x00\x80'),
]
@pytest.mark.parametrize("pack_func,case,result", pack_cases)
def test_pack_funcs(pack_func, case, result):
    # Each named pack function must serialise its sample value exactly.
    pack_func = globals()[pack_func]
    assert pack_func(case) == result


@pytest.mark.parametrize("pack_func",
                         [func for func in set(case[0] for case in pack_cases)
                          if not '_int' in func])
def test_pack_negative(pack_func):
    # Packers for unsigned types (everything but the *_int* ones) must
    # reject negative values with struct.error.
    pack_func = globals()[pack_func]
    with pytest.raises(struct_error):
        pack_func(-127)


@pytest.mark.parametrize("pack_func", set(case[0] for case in pack_cases))
def test_oversized(pack_func):
    # Values outside a 64-bit range must be rejected in both signs.
    big = 1 << 64
    func = globals()[pack_func]
    with pytest.raises(struct_error):
        assert func(big)
    with pytest.raises(struct_error):
        assert func(-big)


@pytest.mark.parametrize("varbyte_len", (0, 252, 253, 254, 32757, 70000))
def test_pack_varbytes(varbyte_len):
    # varbytes == varint length prefix followed by the raw payload;
    # lengths straddle the varint encoding boundaries (252/253).
    data = b'1' * varbyte_len
    assert pack_varbytes(data) == pack_varint(varbyte_len) + data
def unpack_cases():
    # Derive (unpack_func, packed_bytes, value) fixtures from pack_cases,
    # keeping only cases that have a matching un<pack_name> function.
    for func, case, result in pack_cases:
        unfunc = globals().get(f'un{func}')
        if unfunc:
            yield unfunc, result, case


@pytest.mark.parametrize("unpack_func,case,result", unpack_cases())
def test_unpack(unpack_func, case, result):
    # Unpacking the packed bytes must round-trip to the original value.
    value, = unpack_func(case)
    assert value == result


def unpack_from_cases():
    # Same as unpack_cases, but for the offset-taking *_from variants.
    for func, case, result in pack_cases:
        unfunc = globals().get(f'un{func}_from')
        if unfunc:
            yield unfunc, result, case


@pytest.mark.parametrize("unpack_from_func,case,result", unpack_from_cases())
def test_unpack_from(unpack_from_func, case, result):
    # Round-trip via the *_from variant, reading at offset 0.
    value, = unpack_from_func(case, 0)
    assert value == result


def read_tests():
    # Derive (read_func, packed_bytes, value) fixtures; report pack cases
    # that have no matching read_* function instead of failing.
    for func, value, result in pack_cases:
        read_func_name = func.replace('pack_', 'read_')
        read_func = globals().get(read_func_name)
        if read_func:
            yield read_func, result, value
        else:
            print('skipping ', read_func_name)
@pytest.mark.parametrize("read_func,data,value", read_tests())
def test_read(read_func, data, value):
    # Readers pull exactly one value from a byte-stream read callable.
    io = BytesIO(data)
    assert read_func(io.read) == value


@pytest.mark.parametrize("varbyte_len", (0, 252, 253, 254, 32757, 70000))
def test_read_varbytes(varbyte_len):
    # pack_varbytes / read_varbytes must round-trip across varint
    # encoding-boundary lengths.
    value = b'7' * varbyte_len
    data = pack_varbytes(value)
    io = BytesIO(data)
    assert read_varbytes(io.read) == value


@pytest.mark.parametrize("read_func,data,value", read_tests())
def test_read_short(read_func, data, value):
    # Truncated input (last byte missing) must raise struct.error.
    io = BytesIO(data[:-1])
    with pytest.raises(struct_error):
        read_func(io.read)


@pytest.mark.parametrize("varbyte_len", (0, 252, 253, 254, 32757, 70000))
def test_read_varbytes_short(varbyte_len):
    # Truncated varbytes payload must also raise struct.error.
    value = b'7' * varbyte_len
    data = pack_varbytes(value)
    io = BytesIO(data[:-1])
    with pytest.raises(struct_error):
        read_varbytes(io.read)
@pytest.mark.parametrize("header,answer", (
    # First case appears to be the Bitcoin genesis block header -- TODO
    # confirm. The answer tuple is
    # (version, prev_hash, merkle_root, timestamp, bits, nonce).
    (b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
     b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
     b'\x00\x00\x00\x00;\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b'
     b'\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J)\xab_I\xff\xff\x00\x1d\x1d\xac+|',
     (1, b'\0' * 32, b';\xa3\xed\xfdz{\x12\xb2z\xc7,>gv\x8fa\x7f\xc8\x1b'
      b'\xc3\x88\x8aQ2:\x9f\xb8\xaaK\x1e^J',
      1231006505, 486604799, 2083236893)),
    # Fake header to test signedness of integer fields
    (b'\xff\xff\xff\xff\x8d\xa1\xebr\xec\x00\x8e\xad\xaczv\xd2\xfb>\x16\xba'
     b'|$\x0c\xb7\x7f\xb0\x8b\x17v\xa80\x02n\xb6\xa8\xcc\x0fq\xcb\xbc\x01\xce'
     b'\xe9\xb3h\x96l\x8d\xb43H\x7f%\xc4\xe3\x1d7i\xd7\x8d\x18\xc6`\xe8g\xf8'
     b'\xb5\xa1\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff',
     (-1, b'\x8d\xa1\xebr\xec\x00\x8e\xad\xaczv\xd2\xfb>\x16\xba|$\x0c\xb7'
      b'\x7f\xb0\x8b\x17v\xa80\x02n\xb6\xa8\xcc', b'\x0fq\xcb\xbc\x01\xce'
      b'\xe9\xb3h\x96l\x8d\xb43H\x7f%\xc4\xe3\x1d7i\xd7\x8d\x18\xc6`\xe8'
      b'g\xf8\xb5\xa1', 4294967295, 4294967295, 4294967295)),
))
def test_unpack_header(header, answer):
    assert unpack_header(header) == answer


def test_pack_and_read_list():
    # pack_list == varint item count + concatenation of packed items, and
    # read_list must round-trip it.
    items = [1, 34598236, -23462436]
    p = pack_list(items, pack_le_int32)
    assert p == pack_varint(len(items)) + b''.join(pack_le_int32(item) for item in items)
    bio = BytesIO(p)
    assert read_list(bio.read, read_le_int32) == items
| StarcoderdataPython |
1673538 | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
class MainWindow(QMainWindow):
    """Main browser window: a QWebEngineView plus a navigation toolbar."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.browser = QWebEngineView()
        self.browser.setUrl(QUrl('https://duckduckgo.com/'))
        self.setCentralWidget(self.browser)
        self.showMaximized()
        # navbar
        navbar = QToolBar()
        self.addToolBar(navbar)
        back_btn = QAction(QIcon('res/backward.png'), 'Back', self)
        back_btn.triggered.connect(self.browser.back)
        back_btn.setShortcut('Ctrl+B')
        navbar.addAction(back_btn)
        forward_btn = QAction(QIcon('res/forward.png'), 'Forward', self)
        forward_btn.triggered.connect(self.browser.forward)
        navbar.addAction(forward_btn)
        reload_btn = QAction(QIcon('res/refresh.png'), 'Refresh', self)
        reload_btn.triggered.connect(self.browser.reload)
        reload_btn.setShortcut('Ctrl+R')
        navbar.addAction(reload_btn)
        home_btn = QAction(QIcon('res/home.png'), 'Home', self)
        home_btn.triggered.connect(self.navigate_home)
        home_btn.setShortcut('H')
        navbar.addAction(home_btn)
        self.url_bar = QLineEdit()
        self.url_bar.returnPressed.connect(self.navigate_to_url)
        self.url_bar.setPlaceholderText('Enter address: http://')
        navbar.addWidget(self.url_bar)
        # NOTE(review): the search button triggers navigate_home rather
        # than navigate_to_url -- looks like a placeholder; confirm intent.
        search_btn = QAction(QIcon('res/search.png'), 'Search', self)
        search_btn.triggered.connect(self.navigate_home)
        navbar.addAction(search_btn)
        # NOTE(review): the history button also routes to navigate_home;
        # the history() method below is likewise a stub.
        history = QAction(QIcon('res/history.png'), 'History', self)
        history.triggered.connect(self.navigate_home)
        navbar.addAction(history)
        quit = QAction(self)
        quit.triggered.connect(self.quit)
        quit.setShortcut('Ctrl+Q')
        navbar.addAction(quit)
        self.browser.urlChanged.connect(self.update_url)

    def navigate_home(self):
        """Load the default start page."""
        self.browser.setUrl(QUrl('https://duckduckgo.com'))

    def navigate_to_url(self):
        """Search the text currently in the URL bar via DuckDuckGo."""
        url = self.url_bar.text().replace(' ', '+')
        self.browser.setUrl(QUrl('https://duckduckgo.com/?q=' + url))

    def update_url(self, q):
        """Keep the URL bar in sync with the page actually loaded."""
        self.url_bar.setText(q.toString())

    def history(self):
        # Placeholder: no real history view is implemented yet.
        self.browser.setUrl(QUrl('https://duckduckgo.com'))

    def quit(self):
        """Ask for confirmation and close the window.

        Bug fix: the original called ``event.accept()``, but ``quit`` is a
        plain slot rather than an event handler, so ``event`` was an
        undefined name and confirming the dialog raised ``NameError``.
        Closing the window explicitly performs the intended exit.
        """
        # args: parent, title, message, buttons, default button
        reply = QMessageBox.question(
            self, 'Confirm close',
            "You are about to exit. Are you sure you want to exit?",
            QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            self.close()
# Application entry point: create the Qt app, show the window, and run
# the event loop until the window closes.
app = QApplication(sys.argv)
QApplication.setApplicationName('Infinity Browser')
QApplication.setWindowIcon(QIcon('res/ico.png'))
window = MainWindow()
app.exec_()
1728160 | <filename>spark_auto_mapper_fhir/value_sets/implant_status.py<gh_stars>1-10
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ImplantStatusCode(GenericTypeCode):
    """
    Implant Status
    From: http://terminology.hl7.org/CodeSystem/implantStatus in valuesets.xml
    A set codes that define the functional status of an implanted device.
    """

    def __init__(self, value: AutoMapperTextInputType):
        super().__init__(value=value)

    """
    http://terminology.hl7.org/CodeSystem/implantStatus
    """
    # Canonical URI of the code system every ImplantStatus code belongs to.
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/implantStatus"
class ImplantStatusCodeValues:
    # Auto-generated constants; each triple-quoted string documents the
    # constant defined immediately below it (the first one also serves as
    # the class docstring).
    """
    The implanted device is working normally.
    From: http://terminology.hl7.org/CodeSystem/implantStatus in valuesets.xml
    """
    Functional = ImplantStatusCode("functional")
    """
    The implanted device is not working.
    From: http://terminology.hl7.org/CodeSystem/implantStatus in valuesets.xml
    """
    Non_Functional = ImplantStatusCode("non-functional")
    """
    The implanted device has been turned off.
    From: http://terminology.hl7.org/CodeSystem/implantStatus in valuesets.xml
    """
    Disabled = ImplantStatusCode("disabled")
    """
    the functional status of the implant has not been determined.
    From: http://terminology.hl7.org/CodeSystem/implantStatus in valuesets.xml
    """
    Unknown = ImplantStatusCode("unknown")
| StarcoderdataPython |
3233656 | <filename>pomodoro_system/foundation/models/__init__.py<gh_stars>0
# Explicit public API of the models package.
__all__ = ["db", "User", "UserDateFrameDefinitionModel"]
from foundation.models.user import User, UserDateFrameDefinitionModel, db
| StarcoderdataPython |
4801265 | <reponame>TueVJ/PyGuEx
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
from benders_stochastic_master import Benders_Master
# Solve the stochastic Benders master problem quietly.
sns.set_style('ticks')
m = Benders_Master()
m.model.Params.OutputFlag = False
m.optimize()
# Per-generator real-time production / up- / down-regulation, indexed by
# the realised real-time demand of each scenario.
rtdf = pd.DataFrame({g: {m.data.demand_rt[s]: m.submodels[s].variables.gprod_rt[g].x for s in m.data.scenarios} for g in m.data.generators})
updf = pd.DataFrame({g: {m.data.demand_rt[s]: m.submodels[s].variables.gprod_rt_up[g].x for s in m.data.scenarios} for g in m.data.generators})
downdf = pd.DataFrame({g: {m.data.demand_rt[s]: m.submodels[s].variables.gprod_rt_down[g].x for s in m.data.scenarios} for g in m.data.generators})
# Objective minus alpha -- presumably the day-ahead-only cost component;
# alpha appears to be the recourse term. TODO confirm against the model.
dacost = m.model.ObjVal - m.variables.alpha.x
# Realised per-scenario cost: energy price plus regulation premiums.
rscostseries = pd.Series({m.data.demand_rt[s]: sum(
    m.data.geninfo.price[g]*m.submodels[s].variables.gprod_rt[g].x +
    m.data.geninfo.uppremium[g]*m.submodels[s].variables.gprod_rt_up[g].x +
    m.data.geninfo.downpremium[g]*m.submodels[s].variables.gprod_rt_down[g].x
    for g in m.data.generators) for s in m.data.scenarios})
# 2x2 panel: production, final cost, up- and down-regulation vs demand.
plt.ion()
plt.figure(figsize=(12, 8))
ax = plt.subplot(221)
rtdf.plot(ax=ax, marker='.')
plt.xlabel('Realised real-time demand [MW]')
plt.ylabel('Generator setting [MW]')
ax = plt.subplot(222)
rscostseries.plot(ax=ax, marker='.')
plt.xlabel('Realised real-time demand [MW]')
plt.ylabel('Final cost [$]')
ax = plt.subplot(223)
updf.plot(ax=ax, marker='.')
plt.xlabel('Realised real-time demand [MW]')
plt.ylabel('Generator upregulation [MW]')
ax = plt.subplot(224)
downdf.plot(ax=ax, marker='.')
plt.xlabel('Realised real-time demand [MW]')
plt.ylabel('Generator downregulation [MW]')
plt.tight_layout()
# Sweep the day-ahead demand (fixed via equal lb/ub), re-solving from
# scratch each time, and record the negated objective at each point.
m.model.Params.OutputFlag = False
m.params.verbose = False
demands = np.linspace(160, 240, 81)
costs = []
for demand in demands:
    m.variables.load_da.ub = demand
    m.variables.load_da.lb = demand
    m._clear_cuts()
    m.optimize()
    costs.append(-m.model.ObjVal)
# Restore the original day-ahead load bounds after the sweep.
m.variables.load_da.ub = 200
m.variables.load_da.lb = 0
plt.figure()
plt.plot(demands, costs)
plt.ylabel('Social Welfare [$]')
plt.xlabel('Demand cleared in DA market [MW]')
| StarcoderdataPython |
1636978 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
class OutputPublisher:
    """Base class for pages displaying output in the JS application.

    Subclasses override the class attributes and implement ``publish``.
    """

    # Machine name, button caption, and tooltip text for the page.
    name = None
    button_label = None
    description = None
    # Pages are ordered by this key; the default sorts unspecified last.
    order = float('inf')

    @classmethod
    def publish(cls, conf, repo, benchmarks, graphs, revisions):
        """Produce this page's output; the base implementation is a no-op."""
        pass
| StarcoderdataPython |
class Solution:
    def XXX(self, nums: List[int]) -> List[List[int]]:
        """Return all sub-lists (subsets preserving order) of *nums*.

        Builds the result by repeatedly deleting one element at a time
        from every sub-list seen so far, de-duplicating as it goes.
        NOTE(review): relies on ``copy`` and ``typing.List`` being
        imported at module level (not visible here) -- confirm.
        NOTE(review): an empty input returns [] rather than [[]];
        verify that is the intended contract.
        """
        final = list()
        # ----------------------------------------------------
        if len(nums)==1:
            return [[],nums]
        if len(nums)==0:
            return []
        # ------------------------------------------------------
        def pop(cut):
            # Emit every shorter sub-list reachable from ``cut`` by
            # deleting single elements; the ``not in final`` membership
            # scan (O(n) per candidate) both de-duplicates and prunes
            # the recursion.
            if not cut:
                return
            else:
                for i in range(len(cut)):
                    tmp = copy.deepcopy(cut)
                    tmp.pop(i)
                    if tmp not in final:
                        final.append(tmp)
                        pop(tmp)
        pop(nums)
        if nums:
            final.append(nums)
        return final
| StarcoderdataPython |
1693351 |
"""Test dplaapi.handlers.v2"""
import pytest
import requests
import json
import os
import boto3
import secrets
from starlette.testclient import TestClient
from starlette.exceptions import HTTPException
from starlette.responses import Response
from starlette.requests import Request
from starlette.background import BackgroundTask
from apistar.exceptions import ValidationError
from dplaapi.responses import JSONResponse
from dplaapi import app
from dplaapi import types, models
from dplaapi.handlers import v2 as v2_handlers
from dplaapi.queries import search_query
from dplaapi.queries.search_query import SearchQuery
import dplaapi.analytics
from peewee import OperationalError, DoesNotExist
# Shared test client; server exceptions surface as 500 responses rather
# than being re-raised into the test.
client = TestClient(app,
                    base_url='http://localhost',
                    raise_server_exceptions=False)

# Minimal Elasticsearch search response with a single full record hit.
minimal_good_response = {
    'took': 5,
    'timed_out': False,
    'shards': {'total': 3, 'successful': 3, 'skipped': 0, 'failed': 0},
    'hits': {
        'total': {'value': 1},
        'max_score': None,
        'hits': [
            {'_source': {'sourceResource': {'title': 'x'}}}
        ]
    }
}

# Same shape, but the hit carries only an item id.
minimal_necro_response = {
    'took': 5,
    'timed_out': False,
    'shards': {'total': 3, 'successful': 3, 'skipped': 0, 'failed': 0},
    'hits': {
        'total': {'value': 1},
        'max_score': None,
        'hits': [
            {'_source': {'id': '13283cd2bd45ef385aae962b144c7e6a'}}
        ]
    }
}

# Representative Elasticsearch 6 aggregations payload: a terms facet, a
# date-histogram facet, and a geo-distance range facet.
es6_facets = {
    'provider.name': {
        'doc_count_error_upper_bound': 169613,
        'sum_other_doc_count': 5893411,
        'buckets': [
            {
                'key': 'National Archives and Records Administration',
                'doc_count': 3781862
            }
        ]
    },
    "sourceResource.date.begin.year": {
        "doc_count": 14,
        "sourceResource.date.begin.year": {
            "buckets": [
                {
                    "key_as_string": "1947",
                    "key": -725846400000,
                    "doc_count": 1
                }
            ]
        }
    },
    'sourceResource.spatial.coordinates': {
        'buckets': [
            {
                'key': '*-99.0',
                'from': 0,
                'to': 99,
                'doc_count': 518784
            }
        ]
    }
}
# Stand-ins for `requests.Response` objects, used to fake Elasticsearch
# replies without any network traffic.
class MockGoodResponse():
    """Mock a good `requests.Response`"""
    def raise_for_status(self):
        # Success: no exception.
        pass
    def json(self):
        return minimal_good_response


class Mock400Response():
    """Mock a `requests.Response` for an HTTP 400"""
    status_code = 400
    def raise_for_status(self):
        raise requests.exceptions.HTTPError('Can not parse whatever that was')


class Mock404Response():
    """Mock a `requests.Response` for an HTTP 404"""
    status_code = 404
    def raise_for_status(self):
        raise requests.exceptions.HTTPError('Index not found')


class Mock500Response():
    """Mock a `requests.Response` for an HTTP 500"""
    status_code = 500
    def raise_for_status(self):
        raise requests.exceptions.HTTPError('I have failed you.')
# Drop-in replacements for requests.post(), returning the mock responses
# above; installed via monkeypatch in the tests below.
def mock_es_post_response_200(url, json):
    """Mock `requests.post()` for a successful request"""
    return MockGoodResponse()


def mock_es_post_response_400(url, json):
    """Mock `requests.post()` with a Bad Request response"""
    return Mock400Response()


def mock_es_post_response_404(url, json):
    """Mock `requests.post()` with a Not Found response"""
    return Mock404Response()


def mock_es_post_response_err(url, json):
    """Mock `requests.post()` with a non-success status code"""
    return Mock500Response()
def mock_Account_get(*args, **kwargs):
    # Stand-in for models.Account.get(): a valid, enabled account.
    return models.Account(key='<KEY>',
                          email='<EMAIL>',
                          enabled=True)


def mock_disabled_Account_get(*args, **kwargs):
    # Stand-in for models.Account.get(): an account that has been disabled.
    return models.Account(key='<KEY>',
                          email='<EMAIL>',
                          enabled=False)


def mock_not_found_Account_get(*args, **kwargs):
    # Stand-in for models.Account.get(): no account matches the key.
    raise DoesNotExist()
def get_request(path, querystring=None, path_params=None):
    # Build a minimal ASGI-scope GET Request for calling handlers directly.
    rv = {'type': 'http', 'method': 'GET', 'path': path, 'query_string': b''}
    if querystring:
        rv['query_string'] = querystring.encode('utf-8')
    if path_params:
        rv['path_params'] = path_params
    return Request(rv)


def post_request(path, path_params=None):
    # Build a minimal ASGI-scope POST Request for calling handlers directly.
    rv = {'type': 'http', 'method': 'POST', 'path': path}
    if path_params:
        rv['path_params'] = path_params
    return Request(rv)
@pytest.fixture(scope='function')
def disable_auth():
    # Turn off API-key authentication for the duration of one test.
    os.environ['DISABLE_AUTH'] = 'true'
    yield
    del(os.environ['DISABLE_AUTH'])


@pytest.fixture(scope='function')
def patch_db_connection(monkeypatch):
    # Make db.connect()/close() no-ops that report success.
    def mock_db_connect():
        return True
    def mock_db_close():
        return True
    monkeypatch.setattr(models.db, 'connect', mock_db_connect)
    monkeypatch.setattr(models.db, 'close', mock_db_close)
    yield


@pytest.fixture(scope='function')
def patch_bad_db_connection(monkeypatch):
    # Simulate a database that refuses connections.
    def mock_db_connect(*args, **kwargs):
        raise OperationalError()
    def mock_db_close():
        return True
    monkeypatch.setattr(models.db, 'connect', mock_db_connect)
    monkeypatch.setattr(models.db, 'close', mock_db_close)
    yield


@pytest.fixture(scope='function')
def disable_api_key_check(monkeypatch, mocker):
    # Replace account_from_params with a stub so no key lookup happens.
    acct_stub = mocker.stub()
    monkeypatch.setattr(v2_handlers, 'account_from_params', acct_stub)
    yield


@pytest.fixture(scope='function')
def stub_tracking(monkeypatch, mocker):
    # Prevent analytics tracking calls from leaving the test process.
    track_stub = mocker.stub()
    monkeypatch.setattr(dplaapi.analytics, 'track', track_stub)
# account_from_params() tests ...

@pytest.mark.usefixtures('patch_db_connection')
def test_account_from_params_queries_account(monkeypatch, mocker):
    """It connects to the database and retrieves the Account"""
    mocker.patch('dplaapi.models.db.connect')
    monkeypatch.setattr(models.Account, 'get', mock_Account_get)
    monkeypatch.setattr(requests, 'post', mock_es_post_response_200)
    params = {
        'api_key': '<KEY>',
        'from': 0,
        'page': 1,
        'page_size': 1
    }
    v2_handlers.account_from_params(params)
    models.db.connect.assert_called_once()


@pytest.mark.usefixtures('patch_db_connection')
def test_account_from_params_returns_for_disabled_acct(monkeypatch, mocker):
    """It returns HTTP 403 Forbidden if the Account is disabled"""
    mocker.patch('dplaapi.models.db.connect')
    monkeypatch.setattr(models.Account, 'get', mock_disabled_Account_get)
    params = {
        'api_key': '<KEY>',
        'from': 0,
        'page': 1,
        'page_size': 1
    }
    with pytest.raises(HTTPException) as e:
        v2_handlers.account_from_params(params)
    assert e.status_code == 403


@pytest.mark.usefixtures('patch_db_connection')
def test_account_from_params_bad_api_key(monkeypatch, mocker):
    """It returns HTTP 403 Forbidden if the API key matches no Account"""
    mocker.patch('dplaapi.models.db.connect')
    # Account.get raises DoesNotExist for an unknown key.
    monkeypatch.setattr(models.Account, 'get', mock_not_found_Account_get)
    params = {
        'api_key': '<KEY>',
        'from': 0,
        'page': 1,
        'page_size': 1
    }
    with pytest.raises(HTTPException) as e:
        v2_handlers.account_from_params(params)
    assert e.status_code == 403


@pytest.mark.usefixtures('patch_bad_db_connection')
def test_account_from_params_ServerError_bad_db(monkeypatch, mocker):
    """It returns Service Unavailable if it can't connect to the database"""
    params = {
        'api_key': '<KEY>',
        'from': 0,
        'page': 1,
        'page_size': 1
    }
    with pytest.raises(HTTPException) as e:
        v2_handlers.account_from_params(params)
    assert e.status_code == 503

# end account_from_params() tests
# items() tests ...
@pytest.mark.usefixtures('disable_auth')
def test_items_makes_es_request(monkeypatch):
    """items() makes an HTTP POST request to Elasticsearch"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_200)
    sq = SearchQuery({'q': 'abcd', 'from': 0, 'page': 1, 'page_size': 1})
    v2_handlers.items(sq)  # No error


@pytest.mark.usefixtures('disable_auth')
def test_items_Exception_for_elasticsearch_errs(monkeypatch):
    """An Elasticsearch error response other than a 400 results in a 500"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_err)
    # Simulate some unsuccessful status code from Elasticsearch, other than a
    # 400 Bad Request. Say a 500 Server Error, or a 404.
    sq = SearchQuery({'q': 'goodquery', 'from': 0, 'page': 1, 'page_size': 1})
    with pytest.raises(Exception):
        v2_handlers.items(sq)
# multiple_items() tests ...
@pytest.mark.usefixtures('disable_auth')
def test_multiple_items_calls_search_items_correctly(monkeypatch):
    """/v2/items calls search_items() with dictionary"""
    def mock_items(arg):
        assert isinstance(arg, dict)
        return minimal_good_response
    monkeypatch.setattr(v2_handlers, 'search_items', mock_items)
    client.get('/v2/items')


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_auth')
@pytest.mark.usefixtures('stub_tracking')
async def test_multiple_items_formats_response_metadata(monkeypatch, mocker):
    """multiple_items() assembles the correct response metadata"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_200)
    request = get_request('/v2/items', 'q=abcd')
    response_obj = await v2_handlers.multiple_items(request)
    result = json.loads(response_obj.body)
    # See minimal_good_response above
    assert result['count'] == 1
    assert result['start'] == 1  # page 1; the default
    assert result['limit'] == 10  # the default
    assert result['docs'] == \
        [hit['_source'] for hit in minimal_good_response['hits']['hits']]


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_auth')
async def test_multiple_items_handles_query_parameters(monkeypatch, mocker):
    """multiple_items() makes a good `goodparams' from querystring params"""
    def mock_searchquery(params_to_check):
        assert params_to_check == {'q': 'test'}
        return {}
    monkeypatch.setattr(search_query, 'SearchQuery', mock_searchquery)
    monkeypatch.setattr(requests, 'post', mock_es_post_response_200)
    request = get_request('/v2/items', 'q=test')
    await v2_handlers.multiple_items(request)


@pytest.mark.asyncio
async def test_multiple_items_calls_BackgroundTask(monkeypatch,
                                                   mocker):
    """It instantiates BackgroundTask correctly"""
    def mock_items(*args):
        return minimal_good_response
    def mock_account(*args):
        return models.Account(id=1, key='a1b2c3', email='<EMAIL>')
    def mock_background_task(*args, **kwargs):
        # __init__() has to return None, so this is not a mocker.stub()
        return None
    monkeypatch.setattr(v2_handlers, 'items', mock_items)
    monkeypatch.setattr(v2_handlers, 'account_from_params', mock_account)
    monkeypatch.setattr(BackgroundTask, '__init__', mock_background_task)
    mocker.spy(BackgroundTask, '__init__')
    request = get_request('/v2/items', 'q=test')
    ok_data = {
        'count': 1,
        'start': 1,
        'limit': 10,
        'docs': [{'sourceResource': {'title': 'x'}}],
        'facets': []
    }
    await v2_handlers.multiple_items(request)
    BackgroundTask.__init__.assert_called_once_with(
        mocker.ANY, mocker.ANY, request=mocker.ANY, results=ok_data,
        api_key='a1b2c3', title='Item search results')


@pytest.mark.asyncio
async def test_multiple_items_strips_lone_star_vals(monkeypatch, mocker):
    """Parameters whose value is a bare '*' are removed before the search"""
    def mock_items(*argv):
        return minimal_good_response
    def mock_account(*argv):
        return models.Account(key='a1b2c3', email='<EMAIL>')
    monkeypatch.setattr(v2_handlers, 'account_from_params', mock_account)
    monkeypatch.setattr(v2_handlers, 'search_items', mock_items)
    mocker.spy(v2_handlers, 'search_items')
    # 'q' should be stripped out because it is just '*'
    request = get_request('/v2/items', 'q=*')
    await v2_handlers.multiple_items(request)
    v2_handlers.search_items.assert_called_once_with(
        {'page': 1, 'page_size': 10, 'sort_order': 'asc'})
# end multiple_items tests.
# mlt tests ...
@pytest.mark.usefixtures('disable_auth')
def test_mlt_calls_mlt_items_correctly(monkeypatch):
    """/v2/items/<item>/mlt calls mlt_items with dictionary"""
    def mock_items(arg):
        assert isinstance(arg, dict)
        return minimal_good_response
    monkeypatch.setattr(v2_handlers, 'mlt_items', mock_items)
    client.get('/v2/items/13283cd2bd45ef385aae962b144c7e6a/mlt')


@pytest.mark.usefixtures('disable_auth')
@pytest.mark.usefixtures('stub_tracking')
def test_mlt_formats_response_metadata(monkeypatch, mocker):
    """mlt_items() assembles the correct response metadata"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_200)
    response = client.get('/v2/items/13283cd2bd45ef385aae962b144c7e6a/mlt')
    result = response.json()
    # See minimal_good_response above
    assert result['count'] == 1
    assert result['start'] == 1
    assert result['limit'] == 10
    assert result['docs'] == \
        [hit['_source'] for hit in minimal_good_response['hits']['hits']]


@pytest.mark.usefixtures('disable_auth')
def test_mlt_returns_bad_request_err_for_bad_id(monkeypatch, mocker):
    """It raises a Bad Request error for a badly-formatted record ID"""
    response = client.get('/v2/items/13283cd2bd45ef385aae962b144c7e6a,x/mlt')
    assert response.status_code == 400


@pytest.mark.asyncio
async def test_mlt_calls_track_w_correct_params(monkeypatch, mocker):
    """It calls dplaapi.analytics.track() correctly"""
    def mock_items(*argv):
        return minimal_good_response
    def mock_account(*argv):
        return models.Account(key='a1b2c3', email='<EMAIL>')
    monkeypatch.setattr(v2_handlers, 'items', mock_items)
    monkeypatch.setattr(v2_handlers, 'account_from_params', mock_account)
    track_stub = mocker.stub(name='track_stub')
    monkeypatch.setattr(v2_handlers, 'track', track_stub)
    path_params = {'id_or_ids': '13283cd2bd45ef385aae962b144c7e6a'}
    request = get_request('/v2/items/13283cd2bd45ef385aae962b144c7e6a/mlt',
                          path_params=path_params)
    ok_data = {
        'count': 1,
        'start': 1,
        'limit': 10,
        'docs': [{'sourceResource': {'title': 'x'}}]
    }
    await v2_handlers.mlt(request)
    track_stub.assert_called_once_with(request, ok_data, 'a1b2c3',
                                       'More-Like-This search results')


@pytest.mark.usefixtures('disable_auth')
def test_mlt_rejects_invalid_params(monkeypatch, mocker):
    """The MLT handler rejects parameters of the regular search that are
    irrelevant to More-Like-This and gives a clear message about the
    parameter being invalid.
    """
    search_param_keys = set(types.items_params.keys())
    mlt_param_keys = set(types.mlt_params.keys())
    # Every search-only parameter must be rejected by the MLT endpoint
    bad_params = search_param_keys - mlt_param_keys
    for param in bad_params:
        path = '/v2/items/13283cd2bd45ef385aae962b144c7e6a/mlt?%s=x' % param
        response = client.get(path)
        assert response.status_code == 400
        assert 'is not a valid parameter' in response.json()
# end mlt tests.
# specific_items tests ...
@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_passes_ids(monkeypatch, mocker):
    """specific_item() calls search_items() with correct 'ids' parameter"""
    def mock_search_items(*args):
        return minimal_good_response
    monkeypatch.setattr(v2_handlers, 'search_items', mock_search_items)
    mocker.spy(v2_handlers, 'search_items')
    path_params = {'id_or_ids': '13283cd2bd45ef385aae962b144c7e6a'}
    request = get_request('/v2/items/13283cd2bd45ef385aae962b144c7e6a',
                          path_params=path_params)
    await v2_handlers.specific_item(request)
    v2_handlers.search_items.assert_called_once_with(
        {'page': 1, 'page_size': 1, 'sort_order': 'asc',
         'ids': ['13283cd2bd45ef385aae962b144c7e6a']})


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_handles_multiple_ids(monkeypatch, mocker):
    """It splits ids on commas and calls search_items() with a list of those
    IDs
    """
    def mock_search_items(arg):
        assert len(arg['ids']) == 2
        return minimal_good_response
    ids = '13283cd2bd45ef385aae962b144c7e6a,00000062461c867a39cac531e13a48c1'
    monkeypatch.setattr(v2_handlers, 'search_items', mock_search_items)
    path_params = {'id_or_ids': ids}
    request = get_request("/v2/items/%s" % ids, path_params=path_params)
    await v2_handlers.specific_item(request)


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_rejects_bad_ids_1(mocker):
    """It rejects an ID that is not a 32-char hex string"""
    path_params = {'id_or_ids': 'x'}
    request = get_request('/v2/items/x', path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_item(request)
        # NOTE(review): asserts placed after the raising call inside a
        # pytest.raises block never execute -- confirm and move outside.
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_rejects_bad_ids_2(mocker):
    """It rejects a list in which any single ID is malformed"""
    ids = '13283cd2bd45ef385aae962b144c7e6a,00000062461c867'
    path_params = {'id_or_ids': ids}
    request = get_request("/v2/items/%s" % ids, path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_item(request)
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_accepts_callback_querystring_param(monkeypatch,
                                                                mocker):
    """A JSONP 'callback' querystring parameter is accepted without error"""
    def mock_items(arg):
        return minimal_good_response
    monkeypatch.setattr(v2_handlers, 'items', mock_items)
    ids = '13283cd2bd45ef385aae962b144c7e6a'
    path_params = {'id_or_ids': ids}
    query_string = 'callback=f'
    request = get_request("/v2/items/%s" % ids,
                          path_params=path_params,
                          querystring=query_string)
    await v2_handlers.specific_item(request)


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_rejects_bad_querystring_param(mocker):
    """Search-only parameters such as page_size are rejected here"""
    ids = '13283cd2bd45ef385aae962b144c7e6a'
    path_params = {'id_or_ids': ids}
    query_string = 'page_size=1'
    request = get_request("/v2/items/%s" % ids,
                          path_params=path_params,
                          querystring=query_string)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_item(request)
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_item_NotFound_for_zero_hits(monkeypatch, mocker):
    """It raises a Not Found if there are no documents"""
    def mock_zero_items(*args):
        return {'hits': {'total': {'value': 0}}}
    monkeypatch.setattr(v2_handlers, 'items', mock_zero_items)
    ids = '13283cd2bd45ef385aae962b144c7e6a'
    path_params = {'id_or_ids': ids}
    request = get_request("/v2/items/%s" % ids, path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_item(request)
        assert e.status_code == 404


@pytest.mark.asyncio
async def test_specific_item_calls_BackgroundTask(monkeypatch,
                                                  mocker):
    """It instantiates BackgroundTask correctly"""
    def mock_items(*argv):
        return minimal_good_response
    def mock_account(*argv):
        return models.Account(id=1, key='a1b2c3', email='<EMAIL>')
    def mock_background_task(*args, **kwargs):
        # __init__() has to return None, so this is not a mocker.stub()
        return None
    monkeypatch.setattr(v2_handlers, 'search_items', mock_items)
    monkeypatch.setattr(v2_handlers, 'account_from_params', mock_account)
    monkeypatch.setattr(BackgroundTask, '__init__', mock_background_task)
    mocker.spy(BackgroundTask, '__init__')
    ok_data = {
        'count': 1,
        'docs': [{'sourceResource': {'title': 'x'}}]
    }
    ids = '13283cd2bd45ef385aae962b144c7e6a'
    path_params = {'id_or_ids': ids}
    request = get_request("/v2/items/%s" % ids, path_params=path_params)
    await v2_handlers.specific_item(request)
    BackgroundTask.__init__.assert_called_once_with(
        mocker.ANY, mocker.ANY, request=mocker.ANY, results=ok_data,
        api_key='a1b2c3', title='Fetch items')
# end specific_items tests.
# begin necropolis tests
@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_passes_id(monkeypatch, mocker):
    """specific_necropolis_item() calls search_necropolis_items() with correct
    'id' parameter"""
    def mock_necropolis_item(*args):
        return minimal_necro_response
    monkeypatch.setattr(v2_handlers, 'search_necropolis_items',
                        mock_necropolis_item)
    mocker.spy(v2_handlers, 'search_necropolis_items')
    path_params = {'single_id': '13283cd2bd45ef385aae962b144c7e6a'}
    request = get_request('/v2/necropolis/13283cd2bd45ef385aae962b144c7e6a',
                          path_params=path_params)
    await v2_handlers.specific_necropolis_item(request)
    v2_handlers.search_necropolis_items.assert_called_once_with(
        {'page': 1, 'page_size': 10, 'sort_order': 'asc',
         'id': '13283cd2bd45ef385aae962b144c7e6a'})


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_rejects_multiple_ids(monkeypatch, mocker):
    """The necropolis endpoint only accepts a single record ID"""
    ids = '13283cd2bd45ef385aae962b144c7e6a,00000062461c867a39cac531e13a48c1'
    path_params = {'single_id': ids}
    request = get_request("/v2/necropolis/%s" % ids, path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_necropolis_item(request)
        # NOTE(review): unreachable after the raising call -- confirm and
        # move `assert e.value.status_code == 400` outside the `with`.
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_rejects_bad_ids_1(mocker):
    """It rejects an ID that is not a 32-char hex string"""
    path_params = {'single_id': 'x'}
    request = get_request('/v2/necropolis/x', path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_necropolis_item(request)
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_rejects_bad_ids_2(mocker):
    """It rejects an ID that is too short"""
    id = '00000062461c867'
    path_params = {'single_id': id}
    request = get_request("/v2/necropolis/%s" % id, path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_necropolis_item(request)
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_accepts_callback_query_param(monkeypatch,
                                                                mocker):
    """A JSONP 'callback' querystring parameter is accepted without error"""
    def mock_items(arg):
        return minimal_necro_response
    monkeypatch.setattr(v2_handlers, 'necropolis_items', mock_items)
    id = '<KEY>'
    path_params = {'single_id': id}
    query_string = 'callback=f'
    request = get_request("/v2/necropolis/%s" % id,
                          path_params=path_params,
                          querystring=query_string)
    await v2_handlers.specific_necropolis_item(request)


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_rejects_bad_querystring_param(mocker):
    """Search-only parameters such as page_size are rejected here"""
    id = '<KEY>'
    path_params = {'single_id': id}
    query_string = 'page_size=1'
    request = get_request("/v2/necropolis/%s" % id,
                          path_params=path_params,
                          querystring=query_string)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_necropolis_item(request)
        assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('disable_api_key_check')
async def test_specific_necro_item_NotFound_for_zero_hits(monkeypatch, mocker):
    """It raises a Not Found if there are no documents"""
    def mock_zero_items(*args):
        return {'hits': {'total': {'value': 0}}}
    monkeypatch.setattr(v2_handlers, 'necropolis_items', mock_zero_items)
    id = '13283cd2bd45ef385aae962b144c7e6a'
    path_params = {'single_id': id}
    request = get_request("/v2/necropolis/%s" % id, path_params=path_params)
    with pytest.raises(HTTPException) as e:
        await v2_handlers.specific_necropolis_item(request)
        assert e.status_code == 404


@pytest.mark.asyncio
async def test_specific_nero_item_calls_BackgroundTask(monkeypatch, mocker):
    # NOTE(review): "nero" in the test name is presumably a typo for "necro".
    """It instantiates BackgroundTask correctly"""
    def mock_items(*argv):
        return minimal_necro_response
    def mock_account(*argv):
        return models.Account(id=1, key='a1b2c3', email='<EMAIL>')
    def mock_background_task(*args, **kwargs):
        # __init__() has to return None, so this is not a mocker.stub()
        return None
    monkeypatch.setattr(v2_handlers, 'search_necropolis_items', mock_items)
    monkeypatch.setattr(v2_handlers, 'account_from_params', mock_account)
    monkeypatch.setattr(BackgroundTask, '__init__', mock_background_task)
    mocker.spy(BackgroundTask, '__init__')
    ok_data = {
        'count': 1,
        'docs': [{'id': '<KEY>'}]
    }
    id = '<KEY>'
    path_params = {'single_id': id}
    request = get_request("/v2/items/%s" % id, path_params=path_params)
    await v2_handlers.specific_necropolis_item(request)
    BackgroundTask.__init__.assert_called_once_with(
        mocker.ANY, mocker.ANY, request=mocker.ANY, results=ok_data,
        api_key='a1b2c3', title='Fetch necropolis item')
# end necropolis tests
# begin api_key and related tests
class MockBoto3Client:
    """Minimal stand-in for a boto3 SES client.

    Tests monkeypatch ``send_email`` with a mocker stub to capture the
    call arguments; the default implementation is a no-op.
    """

    def send_email(*args):
        """Do nothing; present only so the attribute exists to patch."""
        pass


def mock_boto3_client_factory(*args):
    """Replacement for ``boto3.client``: ignore arguments, return a mock."""
    return MockBoto3Client()
def test_send_email_uses_correct_source_and_destination(monkeypatch, mocker):
    """It makes the boto3 send_email call with the right parameters"""
    from_email = '<EMAIL>'
    to_email = '<EMAIL>'
    monkeypatch.setenv('EMAIL_FROM', from_email)
    monkeypatch.setattr(boto3, 'client', mock_boto3_client_factory)
    send_email_stub = mocker.stub()
    monkeypatch.setattr(MockBoto3Client, 'send_email', send_email_stub)
    v2_handlers.send_email('x', to_email)
    send_email_stub.assert_called_once_with(
        Destination={'ToAddresses': ['<EMAIL>']},
        Message='x',
        Source='<EMAIL>')


def test_send_email_raises_ServerError_for_no_EMAIL_FROM(monkeypatch, mocker):
    """It raises Server Error when the EMAIL_FROM env. var. is undefined"""
    monkeypatch.delenv('EMAIL_FROM', raising=False)
    with pytest.raises(HTTPException) as e:
        v2_handlers.send_email('x', '<EMAIL>')
        # NOTE(review): unreachable after the raising call -- confirm and
        # move `assert e.value.status_code == 500` outside the `with`.
        assert e.status_code == 500


def test_send_api_key_email_calls_send_email_w_correct_params(monkeypatch,
                                                              mocker):
    """It calls send_email() with the correct parameters"""
    send_email_stub = mocker.stub()
    monkeypatch.setattr(v2_handlers, 'send_email', send_email_stub)
    v2_handlers.send_api_key_email('<EMAIL>', 'a1b2c3')
    send_email_stub.assert_called_once_with(
        {
            'Body': {
                'Text': {
                    'Data': 'Your API key is a1b2c3'
                }
            },
            'Subject': {'Data': 'Your new DPLA API key'}
        },
        '<EMAIL>'
    )


def test_send_api_key_email_raises_ServerError_for_Exception(monkeypatch):
    """A generic failure inside send_email surfaces as a 500"""
    def buggy_send_email(*args):
        1/0
    monkeypatch.setattr(v2_handlers, 'send_email', buggy_send_email)
    with pytest.raises(HTTPException) as e:
        v2_handlers.send_api_key_email('<EMAIL>', 'a1b2c3')
        assert e.status_code == 500


def test_send_api_key_email_reraises_HTTPException(monkeypatch):
    """An HTTPException from send_email is re-raised unchanged"""
    def buggy_send_email(*args):
        raise HTTPException(404)
    monkeypatch.setattr(v2_handlers, 'send_email', buggy_send_email)
    with pytest.raises(HTTPException) as e:
        v2_handlers.send_api_key_email('<EMAIL>', 'a1b2c3')
        assert e.status_code == 404


def test_send_reminder_email_calls_send_email_w_correct_params(monkeypatch,
                                                               mocker):
    """It calls send_email with the correct parameters."""
    send_email_stub = mocker.stub()
    monkeypatch.setattr(v2_handlers, 'send_email', send_email_stub)
    v2_handlers.send_reminder_email('<EMAIL>', 'a1b2c3')
    send_email_stub.assert_called_once_with(
        {
            'Body': {
                'Text': {
                    'Data': 'The most recent API key for <EMAIL> '
                            'is a1b2c3'
                }
            },
            'Subject': {'Data': 'Your existing DPLA API key'}
        },
        '<EMAIL>'
    )


def test_send_reminder_email_raises_Server_Error_for_Exception(monkeypatch):
    """A generic failure inside send_email surfaces as a 500"""
    def buggy_send_email(*args):
        1/0
    monkeypatch.setattr(v2_handlers, 'send_email', buggy_send_email)
    with pytest.raises(HTTPException) as e:
        v2_handlers.send_reminder_email('<EMAIL>', 'a1b2c3')
        assert e.status_code == 500


def test_send_reminder_email_reraises_HTTPException(monkeypatch):
    """An HTTPException from send_email is re-raised unchanged"""
    def buggy_send_email(*args):
        raise HTTPException(404)
    monkeypatch.setattr(v2_handlers, 'send_email', buggy_send_email)
    with pytest.raises(HTTPException) as e:
        v2_handlers.send_reminder_email('<EMAIL>', 'a1b2c3')
        assert e.status_code == 404
@pytest.mark.asyncio
async def test_api_key_flunks_bad_email():
    """api_key() rejects an obviously malformed email address"""
    # But only the most obvious cases involving misplaced '@' or lack of '.'
    bad_addrs = ['f@@ey', '56b7165e4f8a54b4faf1e04c46a6145c']
    for addr in bad_addrs:
        path_params = {'email': addr}
        request = post_request("/v2/api_key/%s" % addr,
                               path_params=path_params)
        with pytest.raises(HTTPException) as e:
            await v2_handlers.api_key(request)
            # NOTE(review): unreachable after the raising call -- confirm
            # and move `assert e.value.status_code == 400` outside.
            assert e.status_code == 400


@pytest.mark.asyncio
@pytest.mark.usefixtures('patch_db_connection')
async def test_api_key_bails_if_account_exists_for_email(monkeypatch, mocker):
    """api_key() quits and sends a reminder email if there's already an Account
    for the given email"""
    def mock_get(*args, **kwargs):
        return models.Account(email='<EMAIL>',
                              key='<KEY>')
    monkeypatch.setattr(models.Account, 'get', mock_get)
    stub = mocker.stub()
    monkeypatch.setattr(v2_handlers, 'send_reminder_email', stub)
    request = post_request("/v2/api_key/<EMAIL>",
                           path_params={'email': '<EMAIL>'})
    with pytest.raises(HTTPException) as e:
        await v2_handlers.api_key(request)
        assert e.status_code == 409
    # The reminder email goes out before the 409 is raised
    stub.assert_called_once_with('<EMAIL>',
                                 '<KEY>')


@pytest.mark.asyncio
@pytest.mark.usefixtures('patch_bad_db_connection')
async def test_api_key_raises_503_for_bad_db_connection(monkeypatch,
                                                        mocker):
    """api_key() raises ServerError if it can't connect to the database"""
    with pytest.raises(HTTPException) as e:
        request = post_request("/v2/api_key/<EMAIL>",
                               path_params={'email': '<EMAIL>'})
        await v2_handlers.api_key(request)
        assert e.status_code == 503


# Fixture for the following two tests
@pytest.fixture(scope='function')
def good_api_key_invocation(monkeypatch, mocker):
    """Patch everything api_key() touches so a fresh Account can be created
    without a real database, SES client, or secrets source."""
    def mock_token_hex(*args):
        # Deterministic stand-in for secrets.token_hex()
        return '<KEY>'
    def mock_get(*args, **kwargs):
        # No existing Account for the email
        raise DoesNotExist()
    class AtomicContextMgr(object):
        # No-op replacement for the peewee transaction context manager
        def __enter__(self):
            pass
        def __exit__(self, *args, **kwargs):
            pass
    monkeypatch.setattr(secrets, 'token_hex', mock_token_hex)
    monkeypatch.setattr(models.Account, 'get', mock_get)
    send_email_stub = mocker.stub()
    monkeypatch.setattr(v2_handlers, 'send_api_key_email', send_email_stub)
    save_stub = mocker.stub()
    monkeypatch.setattr(models.Account, 'save', save_stub)
    monkeypatch.setattr(models.db, 'atomic', AtomicContextMgr)
    yield


@pytest.mark.asyncio
@pytest.mark.usefixtures('patch_db_connection')
@pytest.mark.usefixtures('good_api_key_invocation')
async def test_api_key_creates_account(monkeypatch, mocker):
    """api_key() creates a new Account record & defines the right fields"""
    mocker.spy(models.Account, '__init__')
    request = post_request("/v2/api_key/<EMAIL>",
                           path_params={'email': '<EMAIL>'})
    await v2_handlers.api_key(request)
    models.Account.__init__.assert_called_with(
        mocker.ANY,
        key='<KEY>',
        email='<EMAIL>',
        enabled=True)
# end api_key tests
def test_geo_facets():
    """geo_facets() reshapes an ES geo_distance aggregation for the API"""
    result = v2_handlers.geo_facets(
        es6_facets['sourceResource.spatial.coordinates'])
    assert result == {
        '_type': 'geo_distance',
        'ranges': [{'from': 0, 'to': 99, 'count': 518784}]
    }


def test_date_facets():
    """date_facets() reshapes an ES date histogram aggregation for the API"""
    result = v2_handlers.date_facets(
        es6_facets['sourceResource.date.begin.year'])
    assert result == {
        '_type': 'date_histogram',
        'entries': [{'time': '1947', 'count': 1}]
    }


def test_term_facets():
    """term_facets() reshapes an ES terms aggregation for the API"""
    result = v2_handlers.term_facets(
        es6_facets['provider.name'])
    assert result == {
        '_type': 'terms',
        'terms': [
            {
                'term': 'National Archives and Records Administration',
                'count': 3781862
            }
        ]
    }


def test_formatted_facets():
    """It makes the necessary function calls and dictionary lookups to
    construct and return a correct dict"""
    assert v2_handlers.formatted_facets(es6_facets) == {
        'provider.name': {
            '_type': 'terms',
            'terms': [
                {
                    'term': 'National Archives and Records Administration',
                    'count': 3781862
                }
            ]
        },
        'sourceResource.date.begin.year': {
            '_type': 'date_histogram',
            'entries': [
                {
                    'time': '1947',
                    'count': 1
                }
            ]
        },
        'sourceResource.spatial.coordinates': {
            '_type': 'geo_distance',
            'ranges': [
                {
                    'from': 0,
                    'to': 99,
                    'count': 518784
                }
            ]
        }
    }


def test_formatted_facets_returns_empty_list_for_no_facets():
    """It returns an empty list, not a dict, for no facets!"""
    # Presumably preserved for compatibility with the legacy API's output.
    assert v2_handlers.formatted_facets({}) == []  # sigh.
def test_dict_with_date_buckets_works_with_ranges_aggregation():
    """It picks the 'buckets' out of an aggregation response for a range
    aggregation"""
    es_result_agg = {
        'buckets': [
            {
                'key': '1520-1529',
                'from': -14200704000000,
                'from_as_string': '1520',
                'to': -13916620800000,
                'to_as_string': '1529',
                'doc_count': 0
            }
        ]
    }
    result = v2_handlers.dict_with_date_buckets(es_result_agg)
    # isinstance is the idiomatic type check (`type(x) == list` rejects
    # subclasses and is flagged by linters)
    assert isinstance(result, list)


def test_dict_with_date_buckets_works_with_histogram_aggregation():
    """It picks the 'buckets' out of an aggregation response for a histogram
    aggregation with our 'filter' clause"""
    es_result_agg = {
        'doc_count': 14,
        'sourceResource.date.begin.year': {
            'buckets': [
                {
                    'key_as_string': '1947',
                    'key': -725846400000,
                    'doc_count': 1
                }
            ]
        }
    }
    result = v2_handlers.dict_with_date_buckets(es_result_agg)
    assert isinstance(result, list)


def test_dict_with_date_buckets_raises_exception_with_weird_aggregation():
    """It raises an Exception if it gets weird data without a 'buckets'
    property"""
    es_result_agg = {
        'doc_count': 14,
        'x': 'x'
    }
    with pytest.raises(Exception):
        v2_handlers.dict_with_date_buckets(es_result_agg)
def test_response_object_returns_JSONResponse_for_typical_request():
    """It returns an apistar.http.JSONResponse object for a typical request,
    without a JSONP callback"""
    rv = v2_handlers.response_object({}, {})
    assert isinstance(rv, JSONResponse)


def test_response_object_returns_correct_Response_for_JSONP_request():
    """It returns a plain Response with a JavaScript content type and the
    JSON wrapped in the named callback when a 'callback' param is given"""
    rv = v2_handlers.response_object({}, {'callback': 'f'})
    assert isinstance(rv, Response)
    assert rv.headers['content-type'] \
        == 'application/javascript; charset=utf-8'
    assert rv.body == b'f({})'
# Exception-handling and HTTP status double-checks
@pytest.mark.usefixtures('disable_auth')
def test_elasticsearch_500_means_client_503(monkeypatch):
    """An Elasticsearch 5xx is reported to the client as 503"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_err)
    response = client.get('/v2/items')
    assert response.status_code == 503
    assert response.json() == 'Backend search operation failed'


@pytest.mark.usefixtures('disable_auth')
def test_elasticsearch_400_means_client_400(monkeypatch):
    """An Elasticsearch 400 (bad query) is passed through as a client 400"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_400)
    response = client.get('/v2/items?q=some+bad+search')
    assert response.status_code == 400
    assert response.json() == 'Invalid query'


@pytest.mark.usefixtures('disable_auth')
def test_elasticsearch_404_means_client_503(monkeypatch):
    """An Elasticsearch 404 is reported to the client as 503"""
    monkeypatch.setattr(requests, 'post', mock_es_post_response_404)
    response = client.get('/v2/items')
    assert response.status_code == 503
    assert response.json() == 'Backend search operation failed'


@pytest.mark.usefixtures('disable_api_key_check')
def test_ItemsQueryType_ValidationError_means_client_400(monkeypatch):
    """A ValidationError constructing the query type becomes a client 400"""
    def badinit(*args, **kwargs):
        raise ValidationError('no good')
    monkeypatch.setattr(types.ItemsQueryType, '__init__', badinit)
    response = client.get('/v2/items?hasView.format=some+bad+string')
    assert response.status_code == 400


@pytest.mark.usefixtures('disable_api_key_check')
def test_ItemsQueryType_Exception_means_client_500(monkeypatch):
    """An unexpected error constructing the query type becomes a 500"""
    def badinit(*args, **kwargs):
        raise AttributeError()
    monkeypatch.setattr(types.ItemsQueryType, '__init__', badinit)
    response = client.get('/v2/items?provider.name=a+provider')
    assert response.status_code == 500
    assert response.json() == 'Unexpected error'


@pytest.mark.usefixtures('disable_auth')
def test_search_query_Exception_means_client_500(monkeypatch):
    """An unexpected error constructing the SearchQuery becomes a 500"""
    def problem_func(*args, **kwargs):
        raise KeyError()
    monkeypatch.setattr(SearchQuery, '__init__', problem_func)
    response = client.get('/v2/items')
    assert response.status_code == 500
    assert response.json() == 'Unexpected error'
def test_compact_with_dotted_field_param():
    """compact() takes an ES 6 "doc" and compacts the keys so that they look
    like they used to coming out of ES 0.90"""
    before = {
        'id': '00000134adfdfa05e988480f9fa56b1a',
        'sourceResource': {
            'date': {
                'begin': '1990',
                'end': '1991'
            }
        }
    }
    after = {
        'id': '00000134adfdfa05e988480f9fa56b1a',
        'sourceResource.date': {
            'begin': '1990',
            'end': '1991'
        }
    }
    result = v2_handlers.compact(before.copy(),
                                 {'fields': 'id,sourceResource.date'})
    assert result == after


def test_compact_without_toplevel_field_param():
    """compact() does not flatten fields if we're returning the whole
    sourceResource"""
    before = {
        'id': '00000134adfdfa05e988480f9fa56b1a',
        'sourceResource': {
            'title': 'x',
            'date': {
                'begin': '1990',
                'end': '1991'
            }
        }
    }
    result = v2_handlers.compact(before.copy(),
                                 {'fields': 'id,sourceResource'})
    assert result == before


def test_compact_handles_missing_fields():
    """compact() handles documents that don't have the requested field"""
    before = {
        'id': '00000134adfdfa05e988480f9fa56b1a',
        'sourceResource': {
            'title': 'x'
        }
    }
    after = {
        'id': '00000134adfdfa05e988480f9fa56b1a',
        'sourceResource.title': 'x'
    }
    params = {
        'fields': 'id,sourceResource.title,sourceResource.date'
    }
    result = v2_handlers.compact(before.copy(), params)
    assert result == after
def test_items_key():
    """items_key() builds a hashable cache key from the request params"""
    params = {'api_key': 'a1b2c3', 'ids': ['e5', 'd4']}
    result = v2_handlers.items_key(params)
    # Note that 'ids' is sorted
    assert result == (('api_key', 'a1b2c3'), ('ids', 'd4,e5'), 'v2_items')
def test_traverse_doc_handles_strings():
    """A dotted path through nested dicts yields the scalar value"""
    path = 'sourceResource.language.name'
    doc = {'sourceResource': {'language': {'name': 'English'}}}
    result = v2_handlers.traverse_doc(path, doc)
    assert result == 'English'


def test_traverse_doc_handles_lists_1():
    """A single-element list along the path is unwrapped to its value"""
    path = 'sourceResource.language.name'
    doc = {'sourceResource': {'language': [{'name': 'English'}]}}
    result = v2_handlers.traverse_doc(path, doc)
    assert result == 'English'


def test_traverse_doc_handles_lists_2():
    """A multi-element list along the path yields a list of values"""
    path = 'sourceResource.language.name'
    doc = {
        'sourceResource': {
            'language': [{'name': 'English'}, {'name': 'Spanish'}]
        }
    }
    result = v2_handlers.traverse_doc(path, doc)
    assert result == ['English', 'Spanish']


def test_traverse_doc_handles_nested_arrays_and_objects():
    """Alternating lists and dicts along the path are traversed"""
    path = 'a.b.c.d'
    doc = {'a': {'b': [{'c': [{'d': 'the value'}]}]}}
    result = v2_handlers.traverse_doc(path, doc)
    assert result == 'the value'


def test_traverse_doc_handles_nonexistent_field_1():
    """A path that dead-ends in a dict yields None"""
    path = 'a.b.c.d'
    doc = {'a': {'b': [{'foo': 'x'}]}}
    result = v2_handlers.traverse_doc(path, doc)
    assert result is None


def test_traverse_doc_handles_nonexistent_field_2():
    """A path that descends into a scalar yields None"""
    path = 'a.b'
    doc = {'a': 'x'}
    result = v2_handlers.traverse_doc(path, doc)
    assert result is None


def test_traverse_doc_handles_empty_list():
    """An empty list at the end of the path yields None"""
    path = 'a'
    doc = {'a': []}
    result = v2_handlers.traverse_doc(path, doc)
    assert result is None


def test_traverse_doc_handles_object():
    """A dict at the end of the path is returned as-is"""
    path = 'a'
    doc = {'a': {'b': 'c'}}
    result = v2_handlers.traverse_doc(path, doc)
    assert result == {'b': 'c'}
def test_flatten():
    """flatten() yields the elements of arbitrarily nested lists in order,
    and yields nothing for None."""
    # list(generator) is the idiomatic spelling; `[x for x in gen]` is an
    # unnecessary comprehension (flake8-comprehensions C416).
    li = ['a', 'b']
    assert list(v2_handlers.flatten(li)) == ['a', 'b']
    li = ['a', ['b', 'c']]
    assert list(v2_handlers.flatten(li)) == ['a', 'b', 'c']
    li = ['a', ['b', ['c', 'd']]]
    assert list(v2_handlers.flatten(li)) == ['a', 'b', 'c', 'd']
    assert list(v2_handlers.flatten(None)) == []
| StarcoderdataPython |
36902 | """
Author: <NAME>
Test simulation functionality
"""
print("Test")
| StarcoderdataPython |
88624 | <filename>analyze_simulation_length.py
""" Analyze simulation output - mass change, runoff, etc. """
# Built-in libraries
#from collections import OrderedDict
#import datetime
#import glob
import os
#import pickle
# External libraries
#import cartopy
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import MaxNLocator
#from matplotlib.lines import Line2D
#import matplotlib.patches as mpatches
#from matplotlib.ticker import MultipleLocator
#from matplotlib.ticker import EngFormatter
#from matplotlib.ticker import StrMethodFormatter
#from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import numpy as np
import pandas as pd
#from scipy.stats import linregress
#from scipy.ndimage import uniform_filter
#import scipy
#from scipy import stats
#from scipy.stats.kde import gaussian_kde
#from scipy.stats import norm
#from scipy.stats import truncnorm
#from scipy.stats import uniform
#from scipy.stats import linregress
#from scipy.stats import lognorm
#from scipy.optimize import minimize
from scipy.stats import median_abs_deviation
import xarray as xr
# Local libraries
#import class_climate
#import class_mbdata
#import pygem.pygem_input as pygem_prms
#import pygemfxns_gcmbiasadj as gcmbiasadj
#import pygemfxns_massbalance as massbalance
#import pygemfxns_modelsetup as modelsetup
#import run_calibration as calibration
#%% ===== Input data =====
# Root directory of the simulation output netCDFs (the path name suggests a
# 100-member ensemble; each file holds one glacier's results).
netcdf_fp_cmip5 = '/Users/drounce/Documents/HiMAT/spc_backup/simulations_100/'
#netcdf_fp_era = pygem_prms.output_filepath + 'simulations/spc_20190914/merged/ERA-Interim/'
#%%
# RGI region numbers to process
regions = [1]
# GCMs and RCP scenarios
gcm_names = ['CCSM4']
#rcps = ['rcp26', 'rcp45', 'rcp85']
rcps = ['rcp26']
# Grouping
grouping = 'rgi_region'
for reg in regions:
for gcm_name in gcm_names:
for rcp in rcps:
netcdf_fp = netcdf_fp_cmip5 + gcm_name + '/' + rcp + '/' + 'essential/'
glac_fns = []
for i in os.listdir(netcdf_fp):
if i.endswith('.nc'):
glac_fns.append(i)
glac_fns = sorted(glac_fns)
# for glac_fn in glac_fns[0:1]:
for glac_fn in [glac_fns[15519]]:
ds = xr.open_dataset(netcdf_fp + glac_fn)
# Volume at end of model run
glac_vol_final = ds.glac_volume_annual.values[0,-1,:]
# statistics
glac_vol_final_med = np.median(glac_vol_final)
glac_vol_final_mad = median_abs_deviation(glac_vol_final)
# Bootstrap method for different lengthed lists
nsims_list = [100,90,80,70,60,50,40,30,20,10]
n_iters = 1000
df_columns = ['nsims', 'mean of meds', 'std of meds', 'std of meds [%]',
'mean of mads', 'std of mads', 'std of mads [%]']
df = pd.DataFrame(np.zeros((len(nsims_list), len(df_columns))), columns=df_columns)
for n, nsims in enumerate(nsims_list):
glac_vol_final_meds = []
glac_vol_final_mads = []
for ncount in np.arange(0,n_iters):
rand_idx = np.random.randint(0,glac_vol_final.shape[0],size=nsims)
glac_vol_final_sample = glac_vol_final[rand_idx]
glac_vol_final_sample_med = np.median(glac_vol_final_sample)
glac_vol_final_meds.append(glac_vol_final_sample_med)
glac_vol_final_sample_mad = median_abs_deviation(glac_vol_final_sample)
glac_vol_final_mads.append(glac_vol_final_sample_mad)
glac_vol_final_meds = np.array(glac_vol_final_meds)
glac_vol_final_meds_std = glac_vol_final_meds.std()
glac_vol_final_meds_mean = glac_vol_final_meds.mean()
glac_vol_final_mads = np.array(glac_vol_final_mads)
glac_vol_final_mads_std = glac_vol_final_mads.std()
glac_vol_final_mads_mean = glac_vol_final_mads.mean()
df.loc[n,:] = [nsims, glac_vol_final_meds_mean, glac_vol_final_meds_std,
glac_vol_final_meds_std/glac_vol_final_meds_mean*100,
glac_vol_final_mads_mean, glac_vol_final_mads_std,
glac_vol_final_mads_std/glac_vol_final_mads_mean*100]
print('nsims:', nsims, 'std of medians [%]:',
np.round(glac_vol_final_meds_std/glac_vol_final_meds_mean*100,1),'%')
| StarcoderdataPython |
1715628 | <filename>sign_in/forms.py
from django import forms
from .models import Profile
from django.contrib.auth.models import User
class Register(forms.ModelForm):
    """Sign-up form wrapping the built-in ``User`` model with a masked password field."""

    # Render the password as a masked input instead of plain text.
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ("username", "password", "email")
        # FIX: the original contained the anonymization artifact ``<PASSWORD>``
        # here, which is a SyntaxError.  ``None`` suppresses Django's default
        # help text, matching the other two fields.
        help_texts = {"username": None, "password": None, "email": None}

    def clean_email(self):
        """Reject an email address that is already used by another account.

        Returns:
            str: the validated email address (may be empty).

        Raises:
            forms.ValidationError: if a *different* user already registered it.
        """
        email = self.cleaned_data.get("email")
        username = self.cleaned_data.get("username")
        # Exclude the current username so re-submitting one's own email passes.
        if (
            email and User.objects.filter(email=email).exclude(username=username).exists()
        ):
            raise forms.ValidationError(u"Email address already registered.")
        return email
class ProfileInfo(forms.ModelForm):
    """Form for editing a ``Profile``; the owning user is set by the view, not by form input."""

    class Meta:
        model = Profile
        # ``user`` is excluded so it cannot be reassigned through the form.
        exclude = ["user"]
| StarcoderdataPython |
1785280 | """
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import sys
from pyb import I2C, LED, Servo, Switch
from srf_rangefinder import SRF08
def setup():
    """Initialize the LED, servo, switch and SRF08 rangefinder used by loop().

    Populates the module-level ``led``, ``servo``, ``sw`` and ``rf`` handles,
    reports the sensor's bus address and firmware revision as one JSON line
    on stdout, then configures the rangefinder's max range and analog gain.
    """
    global led, servo, sw, rf
    led = LED(2)
    servo = Servo(1)
    sw = Switch()
    # The SRF08 rangefinder sits on I2C bus 2.
    rf = SRF08(2, I2C.MASTER, baudrate=50000)
    # Emit sensor identification for the host (8-bit address form, firmware rev).
    info = {'address': rf.bus_address() << 1, 'revision': rf.sw_rev()}
    line = json.dumps(info) + '\n'
    sys.stdout.write(line)
    # Configure measurement range and receiver gain.
    rf.set_max_range(6000)
    rf.set_analog_gain(16)
def loop():
    """Run one iteration of the main loop.

    Sleeps until an interrupt fires; if the user switch is pressed, sweeps
    the servo from -90 to +90 degrees in 10-degree steps, triggering an
    SRF08 range measurement (centimetres) at each angle and writing one
    JSON object per sample to stdout, then re-centers the servo.
    """
    # FIX: the file only imports names *from* pyb (I2C, LED, Servo, Switch),
    # so the bare ``pyb`` used below for wfi()/delay() raised NameError at
    # runtime.  Bind the module locally (import is cached, so this is cheap).
    import pyb
    global led, servo, sw, rf
    # Sleep until any interrupt occurs (switch press, timer tick, ...).
    pyb.wfi()
    if sw():
        led.on()
        # Tell the host a new sweep is starting.
        sys.stdout.write(json.dumps({'reset': True}) + '\n')
        servo.angle(-90)
        # allow servo to reach starting angle
        pyb.delay(500)
        for a in range(-90, 100, 10):
            # set new angle, allow servo to settle
            servo.angle(a, 150)
            pyb.delay(10)
            # trigger a distance measurement and wait for it to complete
            rf.measure_range()
            pyb.delay(75)
            # read distance, send json to host
            sample = {
                'angle': a,
                'range': rf.read_range(),
            }
            sys.stdout.write(json.dumps(sample) + '\n')
        servo.angle(0)
        led.off()
# Entry point: initialize the hardware once, then service the loop forever.
if __name__ == "__main__":
    """
    Main program loop, does not return.
    """
    setup()
    while True:
        loop()
| StarcoderdataPython |
1653101 | <filename>python-dev/a115_buggy_image_lh/a116_buggy_image_lh.py
# a116_buggy_image.py
import turtle as trtl
# Draw a wheel: a thick-rimmed hub circle plus evenly spaced spokes.
pen = trtl.Turtle()

# Hub: a small circle drawn with a very thick pen forms the wheel rim.
pen.pensize(40)
pen.circle(20)

spoke_count = 6
spoke_length = 70
# FIX: the spokes must span the full circle, so the step is 360 degrees
# divided by the spoke count; the original used 380, which spaced the
# spokes unevenly (this is the intended "buggy image" defect).
angle_step = 360 / spoke_count

pen.pensize(5)
for spoke in range(spoke_count):
    pen.goto(0, 0)
    pen.setheading(angle_step * spoke)
    pen.forward(spoke_length)

pen.hideturtle()
screen = trtl.Screen()
screen.mainloop()
1681205 | <filename>templates.py<gh_stars>1-10
JAVA_FILE = """{header}
package {package};
{imports}
@Deprecated
public class {class_name} {{
{class_content}
static {{
{static_content}
}}
}}
"""
JAVA_HEADER = """/*
* {header}
*/
"""
MATERIAL_PROPERTIES_CLASS = \
"""static final ImmutableMap<Material, PropertyDefs> MATERIAL_PROPERTIES;
static class PropertyDefs {
private final PropMap properties;
PropertyDefs(PropMap properties) {
this.properties = properties;
}
public int serialize(Map<String, Object> values) {
if (values == null) {
return 0;
}
int mask = 0;
for (int i = this.properties.keys.length - 1; i >= 0; i--) {
String key = this.properties.keys[i];
PropertyEnum propEnum = this.properties.enums[i];
mask |= propEnum.serialize(values.getOrDefault(key, propEnum.defaultValue())) << i;
}
return mask;
}
public Map<String, Object> deserialize(int serial) {
int sizeAcc = 0;
ImmutableMap.Builder<String, Object> result = ImmutableMap.builder();
for (int i = this.properties.keys.length - 1; i >= 0; i--) {
String key = this.properties.keys[i];
PropertyEnum propEnum = this.properties.enums[i];
int enumSize = (int) Math.ceil(Math.log(propEnum.size()) / Math.log(2));
int x = ((1 << enumSize) - 1) & (serial >> ((sizeAcc + 1) - 1));
sizeAcc += enumSize;
result.put(key, propEnum.deserialize(x));
}
return result.build();
}
}
static class PropertyEnum {
private final String[] values;
public PropertyEnum(String... values) {
this.values = values;
}
public int serialize(Object value) {
return ArrayUtils.indexOf(values, value);
}
public Object deserialize(int serial) {
return values[serial];
}
public Object defaultValue() {
return values[0];
}
public int size() {
return values.length;
}
}
static class PropertyEnumBoolean extends PropertyEnum {
public PropertyEnumBoolean() {
super();
}
@Override
public Object deserialize(int serial) {
return serial != 1;
}
@Override
public int serialize(Object value) {
boolean b;
if (value instanceof Boolean) {
b = (Boolean) value;
} else if (value instanceof String) {
b = Boolean.valueOf((String) value);
} else {
b = true;
}
return b ? 0 : 1;
}
@Override
public int size() {
return 2;
}
@Override
public Object defaultValue() {
return true;
}
}
static class PropMap {
private final String[] keys;
private final PropertyEnum[] enums;
public PropMap(String[] keys, PropertyEnum[] enums) {
this.keys = keys;
this.enums = enums;
}
}
private static PropMap propMapOf(Object... keysAndVals) {
Iterator<Object> iter = Iterators.forArray(keysAndVals);
List<String> keys = new ArrayList<>();
List<PropertyEnum> enums = new ArrayList<>();
while (iter.hasNext()) {
keys.add((String) iter.next());
enums.add((PropertyEnum) iter.next());
}
return new PropMap(keys.toArray(new String[0]), enums.toArray(new PropertyEnum[0]));
}
"""
# Static-initializer body that populates MATERIAL_PROPERTIES; the generator
# substitutes chained ``.put(...)`` calls for ``{put_statements}``.
MATERIAL_PROPERTIES_STATIC = \
    """ImmutableMap.Builder<Material, PropertyDefs> materialPropertiesBuilder = ImmutableMap.builder();
materialPropertiesBuilder
{put_statements}
;
MATERIAL_PROPERTIES = materialPropertiesBuilder.build();
"""
# Field declaration for the material -> possible-faces lookup.
DIRECTIONAL_POSSIBLE_FACES_CLASS = "static final ImmutableMap<Material, List<BlockFace>> " \
                                   "DIRECTIONAL_POSSIBLE_FACES;\n"
# Static-initializer body that fills DIRECTIONAL_POSSIBLE_FACES.
DIRECTIONAL_POSSIBLE_FACES_STATIC = \
    """ImmutableMap.Builder<Material, List<BlockFace>> possibleFacesBuilder = ImmutableMap.builder();
possibleFacesBuilder
{put_statements}
;
DIRECTIONAL_POSSIBLE_FACES = possibleFacesBuilder.build();
"""
# YAML resource written alongside the code; holds the raw state-id arrays.
STATE_BASE_IDS_YML = \
    """
stateBaseIds: [{base_ids}]
stateDefaultIds: [{default_ids}]
"""
# Static-initializer body that loads the YAML above into the id arrays.
STATE_BASE_IDS_STATIC = \
    """
YamlConfiguration stateIdsYml = YamlConfiguration.loadConfiguration(new InputStreamReader(GeneratedFlatteningData.class.getClassLoader().getResourceAsStream("builtin/baseIds.yml")));
stateIdsYml.getIntegerList("stateBaseIds").toArray(STATE_BASE_IDS);
stateIdsYml.getIntegerList("stateDefaultIds").toArray(STATE_DEFAULT_IDS);
"""
# Field declarations for the state-id arrays; ``{ids_length}`` is filled in
# by the generator.
STATE_BASE_IDS_CLASS = \
    """
static final Integer[] STATE_BASE_IDS = new Integer[{ids_length}];
static final Integer[] STATE_DEFAULT_IDS = new Integer[{ids_length}];
"""
# Field declaration for the numeric-id <-> Material bimap.
MATERIAL_ID_MAP_CLASS = \
    """
static final ImmutableBiMap<Integer, Material> MATERIAL_ID_MAP;
"""
# Static-initializer body that fills MATERIAL_ID_MAP.
MATERIAL_ID_MAP_STATIC = \
    """ImmutableBiMap.Builder<Integer, Material> materialIdMapBuilder = ImmutableBiMap.builder();
materialIdMapBuilder
{put_statements}
;
MATERIAL_ID_MAP = materialIdMapBuilder.build();
"""
| StarcoderdataPython |
3388451 | import os
import datetime
from time import strftime
from hashlib import md5
import uuid
from django.utils.translation import ugettext_lazy as _
from django.core.files.base import ContentFile
def guid_generator(user_id=None, length=32):
    """Return a hexadecimal GUID string.

    With a truthy ``user_id`` the GUID is deterministic: the first 12 hex
    digits of the MD5 of ``str(user_id)`` (note: ``length`` is ignored on
    this branch, matching the original behaviour).  Otherwise a random
    ``uuid4`` is hashed and the first ``length`` hex digits are returned.
    """
    if user_id:
        seed = "%s" % (user_id)
        digits = 12  # per-user GUIDs are fixed at 12 characters
    else:
        seed = str(uuid.uuid4())
        digits = length
    digest = md5(seed.encode('ascii', 'ignore')).hexdigest()
    return digest[:digits]
| StarcoderdataPython |
3275812 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collection of types used across multiple objects and functions.
"""
from io import BufferedIOBase
from pathlib import Path
from typing import Any, NamedTuple, Union
import dask.array as da
import numpy as np
# Imaging Data Types
SixDArray = np.ndarray  # In order STCZYX
# IO Types
PathLike = Union[str, Path]  # filesystem path as str or pathlib.Path
BufferLike = Union[bytes, BufferedIOBase]  # in-memory bytes or a binary stream
FileLike = Union[PathLike, BufferLike]  # anything readable as a file
ArrayLike = Union[np.ndarray, da.core.Array]  # eager numpy or lazy dask array
ImageLike = Union[FileLike, ArrayLike]  # an image supplied as a file or an array
class LoadResults(NamedTuple):
    """Bundle returned by image-loading routines."""

    # Six-dimensional pixel data, dimensions ordered STCZYX.
    data: SixDArray
    # Dimension-order string describing ``data`` -- presumably "STCZYX"; confirm with callers.
    dims: str
    # Reader-specific metadata object (format depends on the loader).
    metadata: Any
| StarcoderdataPython |
1734364 | <filename>scripts/bulk_detectfigures.py
"""Run figure detection on a batch of PDFs.
See ``DetectFigures_Bulk.py --help`` for more information.
"""
import logging
import os
import click
import shutil
from deepfigures import settings
from scripts import build, execute, detectfigures
# Same module as in detectfigures.py in deepfigures_open
# However, this proved to be easiest and least error prone way to use function
def detectfigures(
        output_directory,
        pdf_path,
        skip_dependencies=False):
    """Run figure extraction on one PDF inside the deepfigures CPU docker image.

    Args:
        output_directory: host directory that receives the results.
        pdf_path: path (or name) of the PDF; only its basename is used, and
            the containing directory is taken from ``settings.Input_PDF_dir``.
        skip_dependencies: when False, (re)build the docker images first.
    """
    if not skip_dependencies:
        build.build.callback()
    cpu_docker_img = settings.DEEPFIGURES_IMAGES['cpu']
    # Paths so pdf can be added to docker env correctly
    pdf_directory, pdf_name = os.path.split(pdf_path)
    # Deliberately override the split result: the input root is fixed by the
    # module-level settings (set by bulk_detectfigures).
    pdf_directory = settings.Input_PDF_dir
    # Mount points inside the container.
    internal_output_directory = '/work/host-output'
    internal_pdf_directory = '/work/host-input'
    internal_pdf_path = os.path.join(
        internal_pdf_directory, pdf_name)
    # Look up docker documentation for more explanations about what each -- does
    execute(
        'docker run'
        ' --rm'
        ' --env-file deepfigures-local.env'
        ' --volume {output_directory}:{internal_output_directory}'
        ' --volume {pdf_directory}:{internal_pdf_directory}'
        ' {tag}:{version}'
        ' python3 /work/scripts/rundetection.py'
        ' {internal_output_directory}'
        ' {internal_pdf_path}'.format(
            tag=cpu_docker_img['tag'],
            version=settings.VERSION,
            output_directory=output_directory,
            internal_output_directory=internal_output_directory,
            pdf_directory=pdf_directory,
            internal_pdf_directory=internal_pdf_directory,
            internal_pdf_path=internal_pdf_path),
        logger,
        raise_error=True)
# Module-level logger used by the docker invocations above.
logger = logging.getLogger(__name__)
@click.command(
    context_settings={
        'help_option_names': ['-h', '--help']
    })
@click.argument(
    'output_directory',
    type=click.Path(
        exists=True,
        file_okay=False,
        dir_okay=True,
        resolve_path=True))
@click.argument(
    'input_directory',
    type=click.Path(
        exists=True,
        file_okay=False,
        dir_okay=True,
        resolve_path=True))
def bulk_detectfigures(output_directory, input_directory):
    """Run figure extraction on every PDF in INPUT_DIRECTORY.

    Results go to OUTPUT_DIRECTORY: one folder per PDF (renamed to
    ``<pdf name>_output``) containing a .json with bounding-box coordinates,
    a PDF with the boxes overlaid, and the original PDF.  PDFs whose output
    folder already exists are skipped.
    """
    settings.Local_Output = output_directory  # global output directory
    settings.Input_PDF_dir = input_directory  # global input directory
    for pdf in os.listdir(input_directory):
        if pdf.endswith(".pdf"):
            # Only process pdfs that haven't been processed before.
            if (os.path.isdir(os.path.join(output_directory, os.path.basename(pdf).replace('.pdf', '_output')))):
                print('Skipped pdf because it has already been processed')
                continue
            detectfigures(output_directory, pdf)
    # Rename each result directory to <pdf name>_output; this loops through
    # every directory because the tool's output folder names are not predictable.
    try:
        for result_dir in os.listdir(output_directory):
            result_dir = os.path.join(output_directory, result_dir)
            for entry in os.listdir(result_dir):
                if (os.path.basename(entry).replace('.pdf', '_output') != result_dir and os.path.basename(entry).endswith('.pdf')):
                    shutil.move(result_dir, result_dir.replace(os.path.basename(result_dir), os.path.basename(entry).replace('.pdf', '_output')))
    # BUG FIX: the original wrote ``except NotADirectoryError or FileNotFoundError``,
    # which evaluates to NotADirectoryError alone (``or`` picks the first truthy
    # class), so FileNotFoundError escaped.  A tuple catches both as intended.
    except (NotADirectoryError, FileNotFoundError):
        # Not a directory (or it vanished): just skip it.
        pass
    print('Detect Figures is Done')
# Allow running as a script: ``python bulk_detectfigures.py OUT_DIR IN_DIR``.
if __name__ == '__main__':
    bulk_detectfigures()
| StarcoderdataPython |
59719 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from projects.models import Project
# @python_2_unicode_compatible
# class LineFollowerStage(models.Model):
# order = models.PositiveSmallIntegerField(verbose_name=_("Stage Order"))
# is_current = models.BooleanField(
# verbose_name=_("Is current stage?"), default=False)
# is_final = models.BooleanField(
# verbose_name=_("Is final stage?"), default=False)
# orders_available = models.BooleanField(
# verbose_name=_("Race Orders Availability"), default=False)
# results_available = models.BooleanField(
# verbose_name=_("Race Results Availability"), default=False)
#
# class Meta:
# verbose_name = _("Line Follower Stage")
# verbose_name_plural = _("Line Follower Stages")
# ordering = ["order"]
#
# def __str__(self):
# return "Stage #{}".format(self.order)
@python_2_unicode_compatible
class LineFollowerJuniorStage(models.Model):
    """One stage of the line-follower junior competition.

    Tracks which stage is current/final and whether race orders and results
    have been published for it.
    """

    # 1-based position of this stage within the competition.
    order = models.PositiveSmallIntegerField(verbose_name=_("Stage Order"))
    is_current = models.BooleanField(
        verbose_name=_("Is current stage?"), default=False)
    is_final = models.BooleanField(
        verbose_name=_("Is final stage?"), default=False)
    orders_available = models.BooleanField(
        verbose_name=_("Race Orders Availability"), default=False)
    results_available = models.BooleanField(
        verbose_name=_("Race Results Availability"), default=False)

    class Meta:
        verbose_name = _("Line Follower Junior Stage")
        verbose_name_plural = _("Line Follower Junior Stages")
        # Stages list in competition order by default.
        ordering = ["order"]

    def __str__(self):
        return "Stage #{}".format(self.order)
@python_2_unicode_compatible
class BaseOrder(models.Model):
    """Abstract base for race-order rows; concrete subclasses add a ``project`` FK."""

    # Position of the project in the race running order.
    order = models.PositiveSmallIntegerField(verbose_name=_("Race Order"))

    class Meta:
        abstract = True

    def __str__(self):
        # Relies on the concrete subclass defining ``self.project``.
        return self.project.name
# class LineFollowerRaceOrder(BaseOrder):
# stage = models.ForeignKey(
# LineFollowerStage, verbose_name=_("Line Follower Stage"))
# project = models.ForeignKey(Project, verbose_name=_("Project"))
#
# class Meta:
# verbose_name = _("Line Follower Race Order")
# verbose_name_plural = _("Line Follower Race Orders")
# ordering = ["order"]
# unique_together = (("project", "stage"),)
class LineFollowerJuniorRaceOrder(BaseOrder):
    """Running order of one project within one junior line-follower stage."""

    # NOTE(review): Django >= 2.0 requires an explicit on_delete for
    # ForeignKey -- confirm which Django version this project pins.
    stage = models.ForeignKey(
        LineFollowerJuniorStage, verbose_name=_("Line Follower Junior Stage"))
    project = models.ForeignKey(Project, verbose_name=_("Project"))

    class Meta:
        verbose_name = _("Line Follower Junior Race Order")
        verbose_name_plural = _("Line Follower Junior Race Orders")
        ordering = ["order"]
        # Each project appears at most once per stage.
        unique_together = (("project", "stage"),)
class RaceOrder(BaseOrder):
    """Single running order per project for categories without stages."""

    # One-to-one: a project has exactly one race order here.
    project = models.OneToOneField(Project, verbose_name=_("Project"))

    class Meta:
        verbose_name = _("Race Order")
        verbose_name_plural = _("Race Orders")
        ordering = ["order"]
| StarcoderdataPython |
78758 | import os
from easydict import EasyDict as edict
import torch
import torch.utils.model_zoo as model_zoo
#from torchvision.models.resnet import model_urls
from common_pytorch.base_modules.deconv_head import DeconvHead
from common_pytorch.base_modules.resnet import resnet_spec, ResnetBackbone
from common_pytorch.base_modules.architecture import PoseNet_1branch, PoseNet_2branch
def get_default_network_config():
    """Return the default network configuration as an EasyDict.

    Covers backbone selection, the deconvolution-head layout, and the
    input/output channel settings consumed by get_pose_net()/init_pose_net().
    """
    defaults = {
        # backbone initialisation
        'from_model_zoo': True,
        'pretrained': '',
        'num_layers': 18,
        # default head setting
        'num_deconv_layers': 3,
        'num_deconv_filters': 256,
        'num_deconv_kernel': 4,
        'final_conv_kernel': 1,
        # input
        'input_channel': 3,
        # output
        'depth_dim': 1,
        # associative embedding: number of head branches
        'head_branch': 2,
    }
    return edict(defaults)
# Maps torchvision architecture name -> local checkpoint filename expected
# under ``../../resnet`` (resolved by init_pose_net); despite the name these
# are local files, not URLs.
model_urls = {
    'resnet18': 'resnet18.pth',
    'resnet34': 'resnet34.pth',
    'resnet50': 'resnet50.pth',
    'resnet101': 'resnet101.pth',
    'resnet152': 'resnet152.pth',
    'resnext50_32x4d': 'resnext50.pth',
    'resnext101_32x8d': 'resnext101.pth',
    'wide_resnet50_2': 'wide_resnet50.pth',
    'wide_resnet101_2': 'wide_resnet101.pth',
}
def init_pose_net(pose_net, cfg):
    """Initialize the backbone of ``pose_net`` from local checkpoint files.

    If ``cfg.from_model_zoo`` is set, loads the matching ResNet checkpoint
    from the local ``../../resnet`` directory into the backbone (the fc layer
    is dropped).  Otherwise, loads a full pretrained model from
    ``cfg.pretrained`` when that path exists; missing paths are silently
    ignored on this branch.
    """
    if cfg.from_model_zoo:
        _, _, _, name = resnet_spec[cfg.num_layers]
        # Checkpoints live in a fixed directory relative to this file.
        resnetdir = os.path.join(os.path.dirname(__file__), '../../resnet')
        resnetfile=os.path.join(resnetdir,model_urls[name])
        if not os.path.exists(resnetfile):
            raise ValueError("resnet file:{} is not exists".format(resnetfile))
        org_resnet =torch.load(resnetfile) #model_zoo.load_url(model_urls[name])
        # drop original resnet fc layer; 'None' default avoids KeyError when absent
        org_resnet.pop('fc.weight', None)
        org_resnet.pop('fc.bias', None)
        pose_net.backbone.load_state_dict(org_resnet)
        print("Init Network from model zoo")
    else:
        if os.path.exists(cfg.pretrained):
            model = torch.load(cfg.pretrained)
            pose_net.load_state_dict(model['network'])
            print("Init Network from pretrained", cfg.pretrained)
def get_pose_net(network_cfg, ae_feat_dim, num_point_types):
    """Assemble the pose network: ResNet backbone plus deconvolution head(s).

    Args:
        network_cfg: configuration (see get_default_network_config()).
        ae_feat_dim: channel dimension of the associative-embedding tag map.
        num_point_types: number of keypoint types to predict.

    Returns:
        PoseNet_1branch or PoseNet_2branch, depending on
        ``network_cfg.head_branch``.

    Raises:
        ValueError: if ``network_cfg.head_branch`` is neither 1 nor 2.
    """
    block_type, layers, channels, name = resnet_spec[network_cfg.num_layers]
    backbone_net = ResnetBackbone(block_type, layers, network_cfg.input_channel)
    # The one-branch variant predicts heatmaps and tags from a single head,
    # so it needs twice the output channels.
    out_channel = num_point_types * 2
    if network_cfg.head_branch == 2:
        out_channel = num_point_types
    heatmap_head = DeconvHead(channels[-1], network_cfg.num_deconv_layers, network_cfg.num_deconv_filters,
                              network_cfg.num_deconv_kernel, network_cfg.final_conv_kernel, out_channel, network_cfg.depth_dim)
    # NOTE: to specify 4 to avoid center tag
    tagmap_head = DeconvHead(channels[-1], network_cfg.num_deconv_layers, network_cfg.num_deconv_filters,
                             network_cfg.num_deconv_kernel, network_cfg.final_conv_kernel, 4, ae_feat_dim)
    if network_cfg.head_branch == 1:
        return PoseNet_1branch(backbone_net, heatmap_head)
    elif network_cfg.head_branch == 2:
        return PoseNet_2branch(backbone_net, heatmap_head, tagmap_head)
    else:
        # FIX: the original ended with ``assert 0``, which disappears under
        # ``python -O``; raise an explicit, descriptive error instead.
        raise ValueError('unsupported head_branch: %r' % (network_cfg.head_branch,))
1625416 | <gh_stars>1-10
# MINLP written by GAMS Convert at 04/21/18 13:51:11
#
# Equation counts
# Total E G L N X C B
# 2 1 0 1 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 21 1 0 20 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 41 21 20 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# GAMS-converted MINLP: 20 integer variables i2..i21, each bounded below by
# -100; a linear objective; and one nonconvex quadratic constraint.
model = m = ConcreteModel()
m.i2 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i3 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i4 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i5 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i6 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i7 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i8 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i9 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i10 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i11 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i12 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i13 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i14 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i15 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i16 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i17 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i18 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i19 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i20 = Var(within=Integers,bounds=(-100,None),initialize=0)
m.i21 = Var(within=Integers,bounds=(-100,None),initialize=0)
# Minimize a weighted sum of the variables (weights 19..1, plus 20*i21).
m.obj = Objective(expr= 19*m.i2 + 18*m.i3 + 17*m.i4 + 16*m.i5 + 15*m.i6 + 14*m.i7 + 13*m.i8 + 12*m.i9 + 11*m.i10
                        + 10*m.i11 + 9*m.i12 + 8*m.i13 + 7*m.i14 + 6*m.i15 + 5*m.i16 + 4*m.i17 + 3*m.i18 + 2*m.i19
                        + m.i20 + 20*m.i21, sense=minimize)
# Quadratic constraint (machine-generated by GAMS Convert; the duplicated
# -2*x*y cross terms appear verbatim in the source model).
m.c2 = Constraint(expr=100*m.i20**2 - 98*m.i20 + 100*m.i19**2 - 98*m.i19 + 100*m.i18**2 - 98*m.i18 + 100*m.i17**2 - 98*
                       m.i17 + 100*m.i16**2 - 98*m.i16 + 100*m.i15**2 - 98*m.i15 + 100*m.i14**2 - 98*m.i14 + 100*m.i13**
                       2 - 98*m.i13 + 100*m.i12**2 - 98*m.i12 + 100*m.i11**2 - 98*m.i11 + 100*m.i10**2 - 98*m.i10 + 100*
                       m.i9**2 - 98*m.i9 + 100*m.i8**2 - 98*m.i8 + 100*m.i7**2 - 98*m.i7 + 100*m.i6**2 - 98*m.i6 + 100*
                       m.i5**2 - 98*m.i5 + 100*m.i4**2 - 98*m.i4 + 100*m.i3**2 - 98*m.i3 + 100*m.i2**2 - 98*m.i2 + 100*
                       m.i21**2 - 98*m.i21 - 2*m.i20*m.i19 - 2*m.i20*m.i19 - 2*m.i18*m.i17 - 2*m.i18*m.i17 - 2*m.i16*
                       m.i15 - 2*m.i16*m.i15 - 2*m.i14*m.i13 - 2*m.i14*m.i13 - 2*m.i12*m.i11 - 2*m.i12*m.i11 - 2*m.i10*
                       m.i9 - 2*m.i10*m.i9 - 2*m.i8*m.i7 - 2*m.i8*m.i7 - 2*m.i6*m.i5 - 2*m.i6*m.i5 - 2*m.i4*m.i3 - 2*
                       m.i4*m.i3 - 2*m.i2*m.i21 - 2*m.i2*m.i21 <= -1)
| StarcoderdataPython |
3339216 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os.path
import numpy as np
import pandas as pd
from extract_features import convert
from tqdm import tqdm
from tsfresh import extract_features
from sklearn.linear_model import Lasso
from sklearn.feature_selection import RFE
from tsfresh.utilities.dataframe_functions import impute
def main():
    """Load (or build) the windowed time series, extract tsfresh features on
    a 1% sample of windows, select the best features via RFE, and write the
    selected feature names to best_features.csv.
    """
    if len(sys.argv) < 2:
        print('Usage: ./select_features.py datafile.csv')
        # sys.exit is reliable even when the site builtin ``exit`` is absent.
        sys.exit(1)
    if not os.path.isfile('timeseries.csv') or not os.path.isfile('labels.csv'):
        filename = sys.argv[1]
        raw_price_data = pd.read_csv(filename, index_col=None, header=0, thousands=',')
        # raw_price_data = raw_price_data[raw_price_data.columns[1:]] # get rid of the date columns
        timeseries, labels = convert(raw_price_data)
        # Cache intermediates so later runs can skip the conversion.
        timeseries.to_csv('timeseries.csv', index=False, header=True)
        labels.to_csv('labels.csv', index=False, header=False)
    else:
        print('Intermediate files exist...')
        timeseries = pd.read_csv('timeseries.csv', index_col=None, header=0)
        # NOTE(review): read_csv(squeeze=True) was removed in pandas 2.0; use
        # ``.squeeze("columns")`` on the result if upgrading pandas.
        labels = pd.read_csv('labels.csv', index_col=None, header=None, squeeze=True)
    # Work on a 1% sample of the windows to keep feature extraction fast.
    sample_labels = labels.sample(frac=0.01)
    # FIX: collect the matching windows and concatenate once.  The original
    # called DataFrame.append per iteration, which is quadratic and was
    # removed in pandas 2.0; pd.concat(ignore_index=True) is equivalent to
    # append + reset_index(drop=True).
    window_frames = [timeseries.loc[timeseries['id'] == index]
                     for index in tqdm(sample_labels.index)]
    if window_frames:
        sample_windows = pd.concat(window_frames, ignore_index=True)
    else:
        sample_windows = pd.DataFrame(columns=timeseries.columns)
    sample_features = extract_features(sample_windows, column_id='id', column_sort='time')
    impute(sample_features)  # replaces NaN/inf in place
    best_features = rfe(sample_features, sample_labels, 8)
    print(best_features)
    print(type(best_features))
    # FIX: a pandas Index has no ``savetxt`` method (the original raised
    # AttributeError here); write the selected feature names via numpy.
    np.savetxt('best_features.csv', np.asarray(best_features, dtype=object), fmt='%s')
def rfe(features, labels, num_features):
    """Select ``num_features`` columns of ``features`` via recursive feature
    elimination around a Lasso regressor.

    Args:
        features: DataFrame of candidate features (rows = samples).
        labels: target values aligned with ``features``.
        num_features: number of features to keep.

    Returns:
        pandas Index of the selected feature (column) names.
    """
    estimator = Lasso(alpha=200)
    # FIX: pass n_features_to_select by keyword -- it is keyword-only in
    # scikit-learn >= 1.2 (positional raised TypeError), and the keyword
    # form also works on older versions.
    selector = RFE(estimator, n_features_to_select=num_features, step=10, verbose=10)
    selector = selector.fit(features, labels)
    ranking = np.array(selector.ranking_)
    # Rank 1 marks the surviving (selected) features.
    best_feature_index = np.where(ranking == 1)
    best_features = features.columns[best_feature_index]
    return best_features
# Script entry point; run the feature-selection pipeline.
# FIX: a dataset delimiter (" | StarcoderdataPython |") was fused onto the
# ``main()`` line, which is a SyntaxError; restore the clean guard.
if __name__ == '__main__':
    main()
130606 | <gh_stars>1-10
from json import dumps as toJS, loads as fromJS
import re
class Index(object):
def __init__(self, name, chartOrParent):
self._name = name
if isinstance(chartOrParent, Index):
self._parent = chartOrParent
self._chart = self._parent._chart
else:
self._parent = None
self._chart = chartOrParent
def get(self):
path = []
next = self
while next:
path.append(next)
next = next._parent
val = self._chart._m
path.reverse()
for node in path:
val = val[node._name]
return val
def __getitem__(self, k):
return Index(k, self)
def __setitem__(self, k, v):
self._chart.set(self, k, v)
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except AttributeError:
return self[k]
def __setattr__(self, k, v):
if k[0] == '_':
object.__setattr__(self, k, v)
else:
self[k] = v
def __enter__(self): return self
def __exit__(self, type, value, traceback): pass
def __str__(self):
return "%s > %s" % (self._parent, self._name) if self._parent else self._name
def createKeys(self):
m = self._parent.createKeys() if self._parent else self._chart._m
try:
m[self._name]
except: # If it doesn't exist ("in" checks fail for types like lists)
m[self._name] = {}
return m[self._name]
class Chart(object):
    # Python 2 helper that accumulates Highcharts options in a nested dict
    # (via Index nodes) and prints the <script>/<div> markup for a page.
    def __init__(self, placeholder = None):
        # ``_m`` is the root options mapping serialized to JS in js().
        self._m = {}
        if placeholder:
            self.chart.renderTo = placeholder
    def __getitem__(self, k):
        return Index(k, self)
    def __setitem__(self, k, v):
        self._m[k] = v
    def __getattr__(self, k):
        try:
            return object.__getattr__(self, k)
        except AttributeError:
            # Unknown attributes act as top-level option paths.
            return self[k]
    def __setattr__(self, k, v):
        if k[0] == '_':
            object.__setattr__(self, k, v)
        else:
            self[k] = v
    def __enter__(self): return self
    def __exit__(self, type, value, traceback): pass
    def set(self, index, k, v):
        # Store ``v`` under key ``k`` at the position described by ``index``;
        # dict values are merged into any existing mapping.
        m = index.createKeys()
        if isinstance(v, dict):
            m[k].update(v)
        else:
            m[k] = v
    @staticmethod
    def include():
        # Emit the <script> tags for the Highcharts/Highstock libraries.
        # print "<script type=\"text/javascript\" src=\"/static/highcharts/js/highcharts.js\"></script>"
        print "<script type=\"text/javascript\" src=\"/static/highcharts/highstock/js/highstock.js\"></script>"
        print "<script type=\"text/javascript\" src=\"/static/highcharts/js/highcharts-more.js\"></script>"
    def js(self):
        # Serialize the options to JSON; the re.sub un-quotes embedded
        # "function() {...}" strings so they become real JS callbacks.
        print "<script type=\"text/javascript\">"
        print "$(document).ready(function() {",
        print "new Highcharts.Chart(",
        print re.sub('"(?:\\\\n)*function\(\) {(.*)}(?:\\\\n)*"', lambda match: fromJS(match.group(0)), toJS(self._m, sort_keys = True, indent = 4)),
        print ");",
        print "});"
        print "</script>"
    def placeholder(self):
        # Emit the target <div> whose id matches chart.renderTo.
        print "<div id=\"%s\"></div>" % self._m['chart']['renderTo']
| StarcoderdataPython |
1616536 | # Generated by Django 2.2.5 on 2020-04-09 13:23
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``species`` field from both
    ``expressiondata`` and ``genelist``."""

    dependencies = [
        ('studies', '0035_auto_20200409_1258'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='expressiondata',
            name='species',
        ),
        migrations.RemoveField(
            model_name='genelist',
            name='species',
        ),
    ]
| StarcoderdataPython |
1667312 | <gh_stars>1-10
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))
import waf
# ref : https://cloud.kt.com/portal/openapi-guide/computing_enterprise-Server-server_api_make
# Zone parameter : Mandatory
# 1) KR-CA : KOR-Central A Zone
# 2) KR-CB : KOR-Central B Zone
# 3) KR-M : KOR-Seoul M Zone
# 4) KR-M2 : KOR-Seoul M2 Zone
# simple example
# Create a single standard-spec WAF in the KOR-Seoul M2 zone and print the
# API response.  The commented block shows the equivalent call for KR-CA.
print(waf.createWAF(
    # zone='KR-CA',
    # name='sdktest3',
    # type='single',
    # spec='standard',
    # waf1consoleport='5997',
    # waf1sshport='5998',
    # waf1dbport='5999'
    zone='KR-M2',
    name='sdktest4',
    type='single',
    spec='standard',
    waf1consoleport='5997',
    waf1sshport='5998',
    waf1dbport='5999'
))
| StarcoderdataPython |
1645025 | <filename>problemtools/languages.py<gh_stars>10-100
"""
This module contains functionality for reading and using configuration
of programming languages.
"""
import fnmatch
import re
import string
from . import config
class LanguageConfigError(Exception):
    """Raised when a language configuration is invalid."""
class Language(object):
    """
    Class representing a single language.
    """
    # The set of keys a language specification may contain.
    __KEYS = ['name', 'priority', 'files', 'shebang', 'compile', 'run']
    # Meta-variables that may appear in compile/run command templates.
    __VARIABLES = ['path', 'files', 'binary', 'mainfile', 'mainclass', 'Mainclass', 'memlim']

    def __init__(self, lang_id, lang_spec):
        """Construct language object

        Args:
            lang_id (str): language identifier (lowercase alphanumeric,
                starting with a letter)
            lang_spec (dict): dictionary containing the specification
                of the language.

        Raises:
            LanguageConfigError: if the ID or specification is invalid.
        """
        # BUG FIX: use fullmatch so the *entire* ID must match
        # [a-z][a-z0-9]*.  re.match only anchors at the start, so IDs such
        # as "py thon" or "c++" were previously accepted because a valid
        # prefix matched.
        if not re.fullmatch('[a-z][a-z0-9]*', lang_id):
            raise LanguageConfigError('Invalid language ID "%s"' % lang_id)
        self.lang_id = lang_id
        self.name = None
        self.priority = None
        self.files = None
        self.shebang = None
        self.compile = None
        self.run = None
        self.update(lang_spec)

    def get_source_files(self, file_list):
        """Given a list of files, determine which ones would be considered
        source files for the language.

        A file qualifies if it matches one of the language's glob patterns
        and, when a shebang pattern is configured, its first line matches.

        Args:
            file_list (list of str): list of file names
        """
        return [file_name for file_name in file_list
                if (any(fnmatch.fnmatch(file_name, glob)
                        for glob in self.files)
                    and
                    self.__matches_shebang(file_name))]

    def update(self, values):
        """Update a language specification with new values.

        Args:
            values (dict): dictionary containing new values for some
                subset of the language properties.

        Raises:
            LanguageConfigError: on unknown keys, wrong value types, or an
                inconsistent resulting specification.
        """
        # Check that all provided values are known keys
        for unknown in set(values)-set(Language.__KEYS):
            raise LanguageConfigError(
                'Unknown key "%s" specified for language %s'
                % (unknown, self.lang_id))
        for (key, value) in values.items():
            # Check type: priority is an int, everything else a string.
            if key == 'priority':
                if not isinstance(value, int):
                    raise LanguageConfigError(
                        'Language %s: priority must be integer but is %s.'
                        % (self.lang_id, type(value)))
            else:
                if not isinstance(value, str):
                    raise LanguageConfigError(
                        'Language %s: %s must be string but is %s.'
                        % (self.lang_id, key, type(value)))
            # Save the value
            if key == 'shebang':
                # Compile shebang RE
                self.shebang = re.compile(value)
            elif key == 'files':
                # Split whitespace-separated glob patterns into a list.
                self.files = value.split()
            else:
                # Other keys, just copy the value
                self.__dict__[key] = value
        self.__check()

    def __check(self):
        """Check that the language specification is valid (all mandatory
        fields provided, all metavariables used in compile/run
        commands valid, and uniquely defined entry point.
        """
        # Check that all mandatory fields are provided
        if self.name is None:
            raise LanguageConfigError(
                'Language %s has no name' % self.lang_id)
        if self.priority is None:
            raise LanguageConfigError(
                'Language %s has no priority' % self.lang_id)
        if self.files is None:
            raise LanguageConfigError(
                'Language %s has no files glob' % self.lang_id)
        if self.run is None:
            raise LanguageConfigError(
                'Language %s has no run command' % self.lang_id)
        # Check that all variables appearing are valid
        variables = Language.__variables_in_command(self.run)
        if self.compile is not None:
            variables = variables | Language.__variables_in_command(self.compile)
        for unknown in variables - set(Language.__VARIABLES):
            raise LanguageConfigError(
                'Unknown variable "{%s}" used for language %s'
                % (unknown, self.lang_id))
        # Check for uniquely defined entry point
        entry = variables & set(['binary', 'mainfile', 'mainclass', 'Mainclass'])
        if len(entry) == 0:
            raise LanguageConfigError(
                'No entry point variable used for language %s' % self.lang_id)
        if len(entry) > 1:
            raise LanguageConfigError(
                'More than one entry point type variable used for language %s'
                % self.lang_id)

    @staticmethod
    def __variables_in_command(cmd):
        """List all meta-variables appearing in a string."""
        formatter = string.Formatter()
        # Formatter.parse yields (literal, field, spec, conversion) tuples;
        # the field name is the meta-variable (None for plain text).
        return set(field for _, field, _, _ in formatter.parse(cmd)
                   if field is not None)

    def __matches_shebang(self, filename):
        """Check if a file's first line matches the shebang rule, if any."""
        if self.shebang is None:
            return True
        with open(filename, 'r') as f_in:
            shebang_line = f_in.readline()
        return self.shebang.search(shebang_line) is not None
class Languages(object):
    """A collection of Language objects, keyed by language ID."""

    def __init__(self, data=None):
        """Create a set of languages from a dict.

        Args:
            data (dict): dictionary containing configuration; if None the
                resulting set of languages is empty.  See update() for the
                expected structure.
        """
        self.languages = {}
        if data is not None:
            self.update(data)

    def detect_language(self, file_list):
        """Auto-detect language for a set of files.

        Args:
            file_list (list of str): list of file names

        Returns:
            Language object for the detected language or None if the
            list of files did not match any language in the set.
        """
        best_lang = None
        # Key is (number of matched source files, priority); the sentinel
        # priority 1e99 guarantees a language with zero matches never wins.
        best_key = (0, 1e99)
        for candidate in self.languages.values():
            matched = candidate.get_source_files(file_list)
            key = (len(matched), candidate.priority)
            if key > best_key:
                best_lang = candidate
                best_key = key
        return best_lang

    def update(self, data):
        """Update the set with language configuration data from a dict.

        Args:
            data (dict): dictionary containing configuration.  If it holds
                (possibly partial) configuration for a language already in
                the set, that language's configuration is overridden and
                updated.

        Raises:
            LanguageConfigError: on malformed input or duplicate priorities.
        """
        if not isinstance(data, dict):
            raise LanguageConfigError(
                'Config file error: content must be a dictionary, but is %s.'
                % (type(data)))
        for lang_id, spec in data.items():
            if not isinstance(lang_id, str):
                raise LanguageConfigError(
                    'Config file error: language IDs must be strings, but %s is %s.'
                    % (lang_id, type(lang_id)))
            if not isinstance(spec, dict):
                raise LanguageConfigError(
                    'Config file error: language spec must be a dictionary, but spec of language %s is %s.'
                    % (lang_id, type(spec)))
            existing = self.languages.get(lang_id)
            if existing is None:
                self.languages[lang_id] = Language(lang_id, spec)
            else:
                existing.update(spec)
        # Priorities must be unique across the whole set.
        seen_priorities = {}
        for lang_id, lang in self.languages.items():
            clash = seen_priorities.get(lang.priority)
            if clash is not None:
                raise LanguageConfigError(
                    'Languages %s and %s both have priority %d.'
                    % (lang_id, clash, lang.priority))
            seen_priorities[lang.priority] = lang_id
def load_language_config():
    """Load language configuration.

    Returns:
        Languages object for the set of languages.
    """
    # Configuration is read from 'languages.yaml' via the project-wide
    # config loader.
    return Languages(config.load_config('languages.yaml'))
| StarcoderdataPython |
1777691 | <gh_stars>0
#!/usr/bin/env python
from distutils.core import setup

# Packaging metadata for the sitemap_gen module.
# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools (drop-in `setup`).
setup(name='sitemap_gen',
      version='3',
      description='Sitemap Generator',
      license='BSD',
      author='lusob',
      author_email='<EMAIL>',
      # BUG FIX: the original was missing the comma after `url=...`, which
      # made the whole file a SyntaxError.
      url='https://github.com/lusob/sitemap_gen',
      platforms=['POSIX', 'Windows'],
      py_modules=['sitemap_gen'],
      )
| StarcoderdataPython |
3214685 | import pytest
import mock
def test_pibrella_red_light_on(GPIO, atexit):
    """Toggle the red light and verify the expected GPIO writes.

    GPIO and atexit are mock fixtures injected by the test harness.
    """
    import pibrella
    pibrella.light.red.on()
    pibrella.light.red.off()
    # BUG FIX: `Mock.has_calls(...)` is not an assertion method -- accessing
    # it just creates a child Mock, and calling that returns another Mock,
    # which is always truthy.  The original `assert GPIO.output.has_calls(...)`
    # could therefore never fail.  assert_has_calls() actually verifies the
    # recorded calls (and raises AssertionError on mismatch).
    GPIO.output.assert_has_calls((
        mock.call(pibrella.PB_PIN_LIGHT_RED, True),
        mock.call(pibrella.PB_PIN_LIGHT_RED, False)
    ))
4818453 | from mpkg.common import Soft
from mpkg.utils import Search
class Package(Soft):
    """mpkg software definition for GNU Wget (Windows builds from eternallybored.org)."""
    ID = 'wget'
    def _prepare(self):
        """Populate self.data with binaries, version and per-arch download links."""
        data = self.data
        data.bin = ['wget.exe']
        # Per-architecture download URL templates; {ver} is substituted by Search.
        links = {'32bit': 'https://eternallybored.org/misc/wget/releases/wget-{ver}-win32.zip',
                 '64bit': 'https://eternallybored.org/misc/wget/releases/wget-{ver}-win64.zip'}
        # Release directory listing, sorted by modification time (newest first).
        url = 'https://eternallybored.org/misc/wget/releases/?C=M;O=D'
        # Extract the version number from the newest win64 zip link.
        regex = '>wget-([\\d.-]+)-win64\\.zip</a>'
        data.changelog = 'https://eternallybored.org/misc/wget'
        data.ver = Search(url, regex)
        data.arch = Search(links=links, ver=data.ver)
| StarcoderdataPython |
3392945 | import argparse
import time
import gc
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from models.models import GroupIM
from utils.user_utils import TrainUserDataset, EvalUserDataset
from utils.group_utils import TrainGroupDataset, EvalGroupDataset
from eval.evaluate import evaluate_user, evaluate_group
# --- GPU selection -----------------------------------------------------------
# Pick the CUDA device with the most free memory by parsing `nvidia-smi`.
# NOTE(review): this shells out and leaves a `tmp` file in the CWD; a cleaner
# approach would query torch.cuda.mem_get_info -- TODO confirm portability.
if torch.cuda.is_available():
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
    gpu_id = int(np.argmax(memory_available))
    torch.cuda.set_device(gpu_id)

# --- Command-line arguments --------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch GroupIM: Group Information Maximization for Group Recommendation')
parser.add_argument('--dataset', type=str, default='weeplaces', help='Name of dataset')
# Training settings.
parser.add_argument('--lr', type=float, default=5e-3, help='initial learning rate')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay coefficient')
parser.add_argument('--lambda_mi', type=float, default=1.0, help='MI lambda hyper param')
parser.add_argument('--drop_ratio', type=float, default=0.4, help='Dropout ratio')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--epochs', type=int, default=200, help='maximum # training epochs')
parser.add_argument('--eval_freq', type=int, default=5, help='frequency to evaluate performance on validation set')
# Model settings.
parser.add_argument('--emb_size', type=int, default=64, help='layer size')
parser.add_argument('--aggregator', type=str, default='attention', help='choice of group preference aggregator',
                    choices=['maxpool', 'meanpool', 'attention'])
parser.add_argument('--negs_per_group', type=int, default=5, help='# negative users sampled per group')
# Pre-training settings.
parser.add_argument('--pretrain_user', action='store_true', help='Pre-train user encoder on user-item interactions')
parser.add_argument('--pretrain_mi', action='store_true', help='Pre-train MI estimator for a few epochs')
parser.add_argument('--pretrain_epochs', type=int, default=100, help='# pre-train epochs for user encoder layer')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--seed', type=int, default=1111, help='random seed for reproducibility')
# Model save file parameters.
parser.add_argument('--save', type=str, default='model_user.pt', help='path to save the final model')
parser.add_argument('--save_group', type=str, default='model_group.pt', help='path to save the final model')
args = parser.parse_args()
torch.manual_seed(args.seed)  # Set the random seed manually for reproducibility.
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
###############################################################################
# Load data
###############################################################################
train_params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': 6, 'pin_memory': True}
eval_params = {'batch_size': args.batch_size, 'shuffle': False, 'num_workers': 6, 'pin_memory': True}
device = torch.device("cuda" if args.cuda else "cpu")
# Define train/val/test datasets on user interactions.
train_dataset = TrainUserDataset(args.dataset)  # train dataset for user-item interactions.
n_items, n_users = train_dataset.n_items, train_dataset.n_users
val_dataset = EvalUserDataset(args.dataset, n_items, datatype='val')
test_dataset = EvalUserDataset(args.dataset, n_items, datatype='test')
# Define train/val/test datasets on group and user interactions.
train_group_dataset = TrainGroupDataset(args.dataset, n_items, args.negs_per_group)
padding_idx = train_group_dataset.padding_idx
val_group_dataset = EvalGroupDataset(args.dataset, n_items, padding_idx, datatype='val')
test_group_dataset = EvalGroupDataset(args.dataset, n_items, padding_idx, datatype='test')
# Define data loaders on user interactions.
train_loader = DataLoader(train_dataset, **train_params)
val_loader = DataLoader(val_dataset, **eval_params)
test_loader = DataLoader(test_dataset, **eval_params)
# Define data loaders on group interactions.
train_group_loader = DataLoader(train_group_dataset, **train_params)
val_group_loader = DataLoader(val_group_dataset, **eval_params)
test_group_loader = DataLoader(test_group_dataset, **eval_params)
###############################################################################
# Build the model
###############################################################################
user_layers = [args.emb_size]  # user encoder layer configuration is tunable.
model = GroupIM(n_items, user_layers, drop_ratio=args.drop_ratio, aggregator_type=args.aggregator,
                lambda_mi=args.lambda_mi).to(device)
optimizer_gr = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
best_user_n100, best_group_n100 = -np.inf, -np.inf
print("args", args)
# At any point you can hit Ctrl + C to break out of training early.
try:
    # Phase 1 (optional): pre-train the user preference encoder on
    # user-item interactions only, keeping the best checkpoint by n100.
    if args.pretrain_user:
        optimizer_ur = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=args.wd)
        print("Pre-training model on user-item interactions")
        for epoch in range(0, args.pretrain_epochs):
            epoch_start_time = time.time()
            model.train()
            train_user_loss = 0.0
            start_time = time.time()
            for batch_index, data in enumerate(train_loader):
                optimizer_ur.zero_grad()
                data = [x.to(device, non_blocking=True) for x in data]
                (train_users, train_items) = data
                user_logits, user_embeds = model.user_preference_encoder.pre_train_forward(train_items)
                user_loss = model.user_loss(user_logits, train_items)
                user_loss.backward()
                train_user_loss += user_loss.item()
                optimizer_ur.step()
                del train_users, train_items, user_logits, user_embeds
            elapsed = time.time() - start_time
            print('| epoch {:3d} | time {:4.2f} | loss {:4.2f}'.format(epoch + 1, elapsed,
                                                                       train_user_loss / len(train_loader)))
            if epoch % args.eval_freq == 0:
                val_loss, n100, r20, r50, _ = evaluate_user(model, val_loader, device, mode='pretrain')
                if n100 > best_user_n100:
                    torch.save(model.state_dict(), args.save)
                    best_user_n100 = n100
        print("Load best pre-trained user encoder")
        model.load_state_dict(torch.load(args.save))
        model = model.to(device)
        val_loss, n100, r20, r50, _ = evaluate_user(model, val_loader, device, mode='pretrain')
        print('=' * 89)
        print('| User evaluation | val loss {:4.4f} | n100 {:4.4f} | r20 {:4.4f} | '
              'r50 {:4.4f}'.format(val_loss, n100, r20, r50))
        print("Initializing group recommender with pre-train user encoder")
        # Initialize the group predictor (item embedding) weight based on the pre-trained user predictor.
        model.group_predictor.weight.data = model.user_preference_encoder.user_predictor.weight.data
    # Phase 2 (optional): pre-train the mutual-information discriminator
    # for a fixed 10 epochs before joint training.
    if args.pretrain_mi:
        # pre-train MI estimator.
        for epoch in range(0, 10):
            model.train()
            t = time.time()
            mi_epoch_loss = 0.0
            for batch_index, data in enumerate(train_group_loader):
                data = [x.to(device, non_blocking=True) for x in data]
                group, group_users, group_mask, group_items, user_items, corrupted_user_items = data
                optimizer_gr.zero_grad()
                model.zero_grad()
                model.train()
                _, group_embeds, _ = model(group, group_users, group_mask, user_items)
                obs_user_embed = model.user_preference_encoder(user_items).detach()  # [B, G, D]
                corrupted_user_embed = model.user_preference_encoder(corrupted_user_items).detach()  # [B, # negs, D]
                scores_observed = model.discriminator(group_embeds, obs_user_embed, group_mask)  # [B, G]
                scores_corrupted = model.discriminator(group_embeds, corrupted_user_embed, group_mask)  # [B, # negs]
                mi_loss = model.discriminator.mi_loss(scores_observed, group_mask, scores_corrupted, device=device)
                mi_loss.backward()
                optimizer_gr.step()
                # NOTE(review): this accumulates the loss *tensor* (keeping
                # GPU tensors alive across the epoch); elsewhere the code
                # uses .item() -- consider mi_loss.item() here as well.
                mi_epoch_loss += mi_loss
                del group, group_users, group_mask, group_items, user_items, corrupted_user_items, \
                    obs_user_embed, corrupted_user_embed
            gc.collect()
            print("MI loss: {}".format(float(mi_epoch_loss) / len(train_group_loader)))
    # Phase 3: joint training on group interactions; best checkpoint by
    # group n100 on the validation set is saved to args.save_group.
    optimizer_gr = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    for epoch in range(0, args.epochs):
        epoch_start_time = time.time()
        model.train()
        train_group_epoch_loss = 0.0
        for batch_index, data in enumerate(train_group_loader):
            data = [x.to(device, non_blocking=True) for x in data]
            group, group_users, group_mask, group_items, user_items, corrupted_user_items = data
            optimizer_gr.zero_grad()
            model.zero_grad()
            # NOTE(review): here `group.squeeze()` is passed, but the MI
            # pre-training loop above passes `group` unsqueezed -- verify
            # the model handles both shapes consistently.
            group_logits, group_embeds, scores_ug = model(group.squeeze(), group_users, group_mask, user_items)
            group_loss = model.loss(group_logits, group_embeds, scores_ug, group_mask, group_items, user_items,
                                    corrupted_user_items, device=device)
            group_loss.backward()
            train_group_epoch_loss += group_loss.item()
            optimizer_gr.step()
            del group, group_users, group_mask, group_items, user_items, corrupted_user_items, \
                group_logits, group_embeds, scores_ug
        gc.collect()
        print("Train loss: {}".format(float(train_group_epoch_loss) / len(train_group_loader)))
        if epoch % args.eval_freq == 0:
            # Group evaluation.
            val_loss_group, n100_group, r20_group, r50_group, _ = evaluate_group(model, val_group_loader, device)
            print('-' * 89)
            print('| end of epoch {:3d} | time: {:4.2f}s | n100 (group) {:5.4f} | r20 (group) {:5.4f} | r50 (group) '
                  '{:5.4f}'.format(epoch + 1, time.time() - epoch_start_time, n100_group, r20_group, r50_group))
            print('-' * 89)
            # Save the model if the n100 is the best we've seen so far.
            if n100_group > best_group_n100:
                with open(args.save_group, 'wb') as f:
                    torch.save(model, f)
                best_group_n100 = n100_group
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Load the best saved model.
with open(args.save_group, 'rb') as f:
    model = torch.load(f, map_location='cuda')
    model = model.to(device)
# Final evaluation: user-level metrics on val/test, then group-level
# metrics on val/test, all with the best checkpoint.
# Best validation evaluation
val_loss, n100, r20, r50, _ = evaluate_user(model, val_loader, device, mode='group')
print('=' * 89)
print('| User evaluation | val loss {:4.4f} | n100 {:4.4f} | r20 {:4.4f} | r50 {:4.4f}'
      .format(val_loss, n100, r20, r50))
# Test evaluation
test_loss, n100, r20, r50, _ = evaluate_user(model, test_loader, device, mode='group')
print('=' * 89)
print('| User evaluation | test loss {:4.4f} | n100 {:4.4f} | r20 {:4.4f} | r50 {:4.4f}'
      .format(test_loss, n100, r20, r50))
print('=' * 89)
_, n100_group, r20_group, r50_group, _ = evaluate_group(model, val_group_loader, device)
print('| Group evaluation (val) | n100 (group) {:4.4f} | r20 (group) {:4.4f} | r50 (group) {:4.4f}'
      .format(n100_group, r20_group, r50_group))
print('=' * 89)
_, n100_group, r20_group, r50_group, _ = evaluate_group(model, test_group_loader, device)
print('| Group evaluation (test) | n100 (group) {:4.4f} | r20 (group) {:4.4f} | r50 (group) {:4.4f}'
      .format(n100_group, r20_group, r50_group))
| StarcoderdataPython |
1628411 | import os
import uuid as uuid_module
import weakref
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import pymongo
import bson
except ImportError:
# mongo dependency is optional
pymongo = None
import logging
l = logging.getLogger("ana.datalayer")
class DataLayer(object):
    """Abstract base for UUID-keyed object stores.

    Subclasses implement store_state()/load_state() to persist and
    retrieve states identified by UUID.
    """

    def __init__(self):
        # Weak cache of live state objects; entries disappear once
        # no strong references remain.
        self.uuid_cache = weakref.WeakValueDictionary()
        self._store_type = None
        self.uuid = uuid_module.uuid4()

    def store_state(self, uuid, s):
        """Persist state *s* under *uuid*.  Must be overridden."""
        raise NotImplementedError()

    def load_state(self, uuid):
        """Retrieve the state stored under *uuid*.  Must be overridden."""
        raise NotImplementedError()
class SimpleDataLayer(DataLayer):
    """No-op storage layer: stores are discarded and loads are unsupported."""
    def __init__(self):
        DataLayer.__init__(self)
        self._store_type = 'simple'
    def store_state(self, uuid, s):
        # Intentionally a no-op: nothing is persisted.
        return
    def load_state(self, uuid):
        # ANAError is imported at the bottom of this module
        # (circular-import workaround).
        raise ANAError("SimpleDataLayer does not support state loading.")
class DirDataLayer(DataLayer):
    """Filesystem-backed store: one pickle file per UUID in a directory."""

    def __init__(self, pickle_dir):
        """Create a store rooted at *pickle_dir*, creating it if missing."""
        DataLayer.__init__(self)
        self._store_type = 'pickle'
        self._dir = pickle_dir
        if not os.path.exists(self._dir):
            # BUG FIX: log message said "doesn't exit" instead of
            # "doesn't exist".
            l.warning("Directory '%s' doesn't exist. Creating.", self._dir)
            os.makedirs(self._dir)

    def _path(self, uuid):
        # Pickle file path for a given uuid.
        return os.path.join(self._dir, str(uuid) + '.p')

    def store_state(self, uuid, s):
        """Pickle *s* to <dir>/<uuid>.p."""
        with open(self._path(uuid), 'wb') as f:
            pickle.dump(s, f, protocol=pickle.HIGHEST_PROTOCOL)

    def load_state(self, uuid):
        """Unpickle and return the state stored under *uuid*."""
        with open(self._path(uuid), 'rb') as f:
            return pickle.load(f)
class MongoDataLayer(DataLayer):
    """MongoDB-backed store: pickled states kept in a single collection."""
    def __init__(self, mongo_args, mongo_db='ana', mongo_collection='storage'):
        # mongo_args is unpacked into pymongo.MongoClient(*mongo_args),
        # so it must be a sequence of positional client arguments.
        DataLayer.__init__(self)
        if pymongo is None:
            raise ImportError("pymongo necessary for ANA mongo backend")
        l.debug("Pickling into mongo.")
        self._store_type = 'mongo'
        self._mongo = pymongo.MongoClient(*mongo_args)[mongo_db][mongo_collection]
    def store_state(self, uuid, s):
        # TODO: investigate whether check/insert is faster than
        # upsert (because of latency) and also deal with the race
        # condition here
        # NOTE(review): Cursor.count() was removed in pymongo 4; this code
        # presumably targets pymongo 3.x -- confirm the pinned version.
        if self._mongo.find({'_id': uuid}).limit(1).count(with_limit_and_skip=True) == 0:
            p = pickle.dumps(s, protocol=pickle.HIGHEST_PROTOCOL)
            self._mongo.insert_one({'_id': uuid, 'pickled': bson.binary.Binary(p)})
    def load_state(self, uuid):
        # Raises TypeError (NoneType not subscriptable) if uuid is absent.
        p = self._mongo.find_one({'_id': uuid})['pickled']
        return pickle.loads(p)
class DictDataLayer(DataLayer):
    """In-memory store backed by a plain dict of pickled blobs."""

    def __init__(self, the_dict=None):
        """Wrap *the_dict* as the backing store (a fresh dict if None)."""
        DataLayer.__init__(self)
        self._store_type = 'dict'
        if the_dict is None:
            the_dict = {}
        self._state_store = the_dict

    def store_state(self, uuid, s):
        """Pickle *s* and keep the blob under *uuid*."""
        self._state_store[uuid] = pickle.dumps(s, protocol=pickle.HIGHEST_PROTOCOL)

    def load_state(self, uuid):
        """Unpickle and return the state stored under *uuid*."""
        return pickle.loads(self._state_store[uuid])
from .errors import ANAError
| StarcoderdataPython |
3324498 | <filename>vanilla_GAN/bento_predictor.py
import bentoml
from bentoml.adapters import JsonInput
from bentoml.frameworks.tensorflow import TensorflowSavedModelArtifact
import tensorflow as tf
import importlib.util
import numpy as np
from PIL import Image
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([TensorflowSavedModelArtifact("model")])
class DigitGenerator(bentoml.BentoService):
    """BentoML service that samples one digit image from a saved GAN generator."""
    @bentoml.api(input=JsonInput())
    def generate_image(self, file_stream):
        # The JSON payload (file_stream) is currently unused; generation is
        # driven purely by random noise.
        model = self.artifacts.model.signatures["serving_default"]
        model._num_positional_args = 1
        # 100-dim standard-normal latent vector, batch of 1.
        noise = np.random.normal(0, 1, (1, 100))
        noise = tf.convert_to_tensor(noise, dtype=tf.float32)
        results = model(noise)
        # "dense_3" is the generator's output tensor name; the flat
        # 784-vector is reshaped to a 28x28 image.  NOTE(review): output is
        # presumably still in the generator's activation range (e.g. tanh
        # [-1, 1]) -- confirm before rendering.
        generated_image = results.get("dense_3")[0].numpy().reshape(28, 28)
        return {"digit_generated": generated_image}
| StarcoderdataPython |
3310090 | #!/usr/bin/python
from parsers.baseparser import BaseParser
from parsers.common.evos import parse_unit, parse_evo, parse_item
class EvoParser(BaseParser):
    """Parser for evolution data.

    Expects 'unit', 'evo', 'items' and 'dict' in the raw data.
    """
    required_data = ['unit', 'evo', 'items', 'dict']

    def __init__(self, **data):
        # BUG FIX: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(EvoParser, self).__init__(**data)
        self.file_name = "evo_list.json"

    def run(self):
        """Parse units, items and evolutions into self.parsed_data.

        Raises:
            AttributeError: if any required data section is missing.
        """
        # Check if we have all the data we need
        if not all(name in self.data for name in self.required_data):
            raise AttributeError("Failed to find all required data")
        is_foreign = self.data['isForeign']
        # Units keyed by unit id.
        units_data = {}
        for unit in self.data['unit']:
            unit_data = parse_unit(unit, self.data['dict'], is_foreign)
            units_data[unit_data['id']] = unit_data
        # Items keyed by item id.
        items_data = {}
        for item in self.data['items']:
            item_data = parse_item(item, self.data['dict'], is_foreign)
            items_data[item_data['id']] = item_data
        # Evolutions keyed by unit id; the redundant 'unit_id' field is
        # removed from each payload once used as the key.
        evos_data = {}
        for evo in self.data['evo']:
            evo_data = parse_evo(evo, units_data, items_data, self.data['dict'])
            evos_data[evo_data.pop('unit_id')] = evo_data
        self.parsed_data = evos_data
| StarcoderdataPython |
3274168 | <reponame>dirk-attraktor/pyHtmlGui
from pyhtmlgui import PyHtmlGui, PyHtmlView, Observable
class App(Observable):
    """Application model; Observable so attached views receive change events."""
    pass
class DummyView(PyHtmlView):
    """Inline-template view used to demonstrate auto_reload template updates."""
    TEMPLATE_STR = '''Edit me at runtime and save file, the frontend will update after a few seconds when filesystem changes are detected'''
class AppView(PyHtmlView):
    """Main view rendered from app.html; demonstrates python <-> JS round-trips."""
    TEMPLATE_FILE = "app.html"
    def __init__(self, observedObject, parentView):
        super().__init__(observedObject, parentView)
        self.dummyview = DummyView(observedObject, self)
    def click_me(self): # clicked in the html frontend; calls create_random_string in app.js, then updates the result in the frontend
        # Only call with a callback or ignore the result; don't invoke the
        # returned handle with () here -- that would block the event loop.
        self.call_javascript("create_random_string",["This part was created in python,"])(self.echo_back_to_frontends)
    # if you set shared_instance = True below, and open multiple windows,
    # you will see feedback values from all open frontends here, else every connected frontend has its own AppView Instance
    def echo_back_to_frontends(self, values):
        # Push the JS-generated strings back into every connected frontend.
        #self.eval_javascript('''document.getElementById(args.uid).querySelector('[name="result"').innerHTML = args.innerHTML;''', skip_results=True, uid=self.uid, innerHTML="<br>".join(values))
        self.call_javascript("pyhtmlgui.eval_script", [
            '''document.getElementById(args.uid).querySelector('[name="result"').innerHTML = args.innerHTML;''',
            { "uid": self.uid, "innerHTML": "<br>".join(values) }
        ], skip_results=True)
if __name__ == "__main__":
gui = PyHtmlGui(
appInstance = App(),
appViewClass = AppView,
static_dir = "examples/static",
template_dir = "examples/templates",
main_html = "window.html",
auto_reload = True, # Debug only, if you changed the class template_str or files, the frontend will update views on rumtime. try editing app.html while app is running and waid few seconds
single_instance = True, # if you set this to False, open multiple windows, see one AppView connected to multiple frontends,
)
gui.start(show_frontend=True, block=True)
| StarcoderdataPython |
3219109 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages

# Packaging metadata for aiogram_dialog.
this_directory = path.abspath(path.dirname(__file__))
setup(
    name='aiogram_dialog',
    description='Mini-framework for dialogs on top of aiogram',
    version='0.9.1',
    url='https://github.com/tishka17/aiogram_dialog',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache2',
    classifiers=[
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3'
    ],
    packages=find_packages(include=['aiogram_dialog', 'aiogram_dialog.*']),
    # Pinned below aiogram 3.x, whose API is incompatible with 2.x.
    install_requires=[
        'aiogram<3',
        'jinja2',
    ],
    python_requires=">=3.6",
)
| StarcoderdataPython |
36672 | <reponame>Shelnutt2/TileDB-Py
from __future__ import absolute_import
try:
import pandas as pd
import pandas._testing as tm
import_failed = False
except ImportError:
import_failed = True
import unittest, os
import warnings
import string, random, copy
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pathlib import Path
import tiledb
from tiledb.tests.common import *
# Python 2/3 compatibility shim for the "unicode string" type.
# NOTE(review): `sys` is not imported explicitly here -- presumably it comes
# from the wildcard import of tiledb.tests.common above; confirm.
if sys.version_info > (3, 0):
    str_type = str
else:
    str_type = unicode
def make_dataframe_basic1(col_size=10):
    """Build a col_size-row DataFrame of random mixed-type columns.

    The 'chars' column is guaranteed duplicate-free so it can serve as a
    string dimension.  Random helpers come from tiledb.tests.common.
    """
    # ensure no duplicates when using as string dim
    chars = list()
    for _ in range(col_size):
        next = rand_ascii_bytes(2)
        while next in chars:
            next = rand_ascii_bytes(2)
        chars.append(next)
    data_dict = {
        "time": rand_datetime64_array(col_size),
        "x": np.array([rand_ascii(4).encode("UTF-8") for _ in range(col_size)]),
        "chars": np.array(chars),
        "cccc": np.arange(0, col_size),
        "q": np.array([rand_utf8(np.random.randint(1, 100)) for _ in range(col_size)]),
        "t": np.array([rand_utf8(4) for _ in range(col_size)]),
        "r": np.array(
            [rand_ascii_bytes(np.random.randint(1, 100)) for _ in range(col_size)]
        ),
        "s": np.array([rand_ascii() for _ in range(col_size)]),
        "u": np.array([rand_ascii_bytes().decode() for _ in range(col_size)]),
        "v": np.array([rand_ascii_bytes() for _ in range(col_size)]),
        "vals_int64": np.random.randint(
            dtype_max(np.int64), size=col_size, dtype=np.int64
        ),
        "vals_float64": np.random.rand(col_size),
    }
    # TODO: dump this dataframe to pickle/base64 so that it can be reconstructed if
    # there are weird failures on CI?
    df = pd.DataFrame.from_dict(data_dict)
    return df
def make_dataframe_basic2():
    """Build a small fixed 3-row DataFrame with one column per basic dtype.

    Adapted from the Pandas feather i/o test "test_basic":
    https://github.com/pandas-dev/pandas/blob/master/pandas/tests/io/test_feather.py
    (BSD 3-clause license,
    https://github.com/pandas-dev/pandas/blob/master/LICENSE)
    """
    import pandas as pd

    frame = pd.DataFrame()
    frame["string"] = list("abc")
    frame["int"] = list(range(1, 4))
    frame["uint"] = np.arange(3, 6).astype("u1")
    frame["float"] = np.arange(4.0, 7.0, dtype="float64")
    # TODO: float_with_null / bool_with_null / categorical / tz-aware and
    # null-containing datetime columns were excluded upstream.
    frame["bool"] = [True, False, True]
    frame["dt"] = pd.date_range("20130101", periods=3)
    frame["dtns"] = pd.date_range("20130101", periods=3, freq="ns")
    return frame
def make_dataframe_basic3(col_size=10, time_range=(None, None)):
    """Build a col_size-row frame with 'time', 'double_range' and 'int_vals'.

    time_range is an optional (start, stop) pair forwarded to the random
    datetime generator from tiledb.tests.common.
    """
    start, stop = time_range
    columns = {
        "time": rand_datetime64_array(col_size, start=start, stop=stop),
        "double_range": np.linspace(-1000, 1000, col_size),
        "int_vals": np.random.randint(
            dtype_max(np.int64), size=col_size, dtype=np.int64
        ),
    }
    return pd.DataFrame(columns)
class PandasDataFrameRoundtrip(DiskTestCase):
    def setUp(self):
        # Skip every test in this class when pandas (or pandas._testing)
        # failed to import at module load time.
        if import_failed:
            self.skipTest("Pandas not available")
        else:
            super(PandasDataFrameRoundtrip, self).setUp()
    def test_dataframe_basic_rt1_manual(self):
        """Round-trip make_dataframe_basic1 through a hand-built sparse array."""
        uri = self.path("dataframe_basic_rt1_manual")
        ctx = tiledb.Ctx()
        # Three dimensions: synthetic integer index, datetime, and 'cccc'.
        dom = tiledb.Domain(
            tiledb.Dim(name="i_chars", domain=(0, 10000), tile=10, dtype=np.uint64),
            tiledb.Dim(
                name="datetime",
                domain=(0, np.iinfo(np.uint64).max - 3600 * 1000000000),
                tile=3600 * 1000000000,
                dtype=np.uint64,
            ),
            tiledb.Dim(
                name="cccc",
                domain=(0, dtype_max(np.uint64) - 1),
                tile=dtype_max(np.uint64),
                dtype=np.uint64,
            ),
            ctx=ctx,
        )
        compression = tiledb.FilterList([tiledb.ZstdFilter(level=-1)])
        # One attribute per remaining dataframe column.
        attrs = [
            tiledb.Attr(name="x", dtype="S", filters=compression, ctx=ctx),
            tiledb.Attr(name="chars", dtype="|S2", filters=compression, ctx=ctx),
            tiledb.Attr(name="q", dtype="U", filters=compression, ctx=ctx),
            tiledb.Attr(name="r", dtype="S", filters=compression, ctx=ctx),
            tiledb.Attr(name="s", dtype="U", filters=compression, ctx=ctx),
            tiledb.Attr(
                name="vals_int64", dtype=np.int64, filters=compression, ctx=ctx
            ),
            tiledb.Attr(
                name="vals_float64", dtype=np.float64, filters=compression, ctx=ctx
            ),
            tiledb.Attr(name="t", dtype="U", filters=compression, ctx=ctx),
            tiledb.Attr(name="u", dtype="U", filters=compression, ctx=ctx),
            tiledb.Attr(name="v", dtype="S", filters=compression, ctx=ctx),
        ]
        schema = tiledb.ArraySchema(domain=dom, sparse=True, attrs=attrs, ctx=ctx)
        tiledb.SparseArray.create(uri, schema)
        df = make_dataframe_basic1()
        incr = 0
        with tiledb.SparseArray(uri, "w") as A:
            # Assign each row a sequential integer coordinate for i_chars.
            s_ichars = []
            for s in df["chars"]:
                s_ichars.append(incr)
                incr += 1
            times = df["time"]
            cccc = df["cccc"]
            df = df.drop(columns=["time", "cccc"], axis=1)
            A[s_ichars, times, cccc] = df.to_dict(orient="series")
        with tiledb.SparseArray(uri) as A:
            df_res = pd.DataFrame.from_dict(A[:])
            for col in df.columns:
                # TileDB default return is unordered, so must sort to compare
                assert_array_equal(df[col].sort_values(), df_res[col].sort_values())
    def test_dataframe_basic1(self):
        """Round-trip make_dataframe_basic1 via from_dataframe/open_dataframe."""
        uri = self.path("dataframe_basic_rt1")
        df = make_dataframe_basic1()
        ctx = tiledb.Ctx()
        tiledb.from_dataframe(uri, df, sparse=False, ctx=ctx)
        df_readback = tiledb.open_dataframe(uri)
        tm.assert_frame_equal(df, df_readback)
        # Same round-trip with full_domain=True (unlimited domain).
        uri = self.path("dataframe_basic_rt1_unlimited")
        tiledb.from_dataframe(uri, df, full_domain=True, sparse=False, ctx=ctx)
        df_readback = tiledb.open_dataframe(uri)
        tm.assert_frame_equal(df, df_readback)
        # Also exercise the Arrow-backed .df[] path.
        with tiledb.open(uri) as A:
            df_arrow = A.query(use_arrow=True).df[:]
            tm.assert_frame_equal(df, df_arrow)
    def test_dataframe_basic2(self):
        """Round-trip the fixed-dtype frame from make_dataframe_basic2."""
        uri = self.path("dataframe_basic_rt2")
        df = make_dataframe_basic2()
        tiledb.from_dataframe(uri, df, sparse=False)
        df_readback = tiledb.open_dataframe(uri)
        tm.assert_frame_equal(df, df_readback)
        with tiledb.open(uri) as B:
            # Index dtype may differ via .df[]; only values are compared.
            tm.assert_frame_equal(df, B.df[:], check_index_type=False)
    def test_dataframe_csv_rt1(self):
        """Round-trip a random dataframe through CSV and tiledb.from_csv."""
        def rand_dtype(dtype, size):
            # Random values of *dtype* built from os.urandom bytes.
            import os

            nbytes = size * np.dtype(dtype).itemsize
            randbytes = os.urandom(nbytes)
            return np.frombuffer(randbytes, dtype=dtype)

        uri = self.path("dataframe_csv_rt1")
        os.mkdir(uri)
        col_size = 15
        data_dict = {
            "dates": np.array(
                rand_dtype(np.uint64, col_size), dtype=np.dtype("datetime64[ns]")
            ),
            "float64s": rand_dtype(np.float64, col_size),
            "ints": rand_dtype(np.int64, col_size),
            "strings": [rand_utf8(5) for _ in range(col_size)],
        }
        df_orig = pd.DataFrame.from_dict(data_dict)
        csv_uri = os.path.join(uri, "test.csv")
        # note: encoding must be specified to avoid printing the b'' bytes
        # prefix, see https://github.com/pandas-dev/pandas/issues/9712
        df_orig.to_csv(csv_uri, mode="w")
        csv_array_uri = os.path.join(uri, "tiledb_csv")
        tiledb.from_csv(
            csv_array_uri, csv_uri, index_col=0, parse_dates=[1], sparse=False
        )
        ctx = tiledb.default_ctx()
        df_from_array = tiledb.open_dataframe(csv_array_uri, ctx=ctx)
        tm.assert_frame_equal(df_orig, df_from_array)
        # Test reading via TileDB VFS. The main goal is to support reading
        # from a remote VFS, using local with `file://` prefix as a test for now.
        with tiledb.FileIO(tiledb.VFS(), csv_uri, "rb") as fio:
            csv_uri_unc = "file:///" + csv_uri
            csv_array_uri2 = "file:///" + os.path.join(csv_array_uri + "_2")
            tiledb.from_csv(
                csv_array_uri2, csv_uri_unc, index_col=0, parse_dates=[1], sparse=False
            )
            df_from_array2 = tiledb.open_dataframe(csv_array_uri2)
            tm.assert_frame_equal(df_orig, df_from_array2)
        # test timestamp write
        uri2 = self.path("dataframe_csv_timestamp")
        timestamp = random.randint(0, np.iinfo(np.int64).max)
        tiledb.from_csv(uri2, csv_uri, timestamp=0, index_col=0)
        tiledb.from_pandas(
            uri2,
            df_orig,
            timestamp=timestamp,
            mode="append",
            row_start_idx=0,
            index_col=0,
        )
        # Opening at each write timestamp must report that timestamp.
        with tiledb.open(uri2, timestamp=0) as A:
            self.assertEqual(A.timestamp, 0)
        with tiledb.open(uri2, timestamp=timestamp) as A:
            self.assertEqual(A.timestamp, timestamp)
    def test_dataframe_index_to_sparse_dims(self):
        """Use each column in turn as the index/dimension and verify round-trip."""
        # This test
        # - loops over all of the columns from make_basic_dataframe,
        # - sets the index to the current column
        # - creates a dataframe
        # - check that indexing the nonempty_domain of the resulting
        #   dimension matches the input
        # TODO should find a way to dump the whole dataframe dict to a
        # (print-safe) bytestring in order to debug generated output
        df = make_dataframe_basic1(100)
        for col in df.columns:
            uri = self.path("df_indx_dim+{}".format(str(col)))
            # ensure that all column which will be used as string dim index
            # is sorted, because that is how it will be returned
            if df.dtypes[col] == "O":
                df.sort_values(col, inplace=True)
                # also ensure that string columns are converted to bytes
                # b/c only TILEDB_ASCII supported for string dimension
                if type(df[col][0]) == str_type:
                    df[col] = [x.encode("UTF-8") for x in df[col]]
            new_df = df.drop_duplicates(subset=col)
            new_df.set_index(col, inplace=True)
            tiledb.from_dataframe(uri, new_df, sparse=True)
            with tiledb.open(uri) as A:
                self.assertEqual(A.domain.dim(0).name, col)
                nonempty = A.nonempty_domain()[0]
                res = A.multi_index[nonempty[0] : nonempty[1]]
                # Rebuild the frame with the dimension as its index and
                # compare against the deduplicated input.
                index = pd.Index(res.pop(col), name=col)
                res_df = pd.DataFrame(res, index=index)
                tm.assert_frame_equal(new_df, res_df, check_like=True)
    def test_dataframe_multiindex_dims(self):
        """Round-trip a two-level (time, double_range) MultiIndex dataframe."""
        uri = self.path("df_multiindex_dims")
        col_size = 10
        df = make_dataframe_basic3(col_size)
        df_dict = df.to_dict(orient="series")
        df.set_index(["time", "double_range"], inplace=True)
        tiledb.from_dataframe(uri, df, sparse=True)
        with tiledb.open(uri) as A:
            ned_time = A.nonempty_domain()[0]
            ned_dbl = A.nonempty_domain()[1]
            # multi_index over the full time range, all double_range values.
            res = A.multi_index[slice(*ned_time), :]
            assert_array_equal(res["time"], df_dict["time"])
            assert_array_equal(res["double_range"], df_dict["double_range"])
            assert_array_equal(res["int_vals"], df.int_vals.values)
            # test .df[] indexing
            df_idx_res = A.df[slice(*ned_time), :]
            tm.assert_frame_equal(df_idx_res, df)
            # test .df[] indexing with query
            df_idx_res = A.query(attrs=["int_vals"]).df[slice(*ned_time), :]
            tm.assert_frame_equal(df_idx_res, df)
            # test .df[] with Arrow enabled and disabled
            df_idx_res = A.query(use_arrow=True).df[slice(*ned_time), :]
            tm.assert_frame_equal(df_idx_res, df)
            df_idx_res = A.query(use_arrow=False).df[slice(*ned_time), :]
            tm.assert_frame_equal(df_idx_res, df)
def test_csv_dense(self):
    """Ingest a small CSV into a dense TileDB array, both with an explicit
    integer index column and with no index column at all."""
    col_size = 10
    df_data = {
        "index": np.arange(0, col_size),
        "chars": np.array([rand_ascii(4).encode("UTF-8") for _ in range(col_size)]),
        "vals_float64": np.random.rand(col_size),
    }
    df = pd.DataFrame(df_data).set_index("index")

    # Test 1: basic round-trip
    tmp_dir = self.path("csv_dense")
    os.mkdir(tmp_dir)
    tmp_csv = os.path.join(tmp_dir, "generated.csv")
    df.to_csv(tmp_csv)

    tmp_array = os.path.join(tmp_dir, "array")
    tiledb.from_csv(
        tmp_array,
        tmp_csv,
        index_col=["index"],
        dtype={"index": np.uint64},
        sparse=False,
    )

    # dense ingestion must also succeed without an explicit index column
    tmp_array2 = os.path.join(tmp_dir, "array2")
    tiledb.from_csv(tmp_array2, tmp_csv, sparse=False)
def test_csv_col_to_sparse_dims(self):
    """Ingest CSV columns as sparse dimensions; verify round-trip and the
    `sparse`/`allows_duplicates` keyword behavior (duplicate coordinates
    must raise unless explicitly allowed)."""
    df = make_dataframe_basic3(20)

    # Test 1: basic round-trip
    tmp_dir = self.path("csv_col_to_sparse_dims")
    os.mkdir(tmp_dir)
    tmp_csv = os.path.join(tmp_dir, "generated.csv")
    df.sort_values("time", inplace=True)
    df.to_csv(tmp_csv, index=False)
    df.set_index(["time", "double_range"], inplace=True)

    tmp_array = os.path.join(tmp_dir, "array")
    tiledb.from_csv(
        tmp_array,
        tmp_csv,
        sparse=True,
        index_col=["time", "double_range"],
        parse_dates=["time"],
    )

    df_bk = tiledb.open_dataframe(tmp_array)
    tm.assert_frame_equal(df, df_bk)

    # Test 2: check from_csv `sparse` and `allows_duplicates` keyword args
    df = make_dataframe_basic3(20)
    tmp_csv2 = os.path.join(tmp_dir, "generated2.csv")
    tmp_array2a = os.path.join(tmp_dir, "array2a")
    tmp_array2b = os.path.join(tmp_dir, "array2b")

    # create a duplicate value
    df.loc[0, "int_vals"] = df.int_vals[1]
    df.sort_values("int_vals", inplace=True)
    df.to_csv(tmp_csv2, index=False)

    # try once and make sure error is raised because of duplicate value
    with self.assertRaisesRegex(
        tiledb.TileDBError, "Duplicate coordinates \\(.*\\) are not allowed"
    ):
        tiledb.from_csv(
            tmp_array2a,
            tmp_csv2,
            index_col=["int_vals"],
            sparse=True,
            allows_duplicates=False,
        )

    # try again, check from_csv(allows_duplicates=True, sparse=True)
    tiledb.from_csv(
        tmp_array2b,
        tmp_csv2,
        index_col=["int_vals"],
        parse_dates=["time"],
        sparse=True,
        allows_duplicates=True,
        float_precision="round_trip",
    )

    with tiledb.open(tmp_array2b) as A:
        self.assertTrue(A.schema.sparse)
        res_df = A.df[:]
        # the duplicate value is on the dimension and can be retrieved in arbitrary
        # order. we need to re-sort in order to compare, to avoid spurious failures.
        res_df.sort_values("time", inplace=True)
        cmp_df = df.set_index("int_vals").sort_values(by="time")
        tm.assert_frame_equal(res_df, cmp_df)
def test_dataframe_csv_schema_only(self):
    """Create only the array schema from a CSV (mode='schema_only'), compare
    it against a hand-built reference schema, then append data via both
    from_csv and from_pandas and verify the combined contents."""
    col_size = 10
    df = make_dataframe_basic3(col_size)

    tmp_dir = self.path("csv_schema_only")
    os.mkdir(tmp_dir)
    tmp_csv = os.path.join(tmp_dir, "generated.csv")
    df.sort_values("time", inplace=True)
    df.to_csv(tmp_csv, index=False)

    attrs_filters = tiledb.FilterList([tiledb.ZstdFilter(1)])
    # from_dataframe default is 1, so use 7 here to check
    # the arg is correctly parsed/passed
    coords_filters = tiledb.FilterList([tiledb.ZstdFilter(7)])

    tmp_assert_dir = os.path.join(tmp_dir, "array")
    # this should raise an error: `tile` must be an int or a per-dim dict
    with self.assertRaises(ValueError):
        tiledb.from_csv(tmp_assert_dir, tmp_csv, tile="abc")
    with self.assertRaises(ValueError):
        tiledb.from_csv(tmp_assert_dir, tmp_csv, tile=(3, 1.0))

    tmp_array = os.path.join(tmp_dir, "array")
    tiledb.from_csv(
        tmp_array,
        tmp_csv,
        index_col=["time", "double_range"],
        parse_dates=["time"],
        mode="schema_only",
        capacity=1001,
        sparse=True,
        tile={"time": 5},
        coords_filters=coords_filters,
    )

    t0, t1 = df.time.min(), df.time.max()

    import numpy

    ref_schema = tiledb.ArraySchema(
        domain=tiledb.Domain(
            *[
                tiledb.Dim(
                    name="time",
                    domain=(t0.to_datetime64(), t1.to_datetime64()),
                    tile=5,
                    dtype="datetime64[ns]",
                ),
                tiledb.Dim(
                    name="double_range",
                    domain=(-1000.0, 1000.0),
                    tile=1000,
                    dtype="float64",
                ),
            ]
        ),
        attrs=[
            tiledb.Attr(name="int_vals", dtype="int64", filters=attrs_filters),
        ],
        coords_filters=coords_filters,
        cell_order="row-major",
        tile_order="row-major",
        capacity=1001,
        sparse=True,
        allows_duplicates=False,
    )
    # note: filters omitted

    # schema_only must write only the schema (3 files), no data fragments
    array_nfiles = len(tiledb.VFS().ls(tmp_array))
    self.assertEqual(array_nfiles, 3)

    with tiledb.open(tmp_array) as A:
        self.assertEqual(A.schema, ref_schema)

        # TODO currently no equality check for filters
        self.assertEqual(A.schema.coords_filters[0].level, coords_filters[0].level)
        self.assertEqual(A.schema.attr(0).filters[0].level, attrs_filters[0].level)

    # Test mode='append' for from_csv
    tiledb.from_csv(tmp_array, tmp_csv, mode="append", row_start_idx=0)
    df2 = make_dataframe_basic3(10, time_range=(t0, t1))
    df2.sort_values("time", inplace=True)
    df2.set_index(["time", "double_range"], inplace=True)

    # Test mode='append' for from_pandas
    tiledb.from_pandas(tmp_array, df2, row_start_idx=len(df2), mode="append")

    with tiledb.open(tmp_array) as A:
        df_bk = A.df[:]

    df.set_index(["time", "double_range"], inplace=True)
    df_combined = pd.concat([df, df2])
    df_combined.sort_index(level="time", inplace=True)
    df_bk.sort_index(level="time", inplace=True)
    tm.assert_frame_equal(df_bk, df_combined)
def test_dataframe_csv_chunked(self):
    """Ingest a CSV in chunks (chunksize) into both sparse and dense arrays
    and verify all read paths, including .df[], query(attrs=...), Arrow,
    and coords=False."""
    col_size = 200
    df = make_dataframe_basic3(col_size)

    tmp_dir = self.path("csv_chunked")
    os.mkdir(tmp_dir)
    tmp_csv = os.path.join(tmp_dir, "generated.csv")
    df.sort_values("time", inplace=True)
    df.to_csv(tmp_csv, index=False)

    # Test sparse chunked
    tmp_array = os.path.join(tmp_dir, "array")
    tiledb.from_csv(
        tmp_array,
        tmp_csv,
        index_col=["double_range"],
        parse_dates=["time"],
        date_spec={"time": "%Y-%m-%dT%H:%M:%S.%f"},
        chunksize=10,
        sparse=True,
    )

    with tiledb.open(tmp_array) as A:
        res = A[:]
        df_bk = pd.DataFrame(res)
        df_bk.set_index(["double_range"], inplace=True)

        df_ck = df.set_index(["double_range"])
        tm.assert_frame_equal(df_bk, df_ck)

    # Test dense chunked
    tmp_array_dense = os.path.join(tmp_dir, "array_dense")
    tiledb.from_csv(
        tmp_array_dense, tmp_csv, parse_dates=["time"], sparse=False, chunksize=25
    )

    with tiledb.open(tmp_array_dense) as A:
        # with sparse=False and no index column, we expect to have unlimited domain
        self.assertEqual(A.schema.domain.dim(0).domain[1], 18446744073709541615)

        # chunked writes go to unlimited domain, so we must only read nonempty
        ned = A.nonempty_domain()[0]
        # TODO should support numpy scalar here
        res = A.multi_index[int(ned[0]) : int(ned[1])]
        df_bk = pd.DataFrame(res)
        tm.assert_frame_equal(df_bk, df)

        # test .df[] indexing
        df_idx_res = A.df[int(ned[0]) : int(ned[1])]
        tm.assert_frame_equal(df_idx_res, df, check_index_type=False)

        # test .df[] indexing with query
        df_idx_res = A.query(attrs=["time"]).df[int(ned[0]) : int(ned[1])]
        tm.assert_frame_equal(df_idx_res, df[["time"]])
        df_idx_res = A.query(attrs=["double_range"]).df[int(ned[0]) : int(ned[1])]
        tm.assert_frame_equal(df_idx_res, df[["double_range"]])

        # test .df[] indexing with arrow
        df_idx_res = A.query(use_arrow=True, attrs=["time"]).df[
            int(ned[0]) : int(ned[1])
        ]
        tm.assert_frame_equal(df_idx_res, df[["time"]])
        df_idx_res = A.query(use_arrow=True, attrs=["double_range"]).df[
            int(ned[0]) : int(ned[1])
        ]
        tm.assert_frame_equal(df_idx_res, df[["double_range"]])

        # disable coordinate dimension/index
        df_idx_res = A.query(coords=False).df[int(ned[0]) : int(ned[1])]
        tm.assert_frame_equal(df_idx_res, df.reset_index(drop=True))
def test_csv_fillna(self):
    """Verify the from_csv `fillna` option replaces NaN on ingestion, and
    that `column_types` can coerce the filled column to a nullable dtype."""
    col_size = 10
    data = np.random.rand(10) * 100  # make some integers for the 2nd test
    data[4] = np.nan
    df = pd.DataFrame({"v": data})

    tmp_dir = self.path("csv_fillna")
    os.mkdir(tmp_dir)
    tmp_csv = os.path.join(tmp_dir, "generated.csv")
    df.to_csv(tmp_csv, index=False, na_rep="NaN")

    tmp_array = os.path.join(tmp_dir, "array")
    # TODO: test Dense too
    tiledb.from_csv(tmp_array, tmp_csv, fillna={"v": 0}, sparse=True)

    def check_array(path, df):
        # update the value in the original dataframe to match what we expect on read-back
        df["v"][4] = 0

        with tiledb.open(path) as A:
            df_bk = A.df[:]
            tm.assert_frame_equal(df_bk, df, check_index_type=False)

    # deepcopy so check_array's in-place edit doesn't touch the original df
    check_array(tmp_array, copy.deepcopy(df))

    # Test writing a StringDtype in newer pandas versions
    if hasattr(pd, "StringDtype"):
        tmp_array2 = os.path.join(tmp_dir, "array2")
        tiledb.from_csv(
            tmp_array2,
            tmp_csv,
            fillna={"v": 0},
            column_types={"v": pd.Int64Dtype},
            sparse=True,
        )
        df_to_check = copy.deepcopy(df)
        df_to_check["v"][4] = 0
        df_to_check = df_to_check.astype({"v": np.int64})
        check_array(tmp_array2, df_to_check)
def test_csv_multi_file(self):
    """Ingest many CSV files in one from_csv call with chunked writes; check
    the expected fragment count and the combined, sorted contents."""
    col_size = 10
    csv_dir = self.path("csv_multi_dir")
    os.mkdir(csv_dir)

    # Write a set of CSVs with 10 rows each
    input_dfs = list()
    for i in range(20):
        df = make_dataframe_basic3(col_size)
        output_path = os.path.join(csv_dir, "csv_{}.csv".format(i))
        df.to_csv(output_path, index=False)
        input_dfs.append(df)

    tmp_dir = self.path("csv_multi_array_dir")
    os.mkdir(tmp_dir)

    # Create TileDB array with flush every 25 rows
    csv_paths = glob.glob(csv_dir + "/*.csv")
    tmp_array = os.path.join(tmp_dir, "array")
    tiledb.from_csv(
        tmp_array,
        csv_paths,
        index_col=["time"],
        parse_dates=["time"],
        chunksize=25,
        sparse=True,
    )

    # Check number of fragments
    # * should equal 8 based on chunksize=25
    # * 20 files, 10 rows each, 200 rows == 8 writes:
    fragments = glob.glob(tmp_array + "/*.ok")
    self.assertEqual(len(fragments), 8)

    # Check the returned data
    # note: tiledb returns sorted values
    df_orig = pd.concat(input_dfs, axis=0).set_index(["time"]).sort_values("time")

    with tiledb.open(tmp_array) as A:
        df_bk = A.df[:]
        # TileDB default return is unordered, so sort to compare
        df_bk = df_bk.sort_index()
        tm.assert_frame_equal(df_bk, df_orig)
def test_dataframe_misc(self):
    """Miscellaneous from_pandas checks: tiny domain range, inferred string
    dimension dtype, and attr/dim filter overrides (global and per-name)."""
    uri = self.path("test_small_domain_range")
    df = pd.DataFrame({"data": [2]}, index=[0])
    tiledb.from_pandas(uri, df)

    data = {
        "data": np.array([1, 2, 3]),
        "raw": np.array([4, 5, 6]),
        "index": np.array(["a", "b", "c"], dtype=np.dtype("|S")),
        "indey": np.array([0.0, 0.5, 0.9]),
    }
    df = pd.DataFrame.from_dict(data)
    df = df.set_index(["index", "indey"])
    uri = self.path("test_string_index_infer")
    tiledb.from_pandas(uri, df)
    with tiledb.open(uri) as A:
        self.assertTrue(A.schema.domain.dim(0).dtype == np.dtype("|S"))

    # test setting Attr and Dim filter list by override
    uri = self.path("test_df_attrs_filters1")
    bz_filter = [tiledb.Bzip2Filter(4)]
    def_filter = [tiledb.GzipFilter(-1)]  # NOTE(review): unused — candidate for removal
    tiledb.from_pandas(uri, df, attr_filters=bz_filter, dim_filters=bz_filter)
    with tiledb.open(uri) as A:
        self.assertTrue(A.schema.attr("data").filters == bz_filter)
        self.assertTrue(A.schema.attr("raw").filters == bz_filter)
        self.assertTrue(A.schema.domain.dim("index").filters == bz_filter)
        self.assertTrue(A.schema.domain.dim("indey").filters == bz_filter)

    # test setting Attr and Dim filter list by dict; unnamed attrs/dims keep defaults
    uri = self.path("test_df_attrs_filters2")
    tiledb.from_pandas(
        uri, df, attr_filters={"data": bz_filter}, dim_filters={"index": bz_filter}
    )
    with tiledb.open(uri) as A:
        self.assertTrue(A.schema.attr("data").filters == bz_filter)
        self.assertTrue(A.schema.attr("raw").filters == tiledb.FilterList())
        self.assertTrue(A.schema.domain.dim("index").filters == bz_filter)
        self.assertTrue(A.schema.domain.dim("indey").filters == tiledb.FilterList())
def test_dataframe_query(self):
    """Exercise Array.query() projections: invalid names raise; dims/attrs
    selection, index_col=None (RangeIndex), dims=False, and an attribute
    used as the index column."""
    uri = self.path("df_query")
    col_size = 10
    df = make_dataframe_basic3(col_size)
    df.set_index(["time"], inplace=True)

    tiledb.from_dataframe(uri, df, sparse=True)

    with tiledb.open(uri) as A:
        with self.assertRaises(tiledb.TileDBError):
            A.query(dims=["nodimnodim"])
        with self.assertRaises(tiledb.TileDBError):
            A.query(attrs=["noattrnoattr"])

        res_df = A.query(dims=["time"], attrs=["int_vals"]).df[:]
        self.assertTrue("time" == res_df.index.name)
        self.assertTrue("int_vals" in res_df)
        self.assertTrue("double_range" not in res_df)

        # try index_col alone: should have *only* the default RangeIndex column
        res_df2 = A.query(index_col=None).df[:]
        self.assertTrue(isinstance(res_df2.index, pd.RangeIndex))

        # try no dims, index_col None: should only value cols and default index
        res_df3 = A.query(dims=False, index_col=None).df[:]
        self.assertTrue("time" not in res_df3)
        self.assertTrue("int_vals" in res_df3)
        self.assertTrue("double_range" in res_df3)
        self.assertTrue(isinstance(res_df3.index, pd.RangeIndex))

        # try attr as index_col:
        res_df4 = A.query(dims=False, index_col=["int_vals"]).df[:]
        self.assertTrue("time" not in res_df4)
        self.assertTrue("double_range" in res_df4)
        self.assertTrue("int_vals" == res_df4.index.name)
def test_read_parquet(self):
    """Round-trip several reference DataFrames through Parquet and
    tiledb.from_parquet, comparing against the original."""
    uri = Path(self.path("test_read_parquet"))
    os.mkdir(uri)

    # NOTE(review): pq_args={} is a mutable default argument; it is never
    # mutated here so it is benign, but pq_args=None would be safer.
    def try_rt(name, df, pq_args={}):
        tdb_uri = str(uri.joinpath(f"{name}.tdb"))
        pq_uri = str(uri.joinpath(f"{name}.pq"))

        df.to_parquet(
            pq_uri,
            # this is required to losslessly serialize timestamps
            # until Parquet 2.0 is default.
            use_deprecated_int96_timestamps=True,
            **pq_args,
        )

        tiledb.from_parquet(str(tdb_uri), str(pq_uri))

        with tiledb.open(tdb_uri) as T:
            tm.assert_frame_equal(df, T.df[:], check_index_type=False)

    basic1 = make_dataframe_basic1()
    try_rt("basic1", basic1)
    try_rt("basic2", make_dataframe_basic2())

    basic3 = make_dataframe_basic3()
    try_rt("basic3", basic3)
def test_nullable_integers(self):
    """Round-trip every pandas nullable integer dtype (Int8..UInt64) with
    randomly injected nulls through from_pandas."""
    nullable_int_dtypes = (
        pd.Int64Dtype(),
        pd.Int32Dtype(),
        pd.Int16Dtype(),
        pd.Int8Dtype(),
        pd.UInt64Dtype(),
        pd.UInt32Dtype(),
        pd.UInt16Dtype(),
        pd.UInt8Dtype(),
    )

    col_size = 100
    null_count = 20

    for pdtype in nullable_int_dtypes:
        uri = self.path(f"test_nullable_{str(pdtype)}")

        nptype = pdtype.numpy_dtype
        # object dtype so individual entries can be replaced with None
        data = np.random.randint(
            dtype_max(nptype), size=col_size, dtype=nptype
        ).astype("O")

        null_idxs = np.random.randint(col_size, size=null_count)
        data[null_idxs] = None

        series = pd.Series(data, dtype=pdtype)
        df = pd.DataFrame({"data": series})

        tiledb.from_pandas(uri, df)

        with tiledb.open(uri) as A:
            tm.assert_frame_equal(df, A.df[:], check_index_type=False)
def test_nullable_bool(self):
    """Round-trip the pandas nullable 'boolean' dtype (with injected nulls)
    through from_pandas."""
    uri = self.path("test_nullable_bool")
    col_size = 100
    null_count = 20

    # object dtype so individual entries can be replaced with None
    data = np.random.randint(2, size=col_size, dtype=np.uint8).astype("O")
    null_idxs = np.random.randint(col_size, size=null_count)
    data[null_idxs] = None

    series = pd.Series(data, dtype="boolean")
    df = pd.DataFrame({"data": series})

    tiledb.from_pandas(uri, df)

    with tiledb.open(uri) as A:
        tm.assert_frame_equal(df, A.df[:], check_index_type=False)
| StarcoderdataPython |
4813833 | <gh_stars>10-100
from dataclasses import dataclass
from datetime import timedelta
import librosa
import numpy as np
from omegaconf import MISSING
from vad.data_models.audio_data import AudioData
@dataclass
class SilenceRemoverConfig:
    """Configuration for :class:`SilenceRemover`."""
    # dB (relative to peak) below which a segment is treated as silence;
    # MISSING means the value must be supplied via the omegaconf config.
    silence_threshold_db: float = MISSING
class SilenceRemover:
    """Removes silent segments from audio based on an energy threshold."""

    config: SilenceRemoverConfig

    def __init__(self, config: SilenceRemoverConfig):
        self.config = config

    def remove_silence(self, audio_data: AudioData):
        """Return a new AudioData containing only the non-silent intervals.

        librosa.effects.split yields (start, end) sample-index pairs for the
        regions louder than `silence_threshold_db`; those regions are
        concatenated back-to-back and the duration recomputed.
        """
        intervals = librosa.effects.split(
            audio_data.audio, top_db=self.config.silence_threshold_db
        )
        voiced = np.concatenate(
            [audio_data.audio[begin:end] for begin, end in intervals]
        )
        return AudioData(
            audio=voiced,
            sample_rate=audio_data.sample_rate,
            duration=timedelta(seconds=len(voiced) / audio_data.sample_rate),
        )
| StarcoderdataPython |
1798834 | <reponame>dashawn888/jmeter_api
import logging
from typing import List, Optional, Union
from xml.etree.ElementTree import Element
from jmeter_api.basics.config.elements import BasicConfig
from jmeter_api.basics.utils import Renderable, FileEncoding, tree_to_str
class Header(Renderable):
    """A single HTTP header (name/value pair) rendered via header.xml."""

    TEMPLATE = 'header.xml'
    root_element_name = 'elementProp'

    def __init__(self, *, name: str, value: str):
        self.name = name
        self.value = value

    @property
    def name(self) -> str:
        """Header name (e.g. 'Content-Type')."""
        return self._name

    @name.setter
    def name(self, value):
        # reject non-strings early so rendering never fails later
        if not isinstance(value, str):
            raise TypeError(
                f'name must be str. {type(value).__name__} was given')
        self._name = value

    @property
    def value(self) -> str:
        """Header value string."""
        return self._value

    @value.setter
    def value(self, value):
        if not isinstance(value, str):
            raise TypeError(
                f'value must be str. {type(value).__name__} was given')
        self._value = value

    def to_xml(self) -> str:
        """Render this header into the elementProp XML template."""
        xml_tree: Optional[Element] = super().get_template()
        element_root = xml_tree.find(self.root_element_name)
        element_root.attrib['name'] = self.name

        for element in list(element_root):
            try:
                field = element.attrib['name']
            except KeyError:
                # template element without a 'name' attribute: log and skip
                logging.error(
                    f'Unable to properly convert {self.__class__} to xml.')
                continue
            if field == 'Header.name':
                element.text = self.name
            elif field == 'Header.value':
                element.text = self.value
        return tree_to_str(xml_tree)
class HTTPHeaderManager(BasicConfig, Renderable):
    """JMeter HTTP Header Manager config element holding a list of Headers.

    Headers may be given as Header instances or as single-entry dicts
    ({"name": "value"}), which are converted to Header objects.
    """

    root_element_name = 'HeaderManager'

    def __init__(self, *,
                 headers: Union[List[Header], dict] = None,
                 name: str = 'HTTP Header Manager',
                 comments: str = '',
                 is_enabled: bool = True):
        # BUG FIX: the previous default `headers=[]` was a shared mutable
        # default argument; use None and normalize to a fresh list instead.
        self.headers = headers if headers is not None else []
        super().__init__(name=name, comments=comments, is_enabled=is_enabled)

    @property
    def headers(self) -> List[Header]:
        """The managed headers, always as Header instances."""
        return self._headers

    @headers.setter
    def headers(self, value):
        if not isinstance(value, List):
            raise TypeError(
                f'headers must be List. {type(value).__name__} was given')
        # Build a new list instead of mutating the caller's list in place
        # (the previous implementation rewrote value[i] in the input list).
        normalized = []
        for item in value:
            if not isinstance(item, (Header, dict)):
                # BUG FIX: report the offending element's type, not the list's
                raise TypeError(
                    f'headers must contain Header or dict. {type(item).__name__} was given')
            if isinstance(item, dict):
                if not len(item) == 1:
                    raise ValueError('dict must be like "header":"value"')
                h = list(item)[0]
                normalized.append(Header(name=h, value=item[h]))
            else:
                normalized.append(item)
        self._headers = normalized

    def to_xml(self) -> str:
        """Render the manager and all of its headers into XML."""
        element_root, xml_tree = super()._add_basics()
        for element in list(element_root):
            try:
                if element.attrib['name'] == 'HeaderManager.headers':
                    element.text = ''
                    for arg in self.headers:
                        element.text += arg.to_xml()
            except KeyError:
                logging.error(
                    f'Unable to properly convert {self.__class__} to xml.')
        return tree_to_str(xml_tree)
| StarcoderdataPython |
import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup

README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''

# Extract the version string (e.g. "1.2.3") from the package's _version.py
# without importing the package. Path.read_text closes the file handle; the
# previous bare open(...).read() calls leaked them (and pathlib was imported
# but unused).
r = re.search(VERSION_REGEXP, pathlib.Path(VERSION_FILE).read_text(), re.M)
if r is None:
    raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')
version = r.group(1)

long_description = pathlib.Path(README_FILE).read_text(encoding='utf-8')
install_requires = [
    str(req)
    for req in parse_requirements(pathlib.Path(REQUIREMENTS_FILE).read_text())
]

setup(
    name='mtg',
    version=version,
    description='mtg is a collection of data science and ml projects for Magic:the Gathering',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/RyanSaxe/mtg',
    packages=find_packages(),
    install_requires=install_requires,
)
| StarcoderdataPython |
166387 | import collections
import os
import tempfile
import pytest # noqa
import anwesende.utils.excel as aue
the_3by3_file = "anwesende/utils/tests/data/3by3.xlsx"
def test_read_excel_as_columnsdict():
    """Reading the 3x3 fixture yields one list per column, typed per cell."""
    cd = aue.read_excel_as_columnsdict(the_3by3_file)
    assert set(cd.keys()) == set(["A-str", "B-int", "C-str"])
    assert cd['A-str'] == ["string1", "string2"]
    assert cd['B-int'] == [1, 4711]
    assert cd['C-str'] == [None, 4711]  # Ouch!:
    # the C-str column has cell format "Text", so it should return
    # "4711", not 4711.
    # We better prepare to receive either int or str, which also
    # means we need to rely less on authors not to change the xlsx template
def test_write_excel_from_rowslists():
    """Write namedtuple rows to a temp xlsx, read them back, compare."""
    TestTuple = collections.namedtuple('TestTuple', 'a b c dee')
    testdata = dict(test=[
        TestTuple(a="a1", b="b1", c="c1", dee="d1"),
        TestTuple(a="a2", b="b2", c="c2", dee="d2"),
    ])
    # delete=False so the file survives the `with` for writing/reading below
    with tempfile.NamedTemporaryFile(prefix="test", suffix=".xlsx",
                                     delete=False) as fh:
        filename = fh.name  # file is deleted in 'finally' clause
    print(filename)
    try:
        aue.write_excel_from_rowslists(filename, testdata)
        # import time
        # time.sleep(60)
        columns = aue.read_excel_as_columnsdict(filename)
        print(columns)
        # field names of the namedtuple become the column headers
        assert list(columns.keys()) == ['a', 'b', 'c', 'dee']
        assert len(columns['dee']) == 2
        assert columns['c'][1] == "c2"
    finally:
        os.unlink(filename)
| StarcoderdataPython |
1736342 | import os
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
from slicer.util import VTKObservationMixin
# from Resources import HomeResourcesResources
class Home(ScriptedLoadableModule):
    """Module descriptor for the custom application's Home module.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        # Metadata shown in Slicer's module selector / about panels.
        self.parent.title = "Home"  # TODO make this more human readable by adding spaces
        self.parent.categories = [""]
        self.parent.dependencies = []
        self.parent.contributors = ["<NAME> (Kitware Inc.)"]
        self.parent.helpText = """This is the Home module for the custom application"""
        self.parent.helpText += self.getDefaultModuleDocumentationLink()
        self.parent.acknowledgementText = """..."""  # replace with organization, grant and thanks.
class HomeWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
    """GUI widget for the Home module: loads the Qt-Designer UI, trims the
    stock Slicer UI down to a custom toolbar, and applies a custom style.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        ScriptedLoadableModuleWidget.__init__(self, parent)
        VTKObservationMixin.__init__(self)

    def setup(self):
        ScriptedLoadableModuleWidget.setup(self)

        # Load widget from .ui file (created by Qt Designer)
        self.uiWidget = slicer.util.loadUI(self.resourcePath('UI/Home.ui'))
        self.layout.addWidget(self.uiWidget)
        self.ui = slicer.util.childWidgetVariables(self.uiWidget)

        # Remove unneeded UI elements
        self.modifyWindowUI()

        # Create logic class
        self.logic = HomeLogic()

        # setup scene defaults
        self.setupNodes()

        # Dark palette does not propagate on its own?
        self.uiWidget.setPalette(slicer.util.mainWindow().style().standardPalette())

        # Apply style
        self.applyApplicationStyle()

    def setupNodes(self):
        # Set up the layout / 3D View
        self.logic.setup3DView()
        self.logic.setupSliceViewers()

    def onClose(self, unusedOne, unusedTwo):
        # no teardown needed; hook kept for the module framework
        pass

    def cleanup(self):
        pass

    def hideSlicerUI(self):
        """Hide stock Slicer chrome, keeping only a few toolbars."""
        slicer.util.setDataProbeVisible(False)
        slicer.util.setMenuBarsVisible(False, ignore=['MainToolBar', 'ViewToolBar'])
        slicer.util.setModuleHelpSectionVisible(False)
        slicer.util.setModulePanelTitleVisible(False)
        slicer.util.setPythonConsoleVisible(False)
        slicer.util.setToolbarsVisible(True)
        mainToolBar = slicer.util.findChild(slicer.util.mainWindow(), 'MainToolBar')
        keepToolbars = [
            slicer.util.findChild(slicer.util.mainWindow(), 'MainToolBar'),
            slicer.util.findChild(slicer.util.mainWindow(), 'ViewToolBar'),
            slicer.util.findChild(slicer.util.mainWindow(), 'CustomToolBar'),
        ]
        slicer.util.setToolbarsVisible(False, keepToolbars)

    def showSlicerUI(self):
        """Restore the stock Slicer chrome hidden by hideSlicerUI()."""
        slicer.util.setDataProbeVisible(True)
        slicer.util.setMenuBarsVisible(True)
        slicer.util.setModuleHelpSectionVisible(True)
        slicer.util.setModulePanelTitleVisible(True)
        slicer.util.setPythonConsoleVisible(True)
        slicer.util.setToolbarsVisible(True)

    def modifyWindowUI(self):
        """Add the custom toolbar + settings dialog, then hide stock UI."""
        slicer.util.setModuleHelpSectionVisible(False)

        mainToolBar = slicer.util.findChild(slicer.util.mainWindow(), 'MainToolBar')
        self.CustomToolBar = qt.QToolBar("CustomToolBar")
        self.CustomToolBar.name = "CustomToolBar"
        slicer.util.mainWindow().insertToolBar(mainToolBar, self.CustomToolBar)
        # central = slicer.util.findChild(slicer.util.mainWindow(), name='CentralWidget')
        # central.setStyleSheet("background-color: #464449")

        gearIcon = qt.QIcon(self.resourcePath('Icons/Gears.png'))
        self.settingsAction = self.CustomToolBar.addAction(gearIcon, "")

        self.settingsDialog = slicer.util.loadUI(self.resourcePath('UI/Settings.ui'))
        self.settingsUI = slicer.util.childWidgetVariables(self.settingsDialog)

        self.settingsUI.CustomUICheckBox.toggled.connect(self.toggleUI)
        self.settingsUI.CustomStyleCheckBox.toggled.connect(self.toggleStyle)

        self.settingsAction.triggered.connect(self.raiseSettings)
        self.hideSlicerUI()

    def toggleStyle(self, visible):
        # checkbox callback: apply custom stylesheet or revert to default
        if visible:
            self.applyApplicationStyle()
        else:
            slicer.app.styleSheet = ''

    def toggleUI(self, visible):
        # checkbox callback: hide or show the stock Slicer UI
        if visible:
            self.hideSlicerUI()
        else:
            self.showSlicerUI()

    def raiseSettings(self, unused):
        self.settingsDialog.exec()

    def applyApplicationStyle(self):
        # Style
        self.applyStyle([slicer.app], 'Home.qss')

    def applyStyle(self, widgets, styleSheetName):
        """Load a .qss resource and set it as the stylesheet of each widget."""
        stylesheetfile = self.resourcePath(styleSheetName)
        with open(stylesheetfile, "r") as fh:
            style = fh.read()
        for widget in widgets:
            widget.styleSheet = style
class HomeLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual
    computation done by your module. The interface
    should be such that other python code can import
    this class and make use of the functionality without
    requiring an instance of the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def run(self, inputVolume, outputVolume, imageThreshold, enableScreenshots=0):
        """
        Run the actual algorithm
        """
        pass

    def exitApplication(self, status=slicer.util.EXIT_SUCCESS, message=None):
        """Exit application.
        If ``status`` is ``slicer.util.EXIT_SUCCESS``, ``message`` is logged using ``logging.info(message)``
        otherwise it is logged using ``logging.error(message)``.
        """
        def _exitApplication():
            if message:
                if status == slicer.util.EXIT_SUCCESS:
                    logging.info(message)
                else:
                    logging.error(message)
            slicer.util.mainWindow().hide()
            # BUG FIX: previously always exited with EXIT_FAILURE, ignoring
            # the `status` parameter (contradicting this docstring).
            slicer.util.exit(status)
        # Defer via the event loop so the current Qt callback can finish.
        qt.QTimer.singleShot(0, _exitApplication)

    # settings for 3D view
    def setup3DView(self):
        layoutManager = slicer.app.layoutManager()
        # layoutManager.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView)
        # controller = slicer.app.layoutManager().threeDWidget(0).threeDController()
        # controller.setBlackBackground()
        # controller.set3DAxisVisible(False)
        # controller.set3DAxisLabelVisible(False)
        # controller.setOrientationMarkerType(3)  # Axis marker
        # controller.setStyleSheet("background-color: #000000")

    def setupSliceViewers(self):
        """Configure every slice view and enable linked slice control."""
        for name in slicer.app.layoutManager().sliceViewNames():
            sliceWidget = slicer.app.layoutManager().sliceWidget(name)
            self.setupSliceViewer(sliceWidget)

        # Set linked slice views in all existing slice composite nodes and in the default node
        sliceCompositeNodes = slicer.util.getNodesByClass('vtkMRMLSliceCompositeNode')
        defaultSliceCompositeNode = slicer.mrmlScene.GetDefaultNodeByClass('vtkMRMLSliceCompositeNode')
        if not defaultSliceCompositeNode:
            defaultSliceCompositeNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLSliceCompositeNode')
            defaultSliceCompositeNode.UnRegister(None)  # CreateNodeByClass is factory method, need to unregister the result to prevent memory leaks
            slicer.mrmlScene.AddDefaultNode(defaultSliceCompositeNode)
        sliceCompositeNodes.append(defaultSliceCompositeNode)
        for sliceCompositeNode in sliceCompositeNodes:
            sliceCompositeNode.SetLinkedControl(True)

    # Settings for slice views
    def setupSliceViewer(self, sliceWidget):
        controller = sliceWidget.sliceController()
        # controller.setOrientationMarkerType(3)  # Axis marker
        # controller.setRulerType(1)  # Thin ruler
        # controller.setRulerColor(0)  # White ruler
        # controller.setStyleSheet("background-color: #000000")
        # controller.sliceViewLabel = ''
class HomeTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.setUp()
        self.test_Home1()

    def test_Home1(self):
        """ Ideally you should have several levels of tests. At the lowest level
        tests should exercise the functionality of the logic with different inputs
        (both valid and invalid). At higher levels your tests should emulate the
        way the user would interact with your code and confirm that it still works
        the way you intended.
        One of the most important features of the tests is that it should alert other
        developers when their changes will have an impact on the behavior of your
        module. For example, if a developer removes a feature that you depend on,
        your test should break so they know that the feature is needed.
        """
        self.delayDisplay("Starting the test")
        #
        # first, get some data
        #
        # smoke test only: constructing the logic must not raise
        logic = HomeLogic()
        self.delayDisplay('Test passed!')
#
# Class for avoiding python error that is caused by the method SegmentEditor::setup
# http://issues.slicer.org/view.php?id=3871
#
class HomeFileWriter(object):
    """Placeholder file-writer class; see the comment block above — exists
    only to avoid the SegmentEditor::setup python error (Slicer issue 3871)."""

    def __init__(self, parent):
        pass
| StarcoderdataPython |
1798295 | <filename>src/1019.py
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def nextLargerNodes(self, head: ListNode) -> List[int]:
        """For each node, return the value of the next strictly greater node
        later in the list (0 if none), using a monotonic decreasing stack."""
        # First pass: count the nodes so the answer list can be pre-sized.
        length = 0
        node = head
        while node:
            length += 1
            node = node.next

        result = [0] * length       # default 0 = "no larger node found"
        pending = []                # stack of (value, position), values non-increasing
        pos = 0
        node = head
        while node:
            # Resolve every pending node whose value is strictly smaller
            # than the current one; equal values stay stacked.
            while pending and node.val > pending[-1][0]:
                _, earlier = pending.pop()
                result[earlier] = node.val
            pending.append((node.val, pos))
            pos += 1
            node = node.next
        return result
| StarcoderdataPython |
3314886 | import nextcord
from nextcord import Interaction
from emojis import CHECK
from error_messages import MISSING_PERMISSIONS
from colors import RES, YW
class GitHubButtonView(nextcord.ui.View):
    """View holding a link button to the bot's GitHub repository,
    attached to the help command embed."""

    def __init__(self):
        super().__init__(timeout=None)
        url = 'https://github.com/tholley7/Rainy_Bot'
        # Add the quoted url to the button, and add the button to the view.
        self.add_item(nextcord.ui.Button(label='GitHub', url=url))
class AcceptRulesView(nextcord.ui.View):
    """Persistent view with a button that grants the rules-accepted role."""

    def __init__(self):
        # timeout=None keeps the view persistent across restarts
        super().__init__(timeout=None)

    async def alter_role(self, button: nextcord.ui.Button, interaction: Interaction):
        """Grant the role named by the button's custom_id if not held yet."""
        role = nextcord.utils.get(interaction.guild.roles, name=button.custom_id)
        assert isinstance(role, nextcord.Role)
        # If user doesn't have the role
        if not role in interaction.user.roles:
            await interaction.user.add_roles(role)
            try:
                await interaction.response.send_message(f'Thank you for accepting the rules! You should be able to access the server now.', ephemeral=True, delete_after=10)
                print(f'{YW}{interaction.user}{RES} accepted the rules!')
            except nextcord.Forbidden:
                print(MISSING_PERMISSIONS)
        # If user has the role
        else:
            try:
                await interaction.response.send_message(f'You have already accepted the rules!', ephemeral=True, delete_after=10)
            except nextcord.Forbidden:
                print(MISSING_PERMISSIONS)

    @nextcord.ui.button(label='Accept Rules', style=nextcord.ButtonStyle.green, custom_id='Accepted Rules')
    async def accept_button(self, button, interaction):
        await self.alter_role(button, interaction)
class DMRolesView(nextcord.ui.View):
    """Persistent view with buttons that toggle DM-preference roles."""

    def __init__(self):
        super().__init__(timeout=None)

    async def alter_role(self, button: nextcord.ui.Button, interaction: Interaction):
        """Toggle the role named by the button's custom_id for the user."""
        role = nextcord.utils.get(interaction.guild.roles, name=button.custom_id)
        assert isinstance(role, nextcord.Role)
        # If user has the role
        if role in interaction.user.roles:
            try:
                await interaction.user.remove_roles(role)
                await interaction.response.send_message(f'Your **{role.name}** role was removed.', ephemeral=True, delete_after=15)
                print(f'Removed {YW}{role.name}{RES} role from {YW}{interaction.user}{RES}!')
            except Exception:
                print(MISSING_PERMISSIONS)
        # If user does not have the role
        else:
            try:
                await interaction.user.add_roles(role)
                await interaction.response.send_message(f'Gave you the **{role.name}** role!', ephemeral=True, delete_after=15)
                print(f'Gave {YW}{role.name}{RES} role to {YW}{interaction.user}{RES}!')
            except Exception:
                print(MISSING_PERMISSIONS)

    @nextcord.ui.button(label='DMs Open', style=nextcord.ButtonStyle.green, custom_id='DMs Open')
    async def dms_open_button(self, button, interaction):
        await self.alter_role(button, interaction)

    @nextcord.ui.button(label='DMs Closed', style=nextcord.ButtonStyle.red, custom_id='DMs Closed')
    async def dms_closed_button(self, button, interaction):
        await self.alter_role(button, interaction)

    @nextcord.ui.button(label='Ask to DM', style=nextcord.ButtonStyle.primary, custom_id='Ask to DM')
    async def dms_ask_button(self, button, interaction):
        await self.alter_role(button, interaction)
class PronounsView(nextcord.ui.View):
    """Persistent view whose buttons toggle a member's pronoun role.

    Each button's custom_id doubles as the name of the guild role it controls.
    """

    def __init__(self):
        # timeout=None keeps the view alive indefinitely (persistent view)
        super().__init__(timeout=None)

    async def alter_role(self, button: nextcord.ui.Button, interaction: Interaction):
        # Look up the role whose name matches the pressed button's custom_id.
        role = nextcord.utils.get(interaction.guild.roles, name=button.custom_id)
        # BUG FIX: removed leftover debug prints (print(role) / print(role.name));
        # when the role was missing they raised AttributeError on None before the
        # assert below could report the real problem.
        assert isinstance(role, nextcord.Role)
        # If user has the role, toggle it off.
        if role in interaction.user.roles:
            try:
                await interaction.user.remove_roles(role)
                await interaction.response.send_message(f'Your **{role.name}** role was removed.', ephemeral=True, delete_after=15)
                print(f'Removed {YW}{role.name}{RES} role from {YW}{interaction.user}{RES}!')
            except Exception:
                print(MISSING_PERMISSIONS)
        # If user does not have the role, grant it.
        else:
            try:
                # BUG FIX: add_roles moved inside the try so a permissions failure
                # is reported instead of escaping the handler (matches DMRolesView).
                await interaction.user.add_roles(role)
                await interaction.response.send_message(f'Gave you the **{role.name}** role!', ephemeral=True, delete_after=15)
                print(f'Gave {YW}{role.name}{RES} role to {YW}{interaction.user}{RES}!')
            except Exception:
                print(MISSING_PERMISSIONS)

    @nextcord.ui.button(label='He/Him', style=nextcord.ButtonStyle.primary, custom_id='He/Him')
    async def male_button(self, button, interaction):
        await self.alter_role(button, interaction)

    @nextcord.ui.button(label='She/Her', style=nextcord.ButtonStyle.primary, custom_id='She/Her')
    async def female_button(self, button, interaction):
        await self.alter_role(button, interaction)

    @nextcord.ui.button(label='They/Them', style=nextcord.ButtonStyle.primary, custom_id='They/Them')
    async def they_them_button(self, button, interaction):
        await self.alter_role(button, interaction)

    @nextcord.ui.button(label='Other Pronouns', style=nextcord.ButtonStyle.primary, custom_id='Other Pronouns')
    async def other_pronouns_button(self, button, interaction):
        await self.alter_role(button, interaction)
33787 | <filename>src/opendr/perception/activity_recognition/datasets/utils/transforms.py
# Copyright 2020-2021 OpenDR Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import math
from typing import Callable, Tuple
from torchvision.transforms import Compose
from torchvision.transforms._transforms_video import (
CenterCropVideo,
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
ToTensorVideo,
)
# Type alias: a video transform maps one tensor (clip) to another tensor.
Transform = Callable[[torch.Tensor], torch.Tensor]
def standard_video_transforms(
    spatial_pixels: int = 224,
    horizontal_flip=True,
    mean=(0.45, 0.45, 0.45),
    std=(0.225, 0.225, 0.225)
) -> Tuple[Transform, Transform]:
    """Generate standard transforms for video recognition.

    Args:
        spatial_pixels (int, optional): Spatial size (i.e. height or width) to resize to. Defaults to 224.
        horizontal_flip (bool, optional): Whether horizontal flipping (p = 0.5) is used. Defaults to True.
        mean (tuple, optional): Mean RGB values used in standardization. Defaults to (0.45, 0.45, 0.45).
        std (tuple, optional): Std RGB values used in standardization. Defaults to (0.225, 0.225, 0.225).

    Returns:
        Tuple[Transform, Transform]: (train_transforms, eval_transforms)
    """
    # Training crops come from a clip whose short side is jittered within
    # [0.8 / 0.7, 1 / 0.7] of the target size, each bound rounded down to even.
    jitter_lo, jitter_hi = 1 / 0.7 * 0.8, 1 / 0.7
    min_short_side = (spatial_pixels * jitter_lo) // 2 * 2
    max_short_side = (spatial_pixels * jitter_hi) // 2 * 2

    train_steps = [
        ToTensorVideo(),
        RandomShortSideScaleJitterVideo(
            min_size=min_short_side, max_size=max_short_side
        ),
        RandomCropVideo(spatial_pixels),
    ]
    if horizontal_flip:
        train_steps.append(RandomHorizontalFlipVideo())
    train_steps.append(NormalizeVideo(mean=mean, std=std))
    train_transforms = Compose(train_steps)

    # Evaluation: deterministic resize to the target short side + center crop.
    eval_transforms = Compose(
        [
            ToTensorVideo(),
            RandomShortSideScaleJitterVideo(min_size=spatial_pixels, max_size=spatial_pixels),
            CenterCropVideo(spatial_pixels),
            NormalizeVideo(mean=mean, std=std),
        ]
    )
    return train_transforms, eval_transforms
class RandomShortSideScaleJitterVideo:
    """Rescale a clip so its short side equals a size sampled from
    [min_size, max_size], preserving the aspect ratio.
    """

    def __init__(self, min_size: int, max_size: int, inverse_uniform_sampling=False):
        """
        Args:
            min_size (int): the minimal size to scale the frames.
            max_size (int): the maximal size to scale the frames.
            inverse_uniform_sampling (bool): if True, sample uniformly in
                [1 / max_size, 1 / min_size] and take a reciprocal to get the
                scale. If False, take a uniform sample from [min_size, max_size].
        """
        self.min_size = min_size
        self.max_size = max_size
        self.inverse_uniform_sampling = inverse_uniform_sampling

    def __call__(self, images: torch.Tensor) -> torch.Tensor:
        """Scale *images* (`num frames` x `channel` x `height` x `width`) so the
        short side equals the sampled size.

        Returns:
            (tensor): the scaled frames, or the input unchanged when the short
            side already matches the sampled size.
        """
        if self.inverse_uniform_sampling:
            sampled = 1.0 / np.random.uniform(1.0 / self.max_size, 1.0 / self.min_size)
        else:
            sampled = np.random.uniform(self.min_size, self.max_size)
        size = int(round(sampled))

        height, width = images.shape[2], images.shape[3]
        # Short side already at target: nothing to do.
        if min(height, width) == size:
            return images
        # Scale the short side to `size`, stretching the long side to keep the
        # aspect ratio (floor, as in the original reference implementation).
        if width < height:
            new_width = size
            new_height = int(math.floor((float(height) / width) * size))
        else:
            new_height = size
            new_width = int(math.floor((float(width) / height) * size))
        return torch.nn.functional.interpolate(
            images, size=(new_height, new_width), mode="bilinear", align_corners=False,
        )
| StarcoderdataPython |
1637823 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from app import logging
from app.remote.redis import Redis
from app.fortnite.news import news as parse_news
import logging
import asyncio
def news(client, message):
    """Handle the /news command: send the current news image to the chat.

    Reuses a cached Telegram file_id from Redis when the image for the current
    news hash was already uploaded; otherwise uploads the image and caches the
    resulting file_id for 24 hours.

    Args:
        client: Telegram client with a send_photo(chat_id, photo, caption) method.
        message: incoming message; only message.chat.id is read.

    Returns:
        None on success, or the caught exception object on failure
        (preserves the original error-reporting contract).
    """
    # BUG FIX: the original created a fresh event loop on every call and never
    # closed it (resource leak), while ignoring the local `loop` in favor of
    # repeated asyncio.get_event_loop() lookups. The loop is now used directly
    # and always closed.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        news_file, news_hash = loop.run_until_complete(parse_news())
        news_file_id = loop.run_until_complete(Redis.execute(
            "GET", "fortnite:news:file_id:{0}".format(news_hash)))['details']
        if news_file_id:
            # Image already uploaded: resend by cached file_id (no re-upload).
            logging.info("Изображение текущих новостей уже было загружено в Telegram, "
                         "File ID: {0}.".format(news_file_id))
            client.send_photo(message.chat.id, news_file_id,
                              caption="📰 Текущие новости Королевской Битвы и Сражения с Бурей.")
        else:
            # First upload for this news hash: cache the file_id for 24h.
            message = client.send_photo(message.chat.id, news_file,
                                        caption="📰 Текущие новости Королевской Битвы и Сражения с Бурей.")
            news_file_id = message['photo']['sizes'][-1]['file_id']
            loop.run_until_complete(Redis.execute(
                "SET", "fortnite:news:file_id:{0}".format(news_hash), news_file_id, "EX", 86400))
    except Exception as e:
        logging.error("Произошла ошибка при выполнении команды /news.", exc_info=True)
        return e
    finally:
        loop.close()
| StarcoderdataPython |
1750727 | '''create charts showing results of valgbr.py
INVOCATION
python chart06.py FEATURESGROUP-HPS-LOCALITY --data
python chart06.py FEATURESGROUP-HPS-global [--test] [--subset] [--norwalk] [--all]
python chart06.py FEATURESGROUP-HPS-city [--test] [--subset] [--norwalk] [--all] [--trace]
where
FEATURESGROUP is one of {s, sw, swp, swpn}
HPS is one of {all, best1}
LOCALILTY is one of {city, global}
FHL is FEATURESGROUP-HPS-LOCALITY
--test means to set control.arg.test to True
--subset means to process 0data-subset, not 0data, the full reduction
--norwalk means to process 0data-norwalk, not 0data, the full reduction
--all means to process all the cities, not just selected cities
--trace start with pdb.set_trace() call, so that we run under the debugger
INPUT FILES
WORKING/chart01/data.pickle
WORKING/valavm/FHL/YYYYMM.pickle
WORKING/samples-train-analysis/transactions.csv has transaction IDs
INPUT AND OUTPUT FILES (build with --data)
WORKING/chart06/FHL/0data.pickle | reduction for everything
WORKING/chart06/FHL/0data-norwalk.pickle | reduction for just Norwalk (for testing); only if locality == city
WORKING/chart06/FHL/0data-subset.pickle | random subset of everything (for testing)
WORKING/chart06/FHL/0all-price-histories.pickle |
OUTPUT FILES
WORKING/chart06/FHL/0data-report.txt | records retained TODO: Decide whether to keep
WORKING/chart06/FHL/a.pdf | range of losses by model (graph)
WORKING/chart06/FHL/b-YYYYMM.pdf | HPs with lowest losses
WORKING/chart06/FHL/b-YYYYMM.txt | HPs with lowest losses
WORKING/chart06/FHL/c.pdf | best model each month
WORKING/chart06/FHL/d.pdf | best & 50th best each month
WORKING/chart06/FHL/e.pdf | best 50 models each month (was chart07)
WORKING/chart06/FHL/best.pickle | dataframe with best choices each month # CHECK
WORKING/chart06/FHL/h.txt
WORKING chart06/FHL/i.txt
WORKING chart06/FHL/i.pdf | best value for k when L is global
The reduction is a dictionary.
- if LOCALITY is 'global', the type of the reduction is
dict[validation_month] sd
where sd is a sorted dictionary with type
dict[ModelDescription] ModelResults, sorted by increasing ModelResults.mae
- if LOCALITY is 'city', the type of the reduction is
dict[city_name] dict[validation_month] sd
'''
from __future__ import division
import argparse
import collections
import cPickle as pickle
import glob
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
import arg_type
from AVM import AVM
from Bunch import Bunch
from chart06_make_chart_a import make_chart_a
from chart06_make_chart_b import make_chart_b
from chart06_make_chart_cd import make_chart_cd
from chart06_make_chart_efgh import make_chart_efgh
from chart06_make_chart_hi import make_chart_hi
from chart06_types import ModelDescription, ModelResults, ColumnDefinitions
from columns_contain import columns_contain
import dirutility
import errors
from Logger import Logger
from Path import Path
from Report import Report
from Timer import Timer
from trace_unless import trace_unless
from valavmtypes import ResultKeyEn, ResultKeyGbr, ResultKeyRfr, ResultValue
# Short module-wide alias for the column-name matching helper.
cc = columns_contain
def make_control(argv):
    '''Parse argv and return a Bunch holding every path, constant, and parsed
    argument the rest of the program reads.

    argv: sys.argv-style list; argv[1] is FEATURESGROUP-HPS-LOCALITY (see module
    docstring).  Side effects: seeds random, creates output directories, and may
    drop into pdb when --trace is given.
    '''
    print argv
    parser = argparse.ArgumentParser()
    parser.add_argument('invocation')
    parser.add_argument('fhl', type=arg_type.features_hps_locality)
    parser.add_argument('--data', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--subset', action='store_true')
    parser.add_argument('--norwalk', action='store_true')
    parser.add_argument('--all', action='store_true')
    parser.add_argument('--trace', action='store_true')
    parser.add_argument('--use-samples-train-analysis-test', action='store_true')
    arg = parser.parse_args(argv)  # arg.__dict__ contains the bindings
    arg.base_name = arg.invocation.split('.')[0]
    # for now, we only know how to process global files
    # local files will probably have a different path in WORKING/valavm/
    # details to be determined
    arg.features, arg.hsheps, arg.locality = arg.fhl.split('-')
    assert arg.locality == 'global' or arg.locality == 'city', arg.fhl
    if arg.norwalk:
        # --norwalk only makes sense for per-city reductions
        assert arg.locality == 'city', argv
    if arg.trace:
        pdb.set_trace()
    # fixed seed so subset sampling is reproducible across runs
    random_seed = 123
    random.seed(random_seed)
    # assure output directory exists
    dir_working = Path().dir_working()
    dir_out_reduction = dirutility.assure_exists(dir_working + arg.base_name) + '/'
    dir_out = dirutility.assure_exists(dir_out_reduction + arg.fhl) + '/'
    # 'YYYYMM' strings naming the validation months used by the charts
    validation_months = (
        '200612',
        '200701', '200702', '200703', '200704', '200705', '200706',
        '200707', '200708', '200709', '200710', '200711',
    )
    validation_months_long = (
        '200512',
        '200601', '200602', '200603', '200604', '200605', '200606',
        '200607', '200608', '200609', '200610', '200611', '200612',
        '200701', '200702', '200703', '200704', '200705', '200706',
        '200707', '200708', '200709', '200710', '200711', '200712',
        '200801', '200802', '200803', '200804', '200805', '200806',
        '200807', '200808', '200809', '200810', '200811', '200812',
        '200901', '200902',
    )

    def all_k_values():
        # candidate k values for chart i: 1..30 then coarser steps to 200
        ks = range(1, 31, 1)
        ks.extend([40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200])
        return ks
    return Bunch(
        all_k_values=all_k_values(),
        arg=arg,
        column_definitions=ColumnDefinitions(),
        debug=arg.debug,
        errors=[],
        exceptions=[],
        path_in_valavm='%svalavm/%s/*.pickle' % (
            dir_working,
            arg.fhl,
        ),
        path_in_chart_01_reduction=dir_working + 'chart01/0data.pickle',
        # which prebuilt reduction to read when --data is not given
        path_in_data=dir_out + (
            '0data-subset.pickle' if arg.subset else
            '0data-norwalk.pickle' if arg.norwalk else
            '0data.pickle'
        ),
        path_in_interesting_cities=dir_working + 'interesting_cities.txt',
        path_in_transactions=(
            dir_working +
            'samples-train-analysis%s/transactions.csv' % ('-test' if arg.use_samples_train_analysis_test else '')
        ),
        path_all_price_histories=dir_out + '0all_price_histories.pickle',
        path_out_a=dir_out + 'a.pdf' if arg.locality == 'global' else dir_out + 'a-%s.pdf',
        path_out_b=dir_out + 'b-%d.txt',
        path_out_cd=dir_out + '%s.txt',
        path_out_c_pdf=dir_out+'c.pdf',
        path_out_b_pdf_subplots=dir_out + 'b.pdf',
        path_out_b_pdf=dir_out + 'b-%d.pdf',
        path_out_d=dir_out + 'd.txt',
        path_out_e_txt=dir_out + 'e-%04d-%6s.txt',
        path_out_e_pdf=dir_out + 'e-%04d.pdf',
        path_out_f=dir_out + 'f-%04d.txt',
        path_out_g=dir_out + 'g.txt',
        path_out_h_template=dir_out + ('h-%03d-%6s' if arg.locality == 'global' else 'h-%s-%03d-%6s') + '.txt',
        path_out_i_template=dir_out + ('i' if arg.locality == 'global' else 'i-%s') + '.txt',
        path_out_i_all_1_only_pdf=dir_out + 'i1-only.pdf',
        path_out_i_all_1_skip_pdf=dir_out + 'i1-skip.pdf',
        path_out_i_all_12_pdf=dir_out + 'i12-all.pdf',
        path_out_i_le_50_12_pdf=dir_out + 'i12-le50.pdf',
        path_out_data=dir_out + '0data.pickle',
        path_out_data_report=dir_out + '0data-report.txt',
        path_out_data_subset=dir_out + '0data-subset.pickle',
        path_out_data_norwalk=dir_out + '0data-norwalk.pickle',
        path_out_log=dir_out + '0log' + ('-data' if arg.data else '') + '.txt',
        random_seed=random_seed,
        sampling_rate=0.02,
        selected_cities=(
            'BEVERLY HILLS', 'CANYON COUNTRY',  # low number of transactions; high/low price
            'SHERMAN OAKS', 'POMONA',  # high number of transactions; high/low price
            'LOS ANGELES',
        ),
        test=arg.test,
        timer=Timer(),
        validation_months=validation_months,
        validation_months_long=validation_months_long,
    )
def select_and_sort(df, year, month, model):
    '''Return a new DataFrame holding the observations for the given year,
    month, and model, sorted by increasing mae.

    df columns used: model, validation_month ('YYYYMM' string), mae.
    '''
    yyyymm = str(year * 100 + month)
    mask = (
        (df.model == model) &
        (df.validation_month == yyyymm)
    )
    subset = df.loc[mask]
    if len(subset) == 0:
        # debugging trap: an empty selection means the caller asked for a
        # (month, model) combination that is absent from df
        print 'empty subset'
        print year, month, model, sum(df.model == model), sum(df.validation_month == yyyymm)
        pdb.set_trace()
    return subset.sort_values('mae')
def check_actuals(actuals):
    '''Verify every element of actuals holds the same multiset of values.

    actuals: sequence of per-model actual-price vectors; every model scored the
    same transactions, so all vectors should contain identical values.
    '''
    k = len(actuals)
    assert k > 0, k
    first = actuals[0]
    for other in actuals:
        # Counter comparison ignores ordering but catches any differing value
        if collections.Counter(first) != collections.Counter(other):
            print collections.Counter(first), collections.Counter(other)
            pdb.set_trace()  # debugging trap: inspect the mismatch interactively
def make_ensemble_predictions(predictions, weights):
    '''Return the ensemble prediction vector: sum_i w_i * pred_i / sum_i w_i.

    predictions: sequence of equal-length prediction vectors (array-like).
    weights: parallel sequence of scalar weights, one per prediction vector.
    '''
    # IMPROVEMENT: iterate the parallel sequences directly instead of indexing
    # via the Python-2-only xrange; same accumulation order, so results are
    # bit-identical, and the function now also runs under Python 3.
    weighted_sum = np.zeros_like(np.array(predictions[0]))
    for prediction, weight in zip(predictions, weights):
        weighted_sum = np.add(weighted_sum, np.dot(prediction, weight))
    sum_weights = np.sum(np.array(weights))
    return weighted_sum / sum_weights
def check_key_order(d):
    """Verify d's values are ordered by non-decreasing mae.

    d maps ModelDescription -> ModelResults; callers build it sorted by mae,
    and trace_unless drops into the debugger on the first violation.
    """
    keys = d.keys()
    for position, (earlier, later) in enumerate(zip(keys, keys[1:])):
        trace_unless(
            d[earlier].mae <= d[later].mae,
            'should be non increasing',
            index=position,
            mae1=d[earlier].mae,
            mae2=d[later].mae,
        )
def short_model_description(model_description):
    """Return a compact one-line summary of a model's key hyperparameters.

    model_description.model selects the format: 'gb' (gradient boosting),
    'rf' (random forest), or 'en' (elastic net).
    """
    kind = model_description.model
    if kind == 'en':
        return '%s(%f, %f)' % (
            kind,
            model_description.alpha,
            model_description.l1_ratio,
        )
    if kind == 'gb':
        return '%s(%d, %d, %s, %d, %3.2f)' % (
            kind,
            model_description.n_months_back,
            model_description.n_estimators,
            model_description.max_features,
            model_description.max_depth,
            model_description.learning_rate,
        )
    # only the tree-based 'rf' remains
    assert kind == 'rf', model_description
    return '%s(%d, %d, %s, %d)' % (
        kind,
        model_description.n_months_back,
        model_description.n_estimators,
        model_description.max_features,
        model_description.max_depth,
    )
def make_charts(reduction, actuals, median_prices, control):
    '''Produce the chart outputs from the reduction.

    Only charts a and h/i are produced; the code for charts b-g is retained
    below the early return for reference but is intentionally unreachable.
    '''
    print 'making charts'
    make_chart_a(reduction, median_prices, control)
    make_chart_hi(reduction, actuals, median_prices, control)
    return  # charts b - g are obsolete; code below is deliberately dead
    if control.arg.locality == 'city':
        print 'stopping charts after chart a and h, since locality is', control.arg.locality
        return
    make_chart_b(reduction, control, median_prices)
    make_chart_cd(reduction, median_prices, control, (0,), 'c')
    for n_best in (5, 100):
        report_id = 'd-%0d' % n_best
        # peek at one month to learn how many models each month holds
        for validation_month, month_reduction in reduction.iteritems():
            n_reductions_per_month = len(month_reduction)
            break
        detail_lines_d = range(n_best)[:n_reductions_per_month]
        make_chart_cd(reduction, median_prices, control, detail_lines_d, report_id)
    make_chart_efgh(reduction, actuals, median_prices, control)
def extract_yyyymm(path):
    """Return the YYYYMM token from a reduction file path.

    The path's final component is expected to look like '.../YYYYMM.pickle'
    (exactly one '.'); the part after the last '-' in the stem is returned.
    """
    leaf = path.split('/')[-1]
    stem, extension = leaf.split('.')
    return stem.split('-')[-1]
class ReductionIndex(object):
    '''Reduction DataFrame multiindex value object.

    Identifies one reduction row by (city, validation_month, model_description).
    Instances with equal fields compare equal and hash equal, so they can be
    used interchangeably as dict/set keys.
    '''

    def __init__(self, city, validation_month, model_description):
        self.city = city
        self.validation_month = validation_month
        self.model_description = model_description

    def __eq__(self, other):
        # BUG FIX: __hash__ was defined without __eq__, so equal-content
        # instances hashed alike yet remained distinct keys (default identity
        # equality), defeating the purpose of a hashable index object.
        return (
            isinstance(other, ReductionIndex) and
            (self.city, self.validation_month, self.model_description) ==
            (other.city, other.validation_month, other.model_description)
        )

    def __ne__(self, other):
        # explicit under Python 2, which does not derive != from ==
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.city, self.validation_month, self.model_description))

    def __repr__(self):
        pattern = 'ReductionIndex(city=%s, validation_month=%s, model_description=%s)'
        return pattern % (self.city, self.validation_month, self.model_description)
class ReductionValue(object):
    '''Reduction DataFrame value object.

    Holds (mae, model_results, feature_group) for one reduction cell.
    Instances with equal fields compare equal and hash equal.
    '''

    def __init__(self, mae, model_results, feature_group):
        self.mae = mae
        self.model_results = model_results
        self.feature_group = feature_group

    def __eq__(self, other):
        # BUG FIX: __hash__ was defined without __eq__, so equal-content
        # instances hashed alike yet remained distinct keys (default identity
        # equality).
        return (
            isinstance(other, ReductionValue) and
            (self.mae, self.model_results, self.feature_group) ==
            (other.mae, other.model_results, other.feature_group)
        )

    def __ne__(self, other):
        # explicit under Python 2, which does not derive != from ==
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.mae, self.model_results, self.feature_group))

    def __repr__(self):
        pattern = 'ReductionValue(mae = %f, model_results=%s, feature_group=%s)'
        return pattern % (self.mae, self.model_results, self.feature_group)
def make_reduction(control):
    '''Reduce every valavm pickle into sorted per-month model results.

    Returns (reduction, all_actuals, counters, all_price_histories) where
    - reduction: dict[validation_month] OrderedDict[ModelDescription] ModelResults
      (sorted by increasing mae), or dict[city][validation_month] ... when
      control.arg.locality == 'city'
    - all_actuals: the actual-price vector(s), keyed the same way
    - counters: dict[path] collections.Counter of read/parse outcomes
    - all_price_histories: DataFrame matching valavm transactions back to ids
    '''
    def path_city(path):
        'return city in path to file'
        last = path.split('/')[-1]
        date, city = last.split('.')[0].split('-')
        return city

    def process_path(path):
        '''Return (model, actuals, counter, updated_price_history) for one
        valavm pickle, where model has type dict[ModelDescription] ModelResults.
        '''
        def make_model_description(key):
            # normalize the three valavm key namedtuples into one ModelDescription
            is_en = isinstance(key, ResultKeyEn)
            is_gbr = isinstance(key, ResultKeyGbr)
            is_rfr = isinstance(key, ResultKeyRfr)
            is_tree = is_gbr or is_rfr
            result = ModelDescription(
                model='en' if is_en else ('gb' if is_gbr else 'rf'),
                n_months_back=key.n_months_back,
                units_X=key.units_X if is_en else 'natural',
                units_y=key.units_y if is_en else 'natural',
                alpha=key.alpha if is_en else None,
                l1_ratio=key.l1_ratio if is_en else None,
                n_estimators=key.n_estimators if is_tree else None,
                max_features=key.max_features if is_tree else None,
                max_depth=key.max_depth if is_tree else None,
                loss=key.loss if is_gbr else None,
                learning_rate=key.learning_rate if is_gbr else None,
            )
            return result

        def make_model_result(value):
            # summarize a valavm (actuals, predictions) pair into error statistics
            rmse, mae, low, high = errors.errors(value.actuals, value.predictions)
            result = ModelResults(
                rmse=rmse,
                mae=mae,
                ci95_low=low,
                ci95_high=high,
                predictions=value.predictions,
            )
            return result

        def update_price_history(
                price_history=None,
                ids=None,
                validation_year=None,
                validation_month=None,
                valavm_result_value=None,
                valavm_key=None,
        ):
            '''Return (augmented price history DataFrame, counter).

            Matches each (actual, predicted) pair in valavm_result_value back
            to the transactions in ids by (year, month, actual_price); only
            unambiguous single matches are appended to the history.
            NOTE: references `key` from the enclosing process_path scope (the
            closure), not the valavm_key parameter.
            '''
            def same_apn(df):
                return (df.apn == df.iloc[0].apn).all()

            def same_actual_price(df):
                return (df.actual_price == df.iloc[0].actual_price).all()

            def make_data_dict(key, matched):
                'return dictionary with the columns with want in the price history'
                # common columns
                assert isinstance(key, (ResultKeyEn, ResultKeyGbr, ResultKeyRfr)), (key, type(key))
                # common results (across all methods)
                result = {
                    'apn': matched.apn,
                    'year': matched.year,
                    'month': matched.month,
                    'day': matched.day,
                    'sequence_number': matched.sequence_number,
                    'date': matched.date,
                    'price_actual': price_actual,
                    'price_estimated': price_estimated,
                    'n_months_back': key.n_months_back,
                    'method': (
                        'rfr' if isinstance(key, ResultKeyRfr) else
                        'gbr' if isinstance(key, ResultKeyGbr) else
                        'en'
                    ),
                }
                # add columns appropriate for the type of result
                if isinstance(key, (ResultKeyRfr, ResultKeyGbr)):
                    result.update({
                        'n_esimators': key.n_estimators,
                        'max_features': key.max_features,
                        'max_depth': key.max_depth,
                    })
                if isinstance(key, ResultKeyGbr):
                    result.update({
                        'loss': key.loss,
                        'learning_rate': key.learning_rate,
                    })
                if isinstance(key, ResultKeyEn):
                    result.update({
                        'units_X': key.units_X,
                        'units_y': key.units_y,
                        'alpha': key.alpha,
                        'l1_ratio': key.l1_ratio,
                    })
                return result

            debug = False
            verbose = False
            if debug:
                print validation_year, validation_month,
                print len(ids)
                print ids[:5]
                print key
                print 'len(price_history)', 0 if price_history is None else len(price_history)
            assert len(valavm_result_value.actuals) == len(valavm_result_value.predictions)
            counter = collections.Counter()
            counter_key = 'year, month, price matched %d times'
            result_price_history = price_history
            for price_actual, price_estimated in zip(valavm_result_value.actuals, valavm_result_value.predictions):
                # print price_actual, price_estimated
                mask1 = ids.year == validation_year
                mask2 = ids.month == validation_month
                mask3 = ids.actual_price == price_actual
                mask = mask1 & mask2 & mask3
                if debug:
                    print 'transaction in validation year: ', sum(mask1)
                    print 'transactions in validation month: ', sum(mask2)
                    print 'transactions with same actual price;', sum(mask3)
                    print 'transactions that match on all: ', sum(mask)
                    print type(key)
                matched_df = ids[mask]
                if len(matched_df) == 1:
                    # unambiguous match: record this transaction's estimate
                    counter[counter_key % 1] += 1
                    matched = matched_df.iloc[0]
                    print 'single match', matched_df.index[0]
                    data_dict = make_data_dict(key, matched)
                    df_new = pd.DataFrame(
                        data=data_dict,
                        index=[0 if result_price_history is None else len(result_price_history)],
                    )
                    # pdb.set_trace()
                    result_price_history = (
                        df_new if result_price_history is None else
                        result_price_history.append(df_new, verify_integrity=True)
                    )
                else:
                    # zero or multiple matches: count, but do not record
                    counter[counter_key % len(matched_df)] += 1
                    if len(matched_df) > 0:
                        if verbose:
                            print 'valavm matched %d training transactions' % len(matched_df)
                            print matched_df
                        continue
                if control.debug and len(result_price_history) > 2:
                    print 'DEBUG: breaking out of update_price_history'
                    break
            print 'identified %d price histories' % len(result_price_history)
            return result_price_history, counter

        ids = pd.read_csv(
            control.path_in_transactions,
            index_col=0,
            low_memory=False,
        )
        print 'reducing', path
        model = {}
        counter = collections.Counter()
        input_record_number = 0
        actuals = None
        updated_price_history = None  # mutated in the while True loop below
        # the file stem is the validation YYYYMM; split into year and month
        validation_year_month = int(path.split('/')[-1].split('.')[0])
        validation_year = int(validation_year_month / 100)
        validation_month = int(validation_year_month - validation_year * 100)
        assert validation_year_month == validation_year * 100 + validation_month
        with open(path, 'rb') as f:
            while True:  # process each record in path
                counter['attempted to read'] += 1
                input_record_number += 1
                if control.debug and input_record_number > 10:
                    print 'DEBUG: breaking out of record read in path', path
                    break
                try:
                    # model[model_key] = error_analysis, for next model result
                    record = pickle.load(f)
                    counter['actually read'] += 1
                    assert isinstance(record, tuple), type(record)
                    assert len(record) == 2, len(record)
                    key, value = record
                    assert len(value) == 2, len(value)
                    # NOTE: importances is not used
                    valavm_result_value, importances = value
                    # type(valavm_result_value) == namedtuple with fields actuals, predictions
                    # the fields are parallel, corresponding transaction to transaction
                    if len(ids) < 100000:
                        print 'WARNING: TRUNCATED IDS'
                    # verify that actuals is always the same
                    if actuals is not None:
                        assert np.array_equal(actuals, valavm_result_value.actuals)
                    actuals = valavm_result_value.actuals
                    # verify that each model_key occurs at most once in the validation month
                    model_key = make_model_description(key)
                    updated_price_history, update_counter = update_price_history(
                        price_history=updated_price_history,
                        ids=ids,
                        validation_year=validation_year,
                        validation_month=validation_month,
                        valavm_result_value=valavm_result_value,
                        valavm_key=key,
                    )
                    # NOTE: the counters are the same for every path because the actual transactions are the same
                    if input_record_number == 1:
                        print 'path update counters', path
                        for k, v in update_counter.iteritems():
                            print k, v
                    if model_key in model:
                        # debugging trap: the same hyperparameter set appeared twice
                        print '++++++++++++++++++++++'
                        print path, model_key
                        print 'duplicate model key'
                        pdb.set_trace()
                        print '++++++++++++++++++++++'
                    model[model_key] = make_model_result(valavm_result_value)
                except ValueError as e:
                    counter['ValueError'] += 1
                    if key is not None:
                        print key
                    print 'ignoring ValueError in record %d: %s' % (input_record_number, e)
                except EOFError:
                    # normal end of the pickle stream
                    counter['EOFError'] += 1
                    print 'found EOFError path in record %d: %s' % (input_record_number, path)
                    print 'continuing'
                    if input_record_number == 1 and False:
                        # with locality == city, a file can be empty
                        control.errors.append('eof record 1; path = %s' % path)
                    break
                except pickle.UnpicklingError as e:
                    counter['UnpicklingError'] += 1
                    print 'cPickle.Unpicklingerror in record %d: %s' % (input_record_number, e)
        return model, actuals, counter, updated_price_history

    reduction = collections.defaultdict(dict)
    all_actuals = collections.defaultdict(dict)
    paths = sorted(glob.glob(control.path_in_valavm))
    assert len(paths) > 0, paths
    counters = {}
    all_price_histories = None
    for path in paths:
        model, actuals, counter, price_history = process_path(path)
        all_price_histories = (
            price_history if all_price_histories is None else
            all_price_histories.append(price_history, verify_integrity=True, ignore_index=True)
        )
        # type(model) is dict[ModelDescription] ModelResults
        # sort models by increasing ModelResults.mae
        sorted_models = collections.OrderedDict(sorted(model.items(), key=lambda t: t[1].mae))
        check_key_order(sorted_models)
        if control.arg.locality == 'global':
            # file stem is 'YYYYMM'
            base_name, suffix = path.split('/')[-1].split('.')
            validation_month = base_name
            reduction[validation_month] = sorted_models
            all_actuals[validation_month] = actuals
        elif control.arg.locality == 'city':
            # file stem is 'YYYYMM-CITYNAME'
            base_name, suffix = path.split('/')[-1].split('.')
            validation_month, city_name = base_name.split('-')
            # some file systems create all upper case names
            # some create mixed-case names
            # we map each to upper case
            city_name_used = city_name.upper()
            reduction[city_name_used][validation_month] = sorted_models
            all_actuals[city_name_used][validation_month] = actuals
        else:
            print 'unexpected locality', control.arg.locality
            pdb.set_trace()
        counters[path] = counter
        if control.debug and len(counters) > 1:
            print 'DEBUG: stopping iteration over paths'
            break
        if control.test:
            break
    return reduction, all_actuals, counters, all_price_histories
def make_subset_global(reduction, fraction):
'return a random sample of the reduction stratified by validation_month as an ordereddict'
# use same keys (models) every validation month
# generate candidate for common keys in the subset
subset_common_keys = None
for validation_month, validation_dict in reduction.iteritems():
if len(validation_dict) == 0:
print 'zero length validation dict', validation_month
pdb.set_trace()
keys = validation_dict.keys()
n_to_keep = int(len(keys) * fraction)
subset_common_keys_list = random.sample(keys, n_to_keep)
subset_common_keys = set(subset_common_keys_list)
break
# remove keys from subset_common_keys that are not in each validation_month
print 'n candidate common keys', len(subset_common_keys)
for validation_month, validation_dict in reduction.iteritems():
print 'make_subset', validation_month
validation_keys = set(validation_dict.keys())
for key in subset_common_keys:
if key not in validation_keys:
print 'not in', validation_month, ': ', key
subset_common_keys -= set(key)
print 'n final common keys', len(subset_common_keys)
# build reduction subset using the actual common keys
results = {}
for validation_month, validation_dict in reduction.iteritems():
d = {
common_key: validation_dict[common_key]
for common_key in subset_common_keys
}
# sort by MAE, low to high
od = collections.OrderedDict(sorted(d.items(), key=lambda x: x[1].mae))
results[validation_month] = od
return results
def make_subset_city(reduction, path_interesting_cities):
    '''Return the reduction restricted to the cities listed (one per line) in
    the interesting-cities file.

    reduction: dict[city_name] dict[validation_month] sorted-models.
    '''
    result = {}
    # a small reduction (<= 6 cities) is returned unchanged
    if len(reduction) <= 6:
        return reduction
    with open(path_interesting_cities, 'r') as f:
        lines = f.readlines()
    no_newlines = [line.rstrip('\n') for line in lines]
    for interesting_city in no_newlines:
        if interesting_city in reduction:
            result[interesting_city] = reduction[interesting_city]
        else:
            # debugging trap: the file names a city the reduction lacks
            print 'not in reduction', interesting_city
            pdb.set_trace()
    return result
def make_subset(reduction, fraction, locality, interesting_cities):
'return dict of type type(reduction) but with a randomly chosen subset of size fraction * len(reduction)'
if locality == 'global':
return make_subset_global(reduction, fraction)
elif locality == 'city':
return make_subset_city(reduction, interesting_cities)
else:
print 'bad locality', locality
pdb.set_trace()
def make_norwalk(reduction):
    """Return a one-city reduction holding only the NORWALK entry.

    Used to build the small 0data-norwalk.pickle test fixture.
    """
    return {'NORWALK': reduction['NORWALK']}
def make_median_price(path, cities):
    '''Return dict[month] median_price, or dict[city][month] median_price
    when `cities` is truthy.

    `path` names a pickle holding (DataFrame, control); the DataFrame has
    columns month, price, and (when cities) city.
    '''
    def median_for(frame, month):
        # median sale price over the rows of *frame* that fall in *month*
        return frame[frame.month == month].price.median()

    with open(path, 'rb') as f:
        df, reduction_control = pickle.load(f)
    months = set(df.month)
    if not cities:
        return {month: median_for(df, month) for month in months}
    by_city = collections.defaultdict(dict)
    for city in set(df.city):
        city_rows = df[df.city == city]
        by_city[city] = {month: median_for(city_rows, month) for month in months}
    return by_city
class ReportReduction(object):
    """Builds and writes the 0data-report: one section per input path listing
    how many records were kept/dropped while reducing that file.
    """

    def __init__(self, counters):
        # counters: dict[path] collections.Counter of read outcomes
        self._report = self._make_report(counters)

    def write(self, path):
        # delegate persistence to the Report object
        self._report.write(path)

    def _make_report(self, counters):
        report = Report()
        report.append('Records retained while reducing input file')
        for path, counter in counters.iteritems():
            report.append(' ')
            report.append('path %s' % path)
            for tag, value in counter.iteritems():
                report.append('%30s: %d' % (tag, value))
        return report
def main(argv):
print "what"
control = make_control(argv)
sys.stdout = Logger(logfile_path=control.path_out_log)
print control
lap = control.timer.lap
if control.arg.data:
if not control.debug:
median_price = make_median_price(control.path_in_chart_01_reduction, control.arg.locality == 'city')
lap('make_median_price')
reduction, all_actuals, counters, all_price_histories = make_reduction(control)
lap('make_reduction')
with open(control.path_all_price_histories, 'wb') as f:
pdb.set_trace()
print 'len(all_price_histories)', len(all_price_histories)
print 'columns', all_price_histories.columns
pickle.dump(all_price_histories, f)
lap('write all price histories')
if len(control.errors) > 0:
print 'stopping because of errors'
for error in control.errors:
print error
pdb.set_trace()
lap('make_data')
ReportReduction(counters).write(control.path_out_data_report)
subset = make_subset(reduction, control.sampling_rate, control.arg.locality, control.path_in_interesting_cities)
lap('make_subset')
norwalk = make_norwalk(reduction) if control.arg.locality == 'city' else None
# check key order
def check_validation_month_keys(reduction):
for validation_month in reduction.keys():
check_key_order(reduction[validation_month])
if control.arg.locality == 'global':
check_validation_month_keys(reduction)
check_validation_month_keys(subset)
else:
for city in reduction.keys():
check_validation_month_keys(reduction[city])
for city in subset.keys():
check_validation_month_keys(subset[city])
lap('check key order')
output_all = (reduction, all_actuals, median_price, control)
output_samples = (subset, all_actuals, median_price, control)
output_norwalk = (norwalk, all_actuals, median_price, control)
lap('check key order')
with open(control.path_out_data, 'wb') as f:
pickle.dump(output_all, f)
lap('write all data')
with open(control.path_out_data_subset, 'wb') as f:
pickle.dump(output_samples, f)
lap('write samples')
if control.arg.locality == 'city':
with open(control.path_out_data_norwalk, 'wb') as f:
pickle.dump(output_norwalk, f)
lap('write norwalk')
else:
with open(control.path_in_data, 'rb') as f:
print 'reading reduction data file'
reduction, all_actuals, median_price, reduction_control = pickle.load(f)
lap('read input from %s' % control.path_in_data)
# check that the reduction dictionaries are ordered by mae
def check_order_months(d):
for validation_month, ordered_dict in d.iteritems():
check_key_order(ordered_dict)
if control.arg.locality == 'global':
check_order_months(reduction)
elif control.arg.locality == 'city':
for city, month_dict in reduction.iteritems():
check_order_months(month_dict)
make_charts(reduction, all_actuals, median_price, control)
print control
if control.test:
print 'DISCARD OUTPUT: test'
if control.debug:
print 'DISCARD OUTPUT: debug'
if control.arg.subset:
print 'DISCARD OUTPUT: subset'
if len(control.errors) != 0:
print 'DISCARD OUTPUT: ERRORS'
for error in control.errors:
print error
if len(control.exceptions) != 0:
print 'DISCARD OUTPUT; EXCEPTIONS'
for exception in control.expections:
print exception
print 'done'
return
if __name__ == '__main__':
    # The dead `if False` block references otherwise-unused imports so that
    # pyflakes does not flag them as unused; it never executes.
    if False:
        # avoid pyflakes warnings
        pdb.set_trace()
        pprint()
        pd.DataFrame()
        np.array()
        AVM()
        ResultValue
    main(sys.argv)
| StarcoderdataPython |
3223187 | # File: digitalguardianarc_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import sys
import requests
import json
import phantom.app as phantom
from datetime import datetime
from bs4 import BeautifulSoup
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from digitalguardianarc_consts import *
from bs4 import UnicodeDammit
class RetVal(tuple):
    """Two-element (status, data) tuple; the second element defaults to None."""

    def __new__(cls, val1, val2=None):
        # BUG FIX: pass `cls` instead of the hard-coded `RetVal` so that
        # subclasses construct instances of themselves.
        return tuple.__new__(cls, (val1, val2))
class DigitalGuardianArcConnector(BaseConnector):
    def __init__(self):
        """Initialize connector state; real values are loaded later outside this method."""
        # Call the BaseConnectors init first
        super(DigitalGuardianArcConnector, self).__init__()
        self._state = None  # persisted state dict across runs (set elsewhere)
        self._auth_url = None  # presumably the token endpoint used by requestApiToken -- confirm
        self._arc_url = None  # base URL of the ARC REST API (used to build request URLs)
        self._client_id = None  # API client id; also stamped onto artifacts as 'tenant'
        self._client_secret = None
        self._export_profile = None  # export profile id used by get_export()
        self._api_key = None
        self._client_headers = {}  # HTTP headers, including 'Authorization' once a token is obtained
def _process_empty_response(self, response, action_result):
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Status Code: {0}. Empty response and no information in the header'.format(response.status_code)), None)
    def _process_html_response(self, response, action_result):
        """Extract readable error text from an HTML error page and fail the action.

        :param response: requests response whose body is HTML
        :param action_result: ActionResult to record the failure on
        :return: RetVal(APP_ERROR status, None)
        """
        # An html response, treat it like an error
        status_code = response.status_code
        try:
            soup = BeautifulSoup(response.text, 'html.parser')
            # Remove the script, style, footer and navigation part from the HTML message
            for element in soup(["script", "style", "footer", "nav"]):
                element.extract()
            error_text = soup.text
            split_lines = error_text.split('\n')
            # drop blank lines and surrounding whitespace
            split_lines = [x.strip() for x in split_lines if x.strip()]
            error_text = '\n'.join(split_lines)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            error_text = 'Cannot parse error details {}'.format(err)
        message = "Status Code: {0}. Data from server:{1}".format(
            status_code, self._handle_py_ver_compat_for_input_str(error_text))
        # Escape braces so later .format() calls on this message cannot choke on them
        message = message.replace('{', '{{').replace('}', '}}')
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
    def _process_json_response(self, r, action_result):
        """Parse a JSON body: 2xx/3xx -> (APP_SUCCESS, parsed dict), else an error RetVal.

        :param r: requests response
        :param action_result: ActionResult to record failures on
        :return: RetVal(status, parsed JSON or None)
        """
        # Try a json parse
        try:
            resp_json = r.json()
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
        # Please specify the status codes here
        if 200 <= r.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, resp_json)
        # You should process the error returned in the json
        # (braces are escaped so downstream .format() calls stay safe)
        message = 'Error from server. Status Code: {0} Data from server: {1}'.format(
            r.status_code, self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}')))
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
# store the r_text in debug data, it will get dumped in the logs if the action fails
try:
if hasattr(action_result, 'add_debug_data') and (self.get_action_identifier() != 'get-file' or not 200 <= response.status_code < 399):
action_result.add_debug_data(
{'r_status_code': response.status_code})
action_result.add_debug_data({'r_text': response.text})
action_result.add_debug_data({'r_headers': response.headers})
if 'json' in response.headers.get('Content-Type', ''):
self.save_progress("Action: 'process_json_response'")
return self._process_json_response(response, action_result)
if 'html' in response.headers.get('Content-Type', ''):
self.save_progress("Action: 'process_html_response'")
return self._process_html_response(response, action_result)
if not response.text:
self.save_progress("Action: 'process_empty_response'")
return self._process_empty_response(response, action_result)
message = (
"Can't process response from server. Status Code: {0} Data from server: {1}"
).format(response.status_code,
self._handle_py_ver_compat_for_input_str(response.text.replace('{', '{{').replace('}', '}}')))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
exc_tb = sys.exc_info()
self.save_progress(('exception_line={} {}').format(exc_tb.tb_lineno, err))
return RetVal(action_result.set_status(phantom.APP_ERROR, ('Error: {}').format(err)), None)
    def _make_rest_call(self, endpoint, action_result, method='get', **kwargs):
        """Issue an HTTP request against the configured ARC base URL.

        :param endpoint: path appended to self._arc_url
        :param action_result: ActionResult to record failures on
        :param method: requests method name ('get', 'post', ...)
        :param kwargs: passed straight through to the requests call
        :return: RetVal(status, processed response data or None)
        """
        # **kwargs can be any additional parameters that requests.request accepts
        config = self.get_config()
        resp_json = None
        try:
            # resolve e.g. requests.get / requests.post by name
            request_func = getattr(requests, method)
        except AttributeError:
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Invalid method: {0}'.format(method)), resp_json)
        # Create a URL to connect to
        url = "%s/%s" % (self._arc_url.strip("/"), endpoint)
        try:
            self.save_progress("Connecting to URL: {0}".format(url))
            r = request_func(url,
                             verify=config.get('verify_server_cert', False),
                             **kwargs)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), resp_json)
        return self._process_response(r, action_result)
    def _handle_py_ver_compat_for_input_str(self, input_str):
        """
        This method returns the encoded|original string based on the Python version.

        :param input_str: Input string to be processed
        :return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
        """
        try:
            # On Python 2, normalize to a UTF-8 encoded byte string via bs4's
            # UnicodeDammit.  NOTE(review): self._python_version is assigned
            # outside this chunk -- presumably in initialize(); confirm.
            if input_str and self._python_version == 2:
                input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8')
        except:
            # Deliberate best-effort: on any failure fall back to the
            # original string untouched.
            self.debug_print("Error occurred while handling python 2to3 compatibility for the input string")
        return input_str
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error messages from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
except:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
error_msg = self._handle_py_ver_compat_for_input_str(error_msg)
except TypeError:
error_msg = TYPE_ERR_MSG
except:
error_msg = ERR_MSG_UNAVAILABLE
try:
if error_code in ERR_CODE_MSG:
error_text = "Error Message: {0}".format(error_msg)
else:
error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
except:
self.debug_print(PARSE_ERR_MSG)
error_text = PARSE_ERR_MSG
return error_text
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# NOTE: test connectivity does _NOT_ take any parameters
# i.e. the param dictionary passed to this handler will be empty.
# Also typically it does not add any data into an action_result either.
# The status and progress messages are more important.
self.save_progress('Connecting to DG ARC')
ret_val, message = self.requestApiToken()
if not self._client_headers['Authorization']:
self.save_progress('Test Connectivity Failed')
return action_result.get_status()
else:
self.save_progress('Test Connectivity Passed')
return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_on_poll(self, param):
        """Ingest alarm export records: one container per distinct alarm
        name/guid, artifacts for every record."""
        oldname = ''
        action_result = self.add_action_result(ActionResult(dict(param)))
        response_status, export_list = self.get_export(action_result)
        if phantom.is_fail(response_status):
            self.debug_print('On Poll Failed')
            return action_result.get_status()
        if export_list:
            self.save_progress('Ingesting alarm records')
        else:
            self.save_progress('No export data found')
            return action_result.set_status(phantom.APP_SUCCESS, 'No export data found')
        for entry in export_list:
            try:
                # Container name: alarm name up to the first comma (first
                # 100 chars when no comma), suffixed with the record guid.
                comm = entry['dg_alarm_name'].find(',')
                if comm == -1:
                    comm = 100
                name = ('{alarm_name}-{id}').format(
                    alarm_name=entry['dg_alarm_name'][0:comm],
                    id=entry['dg_guid'])
                # Consecutive records with the same name share one container.
                # NOTE(review): if create_container raises on the very first
                # entry, container_id is referenced unbound below; the
                # resulting NameError is swallowed by this except -- confirm.
                if name != oldname:
                    container_id = self.create_container(name, entry)
                    oldname = name
                if container_id:
                    (artifacts_creation_status,
                     artifacts_creation_msg) = self.create_artifacts(alert=entry, container_id=container_id)
                    if phantom.is_fail(artifacts_creation_status):
                        self.debug_print((
                            'Error while creating artifacts for container with ID {container_id}. {error_msg}'
                        ).format(container_id=container_id, error_msg=artifacts_creation_msg))
                self._state['first_run'] = False
            except Exception as e:
                err = self._get_error_message_from_exception(e)
                self.debug_print("Error occurred while processing export list response from server. {}".format(err))
        return action_result.set_status(phantom.APP_SUCCESS)
def get_export(self, action_result):
self.save_progress('Getting ARC Export data')
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/export_profiles/{1}/export_and_ack'.format(self._arc_url.strip("/"), self._export_profile)
try:
request_response = requests.post(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
request_status = request_response.status_code
if 200 <= request_status <= 299:
headerField = []
try:
jsonText = json.loads(request_response.text)
if jsonText['total_hits'] == 0:
return RetVal(phantom.APP_SUCCESS, None)
for field in jsonText['fields']:
print('name=' + field['name'])
headerField.append(field['name'])
exportdata = []
for data in jsonText['data']:
entryLine = {}
headerPosition = 0
for dataValue in data:
if not dataValue:
entryLine[headerField[headerPosition]] = "null"
else:
entryLine[headerField[headerPosition]] = dataValue
headerPosition += 1
exportdata.append(entryLine)
return RetVal(phantom.APP_SUCCESS, exportdata)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
else:
data = self._handle_py_ver_compat_for_input_str(request_response.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(request_status, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
    def create_container(self, name, items):
        """Create a SOAR container for one export record.

        Only records whose detection source is not 'alert' and that carry
        dg_tags get a container.
        :param name: container name (alarm name + guid)
        :param items: one export record dict
        :return: container id on success, None otherwise
        """
        container_dict = dict()
        if not items['dg_alert.dg_detection_source'] == 'alert' and items[
                'dg_tags']:
            container_dict['name'] = name
            # dg_processed_time is epoch milliseconds; emit ISO-8601 with 'Z'
            container_dict['start_time'] = ('{time}Z').format(
                time=datetime.utcfromtimestamp(items['dg_processed_time'] / 1000).isoformat())
            container_dict['source_data_identifier'] = container_dict['name']
            container_dict['severity'] = self.convert_to_phantom_severity(
                items['dg_alarm_sev'])
            container_dict['sensitivity'] = self.convert_to_phantom_sensitivity(items['dg_class.dg_name'])
            custom_fields = {
                'threat type': (items['dg_tags']),
                'activity': (items['dg_utype'])
            }
            # Tag the container as "key=value" for each non-None custom field
            container_dict['tags'] = [('{}={}').format(x, custom_fields[x])
                                      for x in custom_fields
                                      if custom_fields[x] is not None]
            container_creation_status, container_creation_msg, container_id = self.save_container(
                container=container_dict)
            if phantom.is_fail(container_creation_status):
                self.save_progress((
                    'Error while creating container for alert {alert_name}. {error_message}'
                ).format(alert_name=items['dg_alarm_name'], error_message=container_creation_msg))
                return None
            else:
                return container_id
        return None
    def create_artifacts(self, alert, container_id):
        """ This function is used to create artifacts in given container using export data.
        :param alert: Data of single export
        :param container_id: ID of container in which we have to create the artifacts
        :return: status(success/failure), message
        """
        artifacts_list = []
        cat = 'alarm'
        # self.save_progress(('action=create_artifacts tenant={} artifact={}').format(self._client_id, json.dumps(alert)))
        # First four characters of the operation (dg_utype) select which
        # artifact groups apply to this record; 'Othe' is the fallback.
        operation_mapping = {
            'File': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'CD/D': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Netw':
            ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Send': ['Alarm', 'Process', 'Computer', 'User', 'Email'],
            'Proc': ['Alarm', 'Process', 'Computer', 'User'],
            'Appl': ['Alarm', 'Process', 'Computer', 'User'],
            'ADE ': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Prin':
            ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Othe': ['Alarm']
        }
        # Per artifact group: CEF field -> (export field name, CEF contains-types)
        artifacts_mapping = {
            'Alarm': {
                'Alarm_Name': ('dg_alarm_name', []),
                'Alarm_Severity': ('dg_alarm_sev', []),
                'Threat_Type': ('dg_tags', []),
                'Detection_Name': ('dg_det_name', []),
                'Alert_Category': ('dg_alert.dg_category_name', []),
                'Policy_Name':
                ('dg_alert.dg_alert.dg_alert.dg_policy.dg_name', []),
                'Action_Was_Blocked': ('dg_alert.dg_hc', []),
                'startTime': ('dg_local_timestamp', [])
            },
            'File': {
                'File_Name': ('dg_src_file_name', ['fileName']),
                'File_Size': ('dg_alert.dg_total_size', ['fileSize']),
                'Classification': ('dg_class.dg_name', []),
                'File_Was_Classified': ('dg_hc', []),
                'File_Type': ('dg_src_file_ext', ['fileType']),
                'File_Path': ('dg_alert.uad_sp', ['filePath']),
                'Destination_File_Path': ('dg_alert.uad_dp', ['filePath'])
            },
            'Process': {
                'Process_Name': ('dg_proc_file_name', ['process name']),
                'Parent_Process_Name': ('dg_parent_name', ['app']),
                'Process_Path': ('pi_fp', ['filePath']),
                'Command_Line': ('pi_cmdln', []),
                'MD5': ('dg_md5', ['filehash']),
                'SHA1': ('dg_sha1', ['filehash']),
                'SHA256': ('dg_sha256', ['filehash']),
                'VirusTotal_Status': ('dg_vt_status', [])
            },
            'Email': {
                'Attachment_File_Name':
                ('dg_attachments.dg_src_file_name', ['fileName']),
                'Attachment_Was_Classified': ('dg_attachments.uad_sfc', []),
                'Email_Subject': ('ua_msb', ['email']),
                'Email_Sender': ('ua_ms', ['email']),
                'Email_Recipient': ('dg_recipients.uad_mr', ['email']),
                'Email_Recipient_Domain':
                ('dg_recipients.dg_rec_email_domain', ['domain'])
            },
            'Network': {
                'Destination_Address': ('ua_ra', ['ip', 'ipv4']),
                'Request_URL': ('ua_up', ['url']),
                'Destination_DNS_Domain': ('ua_hn', ['domain']),
                'Remote_Port': ('ua_rp', ['ip'])
            },
            'Computer': {
                'Computer_Name': ('dg_machine_name', ['hostname']),
                'Computer_Type': ('dg_machine_type', []),
                'Source_Host_Name': ('dg_shn', []),
                'Source_IP': ('ua_sa', ['ip', 'ipv4']),
                'Source_Address': ('ua_sa', ['ip', 'ipv4'])
            },
            'User': {
                'User_Name': ('dg_user', ['suser']),
                'NTDomain': ('ua_dn', [])
            }
        }
        # Extra per-category fields added as a single "Alarm Detail" artifact
        specific_alert_mapping = {
            'alarm': {
                'dgarcUID': ('dg_guid', []),
                'dg_process_time': ('dg_process_time', []),
                'Activity': ('dg_utype', []),
                'os_version': ('os_version', []),
                'Policy': ('dg_alert.dg_policy.dg_name', []),
                'Printer_Name': ('uad_pn', []),
                'os': ('os', []),
                'browser': ('browser', []),
                'App_Category': ('appcategory', ['category']),
            }
        }
        # Build one artifact per group, keeping only fields present in the
        # record, then filter groups by the record's operation type.
        for (artifact_name, artifact_keys) in artifacts_mapping.items():
            temp_dict = {}
            cef = {}
            cef_types = {}
            # self.save_progress(('artifact_name={}').format(artifact_name))
            for (artifact_key, artifact_tuple) in artifact_keys.items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
            cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                operation = alert['dg_utype'][:4]
                if operation in operation_mapping.keys():
                    accepted_types = operation_mapping[operation]
                else:
                    accepted_types = operation_mapping['Othe']
                if artifact_name in accepted_types:
                    artifacts_list.append(temp_dict)
        # Append the category-specific "Alarm Detail" artifact.
        if cat in specific_alert_mapping:
            temp_dict = {}
            cef = {}
            cef_types = {}
            artifact_name = '{} Artifact'.format('Alarm Detail')
            # artifact_name = '{} Artifact'.format(alert.get('dg_alarm_name'))
            for (artifact_key, artifact_tuple) in specific_alert_mapping.get(cat).items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
            cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                artifacts_list.append(temp_dict)
        create_artifact_status, create_artifact_msg, _ = self.save_artifacts(artifacts_list)
        if phantom.is_fail(create_artifact_status):
            return (phantom.APP_ERROR, create_artifact_msg)
        return (phantom.APP_SUCCESS, 'Artifacts created successfully')
def convert_to_phantom_severity(self, dg_severity):
if dg_severity == 'Critical':
phantom_severity = 'High'
elif dg_severity == 'High':
phantom_severity = 'Medium'
else:
phantom_severity = 'Low'
return phantom_severity
# mapping classification name to dlp_high, dlp_restrict,dlp_medium,dlp_low
def convert_to_phantom_sensitivity(self, dg_classification):
if dg_classification[-3:] == 'igh':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'ted':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'med':
phantom_sensitivity = 'amber'
elif dg_classification[-3:] == 'low':
phantom_sensitivity = 'green'
else:
phantom_sensitivity = 'white'
return phantom_sensitivity
def create_dict_hash(self, input_dict):
if not input_dict:
return
else:
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
self.debug_print("Input dictionary is {}".format(self._handle_py_ver_compat_for_input_str(input_dict_str)))
return
except Exception as e:
err = self._get_error_message_from_exception(e)
self.debug_print("Handled exception in '_create_dict_hash'", err)
return
def get_watchlist_id(self, watchListName, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ''
if 200 <= r.status_code <= 299:
jsonText = json.loads(r.text)
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['display_name']).lower() == watchListName.lower():
list_id = jText['name']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, list_id)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), list_id)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), list_id)
    def _check_watchlist_id(self, watch_list_id, watchlist_entry, action_result):
        """Look up *watchlist_entry* (case-insensitive) in watchlist *watch_list_id*.

        :return: RetVal(status, value_id); value_id is '' when the entry is
                 not present.
        """
        full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
        try:
            r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, watch_list_id),
                             headers=self._client_headers,
                             verify=False)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
        try:
            if 200 <= r.status_code <= 299:
                jsonText = json.loads(r.text)
                entryExists = False
                for jText in jsonText:
                    if self._handle_py_ver_compat_for_input_str(jText['value_name']).lower() == watchlist_entry.lower():
                        # NOTE(review): dead store -- the next line returns,
                        # so entryExists is never read as True.
                        entryExists = True
                        return RetVal(phantom.APP_SUCCESS, jText['value_id'])
                if not entryExists:
                    return RetVal(phantom.APP_SUCCESS, '')
            else:
                data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
                message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
                return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def get_list_id(self, list_name, list_type, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/lists/{1}'.format(self._arc_url.strip("/"), list_type)
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ""
if 200 <= r.status_code <= 299:
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['name']).lower() == list_name.lower():
list_id = jText['id']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, None)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def _add_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = "{0} to watchlist={1}".format(watchlist_entry, watchlist_name)
# self.save_progress(('Watchlistname={} Watchlistentry={}').format(watchlist_name, watchlist_entry))
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
watch_list_entry_json = '[{"value_name":"%s"}]' % watchlist_entry
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/values/'.format(full_url, watch_list_id),
data=watch_list_entry_json,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
    def _remove_watchlist_entry(self, param):
        """Remove an entry from a named watchlist (resolve value to id, then delete by id)."""
        self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        self.debug_print(param)
        watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
        watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
        msg_string = '{0} from watchlist={1}'.format(watchlist_entry, watchlist_name)
        ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        if watch_list_id:
            # Resolve the entry's value_id; deletion is by id, not by value.
            ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
            if phantom.is_fail(ret_val):
                return action_result.get_status()
            if watch_list_value_id:
                full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
                try:
                    r = requests.delete(url='{0}{1}/values/{2}'.format(full_url, watch_list_id, watch_list_value_id),
                                        headers=self._client_headers,
                                        verify=False)
                except Exception as e:
                    err = self._get_error_message_from_exception(e)
                    return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
                if 200 <= r.status_code <= 299:
                    return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
                else:
                    return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
            else:
                return action_result.set_status(phantom.APP_ERROR, 'Could not find entry {0}'.format(msg_string))
        else:
            return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _check_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = '{0} in watchlist={1}'.format(watchlist_entry, watchlist_name)
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_value_id:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _add_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} to componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.put(url='{0}{1}/append'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _remove_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} from componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/delete'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _check_componentlist_entry(self, param):
    """Check (case-insensitively) whether an entry exists in a component list.

    Fixes over the previous revision:
      * a non-2xx HTTP response is now reported as APP_ERROR instead of
        being conflated with "entry not found";
      * a populated list with no matching entry now returns an explicit
        not-found status (the old code fell off the loop and implicitly
        returned None).
    """
    self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    self.debug_print(param)
    componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
    componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
    msg_string = '{0} in componentlist={1}'.format(componentlist_entry, componentlist_name)
    ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    if not list_id:
        return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
    full_url = '{0}/lists/'.format(self._arc_url.strip("/"))
    try:
        r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, list_id),
                         headers=self._client_headers,
                         verify=False)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
    if not (200 <= r.status_code <= 299):
        # HTTP failure is an error, not "entry absent".
        return action_result.set_status(phantom.APP_ERROR, 'Failed to find {0}'.format(msg_string))
    try:
        jsonText = json.loads(r.text)
        for jText in jsonText:
            if self._handle_py_ver_compat_for_input_str(jText['content_value']).lower() == componentlist_entry.lower():
                return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
        # List fetched fine but the entry is not in it.
        return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response from the server. {0}'.format(err))
def handle_action(self, param):
    """Dispatch the current action identifier to its handler method.

    Unknown identifiers fall through and report APP_SUCCESS, matching
    the platform's expectation for unhandled actions.
    """
    action_id = self.get_action_identifier()
    self.debug_print('action_id', self.get_action_identifier())
    dispatch = {
        'test_connectivity': self._handle_test_connectivity,
        'on_poll': self._handle_on_poll,
        'add_watchlist_entry': self._add_watchlist_entry,
        'check_watchlist_entry': self._check_watchlist_entry,
        'remove_watchlist_entry': self._remove_watchlist_entry,
        'add_componentlist_entry': self._add_componentlist_entry,
        'remove_componentlist_entry': self._remove_componentlist_entry,
        'check_componentlist_entry': self._check_componentlist_entry,
    }
    handler = dispatch.get(action_id)
    if handler is None:
        return phantom.APP_SUCCESS
    return handler(param)
def initialize(self):
    """Load persisted state and read the asset configuration.

    Returns APP_SUCCESS, or APP_ERROR if the interpreter version cannot
    be determined.
    """
    # Load the state in initialize, use it to store data
    # that needs to be accessed across actions
    self.debug_print("Action: 'initialize' Status: start")
    self._state = self.load_state()
    self.debug_print(("Action: 'initialize' State: {}").format(self._state))
    # Fetching the Python major version
    try:
        self._python_version = int(sys.version_info[0])
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt.
        return self.set_status(phantom.APP_ERROR, "Error occurred while fetching the Phantom server's Python major version")
    config = self.get_config()
    self._auth_url = self._handle_py_ver_compat_for_input_str(config['auth_url'])
    self._arc_url = self._handle_py_ver_compat_for_input_str(config['arc_url'] + '/rest/1.0/')
    self._client_id = self._handle_py_ver_compat_for_input_str(config['client_id'])
    self._client_secret = config['client_secret']
    self._export_profile = self._handle_py_ver_compat_for_input_str(config['export_profile'])
    self._client_headers = DG_CLIENT_HEADER
    return phantom.APP_SUCCESS
def finalize(self):
    """Finalize hook; always reports success.

    State saving is intentionally left disabled in this connector.
    """
    # Save the state, this data is saved across actions and app upgrades
    # self.save_state(self._state)
    return phantom.APP_SUCCESS
def validateApiToken(self):
    """Return True iff the cached OAuth bearer token is still active.

    POSTs the token to the PingFederate introspection endpoint; any
    transport error, non-200 status, or an inactive token yields False.
    """
    if self._api_key == '':
        return False
    payload = {
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'grant_type':
        'urn:pingidentity.com:oauth2:grant_type:validate_bearer',
        'token': self._api_key,
    }
    try:
        # TLS verification disabled to match the rest of the connector.
        api_key_response = requests.post(url='{}/as/introspect.oauth2'.format(self._auth_url.strip("/")),
                                         headers=DG_HEADER_URL,
                                         data=payload,
                                         verify=False)
        response_json = api_key_response.json()
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        self.debug_print(err)
        return False
    if api_key_response.status_code == 200 and response_json['active']:
        return True
    return False
def requestApiToken(self):
    """Ensure self._api_key holds a valid bearer token; set the header.

    Returns a (status, error_message) tuple; error_message is None on
    success.

    Fixes over the previous revision:
      * the Authorization header was assigned twice in a row (an
        update() immediately followed by an identical item assignment);
      * the bearer token itself was written to the progress log -- a
        credential leak; the log line no longer includes the secret.
    """
    if not self.validateApiToken():
        payload = {
            'client_id': self._client_id,
            'client_secret': self._client_secret,
            'grant_type': 'client_credentials',
            'scope': 'client',
        }
        try:
            url = '{0}/as/token.oauth2'.format(self._auth_url.strip("/"))
            api_key_response = requests.post(url=url,
                                             headers=DG_HEADER_URL,
                                             data=payload,
                                             verify=False)
        except requests.exceptions.InvalidSchema:
            error_message = 'Error connecting to server. No connection adapters were found for %s' % (url)
            return (phantom.APP_ERROR, error_message)
        except requests.exceptions.InvalidURL:
            error_message = 'Error connecting to server. Invalid URL %s' % (url)
            return (phantom.APP_ERROR, error_message)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return (phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
        try:
            response_json = api_key_response.json()
            if api_key_response.status_code == 200:
                self._api_key = response_json['access_token']
                self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
                # Deliberately do not log the token value.
                self.save_progress('Got API Token')
                return (phantom.APP_SUCCESS, None)
            else:
                return (phantom.APP_ERROR, self._handle_py_ver_compat_for_input_str(api_key_response.text))
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return (phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err))
    else:
        self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
        return (phantom.APP_SUCCESS, None)
if __name__ == '__main__':
    # Stand-alone test harness: run one action JSON against the connector,
    # optionally logging in first to obtain a platform session id.
    import pudb
    import argparse
    pudb.set_trace()
    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    args = argparser.parse_args()
    session_id = None
    username = args.username
    password = args.password
    if username is not None and password is None:
        # User specified a username but not a password, so ask.
        import getpass
        # FIX: a credential-scrubbing placeholder ('<PASSWORD>') had replaced
        # this call and left the script syntactically broken; prompt without
        # echoing instead.
        password = getpass.getpass('Password: ')
    if username and password:
        try:
            login_url = DigitalGuardianArcConnector._get_phantom_base_url() + '/login'
            print('Accessing the Login page')
            r = requests.get(login_url, verify=False)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url
            print('Logging into Platform to get the session id')
            r2 = requests.post(login_url,
                               verify=False,
                               data=data,
                               headers=headers)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print('Unable to get session id from the platform. Error: ' + str(e))
            exit(1)
    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = DigitalGuardianArcConnector()
        connector.print_progress_message = True
        if session_id is not None:
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])
        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))
    exit(0)
| StarcoderdataPython |
def makeStringList(stringListFileName):
    """Read a text file and return its lines as a list of stripped strings.

    Uses a context manager so the file handle is closed even if reading
    raises (the previous version leaked the handle on error).
    """
    with open(stringListFileName) as stringListFile:
        return [line.strip() for line in stringListFile]
def makeSortSNPsMethylPlusScript(readsFileNameSuffix, filePath, chromListFileName, ouputFileNameSuffix, scriptFileName):
    """Write a shell script with one sort-and-recompress command per chromosome.

    Each emitted line zcat's the per-chromosome reads file, sorts it on the
    SNP and read coordinate columns, and gzips the result under a new suffix.
    """
    chromosomes = makeStringList(chromListFileName)
    # Everything before the final ".<extension>" of the input suffix.
    extension = readsFileNameSuffix.split(".")[-1]
    stem = readsFileNameSuffix[0:len(readsFileNameSuffix) - len(extension) - 1]
    with open(scriptFileName, 'w+') as scriptFile:
        for chromosome in chromosomes:
            inputFileName = filePath + "/" + chromosome + "_" + readsFileNameSuffix
            outputFileName = filePath + "/" + chromosome + "_" + stem + "_" + ouputFileNameSuffix
            command = "zcat " + inputFileName + " | sort -k5,5 -k6,6n -k2,2 -k3,3n -T /tmp " + " | gzip > " + outputFileName + "\n"
            scriptFile.write(command)
if __name__=="__main__":
    import sys
    # Positional CLI: suffix, directory, chromosome list, output suffix, script.
    readsFileNameSuffix = sys.argv[1] # Should not start with _, but should end with .gz
    filePath = sys.argv[2] # Should not end with /
    chromListFileName = sys.argv[3]
    ouputFileNameSuffix = sys.argv[4] # Should not start with _, but should end with .gz
    scriptFileName = sys.argv[5]
    makeSortSNPsMethylPlusScript(readsFileNameSuffix, filePath, chromListFileName, ouputFileNameSuffix, scriptFileName)
| StarcoderdataPython |
3365483 | <reponame>flo443/AIX360<filename>aix360/algorithms/TracInF/TIF_utils.py
from transformers import BertTokenizer, RobertaTokenizer
import json
import torch
from collections import Counter
from tqdm import trange
import numpy as np
debug = False
class DatasetReader():
    """Reader for aspect-based sentiment JSON datasets.

    Converts (document, aspect term) pairs into tokenized model inputs for
    either 'bert-base-uncased' or 'roberta-large', optionally replacing the
    annotated rationale spans ("answers") with mask tokens.
    """

    def __init__(self, max_len, BERT_name):
        # max_len: tokenizer truncation/padding length.
        # BERT_name: 'bert-base-uncased' or 'roberta-large'.
        self.max_len = max_len
        self.bertname = BERT_name

    def read_data(self,
                  filename,
                  aspect_only_by_rule=False,
                  mask_version=False,
                  return_origin=False,
                  filter=False,
                  id_filter_list=None,
                  is_sentihood=False,
                  is_test=False,
                  select_test_list=None
                  ):
        """Load *filename* and return ids, encodings and labels.

        Returns (dids, tids, input_ids, attention_mask, labels); when
        return_origin is True a list of the raw fields is appended.
        NOTE: `filter` shadows the builtin but is kept for interface
        compatibility; `aspect_only_by_rule` is accepted but unused here.
        """
        dj = data_loader(filename)
        # Sentihood is binary; the default label map is 3-way.
        if is_sentihood: label_map = {'positive':1, 'negative':0}
        else: label_map = {'positive': 2, 'neutral': 1, 'negative': 0}
        if filter:
            if is_test:
                # Keep only the explicitly selected (doc id, term id) pairs.
                pairs = [(int(did), int(tid), data['original_text'], term['term'], term['answers'],
                          label_map[term['polarity']])
                         for did, data in dj.items()
                         for tid, term in data['terms'].items() if
                         (int(did), int(tid)) in select_test_list]
            else:
                # Drop the (doc id, term id) pairs named in id_filter_list.
                pairs = [(int(did), int(tid), data['original_text'], term['term'], term['answers'],
                          label_map[term['polarity']])
                         for did, data in dj.items()
                         for tid, term in data['terms'].items() if
                         (int(did), int(tid)) not in id_filter_list]
        else:
            pairs = [
                (int(did), int(tid), data['original_text'], term['term'], term['answers'], label_map[term['polarity']])
                for did, data in dj.items()
                for tid, term in data['terms'].items()
            ]
        dids = [did for (did, tid, a, t, an, b) in pairs]
        tids = [tid for (did, tid, a, t, an, b) in pairs]
        origin_sentences = [a for (did, tid, a, t, an, b) in pairs]
        terms = [t for (did, tid, a, t, an, b) in pairs]
        labels = [b for (did, tid, a, t, an, b) in pairs]
        print('Data Size is: {0}, It is mask version? {1}, label bias: {2}'.format(len(labels), mask_version,
                                                                                   Counter(labels)))
        if self.bertname == 'bert-base-uncased':
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
            sentences = [
                '[CLS] ' + t + ' [SEP] ' + self.mask_tokens(s, an, tokenizer, t, mask_version, '[MASK]', '[CLS]',
                                                            '[SEP]') + ' [SEP] ' for (did, tid, s, t, an, b) in pairs]
        elif self.bertname == 'roberta-large':
            tokenizer = RobertaTokenizer.from_pretrained('roberta-large', do_lower_case=True)
            sentences = []
            for did, tid, s, t, an, b in pairs:
                masked_sentence = self.mask_tokens(s, an, tokenizer, t, mask_version, '<mask>', '<s>', '</s>',
                                                   start_symbol='Ġ')
                if masked_sentence:
                    sentence = '<s> ' + t + ' </s> <s> ' + masked_sentence + ' </s> '
                else:
                    # mask_tokens returned None (term not found): fall back
                    # to masking the aspect slot, keeping the sentence intact.
                    sentence = '<s> ' + '<mask>' + ' </s> <s> ' + s + ' </s> '
                sentences.append(sentence)
        encoding = tokenizer(sentences, return_tensors='pt', padding=True, truncation=True, max_length=self.max_len)
        if return_origin:
            return dids, tids, encoding['input_ids'], encoding['attention_mask'], labels, [dids, tids, origin_sentences,
                                                                                           terms, labels]
        return dids, tids, encoding['input_ids'], encoding['attention_mask'], labels

    def mask_tokens(self, sentence, answers, tokenizer, term, to_mask, mask_token, start_token, end_token,
                    start_symbol=''):
        """Return *sentence* with rationale spans replaced by *mask_token*.

        Falls back to heuristics (masking around the aspect term) when no
        usable annotated answer exists; returns None when the term cannot
        be located at all.
        """
        # if not to mask, return sentence itself.
        if not to_mask: return sentence
        tokens = tokenizer.tokenize(sentence)
        tokens = [token.strip(start_symbol) for token in tokens]
        tokenized_sentence = ' '.join(tokens)
        valid_answers = [ans for aid, ans in answers[0].items()
                         if aid not in 'rules'
                         and start_token not in ans
                         and end_token not in ans
                         and len(ans) > 0
                         ]
        if len(valid_answers) == 0:
            # if no answer, mask tokens between close split punctuations
            puncsidx = [i for i, token in enumerate(tokens) if token in set([',', '.', '?', '!', '--'])]
            termidx = [i for i, token in enumerate(tokens) if token == term]
            if len(termidx) == 0:
                if debug: print('no term matched. return whole sentence.')
                return None  # can not find term in sentence. Skip it.
            if len(puncsidx) == 0:
                if debug: print('Term matched but no punc. Mask till end of sentence.')
                return ' '.join([token for i, token in enumerate(tokens) if
                                 i > max(termidx)])  # mask words that are after the last matched term.
            maskposs = []
            for tid in termidx:
                # search to right for punc
                endpos = [pid for pid in puncsidx if pid > tid]
                if len(endpos) == 0:
                    endpos = len(tokens)
                else:
                    endpos = min(endpos)
                # search backward for punc
                stpos = [pid for pid in puncsidx if pid < tid]
                if len(stpos) == 0:
                    stpos = 0
                else:
                    stpos = max(stpos)
                maskposs.append((stpos, endpos))
            for stpos, endpos in maskposs:
                tokens = [mask_token if i >= stpos and i < endpos else token for i, token in enumerate(tokens)]
            if debug: print('no answer matched, but heuristically matched a span around term.')
            return ' '.join(tokens)
        answers = Counter(valid_answers)
        for ans, freq in answers.most_common():
            stpos = tokenized_sentence.find(ans)
            if stpos == -1:
                # FIX: this case previously hit a stray debugger breakpoint()
                # and then sliced with stpos == -1; try the next answer instead.
                continue
            edpos = stpos + len(ans)
            if debug: print('answer whole string matched')
            # FIX: both offsets are computed on tokenized_sentence, so the
            # suffix must be taken from it as well (the old code sliced the
            # raw `sentence` with tokenized offsets).
            return tokenized_sentence[:stpos] + ' ' + ' '.join([mask_token] * len(ans.split(' '))) + ' ' + tokenized_sentence[edpos:]
        # mask individual words instead.
        for ans, freq in answers.most_common():
            anstoks = {a: 0 for a in ans.strip().split(' ')}
            if debug: print('answer per-term matched ')
            return ' '.join([mask_token if token in anstoks else token for token in tokens])

    # Deprecated
    def mask(self, sentence, answers, tokenizer, term, to_mask, mask_token, start_token, end_token):
        """Deprecated span-masking variant.

        WARNING: relies on self.fuzzymatch / self.span_mask, which are not
        defined on this class -- calling it raises AttributeError. Kept
        only for interface compatibility.
        """
        if not to_mask:
            return sentence
        valid_answers = [ans for aid, ans in answers[0].items()
                         if aid not in 'rules'
                         and start_token not in ans
                         and end_token not in ans
                         and len(ans) > 0
                         ]
        answers = Counter(valid_answers)
        matched = None
        for ans, freq in answers.most_common():
            matched = self.fuzzymatch(sentence, ans, mask_token)
            if matched:
                print(f'span found! term: {term} - answer:{ans} -sentence:{sentence}')
                return matched
        print(f'span not found from all answers!! term: {term} -sentence:{sentence}')
        return self.span_mask(sentence, term, mask_token)
def data_loader(filename):
    """Load and return the JSON document stored at *filename*.

    Uses a context manager so the file handle is closed deterministically
    (the previous version passed an unclosed open() into json.load).
    """
    with open(filename, 'r') as f:
        return json.load(f)
def save_model(net, path):
    """Serialize the model's state_dict to *path* (torch checkpoint)."""
    torch.save(net.state_dict(), path)
def flat_accuracy(preds, labels):
    """Fraction of rows where the argmax of *preds* equals *labels*."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return np.sum(predicted == gold) / len(gold)
def training(BERT_name, BERT_Model, epochs, train_dataloader, dev_dataloader, device, optimizer, model_output):
    """Fine-tune the top layers of BERT_Model and keep the best checkpoint.

    Only encoder layers 10/11 and the pooler are unfrozen. After each epoch
    the model is evaluated on dev_dataloader and saved to
    '<BERT_name>-<model_output>' whenever dev accuracy improves.

    Returns the best accumulated dev accuracy.

    FIX: the best-accuracy tracker used to be reset to 0 *inside* the epoch
    loop, so every epoch overwrote the checkpoint regardless of quality; it
    is now initialised once before training.
    """
    train_loss_set = []
    global_eval_accuracy = 0
    print("Starting training")
    # fine tune only last layer and output layer.
    for name, param in BERT_Model.base_model.named_parameters():
        param.requires_grad = False
        if 'encoder.layer.11' in name or 'encoder.layer.10' in name or 'pooler.dense' in name:
            param.requires_grad = True
    for epoch in trange(epochs, desc="Epoch"):
        # ---- Training ----
        BERT_Model.train()
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        print()
        for step, batch in enumerate(train_dataloader):
            print("\rEpoch:", epoch, "step:", step, end='')
            batch = tuple(t.to(device) for t in batch)
            b_input_ids, b_input_mask, b_labels = batch
            # Clear accumulated gradients before the forward pass.
            optimizer.zero_grad()
            outputs = BERT_Model(
                b_input_ids,
                attention_mask=b_input_mask,
                labels=b_labels)
            # The model computes the loss when labels are supplied.
            train_loss_set.append(float(outputs.loss))
            outputs.loss.backward()
            optimizer.step()
            tr_loss += float(outputs.loss)
            nb_tr_examples += b_input_ids.size(0)
            nb_tr_steps += 1
        print("Train loss: {}".format(tr_loss / nb_tr_steps / nb_tr_examples))
        ##### Validation #####
        BERT_Model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in dev_dataloader:
            batch = tuple(t.to(device) for t in batch)
            b_input_ids, b_input_mask, b_labels = batch
            # No gradients needed during evaluation.
            with torch.no_grad():
                outputs = BERT_Model(
                    b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
            logits = outputs.logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()
            tmp_eval_accuracy = flat_accuracy(logits, label_ids)
            eval_accuracy += tmp_eval_accuracy
            nb_eval_steps += 1
        print("Validation Accuracy: {}".format(eval_accuracy / nb_eval_steps))
        # Keep the checkpoint only when dev accuracy improves.
        if eval_accuracy > global_eval_accuracy:
            global_eval_accuracy = eval_accuracy
            save_model(BERT_Model, BERT_name + '-' + model_output)
    return global_eval_accuracy
| StarcoderdataPython |
"""
Entradas
Chelines-->int-->CA
Dracmas-->int-->DG
Pesetas-->int-->P
salidas
CA-->int-->P
DG-->int-->FrancoFrances
P-->int-->Dolares
P-->int-->LirasItalianas
"""
# FIX: the opening docstring previously had a stray "84178 | " dataset
# artifact fused to it, which made the module unparseable.
# entrada (input)
CA = int(input("Ingrese la cantidad de Chelines Austriacos a cambiar: "))
DG = int(input("Ingrese la cantidad de Dracmas Griegos a cambiar: "))
P = int(input("Ingrese la cantidad de pesetas a cambiar: "))
# caja negra (conversions) -- the constants are fixed exchange rates from
# the original exercise, presumably scaled by 100; TODO confirm scale.
Pesetas = (CA * 956871) / 100
FrancoFrances = ((DG * 88607) / 100) * (1 / 20110)
Dolares = P / 122499
LirasItalianas = (P * 100) / 9289
# salida (output)
print("La cantidad de Chelines Austriacos a Pesetas es: ""{:.0F}".format(Pesetas))
print("La cantidad de Dracmas Griegos a Franco Frances es: ""{:.0F}".format(FrancoFrances))
print("La cantidad de Pesetas a Dolares es: ""{:.0F}".format(Dolares))
print("La cantidad de Pesetas a Liras Italianas es: ""{:.0F}".format(LirasItalianas))
| StarcoderdataPython |
1750206 | <reponame>iHamburg/FZQuant
#!/usr/bin/env python
# coding: utf8
from pymongo import MongoClient
from pyquant.config import mongodb as config
import pandas as pd
import json
import pydash
conn = MongoClient(config['host'], config['port'])
db = conn.fzquant
def insert_data(col_name, df):
    """Insert a DataFrame into the named collection, one document per row.

    :param col_name: collection name (e.g. a stock code)
    :param df: DataFrame to store
    TODO: deduplication is still missing (carried over from the original).
    """
    # 获取collection
    col = db[col_name]
    # 插入数据
    print('开始向', col_name, '插入数据')
    # Collection.insert() was deprecated in PyMongo 3.0 and removed in 4.0;
    # insert_many is the supported bulk-insert API.
    col.insert_many(json.loads(df.to_json(orient='records')))
def get_data(col_name, output='df', **kwargs):
    """Fetch daily stock data for collection *col_name* from MongoDB.

    Keyword args:
        fromdate -- optional inclusive lower bound on the 'date' field
        todate   -- optional exclusive upper bound on the 'date' field
    output:
        'df'   -- pandas DataFrame indexed by date with OHLCV columns
        'obj'  -- list of dicts without the Mongo-internal fields
        'list' -- list of [date, open, close, low, high, volume] rows
    Any other *output* value returns None (unchanged from the original).
    """
    query = {}
    date_query = {}
    if 'fromdate' in kwargs.keys() and kwargs['fromdate']:
        date_query['$gte'] = kwargs['fromdate']
    if 'todate' in kwargs.keys() and kwargs['todate']:
        date_query['$lt'] = kwargs['todate']
    if date_query:
        query['date'] = date_query
    col = db[col_name]
    cursor = col.find(query)
    if output == 'df':
        df = pd.DataFrame(list(cursor))
        del df['_id']
        del df['code']
        df['date'] = df['date'].astype('datetime64')
        df = df.set_index('date')
        cols = ['open', 'high', 'close', 'low', 'volume']
        # FIX: DataFrame.ix was removed in pandas 1.0; plain label selection
        # yields the same column subset and order.
        df = df[cols]
        return df
    elif output == 'obj':
        obj_list = list(cursor)
        return [pydash.omit(item, '_id', 'code') for item in obj_list]
    elif output == 'list':
        obj_list = list(cursor)
        return [[item['date'], item['open'], item['close'], item['low'], item['high'], item['volume']] for item in obj_list]
def insertTickData(df):
    """Insert tick (order book) data rows into the tickData collection.

    :param df: DataFrame of tick records; each row becomes one document.
    """
    col = db.tickData
    # Collection.insert() was removed in PyMongo 4.0; use insert_many.
    col.insert_many(json.loads(df.to_json(orient='records')))
if __name__ == '__main__':
    # Smoke test: fetch one collection and print it.
    # FIX: a dataset separator token fused onto the final print line has
    # been removed.
    print("Begin")
    print(get_data('s002119', output='obj'))
    print('===== ENDE ====')
7538 | <gh_stars>100-1000
import os.path as osp
# Root directory of project
ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Path to data dir
_DATA_DIR = osp.abspath(osp.join(ROOT_DIR, 'data'))
# Required dataset entry keys
_IM_DIR = 'image_directory'
_ANN_FN = 'annotation_file'
# Available datasets
COMMON_DATASETS = {
'coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/instances_train2017.json',
},
'coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/instances_val2017.json',
},
'coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test2017.json',
},
'coco_2017_test-dev': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
},
'keypoints_coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_train2017.json'
},
'keypoints_coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_val2017.json'
},
'keypoints_coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test2017.json'
},
'keypoints_coco_2017_test-dev': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
},
'dense_coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_train2017.json',
},
'dense_coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_val2017.json',
},
'dense_coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_test.json',
},
'CIHP_train': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/train_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_train.json',
},
'CIHP_val': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/val_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_val.json',
},
'CIHP_test': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/test_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_test.json',
},
'MHP-v2_train': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/train_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_train.json',
},
'MHP-v2_val': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/val_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_val.json',
},
'MHP-v2_test': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_all.json',
},
'MHP-v2_test_inter_top10': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top10.json',
},
'MHP-v2_test_inter_top20': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top20.json',
},
'PASCAL-Person-Part_train': { # new addition by soeaver
_IM_DIR:
_DATA_DIR + '/PASCAL-Person-Part/train_img',
_ANN_FN:
_DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_train.json',
},
'PASCAL-Person-Part_test': { # new addition by soeaver
_IM_DIR:
_DATA_DIR + '/PASCAL-Person-Part/test_img',
_ANN_FN:
_DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_test.json',
}
}
| StarcoderdataPython |
189557 | import numpy as np
''' DATA
NAME WEIGHT GROWTH GENDER
Alice 133 65 F
Bob 160 72 M
Charlie 152 70 M
Diana 120 60 F
NAME WEIGHT(Minus 135) GROWTH(Minus 66) GENDER(1 - F, 0 - M)
Alice -2 -1 1
Bob 25 6 0
Charlie 17 4 0
Diana -15 -6 1
Обычно сдвигают на среднее значение.
'''
def mse_loss(y_true, y_pred):
    """Mean squared error between two equally shaped numpy arrays."""
    diff = y_true - y_pred
    return (diff ** 2).mean()
def sigmoid(x):
    """Logistic sigmoid activation: f(x) = 1 / (1 + e^(-x))."""
    return 1.0 / (1.0 + np.exp(-x))
def deriv_sigmoid(x):
    """Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x)).

    The sigmoid is computed inline rather than via the sibling helper;
    the value is identical.
    """
    fx = 1.0 / (1.0 + np.exp(-x))
    return fx * (1.0 - fx)
class OurNeuralNetwork:
    '''
    A tiny fully connected network trained with per-sample SGD:
    - 2 inputs (weight and height offsets)
    - one hidden layer with 2 neurons (h1, h2)
    - an output layer with 1 neuron (o1)
    '''
    def __init__(self):
        # Weights, randomly initialised (standard normal).
        self.w1 = np.random.normal() # weight input - w1 - h1
        self.w2 = np.random.normal() # height input - w2 - h1
        self.w3 = np.random.normal() # weight input - w3 - h2
        self.w4 = np.random.normal() # height input - w4 - h2
        self.w5 = np.random.normal() # h1 - w5 - o1
        self.w6 = np.random.normal() # h2 - w6 - o1
        # Biases.
        self.b1 = np.random.normal() # h1
        self.b2 = np.random.normal() # h2
        self.b3 = np.random.normal() # o1
    def feedforward(self, x):
        """Forward pass for one 2-element sample; returns the o1 activation."""
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
        return o1
    def train(self, data, all_y_trues):
        '''
        - data is an (n x 2) numpy array, n = number of observations.
        - all_y_trues is a numpy array with n elements whose entries
          correspond row-by-row to the observations in data.
        '''
        learn_rate = 0.1
        epochs = 1000
        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                # Forward pass, keeping pre-activation sums for backprop.
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)
                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)
                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1
                # dL/dy_pred for squared error loss.
                d_L_d_ypred = -2 * (y_true - y_pred)
                # Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)
                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)
                # Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)
                # Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)
                # --- Update weights and biases (chain rule, gradient step)
                # Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1
                # Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2
                # Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3
            # --- Compute total loss at the end of every 10th epoch
            if epoch % 10 == 0:
                y_preds = np.apply_along_axis(self.feedforward, 1, data)
                loss = mse_loss(all_y_trues, y_preds)
                print("Epoch %d loss: %.3f" % (epoch, loss))
# Define the training set (features are weight/height offsets from the mean)
data = np.array([
    [-2, -1],  # Alice
    [25, 6],   # Bob
    [17, 4],   # Charlie
    [-15, -6], # Diana
])
all_y_trues = np.array([
    1, # Alice
    0, # Bob
    0, # Charlie
    1, # Diana
])
# Train our neural network!
network = OurNeuralNetwork()
network.train(data, all_y_trues)
''' NEW DATA FOR TESTS
NAME WEITH GROWTH GENDER
Alice 67 165 F
Bob 90 113 M
Hero 40 190 M
Cursed 20 220 M
NAME WEIGTH (minus 54) GROWTH (minus 172) GENDER
Alice 13 -7 1
Bob 36 18 0
Hero -14 18 0
Cursed 6 28 0
'''
# Held-out samples expressed in the same offset space as the table above.
alice = np.array([13, -7])
bob = np.array([36, 18])
hero = np.array([-14, 18])
cursed = np.array([6, 28])
# Predicted probability of class 1 ("F") for each new sample.
print("Alice: %.3f" % network.feedforward(alice))
print("Bob: %.3f" % network.feedforward(bob))
print("Hero: %.3f" % network.feedforward(hero))
print("Cursed: %.3f" % network.feedforward(cursed))
| StarcoderdataPython |
4800782 | import os
import yaml
# import copy
import testinfra.utils.ansible_runner
import requests
from ansible.inventory.manager import InventoryManager
from ansible.vars.manager import VariableManager
from ansible.parsing.dataloader import DataLoader
ansible_runner = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
)
testinfra_hosts = ansible_runner.get_hosts('all')
APP_NAME = 'myapp'
__authorized_session = None
def get_authorized_session(cluster_cookie):
    """Return a lazily created module-wide requests.Session.

    The session authenticates as 'admin' with the cluster cookie and is
    reused across calls.
    """
    global __authorized_session
    if __authorized_session is None:
        session = requests.Session()
        session.auth = ('admin', cluster_cookie)
        __authorized_session = session
    return __authorized_session
def check_conf_file(conf_file, conf_section, conf):
    """Assert ownership and YAML content of an instance config file.

    conf_file is a testinfra File object; conf_section must map exactly
    to *conf* inside the parsed YAML document.
    """
    assert conf_file.exists
    assert conf_file.user == 'tarantool'
    assert conf_file.group == 'tarantool'
    # safe_load is sufficient for plain mapping data and avoids
    # constructing arbitrary Python objects from the file.
    conf_file_dict = yaml.safe_load(conf_file.content_string)
    assert conf_section in conf_file_dict
    assert conf_file_dict[conf_section] == conf
def get_cluster_cookie():
    """Read cartridge_cluster_cookie from the 'cluster' inventory group."""
    manager = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    cluster_vars = manager.groups['cluster'].get_vars()
    return cluster_vars['cartridge_cluster_cookie']
def get_configured_instances():
    """Map inventory_hostname -> host vars for every host in hosts.yml."""
    manager = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    configured_instances = {}
    for host in manager.hosts:
        host_vars = manager.hosts[host].get_vars()
        configured_instances[host_vars['inventory_hostname']] = host_vars
    return configured_instances
def get_instance_vars(instance):
    """Return the raw inventory vars for a single host from hosts.yml."""
    manager = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    return manager.hosts[instance].get_vars()
def get_variable_vaule(name, default=None):
    """Look up *name* in the 'cluster' group vars, falling back to *default*.

    NOTE: the name ('vaule') is a typo but is part of the module's public
    interface (used by other functions here), so it is kept.
    """
    inventory = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    all_group_vars = inventory.groups['cluster'].get_vars()
    # dict.get replaces the manual membership test of the previous version.
    return all_group_vars.get(name, default)
def get_configured_replicasets():
    """Group non-expelled, non-stateboard hosts by replicaset_alias.

    Returns {alias: {instances, failover_priority, roles, all_rw, weight,
    vshard_group}}; the first instance seen for an alias supplies the
    replicaset-level settings.
    """
    inventory = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    variable_manager = VariableManager(loader=DataLoader(), inventory=inventory)
    replicasets = {}
    for instance in inventory.hosts:
        host_vars = variable_manager.get_vars(host=inventory.hosts[instance])
        if 'replicaset_alias' not in host_vars:
            continue
        # Expelled instances and the stateboard never join a replicaset.
        if instance_is_expelled(host_vars) or instance_is_stateboard(host_vars):
            continue
        replicaset_alias = host_vars['replicaset_alias']
        if replicaset_alias not in replicasets:
            replicasets[replicaset_alias] = {
                'instances': [],
                'failover_priority': host_vars.get('failover_priority'),
                'roles': host_vars['roles'],
                'all_rw': host_vars.get('all_rw'),
                'weight': host_vars.get('weight'),
                'vshard_group': host_vars.get('vshard_group')
            }
        replicasets[replicaset_alias]['instances'].append(instance)
    return replicasets
def get_any_instance_http_port(instances):
    """Return the HTTP port of the first non-expelled, non-stateboard instance.

    Raises AssertionError when no such instance exists. An explicit raise
    replaces the previous `assert False`, which is silently stripped when
    Python runs with -O.
    """
    for _, instance_vars in instances.items():
        if not instance_is_expelled(instance_vars) and not instance_is_stateboard(instance_vars):
            return instance_vars['config']['http_port']
    raise AssertionError('No running instance with an HTTP port was found')
def get_admin_api_url(instances):
    """Build the '/admin/api' URL for any usable instance's HTTP port."""
    http_port = get_any_instance_http_port(instances)
    return 'http://localhost:{}/admin/api'.format(http_port)
def user_is_deleted(user):
    """True iff the user dict is explicitly flagged deleted."""
    return user.get('deleted') is True
def section_is_deleted(section):
    """True iff the config section is explicitly flagged deleted."""
    return section.get('deleted') is True
def instance_is_expelled(host_vars):
    """True iff the host vars explicitly mark the instance as expelled."""
    return host_vars.get('expelled') is True
def instance_is_stateboard(host_vars):
    """True iff the host vars explicitly mark the instance as the stateboard."""
    return 'stateboard' in host_vars and host_vars['stateboard'] is True
def aliases_in_priority_order(replicaset_servers):
    """Server aliases sorted by ascending failover priority."""
    ordered = sorted(replicaset_servers, key=lambda server: server['priority'])
    return [server['alias'] for server in ordered]
def test_services_status_and_config(host):
    # Purpose: for every instance configured on this machine, verify its
    # systemd unit state and its on-disk YAML config under /etc/tarantool/conf.d.
    hostname = host.check_output('hostname -s')
    inventory = InventoryManager(loader=DataLoader(), sources='hosts.yml')
    # Instances whose Ansible group list contains this machine's hostname.
    host_instances = [
        i for i in inventory.hosts
        if hostname in list(map(lambda x: x.name, inventory.hosts[i].get_groups()))
    ]
    # Expected app-wide defaults; the cluster cookie is always written there.
    default_conf = get_variable_vaule('cartridge_defaults', default={})
    default_conf.update(cluster_cookie=get_cluster_cookie())
    for instance in host_instances:
        instance_vars = get_instance_vars(instance)  # NOTE(review): helper defined elsewhere in this module
        instance_conf = instance_vars['config']
        instance_name = instance_vars['inventory_hostname']
        service = host.service('{}@{}'.format(APP_NAME, instance_name))
        conf_file_path = '/etc/tarantool/conf.d/{}.{}.yml'.format(APP_NAME, instance_name)
        conf_section = '{}.{}'.format(APP_NAME, instance_name)
        # The stateboard runs as a dedicated (non-templated) unit with its own file.
        if instance_is_stateboard(instance_vars):
            service = host.service('{}-stateboard'.format(APP_NAME))
            conf_file_path = '/etc/tarantool/conf.d/{}-stateboard.yml'.format(APP_NAME)
            conf_section = '{}-stateboard'.format(APP_NAME)
        conf_file = host.file(conf_file_path)
        if instance_is_expelled(instance_vars):
            # Expelled instances must be fully cleaned up: stopped, disabled,
            # config, control socket and data directory all removed.
            assert not service.is_running
            assert not service.is_enabled
            assert not conf_file.exists
            assert not host.file('/var/run/tarantool/{}.{}.control'.format(APP_NAME, instance_name)).exists
            assert not host.file('/var/lib/tarantool/{}.{}'.format(APP_NAME, instance_name)).exists
        else:
            assert service.is_running
            assert service.is_enabled
            check_conf_file(conf_file, conf_section, instance_conf)
    # The shared per-app defaults file must match the expected defaults.
    default_conf_file_path = '/etc/tarantool/conf.d/{}.yml'.format(APP_NAME)
    default_conf_file = host.file(default_conf_file_path)
    default_conf_file_section = APP_NAME
    check_conf_file(default_conf_file, default_conf_file_section, default_conf)
def test_instances():
    # Verifies that every configured (non-expelled, non-stateboard) instance
    # is started, reachable via the admin API, and advertises its configured URI.
    cluster_cookie = get_cluster_cookie()
    configured_instances = get_configured_instances()  # NOTE(review): helper defined elsewhere in this module
    # Select one instance to be control
    admin_api_url = get_admin_api_url(configured_instances)
    # Get all started instances
    query = '''
        query {
          servers {
            uri
            alias
          }
        }
    '''
    session = get_authorized_session(cluster_cookie)
    response = session.post(admin_api_url, json={'query': query})
    started_instances = response.json()['data']['servers']
    started_instances = {i['alias']: i for i in started_instances}
    # filter out expelled instances and stateboard
    configured_instances = {
        i: instance_vars for i, instance_vars in configured_instances.items()
        if not instance_is_expelled(instance_vars) and not instance_is_stateboard(instance_vars)
    }
    # Check if all configured instances are started and avaliable
    assert len(configured_instances) == len(started_instances)
    assert set(configured_instances.keys()) == set(started_instances.keys())
    assert all([
        configured_instances[i]['config']['advertise_uri'] == started_instances[i]['uri']
        for i in configured_instances
    ])
def test_replicasets():
    # Verifies that replicasets reported by the running cluster match hosts.yml:
    # membership, roles, failover priority, and the optional tuning knobs.
    # Get all configured instances
    configured_instances = get_configured_instances()
    cluster_cookie = get_cluster_cookie()
    if not configured_instances:
        return
    # Select one instance to be control
    admin_api_url = get_admin_api_url(configured_instances)
    # Get started replicasets
    query = '''
        query {
          replicasets {
            alias
            roles
            all_rw
            weight
            vshard_group
            servers {
              alias
              priority
            }
            master {
              alias
            }
          }
        }
    '''
    session = get_authorized_session(cluster_cookie)
    response = session.post(admin_api_url, json={'query': query})
    started_replicasets = response.json()['data']['replicasets']
    started_replicasets = {r['alias']: r for r in started_replicasets}
    configured_replicasets = get_configured_replicasets()
    # Check if started replicasets are equal to configured
    assert len(started_replicasets) == len(configured_replicasets)
    assert set(started_replicasets.keys()) == set(configured_replicasets.keys())
    for name in started_replicasets.keys():
        started_replicaset = started_replicasets[name]
        configured_replicaset = configured_replicasets[name]
        assert set(started_replicaset['roles']) == set(configured_replicaset['roles'])
        started_replicaset_instances = [i['alias'] for i in started_replicaset['servers']]
        assert set(started_replicaset_instances) == set(configured_replicaset['instances'])
        # Optional settings are only asserted when hosts.yml specifies them.
        if configured_replicaset['failover_priority'] is not None:
            configured_failover_priority = configured_replicaset['failover_priority']
            assert started_replicaset['master']['alias'] == configured_failover_priority[0]
            assert aliases_in_priority_order(started_replicaset['servers']) == configured_failover_priority
        if configured_replicaset['all_rw'] is not None:
            assert started_replicaset['all_rw'] == configured_replicaset['all_rw']
        if configured_replicaset['weight'] is not None:
            assert started_replicaset['weight'] == configured_replicaset['weight']
        if configured_replicaset['vshard_group'] is not None:
            assert started_replicaset['vshard_group'] == configured_replicaset['vshard_group']
def test_failover():
    # Verifies the cluster's failover configuration against
    # `cartridge_failover_params` from hosts.yml.
    # Get configured failover status
    configured_failover_params = get_variable_vaule('cartridge_failover_params')
    if not configured_failover_params:
        return
    # Get all configured instances
    configured_instances = get_configured_instances()
    if not configured_instances:
        return
    # Select one instance to be control
    admin_api_url = get_admin_api_url(configured_instances)
    # Get cluster cookie
    cluster_cookie = get_cluster_cookie()
    # Get cluster failover status
    query = '''
        query {
          cluster {
            failover_params {
              mode
              state_provider
              tarantool_params {
                uri
                password
              }
            }
          }
        }
    '''
    session = get_authorized_session(cluster_cookie)
    response = session.post(admin_api_url, json={'query': query})
    failover_params = response.json()['data']['cluster']['failover_params']
    assert failover_params['mode'] == configured_failover_params['mode']
    if configured_failover_params.get('state_provider') is not None:
        # Cartridge reports the 'stateboard' provider under the name 'tarantool'.
        if configured_failover_params['state_provider'] == 'stateboard':
            assert failover_params['state_provider'] == 'tarantool'
    if configured_failover_params.get('stateboard_params') is not None:
        assert 'tarantool_params' in failover_params
        configured_stateboard_params = configured_failover_params['stateboard_params']
        stateboard_params = failover_params['tarantool_params']
        # Only the explicitly configured connection fields are checked.
        for p in ['uri', 'password']:
            if configured_stateboard_params.get(p) is not None:
                assert stateboard_params[p] == configured_stateboard_params[p]
def test_auth_params():
    # Verifies the cluster's auth settings against `cartridge_auth` from
    # hosts.yml (only the keys that were actually configured).
    # Get configured auth params
    configured_auth = get_variable_vaule('cartridge_auth')
    if not configured_auth:
        return
    # Get all configured instances
    configured_instances = get_configured_instances()
    if not configured_instances:
        return
    # Select one instance to be control
    admin_api_url = get_admin_api_url(configured_instances)
    # Get cluster cookie
    cluster_cookie = get_cluster_cookie()
    # Get cluster auth params
    query = '''
        query {
          cluster {
            auth_params {
              enabled
              cookie_max_age
              cookie_renew_age
            }
          }
        }
    '''
    session = get_authorized_session(cluster_cookie)
    response = session.post(admin_api_url, json={'query': query})
    auth = response.json()['data']['cluster']['auth_params']
    # Compare only keys present in the configuration.
    for key in ['enabled', 'cookie_max_age', 'cookie_renew_age']:
        if key in configured_auth:
            assert auth[key] == configured_auth[key]
def test_auth_users():
    # Verifies cluster users against `cartridge_auth.users` (ignoring the
    # built-in 'admin' and deleted entries), then checks that each configured
    # user with a password can actually log in over HTTP.
    # Get configured auth params
    configured_auth = get_variable_vaule('cartridge_auth')
    if not configured_auth or 'users' not in configured_auth:
        return
    # Get all configured instances
    configured_instances = get_configured_instances()
    if not configured_instances:
        return
    # Select one instance to be control
    admin_api_url = get_admin_api_url(configured_instances)
    # Get cluster cookie
    cluster_cookie = get_cluster_cookie()
    # Get cluster auth params
    query = '''
        query {
          cluster {
            users {
              username
              fullname
              email
            }
          }
        }
    '''
    session = get_authorized_session(cluster_cookie)
    response = session.post(admin_api_url, json={'query': query})
    auth_users = response.json()['data']['cluster']['users']
    auth_users = {
        u['username']: u for u in auth_users
        if u['username'] != 'admin' and not user_is_deleted(u)
    }
    configured_users = {u['username']: u for u in configured_auth['users']}
    assert auth_users.keys() == configured_users.keys()
    for k in auth_users.keys():
        conf_user = configured_users[k]
        user = auth_users[k]
        # Optional profile fields are only checked when configured.
        for p in ['fullname', 'email']:
            if p in conf_user:
                assert user[p] == conf_user[p]
    # Check if all users can log in
    login_url = 'http://{}:{}/login'.format(
        'localhost',
        get_any_instance_http_port(configured_instances)
    )
    for username, user in configured_users.items():
        if 'password' not in user:
            continue
        response = requests.post(login_url, json={'username': username, 'password': user['password']})
        assert response.status_code == 200
def test_app_config():
    # Verifies that the clusterwide application config served at /admin/config
    # matches `cartridge_app_config` from hosts.yml.
    # Get configured app config
    specified_app_config = get_variable_vaule('cartridge_app_config')
    if not specified_app_config:
        return
    # Get all configured instances
    configured_instances = get_configured_instances()
    if not configured_instances:
        return
    # Get cluster cookie
    cluster_cookie = get_cluster_cookie()
    # Get cartridge app config
    config_url = 'http://{}:{}/admin/config'.format(
        'localhost',
        get_any_instance_http_port(configured_instances)
    )
    session = get_authorized_session(cluster_cookie)
    response = session.get(config_url)
    assert response.status_code == 200
    app_config = yaml.safe_load(response.content)
    # Check if app config is equal to configured one
    # Sections marked deleted must be absent; all others must match exactly.
    for section_name, section in specified_app_config.items():
        if section_is_deleted(section):
            assert section_name not in app_config
        else:
            assert section_name in app_config
            assert app_config[section_name] == section['body']
| StarcoderdataPython |
140059 | <filename>Leetcode/Sorting,_Binary_Search/2_-_Medium/220._Contains_Duplicate_III.py
class Solution:
    def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
        """Return True if some pair i != j has |nums[i]-nums[j]| <= t and |i-j| <= k."""
        # Visit indices in order of increasing value; once the value gap
        # exceeds t, no later partner in sorted order can qualify either,
        # so the inner scan stops early.
        order = sorted(range(len(nums)), key=lambda idx: nums[idx])
        total = len(order)
        for a in range(total):
            for b in range(a + 1, total):
                i, j = order[a], order[b]
                if nums[j] - nums[i] > t:
                    break
                if abs(i - j) <= k:
                    return True
        return False
3323335 | <reponame>pkiage/credit-risk-modelling-tool
import streamlit as st
from sklearn.metrics import classification_report, roc_curve
import numpy as np
import plotly.express as px
import pandas as pd
from numpy import argmax
from visualization.metrics import streamlit_2columns_metrics_df, streamlit_2columns_metrics_pct_df
from visualization.graphs_threshold import acceptance_rate_driven_threshold_graph
def model_probability_values_df(model, X):
    """Probabilities of the positive (default) class as a one-column DataFrame."""
    positive_class_probabilities = model.predict_proba(X)[:, 1]
    return pd.DataFrame({"PROB_DEFAULT": positive_class_probabilities})
def find_best_threshold_J_statistic(y, clf_prediction_prob_df):
    # Best classification threshold by Youden's J statistic (max of tpr - fpr).
    # NOTE(review): this definition is shadowed by the @st.cache-decorated
    # redefinition of the same name later in this module; consider removing one.
    fpr, tpr, thresholds = roc_curve(y, clf_prediction_prob_df)
    # get the best threshold
    # Youden’s J statistic tpr-fpr
    # Argmax to get the index in
    # thresholds
    return thresholds[argmax(tpr - fpr)]
# Function that makes dataframe with probability of default, predicted default status based on threshold
# and actual default status
def classification_report_per_threshold(
    threshold_list, threshold_default_status_list, y_test
):
    """Map each threshold to the sklearn classification report of its predictions."""
    target_labels = ["Non-Default", "Default"]
    reports = [
        classification_report(
            y_test,
            predicted_statuses,
            target_names=target_labels,
            output_dict=True,
            zero_division=0,
        )
        for predicted_statuses in threshold_default_status_list
    ]
    # Pair each threshold with its report dict.
    return dict(zip(threshold_list, reports))
def thresh_classification_report_recall_accuracy(
    thresh_classification_report_dict,
):
    """Extract per-threshold default recall, non-default recall and accuracy.

    Args:
        thresh_classification_report_dict: mapping threshold -> sklearn
            classification report dict (``output_dict=True`` form).

    Returns:
        ``[default_recalls, non_default_recalls, accuracies]`` -- three lists
        aligned with the dict's iteration order.
    """
    # Iterate the dict's values directly instead of unpacking its keys with
    # ``[*dict]`` and re-indexing; comprehensions replace the append loops.
    reports = list(thresh_classification_report_dict.values())
    thresh_def_recalls_list = [r["Default"]["recall"] for r in reports]
    thresh_nondef_recalls_list = [r["Non-Default"]["recall"] for r in reports]
    thresh_accs_list = [r["accuracy"] for r in reports]
    return [
        thresh_def_recalls_list,
        thresh_nondef_recalls_list,
        thresh_accs_list,
    ]
def apply_threshold_to_probability_values(probability_values, threshold):
    """Binarize PROB_DEFAULT: 1 when strictly above *threshold*, else 0."""
    def classify(probability):
        return 1 if probability > threshold else 0

    predictions = probability_values["PROB_DEFAULT"].apply(classify)
    return predictions.rename("PREDICT_DEFAULT_STATUS")
@st.cache(suppress_st_warning=True)
def find_best_threshold_J_statistic(y, clf_prediction_prob_df):
    # Cached redefinition (shadows the identical uncached version above):
    # best classification threshold by Youden's J statistic, J = tpr - fpr.
    fpr, tpr, thresholds = roc_curve(y, clf_prediction_prob_df)
    # get the best threshold
    J = tpr - fpr  # Youden’s J statistic
    ix = argmax(J)
    return thresholds[ix]
def default_status_per_threshold(threshold_list, prob_default):
    """Predicted 0/1 default status Series for every threshold in *threshold_list*."""
    return [
        prob_default.apply(lambda probability: 1 if probability > threshold else 0)
        for threshold in threshold_list
    ]
def threshold_and_predictions(clf_xgbt_model, split_dataset, threshold):
    # Score the test split, binarize at *threshold*, and render summary
    # metrics to the Streamlit page; returns the 0/1 prediction Series.
    clf_prediction_prob_df_gbt = model_probability_values_df(
        clf_xgbt_model,
        split_dataset.X_test,
    )
    clf_thresh_predicted_default_status = (
        apply_threshold_to_probability_values(
            clf_prediction_prob_df_gbt,
            threshold,
        )
    )
    # Absolute counts of predicted defaults vs non-defaults.
    streamlit_2columns_metrics_df(
        "# of Predicted Defaults",
        "# of Predicted Non-Default",
        clf_thresh_predicted_default_status,
    )
    # The same split expressed as percentages.
    streamlit_2columns_metrics_pct_df(
        "% of Loans Predicted to Default",
        "% of Loans Predicted not to Default",
        clf_thresh_predicted_default_status,
    )
    return clf_thresh_predicted_default_status
def user_defined_probability_threshold(model_name_short, clf_xgbt_model, split_dataset):
    # Render a slider letting the user pick the default-probability cut-off,
    # then show predictions at that threshold.
    st.subheader("Classification Probability Threshold - User Defined")
    user_defined_threshold = st.slider(
        label="Default Probability Threshold:",
        min_value=0.0,
        max_value=1.0,
        value=0.8,
        key=f"threshold_{model_name_short}_default",
    )
    clf_thresh_predicted_default_status = threshold_and_predictions(
        clf_xgbt_model, split_dataset, user_defined_threshold)
    return clf_thresh_predicted_default_status, user_defined_threshold
def J_statistic_driven_probability_threshold(clf_prediction_prob_df_gbt, clf_xgbt_model, split_dataset):
    # Pick the threshold maximizing Youden's J on the test split, display it,
    # and show the predictions it produces.
    st.subheader("J Statistic Driven Classification Probability Threshold")
    J_statistic_best_threshold = find_best_threshold_J_statistic(
        split_dataset.y_test, clf_prediction_prob_df_gbt
    )
    st.metric(
        label="Youden's J statistic calculated best threshold",
        value=J_statistic_best_threshold,
    )
    clf_thresh_predicted_default_status = threshold_and_predictions(
        clf_xgbt_model, split_dataset, J_statistic_best_threshold)
    return clf_thresh_predicted_default_status, J_statistic_best_threshold
def create_tradeoff_graph(df):
    # Line chart of the recall/accuracy scores (y) against threshold (x),
    # rendered straight into the Streamlit page.
    fig2 = px.line(
        data_frame=df,
        y=["Default Recall", "Non Default Recall", "Accuracy"],
        x="Threshold",
    )
    fig2.update_layout(
        title="Recall and Accuracy score Trade-off with Probability Threshold",
        xaxis_title="Probability Threshold",
        yaxis_title="Score",
    )
    fig2.update_yaxes(range=[0.0, 1.0])  # scores are proportions in [0, 1]
    st.plotly_chart(fig2)
def tradeoff_threshold(clf_prediction_prob_df_gbt, split_dataset):
    # Sweep thresholds over [0, 1) in steps of 0.025 and plot how default
    # recall, non-default recall and accuracy trade off against each other.
    st.subheader(
        "Recall and Accuracy Tradeoff with given Probability Threshold"
    )
    threshold_list = np.arange(
        0, 1, 0.025).round(decimals=3).tolist()
    threshold_default_status_list = default_status_per_threshold(
        threshold_list, clf_prediction_prob_df_gbt["PROB_DEFAULT"]
    )
    thresh_classification_report_dict = (
        classification_report_per_threshold(
            threshold_list,
            threshold_default_status_list,
            split_dataset.y_test,
        )
    )
    (
        thresh_def_recalls_list,
        thresh_nondef_recalls_list,
        thresh_accs_list,
    ) = thresh_classification_report_recall_accuracy(
        thresh_classification_report_dict
    )
    namelist = [
        "Default Recall",
        "Non Default Recall",
        "Accuracy",
        "Threshold",
    ]
    # Assemble one row per metric, then transpose so each row is a threshold.
    df = pd.DataFrame(
        [
            thresh_def_recalls_list,
            thresh_nondef_recalls_list,
            thresh_accs_list,
            threshold_list,
        ],
        index=namelist,
    )
    df = df.T
    create_tradeoff_graph(df)
def select_probability_threshold(model_name_short,
                                 user_defined_threshold,
                                 clf_thresh_predicted_default_status_user_gbt,
                                 J_statistic_best_threshold,
                                 clf_thresh_predicted_default_status_Jstatistic_gbt,
                                 acc_rate_thresh_gbt,
                                 clf_thresh_predicted_default_status_acceptance_gbt):
    # Radio selector choosing which of the three candidate thresholds (and
    # its matching prediction Series) is carried forward downstream.
    st.subheader("Selected Probability Threshold")
    options = [
        "User Defined",
        "J Statistic Driven",
        "Acceptance Rate Driven",
    ]
    prob_thresh_option = st.radio(
        label="Selected Probability Threshold",
        options=options,
        key=f"{model_name_short}_radio_thresh",
    )
    if prob_thresh_option == "User Defined":
        prob_thresh_selected_gbt = user_defined_threshold
        predicted_default_status_gbt = (
            clf_thresh_predicted_default_status_user_gbt
        )
    elif prob_thresh_option == "J Statistic Driven":
        prob_thresh_selected_gbt = J_statistic_best_threshold
        predicted_default_status_gbt = (
            clf_thresh_predicted_default_status_Jstatistic_gbt
        )
    else:
        # Remaining option: "Acceptance Rate Driven".
        prob_thresh_selected_gbt = acc_rate_thresh_gbt
        predicted_default_status_gbt = (
            clf_thresh_predicted_default_status_acceptance_gbt
        )
    st.write(
        f"Selected probability threshold is {prob_thresh_selected_gbt}"
    )
    return prob_thresh_selected_gbt, predicted_default_status_gbt
def acceptance_rate_driven_threshold(model_name_short, clf_prediction_prob_df_gbt):
    # Derive the probability threshold from a target acceptance rate: the
    # threshold sits at that quantile of the predicted default probabilities.
    st.subheader("Acceptance Rate Driven Probability Threshold")
    # Steps
    # Set acceptance rate
    # Get default status per threshold
    # Get classification report per threshold
    # Get recall, nondef recall, and accuracy per threshold
    acceptance_rate = (
        st.slider(
            label="% of loans accepted (acceptance rate):",
            min_value=0,
            max_value=100,
            value=85,
            key=f"acceptance_rate_{model_name_short}",
            format="%f%%",
        )
        / 100
    )
    # Accepting X% of loans means rejecting the riskiest (1-X)% of scores.
    acc_rate_thresh_gbt = np.quantile(
        clf_prediction_prob_df_gbt["PROB_DEFAULT"], acceptance_rate
    )
    st.write(
        f"An acceptance rate of {acceptance_rate} results in probability threshold of {acc_rate_thresh_gbt}"
    )
    acceptance_rate_driven_threshold_graph(
        clf_prediction_prob_df_gbt, acc_rate_thresh_gbt)
    clf_thresh_predicted_default_status_acceptance_gbt = apply_threshold_to_probability_values(
        clf_prediction_prob_df_gbt,
        acc_rate_thresh_gbt,
    )
    return acc_rate_thresh_gbt, clf_thresh_predicted_default_status_acceptance_gbt
| StarcoderdataPython |
1769145 | import os, sys
parentPath = os.path.abspath("../")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
import json
from collections import namedtuple
from asciimatics.widgets import *
from gui.utils.utils import ColorTheme, getColor, getAttr
from gui.utils.widget import CustomLabel
# Keys selecting which bar definition Bar.parse() reads from the JSON config.
UP_BAR = 'up'
DOWN_BAR = 'down'
class Bar:
    """A status bar assembled from a JSON-derived layout description.

    Every configured line contributes three CustomLabel widgets
    (left / centre / right aligned) and a [left, centre, right] size triple.
    """

    def __init__(self):
        self.layouts = []   # per-line [left_size, centr_size, right_size]
        self.lables = []    # CustomLabel widgets, three per line (name kept for compatibility)

    def parse(self, jstr, b):
        """Build label widgets for the bar described by *jstr*; *b* selects up/down bar."""
        self.start_char = jstr.bar.start_char
        self.prev_char = jstr.bar.prev_char
        self.next_char = jstr.bar.next_char
        self.cur_char = jstr.bar.cur_char
        self.end_char = jstr.bar.end_char
        bar = jstr.upBar if b == UP_BAR else jstr.downBar
        for line in bar:
            self.layouts.append([line.left_size, line.centr_size, line.right_size])
            # Build the left, centre and right labels -- in that order, since
            # update()/setFrame() only rely on the flat widget list.
            sections = ((u'<', line.left), (u'^', line.centr), (u'>', line.right))
            for alignment, widgets in sections:
                label = CustomLabel(align=alignment, divider=line.divider)
                for widget in widgets:
                    parts = widget.color.split(':')  # presumably "fg:attr:bg" -- TODO confirm
                    label.addLable(
                        "",
                        ColorTheme(getColor(parts[0]), getAttr(parts[1]), getColor(parts[2])),
                        widget.data
                    )
                self.lables.append(label)

    def update(self, tag):
        """Refresh every label with the currently configured bar characters."""
        for label in self.lables:
            label.updateLable(tag, self.start_char, self.prev_char,
                              self.cur_char, self.next_char, self.end_char)

    def setFrame(self, f):
        """Attach every label to the given frame."""
        for label in self.lables:
            label._frame = f

    def getFrameName(self):
        """Name of the frame the first label is attached to, or None when empty."""
        if self.lables:
            return self.lables[0]._frame._name
        return None
3385298 | from lib.utils.base_utils import read_pickle
import numpy as np
def read_anns(ann_files):
    """Load and concatenate the pickled annotation lists in *ann_files*."""
    merged = []
    for ann_file in ann_files:
        merged.extend(read_pickle(ann_file))
    return merged
def read_pose(rot_path, tra_path):
    """Assemble a 3x4 [R|t] pose matrix from separate rotation/translation files.

    Both files carry a one-line header that is skipped; translation values are
    stored in centimetres and converted to metres.
    """
    rotation = np.loadtxt(rot_path, skiprows=1)
    translation = np.loadtxt(tra_path, skiprows=1) / 100.
    return np.concatenate([rotation, translation.reshape(3, 1)], axis=-1)
| StarcoderdataPython |
1700092 | import json
import datetime as dt
from utils import process_img as pi
from utils import log
# Module-level report file shared with validate_data(); opened for the whole
# run and never explicitly closed (relies on interpreter shutdown to flush).
# NOTE(review): consider managing this with a context manager in the caller.
f = open("tests.txt", "w")
def test_ocr():
    """Run the OCR pipeline on each image listed in data/test_ocr.json and validate it."""
    with open('data/test_ocr.json') as json_file:
        rows = json.load(json_file)
    for row in rows:
        image = pi.read_image(row['img'])
        (phone_time, raid_time, did_egg_hatch,
         gym_name, level, pokemon, raid_hour) = pi.process_img(image)
        log.log_raid_data(row['img'], phone_time, raid_time,
                          did_egg_hatch, gym_name, level, pokemon)
        validate_data(row['img'], phone_time, raid_time,
                      did_egg_hatch, gym_name, level, pokemon, row)
def validate_data(name, phone_time, raid_time, did_egg_hatch, gym_name, level, pokemon, data):
    """Compare OCR results against the expected values for one test image.

    Appends a report section to the module-level ``tests.txt`` file (``f``)
    and prints it; sections with no mismatch lines are reported as passing.

    Args:
        name: image file name, used as the report header.
        phone_time, raid_time, did_egg_hatch, gym_name, level, pokemon:
            values extracted by the OCR pipeline.
        data: expected values for this image from data/test_ocr.json.
    """
    info = "=" * 20 + " " + name + " " + "=" * 20 + "\n"
    if data['gym_name'].strip().lower() != gym_name.strip().lower():
        info += "Invalid gym name, expected: {}, result: {} \n".format(
            data['gym_name'], gym_name)
    # TODO: phones that show seconds in the clock may break this conversion
    # (original note, translated from Portuguese).
    try:
        if dt.datetime.strptime(data['mobile_time'], '%H:%M') != dt.datetime.strptime(phone_time, '%H:%M'):
            info += "Invalid phone time, expected {}, result: {} \n".format(
                data['mobile_time'], phone_time)
    except (ValueError, TypeError):
        # Unparseable OCR output counts as a mismatch (was a bare except).
        info += "Invalid phone time, expected {}, result: {} \n".format(
            data['mobile_time'], phone_time)
    if 'time_until_start' in data:
        try:
            if dt.datetime.strptime(data['time_until_start'], '%H:%M:%S') != dt.datetime.strptime(raid_time, '%H:%M:%S'):
                info += "Invalid time until start, expected {}, result: {} \n".format(
                    data['time_until_start'], raid_time)
        except (ValueError, TypeError):
            info += "Invalid time until start, expected {}, result: {} \n".format(
                data['time_until_start'], raid_time)
    elif 'time_until_finish' in data:
        # BUGFIX: this branch previously reported data['time_until_start'],
        # which raised KeyError whenever only 'time_until_finish' was present.
        try:
            if dt.datetime.strptime(data['time_until_finish'], '%H:%M:%S') != dt.datetime.strptime(raid_time, '%H:%M:%S'):
                info += "Invalid time until finish, expected {}, result: {} \n".format(
                    data['time_until_finish'], raid_time)
        except (ValueError, TypeError):
            info += "Invalid time until finish, expected {}, result: {} \n".format(
                data['time_until_finish'], raid_time)
    if data['level'] != level:
        info += "Invalid level, expected {}, result: {} \n".format(
            data['level'], level)
    # BUGFIX: was `pokemon in data`, which tested the OCR'd *value* against
    # the dict's keys and therefore never validated the pokemon name.
    if 'pokemon' in data and data['pokemon'].lower() != pokemon.lower():
        info += "Invalid pokemon, expected {}, result: {} \n".format(
            data['pokemon'], pokemon)
    # More than the header + trailing newline means at least one mismatch.
    if len(info.split("\n")) > 2:
        f.write(info + "\n")
        print(info + "\n")
    else:
        success_message = "==> {} passed all tests \n".format(name)
        print(success_message)
        f.write(success_message)
| StarcoderdataPython |
3215532 | <reponame>Vandivier/research-dissertation-case-for-alt-ed
from scipy import stats
import statsmodels.api as sm
import analysis_1_vars_and_regression as analysis
# Exploratory analysis: skewness tests on the hirability distribution, then
# gender mean-difference t-tests for each survey variable. Variables coded
# with 0/-1 sentinels for "no answer" are filtered before each test.
skewed = analysis.getData()
deskewed = analysis.getDeskewedData()
left_of_skew = analysis.getLowHirabilityGroup()
print('\n')
print("skewed data skew test:")
skew_test_result = stats.skewtest(skewed.hirability)
print(skew_test_result)
print('\n')
print("deskewed data skew test:")
deskewed_test_result = stats.skewtest(deskewed.hirability)
print(deskewed_test_result)
print('\n')
print("left_of_skew fails skew test due to insufficient sample size")
print("\n")
# Split the deskewed sample by gender for all subsequent two-sample tests.
deskewed_male = deskewed[deskewed.gender == "Male"]
deskewed_female = deskewed[deskewed.gender == "Female"]
# are men and women naively different?
# p ~=.68 therefore currently retain null hypothesis of no difference
ttest, pval = stats.ttest_ind(deskewed_male.hirability, deskewed_female.hirability)
print("hirability mean diff test by gender: " + str(pval))
print("\n")
# favor_programming_career: respondents with a positive answer only (> 0).
deskewed_male_with_favor_programming_career = deskewed_male[deskewed_male.favor_programming_career > 0]
deskewed_female_with_favor_programming_career = deskewed_female[deskewed_female.favor_programming_career > 0]
n_favor_programming_career = len(deskewed_male_with_favor_programming_career) + len(deskewed_female_with_favor_programming_career)
print("sample size for favor_programming_career: " + str(n_favor_programming_career))
ttest, pval = stats.ttest_ind(deskewed_male_with_favor_programming_career.favor_programming_career,
                              deskewed_female_with_favor_programming_career.favor_programming_career)
print("favor_programming_career mean diff test by gender: " + str(pval))
print("\n")
deskewed_male_with_favor_seeking_risk = deskewed_male[deskewed_male.favor_seeking_risk > 0]
deskewed_female_with_favor_seeking_risk = deskewed_female[deskewed_female.favor_seeking_risk > 0]
n_favor_seeking_risk = len(deskewed_male_with_favor_seeking_risk) + len(deskewed_female_with_favor_seeking_risk)
print("sample size for favor_seeking_risk: " + str(n_favor_seeking_risk))
ttest, pval = stats.ttest_ind(deskewed_male_with_favor_seeking_risk.favor_seeking_risk,
                              deskewed_female_with_favor_seeking_risk.favor_seeking_risk)
print("favor_seeking_risk mean diff test by gender: " + str(pval))
print("\n")
deskewed_male_with_grit = deskewed_male[deskewed_male.grit > 0]
deskewed_female_with_grit = deskewed_female[deskewed_female.grit > 0]
n_grit = len(deskewed_male_with_grit) + len(deskewed_female_with_grit)
print("sample size for grit: " + str(n_grit))
ttest, pval = stats.ttest_ind(deskewed_male_with_grit.grit,
                              deskewed_female_with_grit.grit)
print("grit mean diff test by gender: " + str(pval))
print("\n")
# Boolean variables use -1 as the "no answer" sentinel, hence the > -1 filter.
deskewed_male_with_is_prefer_college_peer = deskewed_male[deskewed_male.is_prefer_college_peer > -1]
deskewed_female_with_is_prefer_college_peer = deskewed_female[deskewed_female.is_prefer_college_peer > -1]
n_is_prefer_college_peer = len(deskewed_male_with_is_prefer_college_peer) + len(deskewed_female_with_is_prefer_college_peer)
print("sample size for is_prefer_college_peer: " + str(n_is_prefer_college_peer))
ttest, pval = stats.ttest_ind(deskewed_male_with_is_prefer_college_peer.is_prefer_college_peer,
                              deskewed_female_with_is_prefer_college_peer.is_prefer_college_peer)
print("is_prefer_college_peer mean diff test by gender: " + str(pval))
print("is_prefer_college_peer mean for male: " + str(deskewed_male_with_is_prefer_college_peer.is_prefer_college_peer.mean()))
print("is_prefer_college_peer mean for female: " + str(deskewed_female_with_is_prefer_college_peer.is_prefer_college_peer.mean()))
print("\n")
deskewed_male_with_is_tech = deskewed_male[deskewed_male.is_tech > -1]
deskewed_female_with_is_tech = deskewed_female[deskewed_female.is_tech > -1]
n_is_tech = len(deskewed_male_with_is_tech) + len(deskewed_female_with_is_tech)
print("sample size for is_tech: " + str(n_is_tech))
ttest, pval = stats.ttest_ind(deskewed_male_with_is_tech.is_tech,
                              deskewed_female_with_is_tech.is_tech)
print("is_tech mean diff test by gender: " + str(pval))
print("is_tech mean for male: " + str(deskewed_male_with_is_tech.is_tech.mean()))
print("is_tech mean for female: " + str(deskewed_female_with_is_tech.is_tech.mean()))
print("is_tech mean (fraction) invariant to gender is: " + str(deskewed.is_tech.mean()))
print("\n")
## below analysis using deskewed data
# # pca and mca explored for dimensionality reduction
# # of high-dimension categorical industry and state variables, but not feasible due to partial responses
# # import prince
# # prince can also do a graph
# # 2 components make graphing easier
# # https://stackoverflow.com/questions/48521740/using-mca-package-in-python
# # https://github.com/MaxHalford/prince#multiple-correspondence-analysis-mca
# df_industry = pd.DataFrame(df.industry)
# mca = prince.MCA(
# n_components=2,
# n_iter=3,
# copy=True,
# check_input=True,
# engine='auto',
# random_state=7
# )
# mca = mca.fit(df_industry)
# df[['industry_mca_1',
# 'industry_mca_2']] = mca.transform(df.industry)
# # pca.fit(df.state)
# m1_mca = '''hirability ~
# + industry_mca_1 + industry_mca_2
# + 1'''
# industry alone
# ar2 .10, r2 .19, n 105, AIC 339
m1 = '''hirability ~
+ industry
+ 1'''
# industry*gender alone - bad move
# ar2 .07, r2 .23, n 105, AIC 348
m2 = '''hirability ~
+ industry*gender
+ 1'''
# gender alone - bad move
# ar2 -.01, r2 .00, n 105, AIC 341
m3 = '''hirability ~
+ gender
+ 1'''
# gender alone - bad move
# ar2 .09, r2 .20, n 105, AIC 341
m4 = '''hirability ~
+ gender
+ industry
+ 1'''
# three-way interaction alone is highly significant and better than industry
# ar2 .18, r2 .24, n 99, AIC 306
m5 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk
+ 1'''
# industry and three-way interaction are substantially independent
# ref: https://stats.stackexchange.com/a/9174/142294
# ar2 .25, r2 .38, n 99, AIC 308
m6 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk
+ industry
+ 1'''
# this model is def overfit, but
# four way interaction is meaningful in the average case after penalty (ar2 up)
# could be a good backward selection or lasso starting point
# ar2 .26, r2 .64, n 99, AIC 319
m7 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk*industry
+ 1'''
# try simplifying + concentrating explanatory power via is_tech
# nope, explanatory power is nerfed; it bad.
# ar2 .13, r2 .27, n 99, AIC 319
m8 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk*is_tech
+ 1'''
# add covid_impact so m10 can compare interaction
# none of covid_impact is significant, one is p~.2
# ar2 .25, r2 .65, n 99, AIC 321
m9 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk*industry
+ covid_impact
+ 1'''
# covid_impact*gender
# all covid factors are worse after gender interaction, don't do that
# ar2 .20, r2 .66, n 99, AIC 325
m10 = '''hirability ~
+ gender*favor_programming_career*favor_seeking_risk*industry
+ covid_impact*gender
+ 1'''
# BUGFIX: the original line referenced `m1_mca`, which is only defined inside
# the commented-out prince/MCA section above and therefore raised NameError at
# runtime. Fit the defined baseline model m1 instead; substitute any of
# m2-m10 to reproduce the richer fits described above.
print(sm.OLS.from_formula(m1, data=deskewed).fit().summary())
# TODO: 1. swap industry categorical for is_tech boolean
# TODO: 2. covid_impact*gender
# I don't care about RLM bc coefficients are equal anyway
# print(sm.RLM.from_formula(1, data=skewed).fit().summary())
# "This makes AIC the preferred choice if the goal is prediction and the evaluation of predictions is the likelihood."
# over MSE but what about vs ar2?
# https://stats.stackexchange.com/questions/425675/optimality-of-aic-w-r-t-loss-functions-used-for-evaluation
# other criteria like MSE:
# https://www.youtube.com/watch?v=hUSsZD5NfQI
# WAIC, DIC, and LOOCV / LOO-CV
# https://www.youtube.com/watch?v=xS4jDHQfP2o
# see 2.2.5. Background knowledge and DAG:
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5969114/
1765390 | <gh_stars>0
"""
@package myWave provides functionality for reading and writing WAV files
@copyright GNU Public License
@author written 2009-2011 by <NAME> (www.christian-herbst.org)
@author Supported by the SOMACCA advanced ERC grant, University of Vienna,
Dept. of Cognitive Biology
@note
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 3 of the License, or (at your option) any later
version.
@par
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
@par
You should have received a copy of the GNU General Public License along with
this program; if not, see <http://www.gnu.org/licenses/>.
"""
import wave, struct
import numpy, array
import copy
import gc
import dspUtil
gc.enable()
import scipy.io.wavfile as sciWav
#from scipy import weave as weave
###############################################################################
def readWaveFile(fileName, useRobustButSlowAlgorithm = True):
    """
    load a WAV file
    @param fileName the name of the WAV file that needs to be loaded (a path,
        or any file-like object accepted by the underlying reader)
    @param useRobustButSlowAlgorithm if True, use the stdlib wave module,
        which is more tolerant of custom-generated WAV files with unusual
        chunk layouts; if False, use scipy's faster reader
    @return a list containing
        - the number of channels
        - the number of frames per channel
        - the sampling frequency [Hz]
        - a list containing one numpy array per channel with the frame data,
          samples scaled by 2**15 (i.e. normalized for 16-bit input)
    @raise Exception if the robust path encounters non-16-bit sample data
    """
    if useRobustButSlowAlgorithm:
        f = wave.open(fileName, "rb")
        try:
            numFrames = f.getnframes()
            numChannels = f.getnchannels()
            fs = f.getframerate()
            sampleWidth = f.getsampwidth()
            if sampleWidth != 2:
                raise Exception("we only support 16 bit data")
            dataTmp = f.readframes(numFrames * numChannels)
        finally:
            # BUGFIX: the original leaked the file handle when the sample
            # width check raised; always close the reader.
            f.close()
        out = struct.unpack_from(("%d" % (numFrames * numChannels)) + 'h', dataTmp)
        divisor = float(2 ** 15)
        # De-interleave: channel i owns every numChannels-th sample. numpy
        # slicing replaces the original per-sample Python loop (much faster).
        data = [
            numpy.array(out[chIdx::numChannels], dtype=float) / divisor
            for chIdx in range(numChannels)
        ]
        del dataTmp, out
        gc.collect()
        return [numChannels, numFrames, fs, data]
    fs, dataRaw = sciWav.read(fileName)
    n = len(dataRaw)
    # scipy returns a 1-D array for mono files, 2-D (frames x channels) otherwise.
    numChannels = dataRaw.shape[1] if dataRaw.ndim > 1 else 1
    arrChannels = []
    divisor = float(2 ** 15)
    for chIdx in range(numChannels):
        if numChannels == 1:
            tmp = dataRaw.astype(numpy.float32)
        else:
            tmp = dataRaw[0:, chIdx].astype(numpy.float32)
        # NOTE(review): like the original, this assumes 16-bit input; other
        # sample widths are not rescaled correctly on this code path.
        tmp /= divisor
        arrChannels.append(tmp)
    del dataRaw
    gc.collect()
    return [numChannels, n, fs, arrChannels]
###############################################################################
def readMonoWaveFile(fName):
    """
    Read a WAV file and return only its first channel.

    @param fName the full file name of the file to be read
    @return a tuple (samples, fs): the first channel's frame data and the
        sampling frequency [Hz]
    """
    _, _, samplingFrequency, channels = readWaveFile(fName)
    return channels[0], samplingFrequency
###############################################################################
def writeWaveFile(data, fileName, SRate=44100.0, normalize=False,
        removeDcWhenNormalizing=True):
    """
    write an array of floats to a 16 bit wave file
    @param data either a single list/numpy array of samples (mono), or a
        list of per-channel lists/numpy arrays (multi-channel)
    @param fileName the output file name
    @param SRate the sampling frequency [Hz]
    @param normalize if True, each channel is scaled so that its absolute
        peak becomes (just under) 1. If False and the input has samples
        outside [-1, 1], the written data will clip.
    @param removeDcWhenNormalizing if we're normalizing, this determines
        whether we should remove the DC offset before doing so
    @return nothing
    """
    if not type(data).__name__ in ['list', 'ndarray']:
        raise Exception("expected a list data type, but got %s" % type(data).__name__)
    numChannels = 1
    dataTmp = None
    dataType = type(data[0]).__name__
    if dataType in ['list', 'ndarray']:
        # multi-channel input: one list/array per channel
        numChannels = len(data)
        n = len(data[0])
        dataTmp = numpy.zeros((n, numChannels))
        for chIdx in range(numChannels):
            dType2 = type(data[chIdx]).__name__
            if dType2 == 'ndarray':
                # bug fix: copy the channel -- the original aliased the
                # caller's array and normalization then scaled it in place
                dataTmp2 = numpy.array(data[chIdx])
            elif dType2 == 'list':
                dataTmp2 = numpy.array(data[chIdx], dtype=numpy.float32)
            else:
                raise Exception("channel data is not a list or a numpy array")
            if normalize:
                if removeDcWhenNormalizing:
                    dataTmp2 -= dspUtil.nanMean(dataTmp2)
                absMax = dspUtil.getAbsMax(dataTmp2)
                # bug fix: guard against an all-zero (silent) channel; the
                # mono branch had this check but this branch did not, which
                # raised a division error on silent multi-channel input
                if absMax != 0:
                    dataTmp2 /= absMax * 1.000001
            dataTmp[0:, chIdx] = dataTmp2
            del dataTmp2
    else:
        # this is a mono file
        # force creating a copy, to avoid scaling the original data...
        dataTmp = numpy.array(data)
        if normalize:
            if removeDcWhenNormalizing:
                dataTmp -= dspUtil.nanMean(dataTmp)
            absMax = dspUtil.getAbsMax(dataTmp)
            if absMax != 0:
                dataTmp /= absMax * 1.000001
    # scale to the signed 16 bit integer range and save; the rate is
    # coerced to int because the default (44100.0) is a float and
    # scipy.io.wavfile expects an integral sample rate
    dataTmp *= float(2**15 - 1)
    dataTmp2 = numpy.asarray(dataTmp, dtype=numpy.int16)
    sciWav.write(fileName, int(SRate), dataTmp2)
    del dataTmp, dataTmp2
    gc.collect()
###############################################################################
| StarcoderdataPython |
3371348 | import sys
from types import SimpleNamespace
from typing import Callable
# noinspection PyPackageRequirements
import pytest as pytest
from jinja2 import TemplateNotFound
from markupsafe import Markup
from fixtures import registered_extension, starlette_render_partial
import jinja_partials
def test_render_empty(registered_extension):
    # A bare fragment must render without any context data.
    markup: Markup = jinja_partials.render_partial('render/bare.html')
    assert '<h1>This is bare HTML fragment</h1>' in markup
def test_render_with_data(registered_extension):
    # Keyword arguments must be forwarded into the template context.
    name, age = 'Sarah', 32
    rendered: Markup = jinja_partials.render_partial(
        'render/with_data.html', name=name, age=age)
    assert f'<span>Your name is {name} and age is {age}</span>' in rendered
def test_render_with_layout(registered_extension):
    # A partial extending a layout shows both the layout chrome and its data.
    message = "The message is clear"
    rendered: Markup = jinja_partials.render_partial(
        'render/with_layout.html', message=message)
    assert '<title>Jinja Partials Test Template</title>' in rendered
    assert message in rendered
def test_render_recursive(registered_extension):
    # A partial can call render_partial itself; both levels must appear.
    outer = "The message is clear"
    inner_text = "The message is recursive"
    rendered: Markup = jinja_partials.render_partial(
        'render/recursive.html', message=outer, inner=inner_text)
    assert outer in rendered
    assert inner_text in rendered
def test_missing_template(registered_extension):
    # Unknown template names must surface Jinja's TemplateNotFound.
    with pytest.raises(TemplateNotFound):
        jinja_partials.render_partial('no-way.pt', message=7)
def test_not_registered():
    # Rendering without registering the extension first must fail.
    with pytest.raises(Exception):
        jinja_partials.render_partial('doesnt-matter.pt', message=7)
def test_starlette_render_recursive(starlette_render_partial: Callable[..., Markup]):
    # Same recursive-render contract, exercised via the Starlette fixture.
    outer = "The message is clear"
    inner_text = "The message is recursive"
    rendered = starlette_render_partial(
        'render/recursive.html', message=outer, inner=inner_text)
    assert outer in rendered
    assert inner_text in rendered
def test_register_extensions_raises_if_flask_is_not_installed():
    """Simulate a missing Flask and check register_extensions() complains."""
    # Masking the module with None makes any `import flask` raise; evicting
    # jinja_partials forces a fresh import that observes the masked Flask.
    # The statement order here is load-bearing.
    sys.modules['flask'] = None
    del sys.modules['jinja_partials']
    import jinja_partials
    with pytest.raises(
        jinja_partials.PartialsException,
        match='Install Flask to use `register_extensions`',
    ):
        jinja_partials.register_extensions(SimpleNamespace())
    # restore so later tests can import Flask normally again
    del sys.modules['flask']
def test_register_extensions_raises_if_starlette_is_not_installed():
    """Simulate a missing Starlette and check the Starlette registration fails."""
    # Same sys.modules masking trick as the Flask variant above; the
    # statement order is load-bearing.
    sys.modules['starlette'] = None
    del sys.modules['jinja_partials']
    import jinja_partials
    with pytest.raises(
        jinja_partials.PartialsException,
        match='Install Starlette to use `register_starlette_extensions`',
    ):
        jinja_partials.register_starlette_extensions(SimpleNamespace())
    # restore so later tests can import Starlette normally again
    del sys.modules['starlette']
| StarcoderdataPython |
136361 | import bs4
import flask
import flask_cors
import json
import pyrebase
import requests
import traceback
from lib import TranscriptParser
app = flask.Flask(__name__)
firebase = pyrebase.initialize_app({
'apiKey': '<KEY>',
'authDomain': 'canigraduate-43286.firebaseapp.com',
'databaseURL': 'https://canigraduate-43286.firebaseio.com',
'storageBucket': 'canigraduate-43286.appspot.com',
'serviceAccount': 'service_account_key.json'
})
@app.route('/api/transcript', methods=['POST'])
@flask_cors.cross_origin()
def transcript():
    """Fetch and parse a transcript for the supplied credentials.

    Accepts username/password either as form fields or in a JSON body.
    Returns {'transcript': [...]} on success, {'error': ...} with HTTP 403
    on a credential error (ValueError) and 500 on anything unexpected.
    """
    try:
        # Bug fix: the original used flask.request.json['username'] as the
        # *default argument* of form.get(), which is evaluated eagerly --
        # for form-only requests request.json is None and that raised
        # TypeError (-> 500) even though the form held the credentials.
        username = flask.request.form.get('username')
        if username is None:
            username = flask.request.json['username']
        password = flask.request.form.get('password')
        if password is None:
            password = flask.request.json['password']
        parser = TranscriptParser(username=username, password=password)
        transcript = [record.serialize() for record in parser.execute()]
        return flask.jsonify(**{'transcript': transcript})
    except ValueError as e:
        # credential/parse failure raised by TranscriptParser
        return flask.jsonify(**{'error': str(e)}), 403
    except Exception as e:
        return flask.jsonify(**{'error': 'Unknown authentication error occurred.'}), 500
@app.route('/programs', methods=['POST', 'GET'])
def programs():
    """Show (GET) or replace (POST) the 'programs' node in Firebase."""
    db = firebase.database()
    if flask.request.method == 'POST':
        payload = json.loads(flask.request.form.get('data'))
        db.child('programs').set(payload)
    current = db.child('programs').get().val()
    return flask.render_template('programs.html', result=json.dumps(current, indent=2))
@app.route('/sequences', methods=['POST', 'GET'])
def sequences():
    """Show (GET) or replace (POST) the 'sequences' node in Firebase."""
    db = firebase.database()
    if flask.request.method == 'POST':
        payload = json.loads(flask.request.form.get('data'))
        db.child('sequences').set(payload)
    current = db.child('sequences').get().val()
    return flask.render_template('sequences.html', result=json.dumps(current, indent=2))
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| StarcoderdataPython |
3238742 | # -*- coding: utf-8 -*-
# edge realization
class Edge(object):
# create edge from source key to target key with props
def __init__(self, source, target, weight = 0):
self.source = source
self.target = target
self.weight = weight
def __eq__(self, other):
if isinstance(other, self.__class__):
check_source = self.source == other.source
check_target = self.target == other.target
return check_source and check_target
return False
def __hash__(self):
return hash((self.source, self.target))
def __str__(self):
return str(self.source) + ' => ' + str({self.target : self.weight})
def set_weight(self, weight):
self.weight = weight
def get_weight(self):
return self.weight
| StarcoderdataPython |
3380861 | <filename>pytorch_lightning/callbacks/__init__.py
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.gpu_stats_monitor import GPUStatsMonitor
from pytorch_lightning.callbacks.gradient_accumulation_scheduler import GradientAccumulationScheduler
from pytorch_lightning.callbacks.lr_logger import LearningRateLogger
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks.progress import ProgressBar, ProgressBarBase
__all__ = [
'Callback',
'EarlyStopping',
'GPUStatsMonitor',
'GradientAccumulationScheduler',
'LearningRateLogger',
'LearningRateMonitor',
'ModelCheckpoint',
'ProgressBar',
'ProgressBarBase',
]
| StarcoderdataPython |
3396287 | <filename>upsert/ansi_ident.py
import codecs
import upsert
class AnsiIdent:
    """Quoting of SQL identifiers using the ANSI convention (double quotes)."""

    # http://stackoverflow.com/questions/6514274/how-do-you-escape-strings-for-sqlite-table-column-names-in-python
    @upsert.memoize
    def quote_ident(self, str):
        """Return *str* quoted as an ANSI SQL identifier.

        Embedded double quotes are doubled; NUL characters (illegal in
        identifiers) are substituted via the codecs 'replace' handler.
        """
        encodable = str.encode("utf-8", "strict").decode("utf-8")
        nul_index = encodable.find("\x00")
        if nul_index >= 0:
            error = UnicodeEncodeError("NUL-terminated utf-8", encodable, nul_index, nul_index + 1, "NUL not allowed")
            # Bug fix: the original called codecs.lookup_error(errors), but
            # no name `errors` exists in this scope (NameError at runtime).
            # Use the standard 'replace' handler explicitly.
            error_handler = codecs.lookup_error("replace")
            replacement, _ = error_handler(error)
            encodable = encodable.replace("\x00", replacement)
        return '"' + encodable.replace('"', '""') + '"'
| StarcoderdataPython |
4832065 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from typing import NamedTuple, List, Callable, Any
class Struct(metaclass=ABCMeta):
    """Immutable, slot-based base class for binary-encodable structures.

    Subclasses declare their fields in ``__slots__`` and implement
    ``encode``/``decode`` plus ``__str__``/``__repr__``. Instances are
    immutable: ordinary attribute assignment/deletion raises TypeError.
    """
    __slots__ = []

    def __eq__(self, other):
        """Field-by-field equality over this class's ``__slots__``.

        Returns NotImplemented for other types (the original asserted type
        equality, which made ``==`` against any foreign object raise).
        """
        if type(self) is not type(other):
            return NotImplemented
        # Bug fix: the original compared the literal attribute `self.item`
        # instead of looking each slot name up with getattr().
        return all(
            getattr(self, name) == getattr(other, name)
            for name in self.__slots__
        )

    def __bytes__(self) -> bytes:
        # delegate to the subclass-provided binary encoding
        return self.encode()

    @abstractmethod
    def __str__(self) -> str: pass

    @abstractmethod
    def __repr__(self) -> str: pass

    @abstractmethod
    def encode(self, **kwargs) -> bytes:
        """
        Encode object to bytes

        Returns
        -------
        bytes
            encoded message
        """
        pass

    def __setattr__(self, *args, **kwargs):
        # enforce immutability after construction
        raise TypeError('{0} is immutable'.format(self.__class__))

    def __delattr__(self, *args, **kwargs):
        raise TypeError('{0} is immutable'.format(self.__class__))

    @classmethod
    @abstractmethod
    def decode(cls, n: bytes, **kwargs) -> NamedTuple:
        """
        Decode object from bytes

        Parameters
        ----------
        n : bytes
            structure to decode

        Returns
        -------
        NamedTuple
            Appropriate NamedTuple
        """
        pass

    @classmethod
    def from_raw(cls, buf: bytes) -> object:
        """
        Alternative constructor: decode *buf* with cls.decode() and build
        an instance from the resulting named tuple's fields.

        Parameters
        ----------
        buf : bytes
            Raw bytes to interpret

        Returns
        -------
        object
            An instance of *cls*
        """
        parsed = cls.decode(buf)
        return cls(**parsed._asdict())
| StarcoderdataPython |
1646688 | <reponame>old-pinky/AioPaperScroll-SDK
from setuptools import setup, find_packages

# Packaging metadata for the AioPaperScroll SDK.
# NOTE(review): 'asyncio' is part of the standard library since 3.4; the
# PyPI package of that name is an obsolete backport and listing it here can
# break installs on modern Pythons -- consider removing (confirm).
setup(
    name='aiopaperscroll',
    version='1.0.0',
    packages=find_packages(),
    install_requires=[
        'loguru',
        'asyncio',
        'aiohttp'],
    url='https://github.com/old-pinky/AioPaperScroll-SDK'
)
| StarcoderdataPython |
4833104 | <reponame>idlewan/FrameworkBenchmarks
import helper
from helper import Command
def start(args, logfile, errfile):
    """Install the gems, then launch TorqBox bound to all interfaces in
    production mode. The database host comes from args (default localhost)."""
    db_host = "DB_HOST={0}".format(args.database_host or 'localhost')
    server_cmd = db_host + " rvm jruby-1.7.8 do bundle exec torqbox -b 0.0.0.0 -E production"
    setup_and_serve = [
        Command("rvm jruby-1.7.8 do bundle --jobs 4", True),
        Command(server_cmd, False),
    ]
    return helper.run(setup_and_serve, logfile, errfile, args.troot)
def stop(logfile, errfile):
    """Terminate any running torqbox process."""
    return helper.stop('torqbox', logfile, errfile)
| StarcoderdataPython |
3349337 | <filename>substrabac/substrapp/tests/tests_model.py<gh_stars>0
import os
import shutil
import tempfile
from checksumdir import dirhash
from django.test import TestCase, override_settings
from substrapp.models import Objective, DataManager, DataSample, Algo, Model
from substrapp.utils import get_hash
from .common import get_sample_objective, get_sample_datamanager, \
get_sample_script, get_sample_model
MEDIA_ROOT = tempfile.mkdtemp()  # throw-away media directory for this test run


@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class ModelTests(TestCase):
    """Model tests: each asset model derives its pkhash from its content,
    starts out unvalidated, and exposes both through str()."""

    def tearDown(self):
        # remove everything written below the temporary MEDIA_ROOT
        shutil.rmtree(MEDIA_ROOT, ignore_errors=True)

    def test_create_objective(self):
        description, _, metrics, _ = get_sample_objective()
        objective = Objective.objects.create(description=description,
                                             metrics=metrics)
        # pkhash is derived (via get_hash) from the description file
        self.assertEqual(objective.pkhash, get_hash(description))
        self.assertFalse(objective.validated)
        self.assertIn(f'pkhash {objective.pkhash}', str(objective))
        self.assertIn(f'validated {objective.validated}', str(objective))

    def test_create_datamanager(self):
        description, _, data_opener, _ = get_sample_datamanager()
        datamanager = DataManager.objects.create(description=description, data_opener=data_opener, name="slides_opener")
        # for a DataManager the pkhash comes from the opener, not the description
        self.assertEqual(datamanager.pkhash, get_hash(data_opener))
        self.assertFalse(datamanager.validated)
        self.assertIn(f'pkhash {datamanager.pkhash}', str(datamanager))
        self.assertIn(f'name {datamanager.name}', str(datamanager))

    def test_create_data(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # fixture directory shipped with the repository
        path = os.path.join(dir_path, '../../../fixtures/chunantes/datasamples/train/0024308')
        data_sample = DataSample.objects.create(path=path)
        # data samples hash a whole directory (sha256 over its contents)
        self.assertEqual(data_sample.pkhash, dirhash(path, 'sha256'))
        self.assertFalse(data_sample.validated)
        self.assertIn(f'pkhash {data_sample.pkhash}', str(data_sample))
        self.assertIn(f'validated {data_sample.validated}', str(data_sample))

    def test_create_algo(self):
        script, _ = get_sample_script()
        algo = Algo.objects.create(file=script)
        self.assertEqual(algo.pkhash, get_hash(script))
        self.assertFalse(algo.validated)
        self.assertIn(f'pkhash {algo.pkhash}', str(algo))
        self.assertIn(f'validated {algo.validated}', str(algo))

    def test_create_model(self):
        modelfile, _ = get_sample_model()
        model = Model.objects.create(file=modelfile)
        self.assertEqual(model.pkhash, get_hash(modelfile))
        self.assertFalse(model.validated)
        self.assertIn(f'pkhash {model.pkhash}', str(model))
        self.assertIn(f'validated {model.validated}', str(model))
| StarcoderdataPython |
180195 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-01 08:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Laptime.recorded to a plain
    # DateField with no extra field options.

    dependencies = [
        ('tracks', '0011_auto_20161129_1442'),
    ]

    operations = [
        migrations.AlterField(
            model_name='laptime',
            name='recorded',
            field=models.DateField(),
        ),
    ]
| StarcoderdataPython |
1667300 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CUPyDO configuration file
# Agard445 wing
# <NAME> & <NAME>
def test(cupydo, tol):
    """Check FSI convergence and the aerodynamic/structural results.

    NOTE: this file is Python 2 (print statements below).

    @param cupydo driver whose algorithm has already been run
    @param tol FSI residual tolerance the run must have reached
    """
    res = cupydo.algorithm.errValue
    import numpy as np
    from cupydo.testing import *
    # Read results from data
    cl = cupydo.algorithm.FluidSolver.coreSolver.getCl()
    cd = cupydo.algorithm.FluidSolver.coreSolver.getCd()
    # structural displacements: last line of each solver output file
    with open("db_Field(TZ,RE)_GROUP_ID_121.ascii", 'rb') as f:
        lines = f.readlines()
    resultS1 = np.genfromtxt(lines[-1:], delimiter=None)
    with open("db_Field(TZ,RE)_GROUP_ID_122.ascii", 'rb') as f:
        lines = f.readlines()
    resultS2 = np.genfromtxt(lines[-1:], delimiter=None)
    # Check convergence and results
    if (res > tol):
        print "\n\n" + "FSI residual = " + str(res) + ", FSI tolerance = " + str(tol)
        raise Exception(ccolors.ANSI_RED + "FSI algo failed to converge!" + ccolors.ANSI_RESET)
    tests = CTests()
    tests.add(CTest('Lift coefficient', cl, 0.0460, 1e-2, True)) # abs. tol
    tests.add(CTest('Drag coefficient', cd, 0.00080, 1e-4, True))
    tests.add(CTest('LE vertical displacement', resultS2[2], 0.009, 5e-3, True))
    tests.add(CTest('TE vertical displacement', resultS1[2], 0.010, 5e-3, True))
    tests.run()
def getFsiP():
    """Build and return the FSI parameter dictionary for this case."""
    import os
    fileName = os.path.splitext(os.path.basename(__file__))[0]
    base = fileName[:-3]  # strip the trailing case-type suffix
    return {
        # Solvers and config files
        'fluidSolver': 'VLM',
        'solidSolver': 'Metafor',
        'cfdFile': base + 'fluid',
        'csdFile': base + 'solid',
        # FSI objects
        'interpolator': 'RBF',
        'criterion': 'Displacements',
        'algorithm': 'StaticBGS',
        # FSI parameters
        'compType': 'steady',
        'nDim': 3,
        'dt': 0.1,
        'tTot': 0.1,
        'timeItTresh': -1,
        'tol': 1e-4,
        'maxIt': 50,
        'omega': 1.0,
        'rbfRadius': .5,
    }
def main():
    """Run the full steady FSI simulation, save the fluid state and
    validate the results against the reference values."""
    import cupydo.interfaces.Cupydo as cupy
    import staticAgard_fluid
    params = getFsiP()
    driver = cupy.CUPyDO(params)
    driver.run()
    driver.algorithm.FluidSolver.coreSolver.save()
    test(driver, params['tol'])
# eof
print ''  # Python 2 print statement: emits a blank line on module load

# --- This is only accessed if running from command prompt --- #
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1770132 | <reponame>amakaroff82/node-facenet<filename>src/python3/facenet_bridge.py
"""
facenet-bridge
"""
import base64
import errno
import json
import os
from pathlib import PurePath
from typing import (
Any,
List,
Tuple,
)
import tensorflow as tf # type: ignore
import numpy as np # type: ignore
import align.detect_face # type: ignore
import facenet # type: ignore
# def json_parse(text: str) -> Any:
# """ json """
# return json.loads(text)
# def numpize(array: Any) -> Any:
# """ numpy """
# return np.array(array, dtype=np.uint8) # important to define dtype!
def base64_to_image(
        base64text: str,
        row: int,
        col: int,
        depth: int
) -> Any:
    """Decode a base64-encoded raw pixel buffer into a (row, col, depth)
    uint8 numpy array."""
    raw_bytes = base64.b64decode(base64text)
    # dtype must be uint8 so every byte becomes exactly one array element
    flat = np.array(memoryview(raw_bytes), dtype=np.uint8)
    return flat.reshape(row, col, depth)
class FacenetBridge(object):
    """
    Bridge of Facenet: owns a TensorFlow graph/session with the facenet
    model restored and computes face embeddings from base64 pixel buffers.
    """
    FACENET_MODEL = None  # type: str  # filesystem path of the model directory

    def __init__(self) -> None:
        # graph/session and tensor handles are created lazily in init()
        self.graph = self.session = None  # type: Any
        self.placeholder_input = None  # type: Any
        self.placeholder_phase_train = None  # type: Any
        self.placeholder_embeddings = None  # type: Any
        self.FACENET_MODEL = FacenetBridge.get_model_path()

    def init(self) -> None:
        """Create the TF graph/session and restore the facenet checkpoint."""
        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)
        # pylint: disable=not-context-manager
        with self.graph.as_default():
            with self.session.as_default():
                model_dir = os.path.expanduser(self.FACENET_MODEL)
                meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
                saver = tf.train.import_meta_graph(
                    os.path.join(model_dir, meta_file),
                )
                saver.restore(
                    tf.get_default_session(),
                    os.path.join(model_dir, ckpt_file),
                )
                # facenet.load_model(self.FACENET_MODEL)
        # cache handles to the model's input/output tensors by name
        self.placeholder_input = self.graph.get_tensor_by_name('input:0')
        self.placeholder_phase_train = \
            self.graph.get_tensor_by_name('phase_train:0')
        self.placeholder_embeddings = \
            self.graph.get_tensor_by_name('embeddings:0')

    @staticmethod
    def get_model_path() -> str:
        """
        Get the facenet model path: from the FACENET_MODEL environment
        variable when set, otherwise from PYTHON_FACENET_MODEL_PATH in the
        module's package.json. Raises FileNotFoundError when the resolved
        path does not exist.
        """
        try:
            model_path = os.environ['FACENET_MODEL']  # type: str
            return model_path
        except KeyError:
            pass

        file_path = os.path.dirname(os.path.abspath(__file__))
        path_split = PurePath(file_path).parts
        # the parent of parent directory name tells us whether we run from
        # the compiled "dist" tree (one directory deeper) or from source
        is_dist = path_split[-3:-2][0] == 'dist'
        if is_dist:
            module_root = os.path.join(file_path, '..', '..', '..')
        else:
            module_root = os.path.join(file_path, '..', '..')
        module_root = os.path.abspath(
            os.path.normpath(
                module_root
            )
        )
        package = os.path.join(module_root, 'package.json')
        with open(package) as data:
            package_json = json.load(data)
        python_facenet_model_path = \
            package_json['facenet']['env']['PYTHON_FACENET_MODEL_PATH']
        model_path = os.path.join(module_root, python_facenet_model_path)
        if not os.path.exists(model_path):
            raise FileNotFoundError(
                errno.ENOENT,
                os.strerror(errno.ENOENT),
                model_path
            )
        return model_path

    def embedding(
            self,
            image_base64: str,
            row: int,
            col: int,
            depth: int,
    ) -> List[float]:
        """
        Compute the facenet embedding for one image.

        @param image_base64 base64-encoded raw uint8 pixel buffer
        @param row/col/depth dimensions used to reshape that buffer
        @return the embedding vector as a plain list of floats
        """
        image = base64_to_image(image_base64, row, col, depth)
        if image.ndim == 2:
            # grayscale input: replicate to 3 channels
            image = facenet.to_rgb(image)
        # get rid of Alpha Channel from PNG(if any) and prewhiten
        image = facenet.prewhiten(image[:, :, 0:3])

        feed_dict = {
            self.placeholder_input: image[np.newaxis, :],
            self.placeholder_phase_train: False,
        }
        # Use the facenet model to calculate embeddings
        embeddings = self.session.run(
            self.placeholder_embeddings,
            feed_dict=feed_dict,
        )
        # Return the only row (batch size is 1)
        return embeddings[0].tolist()
class MtcnnBridge():
    """
    MTCNN Face Alignment: detects face bounding boxes and landmarks.
    """
    def __init__(self) -> None:
        # graph/session and the three cascade networks are built in init()
        self.graph = self.session = None  # type: Any
        self.pnet = self.rnet = self.onet = None  # type: Any

        self.minsize = 20  # minimum size of face
        self.threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        self.factor = 0.709  # scale factor

    def init(self) -> None:
        """Create the TF graph/session and build the three MTCNN stages."""
        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)
        # pylint: disable=not-context-manager
        with self.graph.as_default():
            with self.session.as_default():
                self.pnet, self.rnet, self.onet = \
                    align.detect_face.create_mtcnn(self.session, None)

    def align(
            self,
            image_base64: str,
            row: int,
            col: int,
            depth: int,
    ) -> Tuple[List[Any], List[Any]]:
        """
        Run MTCNN face detection on one image.

        @param image_base64 base64-encoded raw uint8 pixel buffer
        @param row/col/depth dimensions used to reshape that buffer
        @return (bounding_boxes, landmarks) converted to plain nested lists
        """
        image = base64_to_image(image_base64, row, col, depth)
        bounding_boxes, landmarks = align.detect_face.detect_face(
            image[:, :, 0:3],  # get rid of alpha channel(if any)
            self.minsize,
            self.pnet,
            self.rnet,
            self.onet,
            self.threshold,
            self.factor,
        )
        return bounding_boxes.tolist(), landmarks.tolist()
| StarcoderdataPython |
64302 | #
# Copyright (c) 2016, SUSE LLC All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ceph-auto-aws nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
from handson.myyaml import stanza
log = logging.getLogger(__name__)
class ClusterOptions(object):
    """Validation/expansion helpers for the delegate list in self.args."""

    def validate_delegate_list(self):
        """Validate self.args.delegate_list against the YAML 'delegates' stanza.

        Returns True when the list is absent/empty or within bounds; raises
        AssertionError when the stanza is malformed or the list exceeds the
        configured maximum.
        NOTE(review): only the last entry is compared, which assumes the
        list is sorted ascending -- confirm upstream.
        """
        dl = self.args.delegate_list
        if dl is None or len(dl) == 0:
            return True
        max_delegates = stanza('delegates')
        log.debug("Maximum number of delegates is {!r}".format(max_delegates))
        assert (
            max_delegates is not None and
            max_delegates > 0 and
            max_delegates <= 100
        ), "Bad delegates stanza in YAML: {!r}".format(max_delegates)
        assert dl[-1] <= max_delegates, (
            ("Delegate list exceeds {!r} (maximum number of " +
                "delegates in YAML)").format(max_delegates)
        )
        # Bug fix: the original fell off the end here, returning None (falsy)
        # on the *successful* non-empty path while returning True for the
        # empty case. Return True consistently.
        return True

    def process_delegate_list(self):
        """Expand --all/--master into self.args.delegate_list, then validate."""
        max_d = stanza('delegates')
        if self.args.delegate_list is None:
            self.args.delegate_list = list()
        if self.args.all:
            self.args.delegate_list = list(range(1, max_d + 1))
        if self.args.master:
            # delegate 0 denotes the master node
            self.args.delegate_list.insert(0, 0)
        self.validate_delegate_list()
        log.info("Delegate list is {!r}".format(self.args.delegate_list))
| StarcoderdataPython |
1712204 | from time import time
from random import sample
from math import log
from joblib import Parallel, delayed
from .data_structures import Proofs, Features, Rankings
# thm1, thm2 -- theorems with features; we measure similarity between them
# dict_features_dict_features_numbers -- info about in how many theorems different
# features occur;
# higher power -> rare features have more influence on similarity
# returns number from [0,1]; 1 - identical, 0 - nothing in common
def similarity(thm1, thm2, dict_features_numbers, n_of_theorems, power):
    """Jaccard-style similarity in [0, 1] between two featurized theorems.

    Each feature is weighted by log(n_of_theorems / occurrence_count) ** power,
    so rare features contribute more; a higher `power` amplifies that effect.
    Features never seen before are registered in dict_features_numbers with
    count 1 (the dict is mutated in place).
    Returns 1 for identical feature sets, 0 for disjoint ones.
    """
    left = set(thm1[1])
    right = set(thm2[1])
    common = left & right
    # register any feature we have not seen in a theorem yet
    for feature in left | right:
        dict_features_numbers.setdefault(feature, 1)

    def weight(feature):
        return log(n_of_theorems / dict_features_numbers[feature]) ** power

    w_left = sum(weight(f) for f in left)
    w_right = sum(weight(f) for f in right)
    w_common = sum(weight(f) for f in common)
    # weighted Jaccard index, re-compressed by the inverse power
    return (w_common / (w_left + w_right - w_common)) ** (1 / power)
# theorem -- theorem and its features (as a tuple) with unknown premises useful
# for proving it; the function creates a ranking of premises
def knn_one_theorem(theorem, thm_features,
                    proofs, features,
                    chronology,
                    dict_features_numbers,
                    N, power):
    """Build a normalized ranking of premises likely useful for `theorem`.

    The (up to) N most feature-similar previously available theorems vote
    for the premises occurring in their proofs; votes are weighted by the
    neighbour's similarity times a damped (** .3) usage count, and finally
    normalized so the best premise scores 1.

    @param theorem name of the theorem to rank premises for
    @param thm_features its feature list
    @param proofs dict: theorem name -> list of proofs (iterables of premises)
    @param features dict: theorem name -> feature list
    @param chronology ordering; only premises stated before `theorem` are used
    @param dict_features_numbers feature -> number of theorems it occurs in
    @param N neighbourhood size
    @param power weighting exponent forwarded to similarity()
    @return list of (premise, score) pairs, best first, scores in [0, 1]
    """
    # chronology is important: restrict to premises available before
    # `theorem`, and drop any candidate whose proofs already use `theorem`
    available_premises = chronology.available_premises(theorem)
    proofs = {t: proofs[t] for t in available_premises
              if not theorem in set().union(*list(proofs[t]))}
    features = {t: features[t] for t in available_premises}
    # separation of train and test
    assert not theorem in proofs
    similarities = {t: similarity((theorem, thm_features),
                                  (t, features[t]),
                                  dict_features_numbers,
                                  len(features), power)
                    for t in proofs}
    # keep the theorems strictly above the N-th best similarity
    similarities_sorted_values = sorted(similarities.values(), reverse=True)
    N_threshold = similarities_sorted_values[min(N, len(similarities) - 1)]
    N_nearest_theorems = {t for t in set(similarities)
                          if similarities[t] > N_threshold}
    premises_scores = {}
    assert not theorem in N_nearest_theorems
    for thm in N_nearest_theorems:
        # count how often each premise appears across this neighbour's proofs
        premises_scores_one = {}
        for prf in proofs[thm]:
            for prm in prf:
                try: premises_scores_one[prm] = premises_scores_one[prm] + 1
                except: premises_scores_one[prm] = 1
        # damped count (** .3), weighted by the neighbour's similarity
        for prf in premises_scores_one:
            scr = similarities[thm] * premises_scores_one[prf] ** .3
            try: premises_scores[prf] = premises_scores[prf] + scr
            except: premises_scores[prf] = scr
    assert not theorem in premises_scores
    sorted_premises = sorted(premises_scores,
                             key=premises_scores.__getitem__, reverse=True)
    m = premises_scores[sorted_premises[0]] # max
    if m == 0: m = 1 # sometimes m = 0
    # normalize by the best score.
    # NOTE(review): `thm` below is the leftover variable of the loop above,
    # not `theorem` -- this filters out the *last iterated neighbour's* name,
    # which depends on set iteration order; confirm whether `theorem` was
    # intended (the assert above already guarantees `theorem` is absent).
    premises_scores_norm = [(p, premises_scores[p] / m) for p in sorted_premises
                            if not p == thm]
    return premises_scores_norm
# wrapper for knn_one_theorem() useful for using with Parallel
def knnot(t, tf, dtp, dtf, ch, dfn, N, p):
    """Wrapper around knn_one_theorem() that pairs the theorem name with its
    ranking -- convenient as a joblib.Parallel work item."""
    ranking = knn_one_theorem(t, tf, dtp, dtf, ch, dfn, N, p)
    return (t, ranking)
# creates rankings of useful premises for given theorems using knn_one_theorem()
# returns results as a dictionary
# (keys: theorems names, values: lists of premises)
def knn(test_theorems, proofs, params, n_jobs=-1):
    """Create rankings of useful premises for the given theorems with k-NN.

    knn_one_theorem() is run for every test theorem (in parallel via
    joblib); results are returned as a Rankings object keyed by theorem
    name (values: lists of (premise, score) pairs).
    """
    chronology = params['chronology']
    features = params['features']
    # hyper-parameters with defaults: neighbourhood size N and the power
    # used for feature weighting inside similarity()
    N = params['N'] if 'N' in params else 50
    power = params['power'] if 'power' in params else 2
    # separation of train and test
    # assert not set(proofs) & set(test_theorems)
    proofs_train = proofs.with_trivial(set(chronology))
    features_train = features.subset(set(proofs_train))
    dict_features_numbers = features_train.dict_features_numbers()
    with Parallel(n_jobs=n_jobs) as parallel:
        dknnot = delayed(knnot)
        rankings = parallel(
            dknnot(thm, features[thm], proofs_train, features_train, chronology,
                   dict_features_numbers, N, power)
            for thm in test_theorems)
    return Rankings(from_dict=dict(rankings))
| StarcoderdataPython |
3274360 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdAgentsResearch200Ok(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'agent_id': 'int',
'skill_type_id': 'int',
'started_at': 'datetime',
'points_per_day': 'float',
'remainder_points': 'float'
}
attribute_map = {
'agent_id': 'agent_id',
'skill_type_id': 'skill_type_id',
'started_at': 'started_at',
'points_per_day': 'points_per_day',
'remainder_points': 'remainder_points'
}
    def __init__(self, agent_id=None, skill_type_id=None, started_at=None, points_per_day=None, remainder_points=None):  # noqa: E501
        """GetCharactersCharacterIdAgentsResearch200Ok - a model defined in Swagger"""  # noqa: E501

        # backing fields; the property setters below reject None because
        # all five attributes are required by the swagger spec
        self._agent_id = None
        self._skill_type_id = None
        self._started_at = None
        self._points_per_day = None
        self._remainder_points = None
        self.discriminator = None

        # assign through the properties so the None-validation runs
        self.agent_id = agent_id
        self.skill_type_id = skill_type_id
        self.started_at = started_at
        self.points_per_day = points_per_day
        self.remainder_points = remainder_points
    @property
    def agent_id(self):
        """Gets the agent_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501

        agent_id integer  # noqa: E501

        :return: The agent_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :rtype: int
        """
        return self._agent_id

    @agent_id.setter
    def agent_id(self, agent_id):
        """Sets the agent_id of this GetCharactersCharacterIdAgentsResearch200Ok.

        agent_id integer  # noqa: E501

        :param agent_id: The agent_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :type: int
        """
        # required field: reject None
        if agent_id is None:
            raise ValueError("Invalid value for `agent_id`, must not be `None`")  # noqa: E501

        self._agent_id = agent_id
    @property
    def skill_type_id(self):
        """Gets the skill_type_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501

        skill_type_id integer  # noqa: E501

        :return: The skill_type_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :rtype: int
        """
        return self._skill_type_id

    @skill_type_id.setter
    def skill_type_id(self, skill_type_id):
        """Sets the skill_type_id of this GetCharactersCharacterIdAgentsResearch200Ok.

        skill_type_id integer  # noqa: E501

        :param skill_type_id: The skill_type_id of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :type: int
        """
        # required field: reject None
        if skill_type_id is None:
            raise ValueError("Invalid value for `skill_type_id`, must not be `None`")  # noqa: E501

        self._skill_type_id = skill_type_id
    @property
    def started_at(self):
        """Gets the started_at of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501

        started_at string  # noqa: E501

        :return: The started_at of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :rtype: datetime
        """
        return self._started_at

    @started_at.setter
    def started_at(self, started_at):
        """Sets the started_at of this GetCharactersCharacterIdAgentsResearch200Ok.

        started_at string  # noqa: E501

        :param started_at: The started_at of this GetCharactersCharacterIdAgentsResearch200Ok.  # noqa: E501
        :type: datetime
        """
        # required field: reject None
        if started_at is None:
            raise ValueError("Invalid value for `started_at`, must not be `None`")  # noqa: E501

        self._started_at = started_at
@property
def points_per_day(self):
"""Gets the points_per_day of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
points_per_day number # noqa: E501
:return: The points_per_day of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
:rtype: float
"""
return self._points_per_day
@points_per_day.setter
def points_per_day(self, points_per_day):
"""Sets the points_per_day of this GetCharactersCharacterIdAgentsResearch200Ok.
points_per_day number # noqa: E501
:param points_per_day: The points_per_day of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
:type: float
"""
if points_per_day is None:
raise ValueError("Invalid value for `points_per_day`, must not be `None`") # noqa: E501
self._points_per_day = points_per_day
@property
def remainder_points(self):
"""Gets the remainder_points of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
remainder_points number # noqa: E501
:return: The remainder_points of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
:rtype: float
"""
return self._remainder_points
@remainder_points.setter
def remainder_points(self, remainder_points):
"""Sets the remainder_points of this GetCharactersCharacterIdAgentsResearch200Ok.
remainder_points number # noqa: E501
:param remainder_points: The remainder_points of this GetCharactersCharacterIdAgentsResearch200Ok. # noqa: E501
:type: float
"""
if remainder_points is None:
raise ValueError("Invalid value for `remainder_points`, must not be `None`") # noqa: E501
self._remainder_points = remainder_points
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`: delegate to the pretty-printed to_str()."""
        return self.to_str()
    def __eq__(self, other):
        """Return True when *other* is the same model type with equal attributes.

        Note: returns False (rather than NotImplemented) for foreign types,
        so Python never falls back to the reflected comparison.
        """
        if not isinstance(other, GetCharactersCharacterIdAgentsResearch200Ok):
            return False

        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True when the objects differ (logical negation of __eq__)."""
        return not self == other
| StarcoderdataPython |
4821840 | """add friendships table
Revision ID: 6f74c797dbd0
Revises: <PASSWORD>
Create Date: 2017-10-16 14:24:33.050913
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import datetime
utc_now = sa.text("(now() at time zone 'utc')")
def upgrade(engine_name):
    """Apply the migration: create the ck_friendships table and its id sequence.

    A friendship row links two players; the CHECK constraint
    (player1_id < player2_id) guarantees each unordered pair is stored in
    one canonical orientation only.
    """
    print("Upgrading {}".format(engine_name))

    # your upgrade script goes here
    # Sequence is created explicitly so it can be GRANTed to zzp_user below.
    op.execute(sa.schema.CreateSequence(sa.Sequence('ck_friendships_id_seq')))
    op.create_table(
        'ck_friendships',
        sa.Column('id', sa.BigInteger, sa.Sequence('ck_friendships_id_seq'), primary_key=True, server_default=sa.text("nextval('ck_friendships_id_seq'::regclass)")),
        sa.Column('player1_id', sa.Integer, sa.ForeignKey('ck_players.player_id'), nullable=False, index=True),
        sa.Column('player2_id', sa.Integer, sa.ForeignKey('ck_players.player_id'), nullable=False, index=True),
        # create_date defaults server-side to UTC now; modify_date is also
        # refreshed client-side via onupdate.
        sa.Column('create_date', sa.DateTime, nullable=False, server_default=utc_now),
        sa.Column('modify_date', sa.DateTime, nullable=False, server_default=utc_now, onupdate=datetime.datetime.utcnow),
        sa.Column('status', sa.String(20), nullable=False, server_default="active"),
        sa.CheckConstraint('player1_id < player2_id'),
    )
    # Grant application-role access to the new table and its sequence.
    sql = "GRANT INSERT, SELECT, UPDATE, DELETE ON TABLE ck_friendships to zzp_user;"
    op.execute(sql)
    sql = "GRANT ALL ON SEQUENCE ck_friendships_id_seq TO zzp_user;"
    op.execute(sql)
def downgrade(engine_name):
    """Revert the migration: drop the ck_friendships table, then its sequence."""
    print("Downgrading {}".format(engine_name))

    # your downgrade script goes here
    op.drop_table('ck_friendships')
    op.execute(sa.schema.DropSequence(sa.Sequence('ck_friendships_id_seq')))
| StarcoderdataPython |
84434 | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: <NAME>
# =============================================================================
#
# Demo on using ANCF shell elements
#
# =============================================================================
import pychrono as chrono
import pychrono.fea as fea
import pychrono.irrlicht as chronoirr
import math
#print(["Copyright (c) 2017 projectchrono.org\nChrono version: ", chrono.CHRONO_VERSION , "\n\n"])
def CastNode(nb):
    """Down-cast a generic mesh node handle to a ChNodeFEAxyzD."""
    return fea.CastToChNodeFEAxyzD(fea.CastToChNodeFEAbase(nb))
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')

time_step = 1e-3  # NOTE(review): declared but unused; the loop below steps with 0.01

sys = chrono.ChSystemSMC()
sys.Set_G_acc(chrono.ChVectorD(0, 0, -9.8))

print( "-----------------------------------------------------------\n")
print("------------------------------------------------------------\n")
print(" ANCF Shell Elements demo with implicit integration \n")
print( "-----------------------------------------------------------\n")

# Create a mesh, that is a container for groups of elements and their referenced nodes.
mesh = fea.ChMesh()

numFlexBody = 1  # NOTE(review): unused

# Geometry of the plate
plate_lenght_x = 1
plate_lenght_y = 0.1
plate_lenght_z = 0.01

# Specification of the mesh
numDiv_x = 10
numDiv_y = 2
N_x = numDiv_x + 1
N_y = numDiv_y + 1

# Number of elements in the z direction is considered as 1
TotalNumElements = numDiv_x * numDiv_y
TotalNumNodes = N_x * N_y

# For uniform mesh
dx = plate_lenght_x / numDiv_x
dy = plate_lenght_y / numDiv_y
dz = plate_lenght_z

# Create and add the nodes (row-major grid in the XY plane, z = 0)
for i in range(TotalNumNodes) :
    # Node location
    loc_x = (i % N_x) * dx;
    loc_y = (i // N_x) % N_y * dy;
    loc_z = 0;

    # Node direction (shell normal points along +Z)
    dir_x = 0
    dir_y = 0
    dir_z = 1

    # Create the node
    node = fea.ChNodeFEAxyzD(chrono.ChVectorD(loc_x, loc_y, loc_z), chrono.ChVectorD(dir_x, dir_y, dir_z))
    node.SetMass(0)

    # Fix all nodes along the axis X=0 (cantilever boundary condition)
    if (i % (numDiv_x + 1) == 0):
        node.SetFixed(True)

    # Add node to mesh
    mesh.AddNode(node)

# Get a handle to the tip node.
tempnode = mesh.GetNode(TotalNumNodes - 1)
tempfeanode = fea.CastToChNodeFEAbase(tempnode)
nodetip = fea.CastToChNodeFEAxyzD(tempfeanode)

# Create an orthotropic material.
# All layers for all elements share the same material.
rho = 500
E = chrono.ChVectorD(2.1e7, 2.1e7, 2.1e7)
nu = chrono.ChVectorD(0.3, 0.3, 0.3)
G = chrono.ChVectorD(8.0769231e6, 8.0769231e6, 8.0769231e6)
mat = fea.ChMaterialShellANCF(rho, E, nu, G)

# Create the elements
for i in range(TotalNumElements):
    # Adjacent nodes (counter-clockwise corners of one quad)
    node0 = (i // numDiv_x) * N_x + i % numDiv_x
    node1 = (i // numDiv_x) * N_x + i % numDiv_x + 1
    node2 = (i // numDiv_x) * N_x + i % numDiv_x + 1 + N_x
    node3 = (i // numDiv_x) * N_x + i % numDiv_x + N_x

    # Create the element and set its nodes.
    element = fea.ChElementShellANCF_3423()
    element.SetNodes(CastNode(mesh.GetNode(node0)),
                     CastNode(mesh.GetNode(node1)),
                     CastNode(mesh.GetNode(node2)),
                     CastNode(mesh.GetNode(node3)))

    # Set element dimensions
    element.SetDimensions(dx, dy)

    # Add a single layers with a fiber angle of 0 degrees.
    element.AddLayer(dz, 0 * chrono.CH_C_DEG_TO_RAD, mat)

    # Set other element properties
    element.SetAlphaDamp(0.0)    # Structural damping for this element

    # Add element to mesh
    mesh.AddElement(element)

# Add the mesh to the system
sys.Add(mesh)

# -------------------------------------
# Options for visualization in irrlicht
# -------------------------------------

visualizemeshA = chrono.ChVisualShapeFEA(mesh)
visualizemeshA.SetFEMdataType(chrono.ChVisualShapeFEA.DataType_NODE_SPEED_NORM)
visualizemeshA.SetColorscaleMinMax(0.0, 5.50)
visualizemeshA.SetShrinkElements(True, 0.85)
visualizemeshA.SetSmoothFaces(True)
mesh.AddVisualShapeFEA(visualizemeshA)

visualizemeshB = chrono.ChVisualShapeFEA(mesh)
visualizemeshB.SetFEMdataType(chrono.ChVisualShapeFEA.DataType_SURFACE)
visualizemeshB.SetWireframe(True)
visualizemeshB.SetDrawInUndeformedReference(True)
mesh.AddVisualShapeFEA(visualizemeshB)

visualizemeshC = chrono.ChVisualShapeFEA(mesh)
visualizemeshC.SetFEMglyphType(chrono.ChVisualShapeFEA.GlyphType_NODE_DOT_POS)
visualizemeshC.SetFEMdataType(chrono.ChVisualShapeFEA.DataType_NONE)
visualizemeshC.SetSymbolsThickness(0.004)
mesh.AddVisualShapeFEA(visualizemeshC)

visualizemeshD = chrono.ChVisualShapeFEA(mesh)
visualizemeshD.SetFEMglyphType(chrono.ChVisualShapeFEA.GlyphType_ELEM_TENS_STRAIN)
visualizemeshD.SetFEMdataType(chrono.ChVisualShapeFEA.DataType_NONE)
visualizemeshD.SetSymbolsScale(1)
visualizemeshD.SetColorscaleMinMax(-0.5, 5)
visualizemeshD.SetZbufferHide(False)
mesh.AddVisualShapeFEA(visualizemeshD)

# Create the Irrlicht visualization
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('ANCF shells')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(-0.4, -1.3, 0.0), chrono.ChVectorD(0.0, 0.5, -0.1))
vis.AddTypicalLights()

# ----------------------------------
# Perform a dynamic time integration
# ----------------------------------

# Set up solver
solver = chrono.ChSolverMINRES()
sys.SetSolver(solver)
solver.EnableDiagonalPreconditioner(True)
#solver.SetVerbose(True)
sys.SetSolverMaxIterations(100)
sys.SetSolverForceTolerance(1e-10)

# Set up integrator (HHT implicit scheme with step-size control)
stepper = chrono.ChTimestepperHHT(sys)
sys.SetTimestepper(stepper)
stepper.SetAlpha(-0.2)
stepper.SetMaxiters(5)
stepper.SetAbsTolerances(1e-5)
stepper.SetMode(chrono.ChTimestepperHHT.POSITION)
stepper.SetScaling(True)
stepper.SetStepControl(True)
stepper.SetMinStepSize(1e-4)
#stepper.SetVerbose(True)

# Simulation loop
while vis.Run():
    vis.BeginScene()
    vis.DrawAll()
    vis.EndScene()
    sys.DoStepDynamics(0.01)
| StarcoderdataPython |
378 | # Generated by Django 4.0.1 on 2022-04-07 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated follow-up to 0004: drops Order.dateTimeCreated and
    # switches both models to plain AutoField primary keys; the ordered
    # product price becomes an optional CharField.

    dependencies = [
        ('model_api', '0004_remove_order_created_remove_order_id_and_more'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='dateTimeCreated',
        ),
        migrations.AlterField(
            model_name='order',
            name='_id',
            field=models.AutoField(editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='orderedproduct',
            name='_id',
            field=models.AutoField(editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='orderedproduct',
            name='price',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
| StarcoderdataPython |
1623402 | <filename>lnbits/wallets/void.py
from typing import Optional
from .base import InvoiceResponse, PaymentResponse, PaymentStatus, Wallet, Unsupported
class VoidWallet(Wallet):
    """Null wallet backend: every funding-source operation raises Unsupported.

    NOTE(review): presumably used when no real wallet backend is configured,
    so any attempt to create/pay/check invoices fails loudly — confirm
    against the wallet selection code.
    """

    def create_invoice(
        self, amount: int, memo: Optional[str] = None, description_hash: Optional[bytes] = None
    ) -> InvoiceResponse:
        raise Unsupported("")

    def pay_invoice(self, bolt11: str) -> PaymentResponse:
        raise Unsupported("")

    def get_invoice_status(self, checking_id: str) -> PaymentStatus:
        raise Unsupported("")

    def get_payment_status(self, checking_id: str) -> PaymentStatus:
        raise Unsupported("")
| StarcoderdataPython |
1715120 | from config import config
C=config()
from parse_rest.connection import register
register(C['APPLICATION_ID'], C['REST_API_KEY'], master_key=C['MASTER_KEY'])
from parse_rest.datatypes import Object
# Parse-server model class; fields (e.g. .ip) are assigned dynamically.
class ip(Object):
    pass
import urllib2
def getIP():
    """Return this host's public IP as reported by no-ip, or False on failure.

    Bug fix: the failure path returned the undefined name `false`, which
    raised NameError inside the except handler instead of returning a
    falsy value for the caller's `if actual_ip:` check.
    """
    try:
        response = urllib2.urlopen('http://dynupdate.no-ip.com/ip.php')
        return response.read()
    except:
        # Broad on purpose: any network/HTTP failure simply means "no IP".
        return False
# Fetch the current public IP and persist it as a new Parse `ip` row.
actual_ip = getIP()
#actual_ip = "192.168.127.12"
thisIP = ip()
print actual_ip
if actual_ip:
    thisIP.ip = actual_ip
else:
    # Lookup failed: store a sentinel value instead of an address.
    thisIP.ip = 'not valid'
thisIP.save()
| StarcoderdataPython |
4814225 | # -*- coding: cp1252 -*-
import string
import time
import sys
import re
'''content=open("detail.txt","r")
content=content.read()
content=str.lower(content)
#print(" "+content)'''
query='what is', 'define', 'about', 'definition', 'who is'
val='price of', 'the price of', 'the prise of', 'prise of', 'the cost of', 'cost of','the rate of', 'rate of'
def query_search(content,subject):
    """Scan *content* for "<subject> is <definition>." and return the definition span.

    Repeatedly looks for the subject followed by " is "; a candidate is
    rejected (and the scan restarted past it) when a symbol character
    appears between the subject and " is ".  Returns the matched text, or
    the string '-1' when nothing usable is found.

    NOTE(review): Python 2 code (print statements); '-1' is a string
    sentinel, not an int.
    """
    symbols = set('#$.<>*+{}%^!?\//')
    s=''
    fstop=-1
    flag=0
    subject=subject+' '
    while flag==0:
        sub=content.find(subject)
        #print("sub: "+str(sub))
        #time.sleep(2)
        if(sub>=0):
            # Position of " is " relative to the subject occurrence.
            ispos=(content[sub:].find(" is "))
            #print(sub+ispos)
            if ispos>0:
                # Reject the candidate if any symbol sits between
                # the subject and " is ".
                for s in symbols:
                    #print(s)
                    fstop=(content[sub:sub+ispos].find(s))
                    if fstop>0:
                        break
            else:
                print 'data not found'
                #break
                return '-1'
            if(fstop>0):
                # Symbol found: skip past it and rescan.
                content=content[sub+fstop:]
                #time.sleep(2)
                continue
            if(ispos>0):
                # Take everything up to the terminating full stop.
                end=content[sub+ispos:].find(".")
                if(end>0):
                    context=content[sub:sub+ispos+end]
                    #print 'your required data is:'
                    #print(context)
                    flag=1
                    return context
                    #break
                else:
                    content=content[sub+ispos+end:]
                    flag=0
                    continue
        else:
            print 'data not found1'
            return '-1'
            #break
def price_search(content,subject):
    """Scan *content* for a price figure mentioned near *subject*.

    Finds the first " price " token, then extracts the first number-like
    match (optionally Rs.-prefixed) after it.  Returns the matched price
    string, '-1' when the subject/price cannot be located, or None
    (implicitly) when a price token exists but no sentence end follows.

    NOTE(review): Python 2 code; *fstop* is computed but the regex runs on
    the whole remainder after the price token, not just up to *fstop*.
    """
    symbols = set('#$.<>*+{}%^!?\//')
    s=''
    fstop=-1
    flag=0
    subject=subject+' '
    sub=content.find(subject)
    price=''
    tag=0
    #print("sub: "+str(sub))
    #time.sleep(2)
    if(sub>=0):
        tag=(content.find(" price "))
        print('tag: ' + str(tag))
        if tag>0:
            #while flag==0:
            # End of the sentence containing the price token.
            fstop=content[tag:].find('. ')
            print('fstop: ' + str(tag+fstop))
            if(fstop>=0):
                price = re.findall('[rs.]?[Rs.]?\d+[\,]?\d+[\.]?\d*',content[tag:])
                if not price:
                    #content=content[tag+fstop:]
                    #print(content)
                    print 'empty'
                    #tag=0
                    #continue
                else:
                    #print(content[tag:tag+fstop])
                    print(price)
                    print('price is: ' + str(price[0]))
                    flag=1
                    return price[0]
            else:
                print 'no price found'
                return '-1'
    else:
        print 'no matching results found'
        return '-1'
def Tsearch(content,subject):
    """Dispatch a user query to price_search or query_search.

    Strips a leading question phrase ("what is", "define", ...) from
    *subject*; if a price phrase ("price of", "cost of", ...) remains, the
    query is treated as a price lookup, otherwise as a definition lookup.
    Returns '0' on success, '-1' on failure, None for an empty subject.

    NOTE(review): Python 2 code; the trailing `else` is a for-else on the
    price-phrase loop, so definition search runs only when no price phrase
    matched (the loop never `break`s — each match path `return`s).
    """
    #subject=str.lower(raw_input("enter your query: "))
    v=''
    result=''
    # Lower-case the content; unicode vs str handled for Python 2.
    try:
        content=unicode.lower(content)
    except:
        try:
            content=str.lower(content)
        except:
            print 'conversion error in Tsearch'
    subject=str.lower(subject)
    if subject=="exit" or subject=="quit":
        exit()
    # Strip the first matching question phrase from the front.
    for q in query:
        if q in subject:
            #print(q)
            subject=subject[len(q)+1:]
            break
    if subject=='':
        print("subject: No subject")
        return
    for v in val:
        if v in subject:
            #ad = 'price'
            pos=subject.find(v)
            subject = subject[pos+len(v):]
            print("subject in price: "+subject)
            result = price_search(content,subject)
            if result == '-1':
                return '-1'
            else:
                print("your required data is: " + result)
                return '0'
            #break
    else:
        print("subject: "+subject)
        result = query_search(content,subject)
        if result=='-1':
            return '-1'
        else:
            print("your required data is: " + result)
            return '0'
| StarcoderdataPython |
3392123 | """Leetcode 7. Reverse Integer
Easy
URL: https://leetcode.com/problems/reverse-integer/description/
Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
click to show spoilers.
Note:
The input is assumed to be a 32-bit signed integer.
Your function should return 0 when the reversed integer overflows.
"""
class SolutionNegativeOverflow(object):
    """Reverse digits via string slicing; clamp 32-bit overflow to 0."""

    def reverse(self, x: int) -> int:
        """Return x with its decimal digits reversed, preserving sign.

        Time complexity: O(n) in the digit count; space complexity: O(n).
        Any result outside the signed 32-bit range yields 0.
        """
        digits = str(x)
        if digits.startswith('-'):
            # Reverse everything after the sign, keep '-' in front.
            rev = int('-' + digits[:0:-1])
        else:
            rev = int(digits[::-1])
        # 0x7FFFFFFF == 2**31 - 1, the signed 32-bit maximum.
        return 0 if abs(rev) > 0x7FFFFFFF else rev
class SolutionModIntegerDivIter(object):
    """Reverse digits arithmetically (mod / floor-div); clamp overflow to 0."""

    def reverse(self, x: int) -> int:
        """Return x with its decimal digits reversed, preserving sign.

        Time complexity: O(n) in the digit count; space complexity: O(1).
        Any result outside the signed 32-bit range yields 0.
        """
        sign = -1 if x < 0 else 1
        x = abs(x)
        rev = 0
        while x:
            x, digit = divmod(x, 10)
            rev = rev * 10 + digit
        # 0x7FFFFFFF == 2**31 - 1, the signed 32-bit maximum.
        if rev > 0x7FFFFFFF:
            return 0
        return sign * rev
def main():
    """Exercise both reverse() implementations on sample and boundary inputs."""
    for value in (123, -123, -pow(2, 31)):
        print(SolutionNegativeOverflow().reverse(value))
        print(SolutionModIntegerDivIter().reverse(value))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4842321 | <gh_stars>1-10
#!/usr/bin/env python3
import graph
| StarcoderdataPython |
3278946 | <reponame>DonaldMcC/kite_ros2<gh_stars>0
#!/usr/bin/env python
# this gets the barangle from the arduino board
import rospy
from std_msgs.msg import Int16
from kite_funcs import getangle
from mainclasses import calcbarangle, inferangle
barangle = 0
resistance = 200
mockresistance = 200
mockangle = 0
def callback(data):
    # ROS subscriber callback: cache the latest raw resistance reading
    # from the real bar sensor.
    global resistance
    resistance = data.data
    return
def callmock(data):
    # ROS subscriber callback: cache the latest simulated (mock)
    # resistance reading.
    global mockresistance
    mockresistance = data.data
    return
def listen_kiteangle(message):
    """Subscribe to *message*: real readings go to callback, others to callmock."""
    handler = callback if message == 'kiteangle' else callmock
    rospy.Subscriber(message, Int16, handler, queue_size=1)
def get_actmockangle(kite, base, control, config):
    """Convert the latest mock resistance reading to a bar angle.

    Bug fix: this previously read the real `resistance` global even though
    the function exists for the mock channel — `mockresistance` was
    declared global here but never used.
    """
    global mockangle
    mockangle = getangle(mockresistance, base.maxleft, base.maxright,
                         base.resistleft, base.resistright, base.resistcentre)
    return mockangle
# this should always return barangle except when barangle being set from the kite for simulation
# or on manbar when bar should be freely controlled
def get_barangle(kite, base, control, config):
    """Return the current bar angle for the active setup mode."""
    global barangle
    if config.setup == 'KiteBarActual':
        # Simulated bar: the bar angle is derived from the kite itself.
        return kite.kiteangle / base.kitebarratio
    # Automated flight: convert the latest sensor resistance to an angle.
    barangle = getangle(resistance, base.maxleft, base.maxright,
                        base.resistleft, base.resistright, base.resistcentre)
    return barangle
def get_angles(kite, base, control, config):
    """Refresh all angle state on *base*/*kite* from the latest readings.

    Copies the cached resistance, recomputes the measured bar angle, then
    updates the target (and, depending on config.setup, the kite angle or
    the inferred bar angle).
    """
    base.resistance = resistance
    base.barangle = get_barangle(kite, base, control, config)
    # print('setr to ' + str(resistance))
    if config.setup == 'KiteBarTarget':
        # Target tracks the kite directly via the fixed ratio.
        base.targetbarangle = kite.kiteangle / base.kitebarratio
    else:
        base.targetbarangle = calcbarangle(kite, base, control)
    if config.setup == 'BarKiteActual':  # derive kite from bar
        kite.kiteangle = base.barangle * base.kitebarratio
    elif config.setup == 'KiteBarInfer':
        base.inferbarangle = inferangle(kite, base, control)
    return
if __name__ == '__main__':
    # Standalone mode: start the ROS node, subscribe to the real bar-angle
    # topic, and hand control to the ROS event loop.
    rospy.init_node('kite_main', anonymous=False)
    listen_kiteangle('kiteangle')
    rospy.spin()
| StarcoderdataPython |
3327716 | import unittest
class DictNested(dict):
    """
    Naive dictionary extension to work with deeply nested keys.

    Class provides methods to get values and dictionaries from deeply nested
    dictionary and set/reset/delete values in nested dictionary.  All methods
    also work when called unbound on a plain dict, e.g.
    ``DictNested.set_nested(d, "a.b.c")``.
    """

    def check_input(self, path):
        """Validate *self* and normalise *path* into a list of string keys.

        Accepted path forms:
        - "a.b.c" -> ["a", "b", "c"]
        - "abc"   -> ["a", "b", "c"]  (split into single characters)
        - list    -> items coerced to str when any is not a string
        - tuple   -> must already contain only strings

        :raises NotImplementedError: for unsupported self/path types.
        """
        # DictNested subclasses dict, so one isinstance check covers both.
        if not isinstance(self, dict):
            raise NotImplementedError
        if isinstance(path, str):
            if "." in path:
                path = path.split(".")
            else:
                path = list(path)
        elif isinstance(path, list):
            if not all(isinstance(item, str) for item in path):
                # Bug fix: the coerced list used to be built and discarded,
                # leaving non-string keys in the path.
                path = [str(i) for i in path]
        elif isinstance(path, tuple):
            if not all(isinstance(item, str) for item in path):
                raise NotImplementedError
        else:
            raise NotImplementedError
        return self, path

    def get_nested(self, path):
        """
        Return the entry reached by *path*, or None when nothing is found.

        NOTE(review): the lookup is naive — every key is tested against the
        first-level mapping, so only single-component paths descend as one
        would expect; confirm intent before relying on deep paths.
        """
        self, path = DictNested.check_input(self, path)
        val = self
        tmp = None
        for key in path:
            if key in val:
                tmp = val[key]
            else:
                # Key missing: None if nothing matched yet, otherwise the
                # last mapping that was searched.
                if tmp is None:
                    return None
                return val
            self = tmp
        return self

    def set_nested(self, path, content=None, reset=False):
        """
        Create (or overwrite) the nested entry addressed by *path*.

        If *content* is given it becomes the value of the final key and
        implies ``reset=True``.  When the final key already holds a
        non-dict value and ``reset`` is False, the dictionary is left
        untouched.
        """
        self, path = DictNested.check_input(self, path)
        val = self
        if content is not None:
            reset = True
        # Walk down until a missing key or a non-dict value is hit.
        level = 0
        for index, key in enumerate(path):
            if key in val:
                if isinstance(val[key], dict):
                    val = val[key]
                else:
                    if index == len(path) - 1 and reset is False:
                        # Existing leaf value, no reset requested: no-op.
                        return
                    level = index
                    break
            else:
                level = index
                break
        # Build the missing tail bottom-up, then graft it at *level*.
        tmp = content
        for index, key in enumerate(path[:level:-1]):
            if index == 0:
                tmp = {key: content}
            else:
                tmp = {key: tmp}
        val[path[level]] = tmp

    def del_nested(self, path):
        """
        Remove the entry addressed by *path*:

        - a single-key path deletes the key outright (like ``del d[key]``)
        - a nested dict value is emptied to ``{}``
        - any other nested value is reset to ``None``

        :raises KeyError: when a path component is missing.
        """
        self, path = DictNested.check_input(self, path)
        for index, key in enumerate(path):
            if key not in self:
                raise KeyError
            if len(path) == 1:
                del self[key]
            elif isinstance(self[key], dict):
                if index < len(path) - 1:
                    self = self[key]  # descend one level
                else:
                    self[key] = {}
            else:
                self[key] = None
class TestNestedDictionary(unittest.TestCase):
    """Unit tests for DictNested covering get/set/del on nested paths."""

    def test_get_default_dict(self):
        # Missing path yields None; single key returns its value.
        dictionary = {"a": {"b": None}}
        res = DictNested.get_nested(dictionary, "test")
        assert res == None
        res = DictNested.get_nested(dictionary, "a")
        assert res == {"b": None}

    def test_get_empty_dict(self):
        dictionary = {}
        res = DictNested.get_nested(dictionary, "test")
        assert res == None

    def test_set_default(self):
        # "abc" is split into single-character keys.
        dictionary = {"some_key": "some_value"}
        DictNested.set_nested(dictionary, "abc")
        assert dictionary == {
            "some_key": "some_value",
            "a": {"b": {"c": None}}
        }

    def test_set_has_1_lvl(self):
        """
        In case dictionary already has keys and values, new ones should be
        added
        """
        dictionary = {"a": None}
        DictNested.set_nested(dictionary, ["a", "b", "c"])
        assert dictionary == {"a": {"b": {"c": None}}}
        dictionary = {"a": "some_value"}
        DictNested.set_nested(dictionary, ["a", "b", "c"])
        assert dictionary == {"a": {"b": {"c": None}}}

    def test_set_has_2_lvl(self):
        dictionary = {"a": {"b": None}}
        DictNested.set_nested(dictionary, ["a", "b", "c"])
        assert dictionary == {"a": {"b": {"c": None}}}

    def test_set_has_last_lvl(self):
        """
        In case dictionary already has the last level, nothing should change.
        If reset is True, value resets to None.
        """
        dictionary = {"a": {"b": {"c": {"d": None}}}}
        DictNested.set_nested(dictionary, ["a", "b", "c", "d"])
        assert dictionary == {"a": {"b": {"c": {"d": None}}}}
        dictionary = {"a": {"b": {"c": {"d": "some_value"}}}}
        DictNested.set_nested(dictionary, ["a", "b", "c", "d"])
        assert dictionary == {"a": {"b": {"c": {"d": "some_value"}}}}
        dictionary = {"a": {"b": {"c": {"d": "some_value"}}}}
        DictNested.set_nested(dictionary, ["a", "b", "c", "d"], reset = True)
        dictionary = {"a": {"b": {"c": {"d": None}}}}

    def test_set_with_content(self):
        dictionary = {"a": {"b": {"c": {"d": None}}}}
        DictNested.set_nested(dictionary, ["a", "b", "c", "d"],\
            content = ("e", "f"))
        assert dictionary == {"a": {"b": {"c": {"d": ("e", "f")}}}}

    def test_set_empty_init_dict(self):
        dictionary = {}
        DictNested.set_nested(dictionary, ["a", "b", "c", "d"])
        assert dictionary == {"a": {"b": {"c": {"d": None}}}}

    def test_del_entry(self):
        # Leaf value -> None, dict value -> {}, single key -> real delete.
        dictionary = {"a": "some_value", "b": {"c": {"d": "some_value"}},
                      "c": "some_value"}
        DictNested.del_nested(dictionary, "bcd")
        assert dictionary == {"a": "some_value", "b": {"c": {"d": None}},
                              "c": "some_value"}
        dictionary = {"a": "some_value", "b": {"c": {"d": "some_value"}},
                      "c": "some_value"}
        DictNested.del_nested(dictionary, "bc")
        assert dictionary == {"a": "some_value", "b": {"c": {}},
                              "c": "some_value"}
        dictionary = {"a": "some_value", "b": {"c": {"d": "some_value"}},
                      "c": "some_value"}
        DictNested.del_nested(dictionary, "b")
        assert dictionary == {"a": "some_value", "c": "some_value"}
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
183104 | <filename>bin/list_rc_log.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import datetime
import os
import pytz
import sys
import re
sys.path.append(os.path.join(sys.path[0], "../", "lib"))
import stable_email # noqa: E402
def get_number(s):
    """Return the first run of digits in *s* as an int, or None when absent."""
    match = re.search(r'(\d+)', s)
    if match is None:
        return None
    return int(match.group(1))
if __name__ == "__main__":
    # Arguments: either a relative window (--days) or an absolute UTC
    # start date (--since), mutually exclusive.
    ap = argparse.ArgumentParser()
    g = ap.add_mutually_exclusive_group(required=False)
    g.add_argument(
        "-d",
        "--days",
        help="Number of days back to look at; default is 7.",
        type=int,
        default=7,
    )
    g.add_argument(
        "-s",
        "--since",
        help="Look as far as the given date (UTC).",
        type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").replace(
            tzinfo=pytz.utc
        ),
    )
    args = ap.parse_args()

    NOW = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    if not args.since:
        limit = datetime.timedelta(days=args.days)
        DT_LIMIT = NOW - limit
    else:
        DT_LIMIT = args.since

    # Find review requests coming from Greg
    from_greg = stable_email.get_review_requests(DT_LIMIT)

    # Find oldest review request (will stop next search at this point)
    oldest = NOW
    for msgid in from_greg.keys():
        commit = from_greg[msgid]["request"]
        dt = commit.committed_datetime
        if dt < oldest:
            oldest = dt
    print("Oldest: %s" % oldest)

    # Look for replies to Greg's emails
    from_greg = stable_email.get_review_replies(oldest, from_greg)

    print("* Computing elapsed time...")
    # rclog maps date -> {sla mark -> [kernel versions]}.
    rclog = {}
    for msgid in from_greg.keys():
        request_commit = from_greg[msgid]["request"]
        r = stable_email.Review(request_commit, None)
        ymd = r.get_ymd()
        linux_ver = r.get_linux_version()
        # Did we record any review replies?
        if "replies" in from_greg[msgid]:
            # If so, complete the Review object
            # NOTE(review): only the LAST reply is kept on r.reply.
            for reply_msg in from_greg[msgid]["replies"]:
                r.reply = reply_msg
            sla = r.get_sla_mark()
            # Print summary
            if not r.get_regressions_detected():
                regression_summary = "No regressions reported"
            else:
                regression_summary = "REGRESSIONS REPORTED!"
                linux_ver += "-REGRESSIONS"
            print(
                "[%s] %s: %s (%s) %s (from %s)"
                % (
                    ymd,
                    linux_ver,
                    r.get_elapsed_time(),
                    r.get_sla_mark(),
                    regression_summary,
                    r.get_from(),
                )
            )
            if ymd not in rclog:
                rclog[ymd] = {sla: [linux_ver]}
            else:
                if sla in rclog[ymd]:
                    rclog[ymd][sla].append(linux_ver)
                else:
                    rclog[ymd][sla] = [linux_ver]
        else:
            print("[%s] %s: No reply yet (%s)" % (ymd, linux_ver, r.get_sla_mark()))

    # cheap json
    print(str(rclog).replace("'", '"'))
    # {'2019-08-09': {'<48h': ['4.4.189']}, '2019-08-08': {'<24h': ['5.2.8', '4.19.66', '4.14.138']}}
    print("")

    # Markdown summary: newest date first, SLA buckets sorted by their
    # numeric deadline, versions sorted numerically (not lexically).
    for date in sorted(rclog, reverse=True):
        slas = rclog[date]
        print("### {}".format(date))
        for sla in sorted(slas, key=lambda sla: get_number(sla)):
            releases = slas[sla]
            releases.sort(key=lambda s: list(map(get_number, s.split('.'))))
            print("#### {}".format(", ".join(releases)))
            print("<!-- sla {} {} -->".format(sla.strip("h"), len(releases)))
            print("- XXX in {}".format(sla))
        print("")
| StarcoderdataPython |
4835316 | from typing import List
import os
import shutil
def get_msg_name(size: int, unit: str) -> str:
    """Build the .msg filename for a stamped message of the given size/unit."""
    return "Stamped" + str(size) + unit + ".msg"
def get_msg_content(byte_size: int) -> List[str]:
    """Return the two .msg lines: the header reference and a fixed-size byte array."""
    header_line = "performance_test_msgs/PerformanceHeader header\n"
    data_line = "byte[" + str(byte_size) + "] data"
    return [header_line, data_line]
def convert_mb_to_byte(mb: int) -> int:
    """Convert mebibytes to bytes."""
    bytes_per_mb = 1024 * 1024
    return bytes_per_mb * mb
def convert_kb_to_byte(kb: int) -> int:
    """Convert kibibytes to bytes."""
    return kb * 1024
def convert_size_to_byte(size: int, unit: str):
    """Convert *size* in the given unit ('MB'/'KB', case-insensitive) to bytes.

    Any other unit is treated as raw bytes and returned unchanged.

    Bug fix: the 'MB' branch previously applied the KB conversion
    (convert_kb_to_byte), so megabyte sizes came out 1024x too small;
    convert_mb_to_byte existed but was never called.
    """
    unit = unit.upper()
    if unit == 'MB':
        return 1024 * 1024 * size
    if unit == 'KB':
        return 1024 * size
    return size
if __name__ == '__main__':
    ################################################
    # fill in your message sizes here
    #############################################
    # (size, unit) pairs; unit is matched case-insensitively against
    # MB/KB, anything else is treated as raw bytes.
    msg_sizes = [
        (32, 'b'),
        (256, 'b'),
        (512, 'b'),
        (1024, 'b'),
        (4096, 'b'),
        (16384, 'b'),
        (63000, 'b')
    ]
    ############################################
    ######################################
    for msg_size in msg_sizes:
        size = msg_size[0]
        unit = msg_size[1]
        msg_name = get_msg_name(size, unit)
        msg_content = get_msg_content(convert_size_to_byte(size, unit))
        # Emit the .msg definition into the msg/ directory...
        with open(os.path.join('msg', msg_name), 'w') as f:
            f.writelines(msg_content)
        # ...and append its path to the CMake include list.
        with open('add_cmake.txt', 'a') as f:
            f.writelines('msg/' + msg_name + '\n')
| StarcoderdataPython |
1770479 | <reponame>chrisconley/python-data-structures
"""
LinkedList implementation from Section 1.3 pgs 150
"""
class Queue:
    """FIFO queue backed by a doubly linked list (enqueue at the tail,
    dequeue at the head); all operations are O(1).
    """

    def __init__(self):
        self._first = None   # head: next item to dequeue
        self._last = None    # tail: most recently enqueued item
        self._size = 0

    def enqueue(self, item):
        """Append *item* at the tail of the queue."""
        if self._size >= 1:
            # Link the new node behind the current tail.
            old_last = self._last
            node = Node(item, prev_item=old_last)
            old_last.next_item = node
            self._last = node
        else:
            # Empty queue: the new node is both head and tail.
            node = Node(item)
            self._first = node
            self._last = node
        self._size += 1

    def dequeue(self):
        """Remove and return the head item.

        :raises Exception: when the queue is empty.
        """
        if self._size == 0:
            raise Exception('Not allowed')
        item = self._first.item
        if self._size >= 2:
            self._first = self._first.next_item
            # Bug fix: drop the dangling back-reference so the removed
            # node can actually be garbage-collected.
            self._first.prev_item = None
        else:
            # Last item removed: queue becomes empty.
            self._first = None
            self._last = None
        self._size -= 1
        return item

    def is_empty(self):
        """Return True when the queue holds no items."""
        return self._size == 0

    @property
    def size(self):
        """Number of items currently in the queue."""
        return self._size


class Node:
    """Doubly linked list node holding one queue item."""

    def __init__(self, item, next_item=None, prev_item=None):
        self.item = item
        self.next_item = next_item
        self.prev_item = prev_item
| StarcoderdataPython |
1777514 | from django.db import models
from django.core.exceptions import ValidationError
from CadetApp.models import Cadet
# Create your models here.
class Meeting(models.Model):
    """A cadet meeting held on a specific date within a school term."""

    # Cadet-year terms a meeting can belong to.
    TERM_CHOICES = [
        (1, 'Term 1'),
        (2, 'Term 2'),
        (3, 'Term 3'),
        (4, 'Term 4'),
    ]
    term = models.IntegerField(choices=TERM_CHOICES,)
    # At most one meeting per calendar date.
    date = models.DateField(unique=True)

    def __str__(self):
        return "term " + str(self.term) + " " + str(self.date)
class Meeting_Cadet(models.Model):
    """Abstract link between a meeting and a cadet; base for attendance records."""

    meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
    cadet = models.ForeignKey(Cadet, on_delete=models.CASCADE)

    class Meta:
        abstract = True
class Attendance(Meeting_Cadet):
    """Record that a cadet was present at a meeting."""

    # True when the cadet wore the correct uniform at this meeting.
    uniform = models.BooleanField()

    def __str__(self):
        return str(self.cadet) + " " + str(self.meeting)

    def clean(self):
        # A cadet cannot be recorded both absent and present for the
        # same meeting (mirror check lives on Absence.clean).
        if Absence.objects.filter(meeting=self.meeting, cadet=self.cadet).exists():
            raise ValidationError('Cadet cannot be present and absent at same time')

    class Meta:
        # One attendance record per (meeting, cadet) pair.
        unique_together = ('meeting', 'cadet',)
class Absence(Meeting_Cadet):
    """Record that a cadet was absent from a meeting, with a reason code."""

    REASON_CODES = [
        ('u', 'Unexplained'),
        ('e', 'Exams'),
        ('r', 'Religious Leave'),
        ('o', 'Other'),
    ]
    reason_code = models.CharField(max_length=1,choices=REASON_CODES, default='o')

    def __str__(self):
        return str(self.cadet) + " " + str(self.meeting)

    def clean(self):
        # A cadet cannot be recorded both present and absent for the
        # same meeting (mirror check lives on Attendance.clean).
        if Attendance.objects.filter(meeting=self.meeting, cadet=self.cadet).exists():
            raise ValidationError('Cadet cannot be present and absent at same time')

    class Meta:
        # One absence record per (meeting, cadet) pair.
        unique_together = ('meeting', 'cadet',)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.